#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ppscan.py
MIT license (c) 2018 Asylum Computer Services LLC
"""
import re
import sys
import os
import argparse
from time import gmtime, strftime
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# class Cget
# gets a character at a time from the working buffer.
# at EOL with some text, returns a space
# at EOL with no text, returns '\n'
# at EOF returns -1
class Cget(object):
def __init__(self, buf):
self.wb = buf
self.cp = 0
self.ln = 0
self.pcp = 0
self.pln = 0
# getc returns one character and advances
def getc(self):
if self.ln == len(self.wb):
return -1 # no more text
# are we at the end of a line?
if self.cp == len(self.wb[self.ln]):
# yes. was there some text already on this line
if self.cp > 0:
# yes there was. return a space
ch = " "
else:
# nope. this is a blank line
ch = '\n'
self.ln += 1
self.cp = 0
else:
ch = self.wb[self.ln][self.cp]
self.cp += 1
self.pcp = self.cp # in case there is peek-ahead,
self.pln = self.ln # start here
return ch
# peeks ahead independently of the main ln, cp
# can be called repeatedly
def peekc(self):
if self.pln == len(self.wb):
return -1 # no more text
# are we at the end of a line?
if self.pcp == len(self.wb[self.pln]):
# yes. was there some text already on this line
if self.pcp > 0:
# yes there was. return a space
ch = " "
else:
# nope. this is a blank line
ch = '\n'
self.pln += 1
self.pcp = 0
else:
ch = self.wb[self.pln][self.pcp]
self.pcp += 1
return ch
# zero based
def where(self):
w = (self.ln, self.cp) # line and character position
return w
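# Illustrative example (not part of the original script): with
#   buf = ["ab", "", "c"]
# successive getc() calls return 'a', 'b', ' ' (end of a line that had text),
# '\n' (blank line), 'c', ' ', and finally -1 at end of file. peekc() walks
# the same sequence ahead of getc() without moving the main position.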
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# class Stack. traditional LIFO (last-in, first-out) stack
class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items) - 1]
def size(self):
return len(self.items)
def show(self):
return self.items
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
class Ppscan(object):
def __init__(self, args):
self.srcfile = args['infile']
self.outfile = args['outfile']
self.desc = args['desc']
self.verbose = args['verbose']
self.VERSION = "2018.04.02"
self.encoding = ""
self.wb = [] # list: one entry per line of source file
self.wl = {} # wordlist in a map, from wordfile.txt
self.wlf = {} # first letter removed
self.wll = {} # last letter removed
self.msgs = [] # messages file
self.rp = [] # report list
self.OSQ = '\u231c'
self.CSQ = '\u231d'
self.bothsq = False # true if both straight and curly quotes found
self.OP = r"(?:^|[\s‘“—\-_])" # define left edge of word
self.CP = r"(?:[ !;:'\".,\?'’—\-\s]|$)" # define right edge of a word
self.wfile = args['lang']
self.dictloc = "wordlists/master." + self.wfile # dictionary wordlist file
self.tcqlocs = [] # temporary close quote locations
self.stcqlocs = [] # saved temporary close quote locations
# display (fatal) error and exit
def fatal(self, message):
sys.stderr.write("fatal: " + message + "\n")
exit(1)
# load file from specified source file into working buffer
# accepts UTF-8 with or without BOM
# list of lines has no terminators (LF/CR)
def loadFile(self):
try:
wbuf = open(self.srcfile, "r", encoding='UTF-8').read()
self.encoding = "UTF-8"
self.wb = wbuf.split("\n")
# remove BOM on first line if present
t = ":".join("{0:x}".format(ord(c)) for c in self.wb[0])
if t[0:4] == 'feff':
self.wb[0] = self.wb[0][1:]
except UnicodeDecodeError:
wbuf = open(self.srcfile, "r", encoding='Latin-1').read()
self.encoding = "Latin-1"
wbuf = wbuf.encode('utf8').decode('latin-1')
self.wb = wbuf.split("\n")
except Exception as e:
self.fatal(
"loadFile: cannot open source file {}\n{}".format(self.srcfile, e))
self.wb = [s.rstrip() for s in self.wb]
# load a wordlist from an available dictionary
def loadDict(self):
my_file = os.path.abspath(self.dictloc)
if os.path.isfile(my_file):
tmp = open(my_file, "r", encoding="utf-8").read()
twl = tmp.split("\n")
twl = [s.rstrip() for s in twl]
# build dictionaries:
# wl complete words
# wlf words with first letter removed
# wll words with last letter removed
for word in twl:
if not word.startswith('#'): # comments, if any
self.wl[word] = '1'
self.wlf[word[1:]] = '1'
self.wll[word[:-1]] = '1'
else:
self.fatal("no dictionary file found at {}".format(my_file))
def check_mixed(self):
curly = False
straight = False
for line in self.wb:
if re.search(r"[\"']", line):
straight = True
if re.search(r"[“”‘’]", line):
curly = True
if straight and curly:
self.bothsq = True
def internal_contractions(self):
re1 = re.compile(r"([a-z])’([a-z])", re.IGNORECASE)
re2 = re.compile(r"([a-z])’([a-z])’([a-z])", re.IGNORECASE)
re3 = re.compile(r"([a-z])’([a-z])’([a-z])’([a-z])", re.IGNORECASE)
for i, _ in enumerate(self.wb):
self.wb[i] = re3.sub(r"\1"+self.CSQ+r"\2" +
self.CSQ+r"\3"+self.CSQ+r"\4", self.wb[i])
self.wb[i] = re2.sub(r"\1"+self.CSQ+r"\2" +
self.CSQ+r"\3", self.wb[i])
self.wb[i] = re1.sub(r"\1"+self.CSQ+r"\2", self.wb[i])
return
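# Illustrative example: after internal_contractions() a word such as "didn’t"
# is stored as "didn" + CSQ + "t", marking the apostrophe as accepted so the
# later single-quote checks do not treat it as a candidate close quote.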
def closing_accept(self):
fmap = {}
rlist = []
re1 = re.compile(r"{}(\w+’){}".format(self.OP, self.CP))
for i, line in enumerate(self.wb):
m = re1.finditer(line)
if m:
# keep track of how frequently the word occurs
for _, t in enumerate(m):
wd = t.group(1)
if wd in fmap:
fmap[wd] += 1
else:
fmap[wd] = 1
# go through the map: if dropping the final apostrophe does not leave a
# dictionary word, it must be a contraction; if it does leave a word, only
# accept the form when it occurs very frequently
for tw in fmap:
if tw[:-1].lower() in self.wl:
# base is in dictionary. It might be a close single quote
# print("in dict: {} {}".format(tw, fmap[tw]))
# accept if used very frequently
if fmap[tw] > 5:
# print(tw, fmap[tw])
rlist.append(tw)
else:
# it is not in dictionary. add to replace list
# print("not in dict: {} {}".format(tw, fmap[tw]))
rlist.append(tw)
for i, _ in enumerate(self.wb):
for wd in rlist:
wd1 = wd[:-1]+self.CSQ
self.wb[i] = re.sub("({}){}({})".format(
self.OP, wd, self.CP), r"\1{}\2".format(wd1), self.wb[i])
def opening_accept(self):
fmap = {}
rlist = []
re1 = re.compile(r"{}(’\w+){}".format(self.OP, self.CP))
for i, line in enumerate(self.wb):
m = re1.finditer(line)
if m:
# keep track of how frequently the word occurs
for _, t in enumerate(m):
wd = t.group(1)
if wd in fmap:
fmap[wd] += 1
else:
fmap[wd] = 1
# go through the map: if dropping the lead apostrophe does not leave a
# dictionary word, it must be a contraction; if it does leave a word, only
# accept the form when it occurs very frequently
for tw in fmap:
if tw[1:].lower() in self.wl:
# base is in dictionary. It might be an open single quote
# print("in dict: {} {}".format(tw, fmap[tw]))
# accept if used very frequently
if fmap[tw] > 5:
# print(tw, fmap[tw])
rlist.append(tw)
else:
# it is not in dictionary. add to replace list
# print("not in dict: {} {}".format(tw, fmap[tw]))
rlist.append(tw)
for i, _ in enumerate(self.wb):
for wd in rlist:
wd1 = self.CSQ+wd[1:]
self.wb[i] = re.sub("({}){}({})".format(
self.OP, wd, self.CP), r"\1{}\2".format(wd1), self.wb[i])
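# Illustrative example for the two routines above: if the text contains "goin’"
# and "goin" is not in the wordlist, closing_accept() rewrites it as
# "goin" + CSQ, accepting the trailing apostrophe as part of the word; likewise
# opening_accept() turns "’gainst" into CSQ + "gainst" when "gainst" is not a
# dictionary word. Forms whose base is a real word are accepted only when they
# occur more than five times.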
# limit common forms to those that do not become a word when the
# apostrophe is removed; otherwise the apostrophe might be the start or end
# of a single-quoted phrase
def common_forms(self):
commons_lead = {
"’em", "’a’", "’n’", "’twill", "’twon’t", "’twas", "’tain’t", "’taint", "’twouldn’t",
"’twasn’t", "’twere", "’twould", "’tis", "’twarn’t", "’tisn’t", "’twixt", "’till",
"’bout", "’casion", "’shamed", "’lowance", "’n", "’s", "’d", "’m", "’ave",
"’cordingly", "’baccy", "’cept", "’stead", "’spose", "’chute", "’im",
"’u’d", "’tend", "’rickshaw", "’appen", "’oo", "’urt", "’ud", "’ope", "’ow",
# higher risk follows
"’cause", "’way"
}
commons_tail = {
"especial’", "o’", "t’", "ag’in’", "ol’", "tha’", "canna’", "an’", "d’",
"G’-by", "ha’", "tak’", "th’", "i’", "wi’", "yo’", "ver’", "don’", "jes’",
"aroun’", "wan’", "M’sieu’", "nuthin’"
}
commons_both = {
"’cordin’"
}
for i, _ in enumerate(self.wb):
for _, common in enumerate(commons_lead):
c2 = re.sub("’", self.CSQ, common)
self.wb[i] = re.sub(r"({}){}({})".format(
self.OP, common, self.CP), r"\1{}\2".format(c2), self.wb[i])
# leading capital tests
common = "{}{}{}".format(
common[0], common[1].upper(), common[2:])
c2 = re.sub("’", self.CSQ, common)
import pandas as pd
import plotly.express as px
import dash_table
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
from dataclasses import dataclass, field
from typing import List
import random
import numpy
from dash_extensions import Download
from dash_extensions.snippets import send_data_frame
import base64
import io
import more_itertools
import plotly.graph_objects as go
def is_number(a):
if not a:
return False
try:
float(a)
return True
except ValueError:
return False
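# e.g. is_number("3.5") -> True, is_number("") -> False, is_number("abc") -> False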
# ------------------------ Anomalies Class -----------------------
@dataclass
class Anomalies:
# The dataframe
data: pd.DataFrame = None
# indexes from previous simulation
siv_ps: List = field(default_factory=lambda: [])
ds_ps: List = field(default_factory=lambda: [])
sv_ps: List = field(default_factory=lambda: [])
# Persistence min and max
persistence_min: int = 10
persistence_max: int = 10
# Amplitude min and max
amplitude_min: float = 1.
amplitude_max: float = 1.
# Minimum distance between anomalies
anomalies_dist_min: int = 5
def save_data_with_anomalies(self, csv_name="anomalies.csv"):
""" Method that saves the dataframe with anomalies """
self.data.to_csv(path_or_buf=csv_name, index=False)
def add_anomalies_name_and_value_columns(self):
""" Method that add anomalies columns name and values """
self.data["anomaly_name"] = ""
self.data["anomaly_value"] = ""
def choose_index(self, persistence=0):
# Find all the allowed indexes
allowed_indexes = list(numpy.where(self.data["anomaly_value"] == "")[0])
# Find the indexes that must be removed
indexes_to_remove = []
for ai in more_itertools.consecutive_groups(allowed_indexes):
ai_list = list(ai)
if ai_list:
# Remove points before
indexes_to_remove.extend(list(range(ai_list[0], ai_list[0] + self.anomalies_dist_min)))
# Remove points after
indexes_to_remove.extend(
list(range(ai_list[-1], ai_list[-1] - self.anomalies_dist_min - persistence, -1))
)
# Remove the anomalies_dist_min and persistence points
allowed_indexes = list(set(allowed_indexes) - set(indexes_to_remove))
return random.choice(allowed_indexes)
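# Illustrative example (made-up numbers): if rows 0-99 are still anomaly-free,
# anomalies_dist_min is 5 and persistence is 10, then indexes 0-4 and 85-99 of
# that block are excluded, so the chosen start index (5-84) keeps a gap from any
# neighbouring anomaly and leaves room for the anomaly's full persistence.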
def spikes_in_values(
self,
amount='',
anomaly_name='',
persistence_min='',
persistence_max='',
amplitude_min='',
amplitude_max=''
):
# ------- Remove the previous anomaly simulation --------
# get the indexes from the previous simulation
if self.siv_ps:
self.data.loc[self.siv_ps, "anomaly_name"] = ""
self.data.loc[self.siv_ps, "anomaly_value"] = ""
# -------------------------------------------------------
# Clean self.siv_ps for a new simulation
self.siv_ps = []
# Check if the arguments are numeric
if is_number(amount):
for _ in range(int(amount)):
amin = self.amplitude_min
amax = self.amplitude_max
if is_number(amplitude_min):
amin = float(amplitude_min)
if is_number(amplitude_max):
amax = float(amplitude_max)
amplitude = random.uniform(min([amin, amax]), max([amin, amax]))
# Choose a proper index for the anomaly
spike_index = self.choose_index()
value = self.data.at[spike_index, settings["df_y_column"]] + amplitude
self.data.at[spike_index, "anomaly_name"] = anomaly_name
self.data.at[spike_index, "anomaly_value"] = value
self.siv_ps.append(spike_index)
def stationary_values(
self,
amount='',
anomaly_name='',
persistence_min='',
persistence_max='',
amplitude_min='',
amplitude_max=''
):
# ------- Remove the previous anomaly simulation --------
# get the indexes from the previous simulation
if self.sv_ps:
self.data.loc[self.sv_ps, "anomaly_name"] = ""
self.data.loc[self.sv_ps, "anomaly_value"] = ""
# -------------------------------------------------------
# Clean self.sv_ps for a new simulation
self.sv_ps = []
# Check if the arguments are numeric
if is_number(amount):
# ---------- Persistence -------------------
pmin = self.persistence_min
pmax = self.persistence_max
if is_number(persistence_min):
pmin = int(persistence_min)
if is_number(persistence_max):
pmax = int(persistence_max)
# ------------------------------------------
for _ in range(int(amount)):
# Always a random persistence for each anomaly
persistence = random.randint(min([pmin, pmax]), max([pmin, pmax]))
# Choose a proper index for the anomaly
index_s = self.choose_index(persistence=persistence)
index_e = index_s + persistence
self.data.loc[index_s:index_e, "anomaly_name"] = anomaly_name
self.data.loc[index_s:index_e, "anomaly_value"] = self.data.at[index_s, settings["df_y_column"]]
self.sv_ps.extend(list(range(index_s, index_e + 1)))
def sensor_displacement(
self,
amount='',
anomaly_name='',
persistence_min='',
persistence_max='',
amplitude_min='',
amplitude_max=''
):
# ------- Remove the previous anomaly simulation --------
# get the indexes from the previous simulation
if self.ds_ps:
self.data.loc[self.ds_ps, "anomaly_name"] = ""
self.data.loc[self.ds_ps, "anomaly_value"] = ""
# -------------------------------------------------------
# Clean self.ds_ps for a new simulation
self.ds_ps = []
# Check if the arguments are numeric
if is_number(amount):
# ---------- Amplitude -------------------
amin = self.amplitude_min
amax = self.amplitude_max
if is_number(amplitude_min):
amin = float(amplitude_min)
if is_number(amplitude_max):
amax = float(amplitude_max)
# ------------------------------------------
# ---------- Persistence -------------------
pmin = self.persistence_min
pmax = self.persistence_max
if is_number(persistence_min):
pmin = int(persistence_min)
if is_number(persistence_max):
pmax = int(persistence_max)
# ------------------------------------------
for _ in range(int(amount)):
# Always a random amplitude and persistence for each anomaly
amplitude = random.uniform(min([amin, amax]), max([amin, amax]))
persistence = random.randint(min([pmin, pmax]), max([pmin, pmax]))
# Choose a proper index for the anomaly
index_s = self.choose_index(persistence=persistence)
index_e = index_s + persistence
self.data.loc[index_s:index_e, "anomaly_name"] = anomaly_name
self.data.loc[index_s:index_e, "anomaly_value"] = self.data.loc[
index_s:index_e, settings["df_y_column"]
] + amplitude
self.ds_ps.extend(list(range(index_s, index_e + 1)))
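# Illustrative stand-alone usage sketch (not part of the original app flow;
# assumes a dataframe with the column names configured in `settings` below):
#   demo_df = pd.DataFrame({
#       "datetime": pd.date_range("2021-01-01", periods=200, freq="H"),
#       "measured": numpy.random.rand(200),
#   })
#   demo = Anomalies(data=demo_df)
#   demo.add_anomalies_name_and_value_columns()
#   demo.spikes_in_values(amount="3", anomaly_name="Spikes",
#                         amplitude_min="0.5", amplitude_max="1.5")
#   demo.save_data_with_anomalies("demo_anomalies.csv")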
def decode_csv_content(csv_content=None, filename=None):
df = None
if csv_content:
content_type, content_string = csv_content.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(
io.StringIO(decoded.decode('utf-8')),
float_precision='round_trip'
)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
except Exception as e:
print(e)
return df
# ---------- The project Settings --------------
settings = {
"df_x_column": "datetime",
"df_y_column": "measured",
"plot_settings": {
"x_label": "Date",
"y_label": "Water Level",
"Original_Values": {
"color": "blue"
}
}
}
# ------------------ The anomalies and their methods -------------------
anomalies_methods = {
"Spikes": "spikes_in_values",
"Stationary Values": "stationary_values",
"Sensor Displacement": "sensor_displacement"
}
# Update plot_settings in settings with the anomaly colors
colors = ["black", "red", "green", "black"]
for index, anomaly in enumerate(anomalies_methods):
settings["plot_settings"].update(
{
anomaly: {
"color": colors[index]
}
}
)
# -------------------- params --------------------------
reference_parameters = {
"load_csv_n_clicks": 0,
"injects_anomalies_n_clicks": 0,
"upload_dataframe_content": "",
"fig": px.scatter(),
"plots_first_index": {}
}
# ----------------------- Start Anomalies Class and add the dataframe ---------------
anomalies = Anomalies()
# -------------------------- Tables ------------------------------
anomalies_table = dash_table.DataTable(
id='anomalies-table',
columns=(
[
{
'id': 'Anomaly', 'name': "Anomaly", 'editable': False
},
{
'id': 'Amount', 'name': "Amount", 'editable': True
},
{
'id': 'Amplitude (Min)', 'name': "Amplitude (Min)", 'editable': True
},
{
'id': 'Amplitude (Max)', 'name': "Amplitude (Max)", 'editable': True
},
{
'id': 'Persistence (Min)', 'name': "Persistence (Min)", 'editable': True
},
{
'id': 'Persistence (Max)', 'name': "Persistence (Max)", 'editable': True
}
]
),
data=[
{
"Anomaly": anomaly_name,
"Amount": "",
"Persistence (Min)": "",
"Persistence (Max)": "",
"Amplitude (Min)": "",
"Amplitude (Max)": ""
}
for anomaly_name in anomalies_methods
]
)
fig_table = dash_table.DataTable(
id='fig-table',
columns=(
[
{
'id': 'Date', 'name': "Date", 'editable': False
},
{
'id': 'Original Value', 'name': "Original Value", 'editable': False,
},
{
'id': 'Anomaly', 'name': "Anomaly", 'editable': False,
},
{
'id': 'Anomaly Value', 'name': "Anomaly Value", 'editable': False
}
]
),
data=[]
)
# ------------------- App --------------------------
app = dash.Dash(__name__)
# ------------------- App layout -------------------
app.layout = html.Div([
anomalies_table,
dcc.Upload(
id='upload-dataframe',
children=html.Div(
[
html.Button('Load csv', id='load-dataframe-button', n_clicks=0)
]
)
),
html.Button('Injects Anomalies', id='injects-anomalies-button', n_clicks=0),
html.Button('Download csv with Anomalies', id='download-dataframe-with-anomalies-button', n_clicks=0),
dcc.Graph(
id='anomalies-fig',
figure=reference_parameters['fig'],
),
fig_table,
Download(id="download-anomalies-csv"),
html.Div(id='output-data-upload')
])
# ---------------------------- Select Data display table Callback -----------------------
@app.callback(
Output('fig-table', 'data'),
[
Input('anomalies-fig', 'selectedData')
]
)
def select_data_display_table(selected_data):
data = []
if selected_data:
for point in selected_data['points']:
pi = point['pointIndex']
cn = point['curveNumber']
correct_index = pi + reference_parameters["plots_first_index"][cn]
data.append(
{
"Date": anomalies.data.at[correct_index, settings["df_x_column"]],
"Original Value": anomalies.data.at[correct_index, settings["df_y_column"]],
"Anomaly": anomalies.data.at[correct_index, "anomaly_name"],
"Anomaly Value": anomalies.data.at[correct_index, "anomaly_value"]
}
)
return data
# ---------------------------- Download Csv with Anomalies Callback -----------------------
@app.callback(
Output("download-anomalies-csv", "data"),
[
Input('download-dataframe-with-anomalies-button', 'n_clicks')
]
)
def download_dataframe_with_anomalies(n_clicks):
if n_clicks:
return send_data_frame(anomalies.data.to_csv, filename="anomalies.csv")
else:
return None
# -------------------------- Load CSV and Injects Anomalies Callback ----------------------
@app.callback(
Output('anomalies-fig', 'figure'),
[
Input('load-dataframe-button', 'n_clicks'),
Input('injects-anomalies-button', 'n_clicks'),
Input('anomalies-table', 'data'),
Input('upload-dataframe', 'contents')
],
[
State('upload-dataframe', 'filename')
]
)
def load_csv_and_injects_anomalies(
load_csv_n_clicks,
injects_anomalies_n_clicks,
anomalies_table_data,
upload_dataframe_content,
upload_dataframe_filename
):
# ----------------------------- LOAD THE CSV ------------------------------------
if load_csv_n_clicks != reference_parameters["load_csv_n_clicks"]:
if upload_dataframe_content:
if upload_dataframe_content != reference_parameters["upload_dataframe_content"]:
# Load and decode the csv
df = decode_csv_content(csv_content=upload_dataframe_content, filename=upload_dataframe_filename)
anomalies.data = df.copy()
anomalies.add_anomalies_name_and_value_columns()
# Create a figure for the csv
fig = px.scatter(df, x=settings["df_x_column"], y=settings["df_y_column"], render_mode='webgl')
fig.data[0].update(mode='markers+lines', marker={'size': 1, 'color': 'blue'})
fig.update_layout(
clickmode='event+select',
yaxis={"title": settings["plot_settings"]["y_label"]},
xaxis={"title": settings["plot_settings"]["x_label"]}
)
# Saving the first index of the plot because each plot
# will restart with index = 0
reference_parameters["plots_first_index"][0] = 0
# Update Reference Parameters
reference_parameters["load_csv_n_clicks"] = load_csv_n_clicks
reference_parameters["upload_dataframe_content"] = upload_dataframe_content
reference_parameters["fig"] = fig
# ------------------------ INJECTS ANOMALIES -----------------------------------------
if injects_anomalies_n_clicks != reference_parameters["injects_anomalies_n_clicks"]:
if upload_dataframe_content:
# Inject the requested anomalies into anomalies.data
for aft in anomalies_table_data:
getattr(anomalies, anomalies_methods[aft["Anomaly"]])(
amount=aft["Amount"],
anomaly_name=aft["Anomaly"],
persistence_min=aft["Persistence (Min)"],
persistence_max=aft["Persistence (Max)"],
amplitude_min=aft["Amplitude (Min)"],
amplitude_max=aft["Amplitude (Max)"]
)
# ------------------ Break the fig in various Subplots ------------------------------
# Get the indexes for original values (without anomalies) and indexes with anomalies
original_indexes = numpy.where(anomalies.data["anomaly_value"] == "")[0]
anomalies_indexes = numpy.where(anomalies.data["anomaly_value"] != "")[0]
# The indexes with each plot
plots_indexes = []
# Break the indexes for each plot with original values
for plot in more_itertools.consecutive_groups(original_indexes):
plots_indexes.append(list(plot))
# Break the indexes for each plot with anomalies
for plot in more_itertools.consecutive_groups(anomalies_indexes):
plots_indexes.append(list(plot))
# Define a fig
# render_mode MUST BE webgl
fig = px.scatter(render_mode='webgl')
# Create a subplot for each plot_indexes
for plot_indexes, plot_id in zip(plots_indexes, range(len(plots_indexes))):
# Add the subplots with
# Get the name
# CAR25onCSV.py
#
# A program to read a .CSV file that contains a list of trades
# and computes the safe-f and CAR25 metrics that best
# estimate the future performance of the system that produced
# these gains and losses.
# The risk_normalization library is managed by PyPi.
# Before running this program, use pip to install risk_normalization
# If you have set up a virtual environment, change directory to
# that environment.
# Then:
# pip3 install risk-normalization
import math
import matplotlib as plt
import numpy as np
import pandas as pd
import random
#import risk_normalization
import sklearn as skl
import statistics
import statsmodels as st
# These do not have a __version__ method
#print (f'math version: {math.__version__}')
#print (f'random version: {random.__version__}')
#print (f'risk_normalization version: {risk_normalization.__version__}')
#print (f'statistics version: {statistics.__version__}')
# These do
#print (f'matplotlib version: {plt.__version__}')
#print (f'numpy version: {np.__version__}')
#print (f'pandas version: {pd.__version__}')
#print (f'scikit-learn version: {skl.__version__}')
#print (f'statsmodels version: {st.__version__}')
#-----------------------------------------------------
"""
risk_normalization.py
Created on Mon Feb 28 19:45:18 2022
@author: <NAME>
This file created on Tuesday, October 9, 2020
Modified on Friday, December 11, 2020
... separated risk_normalization and related procedures
... in preparation for publishing on PyPI
Modified on Monday, February 28, 2022
... added missing global value
... modified calling sequence
Risk normalization routines designed by Dr. <NAME>,
Blue Owl Press.
License: MIT
This technique was originally published in the book,
"Modeling Trading System Performance," in 2011, as an
Excel Add-in.
Published again in the book "Quantitative Technical Analysis,"
in 2014, as a Python program.
#-----------------
Overview of the function risk_normalization:
Begin with a set of trades. These are analyzed as is to compute
safe-f, and are assumed to be the best estimate of future
performance. This set does not change throughout the procedure.
The risk normalization consists of two phases:
1. Compute the maximum fraction of the trading account
that can be used to take a position in the tradable issue
without exceeding the personal risk tolerance of the
trader. This is called "safe-f"
2. Using safe-f as a position size, compute the profit
potential for the forecast period. Convert the gain
to Compound Annual rate of Return, called "CAR25"
#--- Compute safe-f
Set the fraction an initial value of 1.00
Create many equally likely equity curves,
measure the maximum drawdown of each,
keep them in a list.
Treat the list of max drawdowns as a distribution
and determine the maximum drawdown at the high
risk tail -- probably at the 95th percentile.
Compare the trader's personal risk tolerance with
the tail risk of the distribution.
If they are equal, the current value of the
fraction is safe-f.
If they are not equal, adjust the fraction and
repeat.
safe-f has been established.
#--- Compute CAR25
Using safe-f as the fraction
Create many equally likely equity curves,
measure the final equity,
keep that in a list.
Treat the list of final equity as a distribution
and determine the equity at the 25th percentile.
Convert the relative gain in equity to a
compound annual rate of return.
That value is car25.
Return the mean and standard deviation of both safe-f and CAR25
#-----------------
Assumptions
A trade list has been created by some process.
It could be live trades, validation trades, in-sample trades,
or hypothetical trades.
Each trade represents the gain in equity of a single day,
resulting from a trade on that day,
such as the change in price from today's close to tomorrow's
close.
A gain of 1% is represented as 0.0100
A day where the position is flat has a gain of 0.0000
There are about 252 trades per year
The account is marked to market daily.
The account is managed daily.
The trader is able and willing to change position daily.
Use:
safe_f_mean, safe_f_stdev, CAR25_mean, CAR25_stdev = risk_normalization(
trades,
number_days_in_forecast,
number_trades_in_forecast,
initial_capital,
tail_percentile,
drawdown_tolerance,
number_equity_in_CDF,
number_repetitions
)
Parameters:
trades: The set of trades to evaluate.
Expecting a numpy array with one dimension.
number_days_in_forecast: the forecast period.
Typical = 504 for a 2 year forecast
Typical = 252 for a 1 year forecast
number_trades_in_forecast:
The number of trades to draw for each equity sequence.
If each trade represents one day of mark-to-market result:
If there is an entry for each and every day of the
trading, including entries of 0.0 for days not traded:
Typical = the same as number_days_in_forecast.
If there are entries for only those days traded:
Typical = the expected number of days with trades
in the forecast period.
If each trade represents a multiday trade:
Typical = the expected number of trades in the forecast period.
initial_capital: initial amount in the trading account.
Typical = $100,000.00
tail_percentile: The percentile at which to measure the tail risk.
Typical = 5 => the 95th percentile of the CDF.
drawdown_tolerance: The trader's drawdown tolerance.
Expressed as a proportion of maximum equity to date.
Typical = 0.10 A 10% drawdown.
number_equity_in_CDF: The number of equity curves used
to compute a single CDF.
Typical = 1000
number_repetitions: The number of replications of calculation of
safe-f and CAR25 to compute the mean and standard deviation.
Typical = 10
Returns:
safe_f_mean: The fraction of the trading account that will be
used for each trade.
safe_f_stdev: standard deviation of safe_f calculations.
CAR25_mean: The compound annual rate of return for the given
set of trades and position size.
CAR25_stdev: standard deviation of CAR25 calculations.
definitions of variables
drawdown: list used to accumulate day by day drawdown
max_drawdown maximum drawdown to date
equity: list used to accumulate day by day equity
max_equity: maximum equity to date
file_name: name of csv or txt file containing trades
fraction: during calculations, the then current estimate
of position size, safe-f
"""
def make_one_equity_sequence(
trades,
fraction,
number_days_in_forecast,
number_trades_in_forecast,
initial_capital ):
"""
Given a set of trades, draw a random sequence of trades
and form an equity sequence.
Parameters:
trades: the set of trades to be analyzed
fraction: the proportion of the trading account
to be used for each trade.
number_days_in_forecast: Length of forecast in days.
number_trades_in_forecast: Length of forecast in trades.
initial_capital: Starting value of the trading account.
Returns:
Two scalars:
equity: The equity at the end of the sequence in dollars.
max_drawdown: The maximum drawdown experienced in the sequence
as a proportion of highest equity marked to market
after each trade.
"""
# initialize sequence
equity = initial_capital
max_equity = equity
drawdown = 0.0
max_drawdown = 0.0
# form the equity curve to display, if desired
daily_equity = np.zeros(number_days_in_forecast)
# form sequence
for i in range(number_trades_in_forecast):
trade_index = random.randint(0, len(trades) - 1)
trade = trades[trade_index]
trade_dollars = equity * fraction * trade
equity = equity + trade_dollars
daily_equity[i] = equity
max_equity = max(equity, max_equity)
drawdown = (max_equity - equity) / max_equity
max_drawdown = max(drawdown, max_drawdown)
# if necessary, fill remaining days
for i in range(number_trades_in_forecast,number_days_in_forecast):
daily_equity[i] = equity
# plt.plot(daily_equity)
# plt.show()
return (equity, max_drawdown)
def analyze_distribution_of_drawdown(
trades,
fraction,
number_days_in_forecast,
number_trades_in_forecast,
initial_capital,
tail_percentile,
number_equity_in_CDF ):
"""
Returns:
tail_risk: The maximum drawdown at the tail_percentile
of the distribution using the
current value of the position size.
"""
equity_list = []
max_dd_list = []
for i in range(number_equity_in_CDF):
equity, max_drawdown = make_one_equity_sequence(
trades,
fraction,
number_days_in_forecast,
number_trades_in_forecast,
initial_capital)
equity_list.append(equity)
max_dd_list.append(max_drawdown)
sorted_max_dd = np.sort(max_dd_list)
# plt.plot(sorted_max_dd)
# plt.show()
tail_risk = np.percentile(sorted_max_dd, 100 - tail_percentile)
return tail_risk
def form_distribution_of_equity(
trades,
fraction,
number_days_in_forecast,
number_trades_in_forecast,
initial_capital,
number_equity_in_CDF ):
# plt.hist(trades,bins=50)
# plt.show()
equity_list = []
max_dd_list = []
for i in range(number_equity_in_CDF):
equity, max_drawdown = make_one_equity_sequence(
trades,
fraction,
number_days_in_forecast,
number_trades_in_forecast,
initial_capital)
equity_list.append(equity)
max_dd_list.append(max_drawdown)
sorted_equity = np.sort(equity_list)
# plt.plot(sorted_equity)
# plt.show()
return sorted_equity
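# Sketch of how CAR25 follows from the distribution above (an assumption based
# on the module docstring, not code taken from the original file): take the
# 25th percentile of the sorted final equities and convert the gain to a
# compound annual rate over the forecast horizon, e.g.
#   TWR25 = np.percentile(sorted_equity, 25)
#   CAR25 = 100.0 * ((TWR25 / initial_capital) ** (252.0 / number_days_in_forecast) - 1.0)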
def risk_normalization(
trades,
number_days_in_forecast,
number_trades_in_forecast,
initial_capital,
tail_percentile,
drawdown_tolerance,
number_equity_in_CDF,
number_repetitions
):
safe_fs = []
TWR25s = []
CAR25s = []
desired_accuracy = 0.003
for rep in range(number_repetitions):
# Fraction is initially set to use all available funds
# It will be adjusted in response to the risk of drawdown.
# The final value of fraction is safe-f
fraction = 1.0
done = False
while not done:
# print(f"fraction this pass: {fraction:0.3f}")
tail_risk = analyze_distribution_of_drawdown(
trades,
fraction,
number_days_in_forecast,
number_trades_in_forecast,
initial_capital,
tail_percentile,
number_equity_in_CDF)
# print(f"tail_risk this pass: {tail_risk:0.3f}")
if abs(tail_risk - drawdown_tolerance) < desired_accuracy:
done = True
else:
from locust import HttpUser,SequentialTaskSet,task, between
from random import randrange
from random import randint
import json
import csv
import string
import random
#import sys, logging
#constants
def string_generator(size=7):
chars = string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
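# e.g. string_generator(4) might return "k3x9" (random lowercase letters and digits)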
host = "https://preprodms.embibe.com"
res = []
Params = []
email_password = []
#functions
# with open('email_password_embibe.csv', 'r') as csvfile:
# email_password = list (csv.reader(csvfile, delimiter=','))
with open('/locust-tasks/JIO_Params.csv', 'r') as csvfile:
Params = list (csv.reader(csvfile, delimiter=','))
#Declarations
#Payload Values
# body = {}
# user_id={'user_id':''}
# parent_id = {'parent_id':''}
# child_id={'child_id':''}
# email_id={'email_id':''}
# chapterid={'chapterid':''}
class UserBehaviour(SequentialTaskSet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.headers = {
'Content-Type':'application/json',
'connection':'keep-alive',
'Accept-Encoding':'gzip, deflate, br',
'Accept':'*/*'
}
self.body = {}
self.parent_id = None
self.email_id = None
self.child_id = None
self.child_email_id = None
self.chapter_id = '5e79df54810bc73565cb2696'
self.value = 1
self.grade = randint(9,10)
@task
def Signup(self):
res = string_generator(7)
signup_data = {"login":"<EMAIL>"+res+"@<EMAIL>","password":"<PASSWORD>", "flag":"sp"}
response = self.client.post(url = "/user_auth_lt/auth/sign_in",name="Signup",data=json.dumps(signup_data),auth=None, headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"Signup -{host}/user_auth_lt/auth/sign_in")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
#print(response.json())
self.headers['embibe-token'] = response.headers['embibe-token']
json_dict = json.loads(response.content)
self.parent_id = json_dict["resource"]["id"]
self.email_id = json_dict["resource"]["email"]
#user_id['user_id'] = response.json()['user_id']
#email_id['email_id'] = response.json()['email']
@task
def add_user(self):
res = string_generator(6)
user_data = {
"parent_id": self.parent_id,
"first_name": "Test",
"user_type" : "child",
"email_id" : "<EMAIL>"+res+"@<EMAIL>",
"goal" : "g10",
"grade" : self.grade,
"board" : "CBSE",
"school" : "DPS",
"state" : "Delhi",
"city" : "Delhi",
"avatar_image" : "S3 Url (String)"
}
response = self.client.post(url = "/fiber_ms_lt/addUser",name="add_user",data=json.dumps(user_data), headers=self.headers)
json_dict = json.loads(response.content)
self.child_id = json_dict["linked_profiles"][0]["user_id"]
self.child_email_id = json_dict["linked_profiles"][0]["email"]
# print(self.child_id)
#print(response.json())
if (response.status_code != 200):
print(response.request.headers)
print(f"add_user -{host}/fiber_ms_lt/addUser")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def login(self):
# rnum = randrange(len(email_password)-1)
login_data={
"login": self.email_id,
"password":"<PASSWORD>"
}
response = self.client.post('/user_auth_lt/auth/sign_in', data=json.dumps(login_data), name="login",headers=self.headers)
#json_dict = json.loads(response.content)
#user_id['user_id'] = json_dict["resource"]["id"]
if (response.status_code != 200):
print(response.request.headers)
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
#logging.info('Response for login API is %s',response.content)
#self.headers ['embibe-token']= response.headers['embibe-token']
@task
def getSearchResults(self):
# pick a random size value for this request
self.value = randint(1, 1000)
response = self.client.get(url = f"/fiber_ms_lt/search/results?query=magnet&user_id={self.parent_id}&size={self.value}&grade=10&goal=CBSE",name="getSearchResults",data=self.body, headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getSearchResults -{host}/fiber_ms_lt/search/results?query=magnet&user_id={self.parent_id}&size={self.value}&grade=10&goal=CBSE")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getSearchSuggestions(self):
response = self.client.get(url = f"/fiber_ms_lt/search/suggestions?query=magnet&user_id={self.parent_id}&size={self.value}&grade=10&goal=CBSE",name="getSearchSuggestions",data=self.body, headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getSearchSuggestions -{host}/fiber_ms_lt/search/suggestions?query=magnet&user_id={self.parent_id}&size={self.value}&grade=10&goal=CBSE")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def conceptconnect(self):
rnum = randrange(len(Params)-1)
# response = self.client.get(url = f"/concepts/connected/{Params[rnum][0]}?content_id={Params[rnum][1]}",name="conceptconnect",data=body, headers=self.headers)
response = self.client.get(url = f"/fiber_ms_lt/concepts/connected/new_KG4607?content_id={Params[rnum][1]}",name="conceptconnect",data=self.body, headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"conceptconnect -{host}/fiber_ms_lt/concepts/connected/new_KG4607?content_id={Params[rnum][1]}")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def conceptmoreconnect(self):
rnum = randrange(len(Params)-1)
#print (Params[rnum][0],Params[rnum][1])
# response = self.client.get(url = f"/concepts/more/{Params[rnum][0]}?content_id={Params[rnum][1]}",name="conceptmoreconnect",data=body,headers=self.headers)
response = self.client.get(url = f"/fiber_ms_lt/concepts/more/new_KG4607?content_id={Params[rnum][1]}",name="conceptmoreconnect",data=self.body,headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"conceptmoreconnect -{host}/fiber_ms_lt/concepts/more/new_KG4607?content_id={Params[rnum][1]}")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getHomeData(self):
home_data = {
"child_id" : self.parent_id,
"grade" :"10",
"goal" : "CBSE"
}
response = self.client.post(url = "/fiber_ms_lt/home",name="getHomeData",data=json.dumps(home_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getHomeData -{host}/fiber_ms_lt/home")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getHomeDataV1(self):
home_data = {
"child_id" : self.parent_id,
"grade" :self.grade,
"goal" : "CBSE"
}
response = self.client.post(url = "/fiber_ms_lt/v1/home",name="getHomeData",data=json.dumps(home_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getHomeData -{host}/fiber_ms_lt/v1/home")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getfilteredHomeData(self):
filteredhome_data = {
"child_id" : self.parent_id,
"grade" : "10",
"goal" : "CBSE",
"onlyPractise" : True
}
response = self.client.post(url = "/fiber_ms_lt/home/Physics",name="getfilteredHomeData",data=json.dumps(filteredhome_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getfilteredHomeData -{host}/fiber_ms_lt/home/Physics")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def homeSections(self):
rnum = randrange(len(Params)-1)
homeSections_data = {
"child_id": self.parent_id,
"grade": self.grade,
"goal": "CBSE",
"content_section_type": "BestLearningVideosFromInternet",
"offset": 2,
"size": 20
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/sections",name="homeSections",data=json.dumps(homeSections_data), headers=self.headers)
print(f"child id is {self.parent_id}")
if (response.status_code != 200):
print(response.request.headers)
print(f"homeSections -{host}/fiber_ms_lt/v1/home/sections")
print(response.content)
print(response.headers)
print(f"child id is {self.parent_id}")
print("------------------------------------------------------------------------------------------------------")
@task
def homepractise(self):
homepractise_data = {
"child_id" : self.parent_id,
"grade" :self.grade,
"goal" : "CBSE"
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/practise",name="homepractise",data=json.dumps(homepractise_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"homepractise -{host}/fiber_ms_lt/v1/home/practise")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def homepractisesections(self):
rnum = randrange(len(Params)-1)
homepractisesections_data = {
"child_id": self.parent_id,
"grade": self.grade,
"goal": "CBSE",
"content_section_type": "PractiseMathematicsChapters",
"offset": 0,
"size": 20
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/practise/sections",name="homepractisesections",data=json.dumps(homepractisesections_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"homepractisesections -{host}/fiber_ms_lt/v1/home/practise/sections")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def userHome(self):
userHome_data = {
"child_id" : self.parent_id,
"grade" :self.grade,
"goal" : "CBSE"
}
response = self.client.post(url = "/fiber_ms_lt/v1/userHome",name="userHome",data=json.dumps(userHome_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"userHome -{host}/fiber_ms_lt/v1/userHome")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def Physics(self):
home_data = {
"child_id" : self.parent_id,
"grade" :self.grade,
"goal" : "CBSE",
"fetch_all_content" : "true"
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/Physics",name="Physics",data=json.dumps(home_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"Physics -{host}/fiber_ms_lt/v1/home/Physics")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def homePhysicsSection(self):
home_data = {
"child_id" : self.parent_id,
"grade" :self.grade,
"goal" : "CBSE",
"content_section_type": "LearnPracticePhysicsBooks",
"offset": 0,
"size": 15
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/Physics/sections",name="homePhysicsSection",data=json.dumps(home_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"homePhysicsSection -{host}/fiber_ms_lt/v1/home/Physics/sections")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def Chemistry(self):
home_data = {
"child_id" : self.parent_id,
"grade" : self.grade,
"goal" : "CBSE",
"fetch_all_content" : "true"
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/Chemistry",name="Chemistry",data=json.dumps(home_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"Chemistry -{host}/fiber_ms_lt/v1/home/Chemistry")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def homeChemistrySection(self):
home_data = {
"child_id" : self.parent_id,
"grade" :self.grade,
"goal" : "CBSE",
"content_section_type": "RealLifeExamplesVideosSyllabus",
"offset": 0,
"size": 20
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/Chemistry/sections",name="homeChemistrySection",data=json.dumps(home_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"homeChemistrySection -{host}/fiber_ms_lt/v1/home/Chemistry/sections")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def Biology(self):
home_data = {
"child_id" : self.parent_id,
"grade" : self.grade,
"goal" : "CBSE",
"fetch_all_content" : "true"
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/Biology",name="Biology",data=json.dumps(home_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"Biology -{host}/fiber_ms_lt/v1/home/Biology")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def homeBiologySection(self):
home_data = {
"child_id" : self.parent_id,
"grade" : self.grade,
"goal" : "CBSE",
"content_section_type": "LearnPracticeBiologyBooks",
"offset": 0,
"size": 20
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/Biology/sections",name="homeBiologySection",data=json.dumps(home_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"homeBiologySection -{host}/fiber_ms_lt/v1/home/Biology/sections")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def Mathematics(self):
rnum = randrange(len(Params)-1)
Mathematics_data = {
"child_id": self.parent_id,
"grade": self.grade,
"goal": "CBSE",
"content_section_type": "PractiseMathematicsChapters",
"fetch_all_content" : "true"
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/Mathematics",name="Mathematics",data=json.dumps(Mathematics_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"Mathematics -{host}/fiber_ms_lt/v1/home/Mathematics")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def homeMathematicssections(self):
rnum = randrange(len(Params)-1)
homeMathematicssections_data = {
"child_id": self.parent_id,
"grade": self.grade,
"goal": "CBSE",
"content_section_type": "RealLifeExamplesVideosSyllabus",
"offset": 0,
"size": 20
}
response = self.client.post(url = "/fiber_ms_lt/v1/home/Mathematics/sections",name="homeMathematicssections",data=json.dumps(homeMathematicssections_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"homeMathematicssections -{host}/fiber_ms_lt/v1/home/Mathematics/sections")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getRelatedData(self):
rnum = randrange(len(Params)-1)
response = self.client.get(url = f"/fiber_ms_lt/cg/related_data/{Params[rnum][0]}",name="getRelatedData",data=self.body, headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getRelatedData -{host}/fiber_ms_lt/cg/related_data/{Params[rnum][0]}")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def updateStatus(self):
rnum = randrange(len(Params)-1)
contentstatus_data = {
"content_id" : Params[rnum][1],
"content_type" :Params[rnum][2],
"is_watched" : True,
"content_status" :"COMPLETED",
"watched_duration" : 7000,
"child_id" : self.child_id
}
response = self.client.post(url = "/fiber_ms_lt/content-status",name="updateStatus",data=json.dumps(contentstatus_data), headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"updateStatus -{host}/fiber_ms_lt/content-status")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getStatus(self):
rnum = randrange(len(Params)-1)
#print (Params[rnum][1],Params[rnum][2],user_id['user_id'])
# response = self.client.get(url = f"/content-status/{Params[rnum][1]}/{Params[rnum][2]}?child_id={user_id['user_id']}",name="getStatus",headers=self.headers)
response = self.client.get(url ="/fiber_ms_lt/content-status/abc/Video?child_id=423423525",name="getStatus",headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getStatus -{host}/fiber_ms_lt/content-status/abc/Video?child_id=423423525")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
# @task
# def getChapterList(self):
# response = self.client.get(url = "/chapters",name="getChapterList",data=body, headers=self.headers)
# logging.info('Headers for getChapterList API is %s',response.content)
# chapterid['chapterid'] = response.json().get("id","5e79df54810bc73565cb2696")
@task
def getChapterDetail(self):
response = self.client.get(url = f"/fiber_ms_lt/chapters/chapterDetail/{self.chapter_id}",name="getChapterDetail",data=self.body, headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getChapterDetail -{host}/fiber_ms_lt/chapters/chapterDetail/{self.chapter_id}")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getChapterList_subject(self):
#response = self.client.get(url = "/fiber_ms_lt/chapters/Chemistry",name="getChapterList_subject",data=body, headers=self.headers)
response = self.client.get(url = "/fiber_ms_lt/chapters/Chemistry?grade=10",name="getChapterList_subject",data=self.body, headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getChapterList_subject -{host}/fiber_ms_lt/chapters/Chemistry")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def concept_prerequisites(self):
rnum = randrange(len(Params)-1)
response = self.client.get(url = f"/fiber_ms_lt/concepts/prerequisites/new_KG4607?content_id={Params[rnum][1]}",name="concept_prerequisites",data=self.body, headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"concept_prerequisites -{host}/fiber_ms_lt/concepts/prerequisites/new_KG4607?content_id={Params[rnum][1]}")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getContentDetails(self):
rnum = randrange(len(Params)-1)
contentdetails_data = {
"child_id" : self.parent_id,
"grade" : self.grade,
"goal" : "CBSE"
}
response = self.client.get(url = f"/fiber_ms_lt/contentDetails/{Params[rnum][2]}/{Params[rnum][1]}",name="getContentDetails",data=json.dumps(contentdetails_data),
headers=self.headers)
if (response.status_code != 200):
print(response.request.headers)
print(f"getContentDetails -{host}/fiber_ms_lt/contentDetails/{Params[rnum][2]}/{Params[rnum][1]}")
print(response.content)
print(response.headers)
print("------------------------------------------------------------------------------------------------------")
@task
def getUserHome(self):
home_data = {
"board": "CBSE",
"child_id": self.parent_id,
"goal":"CBSE",
import cv2
import matplotlib.pyplot as plt
from matplotlib import transforms
import numpy as np
from Objects.ImageSegmentationClass import ImageSegmentationClass
from Objects.LayerClass import LayerClass
from Objects.ResultClass import ResultClass
from Utils import utils
from scipy import stats
class ProccesClass(object):
filter_img = None
width = None
height = None
mask = None
parameters = None
def __init__(self, parameters):
self.parameters = parameters
def _get_nearest_edge(self, edge_image, column, start_position):
#####################################################################
#Estimates the edge point in the next column based on the previous point.
#####################################################################
#We establish a min and a max position where we can find the nearest pixel corresponding to an edge.
min_pos = start_position - self.parameters.localization_top_window
max_pos = start_position + self.parameters.localization_bot_window
#We find the edges on the column where we are trying to find the nearest point.
diff = [int(edge_image[c,column]) - int(edge_image[c-1,column]) for c in range(min_pos + 1, max_pos)]
ii = [min_pos + i for i in range(0,len(diff)) if diff[i] == 255]
#If we don't find any edge on the column, we return -1
if len(ii) == 0:
return -1
min = max_pos
#We choose the candidate row nearest to the previous point.
for i in ii:
if abs(i-start_position) < min:
min = abs(i-start_position)
result = i
return result
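# Illustrative example: with start_position = 100 the scan covers roughly the
# rows from 100 - localization_top_window to 100 + localization_bot_window in
# the given column, keeps the rows where the edge image steps from 0 to 255,
# and returns the one closest to row 100 (or -1 if no edge is found there).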
def _pre_get_masks(self):
#####################################################################
#Obtains the mask that excludes the non-interesting regions
#####################################################################
#Remove points with intensity < 5 after blurring the image.
filter_img = cv2.GaussianBlur(self.filter_img, (3, 3), 0)
_, mask = cv2.threshold(filter_img, 5, 255, cv2.THRESH_BINARY)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
#We also use an opening to remove stray points inside regions where many points were already removed
kernel = np.ones((9, 9), np.uint8)
open_img = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
#We use an erosion to remove the edge pixels along the boundary between the non-interesting regions and the image.
kernel = np.ones((11, 11), np.uint8)
eroded_img = cv2.morphologyEx(open_img, cv2.MORPH_ERODE, kernel)
return eroded_img
def _get_edges(self, kernel, canny_values, showEdges=False):
#####################################################################
#Obtains the edges of the image
#####################################################################
#Use canny to get the edges
edge_img = cv2.Canny(self.filter_img, canny_values[0], canny_values[1])
#Apply a closing to fill gaps inside the edges.
edge_img = cv2.morphologyEx(edge_img, cv2.MORPH_CLOSE, kernel)
if showEdges:
cv2.imshow("Edges", edge_img)
cv2.waitKey()
return edge_img
def _get_starting_pos(self, edge_img, row, previous_line, showStartingPos = False):
#####################################################################
#Obtains the first point that we use to approximate the edge pixel by pixel.
#####################################################################
if previous_line != None:
has_previous = True
else:
has_previous = False
for i in range(0, self.width,10):
#We check the points below the estimated row (increasing row index)
bottom_distance = -1
for j in range(row,row+self.parameters.max_dist_to_roi):
if edge_img[j,i] > 0:
if (not has_previous or j > previous_line.get_pos("bot", i) + self.parameters.edge_width) and self.check_point(edge_img,j,i):
bottom_distance = j
break
#We check the points above the estimated row (decreasing row index)
top_distance = -1
for j in range(row, row-self.parameters.max_dist_to_roi,-1):
if edge_img[j,i] > 0:
if (not has_previous or j > previous_line.get_pos("bot", i)) and self.check_point(edge_img,j,i):
top_distance = j
break
#If we find a valid point we return the nearest one along with the column where it was found
if bottom_distance > -1 or top_distance > -1:
start_column = i
if showStartingPos:
plt.figure()
plt.plot(previous_line)
plt.imshow(self.filter_img)
plt.axvline(x=start_column)
if (row - top_distance > bottom_distance - row) and bottom_distance > -1:
if showStartingPos:
plt.axhline(y=bottom_distance)
plt.show()
return bottom_distance, start_column
else:
if showStartingPos:
plt.axhline(y=top_distance)
plt.show()
return top_distance, start_column
return -1, -1
def check_point(self, edge_image, x, y):
#####################################################################
#We check whether the point seems to be an edge or may be noise.
#####################################################################
if sum(edge_image[x-5:x+5,y-10]) > 0 and sum(edge_image[x-5:x+5,y+10])>0:
return True
else:
return False
def _get_roi(self, edge_img, showRoi = False):
#####################################################################
#We obtain the Regions of interest in the images:
# * External edge of the lens
# * Internal edge of the lens
# * Cornea contour
#####################################################################
#We estimate the ROIs based on the study of the intensities across the rows.
rows = [sum(edge_img[row, :]) for row in range(0, self.height)]
#We differentiate the intensities across the rows
diff = [rows[c] - rows[c-1] for c in range(1, len(rows))]
#We get the rows where the derivative is greater than the ROI threshold.
roi = np.argwhere(np.array(diff) > self.parameters.roi_th)
previous_result = -1
result = []
#We remove results too near to the previous one, assuming that there is a minimum distance between edges.
while len(roi) > 0:
if previous_result == -1:
previous_result = int(roi[0])
result.append(previous_result)
else:
try:
previous_result = int(roi[roi > previous_result + self.parameters.min_dist_between_roi*self.height][0])
result.append(previous_result)
except IndexError:
break
if showRoi:
plt.figure(1)
plt.subplot(131)
for row in result:
plt.axhline(y=row)
plt.imshow(edge_img)
plt.subplot(132)
plt.plot(rows)
plt.subplot(133)
plt.axhline(y=self.parameters.roi_th, c='g')
plt.plot(diff)
plt.show()
return result
def _rotate_back(self, top_line, bot_line, rotation_matrix):
#####################################################################
#We rotate back the detected edges obtained on the rotated images
#####################################################################
M = cv2.invertAffineTransform(rotation_matrix)
top_line_ones = np.hstack([top_line, np.ones(shape=(len(top_line), 1))])
bot_line_ones = np.hstack([bot_line, np.ones(shape=(len(bot_line), 1))])
top_line_inv = M.dot(top_line_ones.T).T
bot_line_inv = M.dot(bot_line_ones.T).T
return top_line_inv, bot_line_inv
def _get_top_line(self, edge_img, start_column, start_value, layers):
#####################################################################
#Obtains the top line of an edge pixel by pixel
#####################################################################
left_end = 0
right_end = self.width
top_line = [-1] * self.width
gaps = []
in_gap = False
#We start the approximation
top_line[start_column] = start_value
#We search first on the right
for i in range(start_column + 1, self.width):
#We obtain the next point based on the previous point
pos = self._get_nearest_edge(edge_img, i, top_line[i - 1])
#In case of a gap --> we don't find any edge on that column.
if pos == -1:
#We keep the previous row for the next point, trying not to cross a previous line.
if len(layers) > 0:
pos = max(top_line[i - 1], layers[-1].get_pos("bot", i))
else:
pos = top_line[i - 1]
#If we are on a gap and detect that we are in the non-interesting image region, we stop.
if self.mask[pos, i] == 0:
if sum(self.mask[pos, i:i + 10]) == 0:
if in_gap:
gaps.append((right_gap,i))
right_end = i
break
#Else, if we weren't on a gap previously, we start one.
if not in_gap:
in_gap = True
right_gap = i
#If we find a point and were previously on a gap, we close it.
elif in_gap:
gaps.append((right_gap, i))
in_gap = False
#We save the detected points.
top_line[i] = pos
in_gap = False
#Now we search the left points from the start column.
for i in range(start_column - 1, -1, -1):
# We obtain the next point based on the previous point
pos = self._get_nearest_edge(edge_img, i, top_line[i + 1])
# In case of a gap --> we don't find any edge on that column.
if pos == -1: # GAP
# We keep the previous row for the next point, trying not to cross a previous line.
if len(layers) > 0:
try:
pos = max(top_line[i + 1], layers[-1].get_pos("bot", i))
except Exception:
pos = top_line[i + 1]
else:
pos = top_line[i + 1]
# If we are on a gap and detect that we are in the non-interesting image region, we stop.
if self.mask[pos, i] == 0:
if sum(self.mask[pos, i - 10:i]) == 0:
if in_gap:
gaps.append((i,left_gap))
left_end = i
break
# Else, if we weren't on a gap previously, we start one.
if not in_gap:
in_gap = True
left_gap = i
# If we find a point and were previously on a gap, we close it.
elif in_gap:
gaps.append((i, left_gap))
in_gap = False
# We save the detected points.
top_line[i] = pos
# We check whether the line seems to be OK or has too many gaps.
if self._check_line(top_line, gaps, left_end, right_end):
# If it seems ok we save it
layer = LayerClass()
layer.set_top_line(top_line[left_end+1:right_end-1], left_end+1, right_end-1)
layer.set_gaps(gaps)
layer.interpolate_gaps()
return layer
else:
# If the line seems to be noise, we return None.
return None
def _get_bot_line(self, edge_img, layer):
#####################################################################
# We obtain the bottom line of an edge based on the top line and the edge information.
#####################################################################
result = []
# We find the external pixel of a detected edge. We use a minimum distance representing the width of the edge
# and a maximum distance where we look for a candidate point.
for i in range(layer.get_start(), layer.get_end()):
y = layer.get_pos("top",i)
aux = [int(edge_img[j, i]) - int(edge_img[j - 1, i]) for j in
range(y + self.parameters.edge_width, y + self.parameters.edge_width + self.parameters.sample_window)]
result.append(int(np.argmax(np.array(aux) < 0) + y + self.parameters.edge_width))
return result
def _check_line(self, line, gaps, left_end, right_end):
t.type])
return any(semantic_tags)
@property
def contains_correspondence(self):
'Boolean property indicating if this series contains correspondence.'
return 'correspondence' in unicode(self.did.unittitle).lower()
@property
def rdf_type(self):
'''RDF type to use for a semantically-tagged component item.'''
# NOTE: initial implementation for Belfast Group sheets assumes manuscript
# type; should be refined for other types of content
rdf_type = None
if self.unittitle_titles:
# if type of first title is article, return article
if self.unittitle_titles[0].type and \
self.unittitle_titles[0].type.lower() == 'article':
rdf_type = 'bibo:Article'
# if two titles and the second has an issn, article in a periodical
# (TODO: is this close enough for all cases?)
elif len(self.unittitle_titles) == 2 and self.unittitle_titles[1].source \
and self.unittitle_titles[1].source.upper() == 'ISSN':
rdf_type = 'bibo:Article'
# if title has an isbn, assume it is a book
# - for now, also assume OCLC source is book (FIXME: is this accurate?)
elif self.unittitle_titles[0].source \
and self.unittitle_titles[0].source.upper() in ['ISBN', 'OCLC']:
rdf_type = 'bibo:Book'
else:
rdf_type = self.generic_rdf_type_by_series()
# if there are no titles but there is a name with a role of creator,
# the component describes some kind of entity, so set the type
# based on series
elif self.unittitle_names and 'dc:creator' in [n.role for n in self.unittitle_names]:
rdf_type = self.generic_rdf_type_by_series()
return rdf_type
def generic_rdf_type_by_series(self):
'''Calculate a generic RDF type based on the series an item belongs to.
Using bibo:Document for printed material, bibo:Image for photographs,
bibo:AudioVisualDocument for audio/visual materials, with a fallback
of bibo:Manuscript.'''
if self.series_title:
series_title = self.series_title.lower()
# if in a Printed Material series, assume bibo:Document
# - printed material is usually included in text for series and
# subseries names
if 'printed material' in series_title:
return 'bibo:Document'
# if in a Photographs series, use bibo:Image
elif 'photograph' in series_title:
return 'bibo:Image'
# if in an AudioVisual series, use bibo:AudioVisualDocument
elif 'audiovisual' in series_title or \
'audio recordings' in series_title or \
'video recordings' in series_title:
# audiovisual usually used at top-level, audio/video rec. used for subseries
return 'bibo:AudioVisualDocument'
# otherwise, use bibo:Manuscript
return 'bibo:Manuscript'
@property
def rdf_identifier(self):
# if the item in the unittitle has an rdf identifier, make it available
# for use in constructing RDFa in the templates
# for now, assuming that the first title listed is the *thing*
# in the collection. If we can generate an id for it (i.e.,
# it has a source & authfilenumber), use that
if self.unittitle_titles:
return self.unittitle_titles[0].rdf_identifier
# NOTE: previously, was only returning an rdf identifier for a
# single title
# for now, only return when these is one single title
# if len(self.unittitle_titles) == 1 :
# return self.unittitle_titles[0].rdf_identifier
@property
def rdf_mentions(self):
# names related to the title that should also be related to the collection
# titles after the first two need to be handled separately here also
return self.rdf_type is not None and len(self.unittitle_names) \
or len(self.unittitle_titles) > 1
@property
def mention_titles(self):
# list of secondary titles that should be mentioned
# if we have a multiple titles with an author, the titles
# are being treated as a list and should not be exposed
# (i.e., belfast group sheets)
if self.unittitle_names and any(n.role for n in self.unittitle_names) \
or len(self.unittitle_titles) <= 1:
return []
else:
# return all but the first title
return list(self.unittitle_titles)[1:]
# override component.c node_class
# subcomponents need to be initialized as Series to get display_label, series list...
# FIXME: look for a better way to do this kind of XmlObject extension
eadmap.Component._fields['c'].node_class = Series
eadmap.SubordinateComponents._fields['c'].node_class = Series
# override DigitalArchivalObject with local version
eadmap.DescriptiveIdentification._fields['dao_list'].node_class = DigitalArchivalObject
eadmap.Component._fields['dao_list'].node_class = DigitalArchivalObject
eadmap.ArchivalDescription._fields['dao_list'].node_class = DigitalArchivalObject
def shortform_id(id, eadid=None):
"""Calculate a short-form id (without eadid prefix) for use in external urls.
Uses eadid if available; otherwise, relies on the id delimiter character.
:param id: id to be shortened
:param eadid: eadid prefix, if available
:returns: short-form id
"""
# if eadid is available, use that (should be the most reliable way to shorten id)
if eadid:
id = id.replace('%s_' % eadid, '')
# if eadid is not available, split on _ and return latter portion
elif ID_DELIMITER in id:
eadid, id = id.split(ID_DELIMITER)
# this shouldn't happen - one of the above two options should work
else:
raise Exception("Cannot calculate short id for %s" % id)
return id
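# Illustrative usage sketch (not part of the original module); 'abbey244' is a
# hypothetical eadid, and this assumes ID_DELIMITER is the underscore implied by
# the '%s_' prefix above.
def _example_shortform_id():
    with_eadid = shortform_id('abbey244_series1', eadid='abbey244')  # -> 'series1'
    without_eadid = shortform_id('abbey244_series1')                 # -> 'series1'
    return with_eadid, without_eadid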
class Series2(Series):
"""
c02 level subseries
Customized version of :class:`eulxml.xmlmap.eadmap.Component`; extends
:class:`Series`
"""
series = xmlmap.NodeField("parent::e:c01", Series)
":class:`findingaids.fa.models.Series` access to c01 series this subseries belongs to"
objects = Manager('//e:c02')
""":class:`eulexistdb.manager.Manager`
Configured to use *//c02* as base search path.
"""
class Series3(Series):
"""
c03 level subseries
Customized version of :class:`eulxml.xmlmap.eadmap.Component`; extends
:class:`Series`
"""
series2 = xmlmap.NodeField("parent::e:c02", Series2)
":class:`findingaids.fa.models.Subseries` access to c02 subseries this sub-subseries belongs to"
series = xmlmap.NodeField("ancestor::e:c01", Series)
":class:`findingaids.fa.models.Series` access to c01 series this sub-subseries belongs to"
objects = Manager('//e:c03')
""":class:`eulexistdb.manager.Manager`
Configured to use *//c03* as base search path.
"""
class Index(XmlModel, eadmap.Index):
"""
EAD Index, with index entries.
Customized version of :class:`eulxml.xmlmap.eadmap.Index`
"""
ROOT_NAMESPACES = {
'e': eadmap.EAD_NAMESPACE,
'xlink': eadmap.XLINK_NAMESPACE,
'exist': 'http://exist.sourceforge.net/NS/exist'
}
ead = xmlmap.NodeField("ancestor::e:ead", FindingAid)
":class:`findingaids.fa.models.FindingAid` access to ancestor EAD element"
parent = xmlmap.NodeField("parent::node()", "self")
objects = Manager('//e:index')
""":class:`eulexistdb.manager.Manager` - similar to an object manager
for django db objects, used for finding and retrieving index objects
in eXist.
Configured to use *//index* as base search path.
"""
match_count = xmlmap.IntegerField("count(.//exist:match)")
_short_id = None
@property
def short_id(self):
"Short-form id (without eadid prefix) for use in external urls."
if self._short_id is None:
# get eadid, if available
if hasattr(self, 'ead') and hasattr(self.ead, 'eadid') and self.ead.eadid.value:
eadid = self.ead.eadid.value
else:
eadid = None
self._short_id = shortform_id(self.id, eadid)
return self._short_id
# FIXME: look for a better way to do this kind of XmlObject extension
eadmap.ArchivalDescription._fields['index'].node_class = Index
class FileComponent(XmlModel, eadmap.Component):
"""
Any EAD component with a level of *file*, with item-level information (box &
folder contents).
"""
ROOT_NAMESPACES = {
'e': eadmap.EAD_NAMESPACE,
'xlink': eadmap.XLINK_NAMESPACE,
'exist': 'http://exist.sourceforge.net/NS/exist'
}
ead = xmlmap.NodeField("ancestor::e:ead", FindingAid)
":class:`findingaids.fa.models.FindingAid` access to ancestor EAD element"
# NOTE: mapping parent, series1, and series2 to ensure there is enough
# information to generate a link to the series a FileComponent belongs to
parent = xmlmap.NodeField("parent::node()", Series)
":class:`findingaids.fa.models.Series` series this file belongs to (could be c01, c02, or c03)."
series1 = xmlmap.NodeField("ancestor::e:c01", Series)
":class:`findingaids.fa.models.Series` c01 series this file belongs to."
series2 = xmlmap.NodeField("ancestor::e:c02", Series)
":class:`findingaids.fa.models.Series` c02 series this file belongs to, if any."
#: count of public daos; same as in :attr:`FindingAid.public_dao_count`
public_dao_count = xmlmap.IntegerField('count(.//e:dao[@xlink:href][not(@xlink:show="none")][not(@audience) or @audience="external"])')
# objects = Manager('''(e:ead//e:c01|e:ead//e:c02|e:ead//e:c03|e:ead//e:c04)[@level="file"]''')
# eXist can query *much* more efficiently on generic paths
objects = Manager('''//*[@level="file"]''')
""":class:`eulexistdb.manager.Manager` - similar to an object manager
for django db objects, used for finding and retrieving c-series file objects
in eXist.
Configured to find any c-series (1-4) with a level of file.
"""
class Deleted(models.Model):
"""
Information about a previously published finding aid that has been deleted.
"""
eadid = models.CharField('EAD Identifier', max_length=50, unique=True)
title = models.CharField(max_length=200)
date = models.DateTimeField('Date removed', auto_now_add=True)
note = models.CharField(
max_length=400, blank=True,
help_text="Optional: Enter the reason this document is being deleted. " +
"These comments will be displayed to anyone who had the finding " +
"aid bookmarked and returns after it is gone.")
class Meta:
verbose_name = 'Deleted Record'
def __unicode__(self):
return self.eadid
class Archive(models.Model):
'''Model to define Archives associated with EAD documents, for use with
admin user permissions and to identify subversion repositories where
content will be published from.'''
label = models.CharField(max_length=10,
help_text='Short label to identify an archive')
name = models.CharField(max_length=255,
help_text='repository name (subarea) in EAD to identify finding aids associated with this archive')
svn = models.URLField('Subversion Repository',
help_text='URL to subversion repository containing EAD for this archive')
slug = models.SlugField(help_text='''shorthand id
(auto-generated from label; do not modify after initial archive definition)''')
contacts = models.ManyToManyField(settings.AUTH_USER_MODEL,
help_text='Contact person for display on the Request Materials page (email required)',
blank=True)
def __unicode__(self):
return self.label
@property
def svn_local_path(self):
return os.path.join(settings.SVN_WORKING_DIR, self.slug)
def contact_names(self):
'''Comma-separated list of contact names for display in the django admin
list display. Shows the email if a user has no full name.'''
return ', '.join([contact.get_full_name() or
"""Re-Implementation of https://msdn.microsoft.com/en-us/library/windows/desktop/aa369729%28v=vs.85%29.aspx using the CFFI"""
import os
import cffi
import numpy
import time
import re
import collections
_ffi = cffi.FFI()
_package_dir, _ = os.path.split(__file__)
with open(os.path.join(_package_dir, 'mediafoundation.py.h'), 'rt') as f:
_ffi.cdef(f.read())
_combase = _ffi.dlopen('combase')
_ole32 = _ffi.dlopen('ole32')
class _COMLibrary:
"""General functionality of the COM library.
This class contains functionality related to the COM library, for:
- initializing and uninitializing the library.
- checking HRESULT codes.
- decrementing the reference count of COM objects.
"""
def __init__(self):
COINIT_MULTITHREADED = 0x0
hr = _combase.CoInitializeEx(_ffi.NULL, COINIT_MULTITHREADED)
self.check_error(hr)
def __del__(self):
_combase.CoUninitialize()
@staticmethod
def check_error(hresult):
"""Check a given HRESULT for errors.
Throws an error for non-S_OK HRESULTs.
"""
# see shared/winerror.h:
S_OK = 0
E_NOINTERFACE = 0x80004002
E_POINTER = 0x80004003
E_OUTOFMEMORY = 0x8007000e
E_INVALIDARG = 0x80070057
AUDCLNT_E_UNSUPPORTED_FORMAT = 0x88890008
if hresult == S_OK:
return
elif hresult+2**32 == E_NOINTERFACE:
raise RuntimeError('The specified class does not implement the '
'requested interface, or the controlling '
'IUnknown does not expose the requested '
'interface.')
elif hresult+2**32 == E_POINTER:
raise RuntimeError('An argument is NULL.')
elif hresult+2**32 == E_INVALIDARG:
raise RuntimeError("invalid argument")
elif hresult+2**32 == E_OUTOFMEMORY:
raise RuntimeError("out of memory")
elif hresult+2**32 == AUDCLNT_E_UNSUPPORTED_FORMAT:
raise RuntimeError("unsupported format")
else:
raise RuntimeError('Error {}'.format(hex(hresult+2**32)))
@staticmethod
def release(ppObject):
"""Decrement reference count on COM object."""
if ppObject[0] != _ffi.NULL:
ppObject[0][0].lpVtbl.Release(ppObject[0])
ppObject[0] = _ffi.NULL
_com = _COMLibrary()
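# Note on the arithmetic in check_error above (illustrative, not part of the
# original module): HRESULT is a signed 32-bit type, so CFFI returns failure
# codes as negative Python ints; adding 2**32 recovers the unsigned constant
# they are compared with.
def _hresult_unsigned(hresult):
    """Sketch: map a signed 32-bit HRESULT to its unsigned representation."""
    return hresult + 2**32 if hresult < 0 else hresult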
def all_speakers():
"""A list of all connected speakers."""
with _DeviceEnumerator() as enum:
return [_Speaker(dev) for dev in enum.all_devices('speaker')]
def default_speaker():
"""The default speaker of the system."""
with _DeviceEnumerator() as enum:
return _Speaker(enum.default_device('speaker'))
def get_speaker(id):
"""Get a specific speaker by a variety of means.
id can be an a WASAPI id, a substring of the speaker name, or a
fuzzy-matched pattern for the speaker name.
"""
return _match_device(id, all_speakers())
def all_microphones():
"""A list of all connected microphones."""
with _DeviceEnumerator() as enum:
return [_Microphone(dev) for dev in enum.all_devices('microphone')]
def default_microphone():
"""The default microphone of the system."""
with _DeviceEnumerator() as enum:
return _Microphone(enum.default_device('microphone'))
def get_microphone(id):
"""Get a specific microphone by a variety of means.
id can be a WASAPI id, a substring of the microphone name, or a
fuzzy-matched pattern for the microphone name.
"""
return _match_device(id, all_microphones())
def _match_device(id, devices):
"""Find id in a list of devices.
id can be a WASAPI id, a substring of the device name, or a
fuzzy-matched pattern for the device name.
"""
devices_by_id = {device.id: device for device in devices}
devices_by_name = {device.name: device for device in devices}
if id in devices_by_id:
return devices_by_id[id]
# try substring match:
for name, device in devices_by_name.items():
if id in name:
return device
# try fuzzy match:
pattern = '.*'.join(id)
for name, device in devices_by_name.items():
if re.match(pattern, name):
return device
raise IndexError('no device with id {}'.format(id))
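# Illustrative usage sketch (not part of the original module); the 'Realtek'
# device name is hypothetical. Matching falls through: exact WASAPI id, then a
# substring of the friendly name, then a fuzzy pattern built by joining the
# query's characters with '.*'.
def _example_get_speaker():
    by_substring = get_speaker('Realtek')  # substring of the friendly name
    by_fuzzy = get_speaker('Rltk')         # matches 'Realtek...' via 'R.*l.*t.*k'
    return by_substring, by_fuzzy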
def _str2wstr(string):
"""Converts a Python str to a Windows WSTR_T."""
return _ffi.new('int16_t[]', [ord(s) for s in string]+[0])
def _guidof(uuid_str):
"""Creates a Windows LPIID from a str."""
IID = _ffi.new('LPIID')
# convert to zero terminated wide string
uuid = _str2wstr(uuid_str)
hr = _combase.IIDFromString(_ffi.cast("char*", uuid), IID)
_com.check_error(hr)
return IID
class _DeviceEnumerator:
"""Wrapper class for an IMMDeviceEnumerator**.
Provides methods for retrieving _Devices and pointers to the
underlying IMMDevices.
"""
def __init__(self):
self._ptr = _ffi.new('IMMDeviceEnumerator **')
IID_MMDeviceEnumerator = _guidof("{BCDE0395-E52F-467C-8E3D-C4579291692E}")
IID_IMMDeviceEnumerator = _guidof("{A95664D2-9614-4F35-A746-DE8DB63617E6}")
# see shared/WTypesbase.h and um/combaseapi.h:
CLSCTX_ALL = 23
hr = _combase.CoCreateInstance(IID_MMDeviceEnumerator, _ffi.NULL, CLSCTX_ALL,
IID_IMMDeviceEnumerator, _ffi.cast("void **", self._ptr))
_com.check_error(hr)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
_com.release(self._ptr)
def __del__(self):
_com.release(self._ptr)
def _device_id(self, device_ptr):
"""Returns the WASAPI device ID for an IMMDevice**."""
ppId = _ffi.new('LPWSTR *')
hr = device_ptr[0][0].lpVtbl.GetId(device_ptr[0], ppId)
_com.check_error(hr)
return _ffi.string(ppId[0])
def all_devices(self, kind):
"""Yields all sound cards of a given kind.
Kind may be 'speaker' or 'microphone'.
Sound cards are returned as _Device objects.
"""
if kind == 'speaker':
data_flow = 0 # render
elif kind == 'microphone':
data_flow = 1 # capture
else:
raise TypeError(f'Invalid kind: {kind}')
DEVICE_STATE_ACTIVE = 0x1
ppDevices = _ffi.new('IMMDeviceCollection **')
hr = self._ptr[0][0].lpVtbl.EnumAudioEndpoints(self._ptr[0], data_flow, DEVICE_STATE_ACTIVE, ppDevices);
_com.check_error(hr)
for ppDevice in _DeviceCollection(ppDevices):
device = _Device(self._device_id(ppDevice))
_com.release(ppDevice)
yield device
def default_device(self, kind):
"""Returns the default sound card of a given kind.
Kind may be 'speaker' or 'microphone'.
Default sound card is returned as a _Device object.
"""
if kind == 'speaker':
data_flow = 0 # render
elif kind == 'microphone':
data_flow = 1 # capture
else:
raise TypeError(f'Invalid kind: {kind}')
ppDevice = _ffi.new('IMMDevice **')
eConsole = 0
hr = self._ptr[0][0].lpVtbl.GetDefaultAudioEndpoint(self._ptr[0], data_flow, eConsole, ppDevice);
_com.check_error(hr)
device = _Device(self._device_id(ppDevice))
_com.release(ppDevice)
return device
def device_ptr(self, devid):
"""Retrieve IMMDevice** for a WASAPI device ID."""
ppDevice = _ffi.new('IMMDevice **')
devid = _str2wstr(devid)
hr = self._ptr[0][0].lpVtbl.GetDevice(self._ptr[0], _ffi.cast('wchar_t *', devid), ppDevice);
_com.check_error(hr)
return ppDevice
class _DeviceCollection:
"""Wrapper class for an IMMDeviceCollection**.
Generator for IMMDevice** pointers.
"""
def __init__(self, ptr):
self._ptr = ptr
def __del__(self):
_com.release(self._ptr)
def __len__(self):
pCount = _ffi.new('UINT *')
hr = self._ptr[0][0].lpVtbl.GetCount(self._ptr[0], pCount)
_com.check_error(hr)
return pCount[0]
def __getitem__(self, idx):
if idx >= len(self):
raise StopIteration()
ppDevice = _ffi.new('IMMDevice **')
hr = self._ptr[0][0].lpVtbl.Item(self._ptr[0], idx, ppDevice)
_com.check_error(hr)
return ppDevice
class _PropVariant:
"""Wrapper class for a PROPVARIANT.
Correctly allocates and frees a PROPVARIANT. Normal CFFI
malloc/free is incompatible with PROPVARIANTs, since COM expects
PROPVARIANTS to be freely reallocatable by its own allocator.
Access the PROPVARIANT* pointer using .ptr.
"""
def __init__(self):
self.ptr = _combase.CoTaskMemAlloc(_ffi.sizeof('PROPVARIANT'))
self.ptr = _ffi.cast("PROPVARIANT *", self.ptr)
def __del__(self):
hr = _ole32.PropVariantClear(self.ptr)
_com.check_error(hr)
class _Device:
"""Wrapper class for an IMMDevice.
Implements memory management and retrieval of the device name, the
number of channels, and device activation.
Subclassed by _Speaker and _Microphone for playback and recording.
"""
def __init__(self, id):
self._id = id
def _device_ptr(self):
with _DeviceEnumerator() as enum:
return enum.device_ptr(self._id)
@property
def id(self):
return self._id
@property
def name(self):
# um/coml2api.h:
ppPropertyStore = _ffi.new('IPropertyStore **')
ptr = self._device_ptr()
hr = ptr[0][0].lpVtbl.OpenPropertyStore(ptr[0], 0, ppPropertyStore)
_com.release(ptr)
_com.check_error(hr)
propvariant = _PropVariant()
# um/functiondiscoverykeys_devpkey.h and https://msdn.microsoft.com/en-us/library/windows/desktop/dd370812(v=vs.85).aspx
PKEY_Device_FriendlyName = _ffi.new("PROPERTYKEY *",
[[0xa45c254e, 0xdf1c, 0x4efd, [0x80, 0x20, 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0]],
14])
hr = ppPropertyStore[0][0].lpVtbl.GetValue(ppPropertyStore[0], PKEY_Device_FriendlyName, propvariant.ptr)
_com.check_error(hr)
if propvariant.ptr[0].vt != 31:
raise RuntimeError('Property was expected to be a string, but is not a string')
data = _ffi.cast("short*", propvariant.ptr[0].data)
for idx in range(256):
if data[idx] == 0:
break
devicename = ''.join(chr(c) for c in data[0:idx])
_com.release(ppPropertyStore)
return devicename
@property
def channels(self):
# um/coml2api.h:
ppPropertyStore = _ffi.new('IPropertyStore **')
ptr = self._device_ptr()
hr = ptr[0][0].lpVtbl.OpenPropertyStore(ptr[0], 0, ppPropertyStore)
_com.release(ptr)
_com.check_error(hr)
propvariant = _PropVariant()
# um/functiondiscoverykeys_devpkey.h and https://msdn.microsoft.com/en-us/library/windows/desktop/dd370812(v=vs.85).aspx
PKEY_AudioEngine_DeviceFormat = _ffi.new("PROPERTYKEY *",
[[0xf19f064d, 0x82c, 0x4e27, [0xbc, 0x73, 0x68, 0x82, 0xa1, 0xbb, 0x8e, 0x4c]],
0])
hr = ppPropertyStore[0][0].lpVtbl.GetValue(ppPropertyStore[0], PKEY_AudioEngine_DeviceFormat, propvariant.ptr)
_com.release(ppPropertyStore)
_com.check_error(hr)
if propvariant.ptr[0].vt != 65:
raise RuntimeError('Property was expected to be a blob, but is not a blob')
pPropVariantBlob = _ffi.cast("BLOB_PROPVARIANT *", propvariant.ptr)
assert pPropVariantBlob[0].blob.cbSize == 40
waveformat = _ffi.cast("WAVEFORMATEX *", pPropVariantBlob[0].blob.pBlobData)
channels = waveformat[0].nChannels
return channels
def _audio_client(self):
CLSCTX_ALL = 23
ppAudioClient = _ffi.new("IAudioClient **")
IID_IAudioClient = _guidof("{1CB9AD4C-DBFA-4C32-B178-C2F568A703B2}")
ptr = self._device_ptr()
hr = ptr[0][0].lpVtbl.Activate(ptr[0], IID_IAudioClient, CLSCTX_ALL, _ffi.NULL, _ffi.cast("void**", ppAudioClient))
_com.release(ptr)
_com.check_error(hr)
return ppAudioClient
class _Speaker(_Device):
"""A soundcard output. Can be used to play audio.
Use the `play` method to play one piece of audio, or use the
`player` method to get a context manager for playing continuous
audio.
Properties:
- `channels`: the number of available channels.
- `name`: the name of the sound card.
- `id`: the WASAPI ID of the sound card.
"""
def __init__(self, device):
self._id = device._id
def __repr__(self):
return f'<Speaker {self.name} ({self.channels} channels)>'
def player(self, samplerate, channels=None, blocksize=None):
if channels is None:
channels = self.channels
return _Player(self._audio_client(), samplerate, channels, blocksize)
def play(self, data, samplerate, channels=None, blocksize=None):
with self.player(samplerate, channels, blocksize) as p:
p.play(data)
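# Illustrative usage sketch (not part of the original module): play one second
# of a 440 Hz tone through the default speaker. The 48000 Hz samplerate is an
# assumption chosen for the example.
def _example_play_tone():
    samplerate = 48000
    t = numpy.linspace(0, 1.0, samplerate, endpoint=False)
    tone = 0.2 * numpy.sin(2 * numpy.pi * 440 * t)
    default_speaker().play(tone, samplerate=samplerate)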
class _Microphone(_Device):
"""A soundcard input. Can be used to record audio.
Use the `record` method to record one piece of audio, or use the
`recorder` method to get a context manager for recording
continuous audio.
Properties:
- `channels`: the number of available channels.
- `name`: the name of the sound card.
- `id`: the WASAPI ID of the sound card.
"""
def __init__(self, device):
self._id = device._id
def __repr__(self):
return f'<Microphone {self.name} ({self.channels} channels)>'
def recorder(self, samplerate, channels=None, blocksize=None):
if channels is None:
channels = self.channels
return _Recorder(self._audio_client(), samplerate, channels, blocksize)
def record(self, numframes, samplerate, channels=None, blocksize=None):
with self.recorder(samplerate, channels, blocksize) as r:
| |
from __future__ import unicode_literals
import configparser
import logging
import re
import time
from urllib.parse import urlparse
from collections import OrderedDict
from contextlib import closing
import requests
import io
import socket
import xml.etree.ElementTree as elementtree
# Constants
PREFIX_COUNTRY = 'country-'
PREFIX_STATE = 'state-'
PREFIX_LANGUAGE = 'language-'
PREFIX_TAG = 'tag-'
logger = logging.getLogger(__name__)
class PlaylistError(Exception):
pass
class cache(object):
# TODO: merge this to util library (copied from mopidy-spotify)
def __init__(self, ctl=0, ttl=3600):
logger.debug('RadioBrowser: Start radiobrowser.cache.__init__')
self.cache = {}
self.ctl = ctl
self.ttl = ttl
self._call_count = 0
def __call__(self, func):
logger.debug('RadioBrowser: Start radiobrowser.cache.__call__')
def _memoized(*args):
logger.debug('RadioBrowser: Start radiobrowser.cache.__call__._memoized')
now = time.time()
try:
value, last_update = self.cache[args]
age = now - last_update
if (self._call_count > self.ctl or age > self.ttl):
self._call_count = 0
raise AttributeError
if self.ctl:
self._call_count += 1
return value
except (KeyError, AttributeError):
value = func(*args)
if value:
self.cache[args] = (value, now)
return value
except TypeError:
return func(*args)
def clear():
logger.debug('RadioBrowser: Start radiobrowser.cache.__call__.clear')
self.cache.clear()
_memoized.clear = clear
return _memoized
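# Illustrative usage sketch (not part of the original module): `cache` memoizes
# a function's return value per argument tuple, recomputing after `ttl` seconds
# or after `ctl` cached hits. The decorated function below is hypothetical.
def _example_cache_usage():
    @cache(ctl=8, ttl=3600)
    def slow_square(x):
        time.sleep(0.01)  # stand-in for an expensive call (e.g. an HTTP request)
        return x * x
    first = slow_square(4)   # computed and stored
    second = slow_square(4)  # served from the cache until ttl/ctl expire
    slow_square.clear()      # drop all cached entries
    return first, second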
def parse_m3u(data):
logger.debug('RadioBrowser: Start radiobrowser.parse_m3u')
# Copied from mopidy.audio.playlists
# Mopidy version expects a header but it's not always present
for line in data.readlines():
if not line.startswith('#') and line.strip():
yield line.strip()
def parse_pls(data):
logger.debug('RadioBrowser: Start radiobrowser.parse_pls')
# Copied from mopidy.audio.playlists
try:
cp = configparser.RawConfigParser()
cp.read_file(data)
except configparser.Error:
return
for section in cp.sections():
if section.lower() != 'playlist':
continue
for i in range(cp.getint(section, 'numberofentries')):
try:
# TODO: Remove this horrible hack to avoid adverts
if cp.has_option(section, 'length%d' % (i+1)):
if cp.get(section, 'length%d' % (i+1)) == '-1':
yield cp.get(section, 'file%d' % (i+1))
else:
yield cp.get(section, 'file%d' % (i+1))
except configparser.NoOptionError:
return
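# Illustrative usage sketch (not part of the original module): parse_pls takes a
# file-like object and yields the FileN entries of the [playlist] section; the
# stream URL below is hypothetical.
def _example_parse_pls():
    data = io.StringIO(
        '[playlist]\n'
        'NumberOfEntries=1\n'
        'File1=http://example.invalid/stream.mp3\n'
        'Length1=-1\n'
    )
    return list(parse_pls(data))  # ['http://example.invalid/stream.mp3']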
def fix_asf_uri(uri):
logger.debug('RadioBrowser: Start radiobrowser.fix_asf_uri')
return re.sub(r'http://(.+\?mswmext=\.asf)', r'mms://\1', uri, flags=re.IGNORECASE)
def parse_old_asx(data):
logger.debug('RadioBrowser: Start radiobrowser.parse_old_asx')
try:
cp = configparser.RawConfigParser()
cp.read_file(data)
except configparser.Error:
return
for section in cp.sections():
if section.lower() != 'reference':
continue
for option in cp.options(section):
if option.lower().startswith('ref'):
uri = cp.get(section, option).lower()
yield fix_asf_uri(uri)
def parse_new_asx(data):
logger.debug('RadioBrowser: Start radiobrowser.parse_new_asx')
# Copied from mopidy.audio.playlists
try:
for event, element in elementtree.iterparse(data):
element.tag = element.tag.lower() # normalize
except elementtree.ParseError:
return
for ref in element.findall('entry/ref[@href]'):
yield fix_asf_uri(ref.get('href', '').strip())
for entry in element.findall('entry[@href]'):
yield fix_asf_uri(entry.get('href', '').strip())
def parse_asx(data):
logger.debug('RadioBrowser: Start radiobrowser.parse_asx')
if 'asx' in data.getvalue()[0:50].lower():
return parse_new_asx(data)
else:
return parse_old_asx(data)
# This is all broken: mopidy/mopidy#225
# from gi.repository import TotemPlParser
# def totem_plparser(uri):
# results = []
# def entry_parsed(parser, uri, metadata):
# results.append(uri)
# parser = TotemPlParser.Parser.new()
# someid = parser.connect('entry-parsed', entry_parsed)
# res = parser.parse(uri, False)
# parser.disconnect(someid)
# if res != TotemPlParser.ParserResult.SUCCESS:
# logger.debug('Failed to parse playlist')
# return results
def find_playlist_parser(extension, content_type):
logger.debug('RadioBrowser: Start radiobrowser.find_playlist_parser')
extension_map = {'.asx': parse_asx,
'.wax': parse_asx,
'.m3u': parse_m3u,
'.pls': parse_pls}
content_type_map = {'video/x-ms-asf': parse_asx,
'application/x-mpegurl': parse_m3u,
'audio/x-scpls': parse_pls}
parser = extension_map.get(extension, None)
if not parser and content_type:
# Annoying case where the url gave us no hints so try and work it out
# from the header's content-type instead.
# This might turn out to be server-specific...
parser = content_type_map.get(content_type.lower(), None)
return parser
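# Illustrative usage sketch (not part of the original module): the file
# extension is tried first, and the response Content-Type is only used as a
# fallback when the extension is unknown.
def _example_find_parser():
    assert find_playlist_parser('.pls', None) is parse_pls
    assert find_playlist_parser('.m3u', None) is parse_m3u
    # unknown extension, so fall back to the header's content type:
    assert find_playlist_parser('.xyz', 'audio/x-scpls') is parse_pls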
class RadioBrowser(object):
# Wrapper for the RadioBrowser API.
def __init__(self, timeout, encoding, wlexact, wltags, wlstates, dlang, drated, session=None):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.__init__')
hosts = []
ips = socket.getaddrinfo('all.api.radio-browser.info', 80, 0, 0, socket.IPPROTO_TCP)
for ip_tupel in ips:
ip = ip_tupel[4][0]
host_addr = socket.gethostbyaddr(ip)
if host_addr[0] not in hosts:
hosts.append(host_addr[0])
hosts.sort()
# old API: self._base_uri = 'http://www.radio-browser.info/webservice/json/%s'
self._base_uri = 'http://' + hosts[0] + '/json/%s'
self._session = session or requests.Session()
self._timeout = timeout / 1000.0
self._encoding = encoding
self._categories = [] # <type 'list'>
self._directories = {}
self._stations = {}
self._wlexact = wlexact
self._wltags = wltags
self._wlstates = wlstates
self._dlang = dlang
self._drated = drated
category = { # <type 'dict'>
# Countries
# _base_uri/countries
'URL' : self._base_uri % 'countrycodes',
'uri' : 'radiobrowser:category:countries',
'element': 'outline',
'key' : 'countries',
'text' : 'Countries',
'type' : 'link'
};
self.addCategory(category);
category = {
# Languages
# _base_uri/languages
'URL': self._base_uri % 'languages',
'uri' : 'radiobrowser:category:languages',
'element': 'outline',
'text' : 'Languages',
'key' : 'languages',
'type' : 'link'
};
if dlang:
self.addCategory(category);
category = {
# Tags
# _base_uri/tags
'URL' : self._base_uri % 'tags',
'uri' : 'radiobrowser:category:tags',
'element': 'outline',
'text' : 'Tags',
'key' : 'tags',
'type' : 'link'
};
self.addCategory(category);
category = {
# Top 50 clicked
# _base_uri/stations/topclick
'URL' : self._base_uri % 'stations/topclick/50',
'uri' : 'radiobrowser:category:click',
'element': 'outline',
'text' : 'Top 50 clicked',
'key' : 'clicks',
'type' : 'link'
};
if drated:
self.addCategory(category);
category = {
# Top 50 voted
# _base_uri/stations/topvote
'URL' : self._base_uri % 'stations/topvote/50',
'uri' : 'radiobrowser:category:vote',
'element': 'outline',
'text' : 'Top 50 voted',
'key' : 'votes',
'type' : 'link'
};
if drated:
self.addCategory(category);
def reload(self):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.reload')
self._stations.clear()
self._radiobrowser.clear()
self._get_playlist.clear()
def addCategory(self, category):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.addCategory')
self._categories.append(category);
return True
def getCategory(self, categoryId):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.getCategory')
if categoryId in self._categories:
category = self._categories[categoryId]
else:
logger.error('RadioBrowser: Unknown category with id=' + categoryId)
category = None
return category
def getCategories(self):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.getCategories')
return self._categories
def browseCategory(self, key):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.browseCategory (key="' + key + '")')
# Use the key to find the category
for category in self._categories:
if key == category['key']:
url = category['URL']
results = list(self._radiobrowser(url, ''))
return results
return []
def addDirectory(self, directory):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.addDirectory')
directoryId = directory['key']
if directoryId in self._directories:
# The directory already exists
return True
self._directories[directoryId] = directory
return True
def getDirectory(self, directoryId):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.getDirectory')
if directoryId in self._directories:
directory = self._directories[directoryId]
else:
logger.error('RadioBrowser: Unknown directory with id=' + directoryId)
directory = None
return directory
def getDirectories(self):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.getDirectories')
return self._directories
def browseDirectory(self, directory):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.browseDirectory')
url = directory['URL']
results = list(self._radiobrowser(url, ''))
return results
def addStation(self, station):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.addStation: Station ID='+ station['stationuuid'] +' Codec is ' + station['codec'])
stationId = station['stationuuid']
stationCodec = station['codec']
if stationId in self._stations:
# The station already exists
return True
self._stations[stationId] = station
return True
def getStation(self, stationId):
if stationId in self._stations:
station = self._stations[stationId]
else:
station = self._station_info(stationId)
self._stations['stationId'] = station
encoding = station['codec'].lower()
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.getStation: ' + station['name'] + '(' + encoding + ')')
return station
def addCountry(self, country):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.addCountry: ' + country['name'])
if '0' == country['stationcount']:
return False
# Add the url to browse the country
# http://www.radio-browser.info/webservice/json/states/<country>
country['URL'] = self._base_uri % ('states/' + country['name'] + '/')
# country['URL'] = self._base_uri % ('states')
country['key'] = PREFIX_COUNTRY + country['a2']
# Check the whitelist for an exact or substring match against the user's configured searchable countries
wlstates = self._wlstates.split(', ')
wlexact = self._wlexact
name = country['name'].lower()
if wlexact:
for wlstate in wlstates:
if wlstate.lower() == name:
self.addDirectory(country)
return True
else:
for wlstate in wlstates:
if wlstate.lower() in name:
self.addDirectory(country)
return True
def getCountry(self, countryId):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.getCountry')
return self.getDirectory(PREFIX_COUNTRY + countryId)
def addState(self, state):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.addState')
if '0' == state['stationcount']:
return False
# Add the url to browse the state
# http://www.radio-browser.info/webservice/json/stations/bystate/<name>
# http://www.radio-browser.info/webservice/json/stations/bystateexact/<name>
name = state['name'].strip()
identifier = name.replace(' ', '')
if len(name) == 2 and name == state['country']:
state['URL'] = self._base_uri % ('stations/bycountrycodeexact/' + name)
else:
state['URL'] = self._base_uri % ('stations/bystateexact/' + name)
state['key'] = PREFIX_STATE + identifier
self.addDirectory(state)
return True
def getState(self, stateId):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.getState')
return self.getDirectory(PREFIX_STATE + stateId)
def addLanguage(self, language):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.addLanguage')
if '0' == language['stationcount']:
return False
# Add the url to browse the language
# http://www.radio-browser.info/webservice/json/stations/bylanguage/<name>
# http://www.radio-browser.info/webservice/json/stations/bylanguageexact/<name>
name = language['name'].strip()
language['URL'] = self._base_uri % ('stations/bylanguageexact/' + name)
language['key'] = PREFIX_LANGUAGE + name.replace(' ', '')
self.addDirectory(language)
return True
def getLanguage(self, languageId):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.getLanguage')
return self.getDirectory(PREFIX_LANGUAGE + languageId)
def addTag(self, tag):
logger.debug('RadioBrowser: Start radiobrowser.RadioBrowser.addTag: ' + tag['name'].strip())
# Add the url to browse the tag
# http://www.radio-browser.info/webservice/json/stations/bytag/<name>
# http://www.radio-browser.info/webservice/json/stations/bytagexact/<name>
name = tag['name'].strip()
wltags = self._wltags.split(', ')
wlexact = self._wlexact
# Exact match on tag or return tags with keyword contained within
if wlexact:
for wltag in wltags:
if wltag == name:
searchName = name.replace('#', '')
tag['URL'] = self._base_uri % ('stations/bytagexact/' + searchName)
tag['key'] = PREFIX_TAG + name.replace(' ', '')
self.addDirectory(tag)
return True
else:
for wltag in wltags:
if wltag in name:
searchName = name.replace('#', '')
tag['URL'] = self._base_uri % ('stations/bytagexact/' + searchName)
tag['key'] = PREFIX_TAG + name.replace(' ', | |
those in incapacitated list should still be in combatants also
msglist = [ob.character for ob in self.ndb.combatants] + self.ndb.observers
if not exclude:
exclude = []
msglist = [ob for ob in msglist if ob not in exclude]
for ob in msglist:
mymsg = message
ob.msg(mymsg, options)
# ---------------------------------------------------------------------
# -----Admin Methods for OOC character status: adding, removing, etc----
def check_character_is_combatant(self, character):
"""Returns True if the character is one of our combatants."""
try:
state = character.combat.state
return state and state in self.ndb.combatants
except AttributeError:
return False
def add_combatant(self, character, adder=None, reset=False):
"""
Adds a character to combat. The adder is the character that started
the process, and the return message is sent to them. We return None
if they're already fighting, since then it's likely a state change
in defending or so on, and messages will be sent from elsewhere.
"""
# if we're already fighting, nothing happens
cdata = character.combat
if adder:
adder_state = adder.combat.state
else:
adder_state = None
if self.check_character_is_combatant(character):
if character == adder:
return "You are already in the fight."
if cdata.state and adder:
cdata.state.add_foe(adder)
if adder_state:
adder_state.add_foe(character)
return "%s is already fighting." % character.key
# check if attackable
if not character.attackable:
return "%s is not attackable." % character.key
if character.location != self.obj:
return "%s is not in the same room as the fight." % character.key
# if we were in observer list, we stop since we're participant now
self.remove_observer(character)
self.send_intro_message(character)
# add combat state to list of combatants
if character not in self.characters_in_combat:
CombatantStateHandler(character, self, reset=reset)
if character == adder:
return "{rYou have entered combat.{n"
# if we have an adder, they're fighting one another. set targets
elif self.check_character_is_combatant(adder):
# make sure adder is a combatant, not a GM
cdata.state.add_foe(adder)
cdata.state.prev_targ = adder
adder_state.add_foe(character)
adder_state.prev_targ = character
adder_state.setup_attacks()
cdata.state.setup_attacks()
return "You have added %s to a fight." % character.name
@property
def characters_in_combat(self):
"""Returns characters from our combat states"""
return [ob.character for ob in self.ndb.combatants]
def register_state(self, state):
"""
Stores reference to a CombatantStateHandler in self.ndb.combatants. Called by CombatantStateHandler's init,
done this way to avoid possible infinite recursion
"""
if state not in self.ndb.combatants:
self.ndb.combatants.append(state)
def finish_initialization(self):
"""
Finish the initial setup of combatants we add
"""
self.ndb.initializing = False
self.reset_combatants()
self.display_phase_status_to_all(intro=True)
def reset_combatants(self):
"""Resets all our combatants for the next round, displaying prep message to them"""
for state in self.ndb.combatants:
state.reset()
for state in self.ndb.combatants:
state.setup_phase_prep()
def check_if_combat_should_end(self):
"""Checks if combat should be over"""
if not self or not self.pk or self.ndb.shutting_down:
self.end_combat()
return True
if not self.ndb.combatants and not self.ndb.initializing and self.managed_mode_permits_ending():
self.msg("No combatants found. Exiting.")
self.end_combat()
return True
active_combatants = [ob for ob in self.ndb.combatants if ob.conscious]
active_fighters = [ob for ob in active_combatants if not (ob.automated and ob.queued_action and
ob.queued_action.qtype == "Pass")]
if not active_fighters and not self.ndb.initializing:
if self.managed_mode_permits_ending():
self.msg("All combatants are incapacitated or automated npcs who are passing their turn. Exiting.")
self.end_combat()
return True
def managed_mode_permits_ending(self):
"""If we're in managed mode, increment a counter of how many checks before we decide it's idle and end"""
if not self.managed_mode:
return True
if self.ndb.gm_afk_counter > 3:
return True
self.ndb.gm_afk_counter += 1
return False
def ready_check(self, checker=None):
"""
Check all combatants. If all ready, move to phase 2. If checker is
set, it's a character who is already ready but is using the command
to see a list of who might not be, so the message is only sent to them.
"""
self.ndb.ready = []
self.ndb.not_ready = []
if self.ndb.phase == 2:
# already in phase 2, do nothing
return
for state in self.ndb.combatants:
if state.ready:
self.ndb.ready.append(state.character)
elif not state.conscious:
self.ndb.ready.append(state.character)
else:
self.ndb.not_ready.append(state.character)
if self.ndb.not_ready: # not ready for phase 2, tell them why
if checker:
self.display_phase_status(checker, disp_intro=False)
else:
try:
self.start_phase_2()
except ValueError:
import traceback
traceback.print_exc()
self.end_combat()
def afk_check(self, checking_char, char_to_check):
"""
Prods a character to make a response. If the character is not in the
afk_check list, we add them and send them a warning message, then update
their combat data with the AFK timer. Subsequent checks are votes to
kick the player if they have been AFK longer than a given idle timer.
Any action removes them from AFK timer and resets the AFK timer in their
combat data as well as removes all votes there.
"""
# No, they can't vote themselves AFK as a way to escape combat
if checking_char == char_to_check:
checking_char.msg("You cannot vote yourself AFK to leave combat.")
return
if self.ndb.phase == 1 and char_to_check.combat.state.ready:
checking_char.msg("That character is ready to proceed " +
"with combat. They are not holding up the fight.")
return
if self.ndb.phase == 2 and not self.ndb.active_character == char_to_check:
checking_char.msg("It is not their turn to act. You may only " +
"vote them AFK if they are holding up the fight.")
return
if char_to_check not in self.ndb.afk_check:
msg = "{w%s is checking if you are AFK. Please take" % checking_char.name
msg += " an action within a few minutes.{n"
char_to_check.msg(msg)
checking_char.msg("You have nudged %s to take an action." % char_to_check.name)
self.ndb.afk_check.append(char_to_check)
char_to_check.combat.state.afk_timer = time.time() # current time
return
# character is in the AFK list. Check if they've been gone long enough to vote against
elapsed_time = time.time() - char_to_check.combat.state.afk_timer
if elapsed_time < MAX_AFK:
msg = "It has been %s since %s was first checked for " % (elapsed_time, char_to_check.name)
msg += "AFK. They have %s seconds to respond before " % (MAX_AFK - elapsed_time)
msg += "votes can be lodged against them to remove them from combat."
checking_char.msg(msg)
return
# record votes. if we have enough votes, boot 'em.
votes = char_to_check.combat.state.votes_to_kick
if checking_char in votes:
checking_char.msg("You have already voted for their removal. Every other player " +
"except for %s must vote for their removal." % char_to_check.name)
return
votes.append(checking_char)
if len(votes) >= len(self.ndb.combatants) - 1:
self.msg("Removing %s from combat due to inactivity." % char_to_check.name)
self.move_to_observer(char_to_check)
return
char_to_check.msg("A vote has been lodged for your removal from combat due to inactivity.")
def remove_afk(self, character):
"""
Removes a character from the afk_check list after taking a combat
action. Resets relevant fields in combat data
"""
if character in self.ndb.afk_check:
self.ndb.afk_check.remove(character)
character.combat.state.afk_timer = None
character.combat.state.votes_to_kick = []
character.msg("You are no longer being checked for AFK.")
return
def move_to_observer(self, character):
"""
If a character is marked AFK or dies, they are moved from the
combatant list to the observer list.
"""
self.remove_combatant(character)
self.add_observer(character)
def remove_combatant(self, character, in_shutdown=False):
"""
Remove a character from combat altogether. Do a ready check if
we're in phase one.
"""
state = character.combat.state
self.clear_lists_of_character(character)
if state in self.ndb.combatants:
self.ndb.combatants.remove(state)
if state:
state.leave_combat()
# if we're already shutting down, avoid redundant messages
if len(self.ndb.combatants) < 2 and not in_shutdown:
# We weren't shutting down and don't have enough fighters to continue. end the fight.
self.end_combat()
return
if self.ndb.phase == 1 and not in_shutdown:
self.ready_check()
return
if self.ndb.phase == 2 and not in_shutdown:
if state in self.ndb.initiative_list:
self.ndb.initiative_list.remove(state)
return
if self.ndb.active_character == character:
self.next_character_turn()
def clear_lists_of_character(self, character):
"""Removes a character from any of the lists they might be in"""
if character in self.ndb.fleeing:
self.ndb.fleeing.remove(character)
if character in self.ndb.afk_check:
self.ndb.afk_check.remove(character)
if character in self.ndb.surrender_list:
self.ndb.surrender_list.remove(character)
def add_observer(self, character):
"""
Character becomes a non-participating observer. This is usually
for GMs who are watching combat, but other players may be moved
to this - dead characters are no longer combatants, nor are
characters who have been marked as AFK.
"""
# first make sure that any other combat they're watching removes them as a spectator
currently_spectating = character.combat.spectated_combat
if currently_spectating and currently_spectating != self:
currently_spectating.remove_observer(character)
# now we start them spectating
character.combat.spectated_combat = self
self.send_intro_message(character, combatant=False)
self.display_phase_status(character, disp_intro=False)
if character not in self.ndb.observers:
self.ndb.observers.append(character)
return
def remove_observer(self, character, quiet=True):
"""
Leave observer list, either due to stop observing or due to
joining the fight
"""
character.combat.spectated_combat = None
if character in self.ndb.observers:
character.msg("You stop spectating the fight.")
self.ndb.observers.remove(character)
return
if | |
# 3DLSCPTR/db/tools/utils.py
"""
Utility functions and default settings
Author: <NAME> (<EMAIL>)
Date: March, 2020
"""
import argparse
import errno
import os
import sys
import cv2
import matplotlib
import numpy as np
import torch
import torch.nn.init as init
import torch.optim
from torch.optim import lr_scheduler
import os.path as ops
from mpl_toolkits.mplot3d import Axes3D
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
plt.rcParams['figure.figsize'] = (35, 30)
def define_args():
parser = argparse.ArgumentParser(description='Lane_detection_all_objectives')
# Paths settings
parser.add_argument('--dataset_name', type=str, help='the dataset name to be used in saving model names')
parser.add_argument('--data_dir', type=str, help='The path saving train.json and val.json files')
parser.add_argument('--dataset_dir', type=str, help='The path saving actual data')
parser.add_argument('--save_path', type=str, default='data_splits/', help='directory to save output')
# Dataset settings
parser.add_argument('--org_h', type=int, default=1080, help='height of the original image')
parser.add_argument('--org_w', type=int, default=1920, help='width of the original image')
parser.add_argument('--crop_y', type=int, default=0, help='crop from image')
parser.add_argument('--cam_height', type=float, default=1.55, help='height of camera in meters')
parser.add_argument('--pitch', type=float, default=3, help='pitch angle of camera to ground in centi degree')
parser.add_argument('--fix_cam', type=str2bool, nargs='?', const=True, default=False, help='if to use fix camera')
parser.add_argument('--no_3d', action='store_true', help='if a dataset include laneline 3D attributes')
parser.add_argument('--no_centerline', action='store_true', help='if a dataset include centerline')
# 3DLaneNet settings
parser.add_argument('--mod', type=str, default='3DLaneNet', help='model to train')
parser.add_argument("--pretrained", type=str2bool, nargs='?', const=True, default=True, help="use pretrained vgg model")
parser.add_argument("--batch_norm", type=str2bool, nargs='?', const=True, default=True, help="apply batch norm")
parser.add_argument("--pred_cam", type=str2bool, nargs='?', const=True, default=False, help="use network to predict camera online?")
parser.add_argument('--ipm_h', type=int, default=208, help='height of inverse projective map (IPM)')
parser.add_argument('--ipm_w', type=int, default=128, help='width of inverse projective map (IPM)')
parser.add_argument('--resize_h', type=int, default=360, help='height of the resized image')
parser.add_argument('--resize_w', type=int, default=480, help='width of the resized image')
parser.add_argument('--y_ref', type=float, default=20.0, help='the reference Y distance in meters from where lane association is determined')
parser.add_argument('--prob_th', type=float, default=0.5, help='probability threshold for selecting output lanes')
# General model settings
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--nepochs', type=int, default=30, help='total numbers of epochs')
parser.add_argument('--learning_rate', type=float, default=5*1e-4, help='learning rate')
parser.add_argument('--no_cuda', action='store_true', help='if gpu available')
parser.add_argument('--nworkers', type=int, default=0, help='num of threads')
parser.add_argument('--no_dropout', action='store_true', help='no dropout in network')
parser.add_argument('--pretrain_epochs', type=int, default=20, help='Number of epochs to perform segmentation pretraining')
parser.add_argument('--channels_in', type=int, default=3, help='num channels of input image')
parser.add_argument('--flip_on', action='store_true', help='Random flip input images on?')
parser.add_argument('--test_mode', action='store_true', help='prevents loading latest saved model')
parser.add_argument('--start_epoch', type=int, default=0, help='prevents loading latest saved model')
parser.add_argument('--evaluate', action='store_true', help='only perform evaluation')
parser.add_argument('--resume', type=str, default='', help='resume latest saved run')
parser.add_argument('--vgg_mean', type=float, default=[0.485, 0.456, 0.406], help='Mean of rgb used in pretrained model on ImageNet')
parser.add_argument('--vgg_std', type=float, default=[0.229, 0.224, 0.225], help='Std of rgb used in pretrained model on ImageNet')
# Optimizer settings
parser.add_argument('--optimizer', type=str, default='adam', help='adam or sgd')
parser.add_argument('--weight_init', type=str, default='normal', help='normal, xavier, kaiming, orthogonal weights initialisation')
parser.add_argument('--weight_decay', type=float, default=0, help='L2 weight decay/regularisation on?')
parser.add_argument('--lr_decay', action='store_true', help='decay learning rate with rule')
parser.add_argument('--niter', type=int, default=50, help='# of iter at starting learning rate')
parser.add_argument('--niter_decay', type=int, default=400, help='# of iter to linearly decay learning rate to zero')
parser.add_argument('--lr_policy', default=None, help='learning rate policy: lambda|step|plateau')
parser.add_argument('--lr_decay_iters', type=int, default=30, help='multiply by a gamma every lr_decay_iters iterations')
parser.add_argument('--clip_grad_norm', type=int, default=0, help='performs gradient clipping')
# CUDNN usage
parser.add_argument("--cudnn", type=str2bool, nargs='?', const=True, default=True, help="cudnn optimization active")
# Tensorboard settings
parser.add_argument("--no_tb", type=str2bool, nargs='?', const=True, default=False, help="Use tensorboard logging by tensorflow")
# Print settings
parser.add_argument('--print_freq', type=int, default=500, help='print frequency (in iterations)')
parser.add_argument('--save_freq', type=int, default=500, help='save frequency (in iterations)')
# Skip batch
parser.add_argument('--list', type=int, nargs='+', default=[954, 2789], help='Images you want to skip')
return parser
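# Illustrative usage sketch (not part of the original file): build the parser,
# parse a hypothetical command line, and apply one of the preset configs below.
# The dataset paths are placeholders.
def _example_parse_args():
    parser = define_args()
    args = parser.parse_args(['--dataset_name', 'sim3d',
                              '--data_dir', 'data_splits/sim3d/',
                              '--dataset_dir', '/path/to/sim3d/',
                              '--batch_size', '8',
                              '--nepochs', '30'])
    sim3d_config(args)  # fills in camera intrinsics, anchors, etc.
    return args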
def tusimple_config(args):
# set dataset parameters
args.org_h = 720
args.org_w = 1280
args.crop_y = 80
args.no_centerline = True
args.no_3d = True
args.fix_cam = True
args.pred_cam = False
# set camera parameters for the test dataset
args.K = np.array([[1000, 0, 640],
[0, 1000, 400],
[0, 0, 1]])
args.cam_height = 1.6
args.pitch = 9
# specify model settings
"""
paper presented params:
args.top_view_region = np.array([[-10, 85], [10, 85], [-10, 5], [10, 5]])
args.anchor_y_steps = np.array([5, 20, 40, 60, 80, 100])
"""
# args.top_view_region = np.array([[-10, 82], [10, 82], [-10, 2], [10, 2]])
# args.anchor_y_steps = np.array([2, 3, 5, 10, 15, 20, 30, 40, 60, 80])
args.top_view_region = np.array([[-10, 103], [10, 103], [-10, 3], [10, 3]])
args.anchor_y_steps = np.array([5, 10, 15, 20, 30, 40, 50, 60, 80, 100])
args.num_y_steps = len(args.anchor_y_steps)
# initialize with pre-trained vgg weights
args.pretrained = False
# apply batch norm in network
args.batch_norm = True
def sim3d_config(args):
# set dataset parameters
args.org_h = 1080
args.org_w = 1920
args.crop_y = 0
args.no_centerline = False
args.no_3d = False
args.fix_cam = False
args.pred_cam = False
# set camera parameters for the test datasets
args.K = np.array([[2015., 0., 960.],
[0., 2015., 540.],
[0., 0., 1.]])
# specify model settings
"""
paper presented params:
args.top_view_region = np.array([[-10, 85], [10, 85], [-10, 5], [10, 5]])
args.anchor_y_steps = np.array([5, 20, 40, 60, 80, 100])
"""
# args.top_view_region = np.array([[-10, 83], [10, 83], [-10, 3], [10, 3]])
# args.anchor_y_steps = np.array([3, 5, 10, 20, 40, 60, 80, 100])
args.top_view_region = np.array([[-10, 103], [10, 103], [-10, 3], [10, 3]])
args.anchor_y_steps = np.array([5, 10, 15, 20, 30, 40, 50, 60, 80, 100])
args.num_y_steps = len(args.anchor_y_steps)
# initialize with pre-trained vgg weights
args.pretrained = False
# apply batch norm in network
args.batch_norm = True
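# Illustrative usage sketch (added; not part of the original source). It shows
# how the argument parser defined above and the dataset config helpers are
# typically combined. 'define_args' is an assumed name for the function that
# builds the parser, and parse_args([]) assumes the full parser has no required
# arguments, which may not hold outside this excerpt.
def _example_build_tusimple_args():
    parser = define_args()          # assumed builder of the ArgumentParser shown above
    args = parser.parse_args([])    # empty argv -> fall back to the defaults above
    tusimple_config(args)           # fills dataset, camera and anchor settings in place
    return args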
class Visualizer:
def __init__(self, args, vis_folder='val_vis'):
self.save_path = args.save_path
self.vis_folder = vis_folder
self.no_3d = args.no_3d
self.no_centerline = args.no_centerline
self.vgg_mean = args.vgg_mean
self.vgg_std = args.vgg_std
self.ipm_w = args.ipm_w
self.ipm_h = args.ipm_h
self.num_y_steps = args.num_y_steps
if args.no_3d:
self.anchor_dim = args.num_y_steps + 1
else:
if 'ext' in args.mod:
self.anchor_dim = 3 * args.num_y_steps + 1
else:
self.anchor_dim = 2 * args.num_y_steps + 1
x_min = args.top_view_region[0, 0]
x_max = args.top_view_region[1, 0]
        self.anchor_x_steps = np.linspace(x_min, x_max, int(args.ipm_w / 8), endpoint=True)
self.anchor_y_steps = args.anchor_y_steps
# transformation from ipm to ground region
H_ipm2g = cv2.getPerspectiveTransform(np.float32([[0, 0],
[self.ipm_w-1, 0],
[0, self.ipm_h-1],
[self.ipm_w-1, self.ipm_h-1]]),
np.float32(args.top_view_region))
self.H_g2ipm = np.linalg.inv(H_ipm2g)
        # probability threshold for choosing which lanes to visualize
self.prob_th = args.prob_th
def draw_on_img(self, img, lane_anchor, P_g2im, draw_type='laneline', color=[0, 0, 1]):
"""
:param img: image in numpy array, each pixel in [0, 1] range
        :param lane_anchor: lane anchor in N X C numpy ndarray, dimensions in agreement with the dataloader
:param P_g2im: projection from ground 3D coordinates to image 2D coordinates
:param draw_type: 'laneline' or 'centerline' deciding which to draw
:param color: [r, g, b] color for line, each range in [0, 1]
:return:
"""
for j in range(lane_anchor.shape[0]):
# draw laneline
            if draw_type == 'laneline' and lane_anchor[j, self.anchor_dim - 1] > self.prob_th:
x_offsets = lane_anchor[j, :self.num_y_steps]
x_3d = x_offsets + self.anchor_x_steps[j]
                if P_g2im.shape[1] == 3:
x_2d, y_2d = homographic_transformation(P_g2im, x_3d, self.anchor_y_steps)
else:
z_3d = lane_anchor[j, self.num_y_steps:self.anchor_dim - 1]
x_2d, y_2d = projective_transformation(P_g2im, x_3d, self.anchor_y_steps, z_3d)
                x_2d = x_2d.astype(int)
                y_2d = y_2d.astype(int)
for k in range(1, x_2d.shape[0]):
img = cv2.line(img, (x_2d[k - 1], y_2d[k - 1]), (x_2d[k], y_2d[k]), color, 2)
# draw centerline
            if draw_type == 'centerline' and lane_anchor[j, 2 * self.anchor_dim - 1] > self.prob_th:
x_offsets = lane_anchor[j, self.anchor_dim:self.anchor_dim + self.num_y_steps]
x_3d = x_offsets + self.anchor_x_steps[j]
                if P_g2im.shape[1] == 3:
x_2d, y_2d = homographic_transformation(P_g2im, x_3d, self.anchor_y_steps)
else:
z_3d = lane_anchor[j, self.anchor_dim + self.num_y_steps:2 * self.anchor_dim - 1]
x_2d, y_2d = projective_transformation(P_g2im, x_3d, self.anchor_y_steps, z_3d)
                x_2d = x_2d.astype(int)
                y_2d = y_2d.astype(int)
for k in range(1, x_2d.shape[0]):
img = cv2.line(img, (x_2d[k - 1], y_2d[k - 1]), (x_2d[k], y_2d[k]), color, 2)
# draw the additional centerline for the merging case
            if draw_type == 'centerline' and lane_anchor[j, 3 * self.anchor_dim - 1] > self.prob_th:
x_offsets = lane_anchor[j, 2 * self.anchor_dim:2 * self.anchor_dim + self.num_y_steps]
x_3d = x_offsets + self.anchor_x_steps[j]
                if P_g2im.shape[1] == 3:
x_2d, y_2d = homographic_transformation(P_g2im, x_3d, self.anchor_y_steps)
else:
z_3d = lane_anchor[j, 2 * self.anchor_dim + self.num_y_steps:3 * self.anchor_dim - 1]
x_2d, y_2d = projective_transformation(P_g2im, x_3d, self.anchor_y_steps, z_3d)
                x_2d = x_2d.astype(int)
                y_2d = y_2d.astype(int)
for k in range(1, x_2d.shape[0]):
img = cv2.line(img, (x_2d[k - 1], y_2d[k - 1]), (x_2d[k], y_2d[k]), color, 2)
return img
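    # Note (added): homographic_transformation() and projective_transformation()
    # are defined elsewhere in this codebase. Conceptually both compute
    #   [u, v, w]^T = P_g2im @ [x, y, 1]^T    (3x3 homography, flat-ground case)
    #   [u, v, w]^T = P_g2im @ [x, y, z, 1]^T (3x4 projection, when z_3d is used)
    # followed by x_2d = u / w and y_2d = v / w.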
def draw_on_img_new(self, img, lane_anchor, P_g2im, draw_type='laneline', color=[0, 0, 1]):
"""
:param img: image in numpy array, each pixel in [0, 1] range
        :param lane_anchor: lane anchor in N X C numpy ndarray, dimensions in agreement with the dataloader
:param P_g2im: projection from ground 3D coordinates to image 2D coordinates
:param draw_type: 'laneline' or 'centerline' deciding which to draw
:param color: [r, g, b] color for line, each range in [0, 1]
:return:
"""
for j in range(lane_anchor.shape[0]):
< (
(self.min_r + 1) - self.annulus_width):
r_dim = 1
else:
r_dim = int(cd[2] - self.annulus_width)
cv2.circle(labeled_annulus_mask,
(int(cd[0]), int(cd[1])), r_dim,
(0, 0, 0), -1)
return labeled_annulus_mask
def overlay_image(self, image, annulus=None, dim=None):
"""Overlay image with circles of labeled mask."""
img = image.copy()
if dim is not None:
circ_dim = dim
else:
circ_dim = self._circles_dim
for dim in circ_dim:
if annulus is True:
if (int(dim[2] - self.annulus_width)) < (
(self.min_r + 1) - self.annulus_width):
r_dim = 1
else:
r_dim = int(dim[2] - self.annulus_width)
cv2.circle(img, (int(dim[0]), int(
dim[1])), r_dim, (0, 255, 0), 1)
cv2.circle(img, (int(dim[0]), int(dim[1])),
int(dim[2]), (0, 255, 0), 1)
return img
def _filter(self):
        # remove circles whose radius falls outside the [min_r, max_r] range
        remove_list = np.where((self._circles_dim[:, 2] < self.min_r) | (
            self._circles_dim[:, 2] > self.max_r))[0]
if remove_list.size > 0:
self._circles_dim = np.delete(
self._circles_dim, remove_list, axis=0)
for remove in remove_list:
self._labeled_mask[self._labeled_mask == remove + 1] = 0
self._labeled_annulus_mask[self._labeled_mask ==
remove + 1] = 0
if self.border_clear is True:
sk.segmentation.clear_border(self._labeled_mask,
in_place=True)
sk.segmentation.clear_border(self._labeled_annulus_mask,
in_place=True)
class SpectralUnmixing(ImageDataFrame):
"""Spectrally unmix images using reference spectra.
Unmix the spectral images to dye images, e.g., 620nm, 630nm, 650nm images
    to Dy, Sm and Tm nanophosphorous lanthanides using reference spectra for
each dye.
Parameters
----------
ref_data : list, ndarray, Pandas DataFrame, mrbles.data.References
Reference spectra for each dye channel as Numpy Array: N x M, where N
are the spectral channels and M the dye channels.
"""
def __init__(self, ref_data):
"""Instantiate SpectralUnmixing."""
super(SpectralUnmixing, self).__init__()
self._ref_object = ref_data
if isinstance(ref_data, pd.DataFrame):
self._ref_data = ref_data.values
self._names = list(ref_data.keys())
elif hasattr(ref_data, 'data'):
self._ref_data = ref_data.data.values
self._names = list(ref_data.data.keys())
else:
raise TypeError(
"Wrong type. Only mrbles dataframes, or Pandas DataFrame types.") # NOQA
self._ref_size = self._ref_data[0, :].size
self._dataframe = None
self._c_size = None
self._y_size = None
self._x_size = None
def __repr__(self):
"""Return Xarray dataframe representation."""
return repr([self._dataframe])
def unmix(self, images):
"""Unmix images based on initiated reference data.
Unmix the spectral images to dye images, e.g., 620nm, 630nm, 650nm
        images to Dy, Sm and Tm nanophosphorous lanthanides using reference
spectra for each dye.
Parameters
----------
        images : NumPy array, Xarray DataArray, mrbles.Images
Spectral images as NumPy array: N x M x P,
where N are the spectral channels and M x P the image pixels
(Y x X).
"""
if isinstance(images, xr.DataArray):
images = images.values
if images.ndim > 3:
data = [self._unmix(image) for image in images]
self._dataframe = xr.concat(data, dim='f')
else:
self._dataframe = self._unmix(images)
def _unmix(self, images):
if self._ref_data.shape[0] != images.shape[0]:
print("Number of channels not equal. Ref: ",
self._ref_data.shape[0], " Image: ", images.shape[0])
raise IndexError
self._sizes(images)
img_flat = self._flatten(images)
unmix_flat = np.linalg.lstsq(self._ref_data, img_flat, rcond=RCOND)[0]
unmix_result = self._rebuilt(unmix_flat)
dataframe = xr.DataArray(unmix_result,
dims=['c', 'y', 'x'],
coords={'c': self._names})
return dataframe
# Private functions
def _sizes(self, images):
"""Get sizes images: Channels, Y, X."""
self._c_size = images[:, 0, 0].size
self._y_size = images[0, :, 0].size
self._x_size = images[0, 0, :].size
def _flatten(self, images):
"""Flatten X and Y of images in NumPy array."""
images_flat = images.reshape(
self._c_size, (self._y_size * self._x_size))
return images_flat
def _rebuilt(self, images_flat):
"""Rebuilt images to NumPy array."""
images = images_flat.reshape(
self._ref_size, self._y_size, self._x_size)
return images
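# Minimal sketch (added; not from the original source): the unmixing step above
# boils down to a per-pixel least-squares solve ref @ x = observed, where 'ref'
# holds the reference spectra (spectral channels x dyes). The numbers below are
# made up purely to demonstrate the shapes involved.
def _example_least_squares_unmix():
    ref = np.array([[0.9, 0.1],    # channel 1 response of dye A and dye B
                    [0.4, 0.6],    # channel 2
                    [0.1, 0.8]])   # channel 3
    true_conc = np.array([[2.0], [0.5]])   # dye amounts for a single pixel
    observed = ref @ true_conc             # simulated 3-channel measurement
    unmixed = np.linalg.lstsq(ref, observed, rcond=None)[0]
    return unmixed                         # approximately equal to true_conc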
class ICP(object):
"""Iterative Closest Point (ICP).
Iterative Closest Point (ICP) algorithm to minimize the difference
between two clouds of points.
Parameters
----------
matrix_method : string/function/list, optional
Transformation matrix method. Standard methods: 'max', 'mean', 'std'.
Other options: own function or list of initial guesses.
Defaults to 'std'.
    offset : list of float, optional
        Initial guess of the offset vector. If None, it is estimated from the
        data when fit() is called.
        Defaults to None.
max_iter : int, optional
Maximum number of iterations.
Defaults to 100.
tol : float, optional
Convergence threshold. ICP will stop after delta < tol.
Defaults to 1e-4.
    outlier_pct : float, optional
        Fraction of the points furthest from the target to discard, given as
        a fraction in [0, 1], e.g. 0.001.
        Defaults to 0.01.
train : boolean
        Turn on (True) or off (False) training mode.
        This will keep the current transformation from resetting to default
initial values.
Defaults to True.
echo : boolean
Turn on (True) or off (False) printing information while in process.
Prints the delta for each iteration, the final number of iterations,
and the final transformation and offset matrices.
Attributes
----------
matrix : NumPy array
This stores the transformation matrix.
offset : NumPy vector
This stores the offset vector.
Functions
---------
fit : function
Function to find ICP using set parameters and attributes.
transform : function
        Function to apply the transformation to data using the current transformation matrix
and offset vector.
"""
def __init__(self, target,
matrix_method='std',
offset=None,
max_iter=100,
tol=1e-4,
outlier_pct=0.01):
"""Instantiate Iterative Closest Point (ICP) object."""
if isinstance(target, pd.DataFrame):
target = target.values
self._target = target
self.matrix, self.matrix_func = self._set_matrix_method(matrix_method)
self.max_iter = max_iter
self.tol = tol
self.outlierpct = outlier_pct
self.offset = offset
self.train = False
self.echo = True
self._pdata = None
def _set_matrix_method(self, matrix_method):
"""Set matrix method."""
matrix = None
if matrix_method == 'max':
matrix_func = np.max
elif matrix_method == 'mean':
matrix_func = np.mean
elif matrix_method == 'std':
matrix_func = np.std
# Use own or other function
elif isinstance(matrix_method, types.FunctionType):
matrix_func = matrix_method
# Use list of initial ratios
elif isinstance(matrix_method, list):
matrix_func = matrix_method
naxes = len(matrix_method)
matrix = np.eye(naxes)
for n in range(naxes):
matrix[n, n] = matrix_method[n]
else:
raise ValueError("Matrix method invalid: %s" % matrix_method)
return matrix, matrix_func
def _set_matrix(self, data, target):
"""Set initial guess matrix."""
matrix = self.matrix_create(self.matrix_func, target, data)
return matrix
def _set_offset(self, data, target):
"""Set initial guess offset."""
naxes = len(data[0, :])
offset = np.ones(naxes)
for n in range(naxes):
offset[n] = np.min(target[:, n]) - np.min(data[:, n])
return offset
@staticmethod
def matrix_create(func, input1, input2):
"""Create identity matrix and set values with function on inputs e.g 'np.mean'.
Parameters
----------
func : function
Function to apply on input1 divided by input2, e.g. 'np.std'.
Insert function without function call: ().
input1 : list, ndarray
input2 : list, ndarray
Returns
-------
matrix : ndarray
            Identity matrix whose diagonal is set to func(input1[:, n]) / func(input2[:, n]).
"""
naxes1 = len(input1[0, :])
naxes2 = len(input2[0, :])
if naxes1 == naxes2:
matrix = np.eye(naxes1)
for n in range(naxes1):
matrix[n, n] = func(input1[:, n]) / func(input2[:, n])
else:
raise ValueError(
"Lengths of input1 = %s and input2 = %s do not match",
naxes1,
naxes2)
return matrix
def transform(self, data=None):
"""Apply transformation matrix to data."""
if (self._pdata is not None) and data is None:
self._pdata.reset_index(drop=True, inplace=True)
tdata = np.dot(self._pdata.values, self.matrix) + self.offset
result = pd.DataFrame()
for num, val in enumerate(self._pdata.index):
for n, v in enumerate(self._pdata.columns):
result.loc[val, ('%s_icp' % v)] = tdata[num, n]
else:
result = np.dot(data, self.matrix) + self.offset
return result
def fit(self, data, target=None):
"""Fit Iterative Closest Point."""
if isinstance(data, pd.DataFrame):
self._pdata = data
data = data.values
if target is None:
target = self._target
if (self.offset is None) or (self.train is False):
self.offset = self._set_offset(data, target)
else:
warnings.warn("Training mode: ON")
if (self.matrix is None) or (self.train is False):
self.matrix = self._set_matrix(data, target)
else:
warnings.warn("Training mode: ON")
delta = 1
for i in range(self.max_iter):
if delta < self.tol:
print("Converged after:", i)
break
# Copy old to compare to new
matrix_old = self.matrix
offset_old = self.offset
# Apply transform
data_transform = self.transform(data)
            # Compare distances between transformed data and target
distances = pairwise_distances(data_transform, target)
min_dist = np.min(distances, axis=1)
# Filter percentile of furthest away points
min_dist_pct = np.percentile(
min_dist, [0, (1 - self.outlierpct) * 100])[1]
min_dist_filt = np.argwhere(min_dist < min_dist_pct)[:, 0]
# Match codes and levels
matched_code = np.argmin(distances, axis=1)
matched_levels = target[matched_code[min_dist_filt], :]
# Least squares
dist = np.c_[data[min_dist_filt], np.ones(
len(data[min_dist_filt, 0]))]
mat = np.linalg.lstsq(dist, matched_levels, rcond=RCOND)[0]
            # Store new transformation matrix and offset vector
self.matrix = mat[0:-1, :]
self.offset = mat[-1, :]
# Compare step by step delta
d_compare = np.sum(np.square(self.matrix - matrix_old))
d_compare = d_compare + np.sum(np.square(self.offset - offset_old))
n_compare = np.sum(np.square(self.matrix)) + \
np.sum(np.square(self.offset))
delta = np.sqrt(d_compare / n_compare)
print("Delta: ", delta)
class Classify(object):
"""Classification of beads by Gaussian Mixture Model.
Parameters
----------
target : list, NumPy array
List of target ratios.
tol : float
Tolerance.
Defaults to 1e-5.
min_covar : float
Minimum covariance.
Defaults to 1e-7.
sigma : float
Minimum significance.
Defaults to 1e-5.
train : boolean
Sets training mode. Remembers covariance matrix or resets to initial
covariance matrix.
Defaults to False.
our decoder_inputs which will be output_sequence_length long
for index, decoder_input in enumerate(decoder_inputs):
# If there has been a previous output then we will determine the next input
if previous_output is not None:
# Create the input layer to our DNN
network = previous_output # shape = (current_batch_size, lstm_hidden_units[-1])
print("encoder_decoder_stacked_lstm_regression: rnn_decoder: network = {}".format(network))
# Create our dnn variable scope
with tf.variable_scope(name_or_scope = "dnn", reuse = tf.AUTO_REUSE):
# Add hidden layers with the given number of units/neurons per layer
for units in params['dnn_hidden_units']:
network = tf.layers.dense(inputs = network, units = units, activation = tf.nn.relu) # shape = (current_batch_size, dnn_hidden_units[i])
print("encoder_decoder_stacked_lstm_regression: rnn_decoder: network = {}, units = {}".format(network, units))
# Connect the final hidden layer to a dense layer with no activation to get the logits
logits = tf.layers.dense(inputs = network, units = 1, activation = None) # shape = (current_batch_size, 1)
print("encoder_decoder_stacked_lstm_regression: rnn_decoder: logits = {}\n".format(logits))
# If we are in inference then we will overwrite our next decoder_input with the logits we just calculated.
# Otherwise, we leave the decoder_input input as it was from the enumerated list
# We have to calculate the logits even when not using them so that the correct dnn subgraph will be generated here and after the encoder-decoder for both training and inference
if inference == True:
decoder_input = logits # shape = (current_batch_size, 1)
print("encoder_decoder_stacked_lstm_regression: rnn_decoder: decoder_input = {}\n".format(decoder_input))
# If this isn't our first time through the loop, just reuse(share) the same variables for each iteration within the current variable scope
if index > 0:
tf.get_variable_scope().reuse_variables()
# Run the decoder input through the decoder stack picking up from the previous state
output, state = cell(decoder_input, state)
print("encoder_decoder_stacked_lstm_regression: rnn_decoder: output = {}".format(output)) # shape = (current_batch_size, lstm_hidden_units[-1])
print("encoder_decoder_stacked_lstm_regression: rnn_decoder: state = {}".format(state)) # tuple of final decoder c_state and h_state
# Append the current decoder hidden state output to the outputs list
outputs.append(output) # growing list eventually output_sequence_length long of shape = (current_batch_size, lstm_hidden_units[-1])
# Set the previous output to the output just calculated
previous_output = output # shape = (current_batch_size, lstm_hidden_units[-1])
return outputs, state
# Encoder-decoders work differently during training/evaluation and inference so we will have two separate subgraphs for each
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
# Break 2-D labels tensor into a list of 1-D tensors
unstacked_labels = tf.unstack(value = labels, num = params['output_sequence_length'], axis = 1) # list of output_sequence_length long of shape = (current_batch_size,)
print("encoder_decoder_stacked_lstm_regression: unstacked_labels = {}".format(unstacked_labels))
# Expand each 1-D label tensor back into a 2-D tensor
expanded_unstacked_labels = [tf.expand_dims(input = tensor, axis = -1) for tensor in unstacked_labels] # list of output_sequence_length long of shape = (current_batch_size, 1)
print("encoder_decoder_stacked_lstm_regression: expanded_unstacked_labels = {}".format(expanded_unstacked_labels))
# Call our decoder using the labels as our inputs, the encoder final state as our initial state, our other LSTM stack as our cells, and inference set to false
decoder_outputs, decoder_states = rnn_decoder(decoder_inputs = expanded_unstacked_labels, initial_state = encoder_final_state, cell = stacked_lstm_cells, inference = False)
else:
# Since this is inference create fake labels. The list length needs to be the output sequence length even though only the first element is actually used (as our go signal)
fake_labels = [tf.zeros(shape = [current_batch_size, 1]) for _ in range(params['output_sequence_length'])]
print("encoder_decoder_stacked_lstm_regression: fake_labels = {}".format(fake_labels))
# Call our decoder using fake labels as our inputs, the encoder final state as our initial state, our other LSTM stack as our cells, and inference set to true
decoder_outputs, decoder_states = rnn_decoder(decoder_inputs = fake_labels, initial_state = encoder_final_state, cell = stacked_lstm_cells, inference = True)
print("encoder_decoder_stacked_lstm_regression: decoder_outputs = {}".format(decoder_outputs)) # list output_sequence_length long of shape = (current_batch_size, lstm_hidden_units[-1])
print("encoder_decoder_stacked_lstm_regression: decoder_states = {}".format(decoder_states)) # tuple of final decoder c_state and h_state
# Stack together the list of decoder output tensors into one
stacked_decoder_outputs = tf.stack(values = decoder_outputs, axis = 0) # shape = (current_batch_size * output_sequence_length, lstm_hidden_units[-1])
print("encoder_decoder_stacked_lstm_regression: stacked_decoder_outputs = {}".format(stacked_decoder_outputs))
################################################################################
# 3. Create the DNN structure now after the encoder-decoder LSTM stack
# Create the input layer to our DNN
network = stacked_decoder_outputs # shape = (current_batch_size * output_sequence_length, lstm_hidden_units[-1])
print("encoder_decoder_stacked_lstm_regression: network = {}".format(network))
# Reuse the same variable scope as we used within our decoder (for inference)
with tf.variable_scope(name_or_scope = "dnn", reuse = tf.AUTO_REUSE):
# Add hidden layers with the given number of units/neurons per layer
for units in params['dnn_hidden_units']:
network = tf.layers.dense(inputs = network, units = units, activation = tf.nn.relu) # shape = (current_batch_size * output_sequence_length, dnn_hidden_units[i])
print("encoder_decoder_stacked_lstm_regression: network = {}, units = {}".format(network, units))
# Connect the final hidden layer to a dense layer with no activation to get the logits
logits = tf.layers.dense(inputs = network, units = 1, activation = None) # shape = (current_batch_size * output_sequence_length, 1)
print("encoder_decoder_stacked_lstm_regression: logits = {}\n".format(logits))
# Now that we are through the final DNN for each sequence element for each example in the batch, reshape the predictions to match our labels
predictions = tf.reshape(tensor = logits, shape = [current_batch_size, params['output_sequence_length']]) # shape = (current_batch_size, output_sequence_length)
print("encoder_decoder_stacked_lstm_regression: predictions = {}\n".format(predictions))
# 3. Loss function, training/eval ops
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
loss = tf.losses.mean_squared_error(labels = labels, predictions = predictions)
train_op = tf.contrib.layers.optimize_loss(
loss = loss,
global_step = tf.train.get_global_step(),
learning_rate = params['learning_rate'],
optimizer = "Adam")
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(labels = labels, predictions = predictions),
"mae": tf.metrics.mean_absolute_error(labels = labels, predictions = predictions)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 4. Create predictions
predictions_dict = {"predicted": predictions}
# 5. Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions)}
# 6. Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = export_outputs)
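# Illustrative sketch (added; not from the original source): wiring a model_fn
# like the one above into a TF 1.x Estimator. The model_fn name
# 'encoder_decoder_stacked_lstm_regression' is inferred from the print
# statements above, and the hyperparameter values are placeholders.
def _example_build_estimator(output_dir):
    params = {
        'output_sequence_length': 5,
        'lstm_hidden_units': [64, 32],
        'dnn_hidden_units': [32, 16],
        'learning_rate': 0.01,
    }
    return tf.estimator.Estimator(
        model_fn = encoder_decoder_stacked_lstm_regression,  # assumed name of the model_fn above
        model_dir = output_dir,
        params = params)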
# Create our serving input function to accept the data at serving and send it in the right format to our custom estimator
def serving_input_fn(input_sequence_length, reverse_sequence):
# This function fixes the shape and type of our input strings
def fix_shape_and_type_for_serving(placeholder):
# String split each string in the batch and output the values from the resulting SparseTensors
split_string = tf.map_fn(
fn = lambda x: tf.string_split(source = [placeholder[x]], delimiter=',').values,
elems = tf.range(start = 0, limit = tf.shape(input = placeholder)[0]),
dtype = tf.string) # shape = (batch_size, input_sequence_length)
print("serving_input_fn: fix_shape_and_type_for_serving: split_string = {}".format(split_string))
# Convert each string in the split tensor to float
feature_tensor = tf.string_to_number(string_tensor = split_string, out_type = tf.float32) # shape = (batch_size, input_sequence_length)
print("serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = {}".format(feature_tensor))
return feature_tensor
# This function fixes dynamic shape ambiguity of last dimension so that we will be able to use it in our DNN (since tf.layers.dense require the last dimension to be known)
def get_shape_and_set_modified_shape_2D(tensor, additional_dimension_sizes):
# Get static shape for tensor and convert it to list
shape = tensor.get_shape().as_list()
# Set outer shape to additional_dimension_sizes[0] since we know that this is the correct size
shape[1] = additional_dimension_sizes[0]
# Set the shape of tensor to our modified shape
tensor.set_shape(shape = shape) # shape = (batch_size, additional_dimension_sizes[0])
print("serving_input_fn: get_shape_and_set_modified_shape_2D: tensor = {}, additional_dimension_sizes = {}".format(tensor, additional_dimension_sizes))
return tensor
# Create placeholders to accept the data sent to the model at serving time
feature_placeholders = { # all features come in as a batch of strings, shape = (batch_size,), this was so because of passing the arrays to online ml-engine prediction
'price': tf.placeholder(dtype = tf.string, shape = [None]),
'dayofweek': tf.placeholder(dtype = tf.string, shape = [None]),
'hourofday': tf.placeholder(dtype = tf.string, shape = [None])
}
print("\nserving_input_fn: feature_placeholders = {}".format(feature_placeholders))
# Create feature tensors
features = {key: fix_shape_and_type_for_serving(placeholder = tensor) for key, tensor in feature_placeholders.items()}
print("serving_input_fn: features = {}".format(features))
# Fix dynamic shape ambiguity of feature tensors for our DNN
features = {key: get_shape_and_set_modified_shape_2D(tensor = tensor, additional_dimension_sizes = [input_sequence_length]) for key, tensor in features.items()}
print("serving_input_fn: features = {}".format(features))
# These are our sequence lengths per batch, which is just our input_sequence_length tiled since all of our sequences are the same length
    sequence_lengths
import itertools
import json
import os
import random
import unicodedata
import uuid
from collections import defaultdict
from typing import Type, Union
from unittest.mock import patch
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.contrib.sites.models import Site
from django.core.files import File
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
from django.utils.text import slugify
from faker import Factory
from faker.providers import BaseProvider
from measurement.measures import Weight
from prices import Money, TaxedMoney
from ...account.models import Address, User
from ...account.utils import store_user_address
from ...checkout import AddressType
from ...core.permissions import (
AccountPermissions,
CheckoutPermissions,
GiftcardPermissions,
OrderPermissions,
get_permissions,
)
from ...core.utils import build_absolute_uri
from ...core.weight import zero_weight
from ...discount import DiscountValueType, VoucherType
from ...discount.models import Sale, Voucher
from ...discount.utils import fetch_discounts
from ...plugins.manager import get_plugins_manager
from ...giftcard.models import GiftCard
from ...menu.models import Menu
from ...menu.utils import update_menu
from ...order.models import Fulfillment, Order, OrderLine
from ...order.utils import update_order_status
from ...page.models import Page
from ...payment import gateway
from ...payment.utils import create_payment
from ...product.models import (
AssignedProductAttribute,
AssignedVariantAttribute,
Attribute,
AttributeProduct,
AttributeValue,
AttributeVariant,
Category,
Collection,
CollectionProduct,
Product,
ProductImage,
ProductType,
ProductVariant,
)
from ...product.tasks import update_products_minimal_variant_prices_of_discount_task
from ...product.thumbnails import (
create_category_background_image_thumbnails,
create_collection_background_image_thumbnails,
create_product_thumbnails,
)
from ...shipping.models import ShippingMethod, ShippingMethodType, ShippingZone
from ...warehouse.management import increase_stock
from ...warehouse.models import Stock, Warehouse
fake = Factory.create()
PRODUCTS_LIST_DIR = "products-list/"
IMAGES_MAPPING = {
61: ["saleordemoproduct_paints_01.png"],
62: ["saleordemoproduct_paints_02.png"],
63: ["saleordemoproduct_paints_03.png"],
64: ["saleordemoproduct_paints_04.png"],
65: ["saleordemoproduct_paints_05.png"],
71: ["saleordemoproduct_fd_juice_06.png"],
72: ["saleordemoproduct_fd_juice_06.png"], # FIXME inproper image
73: ["saleordemoproduct_fd_juice_05.png"],
74: ["saleordemoproduct_fd_juice_01.png"],
75: ["saleordemoproduct_fd_juice_03.png"], # FIXME inproper image
76: ["saleordemoproduct_fd_juice_02.png"], # FIXME inproper image
77: ["saleordemoproduct_fd_juice_03.png"],
78: ["saleordemoproduct_fd_juice_04.png"],
79: ["saleordemoproduct_fd_juice_02.png"],
81: ["saleordemoproduct_wine-red.png"],
82: ["saleordemoproduct_wine-white.png"],
83: ["saleordemoproduct_beer-02_1.png", "saleordemoproduct_beer-02_2.png"],
84: ["saleordemoproduct_beer-01_1.png", "saleordemoproduct_beer-01_2.png"],
85: ["saleordemoproduct_cuschion01.png"],
86: ["saleordemoproduct_cuschion02.png"],
87: [
"saleordemoproduct_sneakers_01_1.png",
"saleordemoproduct_sneakers_01_2.png",
"saleordemoproduct_sneakers_01_3.png",
"saleordemoproduct_sneakers_01_4.png",
],
88: [
"saleordemoproduct_sneakers_02_1.png",
"saleordemoproduct_sneakers_02_2.png",
"saleordemoproduct_sneakers_02_3.png",
"saleordemoproduct_sneakers_02_4.png",
],
89: ["saleordemoproduct_cl_boot07_1.png", "saleordemoproduct_cl_boot07_2.png"],
107: ["saleordemoproduct_cl_polo01.png"],
108: ["saleordemoproduct_cl_polo02.png"],
109: ["saleordemoproduct_cl_polo03-woman.png"],
110: ["saleordemoproduct_cl_polo04-woman.png"],
111: [
"saleordemoproduct_cl_boot01_1.png",
"saleordemoproduct_cl_boot01_2.png",
"saleordemoproduct_cl_boot01_3.png",
],
112: ["saleordemoproduct_cl_boot03_1.png", "saleordemoproduct_cl_boot03_2.png"],
113: ["saleordemoproduct_cl_boot06_1.png", "saleordemoproduct_cl_boot06_2.png"],
114: [
"saleordemoproduct_cl_boot06_1.png",
"saleordemoproduct_cl_boot06_2.png",
], # FIXME incorrect image
115: ["saleordemoproduct_cl_bogo01_1.png"],
116: ["saleordemoproduct_cl_bogo02_1.png"],
117: ["saleordemoproduct_cl_bogo03_1.png"],
118: ["saleordemoproduct_cl_bogo04_1.png", "saleordemoproduct_cl_bogo04_2.png"],
}
CATEGORY_IMAGES = {7: "accessories.jpg", 8: "groceries.jpg", 9: "apparel.jpg"}
COLLECTION_IMAGES = {1: "summer.jpg", 2: "clothing.jpg"}
def get_weight(weight):
if not weight:
return zero_weight()
value, unit = weight.split()
return Weight(**{unit: value})
def create_product_types(product_type_data):
for product_type in product_type_data:
pk = product_type["pk"]
defaults = product_type["fields"]
defaults["weight"] = get_weight(defaults["weight"])
ProductType.objects.update_or_create(pk=pk, defaults=defaults)
def create_categories(categories_data, placeholder_dir):
placeholder_dir = get_product_list_images_dir(placeholder_dir)
for category in categories_data:
pk = category["pk"]
defaults = category["fields"]
parent = defaults["parent"]
image_name = (
CATEGORY_IMAGES[pk] if pk in CATEGORY_IMAGES else CATEGORY_IMAGES[parent]
)
background_image = get_image(placeholder_dir, image_name)
defaults["background_image"] = background_image
if parent:
defaults["parent"] = Category.objects.get(pk=parent)
Category.objects.update_or_create(pk=pk, defaults=defaults)
create_category_background_image_thumbnails.delay(pk)
def create_collections(data, placeholder_dir):
placeholder_dir = get_product_list_images_dir(placeholder_dir)
for collection in data:
pk = collection["pk"]
defaults = collection["fields"]
image_name = COLLECTION_IMAGES[pk]
background_image = get_image(placeholder_dir, image_name)
defaults["background_image"] = background_image
Collection.objects.update_or_create(pk=pk, defaults=defaults)
create_collection_background_image_thumbnails.delay(pk)
def assign_products_to_collections(associations: list):
for value in associations:
pk = value["pk"]
defaults = value["fields"]
defaults["collection_id"] = defaults.pop("collection")
defaults["product_id"] = defaults.pop("product")
CollectionProduct.objects.update_or_create(pk=pk, defaults=defaults)
def create_attributes(attributes_data):
for attribute in attributes_data:
pk = attribute["pk"]
defaults = attribute["fields"]
attr, _ = Attribute.objects.update_or_create(pk=pk, defaults=defaults)
def create_attributes_values(values_data):
for value in values_data:
pk = value["pk"]
defaults = value["fields"]
defaults["attribute_id"] = defaults.pop("attribute")
AttributeValue.objects.update_or_create(pk=pk, defaults=defaults)
def create_products(products_data, placeholder_dir, create_images):
for product in products_data:
pk = product["pk"]
# We are skipping products without images
if pk not in IMAGES_MAPPING:
continue
defaults = product["fields"]
set_field_as_money(defaults, "price")
defaults["weight"] = get_weight(defaults["weight"])
defaults["category_id"] = defaults.pop("category")
defaults["product_type_id"] = defaults.pop("product_type")
product, _ = Product.objects.update_or_create(pk=pk, defaults=defaults)
if create_images:
images = IMAGES_MAPPING.get(pk, [])
for image_name in images:
create_product_image(product, placeholder_dir, image_name)
def create_stocks(variant, warehouse_qs=None, **defaults):
if warehouse_qs is None:
warehouse_qs = Warehouse.objects.all()
for warehouse in warehouse_qs:
Stock.objects.update_or_create(
warehouse=warehouse, product_variant=variant, defaults=defaults
)
def create_product_variants(variants_data):
for variant in variants_data:
pk = variant["pk"]
defaults = variant["fields"]
defaults["weight"] = get_weight(defaults["weight"])
product_id = defaults.pop("product")
# We have not created products without images
if product_id not in IMAGES_MAPPING:
continue
defaults["product_id"] = product_id
set_field_as_money(defaults, "price_override")
set_field_as_money(defaults, "cost_price")
quantity = defaults.pop("quantity")
quantity_allocated = defaults.pop("quantity_allocated")
variant, _ = ProductVariant.objects.update_or_create(pk=pk, defaults=defaults)
create_stocks(variant, quantity=quantity, quantity_allocated=quantity_allocated)
def assign_attributes_to_product_types(
association_model: Union[Type[AttributeProduct], Type[AttributeVariant]],
attributes: list,
):
for value in attributes:
pk = value["pk"]
defaults = value["fields"]
defaults["attribute_id"] = defaults.pop("attribute")
defaults["product_type_id"] = defaults.pop("product_type")
association_model.objects.update_or_create(pk=pk, defaults=defaults)
def assign_attributes_to_products(product_attributes):
for value in product_attributes:
pk = value["pk"]
defaults = value["fields"]
defaults["product_id"] = defaults.pop("product")
defaults["assignment_id"] = defaults.pop("assignment")
assigned_values = defaults.pop("values")
assoc, created = AssignedProductAttribute.objects.update_or_create(
pk=pk, defaults=defaults
)
if created:
assoc.values.set(AttributeValue.objects.filter(pk__in=assigned_values))
def assign_attributes_to_variants(variant_attributes):
for value in variant_attributes:
pk = value["pk"]
defaults = value["fields"]
defaults["variant_id"] = defaults.pop("variant")
defaults["assignment_id"] = defaults.pop("assignment")
assigned_values = defaults.pop("values")
assoc, created = AssignedVariantAttribute.objects.update_or_create(
pk=pk, defaults=defaults
)
if created:
assoc.values.set(AttributeValue.objects.filter(pk__in=assigned_values))
def set_field_as_money(defaults, field):
amount_field = f"{field}_amount"
if amount_field in defaults and defaults[amount_field] is not None:
defaults[field] = Money(defaults[amount_field], settings.DEFAULT_CURRENCY)
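# Illustrative note (added): given defaults = {"price_amount": "10.00", ...},
# set_field_as_money(defaults, "price") adds
# defaults["price"] = Money("10.00", settings.DEFAULT_CURRENCY)
# and leaves the original "price_amount" entry untouched.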
def create_products_by_schema(placeholder_dir, create_images):
path = os.path.join(
settings.PROJECT_ROOT, "saleor", "static", "populatedb_data.json"
)
with open(path) as f:
db_items = json.load(f)
types = defaultdict(list)
# Sort db objects by its model
for item in db_items:
model = item.pop("model")
types[model].append(item)
create_product_types(product_type_data=types["product.producttype"])
create_categories(
categories_data=types["product.category"], placeholder_dir=placeholder_dir
)
create_attributes(attributes_data=types["product.attribute"])
create_attributes_values(values_data=types["product.attributevalue"])
create_products(
products_data=types["product.product"],
placeholder_dir=placeholder_dir,
create_images=create_images,
)
create_product_variants(variants_data=types["product.productvariant"])
assign_attributes_to_product_types(
AttributeProduct, attributes=types["product.attributeproduct"]
)
assign_attributes_to_product_types(
AttributeVariant, attributes=types["product.attributevariant"]
)
assign_attributes_to_products(
product_attributes=types["product.assignedproductattribute"]
)
assign_attributes_to_variants(
variant_attributes=types["product.assignedvariantattribute"]
)
create_collections(
data=types["product.collection"], placeholder_dir=placeholder_dir
)
assign_products_to_collections(associations=types["product.collectionproduct"])
class SaleorProvider(BaseProvider):
def money(self):
return Money(fake.pydecimal(2, 2, positive=True), settings.DEFAULT_CURRENCY)
def weight(self):
return Weight(kg=fake.pydecimal(1, 2, positive=True))
fake.add_provider(SaleorProvider)
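# Illustrative note (added): after add_provider, the custom methods are exposed
# directly on the faker instance, e.g.
#   fake.money()   -> Money with a random positive two-digit decimal amount
#   fake.weight()  -> Weight(kg=<random positive decimal>)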
def get_email(first_name, last_name):
_first = unicodedata.normalize("NFD", first_name).encode("ascii", "ignore")
_last = unicodedata.normalize("NFD", last_name).encode("ascii", "ignore")
return <EMAIL>" % (
_first.lower().decode("utf-8"),
_last.lower().decode("utf-8"),
)
def create_product_image(product, placeholder_dir, image_name):
image = get_image(placeholder_dir, image_name)
# We don't want to create duplicated product images
if product.images.count() >= len(IMAGES_MAPPING.get(product.pk, [])):
return None
product_image = ProductImage(product=product, image=image)
product_image.save()
create_product_thumbnails.delay(product_image.pk)
return product_image
def create_address(save=True):
address = Address(
first_name=fake.first_name(),
last_name=fake.last_name(),
street_address_1=fake.street_address(),
city=fake.city(),
country=settings.DEFAULT_COUNTRY,
)
if address.country == "US":
state = fake.state_abbr()
address.country_area = state
address.postal_code = fake.postalcode_in_state(state)
else:
address.postal_code = fake.postalcode()
if save:
address.save()
return address
def create_fake_user(save=True):
address = create_address(save=save)
email = get_email(address.first_name, address.last_name)
# Skip the email if it already exists
try:
return User.objects.get(email=email)
except User.DoesNotExist:
pass
user = User(
first_name=address.first_name,
last_name=address.last_name,
email=email,
password="password",
default_billing_address=address,
default_shipping_address=address,
is_active=True,
note=fake.paragraph(),
date_joined=fake.date_time(tzinfo=timezone.get_current_timezone()),
)
if save:
user.save()
user.addresses.add(address)
return user
# We don't want to spam the console with payment confirmations sent to
# fake customers.
@patch("saleor.order.emails.send_payment_confirmation.delay")
def create_fake_payment(mock_email_confirmation, order):
payment = create_payment(
gateway="Dummy",
customer_ip_address=fake.ipv4(),
email=order.user_email,
order=order,
payment_token=str(<KEY>()),
total=order.total.gross.amount,
currency=order.total.gross.currency,
)
# Create authorization transaction
gateway.authorize(payment, payment.token)
# 20% chance to void the transaction at this stage
if random.choice([0, 0, 0, 0, 1]):
gateway.void(payment)
return payment
# 25% to end the payment at the authorization stage
if not random.choice([1, 1, 1, 0]):
return payment
# Create capture transaction
gateway.capture(payment)
# 25% to refund the payment
if random.choice([0, 0, 0, 1]):
gateway.refund(payment)
return payment
def create_order_lines(order, discounts, how_many=10):
variants = (
ProductVariant.objects.filter()
.order_by("?")
.prefetch_related("product__product_type")[:how_many]
)
variants_iter = itertools.cycle(variants)
lines = []
stocks = []
country = order.shipping_address.country
for dummy in range(how_many):
variant = next(variants_iter)
product = variant.product
quantity = random.randrange(1, 5)
stocks.append(
increase_stock(variant, country, quantity, allocate=True, commit=False)
)
unit_price = variant.get_price(discounts)
unit_price = TaxedMoney(net=unit_price, gross=unit_price)
lines.append(
OrderLine(
order=order,
product_name=str(product),
variant_name=str(variant),
product_sku=variant.sku,
is_shipping_required=variant.is_shipping_required(),
quantity=quantity,
variant=variant,
unit_price=unit_price,
tax_rate=0,
)
)
Stock.objects.bulk_update(stocks, ["quantity", "quantity_allocated"])
lines = OrderLine.objects.bulk_create(lines)
manager = get_plugins_manager()
for line in lines:
unit_price = manager.calculate_order_line_unit(line)
line.unit_price = unit_price
line.tax_rate = unit_price.tax / unit_price.net
OrderLine.objects.bulk_update(
lines,
["unit_price_net_amount", "unit_price_gross_amount", "currency", "tax_rate"],
)
return lines
def create_fulfillments(order):
for line in order:
if random.choice([False, True]):
fulfillment, _ = Fulfillment.objects.get_or_create(order=order)
quantity = random.randrange(0, line.quantity) + 1
fulfillment.lines.create(order_line=line, quantity=quantity)
line.quantity_fulfilled = quantity
line.save(update_fields=["quantity_fulfilled"])
update_order_status(order)
def create_fake_order(discounts, max_order_lines=5):
customers = User.objects.filter(is_superuser=False).order_by("?")
customer = random.choice([None, customers.first()])
if customer:
address = customer.default_shipping_address
order_data = {
"user": customer,
"billing_address": customer.default_billing_address,
"shipping_address": address,
}
else:
address = create_address()
order_data = {
"billing_address": address,
"shipping_address": address,
"user_email": get_email(address.first_name, address.last_name),
}
manager = get_plugins_manager()
shipping_method = ShippingMethod.objects.order_by("?").first()
shipping_price = shipping_method.price
shipping_price = manager.apply_taxes_to_shipping(shipping_price, address)
order_data.update(
{"shipping_method_name": shipping_method.name, "shipping_price": shipping_price}
)
order = Order.objects.create(**order_data)
lines = create_order_lines(order, discounts, random.randrange(1, max_order_lines))
order.total = sum([line.get_total() for line in lines], shipping_price)
weight = Weight(kg=0)
for line in order:
weight += line.variant.get_weight()
order.weight = weight
order.save()
create_fake_payment(order=order)
create_fulfillments(order)
return order
def create_fake_sale():
sale = Sale.objects.create(
name="Happy %s day!" % fake.word(),
type=DiscountValueType.PERCENTAGE,
value=random.choice([10, 20, 30, 40, 50]),
)
for product in Product.objects.all().order_by("?")[:4]:
sale.products.add(product)
return sale
def create_users(how_many=10):
for dummy in range(how_many):
user = create_fake_user()
yield "User: %s" % (user.email,)
def create_permission_groups():
super_users = User.objects.filter(is_superuser=True)
if not super_users:
super_users = create_staff_users(1, True)
group = create_group("Full Access", get_permissions(), super_users)
yield f"Group: {group}"
staff_users = create_staff_users()
customer_support_codenames = [
perm.codename
for enum in [CheckoutPermissions, OrderPermissions, GiftcardPermissions]
for perm in enum
]
customer_support_codenames.append(AccountPermissions.MANAGE_USERS.codename)
customer_support_permissions = Permission.objects.filter(
codename__in=customer_support_codenames
)
group = create_group("Customer Support", customer_support_permissions, staff_users)
yield f"Group: {group}"
def create_group(name, permissions, users):
group, _ = Group.objects.get_or_create(name=name)
group.permissions.add(*permissions)
group.user_set.add(*users)
return group
def create_staff_users(how_many=2, superuser=False):
users = []
for _ in range(how_many):
except __builtin__.Exception:
self.this = this
def clear(self):
"""
clear(MatrixBaseQuaternion self)
Parameters
----------
self: SimTK::MatrixBase< SimTK::Quaternion_< double > > *
"""
return _simbody.MatrixBaseQuaternion_clear(self)
def setTo(self, t):
"""
setTo(MatrixBaseQuaternion self, Quaternion t) -> MatrixBaseQuaternion
Parameters
----------
t: SimTK::Quaternion_< double > const &
"""
return _simbody.MatrixBaseQuaternion_setTo(self, t)
def setToNaN(self):
"""
setToNaN(MatrixBaseQuaternion self) -> MatrixBaseQuaternion
Parameters
----------
self: SimTK::MatrixBase< SimTK::Quaternion_< double > > *
"""
return _simbody.MatrixBaseQuaternion_setToNaN(self)
def setToZero(self):
"""
setToZero(MatrixBaseQuaternion self) -> MatrixBaseQuaternion
Parameters
----------
self: SimTK::MatrixBase< SimTK::Quaternion_< double > > *
"""
return _simbody.MatrixBaseQuaternion_setToZero(self)
def getElt(self, i, j):
"""
getElt(MatrixBaseQuaternion self, int i, int j) -> Quaternion
Parameters
----------
i: int
j: int
"""
return _simbody.MatrixBaseQuaternion_getElt(self, i, j)
def negateInPlace(self):
"""
negateInPlace(MatrixBaseQuaternion self) -> MatrixBaseQuaternion
Parameters
----------
self: SimTK::MatrixBase< SimTK::Quaternion_< double > > *
"""
return _simbody.MatrixBaseQuaternion_negateInPlace(self)
def resize(self, m, n):
"""
resize(MatrixBaseQuaternion self, int m, int n) -> MatrixBaseQuaternion
Parameters
----------
m: int
n: int
"""
return _simbody.MatrixBaseQuaternion_resize(self, m, n)
def resizeKeep(self, m, n):
"""
resizeKeep(MatrixBaseQuaternion self, int m, int n) -> MatrixBaseQuaternion
Parameters
----------
m: int
n: int
"""
return _simbody.MatrixBaseQuaternion_resizeKeep(self, m, n)
def lockShape(self):
"""
lockShape(MatrixBaseQuaternion self)
Parameters
----------
self: SimTK::MatrixBase< SimTK::Quaternion_< double > > *
"""
return _simbody.MatrixBaseQuaternion_lockShape(self)
def unlockShape(self):
"""
unlockShape(MatrixBaseQuaternion self)
Parameters
----------
self: SimTK::MatrixBase< SimTK::Quaternion_< double > > *
"""
return _simbody.MatrixBaseQuaternion_unlockShape(self)
__swig_destroy__ = _simbody.delete_MatrixBaseQuaternion
__del__ = lambda self: None
MatrixBaseQuaternion_swigregister = _simbody.MatrixBaseQuaternion_swigregister
MatrixBaseQuaternion_swigregister(MatrixBaseQuaternion)
class MatrixQuaternion(MatrixBaseQuaternion):
"""Proxy of C++ SimTK::Matrix_<(SimTK::Quaternion_<(double)>)> class."""
__swig_setmethods__ = {}
for _s in [MatrixBaseQuaternion]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, MatrixQuaternion, name, value)
__swig_getmethods__ = {}
for _s in [MatrixBaseQuaternion]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, MatrixQuaternion, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(SimTK::Matrix_<(SimTK::Quaternion_<(double)>)> self) -> MatrixQuaternion
__init__(SimTK::Matrix_<(SimTK::Quaternion_<(double)>)> self, MatrixQuaternion src) -> MatrixQuaternion
Parameters
----------
src: SimTK::Matrix_< SimTK::Quaternion_< double > > const &
__init__(SimTK::Matrix_<(SimTK::Quaternion_<(double)>)> self, int m, int n) -> MatrixQuaternion
Parameters
----------
m: int
n: int
__init__(SimTK::Matrix_<(SimTK::Quaternion_<(double)>)> self, int m, int n, Quaternion initialValue) -> MatrixQuaternion
Parameters
----------
m: int
n: int
initialValue: SimTK::Quaternion_< double > const &
"""
this = _simbody.new_MatrixQuaternion(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def toString(self):
"""
toString(MatrixQuaternion self) -> std::string
Parameters
----------
self: SimTK::Matrix_< SimTK::Quaternion_< double > > const *
"""
return _simbody.MatrixQuaternion_toString(self)
def get(self, i, j):
"""
get(MatrixQuaternion self, int i, int j) -> Quaternion
Parameters
----------
i: int
j: int
"""
return _simbody.MatrixQuaternion_get(self, i, j)
def set(self, i, j, value):
"""
set(MatrixQuaternion self, int i, int j, Quaternion value)
Parameters
----------
i: int
j: int
value: SimTK::Quaternion_< double > const &
"""
return _simbody.MatrixQuaternion_set(self, i, j, value)
__swig_destroy__ = _simbody.delete_MatrixQuaternion
__del__ = lambda self: None
MatrixQuaternion_swigregister = _simbody.MatrixQuaternion_swigregister
MatrixQuaternion_swigregister(MatrixQuaternion)
class VectorBaseQuaternion(MatrixBaseQuaternion):
"""Proxy of C++ SimTK::VectorBase<(SimTK::Quaternion_<(double)>)> class."""
__swig_setmethods__ = {}
for _s in [MatrixBaseQuaternion]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, VectorBaseQuaternion, name, value)
__swig_getmethods__ = {}
for _s in [MatrixBaseQuaternion]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, VectorBaseQuaternion, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(SimTK::VectorBase<(SimTK::Quaternion_<(double)>)> self, int m=0) -> VectorBaseQuaternion
Parameters
----------
m: int
__init__(SimTK::VectorBase<(SimTK::Quaternion_<(double)>)> self) -> VectorBaseQuaternion
__init__(SimTK::VectorBase<(SimTK::Quaternion_<(double)>)> self, VectorBaseQuaternion source) -> VectorBaseQuaternion
Parameters
----------
source: SimTK::VectorBase< SimTK::Quaternion_< double > > const &
"""
this = _simbody.new_VectorBaseQuaternion(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def size(self):
"""
size(VectorBaseQuaternion self) -> int
Parameters
----------
self: SimTK::VectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.VectorBaseQuaternion_size(self)
def nrow(self):
"""
nrow(VectorBaseQuaternion self) -> int
Parameters
----------
self: SimTK::VectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.VectorBaseQuaternion_nrow(self)
def ncol(self):
"""
ncol(VectorBaseQuaternion self) -> int
Parameters
----------
self: SimTK::VectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.VectorBaseQuaternion_ncol(self)
def resize(self, m):
"""
resize(VectorBaseQuaternion self, int m) -> VectorBaseQuaternion
Parameters
----------
m: int
"""
return _simbody.VectorBaseQuaternion_resize(self, m)
def resizeKeep(self, m):
"""
resizeKeep(VectorBaseQuaternion self, int m) -> VectorBaseQuaternion
Parameters
----------
m: int
"""
return _simbody.VectorBaseQuaternion_resizeKeep(self, m)
def clear(self):
"""
clear(VectorBaseQuaternion self)
Parameters
----------
self: SimTK::VectorBase< SimTK::Quaternion_< double > > *
"""
return _simbody.VectorBaseQuaternion_clear(self)
def sum(self):
"""
sum(VectorBaseQuaternion self) -> Quaternion
Parameters
----------
self: SimTK::VectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.VectorBaseQuaternion_sum(self)
__swig_destroy__ = _simbody.delete_VectorBaseQuaternion
__del__ = lambda self: None
VectorBaseQuaternion_swigregister = _simbody.VectorBaseQuaternion_swigregister
VectorBaseQuaternion_swigregister(VectorBaseQuaternion)
class VectorQuaternion(VectorBaseQuaternion):
"""Proxy of C++ SimTK::Vector_<(SimTK::Quaternion_<(double)>)> class."""
__swig_setmethods__ = {}
for _s in [VectorBaseQuaternion]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, VectorQuaternion, name, value)
__swig_getmethods__ = {}
for _s in [VectorBaseQuaternion]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, VectorQuaternion, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(SimTK::Vector_<(SimTK::Quaternion_<(double)>)> self) -> VectorQuaternion
__init__(SimTK::Vector_<(SimTK::Quaternion_<(double)>)> self, VectorQuaternion src) -> VectorQuaternion
Parameters
----------
src: SimTK::Vector_< SimTK::Quaternion_< double > > const &
__init__(SimTK::Vector_<(SimTK::Quaternion_<(double)>)> self, int m, Quaternion initialValue) -> VectorQuaternion
Parameters
----------
m: int
initialValue: SimTK::Quaternion_< double > const &
"""
this = _simbody.new_VectorQuaternion(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def toString(self):
"""
toString(VectorQuaternion self) -> std::string
Parameters
----------
self: SimTK::Vector_< SimTK::Quaternion_< double > > const *
"""
return _simbody.VectorQuaternion_toString(self)
def get(self, i):
"""
get(VectorQuaternion self, int i) -> Quaternion
Parameters
----------
i: int
"""
return _simbody.VectorQuaternion_get(self, i)
def set(self, i, value):
"""
set(VectorQuaternion self, int i, Quaternion value)
Parameters
----------
i: int
value: SimTK::Quaternion_< double > const &
"""
return _simbody.VectorQuaternion_set(self, i, value)
def __str__(self):
"""
__str__(VectorQuaternion self) -> std::string
Parameters
----------
self: SimTK::Vector_< SimTK::Quaternion_< double > > const *
"""
return _simbody.VectorQuaternion___str__(self)
def __len__(self):
"""
__len__(VectorQuaternion self) -> int
Parameters
----------
self: SimTK::Vector_< SimTK::Quaternion_< double > > const *
"""
return _simbody.VectorQuaternion___len__(self)
__swig_destroy__ = _simbody.delete_VectorQuaternion
__del__ = lambda self: None
VectorQuaternion_swigregister = _simbody.VectorQuaternion_swigregister
VectorQuaternion_swigregister(VectorQuaternion)
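# Illustrative usage sketch (added; not part of the generated bindings). It
# assumes the 'Quaternion' proxy class is exported by this same module, which
# is not visible in this excerpt.
def _example_vector_quaternion():
    q = Quaternion()                 # assumed default constructor (identity quaternion)
    vec = VectorQuaternion(3, q)     # vector of 3 quaternions, all initialised to q
    vec.set(1, q)                    # overwrite element 1 via set() defined above
    return len(vec), vec.get(0)      # uses __len__ and get() defined above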
class RowVectorBaseQuaternion(MatrixBaseQuaternion):
"""Proxy of C++ SimTK::RowVectorBase<(SimTK::Quaternion_<(double)>)> class."""
__swig_setmethods__ = {}
for _s in [MatrixBaseQuaternion]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, RowVectorBaseQuaternion, name, value)
__swig_getmethods__ = {}
for _s in [MatrixBaseQuaternion]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, RowVectorBaseQuaternion, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(SimTK::RowVectorBase<(SimTK::Quaternion_<(double)>)> self, int n=0) -> RowVectorBaseQuaternion
Parameters
----------
n: int
__init__(SimTK::RowVectorBase<(SimTK::Quaternion_<(double)>)> self) -> RowVectorBaseQuaternion
__init__(SimTK::RowVectorBase<(SimTK::Quaternion_<(double)>)> self, RowVectorBaseQuaternion source) -> RowVectorBaseQuaternion
Parameters
----------
source: SimTK::RowVectorBase< SimTK::Quaternion_< double > > const &
__init__(SimTK::RowVectorBase<(SimTK::Quaternion_<(double)>)> self, int n, Quaternion initialValue) -> RowVectorBaseQuaternion
Parameters
----------
n: int
initialValue: SimTK::Quaternion_< double > const &
__init__(SimTK::RowVectorBase<(SimTK::Quaternion_<(double)>)> self, int n, Quaternion cppInitialValues) -> RowVectorBaseQuaternion
Parameters
----------
n: int
cppInitialValues: SimTK::Quaternion_< double > const *
"""
this = _simbody.new_RowVectorBaseQuaternion(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def size(self):
"""
size(RowVectorBaseQuaternion self) -> int
Parameters
----------
self: SimTK::RowVectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.RowVectorBaseQuaternion_size(self)
def nrow(self):
"""
nrow(RowVectorBaseQuaternion self) -> int
Parameters
----------
self: SimTK::RowVectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.RowVectorBaseQuaternion_nrow(self)
def ncol(self):
"""
ncol(RowVectorBaseQuaternion self) -> int
Parameters
----------
self: SimTK::RowVectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.RowVectorBaseQuaternion_ncol(self)
def nelt(self):
"""
nelt(RowVectorBaseQuaternion self) -> ptrdiff_t
Parameters
----------
self: SimTK::RowVectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.RowVectorBaseQuaternion_nelt(self)
def index(self, indices):
"""
index(RowVectorBaseQuaternion self, Array_< int > const & indices) -> RowVectorViewQuaternion
Parameters
----------
indices: Array_< int > const &
"""
return _simbody.RowVectorBaseQuaternion_index(self, indices)
def updIndex(self, indices):
"""
updIndex(RowVectorBaseQuaternion self, Array_< int > const & indices) -> RowVectorViewQuaternion
Parameters
----------
indices: Array_< int > const &
"""
return _simbody.RowVectorBaseQuaternion_updIndex(self, indices)
def __call__(self, *args):
"""
__call__(RowVectorBaseQuaternion self, int j) -> Quaternion
Parameters
----------
j: int
__call__(RowVectorBaseQuaternion self, int j) -> Quaternion
Parameters
----------
j: int
__call__(RowVectorBaseQuaternion self, int j, int n) -> RowVectorViewQuaternion
Parameters
----------
j: int
n: int
__call__(RowVectorBaseQuaternion self, int j, int n) -> RowVectorViewQuaternion
Parameters
----------
j: int
n: int
__call__(RowVectorBaseQuaternion self, Array_< int > const & indices) -> RowVectorViewQuaternion
Parameters
----------
indices: Array_< int > const &
__call__(RowVectorBaseQuaternion self, Array_< int > const & indices) -> RowVectorViewQuaternion
Parameters
----------
indices: Array_< int > const &
"""
return _simbody.RowVectorBaseQuaternion___call__(self, *args)
def __pos__(self):
"""
__pos__(RowVectorBaseQuaternion self) -> RowVectorBaseQuaternion
Parameters
----------
self: SimTK::RowVectorBase< SimTK::Quaternion_< double > > const *
"""
return _simbody.RowVectorBaseQuaternion___pos__(self)
def resize(self, n):
"""
resize(RowVectorBaseQuaternion self, int n) -> RowVectorBaseQuaternion
Parameters
----------
n: int
"""
return _simbody.RowVectorBaseQuaternion_resize(self, n)
def resizeKeep(self, n):
"""
resizeKeep(RowVectorBaseQuaternion self, int n) -> RowVectorBaseQuaternion
Parameters
----------
n: int
"""
return _simbody.RowVectorBaseQuaternion_resizeKeep(self, n)
def clear(self):
# -*- coding:utf-8 -*-
# author:平手友梨奈ii
# e-mail:<EMAIL>
# datetime:1993/12/01
# filename:configs.py
# software: PyCharm
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from nets.yolo4 import yolo_body
from nets.yolo4_loss import yolo_loss
from keras.backend.tensorflow_backend import set_session
from utils.utils import get_random_data, get_random_mosaic_data, get_random_mosaic_data_v2
from my_queue import GeneratorEnqueuer
import time
import math
from cosine_anneal import WarmUpCosineDecayScheduler
from config.configs import CONFIG
def get_classes(classes_path):
"""loads the classes"""
with open(classes_path) as f:
class_names = f.readlines()
# use a list comprehension to keep the code concise
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
"""loads the anchors from a file"""
with open(anchors_path) as f:
anchors = f.readline()
# use a list comprehension to keep the code concise
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
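# Editor's sketch: the file format get_anchors() expects -- a single line of
# comma-separated "w,h" values. The numbers below are the widely used YOLOv4
# COCO defaults, shown only as an example; they are not read from this
# project's configuration.
def _example_anchor_file_format():
    line = '12,16, 19,36, 40,28, 36,75, 76,55, 72,146, 142,110, 192,243, 459,401'
    anchors = np.array([float(x) for x in line.split(',')]).reshape(-1, 2)
    return anchors  # shape (9, 2): one (width, height) row per anchor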
def data_generator(annotation_lines,
batch_size,
input_shape,
anchors,
num_classes):
"""data generator for fit_generator
the assignment strategy:
one gt ---> one anchor
1.find which anchor(9 anchors) gt belongs to
2.find which grid gt belongs to
Args:
annotation_lines: a list [anno1, anno2, ...]
batch_size: batch size
input_shape: resolution [h, w]
anchors: anchor boxes
num_classes: the number of class
max_boxes: box_data: [max_boxes, 5]
when have a lot of gt to predict, need to set max_boxes bigger.
Returns:
batch data: [image_data, *y_true], np.zeros(batch_size)
"""
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i == 0:
# shuffle the dataset at the beginning of each epoch
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape)
image_data.append(image)
box_data.append(box)
i = (i + 1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
# get true_boxes
# y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
y_true = preprocess_true_boxes_iou_thres(box_data, input_shape, anchors, num_classes,
iou_threshold=CONFIG.TRAIN.IOU_THRESHOLD)
# use yield to get generator
yield [image_data, *y_true], np.zeros(batch_size)
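# Editor's sketch: how data_generator() is usually consumed. The compiled
# `model` (with the dummy zero-loss target this training setup relies on) is
# assumed to exist; only the generator call itself comes from this file.
def _example_fit_with_generator(model, train_lines, anchors, num_classes,
                                batch_size=8, input_shape=(416, 416)):
    gen = data_generator(train_lines, batch_size, input_shape, anchors, num_classes)
    model.fit_generator(gen,
                        steps_per_epoch=max(1, len(train_lines) // batch_size),
                        epochs=1)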
def data_generator_mosaic_iou_thres(annotation_lines,
batch_size,
input_shape,
anchors,
num_classes):
"""data generator for fit_generator
the assignment strategy:
one gt ---> more anchor(iou > iou_threshold)
Args:
annotation_lines: a list [anno1, anno2, ...]
batch_size: batch size
input_shape: resolution [h, w]
anchors: anchor boxes
num_classes: the number of class
max_boxes: box_data: [max_boxes, 5]
when have a lot of gt to predict, must set max_boxes bigger.
iou_threshold: if iou > iou_threshold, the anchor is responsible for this gt.
Returns:
batch data: [image_data, *y_true], np.zeros(batch_size)
"""
n = len(annotation_lines)
shuffle_num = n // 4
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i == 0:
# shuffle the dataset at the beginning of each epoch
np.random.shuffle(annotation_lines)
image, box = get_random_mosaic_data(annotation_lines[4 * i:4 * i + 4], input_shape)
image_data.append(image)
box_data.append(box)
i = (i + 1) % shuffle_num
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes_iou_thres(box_data, input_shape, anchors, num_classes,
iou_threshold=CONFIG.TRAIN.IOU_THRESHOLD)
# use yield to get generator
yield [image_data, *y_true], np.zeros(batch_size)
def preprocess_true_boxes(true_boxes,
input_shape,
anchors,
num_classes):
assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
num_layers = len(anchors) // 3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32') # 416,416
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy / input_shape[:]
true_boxes[..., 2:4] = boxes_wh / input_shape[:]
m = true_boxes.shape[0]
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
# [(m, 13, 13, 3, 85), (m, 26, 26, 3, 85), (m, 52, 52, 3, 85)]
y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + num_classes),
dtype='float32') for l in range(num_layers)]
# (1, 9, 2)
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
# filter invalid boxes
valid_mask = boxes_wh[..., 0] > 0
for b in range(m):
wh = boxes_wh[b, valid_mask[b]]
if len(wh) == 0:
continue
# [n, 1, 2]
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
# get iou
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
for l in range(num_layers):
if n in anchor_mask[l]:
# assign gt to one grid
i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
# assign gt to one anchor
k = anchor_mask[l].index(n)
c = true_boxes[b, t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
# score = 1 and get one hot class label
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5 + c] = 1
return y_true
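# Editor's sketch: the IoU-at-origin test preprocess_true_boxes() uses to pick
# an anchor. Both the gt box and the anchors are centered at the origin, so
# the IoU depends only on widths and heights. The numbers are made up purely
# for illustration.
def _example_anchor_iou_at_origin():
    wh = np.array([[30.0, 70.0]])                                  # one gt box (w, h)
    anchors_ = np.array([[10.0, 13.0], [33.0, 23.0], [30.0, 61.0]])
    inter_wh = np.maximum(np.minimum(wh[:, None] / 2., anchors_ / 2.) -
                          np.maximum(-wh[:, None] / 2., -anchors_ / 2.), 0.)
    inter_area = inter_wh[..., 0] * inter_wh[..., 1]
    iou = inter_area / (wh[:, None].prod(-1) + anchors_.prod(-1) - inter_area)
    return np.argmax(iou, axis=-1)                                 # -> array([2]): last anchor fits best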
def preprocess_true_boxes_iou_thres(true_boxes,
input_shape,
anchors,
num_classes,
iou_threshold=0.3):
"""get true boxes with iou threshold"""
assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
num_layers = len(anchors) // 3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32') # 416,416
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy / input_shape[:]
true_boxes[..., 2:4] = boxes_wh / input_shape[:]
m = true_boxes.shape[0]
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + num_classes),
dtype='float32') for l in range(num_layers)]
# [1, 9, 2]
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
# filter invalid boxes
valid_mask = boxes_wh[..., 0] > 0
for b in range(m):
wh = boxes_wh[b, valid_mask[b]]
if len(wh) == 0:
continue
# [n, 1, 2]
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# 1.iou > iou_threshold
positive = iou > iou_threshold # [num_true_boxes, num_anchors]
for t, n in enumerate(positive):
n = np.array(n, dtype=np.int32)
pos_index = np.argwhere(n == 1)
if len(pos_index) == 0:  # no anchor clears the IoU threshold; fall back to the best anchor below
continue
for id in pos_index:
id = id[0]
for l in range(num_layers):
if id in anchor_mask[l]:
i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(id)
c = true_boxes[b, t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5 + c] = 1
# 2.if no positive anchor, just choose the best one to be the positive.
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
for l in range(num_layers):
if n in anchor_mask[l]:
i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[b, t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5 + c] = 1
return y_true
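# Editor's sketch: the two-step assignment in preprocess_true_boxes_iou_thres().
# Step 1 marks every anchor whose IoU with the gt box exceeds the threshold as
# positive; step 2 falls back to the single best anchor, which matters when no
# anchor clears the threshold. The IoU values are invented for illustration.
def _example_iou_threshold_assignment(iou_threshold=0.3):
    iou = np.array([[0.05, 0.42, 0.87]])        # one gt box vs. three anchors
    positive = iou > iou_threshold              # -> [[False, True, True]]
    fallback = np.argmax(iou, axis=-1)          # -> [2], always assigned as well
    return positive, fallback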
def get_batch(num_workers,
max_queue_size=32,
use_mosaic_iout_generator=CONFIG.DATASET.MOSAIC_AUG,
multiprocessing=CONFIG.DATASET.MULTIPROCESS,
**kwargs):
"""
Args:
num_workers: number of workers
max_queue_size: max queue size
multiprocessing: use multiprocessing (typically True on Linux, False on Windows)
use_mosaic_iout_generator: whether to use the mosaic iou-threshold generator
**kwargs: args used in data generator
"""
enqueuer = None
try:
if use_mosaic_iout_generator:
enqueuer = GeneratorEnqueuer(data_generator_mosaic_iou_thres(**kwargs),
use_multiprocessing=multiprocessing)
else:
enqueuer = GeneratorEnqueuer(data_generator(**kwargs),
use_multiprocessing=multiprocessing)
enqueuer.start(max_queue_size=max_queue_size, workers=num_workers)
generator_output = None
while True:
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(0.01)
yield generator_output
generator_output = None
finally:
if enqueuer is not None:
enqueuer.stop()
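# Editor's sketch: pulling a single batch out of get_batch(). The keyword
# arguments mirror the data generator signatures above; the concrete values
# (annotation list, batch size, resolution) are placeholders rather than this
# project's configured settings.
def _example_pull_one_batch(train_lines, anchors, num_classes):
    batches = get_batch(num_workers=2,
                        annotation_lines=train_lines,
                        batch_size=4,
                        input_shape=(416, 416),
                        anchors=anchors,
                        num_classes=num_classes)
    inputs, dummy_targets = next(batches)       # ([image_data, *y_true], np.zeros(batch_size))
    return inputs, dummy_targets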
config = tf.ConfigProto()
# A "Best-fit with coalescing" algorithm, simplified from a version of dlmalloc.
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.per_process_gpu_memory_fraction = 1
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
if __name__ == "__main__":
annotation_path = CONFIG.TRAIN.ANNO_PATH
valid_anno_path = CONFIG.TRAIN.VALID_PATH
classes_path = CONFIG.TRAIN.CLASS_PATH
anchors_path = CONFIG.TRAIN.ANCHOR_PATH
# pretrained model path
weights_path = CONFIG.TRAIN.PRE_TRAINED_MODEL
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
num_classes = len(class_names)
num_anchors = len(anchors)
# checkpoint path
log_dir = CONFIG.TRAIN.SAVE_PATH
# resolution
input_shape
#
# if no previewer can be found
if previewer_func is None:
return msg
try:
result = previewer_func(filename, None, style)
if not result:
return msg
if isinstance(result, str):
msg.append(['stream', {'name': 'stdout', 'text': result}])
elif isinstance(result, dict):
msg.append([
'display_data', {
'source': filename,
'data': result,
'metadata': {}
}
])
elif isinstance(result, (list, tuple)) and len(result) == 2:
msg.append([
'display_data', {
'source': filename,
'data': result[0],
'metadata': result[1]
}
])
else:
msg.append([
'stream', {
'name': 'stderr',
'text': 'Unrecognized preview content: {}'.format(result)
}
])
except Exception as e:
msg.append([
'stream', {
'name': 'stderr',
'text': 'Failed to preview {}: {}'.format(filename, e)
}
])
return msg
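# Editor's sketch (not a registered SoS previewer): a hypothetical previewer
# showing the three return shapes preview_file() accepts. A plain str becomes
# a 'stream' message on stdout, a dict becomes 'display_data' with empty
# metadata, and a (data, metadata) pair is forwarded as 'display_data' as-is.
def _example_previewer(filename, kernel=None, style=None):
    with open(filename) as f:
        head = f.read(200)
    return {'text/plain': head}   # could also return `head` or ({'text/plain': head}, {})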
def cmd_preview(args, unknown_args):
from .utils import env, load_config_files
from .hosts import Host
load_config_files(args.config)
env.verbosity = args.verbosity
if args.host:
# remote host?
host = Host(args.host)
rargs = ['sos', 'preview'] + args.items + ['--html']
if args.style:
rargs += ['-s', args.style] + unknown_args
if 'GENERAL' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('GENERAL', 'Running "{}"'.format(' '.join(rargs)))
msgs = eval(host._host_agent.check_output(rargs, under_workdir=True))
else:
from .preview import get_previewers
previewers = get_previewers()
msgs = []
style = {
'style': args.style,
'options': unknown_args
} if args.style or unknown_args else None
for filename in args.items:
msgs.extend(preview_file(previewers, filename, style))
if args.html:
print(msgs)
else:
from .utils import colorstr
for msg in msgs:
if msg[0] == 'stream':
if msg[1]['name'] == 'stdout':
print(msg[1]['text'])
else:
print(colorstr(msg[1]['text'], 'PURPLE'))
elif msg[0] == 'display_data':
if 'text/plain' in msg[1]['data']:
print(msg[1]['data']['text/plain'])
elif 'text/html' in msg[1]['data']:
print(msg[1]['data']['text/html'])
else:
print('BINARY DATA of type {}'.format(', '.join(
msg[1]['data'].keys())))
else:
raise RuntimeError(
'Unrecognized preview output: {}'.format(msg))
# exit with code 1 if error happens
sys.exit(1 if any(msg[1]['name'] == 'stderr'
for msg in msgs
if msg[0] == 'stream') else 0)
#
# subcommand execute
#
def get_execute_parser(desc_only=False):
parser = argparse.ArgumentParser(
'execute', description='''Execute a packaged task''')
if desc_only:
return parser
parser.add_argument('tasks', nargs='+', help='''IDs of the task.''')
parser.add_argument(
'-s',
choices=['default', 'ignore', 'force', 'build', 'assert'],
default='default',
metavar='SIGMODE',
dest='__sig_mode__',
help='''How runtime signature would be handled, which can be "default"
(save and use signature, default mode in batch mode), "ignore"
(ignore runtime signature, default mode in interactive mode),
"force" (ignore existing signature and overwrite them while
executing the workflow), "build" (build new or overwrite
existing signature from existing environment and output files), and
"assert" for validating existing files against their signatures.
Please refer to online documentation for details about the
use of runtime signatures.''')
parser.add_argument(
'-v',
dest='verbosity',
type=int,
choices=range(5),
default=2,
help='''Output error (0), warning (1), info (2) and debug (3)
information to standard output (default to 2). More debug information could be
generated by setting environmental variable SOS_DEBUG to comma separated topics
of GENERAL, WORKER, CONTROLLER, STEP, VARIABLE, EXECUTOR, TARGET, ZERONQ, TASK,
DAG, and ACTION, or ALL for all debug information''')
parser.add_argument(
'-n',
'--dryrun',
action='store_true',
dest='dryrun',
help='''Dryrun mode, which will cause actions to print scripts instead
of executing them.''')
parser.add_argument(
'-q',
'--queue',
help='''Execute the task on the specified task queue or remote host.
The queue can be defined in a global or local sos configuration file,
or in a file specified by option --config. A host is assumed to be a
remote machine of process type if no configuration is found.''')
parser.add_argument(
'-c',
'--config',
help='''A configuration file with host
definitions, in case the hosts are not defined in global sos config.yml files.'''
)
parser.add_argument(
'-w',
'--wait',
action='store_true',
help='''Wait for the
completion of the task, and retrieve job results if needed after the
completion of the task. This option is only valid with the specification
of the -q option.''')
parser.set_defaults(func=cmd_execute)
return parser
def cmd_execute(args, workflow_args):
from .tasks import check_task, monitor_interval, resource_monitor_interval
from .task_executor import execute_task
from .utils import env, load_config_files
import glob
if args.queue is None:
# local machine ...
exit_code = []
for task in args.tasks:
#
matched = [
os.path.basename(x)[:-5] for x in glob.glob(
os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task +
'*.task'))
]
if not matched:
env.logger.error(
'{} does not match any existing task'.format(task))
exit_code.append(1)
continue
elif len(matched) > 1:
env.logger.error('"{}" matches more than one task ID {}'.format(
task, ', '.join(matched)))
exit_code.append(1)
continue
else:
task = matched[0]
# this is for local execution, perhaps on a remote host, and
# there is no daemon process etc. It also does not handle job
# preparation.
status = check_task(task)['status']
if status == 'running':
print(f'{task} is already running')
exit_code.append(1)
continue
# if status == 'completed' and args.__sig_mode__ != 'force':
# # if args.verbosity <= 1:
# env.logger.info('{} ``already completed``'.format(task))
# with open(os.path.join(os.path.expanduser('~'), '.sos', 'tasks', task + '.err'), 'a') as err:
# err.write('{} already completed'.format(task))
# # else:
# # print(summarizeExecution(task, status=status))
# exit_code.append(0)
# continue
exit_code.append(
execute_task(
task,
verbosity=args.verbosity,
runmode='dryrun' if args.dryrun else 'run',
sigmode=args.__sig_mode__,
monitor_interval=monitor_interval,
resource_monitor_interval=resource_monitor_interval))
sys.exit(sum(exit_code))
# with queue definition
from .hosts import Host
import time
# this is for local execution using a task queue. The task queue
# will prepare the task, sync files, and execute this command remotely
# if needed.
load_config_files(args.config)
env.verbosity = args.verbosity
env.config['sig_mode'] = args.__sig_mode__
env.config['run_mode'] = 'dryrun' if args.dryrun else 'run'
host = Host(args.queue)
for task in args.tasks:
host.submit_task(task)
failed_tasks = set()
while True:
res = host.check_status(args.tasks)
if any(x in ('failed', 'aborted') for x in res):
for t, s in zip(args.tasks, res):
if s in ('failed', 'aborted') and t not in failed_tasks:
env.logger.warning('{} ``{}``'.format(t, s))
failed_tasks.add(t)
if all(x in ('completed', 'failed', 'aborted') for x in res):
raise RuntimeError(
'{} completed, {} failed, {} aborted)'.format(
len([x for x in res if x == 'completed']),
len([x for x in res if x == 'failed']),
len([x for x in res if x.startswith('aborted')])))
if all(x == 'completed' for x in res):
if 'TASK' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('TASK', f'Put results for {args.tasks}')
res = host.retrieve_results(args.tasks)
return
elif all(x != 'pending' for x in res) and not args.wait:
return
elif any(x in ('pending', 'running', 'submitted') for x in res):
continue
else:
raise RuntimeError('Job returned with status {}'.format(res))
time.sleep(0.01)
#
# command status
#
def get_status_parser(desc_only=False):
parser = argparse.ArgumentParser(
'status', description='''Check the status of specified tasks''')
if desc_only:
return parser
parser.add_argument(
'tasks',
nargs='*',
help='''ID of the task. All tasks
that are related to the workflow executed under the current directory
will be checked if unspecified. There is no need to specify complete
task IDs because SoS will match the specified names with tasks starting
with these names.''')
parser.add_argument(
'-q',
'--queue',
help='''Check the status of jobs on the specified task queue or remote
host. The queue can be defined in a global or local sos configuration
file, or in a file specified by option --config. A host is assumed to
be a remote machine of process type if no configuration is found.''')
parser.add_argument(
'-c',
'--config',
help='''A configuration file with host
definitions, in case the hosts are not defined in global sos config.yml files.'''
)
parser.add_argument(
'-a',
'--all',
action='store_true',
help='''Check the status of all tasks on local or specified remote task queue,
including tasks created by workflows executed from other directories.'''
)
parser.add_argument(
'-v',
dest='verbosity',
type=int,
choices=range(5),
default=2,
help='''Output error (0), warning (1), info (2), debug (3) and trace (4)
information to standard output (default to 2).''')
parser.add_argument(
'-t',
'--tags',
nargs='*',
help='''Only list tasks with
one of the specified tags.''')
parser.add_argument(
'-s',
'--status',
nargs='*',
help='''Display tasks with
one of the specified status.''')
parser.add_argument(
'--age',
help='''Limit to tasks that were created more than
(default) or within the specified age. The value of this parameter can be in
units s (second), m (minute), h (hour), or d (day, default), or in the format of
HH:MM:SS, with optional prefix + for older (default) and - for newer than
specified age.''')
parser.add_argument(
'--html',
action='store_true',
help='''Output results in HTML format. This option will override option
verbosity and output detailed status information in HTML tables and
figures.''')
parser.add_argument(
'--numeric-times', action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=cmd_status)
return parser
def cmd_status(args, workflow_args):
from .tasks import print_task_status
from .utils import env, load_config_files, get_traceback
from .hosts import Host
try:
load_config_files(args.config)
if not args.queue:
print_task_status(
tasks=args.tasks,
check_all=args.all,
verbosity=args.verbosity,
html=args.html,
numeric_times=args.numeric_times,
age=args.age,
tags=args.tags,
status=args.status)
else:
# remote host?
host = Host(args.queue)
print(
host._task_engine.query_tasks(
tasks=args.tasks,
check_all=args.all,
verbosity=args.verbosity,
html=args.html,
import collections
import hashlib
import inspect
import os
import pathlib
import click
import django
from django.conf import settings
from django.db import connections
from django.db.migrations import executor as django_migration_executor
from django.db.migrations import loader as django_migration_loader
from django.utils.functional import cached_property
import formaldict
import jinja2
import yaml
from migration_docs import utils
# The default Jinja template for showing migrations
DEFAULT_MIGRATION_TEMPLATE = """
{% for migration in migrations %}
[{% if migration.applied %}X{% else %} {% endif %}] {{ migration.label }}
{% endfor %}
""".strip()
def _get_migration_docs_file_root():
"""
Get the root path to migration docs configuration files.
"""
return os.path.join(os.getcwd(), '.migration-docs')
def _get_migration_docs_file_path(file_name):
"""
Get the path to a migration docs file.
"""
return os.path.join(_get_migration_docs_file_root(), file_name)
def _no_msg(msg, fg='green'):
"""A message printer that does nothing"""
pass
def _pretty_msg(msg, fg='green'):
"""A pretty message printer"""
click.secho(msg, fg=fg)
class Migration:
"""A migration and its associated docs.
Migrations are typically loaded and accessed via the parent `Migrations`
object. When loaded, the Migration has access to core migration
attributes (e.g. ``atomic``, ``sql``, etc) and also has attributes
for every attribute collected in the documentation schema. For example,
if the user configured a ``type`` attribute to be collected
in ``.migration-docs/migration.yaml``, it would be accessible as
a ``type`` attribute on this object.
"""
def __init__(self, node, *, executor, loader, docs):
self._node = node
self._executor = executor
self._loader = loader
self._docs = docs
@property
def applied(self):
"""True if the migration has been applied"""
return (self.app_label, self.name) in self._loader.applied_migrations
@cached_property
def hash(self):
"""The MD5 hash of the migration file"""
return hashlib.md5(
inspect.getsource(inspect.getmodule(self._node)).encode()
).hexdigest()
@property
def atomic(self):
"""True if the migration is executed in a transaction"""
return self._node.atomic
@property
def app_label(self):
"""The Django app label of the migration"""
return self._node.app_label
@property
def name(self):
"""The name of the migration (e.g. 0001_initial)"""
return self._node.name
@property
def operations(self):
"""The raw list of migration operation objects"""
return self._node.operations
@property
def operations_str(self):
"""String representations of the migration operations"""
return [str(operation) for operation in self.operations]
@cached_property
def sql(self):
"""The raw SQL for the migration"""
if django.VERSION[0] >= 3 and django.VERSION[1] >= 1:
migration_sql_obj = self._loader
else:
migration_sql_obj = self._executor
try:
sql_statements = migration_sql_obj.collect_sql(
[(self._node, False)]
)
return '\n'.join(sql_statements)
except Exception as exc:
return f'Error obtaining SQL - "{exc}"'
@property
def label(self):
"""The unique identifying label of the migration"""
return str(self._node)
def __str__(self):
return self.label
def __getattribute__(self, attr):
"""
Allows documented fields from the migration docs to be accessed as
attributes on the Migration object.
Doing this provides the ability for users to filter Migrations
by any documented attribute.
"""
try:
return object.__getattribute__(self, attr)
except AttributeError:
if self._docs.get(self.label) and attr in self._docs[self.label]:
return self._docs[self.label][attr]
elif attr in self._docs.schema:
return None
else:
raise
def set_docs(self, prompt=True, defaults=None):
"""Set docs about a migration
Args:
prompt (boolean, default=True): True if collecting data from
a user.
defaults (dict, default=None): When prompting, use these values
as defaults.
"""
if self.label not in self._docs:
self._docs[self.label] = {}
self._docs[self.label]['_hash'] = self.hash
self._docs[self.label]['atomic'] = self.atomic
self._docs[self.label]['sql'] = self.sql
if prompt:
self._docs[self.label].update(
self._docs.schema.prompt(defaults=defaults)
)
self._docs.save()
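# Editor's sketch: what the attribute passthrough in Migration.__getattribute__
# enables. The migration label and the 'point_of_contact' field are examples;
# documented attributes depend on the user's .migration-docs/migration.yaml
# schema and are None when declared but not yet filled in.
def _example_read_migration_docs():
    migrations = Migrations()
    migration = migrations['users.0001_initial']   # hypothetical label
    # Core attributes always exist; documented fields resolve through the docs.
    return migration.applied, migration.sql, migration.point_of_contact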
class Migrations(utils.FilterableUserList):
"""
A filterable and groupable list of migrations and their associated
migration docs.
"""
def __init__(self, using='default', loader=None, executor=None):
connection = connections[using]
self._loader = loader or django_migration_loader.MigrationLoader(
connection, ignore_no_migrations=True
)
self._graph = self._loader.graph
self._executor = django_migration_executor.MigrationExecutor(
connection
)
self._docs = MigrationDocs()
self._migrations = {
str(node): Migration(
node,
executor=self._executor,
loader=self._loader,
docs=self._docs,
)
for node in self._graph.nodes.values()
}
# Construct a plan of migrations. Set the ``data`` as the plan so
# that this data structure is a list
targets = self._graph.leaf_nodes()
self.data = []
seen = set()
for target in targets:
for migration in self._graph.forwards_plan(target):
if migration not in seen: # pragma: no branch
# We don't cover the "else" branch of this statement since
# our test models don't have complex enough migrations
self.data.append(
self._migrations[str(self._graph.nodes[migration])]
)
seen.add(migration)
def __getitem__(self, i):
"""Allow accessing by list index or migration label"""
if isinstance(i, int):
return self.data[i]
else:
return self._migrations[i]
def filter_by_missing_docs(self):
return self.intersect('label', set(self._migrations) - set(self._docs))
def filter_by_stale_docs(self):
labels = [
migration
for migration, docs in self._docs.items()
if docs is not None
and migration in self._migrations
and docs['_hash'] != self._migrations[migration].hash
]
return self.intersect('label', labels)
@property
def excess_docs(self):
return set(self._docs) - set(self._migrations)
def prune_excess_docs(self):
for label in self.excess_docs:
del self._docs[label]
self._docs.save()
def bootstrap_docs(self):
"""Bootstraps all migration docs to empty values."""
self._docs = MigrationDocs(data={str(node): None for node in self})
self._docs.save()
class MigrationDocs(collections.UserDict):
def __init__(self, data=None, msg=_pretty_msg):
"""
Represents migration docs as a dictionary. Reads and persists docs as
YAML.
Args:
msg (func): Function for printing messages to the user.
data (dict, default=None): Data to use as migration docs. If None,
load migration docs from the docs.yaml file.
"""
self._msg = msg
if not data:
docs_file = _get_migration_docs_file_path('docs.yaml')
try:
with open(docs_file, 'r') as f:
self.data = yaml.safe_load(f)
except IOError:
self.data = {}
except Exception as exc:
raise RuntimeError(
'django-migration-docs: docs.yaml is corrupt and cannot'
' be parsed as YAML. Please fix the'
' .migration-docs/docs.yaml file.'
) from exc
else:
self.data = data
@cached_property
def schema(self):
"""Loads the migration doc schema
If not configured, returns a schema with a point of contact and
description for the migration.
"""
try:
with open(
_get_migration_docs_file_path('migration.yaml'), 'r'
) as f:
schema = yaml.safe_load(f)
except IOError:
schema = [
{
'label': 'point_of_contact',
'help': 'The point of contact for this migration.',
},
{
'label': 'description',
'help': 'An in-depth description of the migration.',
'multiline': True,
},
]
except Exception as exc:
raise RuntimeError(
'django-migration-docs: migration.yaml is corrupt and cannot'
' be parsed as YAML. Please fix the'
' .migration-docs/migration.yaml file.'
) from exc
return formaldict.Schema(schema)
def save(self):
"""Save all migration docs
Ensure docs are ordered when persisted to keep YAML consistently
ordered
"""
docs_file = _get_migration_docs_file_path('docs.yaml')
yaml.Dumper.add_representer(
collections.OrderedDict,
lambda dumper, data: dumper.represent_mapping(
'tag:yaml.org,2002:map', data.items()
),
)
ordered_docs = collections.OrderedDict(
(label, docs) for label, docs in sorted(self.data.items())
)
yaml_str = yaml.dump(ordered_docs, Dumper=yaml.Dumper)
pathlib.Path(docs_file).parent.mkdir(parents=True, exist_ok=True)
with open(docs_file, 'w+') as f:
f.write(yaml_str)
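# Editor's sketch: the list-of-dicts shape that MigrationDocs.schema hands to
# formaldict.Schema after parsing .migration-docs/migration.yaml. The 'type'
# field is a hypothetical user-defined addition; the other two mirror the
# built-in default above.
def _example_custom_schema():
    return [
        {'label': 'point_of_contact',
         'help': 'The point of contact for this migration.'},
        {'label': 'type',
         'help': 'Whether this is a schema or a data migration.'},
        {'label': 'description',
         'help': 'An in-depth description of the migration.',
         'multiline': True},
    ]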
def bootstrap(msg=_pretty_msg):
"""
Bootstrap migration docs with filler values when integrating docs
with a project for the first time.
Args:
msg (func): A message printer for showing messages to the user.
Raises:
RuntimeError: When migration docs have already been synced
"""
if MigrationDocs():
raise RuntimeError(
'Cannot bootstrap when migration docs have already been synced.'
)
Migrations().bootstrap_docs()
msg('django-migration-docs: Docs successfully bootstrapped.')
def sync(msg=_pretty_msg):
"""
Sync new migrations with the migration docs and prune migrations that
no longer exist.
Args:
msg (func): A message printer for showing messages to the user.
"""
# Run any configured pre-sync hooks
pre_sync_hooks = getattr(settings, 'MIGRATION_DOCS_PRE_SYNC_HOOKS', [])
if pre_sync_hooks:
msg('django-migration-docs: Running pre-sync hooks...')
for pre_sync_hook in pre_sync_hooks:
msg(pre_sync_hook, fg='yellow')
utils.shell(pre_sync_hook)
migrations = Migrations()
missing_docs = migrations.filter_by_missing_docs()
stale_docs = migrations.filter_by_stale_docs()
excess_docs = migrations.excess_docs
# Collect information for new migrations
if missing_docs:
msg(
'django-migration-docs: Found no docs for'
f' {len(missing_docs)} migration(s). Please enter'
' more information.'
)
for migration in missing_docs:
msg(f'{migration.label}:', fg='yellow')
migration.set_docs()
# Update any stale documentation
if stale_docs:
msg(
f'django-migration-docs: Found {len(stale_docs)} stale'
' migration doc(s). Docs updated automatically.'
)
for migration in stale_docs:
migration.set_docs(prompt=False)
# Delete old migrations
if excess_docs:
msg(
f'django-migration-docs: Found docs for {len(excess_docs)}'
' deleted migration(s). Docs were removed.'
)
migrations.prune_excess_docs()
msg('django-migration-docs: Successfully synced migration docs.')
def update(migrations, msg=_pretty_msg):
"""
Update migration docs for specific migrations.
Args:
migrations (List[str]): A list of migration labels to update
(e.g. users.0001_initial).
msg (func): A message printer for showing messages to the user.
"""
migration_objs = Migrations()
for migration in migrations:
msg(f'{migration}:', fg='yellow')
try:
migration_objs[migration].set_docs()
except KeyError:
msg(
f'Migration with label "{migration}" does not exist.', fg='red'
)
def check(msg=_pretty_msg):
"""
Check migration docs. Return False if any of the following conditions hold:
- There are migrations without docs.
- There are documented migrations that no longer exist.
- There are stale migration docs.
Args:
msg (func): A message printer for showing messages to the user.
Returns:
bool: True when the migration docs are up to date, False otherwise.
"""
migrations = Migrations()
missing_docs = migrations.filter_by_missing_docs()
stale_docs = migrations.filter_by_stale_docs()
excess_docs = migrations.excess_docs
if missing_docs:
msg(
f'django-migration-docs: Found no docs for {len(missing_docs)}'
' migration(s).',
fg='red',
)
if stale_docs:
msg(
f'django-migration-docs: Found {len(stale_docs)} stale'
' migration doc(s).',
fg='red',
)
str_multi_scene_13 = 1919
str_multi_scene_14 = 1920
str_multi_scene_15 = 1921
str_multi_scene_end = 1922
str_multi_game_type_1 = 1923
str_multi_game_type_2 = 1924
str_multi_game_type_3 = 1925
str_multi_game_type_4 = 1926
str_multi_game_type_5 = 1927
str_multi_game_type_6 = 1928
str_multi_game_type_7 = 1929
str_multi_game_type_8 = 1930
str_multi_game_types_end = 1931
str_poll_kick_player_s1_by_s0 = 1932
str_poll_ban_player_s1_by_s0 = 1933
str_poll_change_map_to_s1_by_s0 = 1934
str_poll_change_map_to_s1_and_factions_to_s2_and_s3_by_s0 = 1935
str_poll_change_number_of_bots_to_reg0_and_reg1_by_s0 = 1936
str_poll_kick_player = 1937
str_poll_ban_player = 1938
str_poll_change_map = 1939
str_poll_change_map_with_faction = 1940
str_poll_change_number_of_bots = 1941
str_poll_time_left = 1942
str_poll_result_yes = 1943
str_poll_result_no = 1944
str_total_item_cost_reg0 = 1945
str_server_name = 1946
str_game_password = 1947
str_map = 1948
str_game_type = 1949
str_max_number_of_players = 1950
str_number_of_bots_in_team_reg1 = 1951
str_team_reg1_faction = 1952
str_enable_valve_anti_cheat = 1953
str_allow_friendly_fire = 1954
str_allow_melee_friendly_fire = 1955
str_friendly_fire_damage_self_ratio = 1956
str_friendly_fire_damage_friend_ratio = 1957
str_spectator_camera = 1958
str_control_block_direction = 1959
str_map_time_limit = 1960
str_round_time_limit = 1961
str_players_take_control_of_a_bot_after_death = 1962
str_team_points_limit = 1963
str_point_gained_from_flags = 1964
str_point_gained_from_capturing_flag = 1965
str_respawn_period = 1966
str_add_to_official_game_servers_list = 1967
str_combat_speed = 1968
str_combat_speed_0 = 1969
str_combat_speed_1 = 1970
str_combat_speed_2 = 1971
str_combat_speed_3 = 1972
str_combat_speed_4 = 1973
str_off = 1974
str_on = 1975
str_defender_spawn_count_limit = 1976
str_unlimited = 1977
str_automatic = 1978
str_by_mouse_movement = 1979
str_free = 1980
str_stick_to_any_player = 1981
str_stick_to_team_members = 1982
str_stick_to_team_members_view = 1983
str_make_factions_voteable = 1984
str_make_kick_voteable = 1985
str_make_ban_voteable = 1986
str_bots_upper_limit_for_votes = 1987
str_make_maps_voteable = 1988
str_valid_vote_ratio = 1989
str_auto_team_balance_limit = 1990
str_welcome_message = 1991
str_initial_gold_multiplier = 1992
str_battle_earnings_multiplier = 1993
str_round_earnings_multiplier = 1994
str_allow_player_banners = 1995
str_force_default_armor = 1996
str_reg0 = 1997
str_s0_reg0 = 1998
str_s0_s1 = 1999
str_reg0_dd_reg1reg2 = 2000
str_s0_dd_reg0 = 2001
str_respawning_in_reg0_seconds = 2002
str_no_more_respawns_remained_this_round = 2003
str_reg0_respawns_remained = 2004
str_this_is_your_last_respawn = 2005
str_wait_next_round = 2006
str_yes_wo_dot = 2007
str_no_wo_dot = 2008
str_we_resign = 2009
str_i_resign = 2010
str_s1_returned_flag = 2011
str_s1_auto_returned_flag = 2012
str_s1_captured_flag = 2013
str_s1_taken_flag = 2014
str_s1_neutralized_flag_reg0 = 2015
str_s1_captured_flag_reg0 = 2016
str_s1_pulling_flag_reg0 = 2017
str_s1_destroyed_target_0 = 2018
str_s1_destroyed_target_1 = 2019
str_s1_destroyed_catapult = 2020
str_s1_destroyed_trebuchet = 2021
str_s1_destroyed_all_targets = 2022
str_s1_saved_1_target = 2023
str_s1_saved_2_targets = 2024
str_s1_defended_castle = 2025
str_s1_captured_castle = 2026
str_auto_team_balance_in_20_seconds = 2027
str_auto_team_balance_next_round = 2028
str_auto_team_balance_done = 2029
str_s1_won_round = 2030
str_round_draw = 2031
str_round_draw_no_one_remained = 2032
str_death_mode_started = 2033
str_reset_to_default = 2034
str_done = 2035
str_player_name = 2036
str_kills = 2037
str_deaths = 2038
str_ping = 2039
str_dead = 2040
str_reg0_dead = 2041
str_bots_reg0_agents = 2042
str_bot_1_agent = 2043
str_score = 2044
str_score_reg0 = 2045
str_flags_reg0 = 2046
str_reg0_players = 2047
str_reg0_player = 2048
str_open_gate = 2049
str_close_gate = 2050
str_open_door = 2051
str_close_door = 2052
str_raise_ladder = 2053
str_drop_ladder = 2054
str_back = 2055
str_start_map = 2056
str_choose_an_option = 2057
str_choose_a_poll_type = 2058
str_choose_faction = 2059
str_choose_a_faction = 2060
str_choose_troop = 2061
str_choose_a_troop = 2062
str_choose_items = 2063
str_options = 2064
str_redefine_keys = 2065
str_submit_a_poll = 2066
str_administrator_panel = 2067
str_kick_player = 2068
str_ban_player = 2069
str_mute_player = 2070
str_unmute_player = 2071
str_quit = 2072
str_poll_for_changing_the_map = 2073
str_poll_for_changing_the_map_and_factions = 2074
str_poll_for_changing_number_of_bots = 2075
str_poll_for_kicking_a_player = 2076
str_poll_for_banning_a_player = 2077
str_choose_a_player = 2078
str_choose_a_map = 2079
str_choose_a_faction_for_team_reg0 = 2080
str_choose_number_of_bots_for_team_reg0 = 2081
str_spectator = 2082
str_spectators = 2083
str_score = 2084
str_command = 2085
str_profile_banner_selection_text = 2086
str_use_default_banner = 2087
str_party_morale_is_low = 2088
str_weekly_report = 2089
str_has_deserted_the_party = 2090
str_have_deserted_the_party = 2091
str_space = 2092
str_us_ = 2093
str_allies_ = 2094
str_enemies_ = 2095
str_routed = 2096
str_weekly_budget = 2097
str_income_from_s0 = 2098
str_mercenary_payment_from_s0 = 2099
str_s0s_party = 2100
str_loss_due_to_tax_inefficiency = 2101
str_wages_for_s0 = 2102
str_earlier_debts = 2103
str_net_change = 2104
str_earlier_wealth = 2105
str_new_wealth = 2106
str_new_debts = 2107
str_completed_faction_troop_assignments_cheat_mode_reg3 = 2108
str_completed_political_events_cheat_mode_reg3 = 2109
str_assigned_love_interests_attraction_seed_reg3 = 2110
str_located_kingdom_ladies_cheat_mode_reg3 = 2111
str_team_reg0_bot_count_is_reg1 = 2112
str_input_is_not_correct_for_the_command_type_help_for_more_information = 2113
str_maximum_seconds_for_round_is_reg0 = 2114
str_respawn_period_is_reg0_seconds = 2115
str_bots_upper_limit_for_votes_is_reg0 = 2116
str_map_is_voteable = 2117
str_map_is_not_voteable = 2118
str_factions_are_voteable = 2119
str_factions_are_not_voteable = 2120
str_players_respawn_as_bot = 2121
str_players_do_not_respawn_as_bot = 2122
str_kicking_a_player_is_voteable = 2123
str_kicking_a_player_is_not_voteable = 2124
str_banning_a_player_is_voteable = 2125
str_banning_a_player_is_not_voteable = 2126
str_player_banners_are_allowed = 2127
str_player_banners_are_not_allowed = 2128
str_default_armor_is_forced = 2129
str_default_armor_is_not_forced = 2130
str_percentage_of_yes_votes_required_for_a_poll_to_get_accepted_is_reg0 = 2131
str_auto_team_balance_threshold_is_reg0 = 2132
str_starting_gold_ratio_is_reg0 = 2133
str_combat_gold_bonus_ratio_is_reg0 = 2134
str_round_gold_bonus_ratio_is_reg0 = 2135
str_point_gained_from_flags_is_reg0 = 2136
str_point_gained_from_capturing_flag_is_reg0 = 2137
str_map_time_limit_is_reg0 = 2138
str_team_points_limit_is_reg0 = 2139
str_defender_spawn_count_limit_is_s1 = 2140
str_system_error = 2141
str_prisoner_granted_parole = 2142
str_prisoner_not_offered_parole = 2143
str__age_reg1_family_ = 2144
str_s49_s12_s11_rel_reg0 = 2145
str_s49_s12_s11 = 2146
str_lord_info_string = 2147
str_updating_faction_notes_for_s14_temp_=_reg4 = 2148
str_foreign_relations__ = 2149
str_s21__the_s5_is_at_war_with_the_s14 = 2150
str_s21_the_s5_has_had_the_upper_hand_in_the_fighting = 2151
str_s21_the_s5_has_gotten_the_worst_of_the_fighting = 2152
str_s21_the_fighting_has_gone_on_for_some_time_and_the_war_may_end_soon_with_a_truce = 2153
str_s21_the_fighting_has_begun_relatively_recently_and_the_war_may_continue_for_some_time = 2154
str_s21_reg4reg5 = 2155
str__however_the_truce_is_no_longer_binding_on_the_s14 = 2156
str_s21__the_s5_is_bound_by_truce_not_to_attack_the_s14s18_the_truce_will_expire_in_reg1_days = 2157
str_s21__the_s5_has_recently_suffered_provocation_by_subjects_of_the_s14_and_there_is_a_risk_of_war = 2158
str_s21__the_s5_has_no_outstanding_issues_with_the_s14 = 2159
str_s21_the_s14_was_recently_provoked_by_subjects_of_the_s5_and_there_is_a_risk_of_war_ = 2160
str_s21_cheat_mode_assessment_s14_ = 2161
str_the_s5_is_ruled_by_s6_it_occupies_s8_its_vassals_are_s10__s21 = 2162
str_assigned_lord_reputation_and_relations_cheat_mode_reg3 = 2163
str_caravan_trades_in_s5_originally_from_s4_ = 2164
str_your_hero_prisoned_at_s1 = 2165
str_old_morale_is_reg0_new_morale_is_reg1 = 2166
str_our_per_person__reg0_num_people__reg1_total_gain__reg2 = 2167
str_ene_per_person__reg0_num_people__reg1_total_gain__reg2 = 2168
str_all_per_person__reg0_num_people__reg1_total_gain__reg2 = 2169
str_loss_ratio_is_reg1 = 2170
str_total_enemy_morale_gain__reg6_last_total_enemy_morale_gain__reg7_remaining_enemy_population__reg5 = 2171
str_reg4_killed_reg5_wounded_reg6_routed = 2172
str_reg4_killed_reg5_routed = 2173
str_reg4_killed_reg5_wounded = 2174
str_reg4_wounded_reg5_routed = 2175
str_routed = 2176
str_caravan_in_s10_considers_s11_total_price_dif_=_reg3 = 2177
str_test__caravan_in_s3_selects_for_s4_trade_score_reg3 = 2178
str_prisoner_relative_is_reg0 = 2179
str_test_diagnosis__traveller_attacks_for_s4 = 2180
str_traveller_attack_found = 2181
str_s42 = 2182
str_test_diagnostic_quest_found_for_s4 = 2183
str_s4_changing_sides_aborts_quest = 2184
str_s4_awarded_to_s5 = 2185
str_s11_reacts_to_granting_of_s12_to_s10 = 2186
str_debug__hiring_men_to_s7_ideal_size__reg6_ideal_top_size__reg7_hiring_budget__reg8 = 2187
str_debug__hiring_men_to_party_for_s0 = 2188
str_calculating_sortie_for_s4_strength_of_reg3_vs_reg4_enemies = 2189
str_s4_sorties = 2190
str_current_wealth_reg1_taxes_last_collected_from_s4 = 2191
str_s4_considers_going_to_s5_to_pay_court_to_s6 = 2192
str_relation_with_1_bug_found_here__probably_because_s5_has_just_been_captured = 2193
str_s4_has_reg4_chance_of_going_to_home_center = 2194
str_s4_has_reg4_chance_of_recruiting_troops = 2195
str_s4_has_reg4_chance_of_going_to_s5 = 2196
str_s4_has_reg5_chance_of_patrolling_s6 = 2197
str_s4_has_reg5_chance_of_raiding_s6 = 2198
str_s4_has_reg5_chance_of_besieging_s6 = 2199
str_sum_chances_reg6 = 2200
str_deciding_faction_ai_for_s3 = 2201
str_s5_decides_s14 = 2202
str_lords_of_the_s1_gather_for_a_feast_at_s2 = 2203
str_s5_begins_offensive = 2204
str_renown_change_of_reg4_reduced_to_reg5_because_of_high_existing_renown = 2205
str_s14 = 2206
str_players_kingdom_has_had_reg3_days_of_peace = 2207
str_s4_is_present_at_the_center_and_in_place_of_honor = 2208
str_s4_is_present_at_the_center_as_a_refugee = 2209
str_s4_is_present_at_the_center_and_not_attending_the_feast = 2210
str_s4_is_present_at_the_center_and_is_married = 2211
str_s4_is_present_at_the_center_and_is_attending_the_feast = 2212
str_s4_is_present_at_the_center_and_is_awaiting_the_player_in_private = 2213
str_s4_is_present_at_the_center_and_is_allowed_to_meet_the_player = 2214
str_s4_is_present_at_the_center_and_is_not_allowed_to_meet_the_player = 2215
str_no_relation = 2216
str_wife = 2217
str_husband = 2218
str_father = 2219
str_mother = 2220
str_daughter = 2221
str_son = 2222
str_sister = 2223
str_brother = 2224
str_niece = 2225
str_nephew = 2226
str_aunt = 2227
str_uncle = 2228
str_cousin = 2229
str_daughterinlaw = 2230
str_soninlaw = 2231
str_motherinlaw = 2232
str_fatherinlaw = 2233
str_sisterinlaw = 2234
str_brotherinlaw = 2235
str_print_party_members_entered = 2236
str_num_companion_stacks_=_reg10 = 2237
str_someone = 2238
str_i_take_what_work_i_can_sirmadame_i_carry_water_or_help_the_merchants_with_their_loads_or_help_build_things_if_theres_things_to_be_built = 2239
str_dna_reg4_total_production_reg5_modula_reg7 = 2240
str_agent_produces_s9 = 2241
str_im_not_doing_anything_sirmadame_theres_no_work_to_be_had_around_here_these_days = 2242
str_im_not_doing_anything_sirmadame_i_have_no_land_of_my_own_and_theres_no_work_to_be_had_around_here_these_days = 2243
str_why_im_still_living_off_of_your_kindness_and_goodness_sirmadame_hopefully_there_will_be_work_shortly = 2244
str_i_work_in_the_fields_just_outside_the_walls_where_they_grow_grain_we_dont_quite_grow_enough_to_meet_our_needs_though_and_have_to_import_grain_from_the_surrounding_countryside = 2245
str_i_work_mostly_in_the_fields_growing_grain_in_the_town_they_grind_it_to_make_bread_or_ale_and_we_can_also_boil_it_as_a_porridge = 2246
str_i_work_in_the_breweries_making_ale_the_poor_folk_drink_a_lot_of_it_as_its_cheaper_than_wine_we_make_it_with_grain_brought_in_from_the_countryside = 2247
str_i_work_in_a_mill_grinding_flour_to_make_bread_bread_is_cheap_keeps_well_and_fills_the_stomach = 2248
str_i_tend_cattle_we_dry_and_salt_meat_to_preserve_it_and_make_cheese_from_the_milk = 2249
str_i_tend_cattle_we_dry_and_salt_meat_to_preserve_it_and_make_cheese_from_the_milk_so_it_doesnt_spoil = 2250
str_i_tend_sheep_we_send_the_wool_to_the_cities_to_be_woven_into_cloth_and_make_mutton_sausage_when_we_cull_the_herds = 2251
str_i_work_at_a_loom_spinning_cloth_from_wool_wool_is_some_of_the_cheapest_cloth_you_can_buy_but_it_will_still_keep_you_warm = 2252
str_i_crew_a_fishing_boat_we_salt_and_smoke_the_flesh_to_sell_it_far_inland = 2253
str_i_sift_salt_from_a_nearby_flat_they_need_salt_everywhere_to_preserve_meat_and_fish = 2254
str_i_mine_iron_from_a_vein_in_a_nearby_cliffside_they_use_it_to_make_tools_arms_and_other_goods = 2255
str_i_make_pottery_which_people_use_to_store_grain_and_carry_water = 2256
str_trade_explanation_tools = 2257
str_trade_explanation_oil = 2258
str_trade_explanation_linen = 2259
str_trade_explanation_velvet = 2260
str_trade_explanation_spice = 2261
str_trade_explanation_apples = 2262
str_trade_explanation_grapes = 2263
str_trade_explanation_dyes = 2264
str_trade_explanation_leatherwork = 2265
str_trade_explanation_flax = 2266
str_trade_explanation_dates = 2267
str_trade_explanation_dates = 2268
str_trade_explanation_olives = 2269
str_s10_has_reg4_needs_reg5 = 2270
str_s14_i_hear_that_you_can_find_a_good_price_for_it_in_s15 = 2271
str_s1_reg1 = 2272
str_s1_reg2 = 2273
str_s1_reg3 = 2274
str_s1_reg4 = 2275
str_s1_reg5 = 2276
str_s1_reg6 = 2277
str_s1_reg7 = 2278
str_s1_reg8 = 2279
str_s1_reg9 = 2280
str_reg13 = 2281
str_reg14 = 2282
str_reg15 = 2283
str_reg16 = 2284
str_reg17 = 2285
str_reg18 = 2286
str_reg19 = 2287
str_reg20 = 2288
str_reg21 = 2289
str_assigning_lords_to_empty_centers = 2290
str_assign_lords_to_empty_centers_just_happened = 2291
str_s4_of_the_s5_is_unassigned = 2292
str_s4_of_the_s5_is_reserved_for_player = 2293
str_s4_of_the_s5_has_no_fiefs = 2294
str_s4_unassigned_centers_plus_landless_lords_=_reg4 = 2295
str_s4_holds_s5_in_reserve = 2296
str_s2s_rebellion = 2297
str_political_suggestion = 2298
str_updating_volunteers_for_s4_faction_is_s5 = 2299
str_shuffling_companion_locations = 2300
str_s4_is_at_s5 = 2301
str_instability_reg0_of_lords_are_disgruntled_reg1_are_restless = 2302
str_reg1shehe_is_prisoner_of_s1 = 2303
str_s39_rival = 2304
str_s40 = 2305
str_s41_s39_rival = 2306
str_reputation_cheat_mode_only_martial_ = 2307
str_reputation_cheat_mode_only_debauched_ = 2308
str_reputation_cheat_mode_only_pitiless_ = 2309
str_reputation_cheat_mode_only_calculating_ = 2310
str_reputation_cheat_mode_only_quarrelsome_ = 2311
str_reputation_cheat_mode_only_goodnatured_ = 2312
str_reputation_cheat_mode_only_upstanding_ = 2313
str_reputation_cheat_mode_only_conventional_ = 2314
str_reputation_cheat_mode_only_adventurous_ = 2315
str_reputation_cheat_mode_only_romantic_ = 2316
str_reputation_cheat_mode_only_moralist_ = 2317
str_reputation_cheat_mode_only_ambitious_ = 2318
str_reputation_cheat_mode_only_reg11_ = 2319
str_love_interest = 2320
str_betrothed = 2321
str_s40_s39_s2_reg0 = 2322
str_other_relations_s40_ = 2323
str_relation_with_liege_reg0_ = 2324
str_sense_of_security_military_reg1_court_position_reg3_ = 2325
str_s46s45s44s48 = 2326
str_political_details_s47_ = 2327
str_checking_volunteer_availability_script = 2328
str_center_relation_at_least_zero = 2329
str_relationfaction_conditions_met = 2330
str_troops_available = 2331
str_party_has_capacity = 2332
str_personality_clash_conversation_begins = 2333
str_personality_match_conversation_begins = 2334
str_the_s55 = 2335
str_travellers_on_the_road = 2336
str_attack_on_travellers_found_reg3_hours_ago = 2337
str_trade_event_found_reg3_hours_ago = 2338
str_a_short_while_ago = 2339
str_one_day_ago = 2340
str_two_days_day_ago = 2341
str_earlier_this_week = 2342
str_about_a_week_ago = 2343
str_about_two_weeks_ago = 2344
str_several_weeks_ago = 2345
str_unknown_assailants = 2346
str_swadians = 2347
str_vaegirs = 2348
str_khergits = 2349
str_nords = 2350
str_rhodoks = 2351
str_sarranids = 2352
str_uesugi = 2353
str_date = 2354
str_oda = 2355
str_mori = 2356
str_takeda = 2357
str_tokugawa = 2358
str_miyoshi = 2359
str_amako = 2360
str_otomo = 2361
str_nanbu = 2362
str_asakura = 2363
str_chosokabe = 2364
str_hojo = 2365
str_mogami = 2366
str_shimazu = 2367
str_ryuzoji = 2368
str_satake = 2369
str_satomi = 2370
str_ukita = 2371
str_ikko = 2372
str_bandits = 2373
str_deserters = 2374
str_your_followers = 2375
str_we_have_heard_that_travellers_heading_to_s40_were_attacked_on_the_road_s46_by_s39 = 2376
str_s43_s44 = 2377
str_we_have_heard_that_travellers_coming_from_s40_were_attacked_on_the_road_s46_by_s39 = 2378
str_travellers_coming_from_s40_traded_here_s46 = 2379
str_s44 = 2380
str_it_is_still_early_in_the_caravan_season_so_we_have_seen_little_tradings42 = 2381
str_there_has_been_very_little_trading_activity_here_recentlys42 = 2382
str_there_has_some_trading_activity_here_recently_but_not_enoughs42 = 2383
str_there_has_some_trading_activity_here_recently_but_the_roads_are_dangerouss42 = 2384
str_the_roads_around_here_are_very_dangerouss42 = 2385
str_we_have_received_many_traders_in_town_here_although_there_is_some_danger_on_the_roadss42 = 2386
str_we_have_received_many_traders_in_town_heres42 = 2387
str_s44_s41 = 2388
str_s41 = 2389
str_there_is_little_news_about_the_caravan_routes_to_the_towns_of_s44_and_nearby_parts_but_no_news_is_good_news_and_those_are_therefore_considered_safe = 2390
str_s47_also_the_roads_to_the_villages_of_s44_and_other_outlying_hamlets_are_considered_safe = 2391
str_however_the_roads_to_the_villages_of_s44_and_other_outlying_hamlets_are_considered_safe = 2392
str_we_have_shortages_of = 2393
str_s33_s34_reg1 = 2394
str_we_have_adequate_stores_of_all_commodities = 2395
str_s33_and_some_other_commodities = 2396
str_the_roads_are_full_of_brigands_friend_but_that_name_in_particular_does_not_sound_familiar_good_hunting_to_you_nonetheless = 2397
str_less_than_an_hour_ago = 2398
str_maybe_reg3_hours_ago = 2399
str_reg3_days_ago = 2400
str_youre_in_luck_we_sighted_those_bastards_s16_near_s17_hurry_and_you_might_be_able_to_pick_up_their_trail_while_its_still_hot = 2401
str_you_speak_of_claims_to_the_throne_good_there_is_nothing_id_rather_do_than_fight_for_a_good_cause = 2402
str_you_speak_of_claims_to_the_throne_well_there_is_nothing_id_rather_do_than_fight_for_a_good_cause_but_the_claim_you_make_seems_somewhat_weak = 2403
str_i_am_pleased_that_you_speak_of_upholding_my_ancient_rights_which_are_sometimes_trod_upon_in_these_sorry_days = 2404
str_i_am_pleased_that_you_speak_of_upholding_my_ancient_rights_but_sometimes_men_make_pledges_before_they_are_king_which_they_cannot_keep_once_they_take_the_throne = 2405
str_you_speak_of_protecting_the_commons_well_i_supposed_thats_good_but_sometimes_the_commons_overstep_their_boundaries_im_more_concerned_that_your_claim_be_legal_so_i_can_swing_my_sword_with_a_good_conscience = 2406
str_you_speak_of_giving_me_land_good_i_ask_for_no_more_than_my_due = 2407
str_you_speak_of_giving_me_land_unfortunately_you_are_not_wellknown_for_rewarding_those_to_whom_you_have_made_such_offers = 2408
str_you_speak_of_unifying_calradia_well_i_believe_that_well_always_be_fighting__its_important_that_we_fight_for_a_rightful_cause = 2409
str_you_talk_of_claims_to_the_throne_but_i_leave_bickering_about_legalities_to_the_lawyers_and_clerks = 2410
str_you_speak_of_ruling_justly_hah_ill_believe_theres_such_a_thing_as_a_just_king_when_i_see_one = 2411
str_you_spoke_of_protecting_the_rights_of_the_nobles_if_you_did_youd_be_the_first_king_to_do_so_in_a_very_long_time = 2412
str_you_speak_of_giving_me_land_ay_well_lets_see_if_you_deliver = 2413
str_you_speak_of_giving_me_land_bah_youre_not_known_for_delivering_on_your_pledges = 2414
str_you_speak_of_unifying_calradia_well_youve_done_a_good_job_at_making_calradia_bend_its_knee_to_you_so_maybe_thats_not_just_talk = 2415
str_you_speak_of_unifying_calradia_id_be_impressed_if_i_thought_you_could_do_it_but_unfortunately_you_dont = 2416
str_you_speak_of_claims_to_the_throne_well_any_peasant_can_claim_to_be_a_kings_bastard = 2417
str_well_its_a_fine_thing_to_court_the_commons_with_promises_but_what_do_you_have_to_offer_me = 2418
str_you_speak_of_protecting_the_rights_of_lords_that_would_make_a_fine_change_if_my_rights_as_lord_would_be_respected = 2419
str_you_speak_of_protecting_the_rights_of_lords_that_would_make_a_fine_change_if_my_rights_as_lord_would_be_respected_however_it_is_easy_for_you_to_make_promises_while_you_are_weak_that_you_have_no_intention_of_keeping_when_you_are_strong = 2420
str_you_speak_of_giving_me_land_well_my_family_is_of_ancient_and_noble_lineage_so_you_promise_me_no_more_than_my_due_still_your_gesture_is_appreciated = 2421
str_you_speak_of_giving_me_land_well_you_make_that_pledge_but_i_am_not_impressed = 2422
str_you_speak_of_unifying_calradia_well_much_of_this_land_now_bends_its_knee_to_you_so_perhaps_that_is_not_just_talk = 2423
str_you_speak_of_unifying_calradia_but_right_now_yours_is_just_one_squabbling_faction_among_many = 2424
str_you_speak_of_claims_well_no_offense_but_a_claim_unsupported_by_might_rarely_prospers = 2425
str_you_speak_of_protecting_the_commons_well_i_suppose_that_will_make_for_a_more_prosperous_realm_ive_always_tried_to_treat_my_peasants_decently_saves_going_to_bed_worrying_about_whether_youll_wake_up_with_the_roof_on_fire = 2426
str_you_speak_of_protecting_the_commons_very_well_but_remember_that_peasants_are_more_likely_to_cause_trouble_if_you_make_promises_then_dont_deliver_than_if_you_never_made_the_promise_in_the_first_place = 2427
str_you_speak_of_protecting_the_rights_of_lords_good_youd_be_well_advised_to_do_that__men_fight_better_for_a_king_wholl_respect_their_rights = 2428
str_you_speak_of_protecting_the_rights_of_lords_very_well_but_remember__failing_to_keep_promises_which_you_made_while_scrambling_up_the_throne_is_the_quickest_way_to_topple_off_of_it_once_you_get_there = 2429
str_you_speak_of_giving_me_land_very_good_but_often_i_find_that_when_a_man_makes_too_many_promises_trying_to_get_to_the_top_he_has_trouble_keeping_them_once_he_reaches_it = 2430
str_you_speak_of_unifying_calradia_well_many_have_said_that_you_might_very_well_be_the_one_to_do_it = 2431
str_you_speak_of_unifying_calradia_well_all_the_kings_say_that_im_not_sure_that_you_will_succeed_while_they_fail = 2432
str_you_speak_of_claims_do_you_think_i_care_for_the_nattering_of_lawyers = 2433
str_you_speak_of_protecting_the_commons_how_kind_of_you_i_shall_tell_my_swineherd_all_about_your_sweet_promises_no_doubt_he_will_become_your_most_faithful_vassal = 2434
str_you_speak_of_protecing_the_rights_of_lords_such_sweet_words_but_ill_tell_you_this__the_only_rights_that_are_respected_in_this_world_are_the_rights_to_dominate_whoever_is_weaker_and_to_submit_to_whoever_is_stronger = 2435
str_you_speak_of_giving_me_land_yes_very_good__but_you_had_best_deliver = 2436
str_you_speak_of_giving_me_land_hah_perhaps_all_those_others_to_whom_you_promised_lands_will_simply_step_aside = 2437
str_you_speak_of_unifying_calradia_you_may_indeed_humble_the_other_kings_of_this_land_and_in_that_case_i_would_hope_that_you_would_remember_me_as_your_faithful_servant = 2438
str_you_speak_of_unifying_calradia_but_you_are_weak_and_i_think_that_you_will_remain_weak = 2439
str_you_speak_of_claims_its_good_for_a_king_to_have_a_strong_claim_although_admittedly_im_more_concerned_that_he_rules_just_ly_than_with_legalities_anyway_your_claim_seems_wellfounded_to_me = 2440
str_you_speak_of_claims_but_your_claim_seems_a_bit_weak_to_me = 2441
str_you_speak_of_protecting_the_commons_i_like_that_my_tenants_are_a_happy_lot_i_think_but_i_hear_of_others_in_other_estates_that_arent_so_fortunate = 2442
str_you_speak_of_protecting_the_commons_im_glad_to_hear_you_say_that_but_do_me_a_favor__dont_promise_the_commons_anything_you_cant_deliver_thats_a_sure_way_to_get_them_to_rebel_and_it_breaks_my_heart_to_have_to_put_them_down = 2443
str_you_speak_of_protecting_the_rights_of_lords_well_very_good_i_suppose_but_you_know__we_lords_can_take_of_ourselves_its_the_common_folk_who_need_a_strong_king_to_look_out_for_them_to_my_mind = 2444
str_you_speak_of_giving_me_land_its_kind_of_you_really_though_that_is_not_necessary = 2445
str_you_speak_of_unifying_calradia_well_maybe_you_can_unite_this_land_by_the_sword_but_im_not_sure_that_this_will_make_you_a_good_ruler = 2446
str_you_speak_of_claims_a_king_must_have_a_strong_legal_claim_for_there_not_to_be_chaos_in_the_realm_and_yours_is_wellestablished = 2447
str_you_speak_of_claims_a_king_must_have_a_strong_legal_claim_for_there_not_to_be_chaos_in_the_realm_but_your_claim_is_not_so_strong = 2448
str_you_speak_of_protecting_the_rights_of_lords_it_is_of_course_important_that_a_king_respect_the_rights_of_his_vassals_although_i_worry_that_a_king_who_took_a_throne_without_proper_cause_would_not_rule_with_justice = 2449
str_you_speak_of_protecting_the_rights_of_lords_it_is_of_course_important_that_a_king_respect_the_rights_of_his_vassals_however_i_would_like_to_know_that_you_would_indeed_deliver_on_your_promises = 2450
str_you_speak_of_protecting_the_commons_i_would_be_pleased_to_serve_a_king_who_respected_the_rights_of_his_subjects_although_i_worry_that_a_king_who_took_a_throne_without_proper_cause_would_not_rule_with_justice = 2451
str_you_speak_of_protecting_the_commons_i_would_be_pleased_to_serve_a_king_who_respected_the_rights_of_his_subjects_however_i_would_like_to_know_that_you_would_indeed_deliver_on_your_promises = 2452
str_i_am_not_swayed_by_promises_of_reward = 2453
str_you_speak_of_unifying_calradia_it_would_be_good_to_bring_peace_to_the_realm_and_i_believe_that_you_are_strong_enough_to_do_so = 2454
str_you_speak_of_unifying_calradia_it_would_be_good_to_bring_peace_the_realm_but_with_your_kingdom_in_its_current_state_i_worry_that_you_are_just_bringing_more_discord = 2455
str_s15 = 2456
str_my_s11_s15 = 2457
str_stop_gap__s15_is_the_rival_of_s16 = 2458
str_my_s11_s18 = 2459
str_the_socalled_s11_s18 = 2460
str_s18_would_cheat_me_of_my_inheritance_by_heaven_i_know_my_rights_and_im_not_going_to_back_down = 2461
str_s18_once_questioned_my_honour_and_my_bravery_i_long_for_the_day_when_i_can_meet_him_in_battle_and_make_him_retract_his_statement = 2462
str_s18_once_questioned_my_judgment_in_battle_by_heaven_would_he_have_us_shirk_our_duty_to_smite_our_sovereigns_foes = 2463
str_s18_seems_to_think_he_has_the_right_to_some_of_my_property_well_he_does_not = 2464
str_s18_once_took_something_i_said_amiss_stubborn_bastard_wont_give_it_up_and_keeps_trying_to_get_me_to_recant_my_words = 2465
str_s18_is_a_crafty_weasel_and_i_dont_trust_him_one_bit = 2466
str_s18_i_despite_him_he_puts_on_such_a_nauseating_display_of_virtue_and_thinks_nothing_of_insulting_his_betters = 2467
str_s18_entered_into_a_little_deal_with_me_and_is_now_trying_to_wriggle_out_of_it = 2468
str_s18_once_ran_an_errand_for_me_and_now_thinks_i_owe_him_something_i_owe_his_ilk_nothing = 2469
str_s18_is_soft_and_weak_and_not_fit_to_govern_a_fief_and_i_have_always_detested_him = 2470
str_s18_is_a_quarrelsome_oaf_and_a_liability_in_my_opinion_and_ive_let_him_know_as_much = 2471
str_s18_i_am_sorry_to_say_is_far_too_softhearted_a_man_to_be_given_any_kind_of_responsibility_his_chivalry_will_allow_the_enemy_to_flee_to_fight_another_day_and_will_cost_the_lives_of_my_own_faithful_men = 2472
str_s18_seems_to_have_something_against_me_for_some_reason_i_dont_like_to_talk_ill_of_people_but_i_think_hes_can_be_a_bit_of_a_cad_sometimes = 2473
str_s18_has_always_treated_me_contemptuously_although_i_have_done_him_no_wrong = 2474
str_s18_is_thoroughly_dishonorable_and_a_compulsive_spinner_of_intrigues_which_i_fear_will_drag_us_into_wars_or_incite_rebellions = 2475
str_s18_disappoints_me_i_once_scolded_for_his_rashness_in_battle_and_he_took_offense_i_do_not_care_to_apologize_for_my_efforts_to_save_his_life_and_the_lives_of_his_men = 2476
str_s18_squanders_money_and_carouses_in_a_way_most_unbefitting_a_noble_by_doing_so_he_disgraces_us_all = 2477
str_s18_has_been_speaking_ill_of_me_behind_my_back_or_so_they_say = 2478
str_s18_is_a_disgrace_reg3shehe_consorts_with_merchants_lends_money_at_interest_uses_coarse_language_and_shows_no_attempt_to_uphold_the_dignity_of_the_honor_bestowed_upon_reg3herhim = 2479
str_s18_has_condemned_me_for_engaging_in_commerce_what_could_possibly_be_wrong_with_that = 2480
str_s18_i_have_heard_has_been_encouraging_seditious_ideas_among_the_peasantry__a_foolish_move_which_endangers_us_all = 2481
str_s18_has_called_me_out_for_the_way_i_deal_with_my_tenants_well_so_be_it_if_i_teach_them_that_they_are_the_equal_of_anyone_with_socalled_gentle_blood_what_is_it_to_reg3herhim = 2482
str_a_most_gallant_gentleman_who_knows_how_to_treat_a_lady = 2483
str_a_base_cad = 2484
str_a_man_who_treats_me_as_his_equal_which_is_rare = 2485
str_appears_to_value_me_with_his_estate_and_his_horse_as_prizes_worth_having = 2486
str_a_bit_dull_but_what_can_you_expect = 2487
str_the_man_whom_destiny_intends_for_me = 2488
str_is_not_right_for_me__i_cannot_say_why_but_he_makes_my_skin_crawl = 2489
str_is_a_man_who_clearly_intends_to_make_his_mark_in_the_world = 2490
str_is_a_layabout_a_naif_prey_for_others_who_are_cleverer_than_he = 2491
str_is_a_man_of_stalwart_character = 2492
str_appears_to_be_a_man_of_low_morals = 2493
str_appears_to_be_a_man_who_lacks_selfdiscipline = 2494
str_check_reg8_s4_reconciles_s5_and_s6_ = 2495
str_diagnostic__player_should_receive_consultation_quest_here_if_not_already_active = 2496
str_check_reg8_s4_rules_in_s5s_favor_in_quarrel_with_s6_ = 2497
str_check_reg8_new_rivalry_generated_between_s5_and_s6 = 2498
str_check_reg8_s5_attempts_to_win_over_s6 = 2499
str_s1_has_no_lords = 2500
str_do_political_consequences_for_s4_victory_over_s5 = 2501
str_bandits_attacked_a_party_on_the_roads_so_a_bounty_is_probably_available = 2502
str_travellers_attacked_on_road_from_s15_to_s16 = 2503
str_s15_shares_joy_of_victory_with_s16 = 2504
str_faction_marshall_s15_involved_in_defeat = 2505
str_player_faction_marshall_involved_in_defeat = 2506
str_s14_of_s15_defeated_in_battle_loses_one_point_relation_with_liege = 2507
str_s14_defeated_in_battle_by_s15_loses_one_point_relation = 2508
str_s14_blames_s15_for_defeat = 2509
str_s32_is_undeclared_rebel = 2510
str_small_bonus_for_no_base = 2511
str_s15_considered_member_of_faction_s20_weight_of_reg15 = 2512
str_checking_for_recruitment_argument_reg24 = 2513
str_g_talk_troop_s20_evaluates_being_vassal_to_s22_of_s21 = 2514
str_base_result_for_security_reg1 = 2515
str_result_for_security_weighted_by_personality_reg2 = 2516
str_base_result_for_political_connections_reg3 = 2517
str_result_for_political_connections_weighted_by_personality_reg4 = 2518
str_result_for_argument_strength_reg7 = 2519
str_result_for_argument_appeal_reg17 = 2520
str_combined_result_for_argument_modified_by_persuasion_reg8 = 2521
str_base_changing_sides_penalty_reg9 = 2522
str_changing_sides_penalty_weighted_by_personality_reg10 = 2523
str_combined_bonuses_and_penalties_=_reg0 = 2524
str_intrigue_test_troop_party_is_active = 2525
str_intrigue_test_troop_party_is_not_in_battle = 2526
str_intrigue_test_troop_is_not_prisoner = 2527
str_intrigue_test_troop_is_nearby = 2528
str_s20_relation_with_s15_changed_by_reg4_to_reg14 = 2529
str_total_additions_reg4 = 2530
str_total_subtractions_reg4 = 2531
str_checking_lord_reactions_in_s15 = 2532
str_s14_protests_the_appointment_of_s15_as_marshall = 2533
str_s11_relocates_to_s10 = 2534
str_checking_s3_at_s5_with_s11_relationship_with_s4_score_reg0 = 2535
str_s3_feast_concludes_at_s4 = 2536
str_attendance_reg3_nobles_out_of_reg4 = 2537
str_romantic_chemistry_reg0 = 2538
str_personality_modifier_reg2 = 2539
str_renown_modifier_reg3 = 2540
str_final_score_reg0 = 2541
str_s4_pursues_suit_with_s5_in_s7 = 2542
str_note__favor_event_logged = 2543
str_result_lady_forced_to_agree_to_engagement = 2544
str_result_lady_rejects_suitor = 2545
str_result_happy_engagement_between_s4_and_s5 = 2546
str_result_s4_elopes_with_s5 = 2547
str_result_s4_reluctantly_agrees_to_engagement_with_s5 = 2548
str_result_stalemate_patience_roll_=_reg3 = 2549
str_s3_marries_s4_at_s5 = 2550
str__i_must_attend_to_this_matter_before_i_worry_about_the_affairs_of_the_realm = 2551
str_the_other_matter_took_precedence = 2552
str_i_cannot_leave_this_fortress_now_as_it_is_under_siege = 2553
str_after_all_we_are_under_siege = 2554
str_we_are_not_strong_enough_to_face_the_enemy_out_in_the_open = 2555
str_i_should_probably_seek_shelter_behind_some_stout_walls = 2556
str_enemies_are_reported_to_be_nearby_and_we_should_stand_ready_to_either_man_the_walls_or_sortie_out_to_do_battle = 2557
str_the_enemy_is_nearby = 2558
str_as_the_marshall_i_am_assembling_the_army_of_the_realm = 2559
# -*- coding: utf-8 -*-
import os
import abc
import yaml
from datetime import datetime
import pytsk3
from dfvfs.helpers import file_system_searcher
from dfvfs.resolver import resolver as path_spec_resolver
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.lib import tsk_image
from engine import path_extractors
from engine import path_helper
from advanced_modules import logger
class BaseAnalyzer(object):
NAME = 'base_analyzer'
DESCRIPTION = ''
_plugin_classes = None
def __init__(self):
super(BaseAnalyzer, self).__init__()
self._default_plugin = None
self._plugins = None
self._schema = None
self.EnablePlugins([])
@classmethod
def DeregisterPlugin(cls, plugin_class):
plugin_name = plugin_class.NAME.lower()
if plugin_name not in cls._plugin_classes:
raise KeyError(
'Plugin class not set for name: {0:s}.'.format(
plugin_class.NAME))
del cls._plugin_classes[plugin_name]
def EnablePlugins(self, plugin_includes):
self._plugins = []
if not self._plugin_classes:
return
default_plugin_name = '{0:s}_default'.format(self.NAME)
for plugin_name, plugin_class in iter(self._plugin_classes.items()):
if plugin_name == default_plugin_name:
self._default_plugin = plugin_class()
continue
if plugin_includes and plugin_name not in plugin_includes:
continue
plugin_object = plugin_class()
self._plugins.append(plugin_object)
"""
if "*" in plugin_includes:
for _, plugin_class in iter(self._plugin_classes.items()):
plugin_object = plugin_class()
self._plugins.append(plugin_object)
"""
# TODO: move this to a filter.
# pylint: disable=redundant-returns-doc
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: a format specification or None if not available.
"""
return
@classmethod
def GetPluginObjectByName(cls, plugin_name):
"""Retrieves a specific plugin object by its name.
Args:
plugin_name (str): name of the plugin.
Returns:
BasePlugin: a plugin object or None if not available.
"""
plugin_class = cls._plugin_classes.get(plugin_name, None)
if plugin_class:
return plugin_class()
return None
@classmethod
def GetPlugins(cls):
"""Retrieves the registered plugins.
Yields:
tuple[str, type]: name and class of the plugin.
"""
for plugin_name, plugin_class in iter(cls._plugin_classes.items()):
yield plugin_name, plugin_class
@classmethod
def RegisterPlugin(cls, plugin_class):
"""Registers a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class (type): class of the plugin.
Raises:
KeyError: if plugin class is already set for the corresponding name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name in cls._plugin_classes:
raise KeyError((
'Plugin class already set for name: {0:s}.').format(
plugin_class.NAME))
cls._plugin_classes[plugin_name] = plugin_class
@classmethod
def RegisterPlugins(cls, plugin_classes):
"""Registers plugin classes.
Args:
plugin_classes (list[type]): classes of plugins.
Raises:
KeyError: if plugin class is already set for the corresponding name.
"""
for plugin_class in plugin_classes:
cls.RegisterPlugin(plugin_class)
@classmethod
def SupportsPlugins(cls):
"""Determines if a caller supports plugins.
Returns:
bool: True if the caller supports plugins.
"""
return cls._plugin_classes is not None
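# Illustrative sketch, not part of the original module: how a concrete analyzer
# could hook into the plugin registry above. The names ExampleAnalyzer,
# ExamplePlugin and their NAME values are hypothetical; kept as comments so the
# module's import-time behaviour is unchanged.
#
#   class ExamplePlugin(object):
#       NAME = 'example_default'
#
#   class ExampleAnalyzer(BaseAnalyzer):
#       NAME = 'example'
#       _plugin_classes = {}
#
#   ExampleAnalyzer.RegisterPlugin(ExamplePlugin)   # keyed by 'example_default'
#   analyzer = ExampleAnalyzer()                    # __init__ calls EnablePlugins([])
#   assert ExampleAnalyzer.SupportsPlugins()        # _plugin_classes is now a dict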
class AdvancedModuleAnalyzer(BaseAnalyzer):
def __init__(self):
super(AdvancedModuleAnalyzer, self).__init__()
self._path_spec_extractor = path_extractors.PathSpecExtractor()
@abc.abstractmethod
def Analyze(self, par_id, configuration, source_path_spec, knowledge_base):
"""
Analyze
"""
def BuildFindSpecs(self, paths, path_separator, environment_variables=None):
"""Builds find specifications from path filters.
        Args:
            paths (list[str]): path filter expressions.
            path_separator (str): path segment separator used in the filter paths.
            environment_variables (Optional[list[EnvironmentVariableArtifact]]):
                environment variables.
Returns:
list[dfvfs.FindSpec]: find specifications for the file source type.
"""
find_specs = []
for path in paths:
            # Since paths are regular expressions, the path separator is escaped.
            if path_separator == '\\':
                path_separator = '\\\\'
expand_path = False
path_segments = path.split(path_separator)
for index, path_segment in enumerate(path_segments):
if len(path_segment) <= 2:
continue
if path_segment[0] == '{' and path_segment[-1] == '}':
# Rewrite legacy path expansion attributes, such as {systemroot}
# into %SystemRoot%.
path_segment = '%{0:s}%'.format(path_segment[1:-1])
path_segments[index] = path_segment
if path_segment[0] == '%' and path_segment[-1] == '%':
expand_path = True
if expand_path:
path_segments = path_helper.PathHelper.ExpandWindowsPathSegments(
path_segments, environment_variables)
if path_segments[0] != '':
logger.warning(
'The path filter must be defined as an absolute path: ''{0:s}'.format(path))
continue
# Strip the root path segment.
path_segments.pop(0)
if not path_segments[-1]:
logger.warning(
'Empty last path segment in path: {0:s}'.format(path))
continue
find_spec = file_system_searcher.FindSpec(
case_sensitive=False, location_regex=path_segments)
find_specs.append(find_spec)
return find_specs
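    # Illustrative sketch (assumption, not from the original code): building find
    # specifications from Windows-style path filters. The filter paths, the
    # environment_variables list and the analyzer instance are placeholders for
    # whatever the concrete module supplies.
    #
    #   paths = ['%SystemRoot%\\System32\\config\\SAM',
    #            '{systemroot}\\Prefetch\\*.pf']
    #   find_specs = analyzer.BuildFindSpecs(
    #       paths, path_separator='\\',
    #       environment_variables=environment_variables)
    #   # each dfvfs FindSpec can then be passed to the path spec extractor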
def get_tsk_file_system(self, source_path_spec, configuration):
file_object = path_spec_resolver.Resolver.OpenFileObject(
source_path_spec.parent, resolver_context=configuration.resolver_context)
tsk_image_object = tsk_image.TSKFileSystemImage(file_object)
tsk_file_system = pytsk3.FS_Info(tsk_image_object)
return tsk_file_system
def extract_file_object(self, tsk_file_system, inode):
f = tsk_file_system.open_meta(inode)
return f.read_random(0, f.info.meta.size)
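    # Illustrative sketch (assumption): pairing get_tsk_file_system() with
    # extract_file_object() to read a file's raw bytes by its MFT entry / inode
    # number. The inode value below is a placeholder, not a real entry.
    #
    #   tsk_file_system = analyzer.get_tsk_file_system(source_path_spec, configuration)
    #   raw_bytes = analyzer.extract_file_object(tsk_file_system, inode=0)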
def LoadTargetFileToMemory(self, source_path_spec, configuration,
file_path=None, file_spec=None, data_stream_name=None):
try:
if not file_spec:
find_spec = file_system_searcher.FindSpec(
case_sensitive=False, location=file_path, location_separator=source_path_spec.location)
else:
find_spec = file_spec
        except ValueError as exception:
            logger.error(
                'Unable to build find specification for path: "{0:s}" with '
                'error: {1!s}'.format(file_path, exception))
            return False
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
[source_path_spec], find_specs=[find_spec], recurse_file_system=False,
resolver_context=configuration.resolver_context)
for path_spec in path_spec_generator:
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(path_spec)
try:
file_entry = path_spec_resolver.Resolver.OpenFileEntry(
path_spec, resolver_context=configuration.resolver_context)
if file_entry is None or not file_entry.IsFile():
logger.warning(
'Unable to open file entry with path spec: {0:s}'.format(
display_name))
return False
if data_stream_name:
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
return False
return file_object
elif not data_stream_name:
file_object = file_entry.GetFileObject()
if not file_object:
return False
return file_object
except KeyboardInterrupt:
return False
def ExtractTargetFileToPath(self, source_path_spec, configuration,
file_path=None, file_spec=None, output_path=None, data_stream_name=None):
        # TODO: handle the case where a find_spec is passed in; see LoadTargetFileToMemory for reference.
try:
if not file_spec:
find_spec = file_system_searcher.FindSpec(
case_sensitive=False, location=file_path, location_separator=source_path_spec.location)
else:
find_spec = file_spec
        except ValueError as exception:
            logger.error(
                'Unable to build find specification for path: "{0:s}" with '
                'error: {1!s}'.format(file_path, exception))
            return False
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
[source_path_spec], find_specs=[find_spec], recurse_file_system=False,
resolver_context=configuration.resolver_context)
for path_spec in path_spec_generator:
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(path_spec)
try:
file_entry = path_spec_resolver.Resolver.OpenFileEntry(
path_spec, resolver_context=configuration.resolver_context)
if file_entry is None or not file_entry.IsFile():
logger.warning(
'Unable to open file entry with path spec: {0:s}'.format(
display_name))
return False
if data_stream_name:
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
return False
try:
buffer_size = 65536
file = open(output_path + os.sep + file_entry.name + '_' + data_stream_name, 'wb')
file_object.seek(0, os.SEEK_SET)
data = file_object.read(buffer_size)
while data:
file.write(data)
data = file_object.read(buffer_size)
file.close()
except IOError as exception:
# TODO: replace location by display name.
location = getattr(file_entry.path_spec, 'location', '')
logger.error(
'Failed to extract file "{0:s}" : {1!s}'.format(data_stream_name, exception))
return False
finally:
file_object.close()
elif not data_stream_name:
file_object = file_entry.GetFileObject()
if not file_object:
return False
try:
buffer_size = 65536
file = open(output_path + os.sep + file_entry.name, 'wb')
file_object.seek(0, os.SEEK_SET)
data = file_object.read(buffer_size)
while data:
file.write(data)
data = file_object.read(buffer_size)
file.close()
except IOError as exception:
logger.error(
'Failed to extract file "{0:s}" : {1!s}'.format(display_name, exception))
finally:
file_object.close()
except KeyboardInterrupt:
return False
def ExtractTargetDirToPath(self, source_path_spec, configuration, dir_path=None, file_spec=None, output_path=None):
"""Extract target directory to path
Args:
source_path_spec:
configuration:
dir_path:
output_path:
"""
try:
if not file_spec:
find_spec = file_system_searcher.FindSpec(
case_sensitive=False, location=dir_path,
location_separator=source_path_spec.location)
else:
find_spec = file_spec
        except ValueError as exception:
            logger.error(
                'Unable to build find specification for path: "{0:s}" with '
                'error: {1!s}'.format(dir_path, exception))
            return False
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
[source_path_spec], find_specs=[find_spec], recurse_file_system=False,
resolver_context=configuration.resolver_context)
for path_spec in path_spec_generator:
self.DirectoryTraversal(path_spec, output_path)
def DirectoryTraversal(self, path_spec, output_path):
if not os.path.exists(output_path):
os.mkdir(output_path)
_path_specs = []
_path_specs.append(path_spec)
self.RecursiveDirOrFileSearch(path_spec, output_path)
def RecursiveDirOrFileSearch(self, path_spec, output_path):
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(path_spec)
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
if file_entry is None:
logger.warning(
'Unable to open file entry with path spec: {0:s}'.format(
display_name))
return False
if file_entry.IsDirectory():
if not os.path.exists(output_path + os.sep + file_entry.name):
os.mkdir(output_path + os.sep + file_entry.name)
for sub_file_entry in file_entry.sub_file_entries:
try:
if not sub_file_entry.IsAllocated():
continue
except dfvfs_errors.BackEndError as exception:
logger.warning(
'Unable to process file: {0:s} with error: {1!s}'.format(
sub_file_entry.path_spec.comparable.replace(
'\n', ';'), exception))
continue
if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':
continue
self.RecursiveDirOrFileSearch(sub_file_entry.path_spec, output_path + os.sep + file_entry.name)
if file_entry.IsFile():
for data_stream in file_entry.data_streams:
file_object = file_entry.GetFileObject(data_stream_name=data_stream.name)
if not file_object:
return False
try:
buffer_size = 65536
file = open(output_path + os.sep + file_entry.name, 'wb')
file_object.seek(0, os.SEEK_SET)
data = file_object.read(buffer_size)
while data:
file.write(data)
data = file_object.read(buffer_size)
file.close()
except IOError as exception:
print(display_name)
logger.error(
'Failed to extract file "{0:s}" : {1!s}'.format(display_name, exception))
finally:
file_object.close()
    def LoadSchemaFromYaml(self, _yaml_path):
        if not os.path.exists(_yaml_path):
            return False
        with open(_yaml_path, 'r') as yaml_data:
            self._schema = yaml.safe_load(yaml_data.read())
        if not self._schema:
            return False
        return True
def CreateTable(self, _cursor, _standalone_check=False):
if not self._schema:
return False
# Read YAML Data Stream
name = self._schema['Name']
table = self._schema['Table'][0]['TableName']
columns = self._schema['Table'][0]['Columns']
types = self._schema['Table'][0]['Types']
query = ["CREATE TABLE ", table, "("]
for i in range(len(columns)):
if _standalone_check:
# For standalone mode: escape special character like '$'
query.append('"' + columns[i] + '" ')
else:
query.append(columns[i] + ' ')
if columns[i][:11].lower() == 'foreign key' or columns[i][:11].lower() == 'primary key':
pass
else:
for j in range(0, len(types[i])):
                    if j == len(types[i]) - 1:
query.append(types[i][j])
else:
query.append(types[i][j] + " ")
if i != (len(columns) - 1):
query.append(",")
else:
query.append(");")
query = ''.join(query)
_cursor.execute_query(query)
return True
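    # Illustrative sketch (assumption): a minimal YAML layout matching the keys
    # read above by LoadSchemaFromYaml()/CreateTable(). The artifact, table and
    # column names are made up for illustration.
    #
    #   Name: example_artifact
    #   Table:
    #     - TableName: example_table
    #       Columns: [id, par_id, source]
    #       Types: [[INTEGER, PRIMARY KEY], [TEXT], [TEXT]]
    #
    # With that schema loaded, CreateTable() would emit roughly:
    #   CREATE TABLE example_table(id INTEGER PRIMARY KEY,par_id TEXT,source TEXT);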
def CreateTableWithSchema(self, _cursor, _table_name, _schema, _standalone_check=False):
if not _schema:
return False
query = ["CREATE TABLE ", _table_name, "("]
for i in range(0, len(_schema)):
if _standalone_check:
# For standalone mode: escape special character like '$'
query.append('"' + _schema[i] + '" ')
else:
query.append('`' + _schema[i] + '` TEXT')
if _schema[i][:11].lower() == 'foreign key' or _schema[i][:11].lower() == 'primary key':
pass
else:
                # TODO:
            # raise ValueError if the node is not verified
self._verify_node(node)
# check the dag itself
if not self._verify_dag():
return False
return True
def _verify_node(self, node):
# type: (PipelineController.Node) -> bool
"""
Raise ValueError on verification errors
:return: Return True iff the specific node is verified
"""
if not node.base_task_id:
raise ValueError("Node '{}', base_task_id is empty".format(node.name))
if not self._default_execution_queue and not node.queue:
raise ValueError("Node '{}' missing execution queue, "
"no default queue defined and no specific node queue defined".format(node.name))
task = Task.get_task(task_id=node.base_task_id)
if not task:
raise ValueError("Node '{}', base_task_id={} is invalid".format(node.name, node.base_task_id))
pattern = self._step_ref_pattern
for v in node.parameters.values():
if isinstance(v, str):
for g in pattern.findall(v):
self.__verify_step_reference(node, g)
return True
def _verify_dag(self):
# type: () -> bool
"""
:return: True iff the pipeline dag is fully accessible and contains no cycles
"""
visited = set()
prev_visited = None
while prev_visited != visited:
prev_visited = copy(visited)
for k, node in self._nodes.items():
if k in visited:
continue
if not all(p in visited for p in node.parents or []):
continue
visited.add(k)
# return False if we did not cover all the nodes
return not bool(set(self._nodes.keys()) - visited)
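    # Worked example (not from the original source) of the sweep above: with
    # nodes {a: no parents, b: parent a, c: parent b} the visited set grows
    # {a} -> {a, b} -> {a, b, c} and the method returns True; with a cycle
    # {a: parent b, b: parent a} nothing is ever visited, the set stops changing
    # after one pass and the method returns False.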
def _launch_node(self, node):
# type: (PipelineController.Node) -> ()
"""
Launch a single node (create and enqueue a ClearmlJob)
:param node: Node to launch
:return: Return True if a new job was launched
"""
if node.job or node.executed:
return False
updated_hyper_parameters = {}
for k, v in node.parameters.items():
updated_hyper_parameters[k] = self._parse_step_ref(v)
task_overrides = self._parse_task_overrides(node.task_overrides) if node.task_overrides else None
extra_args = dict()
if self._target_project:
extra_args['project'] = get_or_create_project(
session=self._task.session if self._task else Task.default_session,
project_name=self._target_project)
skip_node = None
if self._pre_step_callbacks.get(node.name):
skip_node = self._pre_step_callbacks[node.name](self, node, updated_hyper_parameters)
if skip_node is False:
node.skip_job = True
return True
node.job = ClearmlJob(
base_task_id=node.base_task_id, parameter_override=updated_hyper_parameters,
tags=['pipe: {}'.format(self._task.id)] if self._add_pipeline_tags and self._task else None,
parent=self._task.id if self._task else None,
disable_clone_task=not node.clone_task,
task_overrides=task_overrides,
allow_caching=node.cache_executed_step,
**extra_args
)
if self._experiment_created_cb:
skip_node = self._experiment_created_cb(self, node, updated_hyper_parameters)
if skip_node is False:
# skipping node
getLogger('clearml.automation.controller').warning(
'Skipping node {} on callback request'.format(node))
# delete the job we just created
node.job.delete()
node.skip_job = True
elif node.job.is_cached_task():
node.executed = node.job.task_id()
else:
return node.job.launch(queue_name=node.queue or self._default_execution_queue)
return True
def _update_execution_plot(self):
# type: () -> ()
"""
Update sankey diagram of the current pipeline
"""
if not self._task:
return
sankey_node = dict(
label=[],
color=[],
hovertemplate='%{label}<extra></extra>',
# customdata=[],
# hovertemplate='%{label}<br />Hyper-Parameters:<br />%{customdata}<extra></extra>',
)
sankey_link = dict(
source=[],
target=[],
value=[],
# hovertemplate='%{target.label}<extra></extra>',
hovertemplate='<extra></extra>',
)
visited = []
node_params = []
nodes = list(self._nodes.values())
while nodes:
next_nodes = []
for node in nodes:
if not all(p in visited for p in node.parents or []):
next_nodes.append(node)
continue
visited.append(node.name)
idx = len(visited) - 1
parents = [visited.index(p) for p in node.parents or []]
                node_params.append((node.job.task_parameter_override if node.job else node.parameters) or {})
# sankey_node['label'].append(node.name)
# sankey_node['customdata'].append(
# '<br />'.join('{}: {}'.format(k, v) for k, v in (node.parameters or {}).items()))
sankey_node['label'].append(
'{}<br />'.format(node.name) +
'<br />'.join('{}: {}'.format(k, v if len(str(v)) < 24 else (str(v)[:24]+' ...'))
for k, v in (node.parameters or {}).items()))
sankey_node['color'].append(self._get_node_color(node))
for p in parents:
sankey_link['source'].append(p)
sankey_link['target'].append(idx)
sankey_link['value'].append(1)
nodes = next_nodes
# make sure we have no independent (unconnected) nodes
single_nodes = []
for i in [n for n in range(len(visited)) if n not in sankey_link['source'] and n not in sankey_link['target']]:
single_nodes.append(i)
# create the sankey graph
dag_flow = dict(
link=sankey_link,
node=sankey_node,
textfont=dict(color='rgba(0,0,0,0)', size=1),
type='sankey',
orientation='h'
)
table_values = self._build_table_report(node_params, visited)
# hack, show single node sankey
if single_nodes:
singles_flow = dict(
x=list(range(len(single_nodes))), y=[1] * len(single_nodes),
text=[v for i, v in enumerate(sankey_node['label']) if i in single_nodes],
mode='markers',
hovertemplate="%{text}<extra></extra>",
marker=dict(
color=[v for i, v in enumerate(sankey_node['color']) if i in single_nodes],
size=[40] * len(single_nodes),
),
showlegend=False,
type='scatter',
)
# only single nodes
if len(single_nodes) == len(sankey_node['label']):
fig = dict(data=[singles_flow], layout={
'hovermode': 'closest', 'xaxis': {'visible': False}, 'yaxis': {'visible': False}})
else:
dag_flow['domain'] = {'x': [0.0, 1.0], 'y': [0.2, 1.0]}
fig = dict(data=[dag_flow, singles_flow],
layout={'autosize': True,
'hovermode': 'closest',
'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'visible': False},
'yaxis': {'anchor': 'x', 'domain': [0.0, 0.15], 'visible': False}
})
else:
# create the sankey plot
fig = dict(data=[dag_flow], layout={'xaxis': {'visible': False}, 'yaxis': {'visible': False}})
# report DAG
self._task.get_logger().report_plotly(
title='Pipeline', series='Execution Flow', iteration=0, figure=fig)
# report detailed table
self._task.get_logger().report_table(
title='Pipeline Details', series='Execution Details', iteration=0, table_plot=table_values)
def _build_table_report(self, node_params, visited):
# type: (List, List) -> List[List]
"""
Create the detailed table report on all the jobs in the pipeline
:param node_params: list of node parameters
:param visited: list of nodes
:return: Table as List of List of strings (cell)
"""
task_link_template = self._task.get_output_log_web_page() \
.replace('/{}/'.format(self._task.project), '/{project}/') \
.replace('/{}/'.format(self._task.id), '/{task}/')
table_values = [["Pipeline Step", "Task ID", "Task Name", "Status", "Parameters"]]
for name, param in zip(visited, node_params):
param_str = str(param)
if len(param_str) > 3:
# remove {} from string
param_str = param_str[1:-1]
step_name = name
if self._nodes[name].base_task_id:
step_name += '\n[<a href="{}"> {} </a>]'.format(
task_link_template.format(project='*', task=self._nodes[name].base_task_id), 'base task')
table_values.append(
[step_name,
self.__create_task_link(self._nodes[name], task_link_template),
self._nodes[name].job.task.name if self._nodes[name].job else '',
self.__get_node_status(self._nodes[name]),
param_str]
)
return table_values
@staticmethod
def _get_node_color(node):
        # type: (PipelineController.Node) -> str
"""
Return the node color based on the node/job state
:param node: A node in the pipeline
:return: string representing the color of the node (e.g. "red", "green", etc)
"""
if not node:
return ""
if node.executed is not None:
if node.job and node.job.is_failed():
return "red" # failed job
elif node.job and node.job.is_cached_task():
return "darkslateblue"
elif not node.job or node.job.is_completed():
return "blue" # completed job
else:
return "royalblue" # aborted job
elif node.job:
if node.job.is_pending():
return "#bdf5bd" # lightgreen, pending in queue
else:
return "green" # running job
elif node.skip_job:
return "gray" # skipped job
else:
return "lightsteelblue" # pending job
def _force_task_configuration_update(self):
pipeline_dag = self._serialize()
if self._task:
# noinspection PyProtectedMember
self._task._set_configuration(
name=self._config_section, config_type='dictionary', config_dict=pipeline_dag)
def _daemon(self):
# type: () -> ()
"""
The main pipeline execution loop. This loop is executed on its own dedicated thread.
:return:
"""
pooling_counter = 0
launched_nodes = set()
last_plot_report = time()
while self._stop_event:
# stop request
if self._stop_event.wait(self._pool_frequency if pooling_counter else 0.01):
break
pooling_counter += 1
# check the pipeline time limit
if self._pipeline_time_limit and (time() - self._start_time) > self._pipeline_time_limit:
break
# check the state of all current jobs
            # if no job ended, continue
completed_jobs = []
force_execution_plot_update = False
for j in self._running_nodes:
node = self._nodes[j]
if not node.job:
continue
if node.job.is_stopped():
completed_jobs.append(j)
node.executed = node.job.task_id() if not node.job.is_failed() else False
if j in launched_nodes:
launched_nodes.remove(j)
elif node.timeout:
started = node.job.task.data.started
if (datetime.now().astimezone(started.tzinfo) - started).total_seconds() > node.timeout:
node.job.abort()
completed_jobs.append(j)
node.executed = node.job.task_id()
elif j in launched_nodes and node.job.is_running():
# make sure update the execution graph when the job started running
# (otherwise it will still be marked queued)
launched_nodes.remove(j)
force_execution_plot_update = True
# update running jobs
self._running_nodes = [j for j in self._running_nodes if j not in completed_jobs]
# nothing changed, we can sleep
if not completed_jobs and self._running_nodes:
# force updating the pipeline state (plot) at least every 5 min.
if force_execution_plot_update or time()-last_plot_report > 5.*60:
last_plot_report = time()
self.update_execution_plot()
continue
# callback on completed jobs
if self._experiment_completed_cb or self._post_step_callbacks:
for job in completed_jobs:
job_node = self._nodes.get(job)
if not job_node:
continue
if self._experiment_completed_cb:
self._experiment_completed_cb(self, job_node)
if self._post_step_callbacks.get(job_node.name):
self._post_step_callbacks[job_node.name](self, job_node)
# Pull the next jobs in the pipeline, based on the completed list
next_nodes = []
for node in self._nodes.values():
# check if already processed or needs to be skipped
if node.job or node.executed or node.skip_job:
continue
completed_parents = [bool(p in self._nodes and self._nodes[p].executed) for p in node.parents or []]
if all(completed_parents):
next_nodes.append(node.name)
# update the execution graph
for name in next_nodes:
if self._launch_node(self._nodes[name]) and not self._nodes[name].skip_job:
print('Launching step: {}'.format(name))
print('Parameters:\n{}'.format(
self._nodes[name].job.task_parameter_override if self._nodes[name].job
else self._nodes[name].parameters))
print('Overrides:\n{}'.format(self._nodes[name].task_overrides))
self._running_nodes.append(name)
launched_nodes.add(name)
# check if node is cached do not wait for event but run the loop again
if self._nodes[name].executed:
pooling_counter = 0
else:
getLogger('clearml.automation.controller').warning(
'Skipping launching step \'{}\': {}'.format(name, self._nodes[name]))
# update current state (in configuration, so that we could later continue an aborted pipeline)
self._force_task_configuration_update()
# visualize pipeline state (plot)
self.update_execution_plot()
            # quit if all pipeline nodes are fully executed.
            if not next_nodes and not self._running_nodes:
                break
# -*- coding: utf-8 -*-
import os
import tempfile
import shutil
from io import BytesIO
import random
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
from .utils import versioned_reverse as reverse
from django.test.utils import override_settings
from PIL import Image as PILImage
from .utils import get, assert_fields_exist, assert_event_data_is_equal
# event_list_url is used as magic fixture, which flake8 doesn't see
from .test_event_post import create_with_post, list_url as event_list_url # noqa
from events.tests.test_event_put import update_with_put
from events.models import Image
from events.auth import ApiKeyUser
import requests
temp_dir = tempfile.mkdtemp()
@pytest.yield_fixture(autouse=True)
def tear_down():
yield
shutil.rmtree(temp_dir, ignore_errors=True)
# === util methods ===
def create_in_memory_image_file(name='test_image', image_format='png', size=(512, 256), color=(128, 128, 128)):
image = PILImage.new('RGBA', size=size, color=color)
file = BytesIO()
file.name = '{}.{}'.format(name, image_format)
image.save(file, format=image_format)
file.seek(0)
return file
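# Illustrative sketch (not one of the original tests): the in-memory file can be
# posted straight to the image endpoint as multipart data; api_client here stands
# for the authenticated client fixture used by the tests below.
#
#   image_file = create_in_memory_image_file(name='sketch')
#   response = api_client.post(reverse('image-list'), {'image': image_file})
#   assert response.status_code == 201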
def get_list(api_client):
list_url = reverse('image-list')
return get(api_client, list_url)
def get_detail(api_client, detail_pk):
detail_url = reverse('image-detail', kwargs={'pk': detail_pk})
return get(api_client, detail_url)
def assert_image_fields_exist(data, version='v1'):
# TODO: start using version parameter
fields = (
'@context',
'@id',
'@type',
'name',
'publisher',
'created_time',
'cropping',
'id',
'url',
'last_modified_time',
'license',
'photographer_name',
'data_source',
'alt_text',
)
assert_fields_exist(data, fields)
# === fixtures ===
@pytest.fixture
def list_url():
return reverse('image-list')
@pytest.fixture
def image_data():
image_file = create_in_memory_image_file()
return {
'image': image_file,
}
@pytest.fixture
def image_url():
url = 'https://commons.wikimedia.org/wiki/File:Common_Squirrel.jpg'
return {
'url': url,
}
# === tests ===
@pytest.mark.django_db
def test__get_image_list_check_fields_exist(api_client):
image_file = create_in_memory_image_file(name='existing_test_image')
uploaded_image = SimpleUploadedFile(
'existing_test_image.png',
image_file.read(),
'image/png',
)
Image.objects.create(image=uploaded_image)
response = get_list(api_client)
assert_image_fields_exist(response.data['data'][0])
@pytest.mark.django_db
def test__get_image_list_check_fields_exist_for_url(api_client):
Image.objects.create(url='https://commons.wikimedia.org/wiki/File:Common_Squirrel.jpg')
response = get_list(api_client)
assert_image_fields_exist(response.data['data'][0])
@pytest.mark.django_db
def test__get_detail_check_fields_exist(api_client):
image_file = create_in_memory_image_file(name='existing_test_image')
uploaded_image = SimpleUploadedFile(
'existing_test_image.png',
image_file.read(),
'image/png',
)
existing_image = Image.objects.create(image=uploaded_image)
response = get_detail(api_client, existing_image.pk)
assert_image_fields_exist(response.data)
@pytest.mark.django_db
def test_get_detail_check_fields_exist_for_url(api_client):
existing_image = Image.objects.create(url='https://commons.wikimedia.org/wiki/File:Common_Squirrel.jpg')
response = get_detail(api_client, existing_image.pk)
assert_image_fields_exist(response.data)
@pytest.mark.django_db
def test__get_detail_check_image_url(api_client):
image_file = create_in_memory_image_file(name='existing_test_image')
uploaded_image = SimpleUploadedFile(
'existing_test_image.png',
image_file.read(),
'image/png',
)
existing_image = Image.objects.create(image=uploaded_image)
response = get_detail(api_client, existing_image.pk)
assert 'images/existing_test_image' in response.data['url']
assert response.data['url'].endswith('.png')
# @pytest.mark.django_db
# def test__unauthenticated_user_cannot_upload_an_image(api_client, list_url, image_data, user):
# response = api_client.post(list_url, image_data)
# assert response.status_code == 401
# @pytest.mark.django_db
# def test__unauthenticated_user_cannot_upload_an_url(api_client, list_url, image_url, user):
# response = api_client.post(list_url, image_url)
# assert response.status_code == 401
@override_settings(MEDIA_ROOT=temp_dir, MEDIA_URL='')
@pytest.mark.django_db
def test__upload_an_image(api_client, settings, list_url, image_data, user, organization):
organization.admin_users.add(user)
api_client.force_authenticate(user)
response = api_client.post(list_url, image_data)
assert response.status_code == 201
assert Image.objects.all().count() == 1
image = Image.objects.get(pk=response.data['id'])
#assert image.created_by == user
#assert image.last_modified_by == user
#assert image.publisher == organization
# image url should contain the image file's path relative to MEDIA_ROOT.
assert image.image.url.startswith('https://avoinhamelinkedevents.blob.core.windows.net/media/images/test_image')
assert image.image.url.endswith('.png')
# check the actual image file
#image_path = os.path.join(settings.MEDIA_ROOT, image.image.url)
#image = PILImage.open(image_path)
response = requests.get(image.image.url)
image = PILImage.open(BytesIO(response.content))
assert image.size == (512, 256)
assert image.format == 'PNG'
@override_settings(MEDIA_ROOT=temp_dir, MEDIA_URL='')
@pytest.mark.django_db
def test__upload_an_image_with_api_key(api_client, settings, list_url, image_data, data_source, organization):
data_source.owner = organization
data_source.save()
api_client.credentials(apikey=data_source.api_key)
response = api_client.post(list_url, image_data)
assert response.status_code == 201
assert Image.objects.all().count() == 1
assert ApiKeyUser.objects.all().count() == 1
image = Image.objects.get(pk=response.data['id'])
#assert image.publisher == organization
# image url should contain the image file's path relative to MEDIA_ROOT.
assert image.image.url.startswith('https://avoinhamelinkedevents.blob.core.windows.net/media/images/test_image')
assert image.image.url.endswith('.png')
# check the actual image file
#image_path = os.path.join(settings.MEDIA_ROOT, image.image.url)
#image = PILImage.open(image_path)
response = requests.get(image.image.url)
image = PILImage.open(BytesIO(response.content))
assert image.size == (512, 256)
assert image.format == 'PNG'
# @pytest.mark.django_db
# def test__image_cannot_be_edited_outside_organization(
# api_client, settings, list_url, image_data, user, organization, organization2, user2):
# organization.admin_users.add(user)
# api_client.force_authenticate(user)
# response = api_client.post(list_url, image_data)
# assert response.status_code == 201
# assert Image.objects.all().count() == 1
# image = Image.objects.get(pk=response.data['id'])
# assert image.created_by == user
# assert image.last_modified_by == user
# assert image.publisher == organization
# # other users cannot edit or delete the image
# organization2.admin_users.add(user2)
# api_client.force_authenticate(user2)
# detail_url = reverse('image-detail', kwargs={'pk': response.data['id']})
# response2 = api_client.put(detail_url, {'name': 'this is needed'})
# assert response2.status_code == 403
# response3 = api_client.delete(detail_url)
# assert response3.status_code == 403
# @pytest.mark.django_db
# def test__image_from_another_data_source_can_be_edited_by_admin(api_client, list_url, image_data, data_source, user,
# organization, other_data_source):
# organization.admin_users.add(user)
# api_client.force_authenticate(user)
# response = api_client.post(list_url, image_data)
# assert response.status_code == 201
# assert Image.objects.all().count() == 1
# image = Image.objects.get(pk=response.data['id'])
# assert image.created_by == user
# assert image.last_modified_by == user
# assert image.publisher == organization
# assert image.data_source == data_source
# image.data_source = other_data_source
# image.save()
# other_data_source.user_editable = True
# other_data_source.owner = organization
# other_data_source.save()
# assert image.data_source == other_data_source
# assert other_data_source in organization.owned_systems.all()
# # user can still edit or delete the image
# detail_url = reverse('image-detail', kwargs={'pk': response.data['id']})
# response2 = api_client.put(detail_url, {'id': response.data['id'], 'name': 'this is needed'})
# assert response2.status_code == 200
# response3 = api_client.delete(detail_url)
# assert response3.status_code == 204
# @pytest.mark.django_db
# def test__image_cannot_be_edited_outside_organization_with_apikey(
# api_client, settings, list_url, image_data, user, organization, organization2, other_data_source):
# organization.admin_users.add(user)
# api_client.force_authenticate(user)
# response = api_client.post(list_url, image_data)
# assert response.status_code == 201
# assert Image.objects.all().count() == 1
# image = Image.objects.get(pk=response.data['id'])
# assert image.created_by == user
# assert image.last_modified_by == user
# assert image.publisher == organization
# # api key user cannot edit or delete the image
# other_data_source.owner = organization2
# other_data_source.save()
# api_client.force_authenticate(user=None)
# api_client.credentials(apikey=other_data_source.api_key)
# detail_url = reverse('image-detail', kwargs={'pk': response.data['id']})
# response2 = api_client.put(detail_url, {'name': 'this is needed'})
# assert response2.status_code == 403
# response3 = api_client.delete(detail_url)
# assert response3.status_code == 403
# # event_list_url is used as magic fixture, which flake8 doesn't see
# @override_settings(MEDIA_ROOT=temp_dir, MEDIA_URL='') # noqa
# @pytest.mark.django_db
# def test__create_an_event_with_uploaded_image(
# api_client, list_url, event_list_url, minimal_event_dict, image_data, user): # noqa
# api_client.force_authenticate(user)
# image_response = api_client.post(list_url, image_data)
# assert image_response.status_code == 201
# assert Image.objects.all().count() == 1
# image = Image.objects.get(pk=image_response.data['id'])
# #assert image.created_by == user
# #assert image.last_modified_by == user
# minimal_event_dict.update({'images': [{'@id': str(image_response.data['@id'])}]})
# response = create_with_post(api_client, minimal_event_dict)
# # the event data should contain the expanded image data
# minimal_event_dict['images'][0].update(image_response.data)
# # the image field url changes between endpoints
# # also, admin only fields are not displayed in inlined resources
# minimal_event_dict['images'][0].pop('url')
# minimal_event_dict['images'][0].pop('created_by')
# minimal_event_dict['images'][0].pop('last_modified_by')
# assert_event_data_is_equal(minimal_event_dict, response.data)
# # event_list_url is used as magic fixture, which flake8 doesn't see
# @override_settings(MEDIA_ROOT=temp_dir, MEDIA_URL='') # noqa
# @pytest.mark.django_db
# def test__update_an_event_with_uploaded_image(
# api_client, list_url, event_list_url, minimal_event_dict, image_data, user): # noqa
# api_client.force_authenticate(user)
# response = create_with_post(api_client, minimal_event_dict)
# image_response = api_client.post(list_url, image_data)
# assert image_response.status_code == 201
# assert Image.objects.all().count() == 1
# image = Image.objects.get(pk=image_response.data['id'])
# #assert image.created_by == user
# #assert image.last_modified_by == user
# minimal_event_dict.update({'images': [{'@id': str(image_response.data['@id'])}]})
# response2 = update_with_put(api_client, response.data['@id'], minimal_event_dict)
# # the event data should contain the expanded image data
# minimal_event_dict['images'][0].update(image_response.data)
# # the image field url changes between endpoints
# # also, admin only fields are not displayed in inlined resources
# minimal_event_dict['images'][0].pop('url')
# minimal_event_dict['images'][0].pop('created_by')
# minimal_event_dict['images'][0].pop('last_modified_by')
# assert_event_data_is_equal(minimal_event_dict, response2.data)
@pytest.mark.django_db
def test__upload_an_url(api_client, settings, list_url, image_url, user, organization):
organization.admin_users.add(user)
api_client.force_authenticate(user)
response = api_client.post(list_url, image_url)
assert response.status_code == 201
assert Image.objects.all().count() == 1
image = Image.objects.get(pk=response.data['id'])
#assert image.created_by == user
#assert image.last_modified_by == user
# image url should stay the same as when input
assert image.url == 'https://commons.wikimedia.org/wiki/File:Common_Squirrel.jpg'
@pytest.mark.django_db
def test__upload_an_url_with_api_key(api_client, settings, list_url, image_url, data_source, organization):
data_source.owner = organization
data_source.save()
api_client.credentials(apikey=data_source.api_key)
response = api_client.post(list_url, image_url)
assert response.status_code == 201
assert Image.objects.all().count() == 1
assert ApiKeyUser.objects.all().count() == 1
image = Image.objects.get(pk=response.data['id'])
#assert image.publisher == organization
# image url should stay the same as when input
assert image.url == 'https://commons.wikimedia.org/wiki/File:Common_Squirrel.jpg'
@pytest.mark.django_db
def test__upload_an_image_and_url(api_client, settings, list_url, image_data, image_url, user, organization):
organization.admin_users.add(user)
api_client.force_authenticate(user)
image_data_and_url = image_data.copy()
image_data_and_url.update(image_url)
response = api_client.post(list_url, image_data_and_url)
assert response.status_code == 400
for line in response.data:
assert 'You can only provide image or url, not both' in line
# @override_settings(MEDIA_ROOT=temp_dir, MEDIA_URL='')
# @pytest.mark.django_db(transaction=True) # transaction is needed for django-cleanup
# def test__delete_an_image(api_client, settings, user, organization):
# api_client.force_authenticate(user)
# image_file = create_in_memory_image_file(name='existing_test_image')
# uploaded_image = SimpleUploadedFile(
# 'existing_test_image.png',
# image_file.read(),
# 'image/png',
# )
# existing_image = Image.objects.create(image=uploaded_image,
# publisher=organization)
# assert Image.objects.all().count() == 1
# # verify that the image file exists at first just in case
# image_path = os.path.join(settings.MEDIA_ROOT, existing_image.image.url)
# assert os.path.isfile(image_path)
# detail_url = reverse('image-detail', kwargs={'pk': existing_image.pk})
# response = api_client.delete(detail_url)
# assert response.status_code == 204
# assert Image.objects.all().count() == 0
# # check that the image file is deleted
# assert not os.path.isfile(image_path)
# @override_settings(MEDIA_ROOT=temp_dir, MEDIA_URL='')
# @pytest.mark.django_db(transaction=True) # transaction is needed for django-cleanup
# def test__delete_an_image_with_api_key(api_client, settings, organization, data_source):
# data_source.owner = organization
# data_source.save()
# api_client.credentials(apikey=data_source.api_key)
# image_file = create_in_memory_image_file(name='existing_test_image')
# uploaded_image = SimpleUploadedFile(
# 'existing_test_image.png',
# image_file.read(),
# 'image/png',
# )
# existing_image = Image.objects.create(image=uploaded_image, data_source=data_source,
# publisher=organization)
# assert Image.objects.all().count() == 1
# # verify that the image file exists at first just in case
# image_path = os.path.join(settings.MEDIA_ROOT, existing_image.image.url)
# assert os.path.isfile(image_path)
# detail_url = reverse('image-detail', kwargs={'pk': existing_image.pk})
# response = api_client.delete(detail_url)
# assert response.status_code == 204
# assert Image.objects.all().count() == 0
# assert ApiKeyUser.objects.all().count() == 1
# # check that the image file is deleted
# assert not os.path.isfile(image_path)
@override_settings(MEDIA_ROOT=temp_dir, MEDIA_URL='')
@pytest.mark.django_db
def test__upload_a_non_valid_image(api_client, list_url, user, organization):
organization.admin_users.add(user)
api_client.force_authenticate(user)
non_image_file = BytesIO(bytes(random.getrandbits(8) for _ in range(100)))
response = api_client.post(list_url, {'image': non_image_file})
assert response.status_code == 400
assert 'image' in response.data
@pytest.mark.django_db
def test__upload_an_invalid_dict(api_client, list_url, user, organization):
organization.admin_users.add(user)
api_client.force_authenticate(user)
response = api_client.post(list_url, {'name': 'right', 'key': 'wrong'})
assert response.status_code == 400
for line in response.data:
assert 'You must provide either image or url' in line
@pytest.mark.django_db
def test_set_image_license(api_client, list_url, image_data, image_url, user, organization):
organization.admin_users.add(user)
api_client.force_authenticate(user)
    # an image is posted without a license, expect the default 'event_only' license
response = api_client.post(list_url, image_url)
assert response.status_code == 201
new_image = Image.objects.last()
assert new_image.license_id == 'event_only'
# an image is posted with event_only license, expect change
image_data['license'] = 'event_only'
response = api_client.post(list_url, image_data)
assert response.status_code == 201
new_image = Image.objects.last()
assert new_image.license_id == 'event_only'
import re, os, itertools, string
from collections import Counter
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import words
import numpy as np
import pandas as pd
import emoji
import utils
def get_morphems(morpheme):
mp = morpheme.split(';')
out_list = []
for i in mp:
out = re.split('/|#', i)
out_list.append(out)
word = ''.join([i[0].strip() for i in out_list])
pos = '/'.join([i[1].strip() for i in out_list])
return({'word':word, 'pos':pos})
def build_subj_dicionary(df):
subj_dict = dict()
for line in range(len(df)):
if df.iloc[line]['tag_type'] == 'Seed':
word_pos_dict = get_morphems(df.iloc[line]['morphemes'])
word = word_pos_dict['word']
pos = word_pos_dict['pos'] # part of speech: noun, verb, adj, adv or anypos
word_type = df.iloc[line]['subjectivity-polarity'] # weak or strong subjective
polarity = df.iloc[line]['polarity'] # its polarity: can be positive, negative or neutral
new_dict_entry = {pos: [word_type, polarity]}
if word in subj_dict.keys():
subj_dict[word].update(new_dict_entry)
else:
subj_dict[word] = new_dict_entry
return subj_dict
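# Illustrative sketch (assumption): shape of the mapping returned by
# build_subj_dicionary() for a single 'Seed' lexicon row; the word, part of
# speech and label strings below are stand-ins for whatever the CSV contains.
#
#   subj_dict['dreadful'] == {'adj': ['strongsubj', 'negative']}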
def get_subj_lexicon():
lexicon = pd.read_csv(path + "/res/subjectivity_lexicon.csv")
# lexicon = utils.load_csv(path + "/res/subjectivity_lexicon.csv")
subj_dict = build_subj_dicionary(lexicon)
return subj_dict
def get_emoji_dictionary():
emojis = utils.load_file(path + "/res/emoji/emoji_list.txt")
emoji_dict = {}
for line in emojis:
line = line.split(" ", 1)
emoji = line[0]
description = line[1]
emoji_dict[emoji] = description
return emoji_dict
def build_emoji_sentiment_dictionary():
new_emoji_sentiment_filename = path + "/res/emoji/emoji_sentiment_dictionary.txt"
if not os.path.exists(new_emoji_sentiment_filename):
filename = path + "/res/emoji/emoji_sentiment_raw.txt"
emojis = utils.load_file(filename)[1:]
lines = []
for line in emojis:
line = line.split(",")
emoji = line[0]
occurences = line[2]
negative = float(line[4]) / float(occurences)
neutral = float(line[5]) / float(occurences)
positive = float(line[6]) / float(occurences)
description = line[7]
lines.append(str(emoji) + "\t" + str(negative) + "\t" + str(neutral)
+ "\t" + str(positive) + "\t" + description.lower())
utils.save_file(lines, new_emoji_sentiment_filename)
emoji_sentiment_data = utils.load_file(new_emoji_sentiment_filename)
emoji_sentiment_dict = {}
for line in emoji_sentiment_data:
line = line.split("\t")
# Get emoji characteristics as a list [negative, neutral, positive, description]
emoji_sentiment_dict[line[0]] = [line[1], line[2], line[3], line[4]]
return emoji_sentiment_dict
# Extract each tweet's emojis - obv. it's just a brute force solution (so, it's slow) but works in ALL cases
def extract_emojis(tweets):
tweets = str(tweets)
emojis = []
for tw in tweets:
tw_emojis = []
for word in tw:
chars = list(word)
for ch in chars:
if ch in emoji.UNICODE_EMOJI:
tw_emojis.append(ch)
emojis.append(' '.join(tw_emojis))
return emojis
# Replace a contraction (coming from possessives, verbs, emphasis or just bad language) by its longer form
def replace_contracted_form(contracted_word, pos, dictionary):
long_form = []
if "'" in contracted_word:
# print("Found apostrophe in word: ", contracted_word, ' with pos: ', pos)
split_words = contracted_word.split("'")
check_if_in_dict = False
# If the contraction is a nominal + verbal or a proper noun + verbal
        if pos == 'L' or pos == 'M':
long_form.append(split_words[0])
if split_words[1].lower() in contractions:
long_form.extend(contractions[split_words[1].lower()].split())
# If the contraction is a whole verb (like let's or isn't)
elif pos in ['V', 'Y', 'O'] and contracted_word.lower() in contractions:
long_form.extend(contractions[contracted_word.lower()].split())
# If the contraction is proper noun with possessive or a nominal with a possessive or even a (proper) noun
elif pos in ['S', 'Z', 'D', 'N', '^']:
if contracted_word.lower() in contractions:
long_form.extend(contractions[contracted_word.lower()].split())
elif split_words[1].lower() == 's':
long_form.append(split_words[0])
elif contracted_word.lower() in contractions:
long_form.extend(contractions[contracted_word.lower()].split())
else:
check_if_in_dict = True
# Can skip ' which are just punctuation marks (usually used to emphasize or quote something)
        elif pos == ',':
# print("Punctuation, nothing to replace.", split_words[0], ' -- ', split_words[1])
return []
# Never replace contractions in emojis or emoticons (will be translated later)
elif pos == 'E':
long_form.append(contracted_word)
else:
check_if_in_dict = True
if check_if_in_dict:
# Attempt to separate words which have been separated by ' by human error
clean0 = re.findall("[a-zA-Z]+", split_words[0])
clean1 = re.findall("[a-zA-Z]+", split_words[1])
if clean0 != [] and clean0[0].lower() in dictionary and clean1 != [] and clean1[0].lower() in dictionary:
# print("Cleaned to ", clean0, ', ', clean1)
long_form.extend([clean0[0], clean1[0]])
else:
# print("Word couldn't be de-contracted!")
long_form.append(contracted_word)
return long_form
else:
long_form.append(contracted_word)
return long_form
# Cannot do lemmatization with NLTK without changing the case - which we don't want
# So lemmatize, but remember whether the word was upper case or started with an upper-case letter
# This will be needed when performing CMU pos-tagging or when extracting pragmatic features
def correct_spelling_but_preserve_case(lemmatizer, word):
corrected = lemmatizer.lemmatize(word.lower(), 'v')
corrected = lemmatizer.lemmatize(corrected)
if word.isupper():
return corrected.upper()
if word[0].isupper():
return corrected[0].upper() + corrected[1:]
return corrected
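# Illustrative example (requires NLTK's WordNet data to be installed):
# lemmatizer = WordNetLemmatizer()
# correct_spelling_but_preserve_case(lemmatizer, "Running") -> "Run"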
# Reduce the length of the pattern (if repeating characters are found)
def reduce_lengthening(word, dictionary):
if word.lower() in dictionary or word.isnumeric():
return word
# Pattern matching a character repeated three or more times
pattern2 = re.compile(r"(.)\1{2,}")
# Pattern matching a character repeated two or more times
pattern1 = re.compile(r"(.)\1{1,}")
# Word obtained by collapsing repeated characters down to length 2
word2 = pattern2.sub(r"\1\1", word)
# Word obtained by collapsing repeated characters down to length 1
word1 = pattern1.sub(r"\1", word)
# print("Reduced length from ", word, " w2 -- ", word2, " w1 -- ", word1)
if word1.lower() in dictionary:
return word1
else:
return word2
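# Illustrative examples (the dictionary is any container of lowercase words):
# reduce_lengthening("sooo", {"so"}) -> "so" (the length-1 reduction is in the dictionary)
# reduce_lengthening("cooooool", {"cool"}) -> "cool" (falls back to the length-2 reduction)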
# Translate emojis (or a group of emojis) into a list of descriptions
def process_emojis(word, emoji_dict, translate_emojis=True):
processed = []
chars = list(word)
remaining = ""
for c in chars:
if c in emoji_dict.keys() or c in emoji.UNICODE_EMOJI:
if remaining != "":
processed.append(remaining)
remaining = ""
if translate_emojis:
if c in emoji_dict:
processed.extend(emoji_dict[c][3].lower().split())
else:
processed.extend(c)
else:
remaining += c
if remaining != "":
processed.append(remaining)
if processed != []:
return ' '.join(processed)
else:
return word
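# Illustrative example (toy_dict mimics the [negative, neutral, positive, description]
# entries produced by build_emoji_sentiment_dictionary; the values are made up):
# toy_dict = {'😂': ['0.2', '0.3', '0.5', 'Face With Tears of Joy']}
# process_emojis('😂', toy_dict) -> 'face with tears of joy'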
# TODO: Numerals - sarcasm heavily relies on them so find a way to extract meaning behind numbers
# Attempt to clean each tweet and make it as grammatical as possible
def grammatical_clean(tweets, pos_tags, word_file, filename, translate_emojis=True, replace_slang=True, lowercase=False):
if not os.path.exists(filename):
dictionary = utils.load_file(word_file).split()
emoji_dict = build_emoji_sentiment_dictionary()
lemmatizer = WordNetLemmatizer()
corrected_tweets = []
for tweet, pos_tag in zip(tweets, pos_tags):
corrected_tweet = []
# print("Tweet: ", tweet)
# print("POS: ", pos_tag)
for word, pos in zip(tweet.split(), pos_tag.split()):
if lowercase:
t = word.lower()
else:
t = word
if t.startswith("#"):
t = t[1:]
# Remove unnecessary hyphens that just add noise (but not from composed words)
if t.startswith('-') or t.endswith('-'):
t = re.sub('[-]', '', t)
# Process emojis (not written with parenthesis, but with symbols)
emoji_translation = process_emojis(t, emoji_dict, translate_emojis=translate_emojis)
if emoji_translation != t:
corrected_tweet.append(emoji_translation)
continue
# Replace contractions with long-forms
if "'" in t:
long_form = replace_contracted_form(t, pos, dictionary)
corrected_tweet.extend(long_form)
# print("Removed contracted form of ", t, " to ", long_form)
continue
# Check if token contains repeating characters and if so, remove them
# Exclude removal of repeating punctuation, numerals, user mentions
if pos not in [',', '$', '~', '@'] and len(t) > 0:
t = correct_spelling_but_preserve_case(lemmatizer, t)
reduced = reduce_lengthening(t, dictionary)
if reduced != t.lower():
# print("Reduced length of word ", t, " to ", reduced)
t = reduced
# Translate emoticons to their description
if translate_emojis and t.lower() in wikipedia_emoticons:
translated_emoticon = wikipedia_emoticons[t.lower()].split()
# print("WIKI emoticon translated from ", t, " to ", translated_emoticon)
corrected_tweet.extend(translated_emoticon)
continue
elif t.lower() in emotiocons_to_emojis:
translated_emoticon = emotiocons_to_emojis[t.lower()]
corrected_tweet.append(translated_emoticon)
# print("Replaced emoticon from ", t, " to ", translated_emoticon)
continue
# Replace all slang (or twitter abbreviations) to explicit form
if replace_slang and t.lower() in slang.keys():
slang_translation = slang[t.lower()]
# print("Slang word replaced from ", t, " to ", slang_translation)
corrected_tweet.extend(slang_translation.split())
continue
if t != '':
# print("Corrected tweet ", t)
corrected_tweet.append(t)
corrected_tweets.append(corrected_tweet)
# Save the grammatical set to filename
lines = [' '.join(line) for line in corrected_tweets]
# Used for comparison between previous data and the cleaned, grammatical data
for dirty, corrected in zip(tweets, lines):
print("Dirty:\t%s\nGrammatical:\t%s" % (dirty, corrected))
utils.save_file(lines, filename)
return lines
# Load grammatical set from filename
# corrected_tweets = [[word for word in line.split()] for line in utils.load_file(filename)]
corrected_tweets = [line for line in utils.load_file(filename)]
return corrected_tweets
def get_stopwords_list(filename="stopwords.txt"):
stopwords = utils.load_file(path + "/res/" + filename)
return stopwords
def get_stopwords_list_char(filename="stopwords_char.txt"):
stopwords = utils.load_file(path + "/res/" + filename)
return stopwords
def build_vocabulary(vocab_filename, lines, minimum_occurrence=1):
if not os.path.exists(vocab_filename):
stopwords = get_stopwords_list(filename="stopwords_loose.txt")
print("Building vocabulary...")
vocabulary = Counter()
for line in lines:
try:
vocabulary.update([l.lower() for l in line.split() if l not in stopwords])
except AttributeError:
continue
print("The top 10 most common words: ", vocabulary.most_common(10))
# Filter all words that appear too rarely or too frequently to be conclusive
vocabulary = {key: vocabulary[key] for key in vocabulary
if vocabulary[key] >= minimum_occurrence}
utils.save_file(vocabulary.keys(), vocab_filename)
print("Vocabulary saved.")
# key: stage, value: RAM counter
for stage, objs in sorted(schedule_table.items(), key=lambda x: x[0]):
cnt = collections.Counter()
for obj in objs:
if not bt.is_operator(obj):
continue
if (bt.is_output_chainable_operator(obj) and
not obj.chain_head):
continue
stream_hash = obj.get_stream_hash()
input_rams, output_rams, temp_rams = max_stream_rams[stream_hash]
for width, length in sorted(input_rams, reverse=True):
key = to_actual_ram_spec(config, width, length)
cnt[key] += 1
for width, length in sorted(output_rams, reverse=True):
key = to_actual_ram_spec(config, width, length)
cnt[key] += 1
for width, length in sorted(temp_rams, reverse=True):
key = to_actual_ram_spec(config, width, length)
cnt[key] += 1
max_stage_rams[stage] = cnt
return max_stage_rams
def max_tuple(*tuples):
return tuple([max(*values) for values in zip(*tuples)])
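# Element-wise maximum over the given tuples, e.g.:
# max_tuple((1, 5), (3, 2)) -> (3, 5)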
def make_ram_sets(config, schedule_table, ram_dict, max_stream_rams):
ram_set_cache = collections.defaultdict(list)
for stage, objs in sorted(schedule_table.items(), key=lambda x: x[0]):
make_stage_ram_sets(config, schedule_table, ram_dict, max_stream_rams,
stage, objs, ram_set_cache)
return ram_set_cache
def make_stage_ram_sets(config, schedule_table, ram_dict, max_stream_rams,
stage, objs, ram_set_cache):
ram_index_set = collections.defaultdict(set)
# cache check
for obj in objs:
if not bt.is_operator(obj):
continue
if bt.is_output_chainable_operator(obj) and not obj.chain_head:
continue
stream_hash = obj.get_stream_hash()
for (input_rams, output_rams,
temp_rams, used_ram_index_dict) in ram_set_cache[stream_hash]:
satisfied = obj.check_ram_requirements(input_rams,
output_rams, temp_rams)
for key, ram_indexes in used_ram_index_dict.items():
for ram_index in ram_indexes:
if ram_index in ram_index_set[key]:
satisfied = False
# Hit: reuse the existing RAM set
if satisfied:
obj.set_rams(input_rams, output_rams, temp_rams)
obj.cached_ram_set = True
for key, ram_indexes in used_ram_index_dict.items():
for ram_index in ram_indexes:
ram_index_set[key].add(ram_index)
# Miss or Unsatisfied: create a new RAM set
for obj in objs:
if not bt.is_operator(obj):
continue
if bt.is_output_chainable_operator(obj) and not obj.chain_head:
continue
if obj.cached_ram_set:
continue
stream_hash = obj.get_stream_hash()
req_inputs, req_outputs, req_temps = max_stream_rams[stream_hash]
used_ram_index_dict = collections.defaultdict(list)
req_rams = []
req_rams.extend([(width, length, 'input', i)
for i, (width, length) in enumerate(req_inputs)])
req_rams.extend([(width, length, 'output', i)
for i, (width, length) in enumerate(req_outputs)])
req_rams.extend([(width, length, 'temp', i)
for i, (width, length) in enumerate(req_temps)])
input_rams = [None for _ in req_inputs]
output_rams = [None for _ in req_outputs]
temp_rams = [None for _ in req_temps]
# smallest request first
for width, length, ram_type, pos in sorted(req_rams,
key=lambda x: (x[0], x[1])):
width, length = to_actual_ram_spec(config, width, length)
key = (width, length)
found = False
# smallest RAM first
for ram_key, rams in sorted(ram_dict.items(), key=lambda x: x[0]):
if found:
break
for i, ram in enumerate(rams):
if i in ram_index_set[ram_key]:
continue
ram_width, ram_length = ram_key
if ram_width != width:
continue
if ram_length >= length:
if ram_type == 'input':
input_rams[pos] = ram
elif ram_type == 'output':
output_rams[pos] = ram
elif ram_type == 'temp':
temp_rams[pos] = ram
ram_index_set[ram_key].add(i)
used_ram_index_dict[key].append(i)
found = True
break
obj.set_rams(input_rams, output_rams, temp_rams)
ram_set_cache[stream_hash].append((input_rams, output_rams,
temp_rams, used_ram_index_dict))
def to_actual_ram_spec(config, width, length):
if width == 0:
width = config['default_datawidth']
min_capacity = config['min_onchip_ram_capacity']
maxi_datawidth = config['maxi_datawidth']
numbanks = maxi_datawidth // width
if numbanks < 1:
numbanks = 1
# while min_length is the total length of a MultibankMemory,
# min_capacity is the minimum capacity for each bank
min_length = (min_capacity // width) * numbanks
if length < min_length:
length = min_length
addrwidth = int(math.ceil(math.log(length, 2)))
real_length = 2 ** addrwidth
return width, real_length
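# Worked example (hypothetical config values):
# config = {'default_datawidth': 32, 'min_onchip_ram_capacity': 8192, 'maxi_datawidth': 128}
# width 0 -> 32, numbanks = 128 // 32 = 4, min_length = (8192 // 32) * 4 = 1024,
# so a requested length of 500 is rounded up to 2 ** ceil(log2(1024)) = 1024:
# to_actual_ram_spec(config, 0, 500) -> (32, 1024)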
def make_control_params(config, schedule_table):
control_param_dict = get_control_param_dict(schedule_table)
obj_cache = collections.defaultdict(list)
for stage, objs in sorted(schedule_table.items(), key=lambda x: x[0]):
for obj in objs:
if not bt.is_operator(obj):
continue
if bt.is_view(obj):
continue
if bt.is_removable_reshape(obj):
continue
if (bt.is_output_chainable_operator(obj) and
not obj.chain_head):
continue
key = obj.get_stream_hash()
control_param_list = control_param_dict[key]
control_param_len = len(control_param_list)
if (not config['disable_control_cache'] and
obj.control_cachable and len(obj_cache[key]) > 0):
# hit
orig = obj_cache[key][0]
obj.copy_control_params(orig)
else:
# miss
width_dict, signed_dict = calc_control_param_width(control_param_list)
obj.make_control_params(control_param_len, width_dict, signed_dict,
use_param_ram=config['use_param_ram'],
min_param_ram_len=config['min_param_ram_len'])
obj.make_control_param_buf(control_param_list,
use_param_ram=config['use_param_ram'],
min_param_ram_len=config['min_param_ram_len'],
ram_style=config['param_ram_style'])
obj_cache[key].append(obj)
return control_param_dict
def get_control_param_dict(schedule_table):
index_dict = collections.defaultdict(int)
control_param_dict = collections.defaultdict(list)
for stage, objs in sorted(schedule_table.items(), key=lambda x: x[0]):
for obj in objs:
if not bt.is_operator(obj):
continue
if bt.is_view(obj):
continue
if bt.is_removable_reshape(obj):
continue
if (bt.is_output_chainable_operator(obj) and
not obj.chain_head):
continue
key = obj.get_stream_hash()
index = index_dict[key]
obj.set_control_param_index(index)
index_dict[key] += 1
values = obj.collect_all_control_param_values()
control_param_dict[key].append(values)
return control_param_dict
def calc_control_param_width(control_param_value_list):
width_dict = collections.OrderedDict()
signed_dict = collections.OrderedDict()
for values in control_param_value_list:
for name, value in values.items():
if isinstance(value, (tuple, list)):
if name in width_dict:
current_width = width_dict[name]
else:
current_width = [1 for _ in value]
if name in signed_dict:
current_signed = signed_dict[name]
else:
current_signed = [False for _ in value]
width = []
signed = []
for v, cw, cs in zip(value, current_width, current_signed):
w = max(abs(v).bit_length(), 1)
width.append(max(cw, w))
s = v < 0
signed.append(cs or s)
width_dict[name] = width
signed_dict[name] = signed
else:
if name in width_dict:
current_width = width_dict[name]
else:
current_width = 1
if name in signed_dict:
current_signed = signed_dict[name]
else:
current_signed = False
w = max(abs(value).bit_length(), 1)
width = max(current_width, w)
s = value < 0
signed = current_signed or s
width_dict[name] = width
signed_dict[name] = signed
for name, signed in signed_dict.items():
if isinstance(width_dict[name], (tuple, list)):
width = []
for w, s in zip(width_dict[name], signed):
if s:
width.append(w + 1)
else:
width.append(w)
width_dict[name] = width
else:
if signed:
width_dict[name] += 1
return width_dict, signed_dict
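# Worked example: two parameter-value dicts for the same stream, {'size': 5} and
# {'size': -3}. abs(5) needs 3 bits, abs(-3) needs 2 bits, and the negative value
# makes the field signed, so one sign bit is added:
# calc_control_param_width([{'size': 5}, {'size': -3}])
# -> (OrderedDict([('size', 4)]), OrderedDict([('size', True)]))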
def make_substreams(config, m, clk, rst, maxi, schedule_table):
max_substrms = calc_max_substreams(config, schedule_table)
substrm_dict = collections.defaultdict(list)
substrm_index = collections.defaultdict(int)
for key, num in sorted(max_substrms.items(), key=lambda x: x[0]):
method_name = key[0]
args = key[1]
method = getattr(substreams, method_name)
for _ in range(num):
i = substrm_index[key]
substrm_index[key] += 1
substrm = method(m, clk, rst, *args)
substrm_dict[key].append(substrm)
return substrm_dict
def calc_max_substreams(config, schedule_table):
max_substrms = collections.Counter()
for stage, objs in sorted(schedule_table.items(), key=lambda x: x[0]):
cnt = collections.Counter()
for obj in objs:
if not bt.is_operator(obj):
continue
if (bt.is_output_chainable_operator(obj) and
not obj.chain_head):
continue
substrms = obj.get_required_substreams()
for key in substrms:
cnt[key] += 1
for key, val in sorted(cnt.items(), key=lambda x: x[0], reverse=True):
max_substrms[key] = max(max_substrms[key], val)
return max_substrms
def to_hashable_dict(dct):
return tuple([(key, val)
for key, val in sorted(dct.items(), key=lambda x:x[0])])
def from_hashable_dict(dct):
return dict(dct)
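# Example round trip:
# to_hashable_dict({'b': 2, 'a': 1}) -> (('a', 1), ('b', 2))
# from_hashable_dict((('a', 1), ('b', 2))) -> {'a': 1, 'b': 2}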
def make_streams(config, schedule_table, ram_dict, substrm_dict):
stream_cache = collections.defaultdict(list)
for stage, objs in sorted(schedule_table.items(), key=lambda x: x[0]):
make_stage_streams(config, schedule_table, ram_dict, substrm_dict,
stage, objs, stream_cache)
return stream_cache
def make_stage_streams(config, schedule_table, ram_dict, substrm_dict,
stage, objs, stream_cache):
substrm_index_set = collections.defaultdict(set)
# cache check
for obj in objs:
if config['disable_stream_cache']:
break
if not bt.is_operator(obj):
continue
if bt.is_output_chainable_operator(obj) and not obj.chain_head:
continue
if not obj.stream_cachable:
continue
stream_hash = obj.get_stream_hash()
for (strm, used_substrm_index_dict) in stream_cache[stream_hash]:
satisfied = True
for key, substrm_indexes in used_substrm_index_dict.items():
for substrm_index in substrm_indexes:
if substrm_index in substrm_index_set[key]:
satisfied = False
# Hit: reuse the existing substream set and main stream
if satisfied:
obj.set_stream(strm)
obj.cached_stream = True
for key, substrm_indexes in used_substrm_index_dict.items():
for substrm_index in substrm_indexes:
substrm_index_set[key].add(substrm_index)
# Miss: create a new substream set and main stream
for obj in objs:
if not bt.is_operator(obj):
continue
if bt.is_output_chainable_operator(obj) and not obj.chain_head:
continue
if obj.cached_stream:
continue
req_substrms = obj.get_required_substreams()
used_substrm_index_dict = collections.defaultdict(list)
substrms = []
for key in req_substrms:
substrm_list = substrm_dict[key]
for i, substrm in enumerate(substrm_list):
if i in substrm_index_set[key]:
continue
substrm_index_set[key].add(i)
used_substrm_index_dict[key].append(i)
sub = substrm_list[i]
substrms.append(sub)
break
obj.set_substreams(substrms)
strm = obj.make_stream(datawidth=config['default_datawidth'],
fsm_as_module=config['fsm_as_module'],
dump=config['dump_stream'],
dump_base=config['dump_stream_base'])
obj.set_stream(strm)
stream_hash = obj.get_stream_hash()
stream_cache[stream_hash].append((strm, used_substrm_index_dict))
def make_addr_map(config, objs, saxi):
chunk_size = config['offchipram_chunk_bytes']
maxi_datawidth = config['maxi_datawidth']
if (chunk_size * 8) % maxi_datawidth != 0:
raise ValueError("'offchipram_chunk_bytes' must be a multiple number of 'maxi_datawidth'.")
global_addr_offset = config['default_global_addr_offset']
global_addr_map = collections.OrderedDict()
local_addr_map = collections.OrderedDict()
global_map_info = collections.OrderedDict()
global_mem_map = collections.OrderedDict() # key: (start, end)
if not config['use_map_ram']:
map_regs = saxi.register[num_header_regs + num_control_regs:]
offset_reg = saxi.register[control_reg_global_offset]
offset_reg.initval = global_addr_offset
reg_index = 1
global_index = 1
local_index = 1
storage_used = 0
unified_storage_used = 0
temporal_used = 0
unified_storage_list = []
local_addr_map[0] = 0
# output
for obj in sorted(objs, key=lambda x: x.object_id):
if obj.is_output and obj.global_index is None:
while bt.is_view(obj) or bt.is_removable_reshape(obj):
obj = obj.args[0]
width = obj.dtype.width
length = obj.get_aligned_length()
space_size = align_space(width, length, chunk_size)
default_global_addr = storage_used
obj.set_global_index(global_index)
obj.set_local_index(0)
obj.set_default_global_addr(default_global_addr)
obj.set_default_local_addr(0)
i = (("output (%s) %s "
"(size: %s, dtype: %s, shape: %s, "
"alignment: %d words (%d bytes)), "
"aligned shape: %s") %
(obj.__class__.__name__,
"'%s'" % obj.name if obj.name is not None else 'None',
size_str(space_size),
obj.dtype.to_str() if obj.dtype is not None else 'None',
(str(obj.shape)
if isinstance(obj.shape, (tuple, list)) else '()'),
obj.get_word_alignment(),
bt.to_byte(obj.get_word_alignment() * obj.get_ram_width()),
(str(tuple(obj.get_aligned_shape()))
if isinstance(obj.shape, (tuple, list)) else '()')))
global_mem_map[(default_global_addr,
default_global_addr + space_size - 1)] = i
if config['use_map_ram']:
global_addr_map[global_index] = default_global_addr
if not config['use_map_ram']:
map_regs[global_index].initval = default_global_addr
global_map_info[global_index] = i
global_index += 1
storage_used += space_size
# input (placeholder)
.. versionchanged:: 3.0
The *rootUEP* parameter has been renamed into *root_uep*.
filters : Filters
An instance of the Filters (see :ref:`FiltersClassDescr`) class that
provides information about the desired I/O filters applicable to the
leaves that hang directly from the *root group*, unless other filter
properties are specified for these leaves. Besides, if you do not
specify filter properties for child groups, they will inherit these
ones, which will in turn propagate to child nodes.
Notes
-----
In addition, it recognizes the (lowercase) names of parameters
present in :file:`tables/parameters.py` as additional keyword
arguments.
See :ref:`parameter_files` for a detailed info on the supported
parameters.
.. rubric:: File attributes
.. attribute:: filename
The name of the opened file.
.. attribute:: format_version
The PyTables version number of this file.
.. attribute:: isopen
True if the underlying file is open, false otherwise.
.. attribute:: mode
The mode in which the file was opened.
.. attribute:: root
The *root* of the object tree hierarchy (a Group instance).
.. attribute:: root_uep
The UEP (user entry point) group name in the file (see
the :func:`open_file` function).
.. versionchanged:: 3.0
The *rootUEP* attribute has been renamed into *root_uep*.
"""
# The top level kinds. Group must go first!
_node_kinds = ('Group', 'Leaf', 'Link', 'Unknown')
@property
def title(self):
"""The title of the root group in the file."""
return self.root._v_title
@title.setter
def title(self, title):
self.root._v_title = title
@title.deleter
def title(self):
del self.root._v_title
@property
def filters(self):
"""Default filter properties for the root group
(see :ref:`FiltersClassDescr`)."""
return self.root._v_filters
@filters.setter
def filters(self, filters):
self.root._v_filters = filters
@filters.deleter
def filters(self):
del self.root._v_filters
@property
def open_count(self):
"""The number of times this file handle has been opened.
.. versionchanged:: 3.1
The mechanism for caching and sharing file handles has been
removed in PyTables 3.1. Now this property should always
be 1 (or 0 for closed files).
.. deprecated:: 3.1
"""
return self._open_count
def __init__(self, filename, mode="r", title="",
root_uep="/", filters=None, **kwargs):
self.filename = filename
"""The name of the opened file."""
self.mode = mode
"""The mode in which the file was opened."""
if mode not in ('r', 'r+', 'a', 'w'):
raise ValueError("invalid mode string ``%s``. Allowed modes are: "
"'r', 'r+', 'a' and 'w'" % mode)
# Get all the parameters in parameter file(s)
params = {k: v for k, v in parameters.__dict__.items()
if k.isupper() and not k.startswith('_')}
# Update them with possible keyword arguments
if [k for k in kwargs if k.isupper()]:
warnings.warn("The use of uppercase keyword parameters is "
"deprecated", DeprecationWarning)
kwargs = {k.upper(): v for k, v in kwargs.items()}
params.update(kwargs)
# If MAX_ * _THREADS is not set yet, set it to the number of cores
# on this machine.
if params['MAX_NUMEXPR_THREADS'] is None:
params['MAX_NUMEXPR_THREADS'] = detect_number_of_cores()
if params['MAX_BLOSC_THREADS'] is None:
params['MAX_BLOSC_THREADS'] = detect_number_of_cores()
self.params = params
# Now, it is time to initialize the File extension
self._g_new(filename, mode, **params)
# Check filters and set PyTables format version for new files.
new = self._v_new
if new:
_checkfilters(filters)
self.format_version = format_version
"""The PyTables version number of this file."""
# The node manager must be initialized before the root group
# initialization, but the node_factory attribute is set only later
# because it is a bound method of the root group itself.
node_cache_slots = params['NODE_CACHE_SLOTS']
self._node_manager = NodeManager(nslots=node_cache_slots)
# For the moment Undo/Redo is not enabled.
self._undoEnabled = False
# Set the flag to indicate that the file has been opened.
# It must be set before opening the root group
# to allow some basic access to its attributes.
self.isopen = 1
"""True if the underlying file is open, False otherwise."""
# Append the name of the file to the global dict of files opened.
_open_files.add(self)
# Set the number of times this file has been opened to 1
self._open_count = 1
# Get the root group from this file
self.root = root = self.__get_root_group(root_uep, title, filters)
"""The *root* of the object tree hierarchy (a Group instance)."""
# Complete the creation of the root node
# (see the explanation in ``RootGroup.__init__()``.)
root._g_post_init_hook()
self._node_manager.node_factory = self.root._g_load_child
# Save the PyTables format version for this file.
if new:
if params['PYTABLES_SYS_ATTRS']:
root._v_attrs._g__setattr(
'PYTABLES_FORMAT_VERSION', format_version)
# If the file is old, and not opened in "read-only" mode,
# check if it has a transaction log
if not new and self.mode != "r" and _trans_group_path in self:
# It does. Enable the undo.
self.enable_undo()
# Set the maximum number of threads for Numexpr
ne.set_vml_num_threads(params['MAX_NUMEXPR_THREADS'])
def __get_root_group(self, root_uep, title, filters):
"""Returns a Group instance which will act as the root group in the
hierarchical tree.
If file is opened in "r", "r+" or "a" mode, and the file already
exists, this method dynamically builds a python object tree
emulating the structure present on file.
"""
self._v_objectid = self._get_file_id()
if root_uep in [None, ""]:
root_uep = "/"
# Save the User Entry Point in an instance attribute
self.root_uep = root_uep
new = self._v_new
# Get format version *before* getting the object tree
if not new:
# Firstly, get the PyTables format version for this file
self.format_version = utilsextension.read_f_attr(
self._v_objectid, 'PYTABLES_FORMAT_VERSION')
if not self.format_version:
# PYTABLES_FORMAT_VERSION attribute is not present
self.format_version = "unknown"
self._isPTFile = False
elif not isinstance(self.format_version, str):
# system attributes should always be str
self.format_version = self.format_version.decode('utf-8')
# Create new attributes for the root Group instance and
# create the object tree
return RootGroup(self, root_uep, title=title, new=new, filters=filters)
def _get_or_create_path(self, path, create):
"""Get the given `path` or create it if `create` is true.
If `create` is true, `path` *must* be a string path and not a
node, otherwise a `TypeError` will be raised.
"""
if create:
return self._create_path(path)
else:
return self.get_node(path)
def _create_path(self, path):
"""Create the groups needed for the `path` to exist.
The group associated with the given `path` is returned.
"""
if not hasattr(path, 'split'):
raise TypeError("when creating parents, parent must be a path")
if path == '/':
return self.root
parent, create_group = self.root, self.create_group
for pcomp in path.split('/')[1:]:
try:
child = parent._f_get_child(pcomp)
except NoSuchNodeError:
child = create_group(parent, pcomp)
parent = child
return parent
def create_group(self, where, name, title="", filters=None,
createparents=False):
"""Create a new group.
Parameters
----------
where : str or Group
The parent group from which the new group will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new group.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute on
disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters applicable
to the leaves that hang directly from this new group (unless other
filter properties are specified for these leaves). Besides, if you
do not specify filter properties for its child groups, they will
inherit these ones.
createparents : bool
Whether to create the needed groups for the parent
path to exist (not done by default).
See Also
--------
Group : for more information on groups
"""
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
return Group(parentnode, name,
title=title, new=True, filters=filters)
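# Illustrative usage (h5file is a hypothetical open File instance):
# group = h5file.create_group('/', 'detector', title='Detector information')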
def create_table(self, where, name, description=None, title="",
filters=None, expectedrows=10_000,
chunkshape=None, byteorder=None,
createparents=False, obj=None, track_times=True):
"""Create a new table with the given name in where location.
Parameters
----------
where : str or Group
The parent group from which the new table will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new table.
description : Description
This is an object that describes the table, i.e. how
many columns it has, their names, types, shapes, etc. It
can be any of the following:
* *A user-defined class*: This should inherit from the
IsDescription class (see :ref:`IsDescriptionClassDescr`)
where table fields are specified.
* *A dictionary*: For example, when you do not know
beforehand which structure your table will have.
* *A Description instance*:
<reponame>groschovskiy/vroom
"""Vroom vim management."""
import ast
from io import StringIO
import json
import re
import subprocess
import tempfile
import time
# Regex for quoted python string literal. From pyparsing.quotedString.reString.
QUOTED_STRING_RE = re.compile(r'''
(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|
(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')
''', re.VERBOSE)
# Vroom has been written such that this data *could* go into a separate .vim
# file, and that would be great. However, python distutils (believe it or not)
# makes it extraordinarily tough to distribute custom files with your python
# modules. It's both difficult to know where they go and difficult to allow them
# to be read. If the user does a sudo install, distutils has no way to make the
# .vim file actually readable and vroom dies from permission errors.
# So screw you, python. I'll just hardcode it.
_, CONFIGFILE = tempfile.mkstemp()
with open(CONFIGFILE, 'w') as configfile:
configfile.write("""
" Prevents your vroom tests from doing nasty things to your system.
set noswapfile
" Hidden function to execute a command and return the output.
" Useful for :messages
function! VroomExecute(command)
redir => l:output
silent! execute a:command
redir end
return l:output
endfunction
" Hidden function to reset a test.
function! VroomClear()
stopinsert
silent! bufdo! bdelete!
endfunction
" Hidden function to dump an error into vim.
function! VroomDie(output)
let g:vroom_error = a:output
let g:vroom_error .= "\\n:tabedit $VROOMFILE to edit the test file."
let g:vroom_error .= "\\nThis output is saved in g:vroom_error."
let g:vroom_error .= "\\nQuit vim when you're done."
echo g:vroom_error
endfunction
" Hidden function to kill vim, independent of insert mode.
function! VroomEnd()
qa!
endfunction
""")
def DeserializeVimValue(value_str):
"""Return string representation of value literal from vim.
Args:
value_str: A serialized string representing a simple value literal. The
serialization format is just the output of the vimscript string() func.
Raises:
BadVimValue if the value could not be deserialized.
"""
if not value_str:
return None
# Translate some vimscript idioms to python before evaling as python literal.
# Vimscript strings represent backslashes literally.
value_str = value_str.replace('\\', '\\\\').replace('\n', '\\n')
# Replace "''" inside strings with "\'".
def ToVimQuoteEscape(m):
val = m.group(0)
if val.startswith("'"):
return val[:1] + val[1:-1].replace("''", "\\'") + val[-1:]
else:
return val
value_str = re.sub(QUOTED_STRING_RE, ToVimQuoteEscape, value_str)
try:
return ast.literal_eval(value_str)
except SyntaxError:
raise BadVimValue(value_str)
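# Illustrative round trips (inputs are what vim's string() would emit):
# DeserializeVimValue("'it''s'") -> "it's"
# DeserializeVimValue("[1, 2]") -> [1, 2]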
class Communicator(object):
"""Object to communicate with a vim server."""
def __init__(self, args, env, writer):
self.writer = writer.commands
self.args = args
# The order of switches matters. '--clean' will prevent vim from loading any
# plugins from ~/.vim/, but it also sets '-u DEFAULTS'. We supply '-u' after
# to force vim to take our '-u' value (while still avoiding plugins).
self.start_command = [
'vim',
'--clean',
'-u', args.vimrc,
'--servername', args.servername,
'-c', 'set shell=' + args.shell,
'-c', 'source %s' % CONFIGFILE]
self.env = env
self._cache = {}
def Start(self):
"""Starts vim."""
if not self._IsCurrentDisplayUsable():
# Try using explicit $DISPLAY value. This only affects vim's client/server
# connections and not how console vim appears.
original_display = self.env.get('DISPLAY')
self.env['DISPLAY'] = ':0'
if not self._IsCurrentDisplayUsable():
# Restore original display value if ":0" doesn't work, either.
if original_display is None:
del self.env['DISPLAY']
else:
self.env['DISPLAY'] = original_display
# TODO(dbarnett): Try all values from /tmp/.X11-unix/, etc.
# We do this separately from __init__ so that if it fails, vroom.runner
# still has a _vim attribute it can query for details.
self.process = subprocess.Popen(self.start_command, env=self.env)
time.sleep(self.args.startuptime)
if self.process.poll() is not None and self.process.poll() != 0:
# If vim exited this quickly, it probably means we passed a switch it
# doesn't recognize. Try again without the '--clean' switch since this is
# new in 8.0.1554+.
self.start_command.remove('--clean')
self.process = subprocess.Popen(self.start_command, env=self.env)
time.sleep(self.args.startuptime)
def _IsCurrentDisplayUsable(self):
"""Check whether vim fails using the current configured display."""
try:
self.Ask('1')
except NoDisplay:
return False
except Quit:
# Any other error means the display setting is fine (assuming vim didn't
# fail before it checked the display).
pass
return True
def Communicate(self, command, extra_delay=0):
"""Sends a command to vim & sleeps long enough for the command to happen.
Args:
command: The command to send.
extra_delay: Delay in excess of --delay
Raises:
Quit: If vim quit unexpectedly.
"""
self.writer.Log(command)
self.TryToSay([
'vim',
'--servername', self.args.servername,
'--remote-send', command])
self._cache = {}
time.sleep(self.args.delay + extra_delay)
def Ask(self, expression):
"""Asks vim for the result of an expression.
Args:
expression: The expression to ask for.
Returns:
Return value from vim, or None if vim had no output.
Raises:
Quit if vim quit unexpectedly.
BadVimValue if vim returns a value that can't be deserialized.
"""
try:
out = self.TryToSay([
'vim',
'--servername', self.args.servername,
'--remote-expr', 'string(%s)' % expression])
except ErrorOnExit as e:
if e.error_text.startswith('E449:'): # Invalid expression received
raise InvalidExpression(expression)
raise
# Vim adds a trailing newline to --remote-expr output if there isn't one
# already.
return DeserializeVimValue(out.rstrip())
def GetCurrentLine(self):
"""Figures out what line the cursor is on.
Returns:
The cursor's line.
"""
if 'line' not in self._cache:
lineno = self.Ask("line('.')")
try:
self._cache['line'] = lineno
except (ValueError, TypeError):
raise ValueError("Vim lost the cursor, it thinks it's '%s'." % lineno)
return self._cache['line']
def GetBufferLines(self, number):
"""Gets the lines in the requesed buffer.
Args:
number: The buffer number to load. SHOULD NOT be a member of
SpecialBuffer, use GetMessages if you want messages. Only works on
real buffers.
Returns:
The buffer lines.
"""
if number not in self._cache:
num = "'%'" if number is None else number
cmd = "getbufline(%s, 1, '$')" % num
self._cache[number] = self.Ask(cmd)
return self._cache[number]
def GetMessages(self):
"""Gets the vim message list.
Returns:
The message list.
"""
# This prevents GetMessages() from being called twice in a row.
# (When checking a (msg) output line, first we check the messages then we
# load the buffer.) Cleans up --dump-commands a bit.
if 'msg' not in self._cache:
cmd = "VroomExecute('silent! messages')"
# Add trailing newline as workaround for http://bugs.python.org/issue7638.
self._cache['msg'] = (self.Ask(cmd) + '\n').splitlines()
return self._cache['msg']
def Clear(self):
self.writer.Log(None)
self.Ask('VroomClear()')
self._cache = {}
def Output(self, writer):
"""Send the writer output to the user."""
if hasattr(self, 'process'):
buf = StringIO()
writer.Write(buf)
self.Ask('VroomDie({})'.format(VimscriptString(buf.getvalue())))
buf.close()
def Quit(self):
"""Tries to cleanly quit the vim process.
Returns:
True if vim successfully quit or wasn't running, False otherwise.
"""
# We might die before the process is even set up.
if hasattr(self, 'process'):
if self.process.poll() is None:
# Evaluate our VroomEnd function as an expression instead of issuing a
# command, which works even if vim isn't in normal mode.
try:
self.Ask('VroomEnd()')
except Quit:
# Probably failed to quit. If vim is still running, we'll return False
# below.
pass
if self.process.poll() is None:
return False
else:
del self.process
return True
def Kill(self):
"""Kills the vim process."""
# We might die before the process is even set up.
if hasattr(self, 'process'):
if self.process.poll() is None:
self.process.kill()
del self.process
def TryToSay(self, cmd):
"""Execute a given vim process.
Args:
cmd: The command to send.
Returns:
stdout from vim.
Raises:
Quit: If vim quits unexpectedly.
"""
if hasattr(self, 'process') and self.process.poll() is not None:
raise ServerQuit()
# Override messages generated by the vim client process (in particular, the
# "No display" message) to be in English so that we can recognise them.
# We do this by setting both LC_ALL (per POSIX) and LANGUAGE (a GNU gettext
# extension) to en_US.UTF-8. (Setting LANG=C would disable localisation
# entirely, but has the bad side-effect of also setting the character
# encoding to ASCII, which breaks when the remote side sends a non-ASCII
# character.)
#
# Note that this does not affect messages from the vim server process,
# which should be matched using error codes as usual.
env = self.env.copy()
env.update({
'LANGUAGE': 'en_US.UTF-8',
'LC_ALL': 'en_US.UTF-8'})
out, err = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env).communicate()
if out is None:
raise Quit('Vim could not respond to query "%s"' % ' '.join(cmd[3:]))
if err:
error_text = err.decode('utf-8').rstrip('\n')
if error_text == 'No display: Send expression failed.':
raise NoDisplay(self.env.get('DISPLAY'))
else:
raise ErrorOnExit(error_text)
return out.decode('utf-8')
def VimscriptString(string):
"""Escapes & quotes a string for usage as a vimscript string literal.
Escaped such that \\n will mean newline (in other words double-quoted
vimscript strings are used).
>>> VimscriptString('Then (s)he said\\n"Hello"')
'"Then (s)he said\\\\n\\\\"Hello\\\\""'
Args:
string: The string to escape.
Returns:
shell-style wildcards to
filter directory names.
:param list exclude_dirs: An optional list of patterns used to
exclude directories
:param list exclude_files: An optional list of patterns used to
exclude files.
:param list namespaces: A list of namespaces to include in
the resource information.
:param page: May be a tuple of ``(<start>, <end>)`` indexes to
return an iterator of a subset of the resource info, or
``None`` to iterate over the entire directory. Paging a
directory scan may be necessary for very large directories.
:type page: tuple or None
:return: An iterator of :class:`~fs.info.Info` objects.
:rtype: iterator
This method enhances :meth:`~fs.base.FS.scandir` with additional
filtering functionality.
"""
resources = self.scandir(path, namespaces=namespaces)
filters = []
def match_dir(patterns, info):
"""Pattern match info.name"""
return info.is_file or self.match(patterns, info.name)
def match_file(patterns, info):
"""Pattern match info.name"""
return info.is_dir or self.match(patterns, info.name)
def exclude_dir(patterns, info):
"""Pattern match info.name"""
return info.is_file or not self.match(patterns, info.name)
def exclude_file(patterns, info):
"""Pattern match info.name"""
return info.is_dir or not self.match(patterns, info.name)
if files:
filters.append(partial(match_file, files))
if dirs:
filters.append(partial(match_dir, dirs))
if exclude_dirs:
filters.append(partial(exclude_dir, exclude_dirs))
if exclude_files:
filters.append(partial(exclude_file, exclude_files))
if filters:
resources = (
info
for info in resources
if all(_filter(info) for _filter in filters)
)
iter_info = iter(resources)
if page is not None:
start, end = page
iter_info = itertools.islice(iter_info, start, end)
return iter_info
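# Illustrative usage, assuming this method is FS.filterdir and my_fs is an
# opened filesystem object:
# for info in my_fs.filterdir('/', files=['*.py'], exclude_dirs=['.git']):
#     print(info.name)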
def getbytes(self, path):
"""
Get the contents of a file as bytes.
:param str path: A path to a readable file on the filesystem.
:returns: file contents
:rtype: bytes
:raises fs.errors.ResourceNotFound: If ``path`` does not
exist.
"""
with closing(self.open(path, mode='rb')) as read_file:
contents = read_file.read()
return contents
def gettext(self, path, encoding=None, errors=None, newline=''):
"""
Get the contents of a file as a string.
:param str path: A path to a readable file on the filesystem.
:param str encoding: Encoding to use when reading contents in
text mode.
:param str errors: Unicode errors parameter.
:param str newline: Newlines parameter.
:returns: file contents.
:raises fs.errors.ResourceNotFound: If ``path`` does not
exist.
"""
with closing(
self.open(
path,
mode='rt',
encoding=encoding,
errors=errors,
newline=newline)
) as read_file:
contents = read_file.read()
return contents
def getmeta(self, namespace="standard"):
"""
Get meta information regarding a filesystem.
:param keys: A list of keys to retrieve, or None for all keys.
:type keys: list or None
:param str namespace: The meta namespace (default is
`"standard"`).
:rtype: dict
Meta information is associated with a *namespace* which may be
specified with the `namespace` parameter. The default namespace,
``"standard"``, contains common information regarding the
filesystem's capabilities. Some filesystems may provide other
namespaces, which expose less common, or implementation specific
information. If a requested namespace is not supported by
a filesystem, then an empty dictionary will be returned.
The ``"standard"`` namespace supports the following keys:
=================== ============================================
key Description
------------------- --------------------------------------------
case_insensitive True if this filesystem is case insensitive.
invalid_path_chars A string containing the characters that
may not be used on this filesystem.
max_path_length Maximum number of characters permitted in a
path, or None for no limit.
max_sys_path_length Maximum number of characters permitted in
a sys path, or None for no limit.
network True if this filesystem requires a network.
read_only True if this filesystem is read only.
supports_rename True if this filesystem supports an
os.rename operation.
=================== ============================================
.. note::
Meta information is constant for the lifetime of the
filesystem, and may be cached.
"""
if namespace == 'standard':
meta = self._meta
else:
meta = {}
return meta
def getsize(self, path):
"""
Get the size (in bytes) of a resource.
:param str path: A path to a resource.
:rtype: int
The *size* of a file is the total number of readable bytes,
which may not reflect the exact number of bytes of reserved
disk space (or other storage medium).
The size of a directory is the number of bytes of overhead
used to store the directory entry.
"""
size = self.getdetails(path).size
return size
def getsyspath(self, path):
"""
Get an *system path* to a resource.
:param str path: A path on the filesystem.
:rtype: str
:raises fs.errors.NoSysPath: If there is no corresponding system path.
A system path is one recognized by the OS, that may be used
outside of PyFilesystem (in an application or a shell for
example). This method will get the corresponding system path
that would be referenced by ``path``.
Not all filesystems have associated system paths. Network and
memory based filesystems, for example, may not physically store
data anywhere the OS knows about. It is also possible for some
paths to have a system path, whereas others don't.
If ``path`` doesn't have a system path,
a :class:`~fs.errors.NoSysPath` exception will be thrown.
.. note::
A filesystem may return a system path even if no
resource is referenced by that path -- as long as it can
be certain what that system path would be.
"""
raise errors.NoSysPath(path=path)
def gettype(self, path):
"""
Get the type of a resource.
:param path: A path in the filesystem.
:returns: :class:`~fs.ResourceType`
The type of a resource is an integer that identifies what
the resource references. The standard type integers may be one
of the values in the :class:`~fs.ResourceType` enumerations.
The most common resource types, supported by virtually all
filesystems are ``directory`` (1) and ``file`` (2), but the
following types are also possible:
=================== ======
ResourceType value
------------------- ------
unknown 0
directory 1
file 2
character 3
block_special_file 4
fifo 5
socket 6
symlink 7
=================== ======
Standard resource types are positive integers, negative values
are reserved for implementation specific resource types.
"""
resource_type = self.getdetails(path).type
return resource_type
def geturl(self, path, purpose='download'):
"""
Get a URL to the given resource.
:param str path: A path on the filesystem
:param str purpose: A short string that indicates which URL to
retrieve for the given path (if there is more than one). The
default is `'download'`, which should return a URL that
serves the file. Other filesystems may support other values
for ``purpose``.
:returns: A URL.
:rtype: str
:raises fs.errors.NoURL: If the path does not map to a URL.
"""
raise errors.NoURL(path, purpose)
def hassyspath(self, path):
"""
Check if a path maps to a system path.
:param str path: A path on the filesystem
:rtype: bool
"""
has_sys_path = True
try:
self.getsyspath(path)
except errors.NoSysPath:
has_sys_path = False
return has_sys_path
def hasurl(self, path, purpose='download'):
"""
Check if a path has a corresponding URL.
:param str path: A path on the filesystem
:param str purpose: A purpose parameter, as given in
:meth:`~fs.base.FS.geturl`.
:rtype: bool
"""
has_url = True
try:
self.geturl(path, purpose=purpose)
except errors.NoURL:
has_url = False
return has_url
def isclosed(self):
"""Check if the filesystem is closed."""
return getattr(self, '_closed', False)
def isdir(self, path):
"""Check a path exists and is a directory."""
try:
return self.getinfo(path).is_dir
except errors.ResourceNotFound:
return False
def isempty(self, path):
"""
Check if a directory is empty (contains no files or
directories).
:param str path: A directory path.
:rtype: bool
"""
return next(iter(self.scandir(path)), None) is None
def isfile(self, path):
"""Check a path exists and is a file."""
try:
return not self.getinfo(path).is_dir
except errors.ResourceNotFound:
return False
def islink(self, path):
"""
Check if a path is a symlink.
:param str path: A path on the filesystem.
:rtype: bool
"""
self.getinfo(path)
return False
def lock(self):
"""
Get a context manager that *locks* the filesystem.
Locking a filesystem gives a thread exclusive access to it.
Other threads will block until the threads with the lock has
left the context manager. Here's how you would use it::
with my_fs.lock(): # May block
# code here has exclusive access to the filesystem
It is a good idea to put a lock around any operations that you
would like to be *atomic*. For instance if you are copying
files, and you don't want another thread to delete or modify
anything while the copy is in progress.
Locking with this method is only required for code that calls
multiple filesystem methods.
and 7 for n={} are: {}'.format(n, ','.join(values)))
# empty list
del values[:]
# -----------------------------------------
# Question 66
# Please write assert statements to verify that every number in the list [2,4,6,8] is even.
#
# Hints:
# Use "assert expression" to make assertion.
def check_list_evens(list):
for x in list:
assert x%2 == 0, 'Not all numbers in list are even'
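# Example: check_list_evens([2, 4, 6, 8]) passes silently,
# while check_list_evens([2, 3]) raises AssertionError.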
# -----------------------------------------
# Question 67
# Please write a program which accepts basic mathematic expression from console and print the evaluation result.
#
# Example:
# If the following string is given as input to the program:
#
# 35+3
#
# Then, the output of the program should be:
#
# 38
#
# Hints:
# Use eval() to evaluate an expression.
def simpe_math_calc():
print('Result: ', eval(input('Enter simple math formula: ')))
# -----------------------------------------
# Question 68
# Please write a binary search function which searches an item in a sorted list. The function should return the index
# of element to be searched in the list.
#
# Hints:
# Use if/elif to deal with conditions.
def binary_search(sorted_list, item):
first = 0
last = len(sorted_list) - 1
while first <= last:
midpoint = (first + last)//2
if sorted_list[midpoint] == item:
return midpoint
elif sorted_list[midpoint] > item:
last = midpoint - 1 # Since we have already examined the midpoint
else:
first = midpoint + 1 # Since we have already examined the midpoint
return None
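# Examples: binary_search([1, 3, 5, 7, 9], 7) -> 3
#           binary_search([1, 3, 5, 7, 9], 4) -> None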
# -----------------------------------------
# Question 69
# Please generate a random float where the value is between 10 and 100 using Python math module.
#
# Hints:
# Use random.random() to generate a random float in [0,1].
def rand_10to100():
import random
# Solution 1
# return 10 + random.random() * 90
# or Solution 2
return random.uniform(10, 100)
# -----------------------------------------
# Question 70
# Please write a program to output a random even number between 0 and 10 inclusive using random module and list
# comprehension.
#
# Hints:
# Use random.choice() to a random element from a list.
def rand_from_range(n1, n2):
import random
return random.choice([i for i in range(n1, n2+1) if not i % 2])
# -----------------------------------------
# Question 71
# Please write a program to output a random number, which is divisible by 5 and 7, between 0 and 10 inclusive using
# random module and list comprehension.
#
# Hints:
# Use random.choice() to a random element from a list.
def rand_from_range2(n1, n2):
import random
return random.choice([i for i in range(n1, n2+1) if i%5==0 and i%7==0])
# -----------------------------------------
# Question 72
# Please write a program to generate a list with 5 random numbers between 100 and 200 inclusive.
#
# Hints:
# Use random.sample() to generate a list of random values.
def rand_k_nums_from_range(n1, n2, rn):
import random
return random.sample([i for i in range(n1, n2+1)], rn)
# -----------------------------------------
# Question 73
# Please write a program to randomly generate a list with 5 numbers, which are divisible by 5 and 7 , between 1 and
# 1000 inclusive.
#
# Hints:
# Use random.sample() to generate a list of random values.
def rand_5_from_range(n1, n2):
import random
return random.sample([i for i in range(n1, n2+1) if i%5==0 and i%7==0], 5)
# -----------------------------------------
# Question 74
# Please write a program to randomly print a integer number between 7 and 15 inclusive.
#
# Hints:
# Use random.randrange() to a random integer in a given range.
def rand_int_in_range(n1, n2):
import random
return random.randint(n1, n2)
# -----------------------------------------
# Question 75
# Please write a program to compress and decompress the string "hello world!hello world!hello world!hello world!".
#
# Hints:
# Use zlib.compress() and zlib.decompress() to compress and decompress a string.
def cmpress_decomprs():
import zlib
data = 'hello world!hello world!hello world!hello world!'
compressed_data = zlib.compress(data.encode('utf-8'), -1)
print(zlib.decompress(compressed_data))
# -----------------------------------------
# Question 76
# Please write a program to print the running time of execution of "1+1" for 100 times.
#
# Hints:
# Use timeit() function to measure the running time.
def time_the_f():
import timeit
print(timeit.timeit("1+1", number=100))
# -----------------------------------------
# Question 77
# Please write a program to shuffle and print the list [3,6,7,8].
#
# Hints:
# Use shuffle() function to shuffle a list.
def list_shuffle(l):
from random import shuffle
print('shuffling')
shuffle(l)
print(l)
# -----------------------------------------
# Question 78
# write a program to generate all sentences where subject is in ["I", "You"] and verb is in ["Play", "Love"] and the
# object is in ["Hockey","Football"].
#
# Hints:
# Use list[index] notation to get a element from a list.
def sentence_generator(subj, verb, obj):
for s in subj:
for v in verb:
for o in obj:
print(s + ' ' + v + ' ' + o)
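# Example: sentence_generator(["I", "You"], ["Play"], ["Hockey"]) prints
# "I Play Hockey" and "You Play Hockey".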
# -----------------------------------------
# Question 79
# write a program to print the list after removing the even numbers in [5,6,77,45,22,12,24].
#
# Hints:
# Use list comprehension to delete a bunch of element from a list.
def print_remove_evens(l):
print([x for x in l if x % 2])
# -----------------------------------------
# Question 80
# By using list comprehension, please write a program to print the list after removing the numbers which are
# divisible by 5 and 7 in [12,24,35,70,88,120,155].
#
# Hints:
# Use list comprehension to delete a bunch of element from a list.
def print_remove_div_by5n7(l):
print([x for x in l if (x % 5 or x % 7)])
# -----------------------------------------
# Question 81
# By using list comprehension, please write a program to print the list after removing the 0th, 2nd, 4th, 6th numbers
# in [12,24,35,70,88,120,155].
#
# Hints:
# Use list comprehension to delete a bunch of element from a list.
# Use enumerate() to get (index, value) tuple
def print_lst_remove_at_position(l):
print([x for (i, x) in enumerate(l) if i % 2])
# -----------------------------------------
# Question 82
# By using list comprehension, please write a program to generate a 3*5*8 3D array whose each element is 0.
#
# Hints:
# Use list comprehension to make an array.
def gen_list_3d_array(d1, d2, d3):
return [[[0 for col in range(d3)] for col in range(d2)] for row in range(d1)]
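# Example: gen_list_3d_array(2, 2, 2) -> [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]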
# -----------------------------------------
# Question 83
# By using list comprehension, please write a program to print the list after removing the 0th,4th,5th numbers
# in [12,24,35,70,88,120,155].
#
# Hints:
# Use list comprehension to delete a bunch of element from a list.
# Use enumerate() to get (index, value) tuple.
def print_remv_at_posit(l, rl):
print([x for (i, x) in enumerate(l) if i not in rl])
# -----------------------------------------
# Question 84
# By using list comprehension, please write a program to print the list after removing the value 24
# in [12,24,35,24,88,120,155].
#
# Hints:
# Use list's remove method to delete a value.
def print_l_rm_val(l, val):
l.remove(val)
print(l)
# -----------------------------------------
# Question 85
# With two given lists [1,3,6,78,35,55] and [12,24,35,24,88,120,155], write a program to make a list whose elements are
# the intersection of the two given lists.
#
# Hints:
# Use set() and "&=" to do set intersection operation.
def intersect(la, lb):
return list(set(la) & set(lb))
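# Example: intersect([1, 3, 6, 78, 35, 55], [12, 24, 35, 24, 88, 120, 155]) -> [35]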
# -----------------------------------------
# Question 86
# With a given list [12,24,35,24,88,120,155,88,120,155], write a program to print this list after removing all
# duplicate values with original order reserved.
#
# Hints:
# Use set() to store a number of values without duplicate.
def rmv_lst_dublicts(l):
seen = set()
unique_l = []
for x in l:
if x not in seen:
unique_l.append(x)
seen.add(x)
return unique_l
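# Example: rmv_lst_dublicts([12, 24, 35, 24, 88, 120, 155, 88, 120, 155])
# -> [12, 24, 35, 88, 120, 155]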
# -----------------------------------------
# Question 87
# Define a class Person and its two child classes: Male and Female. All classes have a method "getGender" which can
# print "Male" for Male class and "Female" for Female class.
#
# Hints:
# Use Subclass(Parentclass) to define a child class.
class Person:
def getGender(self):
return 'Unknown'
class Male(Person):
def getGender(self):
return 'Male'
class Female(Person):
def getGender(self):
return 'Female'
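# Example: Male().getGender() -> 'Male', Female().getGender() -> 'Female',
# Person().getGender() -> 'Unknown'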
# -----------------------------------------
# Question 88
# Please write a program which count and print the numbers of each character in a string input by console.
#
# Example:
# If the following string is given as input to the program:
#
# abcdefgabc
#
# Then, the output of the program should be:
#
# a,2
# c,2
# b,2
# e,1
# d,1
# g,1
# f,1
#
# Hints:
# Use dict to store key/value pairs.
# Use dict.get() method to lookup a key with default value.
def char_freq_strng():
d = {}
s = input('Enter string: ')
for c in s:
d[c] = d.get(c, 0) + 1
for k, v in d.items():
print(k, ',', v)
# -----------------------------------------
# Question 89
# Please write a program which accepts a string from console and print it in reverse order.
#
# Example:
# If the following string is given as input to the program:
#
# rise to vote sir
#
# Then, the output of the program should be:
#
# ris etov ot esir
#
# Hints:
# Use list[::-1] to iterate a list in a reverse order.
def print_s_reverse():
s = input('Enter string: ')
print(s[::-1])
# -----------------------------------------
# Question 90
# Please write a program which accepts a string from console and print the characters that have even indexes.
<reponame>HenrikPilz/BMEcatConverter<filename>src/importer/xml/bmecatImportHandler.py
'''
Created on 05.05.2017
@author: henrik.pilz
'''
from array import array
from datetime import datetime
from xml.sax import handler
import logging
from datamodel import Feature
from datamodel import FeatureSet
from datamodel import Mime
from datamodel import Price
from datamodel import PriceDetails
from datamodel import Product
from datamodel import Reference
from datamodel import TreatmentClass
from datamodel import ValidatingObject
from datamodel import Variant
from transformer import SeparatorTransformer
class BMEcatImportHandler(handler.ContentHandler):
'''
    Handler for the SAX2 parser; reads BMEcats in the formats 1.01, 1.2, 2005 and 2005.1 as well as all kinds of ETIM.
'''
    ''' all registered start-element handlers '''
__startElementHandler = { "article" : "createProduct",
"article_details" : "createProductDetails",
"order_details" : "createOrderDetails",
"price_details" : "createPriceDetails",
"price" : "createPrice",
"mime" : "createMime",
"mime_info" : "startMimeInfo",
"datetime" : "startDateTime",
"article_features" : "createFeatureSet",
"feature" : "createFeature",
"special_treatment_class" : "createTreatmentClass",
"article_reference" : "createReference",
"variants" : "createFeatureVariantSet",
"variant" : "createFeatureVariant",
"description_long" : "_startDescription",
"description" : "_startDescription" }
    ''' possible aliases for the different BMEcat variants '''
__alias = {
"product" : "article",
"product_details" : "article_details",
"supplier_pid" : "supplier_aid",
"supplier_alt_pid" : "supplier_alt_aid",
"manufacturer_pid" : "manufacturer_aid",
"buyer_pid" : "buyer_aid",
"article_order_details" : "order_details",
"article_price_details" : "price_details",
"article_price" : "price",
"product_features" : "article_features",
"international_pid" : "ean",
"product_order_details" : "order_details",
"product_price_details" : "price_details",
"product_price" : "price",
"product_reference" : "article_reference",
"prod_id_to" : "art_id_to",
"supplier_pid_supplement" : "supplier_aid_supplement"
}
    ''' all registered end-element handlers '''
__endElementHandler = {
"catalog_group_system" : "_resetAll",
"feature" : "saveFeature",
"article" : "saveProduct",
"mime" : "saveMime",
"variants" : "saveFeatureVariantSet",
"vorder" : "addFeatureVariantSetOrder",
"variant" : "addFeatureVariant",
"article_features" : "saveFeatureSet",
"special_treatment_class" : "saveTreatmentClass",
"article_reference" : "saveReference",
"price" : "savePrice",
"price_details" : "savePriceDetails",
"mime_info" : "endMimeInfo",
"datetime" : "endDateTime",
"article_details" : "endProductDetails",
"order_details" : "endOrderDetails",
"date" : "addDate",
                              # information attached to the current element
"territory" : ("_addAttributeToCurrentElement", "territory", False),
"keyword" : ("_addAttributeToCurrentElement", "keywords", False),
                              # article information
"supplier_aid" : ("_addAttributeToCurrentArticle", "productId", True),
"supplier_alt_aid" : ("_addAttributeToCurrentArticleDetails", "supplierAltId", False),
"buyer_aid" : ("_addAttributeToCurrentArticleDetails", "buyerId", False),
"manufacturer_aid" : ("_addAttributeToCurrentArticleDetails", "manufacturerArticleId", False),
"manufacturer_name" : ("_addAttributeToCurrentArticleDetails", "manufacturerName", False),
"ean" : ("_addAttributeToCurrentArticleDetails", "ean", False),
"description_long" : ("_addAttributeToCurrentArticleDetails", "description", False),
"description_short" : ("_addAttributeToCurrentArticleDetails", "title", False),
"delivery_time" : ("_addAttributeToCurrentArticleDetails", "deliveryTime", False),
"article_status" : ("_addAttributeToCurrentArticleDetails", "articleStatus", False),
                              # price information
"price_amount" : ("_addAttributeToCurrentPrice", "amount", False),
"tax" : ("_addAttributeToCurrentPrice", "tax", False),
"price_currency" : ("_addAttributeToCurrentPrice", "currency", False),
"price_factor" : ("_addAttributeToCurrentPrice", "factor", False),
"lower_bound" : ("_addAttributeToCurrentPrice", "lowerBound", False),
                              # order information
"order_unit" : ("_addAttributeToCurrentArticleOrderDetails", "orderUnit", False),
"content_unit" : ("_addAttributeToCurrentArticleOrderDetails", "contentUnit", False),
"no_cu_per_ou" : ("_addAttributeToCurrentArticleOrderDetails", "packingQuantity", False),
"price_quantity" : ("_addAttributeToCurrentArticleOrderDetails", "priceQuantity", False),
"quantity_min" : ("_addAttributeToCurrentArticleOrderDetails", "quantityMin", False),
"quantity_interval" : ("_addAttributeToCurrentArticleOrderDetails", "quantityInterval", False),
                              # image (mime) information
"mime_source" : ("_addAttributeToCurrentMime", "source", False),
"mime_type" : ("_addAttributeToCurrentMime", "mimeType", False),
"mime_descr" : ("_addAttributeToCurrentMime", "description", False),
"mime_alt" : ("_addAttributeToCurrentMime", "alternativeContent", False),
"mime_purpose" : ("_addAttributeToCurrentMime", "purpose", False),
"mime_order" : ("_addAttributeToCurrentMime", "order", False),
                              # feature (attribute) information
"fname" : ("_addAttributeToCurrentFeature", "name", False),
"fvalue" : ("_addAttributeToCurrentFeature", "values", False),
"fvalue_details" : ("_addAttributeToCurrentFeature", "valueDetails", False),
"funit" : ("_addAttributeToCurrentFeature", "unit", False),
"fdesc" : ("_addAttributeToCurrentFeature", "description", False),
                              # reference information
"art_id_to" : ("_addAttributeToCurrentReference", "supplierArticleId", False),
"reference_descr" : ("_addAttributeToCurrentReference", "description", False),
                              # feature set information
"supplier_aid_supplement" : ("_addAttributeToCurrentVariant", "productIdSuffix", False),
"reference_feature_system_name" : ("_addAttributeToCurrentFeatureSet", "referenceSystem", False),
"reference_feature_group_id" : ("_addAttributeToCurrentFeatureSet", "referenceGroupId", False),
"reference_feature_group_name" : ("_addAttributeToCurrentFeatureSet", "referenceGroupName", False) }
__fieldsToTransform = [ "amount", "tax", "factor"]
    ''' constructor '''
def __init__(self, dateFormat, separatorTransformer=SeparatorTransformer("detect")):
self.__dateFormat = dateFormat
self._separatorTransformer = separatorTransformer
'''articles by SKU and Product Structure as Value'''
self.articles = { "new" : [], "update" : [], "delete" : [], "failed" : [] }
self.__currentArticle = None
self.__currentPrice = None
self.__currentMime = None
self.__currentArticleDetails = None
self.__currentOrderDetails = None
self.__currentPriceDetails = None
self.__currentElement = None
self.__currentContent = ""
self.__dateType = None
self.__currentFeatureSet = None
self.__currentFeature = None
self.__currentTreatmentClass = None
self.__currentReference = None
self.__currentVariant = None
self.__currentVariantSet = None
self.__lineFeedToHTML = False
self.__currentArticleMode = "failed"
    ''' start the current XML element '''
def startElement(self, name, attrs):
self._workOnElement(name, attrs, True)
    ''' close the current XML element '''
def endElement(self, name):
self._workOnElement(name, None, False)
    ''' determine the handler that does the work '''
def _workOnElement(self, name, attrs, bOpen):
logging.debug("Call for Tag <" + name + ">")
method = None
try:
handlerInfo = self._determineTagHandlername(name, bOpen)
if handlerInfo is None:
self.__currentContent = ""
return
if isinstance(handlerInfo, (tuple)):
method = getattr(self, handlerInfo[0])
method(handlerInfo[1], handlerInfo[2])
else:
method = getattr(self, handlerInfo)
method(attrs)
self.__currentContent = ""
except AttributeError:
raise NotImplementedError("Class [{0}] does not implement [{1}]".format(self.__class__.__name__, method))
    ''' determine the handler name for the given XML element '''
def _determineTagName(self, tag, bOpen):
name = tag.lower()
if tag.lower() in self.__alias:
logging.debug("[{0}] '{1}' has an alias".format("start" if bOpen else "end", tag))
name = self.__alias[tag.lower()]
return name
def _determineTagHandlername(self, tag, bOpen):
name = self._determineTagName(tag, bOpen)
if bOpen:
return self._determineHandlername(name, self.__startElementHandler)
else:
return self._determineHandlername(name, self.__endElementHandler)
def _determineHandlername(self, name, handlerByName):
try:
return handlerByName[name]
except KeyError:
logging.debug("Call for Tag <" + name + "> FAILED:")
''' ---------------------------------------------------------------------'''
def _resetAll(self, attrs=None):
self.__currentArticle = None
self.__currentPrice = None
self.__currentMime = None
self.__currentPriceDetails = None
self.__currentElement = None
self.__currentContent = ""
self.__dateType = None
self.__currentFeatureSet = None
self.__currentFeature = None
self.__currentTreatmentClass = None
''' ---------------------------------------------------------------------'''
    ''' start of an article '''
def createProduct(self, attrs):
logging.debug("Anfang Produkt " + ", ".join(attrs.getNames()))
self._objectIsNone(self.__currentArticle,
"Fehler im BMEcat: Neuer Artikel soll erstellt " +
"werden. Es wird schon ein Artikel verarbeitet.",
True)
self.__currentArticle = Product()
self.__currentContent = ""
self.__currentElement = self.__currentArticle
if 'mode' in attrs.getNames():
self.__currentArticleMode = attrs.getValue('mode')
else:
self.__currentArticleMode = 'new'
logging.warning("Fehler im BMEcat: es wurde kein mode fuer den Artikel angegeben.")
    ''' save the current article '''
def saveProduct(self, attr=None):
logging.info("Produkt validieren: " + self.__currentArticle.productId)
self._objectIsNotNone(self.__currentArticle , "Es wurde kein aktuell zu bearbeitender Artikel gefunden.", True)
self.__currentArticle.validate(False)
logging.debug("Neues Produkt erstellt. Modus: " + self.__currentArticleMode)
self.articles[self.__currentArticleMode].append(self.__currentArticle)
logging.debug("Produktende")
self._resetAll()
''' ---------------------------------------------------------------------'''
def createProductDetails(self, attrs):
self._objectIsNotNone(self.__currentArticle,
"Artikeldetails sollen erstellt werden. Aber es ist kein Artikel vorhanden", True)
self._objectIsNone(self.__currentArticle.details,
"Fehler im BMEcat: Neue Artikeldetails sollen erstellt werden. Es werden schon Artikeldetails verarbeitet.", True)
self.__currentArticle.addDetails()
self.__currentArticleDetails = self.__currentArticle.details
self.__currentElement = self.__currentArticle.details
def endProductDetails(self, attrs=None):
self.__currentArticleDetails = None
self.__currentElement = self.__currentArticle
''' ---------------------------------------------------------------------'''
def createOrderDetails(self, attrs=None):
self._objectIsNotNone(self.__currentArticle,
"Bestelldetails sollen erstellt werden. Aber es ist kein Artikel vorhanden", True)
self._objectIsNone(self.__currentOrderDetails,
"Fehler im BMEcat: Neue Bestelldetails sollen erstellt werden. Es werden schon Bestelldetails verarbeitet.", True)
self.__currentArticle.addOrderDetails()
self.__currentOrderDetails = self.__currentArticle.orderDetails
def endOrderDetails(self, attrs=None):
self._objectIsNotNone(self.__currentArticle,
"Bestelldetails sollen gespeichert werden. Aber es ist kein Artikel vorhanden", True)
self.__currentOrderDetails = None
self.__currentElement = self.__currentArticle
''' ---------------------------------------------------------------------'''
def createPriceDetails(self, attrs):
self._objectIsNone(self.__currentPriceDetails,
"Fehler im BMEcat: Neue Preisdetails sollen erstellt werden. Es werden schon Preisdetails verarbeitet.", True)
self.__currentPriceDetails = PriceDetails()
self.__currentElement = self.__currentPriceDetails
def savePriceDetails(self, attrs):
self._objectIsNotNone(self.__currentArticle,
"Preisdetails sollen gespeichert werden. Aber es ist kein Artikel vorhanden", True)
self.__currentArticle.addPriceDetails(self.__currentPriceDetails, False)
self.__currentPriceDetails = None
self.__currentElement = None
''' ---------------------------------------------------------------------'''
    ''' start of a price '''
def createPrice(self, attrs):
self._objectIsNone(self.__currentPrice,
"Fehler im BMEcat: Neuer Preis soll erstellt werden. Es wird schon ein Preis verarbeitet.", True)
priceType = "other"
try:
priceType = attrs.getValue('price_type')
except KeyError as ke:
logging.warning(str(ke))
self.__currentPrice = Price(priceType)
self.__currentElement = self.__currentPrice
    ''' save the current price '''
def savePrice(self, attrs):
self._objectIsNotNone(self.__currentPriceDetails, "Preis soll gespeichert werden. Aber es sind keine Preisdetails vorhanden", True)
self.__currentPriceDetails.addPrice(self.__currentPrice, False)
self.__currentPrice = None
self.__currentElement = self.__currentPriceDetails
''' ---------------------------------------------------------------------'''
def startMimeInfo(self, attrs=None):
self.__currentElement = self.__currentArticle
self.__currentMime = None
def endMimeInfo(self, attrs=None):
self.__currentMime = None
self.__currentElement = None
''' ---------------------------------------------------------------------'''
    ''' start of an image (mime) '''
def createMime(self, attrs):
self._objectIsNone(self.__currentMime,
"Fehler im BMEcat: Neues Bild soll erstellt werden. Es wird schon ein Bild verarbeitet.",
True)
self.__currentMime = Mime()
    ''' save the current image (mime) '''
def saveMime(self, attrs):
if self._objectIsNotNone(self.__currentElement, "Bild konnte nicht gespeichert werden.", False):
self.__currentElement.addMime(self.__currentMime, raiseException=False)
self.__currentMime = None
''' ---------------------------------------------------------------------'''
    ''' start of a special treatment class '''
def createTreatmentClass(self, attrs):
self._objectIsNone(self.__currentTreatmentClass,
"Fehler im BMEcat: Neue SpecialTreatmentClass soll erstellt werden. Es wird schon ein SpecialTreatmentClass verarbeitet.",
True)
self.__currentTreatmentClass = TreatmentClass(attrs.getValue('type'))
self.__currentElement = self.__currentTreatmentClass
    ''' save the current treatment class '''
def saveTreatmentClass(self, attrs):
self._objectIsNotNone(self.__currentArticle,
"SpecialTreatmentClass soll gespeichert werden. Aber es ist kein Artikel vorhanden",
True)
self.__currentTreatmentClass.value = self.__currentContent
self.__currentArticle.addSpecialTreatmentClass(self.__currentTreatmentClass)
self.__currentTreatmentClass = None
self.__currentElement = None
''' ---------------------------------------------------------------------'''
def createFeatureSet(self, attrs=None):
self._objectIsNone(self.__currentFeatureSet,
"Fehler im BMEcat: Neues Attributset soll erstellt werden. Es wird schon ein Attributset verarbeitet.",
True)
self.__currentFeatureSet = FeatureSet()
self.__currentContent = ""
def saveFeatureSet(self, attrs=None):
self._objectIsNotNone(self.__currentArticle,
"Attributset soll gespeichert werden. Aber es ist kein Artikel vorhanden", True)
self.__currentArticle.addFeatureSet(self.__currentFeatureSet)
self.__currentFeatureSet = None
''' ---------------------------------------------------------------------'''
def createFeature(self, attrs=None):
self._objectIsNone(self.__currentFeature, "Fehler im BMEcat: Neues Attribut soll erstellt werden. Es wird schon ein Attribut verarbeitet.", True)
self.__currentFeature = Feature()
self.__currentElement = self.__currentFeature
self.__currentContent = ""
def saveFeature(self, attrs=None):
        # Assumed reconstruction: the original file is cut off inside saveFeature.
        # The lines below follow the pattern of the other save* handlers in this
        # class (addFeature on the feature set is an assumption).
        if self._objectIsNotNone(self.__currentFeatureSet,
                                 "Attribut soll gespeichert werden. Aber es ist kein Attributset vorhanden.",
                                 False):
            self.__currentFeatureSet.addFeature(self.__currentFeature)
        self.__currentFeature = None
        self.__currentElement = self.__currentFeatureSet
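# Minimal usage sketch (not part of the original module): the handler above is a
# standard SAX ContentHandler, so a BMEcat file would typically be parsed like
# this. The file name and date format below are illustrative assumptions.
def _parse_bmecat_example(path="catalog.xml"):
    from xml.sax import parse
    bmecat_handler = BMEcatImportHandler(dateFormat="%Y-%m-%d")
    parse(path, bmecat_handler)
    # after parsing, articles are grouped by their BMEcat mode
    return bmecat_handler.articles["new"]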
# File: py3plex/core/HINMINE/decomposition.py
# this is the code for the network decomposition
from math import log
import numpy as np
from collections import defaultdict
def aggregate_sum(input_thing, classes, universal_set):
if type(input_thing) == list:
return sum(input_thing)
elif type(input_thing) == dict:
output = {}
for key in input_thing:
output[key] = sum(input_thing[key])
return output
else:
raise AttributeError('Expected dictionary or list as first argument')
def aggregate_weighted_sum(input_thing, classes, universal_set):
n = len(universal_set)
weights = [(len(cl.train_members) + len(cl.validate_members)) * 1.0 / n
for cl in classes]
n_classes = len(classes)
if type(input_thing) == list:
running_sum = 0
for i in range(n_classes):
running_sum += weights[i] * input_thing[i]
return running_sum
elif type(input_thing) == dict:
return_dict = {}
for key in input_thing:
running_sum = 0
for i in range(n_classes):
running_sum += weights[i] * input_thing[key][i]
return_dict[key] = running_sum
return return_dict
else:
raise AttributeError('Expected dictionary or list as first argument')
def get_calculation_method(method_name):
if method_name == 'tf':
return calculate_importance_tf
elif method_name == 'chi':
return calculate_importance_chi
elif method_name == 'ig':
return calculate_importance_ig
elif method_name == 'gr':
return calculate_importance_gr
elif method_name == 'idf':
return calculate_importance_idf
elif method_name == 'delta':
return calculate_importance_delta
elif method_name == 'rf':
return calculate_importance_rf
elif method_name == 'okapi':
return calculate_importance_okapi
elif method_name == "w2w": # TBA
return calculate_importance_w2w
else:
raise Exception('Unknown weighing method')
def get_aggregation_method(method_name):
if method_name == 'sum':
return aggregate_sum
elif method_name == 'weighted_sum':
return aggregate_weighted_sum
else:
raise Exception('Unknown aggregation method')
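# Small illustrative example of the aggregation helpers above (aggregate_sum
# ignores the classes and universal_set arguments, so None placeholders suffice):
#     aggregate_sum([1, 2, 3], None, None)                  -> 6
#     aggregate_sum({'f1': [1, 2], 'f2': [3]}, None, None)  -> {'f1': 3, 'f2': 3}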
def calculate_importances(midpoints,
classes,
universal_set,
method,
degrees=None,
avgdegree=None):
n = len(universal_set)
importance_calculator = get_calculation_method(method)
return_dict = {}
for midpoint in midpoints:
if degrees is None:
return_dict[midpoint] = importance_calculator(
classes, universal_set, midpoints[midpoint], n)
else:
return_dict[midpoint] = importance_calculator(classes,
universal_set,
midpoints[midpoint],
n,
degrees=degrees,
avgdegree=avgdegree)
return return_dict
def calculate_importance_tf(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using term frequency weighing.
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
return [1.0 / len(classes) for _ in classes]
def np_calculate_importance_tf(predicted, label_matrix):
return (1.0 / label_matrix.shape[1]) * np.ones(label_matrix.shape[1])
def calculate_importance_chi(classes, universal_set, linked_nodes, n,
**kwargs):
"""
Calculates importance of a single midpoint using chi-squared weighing.
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
return_list = []
for label in classes:
if label is None:
continue
actual_pos = label.not_test_members
actual_pos_num = label.not_test_members_num
tp = len(predicted_pos.intersection(actual_pos))
return_list.append(chi_value(actual_pos_num, predicted_pos_num, tp, n))
return return_list
def np_calculate_importance_chi(predicted, label_matrix, actual_pos_nums):
tp = predicted * label_matrix
predicted_pos_num = np.count_nonzero(predicted) # TODO: speed this up!
tp_nums = np.ones((1, label_matrix.shape[0])).dot(tp)
fp_nums = predicted_pos_num - tp_nums
fn_nums = actual_pos_nums - tp_nums
tn_nums = label_matrix.shape[0] - tp_nums - fp_nums - fn_nums
tmp = tp_nums * tn_nums - fp_nums * fn_nums
# TODO: alternative: tp_nums = count something greater than 0.
top = tmp * tmp
bot = predicted_pos_num * (fn_nums +
tn_nums) * actual_pos_nums * (tn_nums + fp_nums)
# bot_zeros = np.where(bot == 0)[0]
# bot[bot_zeros] = 1
# if not np.all(top[bot_zeros] == 0):
# raise Exception('Error in chi implementation')
bot[bot == 0] = 1
res = top / bot
return res
def calculate_importance_w2w(classes, universal_set, linked_nodes, n,
**kwargs):
pass
def calculate_importance_ig(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using IG (information gain) weighing
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
return_list = []
for label in classes:
if label is None:
continue
actual_pos = label.not_test_members
actual_pos_num = label.not_test_members_num
tp = len(predicted_pos.intersection(actual_pos))
return_list.append(ig_value(actual_pos_num, predicted_pos_num, tp, n))
return return_list
def calculate_importance_gr(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using the GR (gain ratio)
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
return_list = []
for label in classes:
if label is None:
continue
actual_pos = label.not_test_members
actual_pos_num = label.not_test_members_num
tp = len(predicted_pos.intersection(actual_pos))
return_list.append(gr_value(actual_pos_num, predicted_pos_num, tp, n))
return return_list
def calculate_importance_okapi(classes,
universal_set,
linked_nodes,
n,
degrees=None,
avgdegree=None):
k1 = 1.5
b = 0.75
    predicted_pos = universal_set.intersection(linked_nodes)
    predicted_pos_num = len(predicted_pos)
    # IDF-style term of the BM25/Okapi formula; note that its value is not used
    # in the per-node weights computed below
    log((n - predicted_pos_num + 0.5) / (predicted_pos_num + 0.5))
return_vec = np.zeros((len(linked_nodes), 1))
for i, linked_node in enumerate(linked_nodes):
return_vec[i] = (k1 +
1) / (1 + k1 *
(1 - b + b * degrees[linked_node] / avgdegree))
return [return_vec for _ in classes]
def calculate_importance_idf(classes, universal_set, linked_nodes, n,
**kwargs):
"""
Calculates importance of a single midpoint using idf weighing
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
idf = log(n * 1.0 / (1 + predicted_pos_num))
return_list = [idf for _ in classes]
return return_list
def calculate_importance_delta(classes, universal_set, linked_nodes, n,
**kwargs):
"""
Calculates importance of a single midpoint using delta-idf weighing
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
predicted_neg_num = n - predicted_pos_num
return_list = []
for label in classes:
if label is None:
continue
actual_pos_num = label.not_test_members_num
actual_neg_num = n - actual_pos_num
diff = actual_pos_num * 1.0 / (predicted_pos_num +
1) - actual_neg_num * 1.0 / (
predicted_neg_num + 1)
return_list.append(abs(diff))
return return_list
def calculate_importance_rf(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using rf weighing
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
return_list = []
for label in classes:
if label is None:
continue
actual_pos = label.not_test_members
tp = len(predicted_pos.intersection(actual_pos))
return_list.append(rf_value(predicted_pos_num, tp))
return return_list
def rf_value(predicted_pos_num, tp):
fp = predicted_pos_num - tp
return log(2 + tp * 1.0 / max(1, fp), 2)
def ig_value(actual_pos_num, predicted_pos_num, tp, n):
fp = predicted_pos_num - tp
fn = actual_pos_num - tp
tn = n - tp - fp - fn
tpp = tp * 1.0 / n
tnp = tn * 1.0 / n
fpp = fp * 1.0 / n
fnp = fn * 1.0 / n
r = 0
if tp > 0:
r += tpp * log(tp * n * 1.0 / (actual_pos_num * predicted_pos_num), 2)
if fn > 0:
r += fnp * log(
fn * n * 1.0 / (actual_pos_num * (n - predicted_pos_num)), 2)
if fp > 0:
r += fpp * log(
fp * n * 1.0 / ((n - actual_pos_num) * predicted_pos_num), 2)
if tn > 0:
r += tnp * log(
tn * n * 1.0 / ((n - actual_pos_num) * (n - predicted_pos_num)), 2)
assert r >= 0
return r
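# Worked example: with n = 10, 5 actual positives, 5 predicted positives and
# tp = 5 (a perfect split), fp = fn = 0 and tn = 5, so
#     ig_value(5, 5, 5, 10) == 0.5 * log2(2) + 0.5 * log2(2) == 1.0
# i.e. the feature carries one full bit of information about the class.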
def gr_value(actual_pos_num, predicted_pos_num, tp, n):
pp = actual_pos_num * 1.0 / n
if pp == 1 or pp == 0:
return 0
return ig_value(actual_pos_num, predicted_pos_num, tp,
n) / (-pp * log(pp, 2) - (1 - pp) * log((1 - pp), 2))
def chi_value(actual_pos_num, predicted_pos_num, tp, n):
    # The original file is truncated here; the rest of the function mirrors the
    # vectorised chi-squared computation in np_calculate_importance_chi above.
    fp = predicted_pos_num - tp
    fn = actual_pos_num - tp
    tn = n - tp - fp - fn
    top = (tp * tn - fp * fn) ** 2
    bot = predicted_pos_num * (fn + tn) * actual_pos_num * (tn + fp)
    if bot == 0:
        return 0.0
    return top * 1.0 / bot
if pos_paths is None:
raise KeyError(('Raw/online position tracking file does not exist '
'for animal ({}), date ({}) and epoch ({}).').
format(self.anim_name, date, epoch))
return pos_paths
def get_raw_pos_path(self, date, epoch, label_ext):
return self.get_raw_pos_paths(date, epoch)[label_ext]
def get_raw_postime_paths(self, date, epoch):
postime_paths = TrodesAnimalInfo._lookup_date_epoch_dict(
self.raw_postime_files, date, epoch)
if postime_paths is None:
raise KeyError('Online position timestamps file does not exist for animal ({}), date ({}) and epoch ({}).'.
format(self.anim_name, date, epoch))
return postime_paths
def get_raw_postime_path(self, date, epoch, label_ext):
return self.get_raw_postime_paths(date, epoch)[label_ext]
def get_raw_poshwframecount_paths(self, date, epoch):
poshwframecount_paths = TrodesAnimalInfo._lookup_date_epoch_dict(
self.raw_poshwframecount_files, date, epoch)
if poshwframecount_paths is None:
raise KeyError(('Online position hwFrameCount file does not exist for '
'animal ({}), date ({}) and epoch ({}).').
format(self.anim_name, date, epoch))
return poshwframecount_paths
def get_raw_poshwframecount_path(self, date, epoch, label_ext):
return self.get_raw_poshwframecount_paths(date, epoch)[label_ext]
def get_raw_rec_path(self, date, epoch):
rec_path = TrodesAnimalInfo._lookup_date_epoch_dict(
self.raw_rec_files, date, epoch)
if rec_path is None:
raise KeyError(('Rec files does not exist for '
'animal ({}), date ({}) and epoch ({}).').
format(self.anim_name, date, epoch))
return rec_path
def get_raw_dir(self):
return self._get_raw_dir(self.base_dir, self.anim_name)
def get_preprocessing_dir(self):
return self._get_preprocessing_dir(self.out_dir, self.anim_name)
def get_analysis_dir(self):
return self._get_analysis_dir(self.out_dir, self.anim_name)
def get_preprocessing_date_dir(self, date, stop_error=True):
path = os.path.join(self.get_preprocessing_dir(), date)
if not os.path.isdir(path) and stop_error:
if os.path.exists(path):
raise TrodesAnimalInfoError('Animal {}, path ({}) exists but is not a directory.'.format(self.anim_name,
path))
else:
raise TrodesAnimalInfoError('Animal {}, path ({}) does not exist.'.format(self.anim_name,
path))
return path
def get_date_trodesconf(self, date):
return self.raw_date_trodesconf[date]
def _get_preprocessing_date_path_dict(self, preprocess_path):
return self._get_day_dirs(preprocess_path)
@staticmethod
def _get_preprocessing_date_data_path_df(date_path_dict):
full_data_paths = pd.DataFrame(
columns=['date', 'epoch', 'label_ext', 'datatype', 'directory'])
for date, date_path in date_path_dict.items():
date_path_entries = os.scandir(date_path)
for date_path_entry in date_path_entries:
if date_path_entry.is_dir():
try:
entry_name_parser = TrodesRawFileNameParser(
date_path_entry.name)
full_data_paths = full_data_paths.append(dict(zip(full_data_paths.columns,
[date,
entry_name_parser.epochtuple,
entry_name_parser.label_ext,
entry_name_parser.ext,
date_path_entry.path])),
ignore_index=True)
except TrodesDataFormatError:
logger.warn(('Invalid folder name in preprocessing folder date ({}) folder ({}), ignoring.'.
format(date, date_path_entry.name)))
# sort and reindex paths
full_data_paths = full_data_paths.sort_values(
['date', 'epoch', 'label_ext', 'datatype']).reset_index(drop=True)
return full_data_paths
@staticmethod
def _get_extracted_file_list(path, ExtractedFileParser=TrodesRawFileNameParser):
dir_entries = os.scandir(path)
file_list = []
for dir_entry in dir_entries:
if dir_entry.is_file:
try:
filename_parser = ExtractedFileParser(dir_entry.name)
file_list.append((filename_parser, dir_entry.path))
except TrodesDataFormatError:
logger.warn('File ({}) does not match file parser ({}). Skipping.'.
format(dir_entry.path,
ExtractedFileParser.__name__),
TrodesDataFormatWarning)
return file_list
@staticmethod
def _expand_str_date(date_str):
return time.strptime(date_str, '%Y%m%d')
@staticmethod
def _get_raw_dir(base_dir, anim_name):
return os.path.join(base_dir, anim_name, 'raw')
@staticmethod
def _get_analysis_dir(base_dir, anim_name):
return os.path.join(base_dir, anim_name, 'analysis')
@staticmethod
def _get_preprocessing_dir(base_dir, anim_name):
return os.path.join(base_dir, anim_name, 'preprocessing')
@staticmethod
def _get_day_dirs(anim_path):
anim_day_paths = {}
try:
anim_dir_entries = os.scandir(anim_path)
for anim_dir_entry in anim_dir_entries:
if anim_dir_entry.is_dir():
try:
TrodesAnimalInfo._expand_str_date(anim_dir_entry.name)
anim_day_paths[anim_dir_entry.name] = anim_dir_entry.path
except ValueError:
logger.warn(('animal path ({}) contains a data directory ({}) '
'that does not conform to date format %Y%m%d.').
format(anim_path, anim_dir_entry.name), TrodesDataFormatWarning)
except FileNotFoundError:
logger.warn(('anim path ({}) does not exist.'.format(
anim_path)), TrodesDataFormatWarning)
return anim_day_paths
@staticmethod
def _get_rec_paths(path, RawFileNameParser=TrodesRawFileNameParser):
dir_entries = os.scandir(path)
anim_rec_paths = []
for dir_entry in dir_entries:
if dir_entry.is_file():
# look only at rec files, ignore others
                if re.match(r'^.*\.rec$', dir_entry.name):
# check to see if filename format is good
try:
trodes_filename_parsed = RawFileNameParser(
dir_entry.name)
anim_rec_paths.append(
(trodes_filename_parsed, dir_entry.path))
except TrodesDataFormatError:
logger.warn(('Invalid trodes rec filename ({}), '
'cannot be parsed, skipping.').
format(dir_entry.path), TrodesDataFormatWarning)
return anim_rec_paths
@staticmethod
def _get_trodes_version(path):
rec_files = Path(path).glob('**/*.rec')
return np.unique(np.asarray([get_trodes_version(rec_file)[0]
for rec_file in rec_files]))[0]
@staticmethod
def _get_video_tracking_paths(path, RawFileNameParser=TrodesRawFileNameParser):
anim_pos_paths = []
dir_entries = os.scandir(path)
for dir_entry in dir_entries:
if dir_entry.is_file():
                if re.match(r'^.*\.videoPositionTracking$', dir_entry.name):
try:
trodes_filename_parsed = RawFileNameParser(
dir_entry.name)
anim_pos_paths.append(
(trodes_filename_parsed, dir_entry.path))
except TrodesDataFormatError:
logger.warn(('Invalid trodes videoPositionTracking filename ({}), '
'cannot be parsed, skipping.').
format(dir_entry.path), TrodesDataFormatWarning)
return anim_pos_paths
@staticmethod
def _get_h264_paths(path, RawFileNameParser=TrodesRawFileNameParser):
anim_h264_paths = []
dir_entries = os.scandir(path)
for dir_entry in dir_entries:
if dir_entry.is_file():
                if re.match(r'^.*\.h264$', dir_entry.name):
try:
trodes_filename_parsed = RawFileNameParser(
dir_entry.name)
anim_h264_paths.append(
(trodes_filename_parsed, dir_entry.path))
except TrodesDataFormatError:
logger.warn(('Invalid trodes h264 filename ({}), '
'cannot be parsed, skipping.').
format(dir_entry.path), TrodesDataFormatWarning)
return anim_h264_paths
@staticmethod
def _get_video_timestamp_paths(path, RawFileNameParser=TrodesRawFileNameParser):
anim_video_times_paths = []
dir_entries = os.scandir(path)
for dir_entry in dir_entries:
if dir_entry.is_file():
                if re.match(r'^.*\.videoTimeStamps$', dir_entry.name):
try:
trodes_filename_parsed = RawFileNameParser(
dir_entry.name)
anim_video_times_paths.append(
(trodes_filename_parsed, dir_entry.path))
except TrodesDataFormatError:
logger.warn(('Invalid trodes videoTimeStamps filename ({}), '
'cannot be parsed, skipping.').
format(dir_entry.path), TrodesDataFormatWarning)
return anim_video_times_paths
@staticmethod
def _get_video_hwframecount_paths(path, RawFileNameParser=TrodesRawFileNameParser):
anim_video_hwframecount_paths = []
dir_entries = os.scandir(path)
for dir_entry in dir_entries:
if dir_entry.is_file():
                if re.match(r'^.*\.videoTimeStamps\.(?:cameraHWFrameCount$|cameraHWSync)', dir_entry.name):
try:
trodes_filename_parsed = RawFileNameParser(
dir_entry.name)
anim_video_hwframecount_paths.append(
(trodes_filename_parsed, dir_entry.path))
except TrodesDataFormatError:
logger.warn(('Invalid trodes videoTimeStamps.cameraHWFrameCount filename ({}), '
'cannot be parsed, skipping.').
format(dir_entry.path), TrodesDataFormatWarning)
return anim_video_hwframecount_paths
@staticmethod
def _get_trodes_comments_paths(path, RawFileNameParser=TrodesRawFileNameParser):
trodes_comment_paths = []
dir_entries = os.scandir(path)
for dir_entry in dir_entries:
if dir_entry.is_file():
                if re.match(r'^.*\.trodesComments$', dir_entry.name):
try:
trodes_filename_parsed = RawFileNameParser(
dir_entry.name)
trodes_comment_paths.append(
(trodes_filename_parsed, dir_entry.path))
except TrodesDataFormatError:
logger.warn(('Invalid trodes .trodesComments filename ({}), '
'cannot be parsed, skipping.').
format(dir_entry.path), TrodesDataFormatWarning)
return trodes_comment_paths
@staticmethod
def _get_trodesconf_paths(path):
trodesconf_paths = []
dir_entries = os.scandir(path)
for dir_entry in dir_entries:
if dir_entry.is_file():
                if re.match(r'^.*\.trodesconf$', dir_entry.name):
trodesconf_paths.append(dir_entry.path)
return trodesconf_paths
class TrodesPreprocessingLFPEpoch:
def __init__(self, anim: TrodesAnimalInfo, date, epochtuple):
self.anim = anim
self.date = date
self.epochtuple = epochtuple
LFP_paths = anim.preproc_LFP_paths[(anim.preproc_LFP_paths['date'] == date) &
(anim.preproc_LFP_paths['epoch'] == epochtuple)]
self.LFP_data_paths = LFP_paths[LFP_paths['timestamp_file'] == False]
self.LFP_timestamp_paths = LFP_paths[LFP_paths['timestamp_file'] == True]
self.lfp = pd.DataFrame()
for path_tup in self.LFP_data_paths.itertuples():
if not np.isnan(path_tup.ntrode) and not np.isnan(path_tup.channel):
lfp_bin = TrodesLFPBinaryLoader(path_tup.path)
single_col = pd.MultiIndex.from_tuples([(path_tup.ntrode, path_tup.channel)],
names=['ntrode', 'channel'])
self.lfp = pd.concat([self.lfp, pd.DataFrame(lfp_bin.data, columns=single_col)], axis=1,
verify_integrity=True)
else:
logger.warn(('Animal ({}), date ({}), epoch ({}) '
'has a bad preprocessing path entry, ntrode or '
'channel has a nan entry that is not a timestamp '
'file, skipping.').format(anim.anim_name, date, epochtuple))
# get default timestamp
try:
orig_timestamp_path_entries = self.LFP_timestamp_paths[
self.LFP_timestamp_paths['time_label'] == '']
orig_timestamp_path = orig_timestamp_path_entries['path'].values[0]
if len(orig_timestamp_path_entries) > 1:
logger.warn(('Animal ({}), date ({}), epoch ({}) '
'has multiple original timestamp path entries, '
'using ({}).').format(anim.anim_name, date, epochtuple, orig_timestamp_path))
orig_timestamp_bin = TrodesTimestampBinaryLoader(
orig_timestamp_path)
self.orig_timestamps = orig_timestamp_bin.data
except (IndexError, FileNotFoundError):
self.orig_timestamps = None
raise TrodesDataFormatError(('Animal ({}), date ({}), epoch ({}) '
'missing default timestamps file.').format(anim.anim_name, date, epochtuple))
try:
adj_timestamp_path_entries = self.LFP_timestamp_paths[
self.LFP_timestamp_paths['time_label'] == 'adj']
adj_timestamp_path = adj_timestamp_path_entries['path'].values[0]
if len(adj_timestamp_path_entries) > 1:
logger.warn(('Animal ({}), date ({}), epoch ({}) '
'has multiple adjusted timestamp path entries, '
'using ({}).').format(anim.anim_name, date, epochtuple, adj_timestamp_path))
adj_timestamp_bin = TrodesTimestampBinaryLoader(adj_timestamp_path)
self.adj_timestamps = adj_timestamp_bin.data
except (IndexError, FileNotFoundError):
self.adj_timestamps = None
logger.warn(('Animal ({}), date ({}), epoch ({}) '
'missing adjusted timestamps file.').format(anim.anim_name, date, epochtuple),
TrodesDataFormatWarning)
# always use original timestamp
self.lfp.set_index(keys=self.orig_timestamps, inplace=True)
class TrodesPreprocessingSpikeEpoch:
def __init__(self, anim: TrodesAnimalInfo, date, epochtuple, time_label, parallel_instances=1):
self.spike_paths = anim.preproc_spike_paths[(anim.preproc_spike_paths['date'] == date) &
(anim.preproc_spike_paths['epoch'] == epochtuple) &
(anim.preproc_spike_paths['time_label'] == time_label)]
self.anim = anim
self.date = date
self.epochtuple = epochtuple
# index (ntrode_index)
self.spikes = {}
path_list = []
ntrode_list = []
for path_tup in self.spike_paths.itertuples():
if not pd.isnull(path_tup.ntrode):
path_list.append(path_tup.path)
ntrode_list.append(path_tup.ntrode)
else:
logger.warn(('Animal ({}), date ({}), epoch ({}) '
'has a bad preprocessing path entry, ntrode '
'has a nan entry that is not a timestamp '
'file, skipping.').format(anim.anim_name, date, epochtuple))
p = multiprocessing.Pool(parallel_instances)
spikes_loaded = p.map(TrodesSpikeBinaryLoader, path_list, chunksize=1)
self.spikes = dict(
zip(ntrode_list, [loader.spikes for loader in spikes_loaded]))
p.close()
class TrodesPreprocessingPosEpoch:
def __init__(self, anim: TrodesAnimalInfo, date, epochtuple):
self.anim = anim
self.date = date
self.epochtuple = epochtuple
self.pos_paths = anim.preproc_pos_paths[(anim.preproc_pos_paths['date'] == date) &
(anim.preproc_pos_paths['epoch'] == epochtuple)]
self.timestamps = {}
self.pos = {}
for path_tup in self.pos_paths.itertuples():
if path_tup.timestamp_file:
if path_tup.time_label in self.timestamps:
logger.warn(('Animal ({}), date ({}), epoch ({}) '
'has multiple timestamps with same label, using first one.').
format(anim.anim_name, date, epochtuple))
break
timestamp_bin = TrodesTimestampBinaryLoader(path_tup.path)
self.timestamps[path_tup.time_label] = timestamp_bin.data
elif not pd.isnull(path_tup.pos_label):
# assume path is for a position file
if path_tup.pos_label in self.pos:
logger.warn(('Animal ({}), date ({}), epoch ({}) '
'has multiple pos data with same label, using first one.').
format(anim.anim_name, date, epochtuple))
break
pos_bin = TrodesPosBinaryLoader(path_tup.path)
self.pos[path_tup.pos_label] = pos_bin.pos
class TrodesPreprocessingDIOEpoch:
def __init__(self, anim: TrodesAnimalInfo, date, epochtuple):
self.anim = anim
self.date = date
self.epochtuple = epochtuple
self.dio_paths = anim.preproc_dio_paths[(anim.preproc_dio_paths['date'] == date) &
(anim.preproc_dio_paths['epoch'] == epochtuple)]
self.dio = {}
for path_tup in self.dio_paths.itertuples():
if not pd.isnull(path_tup.channel):
dio_list = self.dio.setdefault(path_tup.time_label, [])
dio_bin = TrodesDIOBinaryLoader(path_tup.path)
dio_bin.dio.columns = pd.MultiIndex.from_tuples([(path_tup.direction, int(path_tup.channel))],
names=['direction', 'channel'])
dio_list.append(dio_bin.dio)
class TrodesPreprocessingToAnalysis:
def __init__(self, anim: TrodesAnimalInfo):
self.trodes_anim = anim
def convert_lfp_day(self, date):
self._convert_generic_day(
date, self.trodes_anim.preproc_LFP_paths, 'lfp', self._write_lfp_epoch)
def _write_lfp_epoch(self, date, epoch, hdf_store):
lfp_epoch = TrodesPreprocessingLFPEpoch(self.trodes_anim, date, epoch)
hdf_store['preprocessing/LFP/' +
'e{:02d}'.format(epoch[0]) + '/data'] = lfp_epoch.lfp
def convert_spike_day(self, date, time_label='', parallel_instances=1):
self._convert_generic_day(date, self.trodes_anim.preproc_spike_paths, 'spikewaves',
functools.partial(self._write_spike_epoch,
time_label=time_label, parallel_instances=parallel_instances)
)
def _write_spike_epoch(self, date, epoch, hdf_store, time_label, parallel_instances=1):
spike_epoch = TrodesPreprocessingSpikeEpoch(self.trodes_anim, date, epoch, time_label,
parallel_instances=parallel_instances)
for ntrode, spike_ntrode in spike_epoch.spikes.items():
hdf_store.put('preprocessing/EventWaveform/' + 'e{:02d}'.format(int(epoch[0])) +
'/t{:02d}'.format(int(ntrode)) + '/data',
spike_ntrode, expectedrows=len(spike_ntrode))
def convert_pos_day(self, date):
self._convert_generic_day(
            # Assumed completion (the original file is truncated here), mirroring
            # convert_lfp_day above; a _write_pos_epoch helper is assumed to exist.
            date, self.trodes_anim.preproc_pos_paths, 'pos', self._write_pos_epoch)
# File: carpyncho.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, <NAME>
# License: BSD-3-Clause
# Full Text: https://github.com/carpyncho/carpyncho-py/blob/master/LICENSE
# =============================================================================
# DOCS
# =============================================================================
"""Python client for Carpyncho VVV dataset collection.
This code access as a Pandas DataFrame all the data of the web version of
Carpyncho https://carpyncho.github.io/.
"""
__all__ = ["Carpyncho", "CARPYNCHOPY_DATA_PATH"]
__version__ = "0.0.5"
# =============================================================================
# IMPORTS
# =============================================================================
import os
import io
import bz2
import pathlib
import typing as t
import inspect
import hashlib
import functools
import urllib
import json
import attr
import diskcache as dcache
import tqdm
import humanize
import requests
import clize
import pandas as pd
# =============================================================================
# CONSTANTS
# =============================================================================
VERSION = __version__
#: Location of the entire dataset index.
CARPYNCHO_INDEX_URL = "https://raw.githubusercontent.com/carpyncho/carpyncho-py/master/data/index.json" # noqa
#: Google drive location.
DRIVE_URL = "https://docs.google.com/uc?export=download"
#: Where carpyncho stores the downloaded data.
CARPYNCHOPY_DATA_PATH = pathlib.Path(
os.path.expanduser(os.path.join('~', 'carpyncho_py_data')))
#: Chunk size used when the library downloads the big files of Carpyncho.
CHUNK_SIZE = 32768
#: Maximum cache size (10 GB).
DEFAULT_CACHE_SIZE_LIMIT = int(1e10)
#: The location of the cache catabase and files.
DEFAULT_CACHE_DIR = CARPYNCHOPY_DATA_PATH / "_cache_"
# =============================================================================
# CACHE ORCHESTRATION
# =============================================================================
def from_cache(
cache, tag, function, cache_expire,
force=False, *args, **kwargs
):
"""Simplify cache orchestration.
Parameters
----------
tag: str
        Normally every function calls the cache with its own tag.
        We suggest "module.function" or "module.Class.function".
function: callable
The function to be cached
force: bool (default=False)
        If True, the cached value must be ignored and the function re-executed.
cache_expire: bool or None
Time in seconds to expire the function call
args and kwargs:
All the parameters needed to execute the function.
Returns
-------
The result of calling the function or the cached version of the same value.
"""
# start the cache orchestration
key = dcache.core.args_to_key(
base=("carpyncho", tag), args=args, kwargs=kwargs, typed=False)
with cache as c:
c.expire()
value = (
dcache.core.ENOVAL if force else
c.get(key, default=dcache.core.ENOVAL, retry=True))
if value is dcache.core.ENOVAL:
value = function(**kwargs)
c.set(
key, value, expire=cache_expire,
tag=f"carpyncho.{tag}", retry=True)
return value
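# Illustrative sketch of how from_cache wraps an expensive call; the cache
# directory, tag and fetch function below are placeholders, not part of the
# original module.
def _cached_fetch_example(url):
    def fetch(url):
        return requests.get(url).text
    return from_cache(
        cache=dcache.Cache(directory=str(DEFAULT_CACHE_DIR)),
        tag="example.fetch",
        function=fetch,
        cache_expire=3600,
        url=url)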
# =============================================================================
# CLIENT
# =============================================================================
@attr.s(hash=False, frozen=True)
class Carpyncho:
"""Client to access the *Carpyncho VVV dataset collection*.
    This code accesses, as Pandas DataFrames, all the data of the web version of
Carpyncho. https://carpyncho.github.io/.
Parameters
----------
cache : ``diskcache.Cache``, ``diskcache.Fanout``,
or ``None`` (default: ``None``)
Any instance of ``diskcache.Cache``, ``diskcache.Fanout`` or
``None`` (Default). If it's ``None`` a ``diskcache.Cache``
        instance is created with the parameter
``directory = carpyncho.DEFAULT_CACHE_DIR``.
More information: http://www.grantjenks.com/docs/diskcache
cache_expire : ``float`` or None (default=``None``)
Seconds until item expires (default ``None``, no expiry)
More information: http://www.grantjenks.com/docs/diskcache
"""
#: Local cache of the carpyncho database.
cache: t.Union[dcache.Cache, dcache.FanoutCache] = attr.ib()
#: Default timout of the catalog-cache.
#: Try to always set to None (default), the catalogs are big and mostly
#: never change.
cache_expire: float = attr.ib(default=None, repr=False)
#: Location of the carpyncho index (usefull for development)
index_url: str = attr.ib(default=CARPYNCHO_INDEX_URL)
# =========================================================================
# ATTRS ORCHESTRATION
# =========================================================================
@cache.default
def _cache_default(self):
return dcache.Cache(
directory=DEFAULT_CACHE_DIR, size_limit=DEFAULT_CACHE_SIZE_LIMIT)
# =========================================================================
# UTILITIES FOR CHECK THE REMOTE DATA
# =========================================================================
def retrieve_index(self, reset):
"""Access the remote index of the Carpyncho-Dataset.
The index is stored internally for 1 hr.
Parameters
----------
reset: bool
            If True, the entire cache is ignored and a new index is
            downloaded and cached.
Returns
-------
dict with the index structure.
"""
def get_json_data(url):
parsed = urllib.parse.urlparse(url)
if parsed.scheme in ("http", "https", "ftp"):
response = requests.get(
url, headers={'Cache-Control': 'no-cache'})
return response.json()
with open(url) as fp:
return json.load(fp)
return from_cache(
cache=self.cache,
tag="get_index",
function=get_json_data,
cache_expire=3600,
force=reset,
url=self.index_url)
@property
def index_(self):
"""Structure of the Carpyncho dataset information as a Python-dict."""
return self.retrieve_index(reset=False)
def list_tiles(self):
"""Retrieve available tiles with catalogs as a tuple of str."""
index = self.index_
return tuple(k for k in index.keys() if not k.startswith("_"))
def list_catalogs(self, tile):
"""Retrieve the available catalogs for a given tile.
Parameters
----------
tile: str
The name of the tile to retrieve the catalogs.
Returns
-------
tuple of str:
The names of available catalogs in the given tile.
Raises
------
ValueError:
If the tile is not found.
"""
index = self.index_
if tile not in index:
raise ValueError(f"Tile {tile} not found")
return tuple(index[tile])
def has_catalog(self, tile, catalog):
"""Check if a given catalog and tile exists.
Parameters
----------
tile: str
The name of the tile.
catalog:
The name of the catalog.
Returns
-------
bool:
            True if the combination tile+catalog exists.
"""
cat = self.index_.get(tile, {}).get(catalog)
return bool(cat)
def catalog_info(self, tile, catalog):
"""Retrieve the information about a given catalog.
Parameters
----------
tile: str
The name of the tile.
catalog:
The name of the catalog.
Returns
-------
dict:
The entire information of the given catalog file. This include
drive-id, md5 checksum, size in bytes, number of total records,
etc.
Raises
------
ValueError:
If the tile or the catalog is not found.
"""
index = self.index_
if tile not in index:
raise ValueError(f"Tile {tile} not found")
tile = index[tile]
if catalog not in tile:
raise ValueError(
f"Catalog {catalog} for tile {tile} not found")
return tile[catalog]
# =========================================================================
# THE DOWNLOAD PART
# =========================================================================
def _grive_download(self, tile, catalog, driveid, size, md5sum):
# https://stackoverflow.com/a/39225272
# https://stackoverflow.com/a/27508615
# prepare the parameters and download the token
params = {'id': driveid}
session = requests.Session()
response = session.get(
DRIVE_URL, params=params, stream=True,
headers={'Cache-Control': 'no-cache'})
# retrieve the token from gdrive page
token = None
for key, value in response.cookies.items():
if key.startswith('download_warning'):
token = value
break
# if we have token add to the parameters
if token:
params['confirm'] = token
# make the real deal request
response = session.get(
DRIVE_URL, params=params, stream=True,
headers={'Cache-Control': 'no-cache'})
# progress bar
pbar = tqdm.tqdm(
total=size, initial=0, unit='B',
unit_scale=True, desc=f"{tile}-{catalog}")
# the file is a bz2 file, we are going to decompress and store
# the raw parquet data into a BytesIO
decompressor = bz2.BZ2Decompressor()
parquet_stream = io.BytesIO()
        # we also need to check that the md5 checksum is correct
file_hash = hashlib.md5()
# retrive all the data one chunk at the time
for chunk in response.iter_content(CHUNK_SIZE):
if not chunk:
break
parquet_stream.write(decompressor.decompress(chunk))
file_hash.update(chunk)
pbar.update(CHUNK_SIZE)
# stop the progress bar
pbar.close()
# check if the file was download correctly
if file_hash.hexdigest() != md5sum:
raise IOError(
f"'{tile}-{catalog}' incorrect download.\n"
f"expected: {md5sum}\n"
f"caclulated: {file_hash.hexdigest()}")
# read the entire stream into a dataframe
df = pd.read_parquet(parquet_stream)
return df
def get_catalog(self, tile, catalog, force=False):
"""Retrieve a catalog from the carpyncho dataset.
Parameters
----------
tile: str
The name of the tile.
catalog:
The name of the catalog.
force: bool (default=False)
If its True, the cached version of the catalog is ignored and
redownloaded. Try to always set force to False.
Returns
-------
pandas.DataFrame:
            The columns of the DataFrame change between the different catalogs.
Raises
------
ValueError:
If the tile or the catalog is not found.
IOError:
            If the checksum does not match.
"""
info = self.catalog_info(tile, catalog)
driveid, size = info["driveid"], info["size"]
md5sum = info["md5sum"].split()[0].strip().lower()
df = from_cache(
cache=self.cache,
tag="get_catalog",
function=self._grive_download,
cache_expire=None,
force=force,
# params to _gdrive_download
tile=tile, catalog=catalog,
driveid=driveid, size=size, md5sum=md5sum)
return df
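# Minimal usage sketch (illustrative, not part of the original module): it simply
# walks the index to pick the first available tile/catalog pair and fetches it.
def _example_get_catalog():
    client = Carpyncho()
    tile = client.list_tiles()[0]
    catalog = client.list_catalogs(tile)[0]
    # returns a pandas.DataFrame with the catalog contents
    return client.get_catalog(tile, catalog)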
# =============================================================================
# CLI
# =============================================================================
@attr.s(hash=False, frozen=True)
class CLI:
"""Carpyncho console client.
    Explore and download the https://carpyncho.github.io/ catalogs from your
    command line.
"""
footnotes = "\n".join([
"This software is under the BSD 3-Clause License.",
"Copyright (c) 2020, <NAME>.",
"For bug reporting or other instructions please check:"
" https://github.com/carpyncho/carpyncho-py"])
#: Carpyncho client.
client = attr.ib()
def get_commands(self):
methods = {}
for k in dir(self):
if k.startswith("_"):
continue
v = getattr(self, k)
if inspect.ismethod(v) and k != "get_commands":
methods[k] = v
return methods
def version(self):
"""Print Carpyncho version."""
print(VERSION)
def list_tiles(self):
"""Show available tiles."""
for tile in self.client.list_tiles():
print(f"- {tile}")
def list_catalogs(self, tile):
"""Show the available catalogs for a given tile.
tile:
The name of the tile to retrieve the catalogs.
"""
print(f"Tile {tile}")
for catalog in self.client.list_catalogs(tile=tile):
print(f" - {catalog}")
def has_catalog(self, tile, catalog):
"""Check if a given catalog and tile exists.
        tile:
            The name of the tile.
catalog:
The name of the catalog.
"""
has = "" if self.client.has_catalog(tile, catalog) else "NO "
print(f"Catalog '{catalog}' or tile '{tile}': {has}exists")
def catalog_info(self, tile, catalog):
"""Retrieve the information about a given catalog.
tile:
The name of the tile.
catalog:
The name of the catalog.
"""
FORMATTERS = {
"size": functools.partial(humanize.naturalsize, binary=True),
"records": humanize.intcomma
}
print(f"Catalog {tile}-{catalog}")
for k, v in self.client.catalog_info(tile, catalog).items():
fmt = FORMATTERS.get(k, str)
print(f" - {k}: {fmt(v)}")
    def download_catalog(self, tile, catalog, out, force=False):
        """Download a catalog and store it as a CSV file.
        (Assumed reconstruction: the original file is truncated at this point,
        so the signature and the CSV output below are illustrative.)
        tile:
            The name of the tile.
        catalog:
            The name of the catalog.
        out:
            Path of the CSV file to write.
        force:
            If True, ignore the cached version of the catalog.
        """
        df = self.client.get_catalog(tile, catalog, force=force)
        df.to_csv(out, index=False)
        print(f"Catalog '{tile}-{catalog}' stored in '{out}'")
from __future__ import print_function
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import matplotlib.pyplot as plt
from tqdm import tqdm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from lasagne.layers import InputLayer, Conv1DLayer, Pool1DLayer
from lasagne.regularization import regularize_network_params, l2
VERBOSE = False
GRID_SEARCH = False
def bootstrap(data, labels, boot_type="downsample"):
print("Bootstrapping data...")
ot_class = 0
mw_class = 1
ot_idx = np.where(labels == ot_class)
mw_idx = np.where(labels == mw_class)
# Get OT examples
ot_data = data[ot_idx]
ot_labels = labels[ot_idx]
print(" - OT (class: {}) | Data: {} | Labels: {}".format(ot_class, ot_data.shape, ot_labels.shape))
# Get MW examples
mw_data = data[mw_idx]
mw_labels = labels[mw_idx]
print(" - MW (class: {}) | Data: {} | Labels: {}".format(mw_class, mw_data.shape, mw_labels.shape))
# Set majority and minority classes
if ot_data.shape[0] > mw_data.shape[0]:
maj_class, maj_data, maj_labels = ot_class, ot_data, ot_labels
min_class, min_data, min_labels = mw_class, mw_data, mw_labels
else:
maj_class, maj_data, maj_labels = mw_class, mw_data, mw_labels
min_class, min_data, min_labels = ot_class, ot_data, ot_labels
print(" - Majority class: {} (N = {}) | Minority class: {} (N = {})".format(maj_class, maj_data.shape[0],
min_class, min_data.shape[0]))
# Upsample minority class
if boot_type == "upsample":
print("Upsampling minority class...")
num_to_boot = maj_data.shape[0] - min_data.shape[0]
print(" - Number to upsample: {}".format(num_to_boot))
bootstrap_idx = np.random.randint(min_data.shape[0], size=num_to_boot)
min_data_boot = min_data[bootstrap_idx]
min_labels_boot = min_labels[bootstrap_idx]
final_data = np.concatenate((data, min_data_boot), axis=0)
final_labels = np.concatenate((labels, min_labels_boot), axis=0)
elif boot_type == "downsample":
print("Downsampling majority class...")
# Resample N = number of minority examples
num_to_boot = min_data.shape[0]
bootstrap_idx = np.random.randint(maj_data.shape[0], size=num_to_boot)
maj_data_boot = maj_data[bootstrap_idx]
maj_labels_boot = maj_labels[bootstrap_idx]
final_data = np.concatenate((maj_data_boot, min_data), axis=0)
final_labels = np.concatenate((maj_labels_boot, min_labels), axis=0)
print("Final class balance: {} ({}) - {} ({})".format(
maj_class, len(np.where(final_labels==maj_class)[0]),
min_class, len(np.where(final_labels==min_class)[0])))
return final_data, final_labels
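# Quick illustrative check of the downsampling behaviour on synthetic data:
#   X = np.zeros((10, 1, 30)); y = np.array([0] * 8 + [1] * 2)
#   Xb, yb = bootstrap(X, y, "downsample")   # both classes end up with 2 rows each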
# Load EEG data
base_dir = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir))
data_dir = os.path.join(base_dir, "data")
data = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch.npy'))
data = data.transpose(0,2,1) # Equivalent to dimshuffle
data_labels = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch_labels.npy'))
data_labels = data_labels[:,1]
data_labels = data_labels.repeat(data.shape[1]) # Repeat each label
temp_arrays = []
for i in range(data.shape[0]):
temp_arrays.append(data[i,:])
data = np.concatenate(temp_arrays) # Stack the 2D arrays vertically
del temp_arrays
# Data now contains 2804736 rows
data = data.reshape(-1, 1, 30) # Add extra channel for depth
# Electrode Order (30 channels)
electrode_order = ('Fp1','Fp2','Fz',
'F4','F8','FC6',
'C4','T8','CP6',
'P4','P8','P10',
'O2','Oz','O1',
'P9','P3','P7',
'CP5','C3','T7',
'FC5','F7','F3',
'FC1','FC2','Cz',
'CP1','CP2','Pz')
# Significantly improves gradient descent
data = data*1e5 # Increase size of values
# Up/downsample the data to balance classes
data, data_labels = bootstrap(data, data_labels, "downsample") # 2402304 rows
# Create train, validation, test sets
rng = np.random.RandomState(5347) # Set random seed
indices = rng.permutation(data.shape[0])
split_train, split_val, split_test = .6, .2, .2
split_train = int(round(data.shape[0]*split_train))
split_val = split_train + int(round(data.shape[0]*split_val))
train_idx = indices[:split_train]
val_idx = indices[split_train:split_val]
test_idx = indices[split_val:]
train_data = data[train_idx,:]
train_labels = data_labels[train_idx]
val_data = data[val_idx,:]
val_labels = data_labels[val_idx]
test_data = data[test_idx,:]
test_labels = data_labels[test_idx]
def build_cnn(k_width=5, input_var=None):
# Input layer, as usual:
l_in = InputLayer(shape=(None, 1, 30), input_var=input_var)
l_conv1 = Conv1DLayer(incoming = l_in, num_filters = 16,
filter_size = k_width,
stride = 1, pad = 'same',
W = lasagne.init.Normal(std = 0.02),
nonlinearity = lasagne.nonlinearities.very_leaky_rectify)
l_pool1 = Pool1DLayer(incoming = l_conv1, pool_size = 2, stride = 2)
l_drop1 = lasagne.layers.dropout(l_pool1, p=.2)
l_fc = lasagne.layers.DenseLayer(
l_drop1,
num_units=512,
nonlinearity=lasagne.nonlinearities.rectify)
l_drop2 = lasagne.layers.dropout(l_fc, p=.2)
l_out = lasagne.layers.DenseLayer(
l_drop2,
num_units=2,
nonlinearity=lasagne.nonlinearities.softmax)
return l_out
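# --- Editor's sketch (not part of the original script) -----------------------
# Optional sanity check of the architecture above, assuming Lasagne is imported
# as in the rest of this script:
#
#     _net = build_cnn(k_width=5)
#     print("Trainable parameters:", lasagne.layers.count_params(_net, trainable=True))
#     print("Output shape:", lasagne.layers.get_output_shape(_net))
#
# Both helpers are standard Lasagne utilities; remove this block if unwanted.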
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
# tqdm() can be removed if no visual progress bar is needed
for start_idx in tqdm(range(0, len(inputs) - batchsize + 1, batchsize)):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
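# --- Editor's sketch (not part of the original script) -----------------------
# The note above mentions memory-mapping for datasets that do not fit in RAM.
# A minimal variant, assuming the same .npy files used elsewhere in this script:
#
#     big_data = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch.npy'),
#                        mmap_mode='r')   # pages are read from disk on access
#     big_labels = ...                    # matching label array, same length
#     for xb, yb in iterate_minibatches(big_data, big_labels, 500, shuffle=False):
#         pass  # each mini-batch slice is materialised lazily
#
# shuffle=True largely defeats the benefit, since random indexing touches pages
# scattered across the whole file.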
def main(model='cnn', batch_size=500, num_epochs=500, k_width=5):
# Prepare Theano variables for inputs and targets
input_var = T.tensor3('inputs')
target_var = T.ivector('targets')
network = build_cnn(k_width, input_var)
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
l2_reg = regularize_network_params(network, l2)
loss += l2_reg * 0.00001
train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Create update expressions for training
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.1)
#updates = lasagne.updates.adam(loss, params, learning_rate=0.1)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], [loss, train_acc], updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
training_hist = []
val_hist = []
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
print("Training epoch {}...".format(epoch+1))
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(train_data, train_labels, batch_size, shuffle=True):
inputs, targets = batch
err, acc = train_fn(inputs, targets)
train_err += err
train_acc += acc
train_batches += 1
if VERBOSE:
print("Epoch: {} | Mini-batch: {}/{} | Elapsed time: {:.2f}s".format(
epoch+1,
train_batches,
train_data.shape[0]/batch_size,
time.time()-start_time))
training_hist.append(train_err / train_batches)
# And a full pass over the validation data:
print("Validating epoch...")
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(val_data, val_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
val_hist.append(val_err / val_batches)
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" training accuracy:\t\t{:.2f} %".format(
train_acc / train_batches * 100))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test predictions/error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(test_data, test_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
test_perc = (test_acc / test_batches) * 100
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(test_perc))
# Plot learning
plt.plot(range(1, num_epochs+1), training_hist, label="Training")
plt.plot(range(1, num_epochs+1), val_hist, label="Validation")
plt.grid(True)
plt.title("Training Curve\nKernel size: (1,{}) - Test acc: {:.2f}%".format(k_width, test_perc))
plt.xlim(1, num_epochs+1)
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.legend(loc='best')
plt.show()
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
return test_perc
if GRID_SEARCH:
# Set filter sizes to search across (odd size only)
search_widths = range(3, 30, 4) # Across spatial domain (electrodes)
# Preallocate accuracy grid
grid_accuracy = np.empty((1, len(search_widths)))
num_kernels = grid_accuracy.size
cur_kernel = 0
for i, w in enumerate(search_widths):
# Train with current kernel size
cur_kernel += 1
print("***** Kernel {}/{} | Size: (1,{}) *****".format(cur_kernel, num_kernels, w))
cur_test_acc = main(batch_size=2000, num_epochs=20, k_width=w)
grid_accuracy[0, i] = cur_test_acc
# Show accuracy heatmap
fig, ax = plt.subplots(figsize=(10, 10))
heatmap = ax.imshow(grid_accuracy, cmap = plt.cm.bone, interpolation = 'nearest')  # NOTE: 'nearest' is an assumed value; the original line was cut off here
return data["topartists"]["artist"] if data is not None else None
if datatype == "album":
data = await self.api_request(
{
"user": username,
"method": "user.gettopalbums",
"limit": limit,
},
ignore_errors=True,
)
return data["topalbums"]["album"] if data is not None else None
if datatype == "track":
data = await self.api_request(
{
"user": username,
"method": "user.gettoptracks",
"limit": limit,
},
ignore_errors=True,
)
return data["toptracks"]["track"] if data is not None else None
@commands.command(aliases=["wk", "whomstknows"])
@commands.guild_only()
@is_small_server()
@commands.cooldown(2, 60, type=commands.BucketType.user)
async def whoknows(self, ctx, *, artistname=None):
"""
Who has listened to a given artist the most.
Usage:
>whoknows <artist name>
>whoknows np
"""
if artistname is None:
return await util.send_command_help(ctx)
artistname = remove_mentions(artistname)
if artistname.lower() == "np":
artistname = (await self.getnowplaying(ctx))["artist"]
if artistname is None:
raise exceptions.Warning("Could not get currently playing artist!")
listeners = []
tasks = []
for user_id, lastfm_username in await self.server_lastfm_usernames(
ctx, filter_cheaters=True
):
member = ctx.guild.get_member(user_id)
if member is None:
continue
tasks.append(self.get_playcount(artistname, lastfm_username, member))
if tasks:
data = await asyncio.gather(*tasks)
for playcount, member, name in data:
artistname = name
if playcount > 0:
listeners.append((playcount, member))
else:
return await ctx.send("Nobody on this server has connected their last.fm account yet!")
artistname = util.escape_md(artistname)
rows = []
old_king = None
new_king = None
total = 0
for i, (playcount, member) in enumerate(
sorted(listeners, key=lambda p: p[0], reverse=True), start=1
):
if i == 1:
rank = ":crown:"
old_king = await self.bot.db.execute(
"SELECT user_id FROM artist_crown WHERE artist_name = %s AND guild_id = %s",
artistname,
ctx.guild.id,
one_value=True,
)
await self.bot.db.execute(
"""
INSERT INTO artist_crown (guild_id, user_id, artist_name, cached_playcount)
VALUES (%s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
cached_playcount = VALUES(cached_playcount),
user_id = VALUES(user_id)
""",
ctx.guild.id,
member.id,
artistname,
playcount,
)
if old_king:
old_king = ctx.guild.get_member(old_king)
new_king = member
else:
rank = f"`#{i:2}`"
rows.append(
f"{rank} **{util.displayname(member)}** — **{playcount}** {format_plays(playcount)}"
)
total += playcount
if not rows:
return await ctx.send(f"Nobody on this server has listened to **{artistname}**")
content = discord.Embed(title=f"Who knows **{artistname}**?")
image_url = await self.get_artist_image(artistname)
content.set_thumbnail(url=image_url)
content.set_footer(text=f"Collective plays: {total}")
content.colour = await self.cached_image_color(image_url)
await util.send_as_pages(ctx, content, rows)
if old_king is None or old_king.id == new_king.id:
return
await ctx.send(
f"> **{util.displayname(new_king)}** just stole the **{artistname}** crown from **{util.displayname(old_king)}**"
)
@commands.command(aliases=["wkt", "whomstknowstrack"])
@commands.guild_only()
@is_small_server()
@commands.cooldown(2, 60, type=commands.BucketType.user)
async def whoknowstrack(self, ctx, *, track=None):
"""
Who has listened to a given song the most.
Usage:
>whoknowstrack <track name> | <artist name>
>whoknowstrack np
"""
if track is None:
return await util.send_command_help(ctx)
track = remove_mentions(track)
if track.lower() == "np":
npd = await self.getnowplaying(ctx)
trackname = npd["track"]
artistname = npd["artist"]
if None in [trackname, artistname]:
raise exceptions.Warning("Could not get currently playing track!")
else:
try:
trackname, artistname = [x.strip() for x in track.split("|")]
if "" in (trackname, artistname):
raise ValueError
except ValueError:
raise exceptions.Warning("Incorrect format! use `track | artist`")
listeners = []
tasks = []
for user_id, lastfm_username in await self.server_lastfm_usernames(
ctx, filter_cheaters=True
):
member = ctx.guild.get_member(user_id)
if member is None:
continue
tasks.append(self.get_playcount_track(artistname, trackname, lastfm_username, member))
if tasks:
data = await asyncio.gather(*tasks)
for playcount, user, metadata in data:
artistname, trackname, image_url = metadata
if playcount > 0:
listeners.append((playcount, user))
else:
return await ctx.send("Nobody on this server has connected their last.fm account yet!")
artistname = util.escape_md(artistname)
trackname = util.escape_md(trackname)
rows = []
total = 0
for i, (playcount, user) in enumerate(
sorted(listeners, key=lambda p: p[0], reverse=True), start=1
):
rows.append(
f"`#{i:2}` **{util.displayname(user)}** — **{playcount}** {format_plays(playcount)}"
)
total += playcount
if not rows:
return await ctx.send(
f"Nobody on this server has listened to **{trackname}** by **{artistname}**"
)
if image_url is None:
image_url = await self.get_artist_image(artistname)
content = discord.Embed(title=f"Who knows **{trackname}**\n— by {artistname}")
content.set_thumbnail(url=image_url)
content.set_footer(text=f"Collective plays: {total}")
content.colour = await self.cached_image_color(image_url)
await util.send_as_pages(ctx, content, rows)
@commands.command(aliases=["wka", "whomstknowsalbum"])
@commands.guild_only()
@is_small_server()
@commands.cooldown(2, 60, type=commands.BucketType.user)
async def whoknowsalbum(self, ctx, *, album=None):
"""
Who has listened to a given album the most.
Usage:
>whoknowsalbum <album name> | <artist name>
>whoknowsalbum np
"""
if album is None:
return await util.send_command_help(ctx)
album = remove_mentions(album)
if album.lower() == "np":
npd = await self.getnowplaying(ctx)
albumname = npd["album"]
artistname = npd["artist"]
if None in [albumname, artistname]:
raise exceptions.Warning("Could not get currently playing album!")
else:
try:
albumname, artistname = [x.strip() for x in album.split("|")]
if "" in (albumname, artistname):
raise ValueError
except ValueError:
raise exceptions.Warning("Incorrect format! use `album | artist`")
listeners = []
tasks = []
for user_id, lastfm_username in await self.server_lastfm_usernames(
ctx, filter_cheaters=True
):
member = ctx.guild.get_member(user_id)
if member is None:
continue
tasks.append(self.get_playcount_album(artistname, albumname, lastfm_username, member))
if tasks:
data = await asyncio.gather(*tasks)
for playcount, user, metadata in data:
artistname, albumname, image_url = metadata
if playcount > 0:
listeners.append((playcount, user))
else:
return await ctx.send("Nobody on this server has connected their last.fm account yet!")
artistname = util.escape_md(artistname)
albumname = util.escape_md(albumname)
rows = []
total = 0
for i, (playcount, user) in enumerate(
sorted(listeners, key=lambda p: p[0], reverse=True), start=1
):
rows.append(
f"`#{i:2}` **{util.displayname(user)}** — **{playcount}** {format_plays(playcount)}"
)
total += playcount
if not rows:
return await ctx.send(
f"Nobody on this server has listened to **{albumname}** by **{artistname}**"
)
if image_url is None:
image_url = await self.get_artist_image(artistname)
content = discord.Embed(title=f"Who knows **{albumname}**\n— by {artistname}")
content.set_thumbnail(url=image_url)
content.set_footer(text=f"Collective plays: {total}")
content.colour = await self.cached_image_color(image_url)
await util.send_as_pages(ctx, content, rows)
@commands.command()
@commands.guild_only()
async def crowns(self, ctx, *, user: discord.Member = None):
"""Check your artist crowns."""
if user is None:
user = ctx.author
crownartists = await self.bot.db.execute(
"""
SELECT artist_name, cached_playcount FROM artist_crown
WHERE guild_id = %s AND user_id = %s ORDER BY cached_playcount DESC
""",
ctx.guild.id,
user.id,
)
if not crownartists:
return await ctx.send(
"You haven't acquired any crowns yet! "
"Use the `>whoknows` command to claim crowns of your favourite artists :crown:"
)
rows = []
for artist, playcount in crownartists:
rows.append(
f"**{util.escape_md(str(artist))}** with **{playcount}** {format_plays(playcount)}"
)
content = discord.Embed(color=discord.Color.gold())
content.set_author(
name=f"👑 {util.displayname(user, escape=False)} artist crowns",
icon_url=user.avatar_url,
)
content.set_footer(text=f"Total {len(crownartists)} crowns")
await util.send_as_pages(ctx, content, rows)
@commands.command(hidden=True)
async def report(self, ctx, lastfm_username, *, reason):
"""Report lastfm account."""
lastfm_username = lastfm_username.strip("/").split("/")[-1].lower()
url = f"https://www.last.fm/user/{lastfm_username}"
data = await self.api_request(
{"user": lastfm_username, "method": "user.getinfo"}, ignore_errors=True
)
if data is None:
raise exceptions.Warning(f"`{url}` is not a valid Last.fm profile.")
exists = await self.bot.db.execute(
"SELECT * FROM lastfm_cheater WHERE lastfm_username = %s", lastfm_username.lower()
)
if exists:
raise exceptions.Info("This Last.fm account is already flagged")
content = discord.Embed(title="New Last.fm user report")
content.add_field(name="Profile", value=url)
content.add_field(name="Reason", value=reason)
content.description = (
"Are you sure you want to report this lastfm account?"
" Please note sending false reports or spamming **will get you blacklisted**."
)
# send confirmation message
msg = await ctx.send(embed=content)
async def confirm_ban():
content.add_field(
name="Reported by",
value=f"{ctx.author} (`{ctx.author.id}`)",
inline=False,
)
user_ids = await self.bot.db.execute(
"SELECT user_id FROM user_settings WHERE lastfm_username = %s", lastfm_username
)
if user_ids:
connected_accounts = []
for x in user_ids:
user = self.bot.get_user(x[0])
connected_accounts.append(f"{user} (`{user.id}`)")
content.add_field(
name="Connected by",
value=", ".join(connected_accounts),
inline=False,
)
content.set_footer(text=f">fmflag {lastfm_username} [reason]")
content.description = ""
await self.send_report(ctx, content, lastfm_username, reason)
await msg.edit(content="📨 Report sent!", embed=None)
async def cancel_ban():
await msg.edit(content="❌ Report cancelled.", embed=None)
functions = {"✅": confirm_ban, "❌": cancel_ban}
asyncio.ensure_future(
util.reaction_buttons(ctx, msg, functions, only_author=True, single_use=True)
)
async def send_report(self, ctx, content, lastfm_username, reason=None):
reports_channel = self.bot.get_channel(729736304677486723)
if reports_channel is None:
raise exceptions.Warning("Something went wrong.")
msg = await reports_channel.send(embed=content)
async def confirm_ban():
await self.bot.db.execute(
"INSERT INTO lastfm_cheater VALUES(%s, %s, %s)",
lastfm_username.lower(),
arrow.utcnow().datetime,
reason,
)
content.description = "Account flagged"
content.color = discord.Color.green()
await msg.edit(embed=content)
async def cancel_ban():
content.description = "Report ignored"
content.color = discord.Color.red()
await msg.edit(embed=content)
functions = {"✅": confirm_ban, "❌": cancel_ban}
asyncio.ensure_future(
util.reaction_buttons(ctx, msg, functions, single_use=True, only_owner=True)
)
@util.patrons_only()
@commands.command()
async def lyrics(self, ctx, *, query):
"""Search for song lyrics."""
if query.lower() == "np":
npd = await self.getnowplaying(ctx)
trackname = npd["track"]
artistname = npd["artist"]
if None in [trackname, artistname]:
return await ctx.send(":warning: Could not get currently playing track!")
query = artistname + " " + trackname
url = "https://api.audd.io/findLyrics/"
request_data = {
"api_token": AUDDIO_TOKEN,
"q": query,
}
async with aiohttp.ClientSession() as session:
async with session.post(url=url, data=request_data) as response:
data = await response.json()
if data["status"] != "success":
raise exceptions.Warning(
f"Something went wrong! `error {data['error']['error_code']}: {data['error']['error_message']}`"
)
results = data["result"]
if not results:
return await ctx.send("Found | |
import logging
import quopri
import re
from datetime import datetime
from email import utils as email_utils
from email.message import EmailMessage
from typing import List, Optional, Set, Tuple
from aiohttp.web_exceptions import HTTPBadRequest
from bs4 import BeautifulSoup
from buildpg.asyncpg import BuildPgConnection
from em2.background import push_all, push_multiple
from em2.core import Action, ActionTypes, Connections, MsgFormat, UserTypes, apply_actions, create_conv, get_create_user
from em2.protocol.core import Em2Comms, HttpError
from em2.utils.smtp import find_smtp_files
logger = logging.getLogger('em2.protocol.views.smtp')
__all__ = ['InvalidEmailMsg', 'remove_participants', 'get_email_recipients', 'process_smtp']
class InvalidEmailMsg(ValueError):
pass
async def remove_participants(conn: BuildPgConnection, conv_id: int, ts: datetime, user_ids: List[int]):
"""
Remove participants from a conversation. Bypasses apply_actions because the actor differs for each removed participant; the action rows are inserted directly instead.
Should be called inside a transaction.
"""
await conn.execute('delete from participants where conv=$1 and user_id=any($2)', conv_id, user_ids)
# TODO add reason when removing participant
r = await conn.fetch(
"""
insert into actions (conv, ts, act, actor, participant_user)
(select $1, $2, 'participant:remove', unnest($3::int[]), unnest($3::int[]))
returning id
""",
conv_id,
ts,
user_ids,
)
await conn.execute('update participants set seen=false where conv=$1', conv_id)
return [r_[0] for r_ in r]
async def get_email_recipients(to: List[str], cc: List[str], message_id: str, conn: BuildPgConnection) -> List[str]:
# recipients is a unique list of recipients which retains the order from to + cc
recipients = []
addr_set = set()
for _, addr in email_utils.getaddresses(to + cc):
if addr not in addr_set:
recipients.append(addr)
addr_set.add(addr)
if not recipients:
logger.warning('email with no recipient, ignoring %s', message_id)
raise HTTPBadRequest(text='no recipient, ignoring')
loc_users = await conn.fetchval("select 1 from users where user_type='local' and email=any($1) limit 1", recipients)
if not loc_users:
logger.warning('email with no local recipient (%r), ignoring %s', recipients, message_id)
raise HTTPBadRequest(text='no local recipient, ignoring')
return recipients
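# --- Editor's note (illustrative, not part of em2) ----------------------------
# The loop above keeps the first occurrence of each address while preserving the
# to + cc ordering. With hypothetical headers:
#
#     to = ['Anna <anna@example.com>', 'bob@example.com']
#     cc = ['anna@example.com', 'Carl <carl@example.com>']
#
# email_utils.getaddresses(to + cc) yields four (name, addr) pairs and the
# resulting recipients list is:
#
#     ['anna@example.com', 'bob@example.com', 'carl@example.com']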
async def process_smtp(
conns: Connections,
msg: EmailMessage,
recipients: List[str],
storage: str,
*,
spam: bool = None,
warnings: dict = None,
):
assert not msg['EM2-ID'], 'messages with EM2-ID header should be filtered out before this'
p = ProcessSMTP(conns)
await p.run(msg, recipients, storage, spam, warnings)
inline_regex = re.compile(' src')
class ProcessSMTP:
__slots__ = ('conns',)
def __init__(self, conns: Connections):
self.conns: Connections = conns
async def run(self, msg: EmailMessage, recipients: List[str], storage: str, spam: bool, warnings: dict):
# TODO deal with non multipart
_, actor_email = email_utils.parseaddr(msg['From'])
if not actor_email:
logger.warning('invalid smtp msg: "From" header', extra={msg: msg})
raise InvalidEmailMsg('invalid "From" header')
actor_email = actor_email.lower()
try:
message_id = msg['Message-ID'].strip('<> ')
except AttributeError as e:
logger.warning('invalid smtp msg (Message-ID): %s', e, exc_info=True, extra={'msg': msg})
raise InvalidEmailMsg('no "Message-ID" header found') from e
try:
timestamp = email_utils.parsedate_to_datetime(msg['Date'])
except (TypeError, ValueError) as e:
logger.warning('invalid smtp msg (Date) %s: %s', e.__class__.__name__, e, exc_info=True, extra={'msg': msg})
raise InvalidEmailMsg('invalid "Date" header')
conv_id, original_actor_id = await self.get_conv(msg)
actor_id = await get_create_user(self.conns, actor_email, UserTypes.remote_other)
existing_conv = bool(conv_id)
body, is_html, images = self.get_smtp_body(msg, message_id, existing_conv)
files = find_smtp_files(msg)
pg = self.conns.main
if existing_conv:
async with pg.transaction():
existing_prts = await pg.fetch(
'select email from participants p join users u on p.user_id=u.id where conv=$1', conv_id
)
existing_prts = {r[0] for r in existing_prts}
if actor_email not in existing_prts:
# reply from different address, we need to add the new address to the conversation
a = Action(act=ActionTypes.prt_add, participant=actor_email, actor_id=original_actor_id)
all_action_ids = await apply_actions(self.conns, conv_id, [a])
assert all_action_ids
else:
all_action_ids = []
# note: this could change the order of new participants to not match the SMTP headers, doesn't matter?
new_prts = set(recipients) - existing_prts
msg_format = MsgFormat.html if is_html else MsgFormat.plain
body = (body or '').strip()
actions = [
Action(act=ActionTypes.msg_add, actor_id=actor_id, body=body, msg_format=msg_format, files=files)
]
actions += [Action(act=ActionTypes.prt_add, actor_id=actor_id, participant=addr) for addr in new_prts]
action_ids = await apply_actions(self.conns, conv_id, actions, spam=spam, warnings=warnings)
assert action_ids
all_action_ids += action_ids
await pg.execute(
'update actions set ts=$1 where conv=$2 and id=any($3)', timestamp, conv_id, all_action_ids
)
send_id, add_action_pk = await pg.fetchrow(
"""
insert into sends (action, ref, complete, storage)
(select pk, $1, true, $2 from actions where conv=$3 and id=any($4) and act='message:add' limit 1)
returning id, action
""",
message_id,
storage,
conv_id,
action_ids,
)
await pg.execute('update files set send=$1 where action=$2', send_id, add_action_pk)
await push_multiple(self.conns, conv_id, action_ids, transmit=False)
else:
async with pg.transaction():
actions = [Action(act=ActionTypes.prt_add, actor_id=actor_id, participant=r) for r in recipients]
actions += [
Action(
act=ActionTypes.msg_add,
actor_id=actor_id,
body=body.strip(),
msg_format=MsgFormat.html if is_html else MsgFormat.plain,
files=files,
),
Action(act=ActionTypes.conv_publish, actor_id=actor_id, ts=timestamp, body=msg['Subject'] or '-'),
]
conv_id, conv_key = await create_conv(
conns=self.conns,
creator_email=actor_email,
actions=actions,
spam=spam,
warnings=warnings,
live=False,
)
send_id, add_action_pk = await pg.fetchrow(
"""
insert into sends (action, ref, complete, storage)
(select pk, $1, true, $2 from actions where conv=$3 and act='message:add' limit 1)
returning id, action
""",
message_id,
storage,
conv_id,
)
await pg.execute('update files set send=$1 where action=$2', send_id, add_action_pk)
await self.conns.redis.enqueue_job('post_receipt', conv_id)
if images:
await self.conns.redis.enqueue_job('get_images', conv_id, add_action_pk, images)
async def get_conv(self, msg: EmailMessage) -> Tuple[int, int]:
conv_actor = None
# find which conversation this relates to
in_reply_to = msg['In-Reply-To']
if in_reply_to:
conv_actor = await self.conns.main.fetchrow(
"""
select a.conv, a.actor from sends
join actions a on sends.action = a.pk
where sends.node is null and sends.ref = $1
order by a.id desc
limit 1
""",
self.clean_msg_id(in_reply_to),
)
references = msg['References']
if not conv_actor and references:
# try references instead to try and get conv_id
ref_msg_ids = {self.clean_msg_id(msg_id) for msg_id in references.split(' ') if msg_id}
if ref_msg_ids:
conv_actor = await self.conns.main.fetchrow(
"""
select a.conv, a.actor from sends
join actions a on sends.action = a.pk
where sends.node is null and sends.ref = any($1)
order by a.id desc
limit 1
""",
ref_msg_ids,
)
return conv_actor or (None, None)
def clean_msg_id(self, msg_id):
msg_id = msg_id.strip('<>\r\n')
if msg_id.endswith(self.conns.settings.smtp_message_id_domain):
msg_id = msg_id.split('@', 1)[0]
return msg_id
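# --- Editor's note (illustrative, not part of em2) ----------------------------
# Assuming settings.smtp_message_id_domain == 'em2.example.com' (hypothetical):
#
#     clean_msg_id('<abc123@em2.example.com>')  ->  'abc123'
#     clean_msg_id('<xyz@mail.gmail.com>')      ->  'xyz@mail.gmail.com'
#
# i.e. locally generated Message-IDs are reduced to their key, foreign ones are
# only stripped of the surrounding angle brackets.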
def get_smtp_body(self, msg: EmailMessage, message_id: str, existing_conv: bool) -> Tuple[str, bool, Set[str]]:
m: EmailMessage = msg.get_body(preferencelist=('html', 'plain'))
if not m:
raise RuntimeError('email with no content')
body = m.get_content()
is_html = m.get_content_type() == 'text/html'
if is_html and m['Content-Transfer-Encoding'] == 'quoted-printable':
# are there any other special characters to remove?
body = quopri.decodestring(body.replace('\xa0', '')).decode()
if not body:
logger.warning('Unable to find body in email "%s"', message_id)
images = set()
if is_html:
body, images = self.parse_html(body, existing_conv)
return body, is_html, images
def parse_html(self, body: str, existing_conv: bool) -> Tuple[str, Set[str]]:
soup = BeautifulSoup(body, 'html.parser')
if existing_conv:
# remove the body only if conversation already exists in the db
for el_selector in to_remove:
for el in soup.select(el_selector):
el.decompose()
# find images
images = [img['src'] for img in soup.select('img') if src_url_re.match(img['src'])]
for style in soup.select('style'):
images += [m.group(2) for m in style_url_re.finditer(style.string)]
# do it like this as we want to take the first max_ref_image_count unique images
image_set = set()
for image in images:
if image not in image_set:
image_set.add(image)
if len(image_set) >= self.conns.settings.max_ref_image_count:
break
# body = soup.prettify()
body = str(soup)
for regex, rep in html_regexes:
body = regex.sub(rep, body)
return body, image_set
to_remove = 'div.gmail_quote', 'div.gmail_extra' # 'div.gmail_signature'
style_url_re = re.compile(r'\surl\(([\'"]?)((?:https?:)?//.+?)\1\)', re.I)
src_url_re = re.compile(r'(?:https?:)?//', re.I)
html_regexes = [
(re.compile(r'<br/></div><br/>$', re.M), ''),
(re.compile(r'<br/>$', re.M), ''),
(re.compile(r'\n{2,}'), '\n'),
]
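# --- Editor's note (illustrative, not part of em2) ----------------------------
# Rough effect of the three substitutions above on hypothetical serialised HTML:
#
#     '<div>hi<br/></div><br/>'       ->  '<div>hi'
#     'line one<br/>\n\n\nline two'   ->  'line one\nline two'
#
# i.e. trailing <br/> reply scaffolding is dropped and runs of blank lines are
# collapsed to a single newline.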
async def post_receipt(ctx, conv_id: int):
"""
run after receiving a conversation: decide on leader, set live and notify
"""
async with ctx['pg'].acquire() as conn:
leader = await get_leader(ctx, conv_id, conn)
await conn.execute('update conversations set live=true, leader_node=$1 where id=$2', leader, conv_id)
conns = Connections(conn, ctx['redis'], ctx['settings'])
await push_all(conns, conv_id, transmit=False)
async def get_leader(ctx, conv_id: int, pg: BuildPgConnection) -> Optional[str]:
"""
Iterate over participants in the conversation (except the creator) and find the first one which is either local
or associated with another em2 node, return that node as leader (None if local).
"""
em2 = Em2Comms(ctx['settings'], ctx['client_session'], ctx['signing_key'], ctx['redis'], ctx['resolver'])
prt_users = await pg.fetch(
"""
select u.email, u.user_type from users u
join participants p on u.id = p.user_id
where p.conv=$1 and u.user_type != 'remote_other'
order by p.id
""",
conv_id,
)
for email, user_type in prt_users:
if user_type == UserTypes.local:
# this node is leader
return
elif user_type == UserTypes.new:
if await em2.check_local(email):
await pg.execute("update users set user_type='local' where email=$1", email)
return
try:
em2_node = await em2.get_em2_node(email)
except HttpError:
# domain has an em2 platform, but request failed, have to assume this isn't the leader
# TODO this could cause problems where different nodes assume different leaders
continue
new_user_type = UserTypes.remote_em2 if em2_node else UserTypes.remote_other
if user_type != new_user_type:
await pg.execute('update users set user_type=$1, v=null where email=$2', new_user_type, email)
if em2_node:
return em2_node  # NOTE: return value restored to match the docstring ("return that node as leader"); the original line appears truncated
if shape is not None:
if not isinstance(shape, dict):
raise TypeError('Window shape must be specified as a dictionary')
for key in ['cc', 'sim']:
if not isinstance(shape[key], str):
raise TypeError('Window shape must be a string')
if shape[key] not in ['rect', 'bhw', 'bnw', 'RECT', 'BHW', 'BNW']:
raise ValueError('Invalid value for window shape specified.')
else:
shape = {key: 'rect' for key in ['cc', 'sim']}
# shape = 'rect'
if pad is None:
pad = {key: 1.0 for key in ['cc', 'sim']}
else:
if not isinstance(pad, dict):
raise TypeError('Padding for delay transform must be specified as a dictionary')
for key in ['cc', 'sim']:
if not isinstance(pad[key], (int, float)):
raise TypeError('pad fraction must be a scalar value.')
if pad[key] < 0.0:
pad[key] = 0.0
if verbose:
print '\tPad fraction found to be negative. Resetting to 0.0 (no padding will be applied).'
if not isinstance(bpcorrect, bool):
raise TypeError('Input keyword bpcorrect must be of boolean type')
vis_noise_freq = NP.copy(self.ia.vis_noise_freq)
result = {}
for key in ['cc', 'sim']:
if (key == 'sim') or ((key == 'cc') and (self.cc_lags is not None)):
freq_wts = NP.empty((bw_eff[key].size, self.f.size))
frac_width = DSP.window_N2width(n_window=None, shape=shape[key], area_normalize=False, power_normalize=True)
window_loss_factor = 1 / frac_width
n_window = NP.round(window_loss_factor * bw_eff[key] / self.df).astype(NP.int)
ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.f.reshape(-1,1), freq_center[key].reshape(-1,1), distance_ULIM=0.5*self.df, remove_oob=True)
sortind = NP.argsort(ind_channels)
ind_freq_center = ind_freq_center[sortind]
ind_channels = ind_channels[sortind]
dfrequency = dfrequency[sortind]
n_window = n_window[sortind]
for i,ind_chan in enumerate(ind_channels):
window = NP.sqrt(frac_width * n_window[i]) * DSP.windowing(n_window[i], shape=shape[key], centering=True, peak=None, area_normalize=False, power_normalize=True)
window_chans = self.f[ind_chan] + self.df * (NP.arange(n_window[i]) - int(n_window[i]/2))
ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.f.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.5*self.df, remove_oob=True)
sind = NP.argsort(ind_window_chans)
ind_window_chans = ind_window_chans[sind]
ind_chans = ind_chans[sind]
dfreq = dfreq[sind]
window = window[ind_window_chans]
window = NP.pad(window, ((ind_chans.min(), self.f.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0)))
freq_wts[i,:] = window
bpcorrection_factor = 1.0
npad = int(self.f.size * pad[key])
lags = DSP.spectral_axis(self.f.size + npad, delx=self.df, use_real=False, shift=True)
if key == 'cc':
skyvis_freq = self.cc_skyvis_freq[:,:self.f.size,:]
vis_freq = self.cc_vis_freq[:,:self.f.size,:]
skyvis_res_freq = self.cc_skyvis_res_freq[:,:self.f.size,:]
vis_res_freq = self.cc_vis_res_freq[:,:self.f.size,:]
skyvis_net_freq = self.cc_skyvis_net_freq[:,:self.f.size,:]
vis_net_freq = self.cc_vis_net_freq[:,:self.f.size,:]
if bpcorrect:
bpcorrection_factor = NP.where(NP.abs(self.bp_wts)>0.0, 1/self.bp_wts, 0.0)
bpcorrection_factor = bpcorrection_factor[:,NP.newaxis,:,:]
else:
skyvis_freq = NP.copy(self.ia.skyvis_freq)
vis_freq = NP.copy(self.ia.vis_freq)
skyvis_lag = DSP.FT1D(NP.pad(skyvis_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
vis_lag = DSP.FT1D(NP.pad(vis_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
vis_noise_lag = DSP.FT1D(NP.pad(vis_noise_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
lag_kernel = DSP.FT1D(NP.pad(self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
result[key] = {'freq_center': freq_center[key], 'shape': shape[key], 'freq_wts': freq_wts, 'bw_eff': bw_eff[key], 'npad': npad, 'lags': lags, 'skyvis_lag': skyvis_lag, 'vis_lag': vis_lag, 'lag_kernel': lag_kernel, 'lag_corr_length': self.f.size / NP.sum(freq_wts, axis=1)}
if key == 'cc':
skyvis_res_lag = DSP.FT1D(NP.pad(skyvis_res_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
vis_res_lag = DSP.FT1D(NP.pad(vis_res_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
skyvis_net_lag = DSP.FT1D(NP.pad(skyvis_net_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
vis_net_lag = DSP.FT1D(NP.pad(vis_net_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
result[key]['vis_res_lag'] = vis_res_lag
result[key]['skyvis_res_lag'] = skyvis_res_lag
result[key]['vis_net_lag'] = vis_net_lag
result[key]['skyvis_net_lag'] = skyvis_net_lag
result[key]['bpcorrect'] = bpcorrect
else:
result[key]['vis_noise_lag'] = vis_noise_lag
if verbose:
print '\tSub-band(s) delay transform computed'
self.subband_delay_spectra = result
if action == 'return':
return result
#############################################################################
def get_horizon_delay_limits(self, phase_center=None,
phase_center_coords=None):
"""
-------------------------------------------------------------------------
Estimates the delay envelope determined by the sky horizon for the
baseline(s) for the phase centers
Inputs:
phase_center
[numpy array] Phase center of the observation as 2-column or
3-column numpy array. Two columns are used when it is specified
in 'hadec' or 'altaz' coordinates as indicated by the input
phase_center_coords or by three columns when 'dircos' coordinates
are used. This is where the telescopes will be phased up to as
reference. Coordinate system for the phase_center is specified
by another input phase_center_coords. Default=None implies the
corresponding attribute from the DelaySpectrum instance is used.
This is a Nx2 or Nx3 array
phase_center_coords
[string] Coordinate system for array phase center. Accepted
values are 'hadec' (HA-Dec), 'altaz' (Altitude-Azimuth) or
'dircos' (direction cosines). Default=None implies the
corresponding attribute from the DelaySpectrum instance is used.
Outputs:
horizon_envelope:
NxMx2 matrix where M is the number of baselines and N is the number
of phase centers. horizon_envelope[:,:,0] contains the minimum delay
after accounting for (any) non-zenith phase center.
horizon_envelope[:,:,1] contains the maximum delay after accounting
for (any) non-zenith phase center(s).
-------------------------------------------------------------------------
"""
if phase_center is None:
phase_center = self.ia.phase_center
phase_center_coords = self.ia.phase_center_coords
if phase_center_coords not in ['hadec', 'altaz', 'dircos']:
raise ValueError('Phase center coordinates must be "altaz", "hadec" or "dircos"')
if phase_center_coords == 'hadec':
pc_altaz = GEOM.hadec2altaz(phase_center, self.ia.latitude, units='degrees')
pc_dircos = GEOM.altaz2dircos(pc_altaz, units='degrees')
elif phase_center_coords == 'altaz':
pc_dircos = GEOM.altaz2dircos(phase_center, units='degrees')
elif phase_center_coords == 'dircos':
pc_dircos = phase_center
horizon_envelope = DLY.horizon_delay_limits(self.ia.baselines, pc_dircos, units='mks')
return horizon_envelope
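# --- Editor's note (illustrative usage, hypothetical values) -----------------
# For a DelaySpectrum instance ds and a single zenith phase center specified in
# direction cosines:
#
#     pc = NP.asarray([[0.0, 0.0, 1.0]])
#     env = ds.get_horizon_delay_limits(phase_center=pc,
#                                       phase_center_coords='dircos')
#     # env.shape == (1, n_baselines, 2); env[...,0] / env[...,1] hold the
#     # minimum / maximum horizon delays per baseline in seconds ('mks' units)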
#############################################################################
def set_horizon_delay_limits(self):
"""
-------------------------------------------------------------------------
Estimates the delay envelope determined by the sky horizon for the
baseline(s) for the phase centers of the DelaySpectrum instance. No
output is returned. Uses the member function get_horizon_delay_limits()
-------------------------------------------------------------------------
"""
self.horizon_delay_limits = self.get_horizon_delay_limits()
#############################################################################
def save(self, ds_outfile, ia_outfile, tabtype='BinTableHDU', overwrite=False,
verbose=True):
"""
-------------------------------------------------------------------------
Saves the interferometer array delay spectrum information to disk.
Inputs:
ds_outfile [string] Filename with full path for delay spectrum
data to be saved to. Will be appended with '.ds.fits'
ia_outfile [string] Filename with full path for interferometer array
data to be saved to. Will be appended with '.fits'
extension
Keyword Input(s):
tabtype [string] indicates table type for one of the extensions in
the FITS file. Allowed values are 'BinTableHDU' and
'TableHDU' for binary and ascii tables respectively. Default
is 'BinTableHDU'.
overwrite [boolean] True indicates overwrite even if a file already
exists. Default = False (does not overwrite)
verbose [boolean] If True (default), prints diagnostic and progress
messages. If False, suppress printing such messages.
-------------------------------------------------------------------------
"""
try:
ds_outfile, ia_outfile
except NameError:
raise NameError('Both delay spectrum and interferometer array output filenames must be specified. Aborting DelaySpectrum.save()...')
if verbose:
print '\nSaving information about interferometer array...'
self.ia.save(ia_outfile, tabtype=tabtype, overwrite=overwrite,
verbose=verbose)
if verbose:
print '\nSaving information about delay spectra...'
hdulist = []
hdulist += [fits.PrimaryHDU()]
hdulist[0].header['EXTNAME'] = 'PRIMARY'
hdulist[0].header['NCHAN'] = (self.f.size, 'Number of frequency channels')
hdulist[0].header['NLAGS'] = (self.lags.size, 'Number of lags')
hdulist[0].header['freq_resolution'] = (self.df, 'Frequency resolution (Hz)')
hdulist[0].header['N_ACC'] = (self.n_acc, 'Number of accumulations')
hdulist[0].header['PAD'] = (self.pad, 'Padding factor')
hdulist[0].header['DBUFFER'] = (self.clean_window_buffer, 'CLEAN window buffer (1/bandwidth)')
hdulist[0].header['IARRAY'] = (ia_outfile+'.fits', 'Location of InterferometerArray simulated visibilities')
if verbose:
print '\tCreated a primary HDU.'
# cols = []
# cols += [fits.Column(name='frequency', format='D', array=self.f)]
# cols += [fits.Column(name='lag', format='D', array=self.lags)]
# columns = _astropy_columns(cols, tabtype=tabtype)
# tbhdu = fits.new_table(columns)
# tbhdu.header.set('EXTNAME', 'SPECTRAL INFO')
# hdulist += [tbhdu]
# if verbose:
# print '\tCreated an extension for spectral information.'
hdulist += [fits.ImageHDU(self.f, name='FREQUENCIES')]
hdulist += [fits.ImageHDU(self.lags, name='LAGS')]
if verbose:
print '\tCreated an extension for spectral information.'
hdulist += [fits.ImageHDU(self.horizon_delay_limits, name='HORIZON LIMITS')]
if verbose:
print '\tCreated an extension for horizon delay limits of size {0[0]} x {0[1]} x {0[2]} as a function of snapshot instance, baseline, and (min,max) limits'.format(self.horizon_delay_limits.shape)
hdulist += [fits.ImageHDU(self.bp, name='BANDPASS')]
if verbose:
print '\tCreated an extension for bandpass functions of size {0[0]} x {0[1]} x {0[2]} as a function of baseline, frequency, and snapshot instance'.format(self.bp.shape)
hdulist += [fits.ImageHDU(self.bp_wts, name='BANDPASS WEIGHTS')]
if verbose:
print '\tCreated an extension for bandpass weights of size {0[0]} x {0[1]} x {0[2]} as a function of baseline, frequency, and snapshot instance'.format(self.bp_wts.shape)
if self.lag_kernel is not None:
hdulist += [fits.ImageHDU(self.lag_kernel.real, name='LAG KERNEL REAL')]
hdulist += [fits.ImageHDU(self.lag_kernel.imag, name='LAG KERNEL IMAG')]
if verbose:
print '\tCreated an extension for convolving lag kernel of size {0[0]} x {0[1]} x {0[2]} as a function of baseline, lag, and snapshot instance'.format(self.lag_kernel.shape)  # NOTE: tail of this line reconstructed from the neighbouring messages; the original was cut off
node)
event = self.parse_event_expr(node, literal=False)
if event is not None:
event.record_history = True
expr.pattern = self.pattern_from_event(event)
if node.func.id == KW_RECV_QUERY:
expr.domain = self.create_expr(dast.ReceivedExpr, node)
else:
expr.domain = self.create_expr(dast.SentExpr, node)
expr.domain.event = event
self.pop_state()
self.pop_state()
return expr
elif (isinstance(node, Compare) and len(node.ops) == 1 and
type(node.ops[0]) is In):
expr = self.create_expr(dast.DomainSpec, node)
self.current_context = Assignment(expr)
expr.pattern = self.parse_pattern_expr(node.left)
self.current_context = IterRead(expr, type=expr.pattern)
expr.domain = self.visit(node.comparators[0])
if isinstance(expr.domain, dast.HistoryExpr):
expr.pattern = self.pattern_from_event(expr.domain.event)
self.pop_state()
return expr
elif isinstance(node, comprehension) or isinstance(node, For):
expr = self.create_expr(dast.DomainSpec, node)
self.current_context = Assignment(expr)
if self.get_option('enable_iterator_pattern', default=False):
expr.pattern = self.parse_pattern_expr(node.target)
else:
expr.pattern = self.visit(node.target)
self.current_context = IterRead(expr, type=expr.pattern)
expr.domain = self.visit(node.iter)
if isinstance(expr.domain, dast.HistoryExpr):
expr.pattern = self.pattern_from_event(expr.domain.event)
self.pop_state()
return expr
else:
raise MalformedStatementError("malformed domain specifier.", node)
def parse_quantified_expr(self, node):
if node.func.id == KW_EXISTENTIAL_QUANT:
context = dast.ExistentialOp
elif node.func.id == KW_UNIVERSAL_QUANT:
context = dast.UniversalOp
else:
raise MalformedStatementError("unknown quantifier.", node,
node.func.id)
expr = self.create_expr(dast.QuantifiedExpr, node, {'op': context})
self.current_context = Read(expr)
self.enter_query()
try:
expr.domains, predicates = self.parse_domains_and_predicate(node)
if len(predicates) > 1:
self.warn("multiple predicates in quantified expression, "
"first one is used, the rest are ignored.", node)
expr.predicate = predicates[0]
finally:
self.leave_query(audit=True)
self.pop_state()
return expr
def parse_comprehension(self, node):
if node.func.id == KW_COMP_SET:
expr_type = dast.SetCompExpr
elif node.func.id == KW_COMP_LIST:
expr_type = dast.ListCompExpr
elif node.func.id == KW_COMP_DICT:
expr_type = dast.DictCompExpr
elif node.func.id == KW_COMP_TUPLE:
expr_type = dast.TupleCompExpr
expr = self.create_expr(expr_type, node)
self.enter_query()
first_arg = node.args[0]
for arg in node.args[1:]:
condition = None
# try to treat it as domain spec first:
if self.test_domain_spec(arg):
try:
dom = self.parse_domain_spec(arg)
if len(dom.freevars) == 0:
# no freevars, degenerate to membership test:
condition = self.create_expr(dast.ComparisonExpr, arg)
condition.left = Pattern2Constant(
self.current_parent).visit(dom.pattern.pattern)
if condition.left is None:
self.error("internal error: unable to generate "
"constant from pattern.", node)
condition.comparator = dast.InOp
condition.right = dom.domain
self.pop_state()
else:
condition = dom
except MalformedStatementError as e:
self.error("malformed domain spec: " + e.reason, e.node)
else:
# if not, then it's just a boolean condition:
condition = self.visit(arg)
if condition is not None:
expr.conditions.append(condition)
if expr_type is dast.DictCompExpr:
if not (isinstance(first_arg, Tuple) and
len(first_arg.elts) == 2):
self.error("malformed element in dict comprehension.",
first_arg)
else:
kv = dast.KeyValue(expr)
kv.key = self.visit(first_arg.elts[0])  # key/value come from the 2-tuple argument
kv.value = self.visit(first_arg.elts[1])
expr.elem = kv
else:
expr.elem = self.visit(first_arg)
self.leave_query()
self.pop_state()
return expr
def audit_query(self, expr):
self.debug("auditing " + str(expr), expr)
self.debug("...freevars: " + str(expr.freevars), expr)
self.debug("...boundvars: " + str(expr.boundvars), expr)
intersect = {v.name for v in expr.ordered_freevars} & \
{v.name for v in expr.ordered_boundvars}
if intersect:
msg = ("query variables " +
" ".join(["'" + n + "'" for n in intersect]) +
" are both free and bound.")
self.error(msg, expr)
def parse_aggregates(self, node):
if node.func.id == KW_AGGREGATE_SUM:
expr_type = dast.SumExpr
elif node.func.id == KW_AGGREGATE_SIZE:
expr_type = dast.SizeExpr
elif node.func.id == KW_AGGREGATE_MIN:
expr_type = dast.MinExpr
elif node.func.id == KW_AGGREGATE_MAX:
expr_type = dast.MaxExpr
expr = self.create_expr(expr_type, node)
self.current_context = Read(expr)
first_arg = node.args[0]
node.args = node.args[1:]
try:
expr.domains, expr.conditions = self.parse_domains_and_predicate(node)
expr.elem = self.visit(first_arg)
finally:
self.pop_state()
return expr
def parse_domains_and_predicate(self, node):
preds = []
# Find predicate:
for kw in node.keywords:
if kw.arg == KW_SUCH_THAT:
preds.append(kw.value)
else:
self.error("Unknown keyword '%s' in comprehension expression." %
kw.arg, node)
# ..if no predicate found, then default to True:
if len(preds) == 0:
preds = [NameConstant(True)]
domains = node.args
if len(domains) == 0:
self.warn("No domain specifiers in comprehension expression.", node)
dadomains = [self.parse_domain_spec(node) for node in domains]
dapredicates = [self.visit(pred) for pred in preds]
return dadomains, dapredicates
class NameTransformer(NodeTransformer):
def visit_Name(self, node):
return Str(node.id)
def parse_config_value(self, vnode):
value = None
# Configuration values can not contain variables, so we treat all
# 'Name's as 'Str's:
vnode = Parser.NameTransformer().visit(vnode)
if isinstance(vnode, Num) or isinstance(vnode, Str) or \
isinstance(vnode, Bytes) or isinstance(vnode, NameConstant) or \
isinstance(vnode, Set) or isinstance(vnode, List) or \
isinstance(vnode, Tuple):
value = self.visit(vnode)
else:
self.error("Invalid configuration value.", vnode)
return value
def parse_config_section(self, node):
res = []
for argexpr in node.args:
if (isinstance(argexpr, Compare) and
isinstance(argexpr.left, Name) and
len(argexpr.ops) == 1 and
type(argexpr.ops[0]) is Is):
key = argexpr.left.id.casefold()
value = self.parse_config_value(argexpr.comparators[0])
if value is not None:
res.append((key, value))
for kw in node.keywords:
key = kw.arg.casefold()
vnode = kw.value
value = self.parse_config_value(vnode)
if value is not None:
res.append((key, value))
return res
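# --- Editor's note (illustrative, hypothetical DistAlgo source) ---------------
# A configuration call such as
#
#     config(channel is fifo, clock='Lamport')
#
# arrives here as one Compare argument plus one keyword. Keys are casefolded and
# bare names on the value side are read as strings, so the returned list is
# roughly [('channel', <'fifo'>), ('clock', <'Lamport'>)], with the values
# wrapped in the corresponding dast constant expressions.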
def visit_Call(self, node):
try:
if self.expr_check(Quantifiers, 1, None, node,
optional_keywords={KW_SUCH_THAT}):
return self.parse_quantified_expr(node)
if self.expr_check(ComprehensionTypes, 2, None, node):
return self.parse_comprehension(node)
if self.current_process is not None and \
self.expr_check({KW_RECV_QUERY, KW_SENT_QUERY}, 1, 1, node,
optional_keywords=EventKeywords):
if isinstance(self.current_context, IterRead):
if node.func.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
else:
expr = self.create_expr(dast.SentExpr, node)
expr.context = self.current_context.type
expr.event = self.parse_event_expr(
node, literal=(not self.get_option(
'enable_iterator_pattern', default=False)))
self.pop_state()
if expr.event is not None:
expr.event.record_history = True
return expr
else:
outer = self.create_expr(dast.ComparisonExpr, node)
outer.comparator = dast.InOp
if node.func.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
else:
expr = self.create_expr(dast.SentExpr, node)
if self.current_context is not None:
expr.context = self.current_context.type
event = self.parse_event_expr(
node, literal=(not self.get_option(
'enable_membertest_pattern', default=False)))
self.pop_state()
expr.event = event
outer.right = expr
if event is not None:
outer.left = self.pattern_from_event(
event, literal=(not self.get_option(
'enable_membertest_pattern', default=False)))
event.record_history = True
self.pop_state()
return outer
except MalformedStatementError as e:
self.error("malformed {name} expression: {reason}".format(
name=e.name if e.name is not None else "",
reason=e.reason), e.node)
return dast.SimpleExpr(self.current_parent)
expr = None
if self.expr_check(ApiMethods, None, None, node,
keywords=None, optional_keywords=None):
self.debug("Api method call: " + node.func.id, node)
expr = self.create_expr(dast.ApiCallExpr, node)
expr.func = node.func.id
elif self.expr_check(BuiltinMethods, None, None, node,
keywords=None, optional_keywords=None):
self.debug("Builtin method call: " + node.func.id, node)
expr = self.create_expr(dast.BuiltinCallExpr, node)
expr.func = node.func.id
elif self.expr_check({KW_SETUP}, None, None, node,
keywords=None, optional_keywords=None):
self.debug("Setup expression. ", node)
expr = self.create_expr(dast.SetupExpr, node)
elif self.expr_check({KW_START}, None, None, node,
keywords=None, optional_keywords=None):
self.debug("Start expression. ", node)
expr = self.create_expr(dast.StartExpr, node)
elif self.expr_check({KW_CONFIG}, None, None, node,
keywords=None, optional_keywords=None):
self.debug("Config expression. ", node)
expr = self.create_expr(dast.ConfigExpr, node)
elif self.expr_check(AggregateMap, 1, None, node,
keywords={}, optional_keywords={}):
self.debug("Aggregate: " + node.func.id, node)
expr = self.create_expr(AggregateMap[node.func.id], node)
else:
if isinstance(node.func, Name):
self.debug("Method call: " + str(node.func.id), node)
expr = self.create_expr(dast.CallExpr, node)
self.current_context = FunCall(expr)
expr.func = self.visit(node.func)
self.current_context = Read(expr)
expr.args = [self.visit(a) for a in node.args]
expr.keywords = [(kw.arg, self.visit(kw.value))
for kw in node.keywords]
# Python 3.5 got rid of `starargs' and `kwargs' on Call objects:
if sys.version_info < (3, 5):
expr.starargs = self.visit(node.starargs) \
if node.starargs is not None else None
expr.kwargs = self.visit(node.kwargs) \
if node.kwargs is not None else None
self.pop_state()
return expr
def visit_Name(self, node):
if node.id in {KW_TRUE, KW_FALSE, KW_NULL}:
if type(self.current_context) in {Assignment, Update, Delete}:
self.warn("Constant expression in update context.", node)
if node.id == KW_TRUE:
return self.create_expr(dast.TrueExpr, node, nopush=True)
elif node.id == KW_FALSE:
return self.create_expr(dast.FalseExpr, node, nopush=True)
elif node.id == KW_NULL:
return self.create_expr(dast.NoneExpr, node, nopush=True)
if self.current_process is not None and node.id == KW_SELF:
return self.create_expr(dast.SelfExpr, node, nopush=True)
if (self.current_process is not None and
(node.id in {KW_RECV_QUERY, KW_SENT_QUERY})):
if node.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
event_type = dast.ReceivedEvent
else:
expr = self.create_expr(dast.SentExpr, node)
event_type = dast.SentEvent
if (isinstance(self.current_context, Read) and
isinstance(self.current_context.type, dast.PatternExpr)):
expr.context = self.current_context.type
event = self.event_from_pattern(expr.context, event_type)
expr.event = event
event.record_history = True
else:
self.error("Invalid context for '%s'" % node.id, node)
self.pop_state()
return expr
# NamedVar is not by itself an Expression, we'll have to wrap it in a
# SimpleExpr:
expr = self.create_expr(dast.SimpleExpr, node)
n = self.current_scope.find_name(node.id, local=False)
if isinstance(self.current_context, Assignment) or\
isinstance(self.current_context, Delete):
if n is None:
self.debug("Adding name %s to %s" % (node.id,
self.current_scope), node)
n = self.current_scope.add_name(node.id)
n.add_assignment(self.current_context.node,
self.current_context.type)
elif isinstance(self.current_context, Update):
if n is None:
self.warn("Possible use of uninitialized variable '%s'" %
node.id, node)
self.debug(str(self.current_scope.parent_scope), node)
n = self.current_scope.add_name(node.id)
n.add_update(self.current_context.node, None,
self.current_context.type)
elif isinstance(self.current_context, Read) or \
isinstance(self.current_context, FunCall):
if n is None:
self.warn("Possible use of uninitialized variable '%s'" %
node.id, node)
self.debug(str(self.current_scope.parent_scope), node)
if self.current_scope.parent_scope is not None:
self.debug(self.current_scope.parent_scope._names, node)
else:
self.debug(self.current_scope._names, node)
n = self.current_scope.add_name(node.id)
n.add_read(expr)
if n is None:
# A fallback for the cases we don't care about (i.e.
# annontations)
n = self.current_scope.add_name(node.id)
expr.value = n
self.pop_state()
return expr
def visit_Str(self, node):
expr = self.create_expr(dast.ConstantExpr, node)
expr.value = node.s
self.pop_state()
return expr
def visit_FormattedValue(self, node):
expr = self.create_expr(dast.FormattedValueExpr, | |
"""
Inference: loads models from hdf5-files and renders them
"""
import os
import numpy as np
import torch
import torch.nn.functional as F
from typing import Union, List, Any, Optional
import enum
import h5py
import io
import collections
import imageio
from functools import lru_cache
import logging
import subprocess
import common.utils as utils
import pyrenderer
from volnet.network import SceneRepresentationNetwork, InputParametrization
from volnet.input_data import TrainingInputData
from volnet.raytracing import Raytracing
class LoadedModel:
"""
Class to load trained models from hdf5-checkpoints,
evaluate them in world and screen space and
convert them to compiled tensorcore implementations.
Note: for time-dependent volumes,
the time-indices are the actual timestep from the underlying dataset.
That is, the integer values represent actual ground truth data.
As the latent space variables are usually only defined sparsely,
the
"""
class EvaluationMode(enum.Enum):
TENSORCORES_SHARED = enum.auto()
TENSORCORES_MIXED = enum.auto()
PYTORCH32 = enum.auto()
PYTORCH16 = enum.auto()
@staticmethod
def _get_input_data(opt, force_config_file:str, _CACHE=dict()):
# setup config file mapper
def mapper(name:str, force_config_file=force_config_file):
if force_config_file is not None:
return force_config_file
#else use from checkpoint
if os.path.exists(name): return name
# replace "server" config files with normal config files
return name.replace('-server.json', '.json')
TrainingInputData.set_config_file_mapper(mapper)
# translate volume filenames if trained on the server, evaluated locally
volume_filenames = opt['volume_filenames']
if volume_filenames is not None and os.name=='nt':
base_data_folder = os.path.abspath(os.path.join(os.path.split(__file__)[0], '../../..'))
volume_filenames = volume_filenames.replace("/home/weiss", base_data_folder)
# filter out options only for TrainingInputData for better caching
opt2 = {
'settings': opt['settings'],
'tf_directory': opt['tf_directory'],
'volume_filenames': volume_filenames,
'ensembles': opt['ensembles'],
'time_keyframes': opt['time_keyframes'],
'time_train': opt['time_train'],
'time_val': opt['time_val']
}
opt_string = str(opt2)
d = _CACHE.get(opt_string, None)
if d is None:
d = TrainingInputData(opt2, check_volumes_exist=False)
_CACHE[opt_string] = d
return d
@staticmethod
def setup_config_file_mapper():
if LoadedModel._config_file_mapper_initialized: return
def mapper(name:str):
if os.path.exists(name): return name
# replace "server" config files with normal config files
return name.replace('-server.json', '.json')
TrainingInputData.set_config_file_mapper(mapper)
LoadedModel._config_file_mapper_initialized = True
def __init__(self, filename_or_hdf5:Union[str, h5py.File],
epoch:int=-1, grid_encoding=None,
force_config_file:str=None):
"""
Loads the network from the filename or directly h5py file.
:param filename_or_hdf5: the filename
:param epoch: the epoch to read the weights from
:param grid_encoding: the grid encoding for TensorCores
:param force_config_file: if not None, the path to the .json config file
          that is enforced. This overwrites the TF and camera settings;
          the filenames of the volumes are retained.
"""
if isinstance(filename_or_hdf5, str):
assert filename_or_hdf5.endswith(".hdf5")
self._filename = os.path.splitext(os.path.split(filename_or_hdf5)[1])[0]
print("Load network from", filename_or_hdf5)
with h5py.File(filename_or_hdf5, 'r') as f:
self._init_from_hdf5(f, epoch, grid_encoding, force_config_file)
elif isinstance(filename_or_hdf5, h5py.File):
self._filename = None
self._init_from_hdf5(filename_or_hdf5, epoch, grid_encoding, force_config_file)
else:
raise ValueError("Unknown argument", filename_or_hdf5)
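    # Minimal usage sketch (hypothetical paths; assumes a CUDA device, since the
    # networks are moved to torch.device("cuda") in _init_from_hdf5):
    #
    #   lm = LoadedModel("results/example_run.hdf5")
    #   net32, net16 = lm.get_network_pytorch()
    #   if lm.is_tensorcores_available():
    #       lm.save_compiled_network("results/example_run.volnet")
    #
    # The .hdf5 and .volnet paths above are illustrative only.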
def _init_from_hdf5(self, f:h5py.File, epoch:int, grid_encoding, force_config_file:str):
self._dtype = torch.float32
self._device = torch.device("cuda")
self._opt = collections.defaultdict(lambda: None)
self._opt.update(f.attrs)
self._input_data = LoadedModel._get_input_data(self._opt, force_config_file)
self._image_evaluator = self._input_data.default_image_evaluator()
# self._image_evaluator.selected_channel = pyrenderer.IImageEvaluator.ChannelMode.Color
total_losses = f['total']
if total_losses[-1] == 0:
print("WARNING: Last loss is zero, training most likely didn't finish. Filename: "+f.filename)
self._training_time = float(f['times'][-1])
# hack, fix for old networks
is_new_network = True
git_hash = self._opt['git'] or ""
if len(git_hash)>0:
try:
exit_code = subprocess.run(["git", "merge-base", "--is-ancestor", "59fc3010267a00d111a16bce591fd6a0e7cd6c8b", git_hash]).returncode
is_new_network = True if exit_code==0 else False
print("Based on the git-commit of the checkpoint, it is a %s network"%("new" if is_new_network else "old"))
            except Exception:
print("unable to check git commit, assume new network architecture")
InputParametrization.PREMULTIPLY_2_PI = is_new_network
self._network = SceneRepresentationNetwork(self._opt, self._input_data, self._dtype, self._device)
self._has_direction = self._network.use_direction()
weights_np = f['weights'][epoch, :]
weights_bytes = io.BytesIO(weights_np.tobytes())
self._network.load_state_dict(
torch.load(weights_bytes, map_location=self._device), strict=True)
self._network.to(device=self._device)
weights_bytes = io.BytesIO(weights_np.tobytes())
self._network16 = SceneRepresentationNetwork(self._opt, self._input_data, self._dtype, self._device)
self._network16.load_state_dict(
torch.load(weights_bytes, map_location=self._device), strict=True)
self._network16.to(dtype=torch.float16, device=self._device)
self._volume_grid = self._image_evaluator.volume
# create tensorcores network
self._tensorcores_available = False
if grid_encoding is None:
grid_encoding = pyrenderer.SceneNetwork.LatentGrid.Float
try:
self._scene_network, self._grid_encoding_error = self._network.export_to_pyrenderer(
self._opt, grid_encoding, return_grid_encoding_error = True)
self._num_parameters = self._scene_network.num_parameters()
def to_float3(v):
return pyrenderer.float3(v.x, v.y, v.z)
self._scene_network.box_min = to_float3(self._image_evaluator.volume.box_min())
self._scene_network.box_size = to_float3(self._image_evaluator.volume.box_size())
self._warps_shared = self._scene_network.compute_max_warps(False)
self._warps_mixed = self._scene_network.compute_max_warps(True)
print("Warps shared:", self._warps_shared, ", warps mixed:", self._warps_mixed)
self._volume_network = pyrenderer.VolumeInterpolationNetwork()
self._volume_network.set_network(self._scene_network)
self._tensorcores_available = True
except Exception as ex:
print("Unable to load tensor core implementation:", ex)
print("Loaded, output mode:", self._network.output_mode())
self._network_output_mode = self._network.output_mode().split(':')[0] # trim options
self._raytracing = Raytracing(self._input_data.default_image_evaluator(),
self._network_output_mode, 0.01, 128, 128,
self._dtype, self._device)
def get_attr_or_None(a):
return f.attrs[a] if a in f.attrs else None
self.time_keyframes = get_attr_or_None('time_keyframes')
self.time_train = get_attr_or_None('time_train')
def filename(self):
return self._filename
def training_time_seconds(self):
return self._training_time
def fill_weights(self, weights, epoch:int):
weights_np = weights[epoch, :]
weights_bytes = io.BytesIO(weights_np.tobytes())
self._network.load_state_dict(
torch.load(weights_bytes, map_location=self._device), strict=True)
weights_bytes = io.BytesIO(weights_np.tobytes())
self._network16.load_state_dict(
torch.load(weights_bytes, map_location=self._device), strict=True)
self._network16.to(dtype=torch.float16, device=self._device)
self._scene_network = self._network.export_to_pyrenderer(self._opt)
def is_time_dependent(self):
"""
Returns true iff the network/data is time- or ensemble-dependent.
:return:
"""
return self._input_data.volume_filenames() is not None
def min_timestep(self):
"""
If time-dependent, returns the minimal timestep index (inclusive)
"""
assert self.is_time_dependent()
return self._input_data.time_keyframe_indices()[0]
def max_timestep(self):
"""
If time-dependent, returns the maximal timestep index (inclusive)
"""
assert self.is_time_dependent()
return self._input_data.time_keyframe_indices()[-1]
def min_ensemble(self):
"""
        If time-dependent, returns the minimal ensemble index (inclusive)
"""
assert self.is_time_dependent()
return self._input_data.ensemble_indices()[0]
def max_ensemble(self):
"""
        If time-dependent, returns the maximal ensemble index (inclusive)
"""
assert self.is_time_dependent()
return self._input_data.ensemble_indices()[-1]
def timestep_interpolation_index(self, timestep: Union[float, int]):
"""
Given the current timestep (self.min_timestep() <= timestep <= self.max_timestep()),
returns the interpolation index into the latent space vector or grid
(in [0, self.get_input_data().num_timekeyframes]).
:param timestep: the timestep of the data
:return: the index into the latent space grid
"""
assert self.is_time_dependent()
return self._input_data.timestep_to_index(timestep)
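    # Illustrative example (assuming keyframes are stored for every 2nd timestep, i.e. 0, 2, 4, ...):
    # timestep 2 would map to index 1 (the second keyframe) and timestep 3 to a fractional
    # index between keyframes 1 and 2. The exact mapping is delegated to
    # TrainingInputData.timestep_to_index().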
def ensemble_interpolation_index(self, ensemble: Union[float, int]):
"""
Given the current ensemble (self.min_ensemble() <= ensemble <= self.max_ensemble()),
returns the interpolation index into the latent space vector or grid
(in [0, self.get_input_data().num_ensembles()-1]).
:param ensemble: the ensemble of the data
:return: the index into the latent space grid
"""
assert self.is_time_dependent()
return self._input_data.ensemble_to_index(ensemble)
def timestep_training_type(self, timestep: int):
"""
Evaluates how that timestep was used during training.
Returns a tuple of two booleans
is_keyframe, is_trained = self.timestep_training_type(timestep)
Where 'is_keyframe' is true iff there was a keyframe / latent vector at that timestep;
and 'is_trained' is true iff that timestep was used in the training data
(either directly via a keyframe or interpolated).
:param timestep: the timestep to check
:return: is_keyframe, is_trained
"""
assert self.is_time_dependent()
is_keyframe = timestep in self._input_data.time_keyframe_indices()
is_trained = timestep in self._input_data.time_train_indices()
return is_keyframe, is_trained
def save_compiled_network(self, filename):
if not self._tensorcores_available:
print("No tensorcores available, can't save")
return
self._scene_network.save(filename)
def warps_mixed(self):
return self._warps_mixed
def warps_shared(self):
return self._warps_shared
def num_parameters(self):
return self._num_parameters
def is_tensorcores_available(self):
return self._tensorcores_available
def get_image_evaluator(self):
return self._input_data.default_image_evaluator()
def get_input_data(self):
return self._input_data
def get_raytracing(self) -> Raytracing:
return self._raytracing
def get_network_pytorch(self):
return self._network, self._network16
def set_network_pytorch(self, network32, network16):
self._network = network32
self._network16 = network16
self._network_output_mode = self._network.output_mode().split(':')[0] # trim options
self._raytracing = Raytracing(self._input_data.default_image_evaluator(),
self._network_output_mode, 0.01, 128, 128,
self._dtype, self._device)
def get_grid_encoding_error(self):
return self._grid_encoding_error
def get_network_tensorcores(self):
return self._scene_network
def set_network_tensorcores(self, network):
self._scene_network = network
self._volume_network.set_network(self._scene_network)
def enable_preintegration(self, enabled, convert_to_texture:bool = False):
re = self._image_evaluator.ray_evaluator
if convert_to_texture and isinstance(re, pyrenderer.RayEvaluationSteppingDvr):
re.convert_to_texture_tf()
print("TF converted to texture")
tf = re.tf
if isinstance(tf, pyrenderer.TransferFunctionTexture):
if enabled:
tf.preintegration_mode = pyrenderer.TransferFunctionTexture.Preintegrate2D
print("preintegration enabled")
else:
tf.preintegration_mode = pyrenderer.TransferFunctionTexture.Off
elif enabled:
print("TF is not a texture, can't use preintegration")
def set_alpha_early_out(self, enabled:bool):
re = self._image_evaluator.ray_evaluator
if isinstance(re, pyrenderer.RayEvaluationSteppingDvr):
re.early_out = enabled
else:
print("Warning, ray evaluator is not an instance of RayEvaluationSteppingDvr, can't set alpha early out")
def get_default_camera(self) -> torch.Tensor:
"""
Reference camera as specified in the settings
"""
image_evaluator = self._input_data.default_image_evaluator()
_camera_center = image_evaluator.camera.center.value
image_evaluator.camera.pitchYawDistance.value = self._input_data.default_camera_pitch_yaw_distance()
camera_parameters = image_evaluator.camera.get_parameters()
return camera_parameters
def get_test_cameras(self, N:int) -> List[torch.Tensor]:
"""
Random cameras based on the fibonacci sphere
:param N: the number of cameras
:return:
"""
image_evaluator = self._input_data.default_image_evaluator()
_camera_center = image_evaluator.camera.center.value
_camera_center_np = utils.cvector_to_numpy(_camera_center)
_camera_pitch_cpu, _camera_yaw_cpu = utils.fibonacci_sphere(N)
_camera_distance = image_evaluator.camera.pitchYawDistance.value.z
params = []
for i in range(N):
image_evaluator.camera.pitchYawDistance.value = pyrenderer.double3(
_camera_pitch_cpu[i], _camera_yaw_cpu[i], _camera_distance)
camera_parameters = image_evaluator.camera.get_parameters().clone()
params.append(camera_parameters)
return params
def get_rotation_cameras(self, N:int) -> List[torch.Tensor]:
"""
Based on the default setting, rotate around the object
:param N: num steps for the whole rotation
:return:
"""
image_evaluator = self._input_data.default_image_evaluator()
_camera_center = image_evaluator.camera.center.value
pyd = self._input_data.default_camera_pitch_yaw_distance()
pitch = pyd.x
yaw = pyd.y
distance = pyd.z
params = []
for yaw_offset in np.linspace(0, 2*np.pi, N, endpoint=False):
image_evaluator.camera.pitchYawDistance.value = pyrenderer.double3(
pitch, yaw+yaw_offset, distance)
camera_parameters = image_evaluator.camera.get_parameters().clone()
params.append(camera_parameters)
return params
def get_rotation_camera(self, t:float) -> torch.Tensor:
"""
Based on the default setting, rotate around the object
:param t: the time of rotation in [0,1] for a full rotation
:return: the camera matrix
"""
image_evaluator = self._input_data.default_image_evaluator()
_camera_center = image_evaluator.camera.center.value
pyd = self._input_data.default_camera_pitch_yaw_distance()
pitch = pyd.x
yaw = pyd.y
distance = pyd.z
params | |
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler
from scipy.special import softmax
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas
import matplotlib.cm as cm
import umap
import tqdm
import scanpy as sc
import matplotlib.gridspec as gridspec
import networkx as nx
import matplotlib as mpl
import numpy
import operator
import random
import pickle
import collections
import sys
import os
class GeneEmbedding(object):
def __init__(self, embedding_file, compass_dataset, vector="1"):
if vector not in ("1","2","average"):
raise ValueError("Select the weight vector from: ('1','2','average')")
if vector == "average":
print("Loading average of 1st and 2nd weights.")
avg_embedding = embedding_file.replace(".vec","_avg.vec")
secondary_weights = embedding_file.replace(".vec","2.vec")
GeneEmbedding.average_vector_results(embedding_file,secondary_weights,avg_embedding)
self.embeddings = self.read_embedding(avg_embedding)
elif vector == "1":
print("Loading first weights.")
self.embeddings = self.read_embedding(embedding_file)
elif vector == "2":
print("Loading second weights.")
secondary_weights = embedding_file.replace(".vec","2.vec")
self.embeddings = self.read_embedding(secondary_weights)
self.vector = []
self.context = compass_dataset.data
self.embedding_file = embedding_file
self.vector = []
self.genes = []
for gene in tqdm.tqdm(self.embeddings.keys()):
# if gene in self.embeddings:
self.vector.append(self.embeddings[gene])
self.genes.append(gene)
def read_embedding(self, filename):
embedding = dict()
lines = open(filename,"r").read().splitlines()[1:]
for line in lines:
vector = line.split()
gene = vector.pop(0)
embedding[gene] = [float(x) for x in vector]
return embedding
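    # Assumed .vec layout (word2vec-style text format): one header line followed by
    # one line per gene, e.g.
    #   1000 128
    #   GENE_A 0.12 -0.03 0.88 ...
    # read_embedding() skips the header line and maps each gene symbol to its float vector.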
def compute_similarities(self, gene, subset=None, feature_type=None):
if gene not in self.embeddings:
return None
if feature_type:
subset = []
            # use a separate loop variable so the query 'gene' is not shadowed
            for g in list(self.embeddings.keys()):
                if feature_type == self.context.feature_types[g]:
                    subset.append(g)
embedding = self.embeddings[gene]
distances = dict()
if subset:
targets = set(list(self.embeddings.keys())).intersection(set(subset))
else:
targets = list(self.embeddings.keys())
for target in targets:
if target not in self.embeddings:
continue
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(embedding).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
genes = [x[0] for x in sorted_distances]
distance = [x[1] for x in sorted_distances]
df = pandas.DataFrame.from_dict({"Gene":genes, "Similarity":distance})
return df
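    # e.g. (hypothetical gene symbol):
    #   df = embedding.compute_similarities("CD8A")
    #   df.head()  # genes ranked by descending cosine similarity to CD8A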
def get_similar_genes(self, vector):
distances = dict()
targets = list(self.embeddings.keys())
for target in targets:
if target not in self.embeddings:
continue
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
genes = [x[0] for x in sorted_distances]
distance = [x[1] for x in sorted_distances]
df = pandas.DataFrame.from_dict({"Gene":genes, "Similarity":distance})
return df
def cluster(self, threshold=0.75, lower_bound=1):
cluster_definitions = collections.defaultdict(list)
        G = self.generate_network(threshold=threshold)
        G.remove_edges_from(nx.selfloop_edges(G))
        for i, connected_component in enumerate(nx.connected_components(G)):
subg = G.subgraph(connected_component)
if len(subg.nodes()) > lower_bound:
# if len(subg.nodes()) == 2:
# cluster_definitions[str(i+j+100)] += list(subg.nodes())
# continue
                clique_tree = nx.tree.junction_tree(subg)
                clique_tree.remove_nodes_from(list(nx.isolates(clique_tree)))
for j, cc in enumerate(nx.connected_components(clique_tree)):
for clique_cc in cc:
cluster_definitions[str(i+j)] += list(set(clique_cc))
self.cluster_definitions = cluster_definitions
return self.cluster_definitions
def clusters(self, clusters):
average_vector = dict()
gene_to_cluster = collections.defaultdict(list)
matrix = collections.defaultdict(list)
total_average_vector = []
for gene, cluster in zip(self.context.expressed_genes, clusters):
if gene in self.embeddings:
matrix[cluster].append(self.embeddings[gene])
gene_to_cluster[cluster].append(gene)
total_average_vector.append(self.embeddings[gene])
self.total_average_vector = list(numpy.average(total_average_vector, axis=0))
for cluster, vectors in matrix.items():
xvec = list(numpy.average(vectors, axis=0))
average_vector[cluster] = numpy.subtract(xvec,self.total_average_vector)
return average_vector, gene_to_cluster
def generate_vector(self, genes):
vector = []
for gene, vec in zip(self.genes, self.vector):
if gene in genes:
vector.append(vec)
assert len(vector) != 0, genes
return list(numpy.median(vector, axis=0))
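    # A "signature" vector built this way can be compared back against all genes, e.g.:
    #   sig = embedding.generate_vector(["GENE_A", "GENE_B"])  # hypothetical gene symbols
    #   embedding.get_similar_genes(sig)                       # rank genes against the signature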
def cluster_definitions_as_df(self, top_n=20):
similarities = self.cluster_definitions
clusters = []
symbols = []
for key, genes in similarities.items():
clusters.append(key)
symbols.append(", ".join(genes[:top_n]))
df = pandas.DataFrame.from_dict({"Cluster Name":clusters, "Top Genes":symbols})
return df
def plot(self, png=None, method="TSNE", labels=[], pcs=None, remove=[]):
plt.figure(figsize = (8, 8))
ax = plt.subplot(1,1,1)
pcs = self.plot_reduction(self.cluster_labels, ax, labels=labels, method=method, pcs=pcs, remove=remove)
# if png:
# plt.savefig(png)
# plt.close()
# else:
plt.show()
return pcs
def marker_labels(self,top_n=5):
markers = []
cluster_definitions = self.cluster_definitions
marker_labels = dict()
for gclust, genes in cluster_definitions.items():
print(gclust, ",".join(genes[:5]))
markers += genes[:top_n]
for gene in genes[:top_n]:
marker_labels[gene] = gclust
return markers, marker_labels
def plot_reduction(self, clusters, ax, method="TSNE", labels=[], pcs=None, remove=[]):
if type(pcs) != numpy.ndarray:
if method == "TSNE":
print("Running t-SNE")
pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
pcs = pca.fit_transform(self.vector)
pcs = numpy.transpose(pcs)
print("Finished.")
else:
print("Running UMAP")
trans = umap.UMAP(random_state=42,metric='cosine').fit(self.vector)
x = trans.embedding_[:, 0]
y = trans.embedding_[:, 1]
pcs = [x,y]
print("Finished.")
if len(remove) != 0:
_pcsx = []
_pcsy = []
_clusters = []
for x, y, c in zip(pcs[0],pcs[1],clusters):
if c not in remove:
_pcsx.append(x)
_pcsy.append(y)
_clusters.append(c)
pcs = []
pcs.append(_pcsx)
pcs.append(_pcsy)
clusters = _clusters
data = {"x":pcs[0],"y":pcs[1], "Cluster":clusters}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y",hue="Cluster", ax=ax)
plt.xlabel("{}-1".format(method))
plt.ylabel("{}-2".format(method))
ax.set_xticks([])
ax.set_yticks([])
if len(labels):
for x, y, gene in zip(pcs[0], pcs[1], self.context.expressed_genes):
if gene in labels:
ax.text(x+.02, y, str(gene), fontsize=8)
return pcs
def subtract_vector(self, vector):
for gene, vec in self.embeddings.items():
            vec = numpy.subtract(vec, vector)
self.embeddings[gene] = vec
def relabel_clusters(self, clusters, annotations):
_clusters = []
for cluster in clusters:
if cluster in annotations:
_clusters.append(annotations[cluster])
else:
_clusters.append(cluster)
self.cluster_labels = _clusters
return _clusters
def plot_similarity_matrix(self, top_n=5, png=None):
markers, marker_labels = self.marker_labels(top_n=top_n)
cmap = matplotlib.cm.tab20
node_color = {}
type_color = {}
ctypes = []
for marker in markers:
if marker_labels:
ctypes = []
for value in marker_labels.values():
ctypes.append(value)
ctypes = list(set(ctypes))
node_color[marker] = cmap(ctypes.index(marker_labels[marker]))
type_color[marker_labels[marker]] = cmap(ctypes.index(marker_labels[marker]))
mm = pandas.DataFrame(markers, index=markers)
mm["Gene Cluster"] = mm[0]
row_colors = mm["Gene Cluster"].map(node_color)
similarity_matrix = []
markers = set(list(self.embeddings.keys())).intersection(set(markers))
markers = list(markers)
for marker in markers:
row = []
res = self.compute_similarities(marker, subset=markers)
resdict = dict(zip(res["Gene"],res["Similarity"]))
for gene in markers:
row.append(resdict[gene])
similarity_matrix.append(row)
from matplotlib.patches import Patch
plt.figure()
matrix = numpy.array(similarity_matrix)
df = pandas.DataFrame(matrix,index=markers,columns=markers)
sns.clustermap(df,cbar_pos=None,figsize=(12,12), dendrogram_ratio=0.1, cmap="mako",row_colors=row_colors,yticklabels=True,xticklabels=True)
handles = [Patch(facecolor=type_color[name]) for name in type_color]
plt.legend(handles, type_color, title='Gene Cluster',
bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure, loc='upper right')
plt.tight_layout()
if png:
            plt.savefig(png)
else:
plt.show()
def similarity_network(self,top_n=5):
markers, marker_labels = self.marker_labels(top_n=top_n)
G = nx.Graph()
for marker in markers:
G.add_node(marker)
for marker in markers:
res = self.compute_similarities(marker)
resdict = dict(zip(res["Gene"],res["Similarity"]))
for gene, similarity in resdict.items():
if gene != marker:
if gene not in G.nodes():
G.add_node(gene)
G.add_edge(marker, gene, weight=similarity)
return G
def plot_similarity_network(self, top_n=5, png=None):
markers, marker_labels = self.marker_labels(top_n=top_n)
cmap = matplotlib.cm.tab20
G = nx.petersen_graph()
node_color = []
node_order = []
node_size = []
edge_order = []
edge_color = []
edge_labels = dict()
for marker in markers:
node_order.append(marker)
if marker_labels:
ctypes = []
for value in marker_labels.values():
ctypes.append(value)
ctypes = list(set(ctypes))
node_color.append(ctypes.index(marker_labels[marker]))
node_size.append(400)
G.add_node(marker)
for marker in markers:
res = self.compute_similarities(marker)
resdict = dict(zip(res["Gene"],res["Similarity"]))
i = 0
for gene, similarity in resdict.items():
if i > 9: break
if gene != marker:
if gene not in G.nodes():
node_size.append(0)
G.add_node(gene)
node_order.append(gene)
node_color.append(len(set(marker_labels.values())))
G.add_edge(marker, gene, weight=similarity)
edge_color.append(similarity)
edge_order.append((marker,gene))
edge_labels[(marker,gene)] = str(round(similarity,2))
i += 1
# print(node_color)
# c = max(nx.connected_components(G), key=len)
# G = G.subgraph(c).copy()
for i in range(10):
G.remove_node(i)
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(1,1,1)
pos = nx.nx_agraph.graphviz_layout(G, prog="neato",args="-Goverlap=scale -Elen=5 -Eweight=0.2")
nx.draw(G,pos,ax=ax, cmap=cmap,nodelist=node_order,
node_size=node_size,
edgelist=edge_order,
node_color=node_color,
edge_color=edge_color,
edge_vmin=0,
edge_vmax=1.0,
edge_cmap=plt.cm.Greys,
with_labels=True, width=1,font_size=7)
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels, font_size=6)
plt.axis('off')
plt.tight_layout()
if png:
plt.savefig(png)
else:
plt.show()
return G
@staticmethod
def read_vector(vec):
lines = open(vec,"r").read().splitlines()
dims = lines.pop(0)
vecs = dict()
for line in lines:
line = line.split()
gene = line.pop(0)
vecs[gene] = list(map(float,line))
return vecs, dims
    def cluster_network(self, genes, nxg, threshold=0.0, title="", display=True):
G = nxg.subgraph(genes)
for subg in nx.connected_components(G):
if len(subg) > 1:
if display:
fig = plt.figure(figsize=(14,6))
ax = plt.subplot(1,2,1)
subG = G.subgraph(list(subg))
centrality = dict(nx.betweenness_centrality(subG))
low, *_, high = sorted(centrality.values())
norm = mpl.colors.Normalize(vmin=low, vmax=high, clip=True)
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.coolwarm)
pos = nx.nx_agraph.graphviz_layout(subG, prog="neato",args="-Goverlap=scale -Elen=5 -Eweight=0.2")
nx.draw(subG,pos,with_labels=True,
node_color=[mapper.to_rgba(i) for i in centrality.values()],
node_size=100,ax=ax,font_size=16,
edge_color=[[0.5,0.5,0.5,0.5] for _ in subG.edges()])
                    vector = self.generate_vector(list(subg))
ax = plt.subplot(1,2,2)
pcs = self.pcs["UMAP"]
distances = []
dataset_distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(self.dataset_vector).reshape(1, -1))[0])
for cell_vector in self.matrix:
distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])
d = distance-dataset_distance
if d < threshold:
d = threshold
distances.append(d)
data = {"x":pcs[0],"y":pcs[1],"Distance": distances}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y", hue='Distance', ax=ax,linewidth=0.00,s=7,alpha=0.7)
if title != None:
plt.title(title)
plt.show()
def generate_network(self, threshold=0.5):
G = nx.Graph()
a = pandas.DataFrame.from_dict(self.embeddings).to_numpy()
similarities = cosine_similarity(a.T)
genes = list(self.embeddings.keys())
similarities[similarities < threshold] = 0
edges = []
nz = list(zip(*similarities.nonzero()))
for n in tqdm.tqdm(nz):
edges.append((genes[n[0]],genes[n[1]]))
G.add_nodes_from(genes)
G.add_edges_from(edges)
return G
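    # e.g. a gene-gene graph keeping only edges with cosine similarity >= 0.5:
    #   G = embedding.generate_network(threshold=0.5)
    #   n_modules = len(list(nx.connected_components(G)))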
@staticmethod
def average_vector_results(vec1, vec2, fname):
output = open(fname,"w")
vec1, dims = GeneEmbedding.read_vector(vec1)
vec2, _ = GeneEmbedding.read_vector(vec2)
genes = list(vec1.keys())
output.write(dims+"\n")
for gene in genes:
v1 = vec1[gene]
v2 = vec2[gene]
meanv = []
for x,y in zip(v1,v2):
meanv.append(str((x+y)/2))
output.write("{} {}\n".format(gene," ".join(meanv)))
output.close()
class CellEmbedding(object):
def __init__(self, compass_dataset, embed):
self.context | |
for mp_ in self.metapartitions]
if any(
[mp_["label"] in existing_label for mp_ in metapartition.metapartitions]
):
raise RuntimeError(
"Duplicate labels for nested metapartitions are not allowed!"
)
if schema_validation:
table_meta = {}
for table, meta in self.table_meta.items():
other = metapartition.table_meta.get(table, None)
# This ensures that only schema-compatible metapartitions can be nested
# The returned schema by validate_compatible is the reference schema with the most
# information, i.e. the fewest null columns
table_meta[table] = validate_compatible([meta, other])
metadata_merger = metadata_merger or combine_metadata
new_dataset_metadata = metadata_merger(
[self.dataset_metadata, metapartition.dataset_metadata]
)
new_object = MetaPartition(
label="NestedMetaPartition",
dataset_metadata=new_dataset_metadata,
metadata_version=metapartition.metadata_version,
table_meta=table_meta,
partition_keys=metapartition.partition_keys or None,
logical_conjunction=metapartition.logical_conjunction or None,
)
# Add metapartition information to the new object
new_metapartitions = self.metapartitions.copy()
new_metapartitions.extend(metapartition.metapartitions.copy())
new_object.metapartitions = new_metapartitions
return new_object
@staticmethod
def from_dict(dct):
"""
Create a :class:`~kartothek.io_components.metapartition.MetaPartition` from a dictionary.
Parameters
----------
dct : dict
Dictionary containing constructor arguments as keys
        Returns
        -------
        MetaPartition
        """
return MetaPartition(
label=dct["label"],
files=dct.get("files", {}),
metadata=dct.get("metadata", {}),
data=dct.get("data", {}),
indices=dct.get("indices", {}),
metadata_version=dct.get("metadata_version", None),
dataset_metadata=dct.get("dataset_metadata", {}),
table_meta=dct.get("table_meta", {}),
partition_keys=dct.get("partition_keys", None),
logical_conjunction=dct.get("logical_conjunction", None),
)
def to_dict(self):
return {
"label": self.label,
"files": self.files or {},
"data": self.data or {},
"indices": self.indices,
"metadata_version": self.metadata_version,
"dataset_metadata": self.dataset_metadata,
"table_meta": self.table_meta,
"partition_keys": self.partition_keys,
"logical_conjunction": self.logical_conjunction,
}
@_apply_to_list
def remove_dataframes(self):
"""
Remove all dataframes from the metapartition in memory.
"""
return self.copy(data={})
def _split_predicates_in_index_and_content(self, predicates):
"""
Split a list of predicates in the parts that can be resolved by the
partition columns and the ones that are persisted in the data file.
"""
# Predicates are split in this function into the parts that apply to
# the partition key columns `key_part` and the parts that apply to the
# contents of the file `content_part`.
split_predicates = []
has_index_condition = False
for conjunction in predicates:
key_part = []
content_part = []
for literal in conjunction:
if literal.column in self.partition_keys:
has_index_condition = True
key_part.append(literal)
else:
content_part.append(literal)
split_predicates.append(_SplitPredicate(key_part, content_part))
return split_predicates, has_index_condition
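    # Example (assuming partition_keys == ["date"]): a user-supplied conjunction like
    #   [("date", "==", "2020-01-01"), ("x", ">", 5)]
    # is split into key_part     = [("date", "==", "2020-01-01")]  (resolved from the partition key)
    # and          content_part  = [("x", ">", 5)]                 (pushed down to the data file).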
def _apply_partition_key_predicates(self, table, indices, split_predicates):
"""
Apply the predicates to the partition_key columns and return the remaining
predicates that should be pushed to the DataFrame serialiser.
"""
# Construct a single line DF with the partition columns
schema = self.table_meta[table]
index_df_dct = {}
for column, value in indices:
pa_dtype = schema[schema.get_field_index(column)].type
value = IndexBase.normalize_value(pa_dtype, value)
if pa.types.is_date(pa_dtype):
index_df_dct[column] = pd.Series(
pd.to_datetime([value], infer_datetime_format=True)
).dt.date
else:
dtype = pa_dtype.to_pandas_dtype()
index_df_dct[column] = pd.Series([value], dtype=dtype)
index_df = pd.DataFrame(index_df_dct)
filtered_predicates = []
for conjunction in split_predicates:
predicates = [conjunction.key_part]
if (
len(conjunction.key_part) == 0
or len(
filter_df_from_predicates(
index_df, predicates, strict_date_types=True
)
)
> 0
):
if len(conjunction.content_part) > 0:
filtered_predicates.append(conjunction.content_part)
else:
                    # A condition applies to the whole DataFrame, so we need to
# load all data.
return None
return filtered_predicates
@default_docs
@_apply_to_list
def load_dataframes(
self,
store,
tables=None,
columns=None,
predicate_pushdown_to_io=True,
categoricals=None,
dates_as_object=False,
predicates=None,
):
"""
Load the dataframes of the partitions from store into memory.
Parameters
----------
tables : list of string, optional
If a list is supplied, only the given tables of the partition are
loaded. If the given table does not exist it is ignored.
Examples
.. code::
>>> part = MetaPartition(
            ...     label='part_label',
... files={
... 'core': 'core_key_in_store',
... 'helper': 'helper_key_in_store'
... }
... )
>>> part.data
{}
>>> part = part.load_dataframes(store, ['core'])
>>> part.data
{
'core': pd.DataFrame()
}
"""
if columns is None:
columns = {}
if categoricals is None:
categoricals = {}
LOGGER.debug("Loading internal dataframes of %s", self.label)
if len(self.files) == 0:
# This used to raise, but the specs do not require this, so simply do a no op
            LOGGER.debug("Partition %s is empty and has no tables/files", self.label)
return self
new_data = copy(self.data)
predicates = _combine_predicates(predicates, self.logical_conjunction)
predicates = _predicates_to_named(predicates)
for table, key in self.files.items():
table_columns = columns.get(table, None)
categories = categoricals.get(table, None)
dataset_uuid, _, indices, file_name = decode_key(key)
if tables and table not in tables:
continue
# In case the columns only refer to the partition indices, we need to load at least a single column to
# determine the length of the required dataframe.
if table_columns is None or (
table_columns is not None
and self.partition_keys
and set(table_columns) == set(self.partition_keys)
):
table_columns_to_io = None
else:
table_columns_to_io = table_columns
filtered_predicates = predicates
self._load_table_meta(dataset_uuid=dataset_uuid, table=table, store=store)
# Filter predicates that would apply to this partition and remove the partition columns
if predicates:
                # Check if there are predicates that match the partition columns.
                # For these we need to check if the partition columns already falsify
                # the condition.
#
# We separate these predicates into their index and their Parquet part.
split_predicates, has_index_condition = self._split_predicates_in_index_and_content(
predicates
)
filtered_predicates = []
if has_index_condition:
filtered_predicates = self._apply_partition_key_predicates(
table, indices, split_predicates
)
else:
filtered_predicates = [
pred.content_part for pred in split_predicates
]
# Remove partition_keys from table_columns_to_io
if (
self.partition_keys
and table_columns_to_io
and len(set(self.partition_keys) & set(table_columns_to_io)) > 0
):
keys_to_remove = set(self.partition_keys) & set(table_columns_to_io)
# This is done to not change the ordering of the list
table_columns_to_io = [
c for c in table_columns_to_io if c not in keys_to_remove
]
start = time.time()
df = DataFrameSerializer.restore_dataframe(
key=key,
store=store,
columns=table_columns_to_io,
categories=categories,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=filtered_predicates,
date_as_object=dates_as_object,
)
LOGGER.debug("Loaded dataframe %s in %s seconds.", key, time.time() - start)
# Metadata version >=4 parse the index columns and add them back to the dataframe
df = self._reconstruct_index_columns(
df=df,
key_indices=indices,
table=table,
columns=table_columns,
categories=categories,
date_as_object=dates_as_object,
)
df.columns = df.columns.map(ensure_string_type)
if table_columns is not None:
# TODO: When the write-path ensures that all partitions have the same column set, this check can be
# moved before `DataFrameSerializer.restore_dataframe`. At the position of the current check we
# may want to double check the columns of the loaded DF and raise an exception indicating an
# inconsistent dataset state instead.
missing_cols = set(table_columns).difference(df.columns)
if missing_cols:
raise ValueError(
"Columns cannot be found in stored dataframe: {}".format(
", ".join(sorted(missing_cols))
)
)
df = df.loc[:, table_columns]
new_data[table] = df
return self.copy(data=new_data)
@_apply_to_list
def load_all_table_meta(self, store, dataset_uuid):
"""
Loads all table metadata in memory and stores it under the `tables` attribute
"""
for table in self.files:
self._load_table_meta(dataset_uuid, table, store)
return self
def _load_table_meta(self, dataset_uuid, table, store):
if table not in self.table_meta:
_common_metadata = read_schema_metadata(
dataset_uuid=dataset_uuid, store=store, table=table
)
self.table_meta[table] = _common_metadata
return self
def _reconstruct_index_columns(
self, df, key_indices, table, columns, categories, date_as_object
):
if len(key_indices) == 0:
return df
index_cols = []
original_columns = list(df.columns)
pd_index = pd.RangeIndex(stop=len(df))
zeros = np.zeros(len(df), dtype=int)
schema = self.table_meta[table]
for primary_key, value in key_indices:
# If there are predicates, don't reconstruct the index if it wasn't requested
if columns is not None and primary_key not in columns:
continue
pa_dtype = schema.field_by_name(primary_key).type
dtype = pa_dtype.to_pandas_dtype()
convert_to_date = False
if date_as_object and pa_dtype in [pa.date32(), pa.date64()]:
convert_to_date = True
if isinstance(dtype, type):
value = dtype(value)
elif isinstance(dtype, np.dtype):
if dtype == np.dtype("datetime64[ns]"):
value = pd.Timestamp(value)
else:
value = dtype.type(value)
else:
                raise RuntimeError(
                    "Unexpected object encountered: ({}, {})".format(
dtype, type(dtype)
)
)
if categories and primary_key in categories:
if convert_to_date:
cats = pd.Series(value).dt.date
else:
cats = [value]
cat = pd.Categorical.from_codes(zeros, categories=cats)
ind_col = pd.Series(cat, index=pd_index, name=primary_key)
else:
ind_col = pd.Series(
value, index=pd_index, dtype=dtype, name=primary_key
)
if convert_to_date:
ind_col = ind_col.dt.date
index_cols.append(ind_col)
df = df.reset_index(drop=True)
index_names = [col.name for col in index_cols]
# The index might already be part of the dataframe which is recovered from the parquet file.
# In this case, still use the reconstructed index col to have consistent index columns behavior.
# In this case the column in part of `original_columns` and must be removed to avoid duplication
# in the column axis
cleaned_original_columns = [
orig for orig in original_columns if orig not in index_names
]
if cleaned_original_columns != original_columns:
# indexer call is slow, so only do that if really necessary
df = df.loc[:, cleaned_original_columns]
return pd.concat(index_cols + [df], axis=1, sort=False, join="inner")
@_apply_to_list
def merge_dataframes(
self, left, right, output_label, merge_func=pd.merge, merge_kwargs=None
):
"""
Merge internal dataframes.
The two referenced dataframes are removed from the internal list and
the newly created dataframe is added.
The merge | |
<reponame>tudelft-eemcs-dml/fltk-testbed-gr-1<filename>fltk/synthpriv/attacks/nasr.py
"""
Membership inference attack based on https://github.com/privacytrustlab/ml_privacy_meter/blob/master/ml_privacy_meter/attack/meminf.py
"""
import datetime
from itertools import zip_longest
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torchextractor as tx
from sklearn.cluster import SpectralClustering
from sklearn.metrics import accuracy_score, auc, roc_curve
from torch import nn
from tqdm import tqdm
class ReshapeFCForGradConv(nn.Module):
def forward(self, x):
if x.dim() == 3:
return x[:, None, ...] # add channel dimension
if x.dim() == 4:
return x
else:
raise Exception("Only 3D and 4D inputs are supported to gradient convolution modules!")
class Print(nn.Module):
def forward(self, x):
print(x.shape)
return x
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
nn.init.normal_(m.weight, mean=0, std=0.01)
m.bias.data.fill_(0)
def fcn_module(inputsize, layer_size=128):
"""
Creates a FCN submodule. Used in different attack components.
"""
fcn = nn.Sequential(
nn.Linear(inputsize, layer_size),
nn.ReLU(),
nn.Linear(layer_size, 64),
nn.ReLU(),
)
fcn.apply(init_weights)
return fcn
def cnn_for_fcn_gradients(input_shape):
"""
Creates a CNN submodule for Linear layer gradients.
"""
dim1, dim2 = input_shape
cnn = nn.Sequential(
ReshapeFCForGradConv(),
nn.Dropout(0.2),
nn.Conv2d(1, 100, kernel_size=(1, dim2)),
nn.ReLU(),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(100 * dim1, 2024),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(2024, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
)
cnn.apply(init_weights)
return cnn
def cnn_for_cnn_layeroutputs(input_shape):
"""
Creates a CNN submodule for Conv Layer outputs
"""
print("CNN 4 CNN")
_, c, h, w = input_shape
cnn = nn.Sequential(
nn.Conv2d(c, c, kernel_size=(h, w)),
nn.ReLU(),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(c, 1024),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
)
cnn.apply(init_weights)
return cnn
class PermuteCNNGradient(nn.Module):
def forward(self, x):
b, c_out, c_in, k1, k2 = x.shape
return x.reshape(b, c_out, c_in, k1 * k2).permute(0, 2, 1, 3)
def cnn_for_cnn_gradients(input_shape):
"""
Creates a CNN submodule for Conv layer gradients
"""
print("CNN 4 CNN grads")
c_out, c_in, k1, k2 = input_shape
cnn = nn.Sequential(
PermuteCNNGradient(),
nn.Conv2d(c_in, c_out, kernel_size=(c_out, k1 * k2)),
nn.ReLU(),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(c_out, 64),
nn.ReLU(),
)
cnn.apply(init_weights)
return cnn
def transpose(l):
return list(map(list, zip_longest(*l, fillvalue=None)))
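# e.g. transpose([[1, 2], [3, 4]]) == [[1, 3], [2, 4]]; ragged inner lists are padded with None.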
# Decide what attack component (FCN or CNN) to use on the basis of the layer name.
# CNN_COMPONENT_LIST contains the layer types whose outputs are spatial and therefore handled by CNN attack components.
# GRAD_LAYERS_LIST contains the layer types with trainable weights whose gradients can be exploited.
CNN_COMPONENT_LIST = ["Conv", "MaxPool"]
GRAD_LAYERS_LIST = ["Conv", "Linear"]
class NasrAttack(nn.Module):
"""
This attack was originally proposed by Nasr et al. It exploits one-hot encoding of true labels, loss value,
intermediate layer activations and gradients of intermediate layers of the target model on data points, for training
the attack model to infer membership in training data.
Paper link: https://arxiv.org/abs/1812.00910
Args:
------
device: torch.device() to use for training and testing
target_model: The target classification model that'll be attacked
train_dataloader: Dataloader with samples for training
test_dataloader: Dataloader with samples for testing
layers_to_exploit: a list of integers specifying the indices of layers, whose activations will be exploited by the
attack model. If the list has only a single element and it is equal to the index of last layer,
the attack can be considered as a "blackbox" attack.
gradients_to_exploit: a list of integers specifying the indices of layers whose gradients will be exploited by the
attack model
exploit_loss: boolean; whether to exploit loss value of target model or not
exploit_label: boolean; whether to exploit one-hot encoded labels or not
optimizer: The optimizer for training the attack model
learning_rate: learning rate for training the attack model
epochs: Number of epochs to train the attack model
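
    Example (illustrative names; each dataloader argument is a (member_loader, non_member_loader)
    pair yielding (features, labels) batches, as unpacked in train_attack()/test_attack()):

        attack = NasrAttack(
            device=torch.device("cuda"),
            target_model=model,
            train_dataloader=(member_train, nonmember_train),
            test_dataloader=(member_test, nonmember_test),
            layers_to_exploit=[...],
            gradients_to_exploit=[...],
        )
        attack.train_attack()
        attack.test_attack()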
"""
def __init__(
self,
device,
target_model,
train_dataloader,
test_dataloader,
layers_to_exploit=[],
gradients_to_exploit=[],
exploit_loss=True,
exploit_label=True,
optimizer=torch.optim.Adam,
learning_rate=0.001,
epochs=30,
):
super().__init__()
self.target_model = target_model.requires_grad_(False)
self.device = device
self.train_dataloader = train_dataloader
self.test_dataloader = test_dataloader
self.train_shape = next(iter(self.train_dataloader[0]))[0].shape
self.layers_to_exploit = layers_to_exploit
self.gradients_to_exploit = gradients_to_exploit
self.exploit_loss = exploit_loss
self.exploit_label = exploit_label
self.n_labels = list(target_model.parameters())[-1].shape[0]
self.create_attack_model()
self.optimizer = optimizer(
[p for n, p in self.named_parameters() if not "target_model" in n and not "feature_extractor" in n],
lr=learning_rate,
)
self.epochs = epochs
self.out_name = "_".join(
[
self.__class__.__name__,
self.target_model.__class__.__name__,
f"label={exploit_label}",
f"loss={exploit_loss}",
f"layers={','.join([str(l) for l in layers_to_exploit])}" if layers_to_exploit else "nolayers",
f"gradients={','.join([str(g) for g in gradients_to_exploit])}" if gradients_to_exploit else "nograds",
str(datetime.datetime.now()).replace(" ", "-").split(".")[0],
]
)
def create_attack_model(self):
self.input_modules = nn.ModuleList()
classifier_input_size = 0
if len(self.layers_to_exploit):
layer_names_and_classes = [
(n, m.__class__.__name__)
for i, (n, m) in enumerate(self.target_model.named_modules())
if i in self.layers_to_exploit
]
self.layers_to_exploit, layer_classes = transpose(layer_names_and_classes)
self.intermediate_feature_extractor = tx.Extractor(self.target_model, self.layers_to_exploit)
example = next(iter(self.train_dataloader[0]))[0]
layer_shapes = [v.shape for v in self.intermediate_feature_extractor(example)[1].values()]
for shape, type in zip(layer_shapes, layer_classes):
requires_cnn = map(lambda i: i in type, CNN_COMPONENT_LIST)
if any(requires_cnn):
module = cnn_for_cnn_layeroutputs(shape)
else:
module = fcn_module(shape[1], 100)
self.input_modules.append(module)
classifier_input_size += 64
if len(self.gradients_to_exploit):
layers = list(self.target_model.modules())
self.grad_exploit_layers = []
for l in self.gradients_to_exploit:
layer = layers[l]
assert any(
map(lambda i: i in layer.__class__.__name__, GRAD_LAYERS_LIST)
), f"Only Linear & Conv layers are supported for gradient-based attacks"
requires_cnn = map(lambda i: i in layer.__class__.__name__, CNN_COMPONENT_LIST)
self.grad_exploit_layers.append(layer.weight)
if any(requires_cnn):
module = cnn_for_cnn_gradients(layer.weight.shape)
classifier_input_size += 64
else:
module = cnn_for_fcn_gradients(layer.weight.shape)
classifier_input_size += 256
self.input_modules.append(module)
if self.exploit_loss:
self.input_modules.append(fcn_module(1, 100))
classifier_input_size += 64
if self.exploit_label:
self.input_modules.append(fcn_module(self.n_labels))
classifier_input_size += 64
classifier = nn.Sequential(
nn.Linear(classifier_input_size, 256),
nn.ReLU(),
nn.Linear(256, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 1),
nn.Sigmoid(),
)
classifier.apply(init_weights)
self.classifier = classifier
# print(self)
def compute_gradients(self, model, features, labels):
gradients = []
model.requires_grad_(True)
logits = model(features)
for l, label in enumerate(labels):
loss = F.cross_entropy(logits[None, l], label[None])
grads = torch.autograd.grad(loss, self.target_model.parameters(), retain_graph=True)
gradients.append(grads)
model.requires_grad_(False)
return gradients
def get_gradient_norms(self, model, features, labels):
return [torch.norm(grads[-1]) for grads in self.compute_gradients(model, features, labels)]
def forward(self, model, features, labels):
i = 0
attack_input = []
if len(self.gradients_to_exploit):
model.requires_grad_(True)
if len(self.layers_to_exploit):
self.logits, intermediate_feature = self.intermediate_feature_extractor(features)
else:
self.logits = model(features)
if len(self.layers_to_exploit):
for layer_output in intermediate_feature.values():
attack_input.append(self.input_modules[i](layer_output))
i += 1
individual_losses = []
for l, label in enumerate(labels):
individual_losses.append(F.cross_entropy(self.logits[None, l], label[None]))
if len(self.gradients_to_exploit):
gradients = [
torch.autograd.grad(loss, self.grad_exploit_layers, retain_graph=True) for loss in individual_losses
]
gradients = [torch.stack(grads) for grads in transpose(gradients)]
for grads in gradients:
attack_input.append(self.input_modules[i](grads))
i += 1
model.requires_grad_(False)
if self.exploit_loss:
self.loss = torch.tensor(individual_losses, device=self.device).mean()[None, None]
loss_feature = self.input_modules[i](self.loss)
loss_feature = torch.tile(loss_feature, (len(features), 1))
attack_input.append(loss_feature)
i += 1
if self.exploit_label:
self.preds = torch.argmax(self.logits, axis=1)
self.preds = F.one_hot(self.preds, num_classes=self.n_labels).float()
attack_input.append(self.input_modules[i](self.preds))
i += 1
return self.classifier(torch.cat(attack_input, axis=1))
def attack_accuracy(self, members, nonmembers):
"""
Computes attack accuracy of the attack model.
"""
preds, targets = [], []
for (membatch, nonmembatch) in zip(members, nonmembers):
mfeatures, mlabels = membatch
nmfeatures, nmlabels = nonmembatch
# Computing the membership probabilities
mprobs = self.forward(self.target_model, mfeatures.to(self.device), mlabels.to(self.device))
nmprobs = self.forward(self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device))
probs = torch.cat((mprobs, nmprobs)).cpu()
target_ones = torch.ones(mprobs.shape, dtype=bool)
target_zeros = torch.zeros(nmprobs.shape, dtype=bool)
target = torch.cat((target_ones, target_zeros))
preds.append(probs > 0.5)
targets.append(target)
return accuracy_score(np.concatenate(preds), np.concatenate(targets))
def train_attack(self):
"""
Trains the attack model
"""
best_state_dict = self.state_dict()
self.to(self.device)
self.input_modules.train()
self.classifier.train()
self.target_model.eval()
try:
self.intermediate_feature_extractor.eval()
except AttributeError:
pass
mtestset, nmtestset = self.test_dataloader
member_loader, nonmember_loader = self.train_dataloader
nmfeat, nmlbl = transpose(nonmember_loader)
preds = np.argmax(self.target_model(torch.cat(nmfeat).to(self.device)).cpu(), axis=1)
acc = accuracy_score(np.concatenate(nmlbl), preds.cpu())
print("Target model test accuracy", acc)
best_accuracy = 0
pbar = tqdm(range(self.epochs), desc="Training attack model...")
for e in pbar:
for (mfeatures, mlabels), (nmfeatures, nmlabels) in zip(member_loader, nonmember_loader):
self.optimizer.zero_grad()
moutputs = self.forward(self.target_model, mfeatures.to(self.device), mlabels.to(self.device))
nmoutputs = self.forward(self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device))
memtrue = torch.ones(moutputs.shape, device=self.device)
nonmemtrue = torch.zeros(nmoutputs.shape, device=self.device)
target = torch.cat((memtrue, nonmemtrue))
probs = torch.cat((moutputs, nmoutputs))
attackloss = F.mse_loss(target, probs)
attackloss.backward()
self.optimizer.step()
attack_accuracy = self.attack_accuracy(mtestset, nmtestset)
if attack_accuracy > best_accuracy:
best_accuracy = attack_accuracy
best_state_dict = self.state_dict()
pbar.write(f"Epoch {e} : Attack test accuracy: {attack_accuracy:.3f}, Best accuracy : {best_accuracy:.3f}")
self.out_name += f"_{best_accuracy:.3f}"
self.load_state_dict(best_state_dict)
torch.save(
self.cpu().eval().requires_grad_(False).state_dict(),
f"models/{self.out_name}.pt",
)
def test_attack(self):
"""
Test the attack model on dataset and save plots for visualization.
"""
self.to(self.device)
self.input_modules.eval()
self.classifier.eval()
self.target_model.eval()
try:
self.intermediate_feature_extractor.eval()
except AttributeError:
pass
mtrainset, nmtrainset = self.test_dataloader
mpreds, mlab, nmpreds, nmlab, mfeat, nmfeat, mtrue, nmtrue = [], [], [], [], [], [], [], []
mgradnorm, nmgradnorm = [], []
for (mfeatures, mlabels) in mtrainset:
moutputs = self.forward(self.target_model, mfeatures.to(self.device), mlabels.to(self.device)).detach()
mpreds.extend(moutputs.cpu().numpy())
mlab.extend(mlabels.cpu().numpy())
mfeat.extend(mfeatures.cpu().numpy())
mtrue.extend(np.ones(moutputs.shape))
if len(self.gradients_to_exploit):
mgradientnorm = self.get_gradient_norms(
self.target_model, mfeatures.to(self.device), mlabels.to(self.device)
)
mgradnorm.extend(mgradientnorm)
for (nmfeatures, nmlabels) in nmtrainset:
nmoutputs = self.forward(self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device)).detach()
nmpreds.extend(nmoutputs.cpu().numpy())
nmlab.extend(nmlabels.cpu().numpy())
nmfeat.extend(nmfeatures.cpu().numpy())
nmtrue.extend(np.zeros(nmoutputs.shape))
if len(self.gradients_to_exploit):
nmgradientnorm = self.get_gradient_norms(
self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device)
)
nmgradnorm.extend(nmgradientnorm)
target = np.concatenate((np.concatenate(mtrue), np.concatenate(nmtrue)))
probs = np.concatenate((np.concatenate(mpreds), np.concatenate(nmpreds)))
self.plot(mpreds, nmpreds, target, probs, mlab, nmlab, mgradnorm, nmgradnorm)
def plot(self, mpreds, nmpreds, target, probs, mlab, nmlab, mgradientnorm, nmgradientnorm):
font = {"weight": "bold", "size": 10}
matplotlib.rc("font", **font)
unique_mem_lab = sorted(np.unique(mlab))
unique_nmem_lab = sorted(np.unique(nmlab))
# Creates a histogram for Membership Probability
| |
import datetime
import errno
import json
import os
import select
import signal
import socket
import struct
import subprocess
import warnings
from contextlib import closing
from collections.abc import Mapping
import grpc
from . import proto
from .exceptions import (context, ConnectionError, TimeoutError,
ApplicationNotRunningError, ApplicationError,
DriverNotRunningError, DriverError)
from .kv import KeyValueStore
from .ui import WebUI
from .model import (Security, ApplicationSpec, ApplicationReport,
ApplicationState, ContainerState, Container,
FinalStatus, Resources, container_instance_from_string,
LogLevel, NodeState, NodeReport, Queue, ApplicationLogs)
from .utils import (cached_property, grpc_fork_support_disabled, pid_exists,
datetime_to_millis)
__all__ = ('Client', 'ApplicationClient', 'properties')
_SKEIN_DIR = os.path.abspath(os.path.dirname(os.path.relpath(__file__)))
_SKEIN_JAR = os.path.join(_SKEIN_DIR, 'java', 'skein.jar')
class Properties(Mapping):
"""Skein runtime properties.
This class implements an immutable mapping type, exposing properties
determined at import time.
Attributes
----------
application_id : str or None
The current application id. None if not running in a container.
appmaster_address : str or None
The address of the current application's appmaster. None if not running
in a container.
config_dir : str
The path to the configuration directory.
container_id : str or None
The current skein container id (of the form
``'{service}_{instance}'``). None if not running in a container.
container_resources : Resources or None
The resources allocated to the current container. None if not in a
container.
container_dir : str or None
The absolute path to the working directory for this container. None if
not in a container.
yarn_container_id : str or None
The current YARN container id. None if not running in a container.
"""
def __init__(self):
config_dir = os.environ.get('SKEIN_CONFIG',
os.path.join(os.path.expanduser('~'), '.skein'))
application_id = os.environ.get('SKEIN_APPLICATION_ID')
appmaster_address = os.environ.get('SKEIN_APPMASTER_ADDRESS')
container_id = os.environ.get('SKEIN_CONTAINER_ID')
yarn_container_id = os.environ.get('CONTAINER_ID')
try:
container_resources = Resources(
int(os.environ.get('SKEIN_RESOURCE_MEMORY')),
int(os.environ.get('SKEIN_RESOURCE_VCORES')))
except (ValueError, TypeError):
container_resources = None
# Find the container directory from all the options
container_dir = None
if yarn_container_id is not None:
for path in os.environ.get('LOCAL_DIRS', '').split(','):
check_dir = os.path.join(path, yarn_container_id)
# YARN will create all possible directories, but only populate
# one of them. We have to check that the files exist in the
# directory, rather than just that the directory exists.
if (os.path.exists(os.path.join(check_dir, '.skein.crt')) and
os.path.exists(os.path.join(check_dir, '.skein.pem'))):
container_dir = check_dir
break
mapping = dict(application_id=application_id,
appmaster_address=appmaster_address,
config_dir=config_dir,
container_id=container_id,
container_resources=container_resources,
yarn_container_id=yarn_container_id,
container_dir=container_dir)
object.__setattr__(self, '_mapping', mapping)
def __getitem__(self, key):
return self._mapping[key]
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError("%r object has no attribute %r"
% (type(self).__name__, key))
def __setattr__(self, key, val):
raise AttributeError("%r object has no attribute %r"
% (type(self).__name__, key))
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self._mapping if c.isidentifier())
return list(o)
def __iter__(self):
return iter(self._mapping)
def __len__(self):
return len(self._mapping)
properties = Properties()
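# e.g. (values depend on the environment of the current process):
#   properties.config_dir    -> "~/.skein" unless SKEIN_CONFIG is set
#   properties.container_id  -> None when not running inside a skein container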
def secure_channel(address, security):
cert_bytes = security._get_bytes('cert')
key_bytes = security._get_bytes('key')
creds = grpc.ssl_channel_credentials(cert_bytes, key_bytes, cert_bytes)
options = [('grpc.ssl_target_name_override', 'skein-internal'),
('grpc.default_authority', 'skein-internal')]
return grpc.secure_channel(address, creds, options)
def _read_driver():
try:
with open(os.path.join(properties.config_dir, 'driver'), 'r') as fil:
data = json.load(fil)
address = data['address']
pid = data['pid']
except Exception:
address = pid = None
return address, pid
def _write_driver(address, pid):
# Ensure the config dir exists
os.makedirs(properties.config_dir, exist_ok=True)
# Write to the driver file
with open(os.path.join(properties.config_dir, 'driver'), 'w') as fil:
json.dump({'address': address, 'pid': pid}, fil)
def _start_driver(security=None, set_global=False, keytab=None, principal=None,
log=None, log_level=None, java_options=None):
if security is None:
security = Security.from_default()
if log_level is None:
log_level = LogLevel(
os.environ.get('SKEIN_LOG_LEVEL', LogLevel.INFO)
)
else:
log_level = LogLevel(log_level)
if not os.path.exists(_SKEIN_JAR):
raise context.FileNotFoundError("Failed to find the skein jar file")
if keytab is not None:
keytab = os.path.abspath(keytab)
if not os.path.exists(keytab):
raise context.FileNotFoundError("keytab doesn't exist at %r" % keytab)
if principal is None:
raise context.ValueError("Principal must be specified for keytab login")
elif principal is not None:
raise context.ValueError("Keytab must be specified for keytab login")
# Compose the command to start the driver server
java_bin = ('%s/bin/java' % os.environ['JAVA_HOME']
if 'JAVA_HOME' in os.environ
else 'java')
command = [java_bin,
'-Dskein.log.level=%s' % log_level]
# Configure location of native libs if directory exists
if 'HADOOP_HOME' in os.environ:
native_path = '%s/lib/native' % os.environ['HADOOP_HOME']
if os.path.exists(native_path):
command.append('-Djava.library.path=%s' % native_path)
if java_options is None:
java_options = os.environ.get('SKEIN_DRIVER_JAVA_OPTIONS', '')
if isinstance(java_options, str):
java_options = java_options.split()
command.extend(java_options)
command.extend(['com.anaconda.skein.Driver', '--jar', _SKEIN_JAR])
if keytab is not None:
command.extend(['--keytab', keytab, '--principal', principal])
if set_global:
command.append("--daemon")
env = dict(os.environ)
env['SKEIN_CERTIFICATE'] = security._get_bytes('cert')
env['SKEIN_KEY'] = security._get_bytes('key')
# Update the classpath in the environment
classpath = (subprocess.check_output(['yarn', 'classpath', '--glob'])
.decode('utf-8'))
env['CLASSPATH'] = '%s:%s' % (_SKEIN_JAR, classpath)
callback = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
callback.bind(('127.0.0.1', 0))
callback.listen(1)
with closing(callback):
_, callback_port = callback.getsockname()
env['SKEIN_CALLBACK_PORT'] = str(callback_port)
if log is None:
outfil = None
elif log is False:
outfil = subprocess.DEVNULL
else:
outfil = open(log, mode='w')
infil = None if set_global else subprocess.PIPE
proc = subprocess.Popen(command,
stdin=infil,
stdout=outfil,
stderr=outfil,
env=env,
start_new_session=True)
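# Handshake with the freshly started driver (as implemented below): the driver
# connects back to the callback socket and writes its gRPC port as a 4-byte
# big-endian int, then a 2-byte length prefix followed by its hostname; those
# two values form the driver address returned to the caller.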
while proc.poll() is None:
readable, _, _ = select.select([callback], [], [], 1)
if callback in readable:
connection = callback.accept()[0]
with closing(connection):
stream = connection.makefile(mode="rb")
msg_port = stream.read(4)
if not msg_port:
raise DriverError("Failed to read in client port")
port = struct.unpack("!i", msg_port)[0]
msg_length = stream.read(2)
if not msg_length:
raise DriverError("Failed to read length of hostname address")
length = struct.unpack("!h", msg_length)[0]
msg_hostname = stream.read(length)
if not msg_hostname:
raise DriverError("Failed to read hostname address")
hostname = msg_hostname.decode('utf-8')
break
else:
raise DriverError("Failed to start java process")
address = '%s:%d' % (hostname, port)
if set_global:
Client.stop_global_driver()
_write_driver(address, proc.pid)
proc = None
return address, proc
class _ClientBase(object):
__slots__ = ('__weakref__',)
def _call(self, method, req, timeout=None):
try:
return getattr(self._stub, method)(req, timeout=timeout)
except grpc.RpcError as _exc:
exc = _exc
code = exc.code()
if code == grpc.StatusCode.UNAVAILABLE:
raise ConnectionError("Unable to connect to %s" % self._server_name)
elif code == grpc.StatusCode.DEADLINE_EXCEEDED:
raise TimeoutError("Unable to connect to %s" % self._server_name)
elif code == grpc.StatusCode.NOT_FOUND:
raise context.KeyError(exc.details())
elif code in (grpc.StatusCode.INVALID_ARGUMENT,
grpc.StatusCode.FAILED_PRECONDITION,
grpc.StatusCode.ALREADY_EXISTS):
raise context.ValueError(exc.details())
else:
raise self._server_error(exc.details())
class Client(_ClientBase):
"""Connect to and schedule applications on the YARN cluster.
Parameters
----------
address : str, optional
The address for the driver. By default will create a new driver
process. Pass in address explicitly to connect to a different driver.
To connect to the global driver see ``Client.from_global_driver``.
security : Security, optional
The security configuration to use to communicate with the driver.
Defaults to the global configuration.
keytab : str, optional
Path to a keytab file to use when starting the driver. If not provided,
the driver will login using the ticket cache instead.
principal : str, optional
The principal to use when starting the driver with a keytab.
log : str, bool, or None, optional
When starting a new driver, sets the logging behavior for the driver.
Values may be a path for logs to be written to, ``None`` to log to
stdout/stderr, or ``False`` to turn off logging completely. Default is
``None``.
log_level : str or skein.model.LogLevel, optional
The driver log level. Sets the ``skein.log.level`` system property. One
of {'ALL', 'TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL', 'OFF'}
(from most to least verbose). Default is 'INFO'.
java_options : str or list of str, optional
Additional Java options to forward to the driver. Can also be
configured by setting the environment variable
``SKEIN_DRIVER_JAVA_OPTIONS``.
Examples
--------
>>> with skein.Client() as client:
... app_id = client.submit('spec.yaml')
"""
__slots__ = ('address', 'security', '_channel', '_stub', '_proc')
_server_name = 'driver'
_server_error = DriverError
def __init__(self, address=None, security=None, keytab=None,
principal=None, log=None, log_level=None, java_options=None):
if security is None:
security = Security.from_default()
if address is None:
address, proc = _start_driver(security=security,
keytab=keytab,
principal=principal,
log=log,
log_level=log_level,
java_options=java_options)
else:
proc = None
with grpc_fork_support_disabled():
self._channel = secure_channel(address, security)
self._stub = proto.DriverStub(self._channel)
self.address = address
self.security = security
self._proc = proc
try:
# Ping server to check connection
self._call('ping', proto.Empty())
except Exception:
if proc is not None:
proc.stdin.close() # kill the driver on error
proc.wait()
raise
def __reduce__(self):
return (type(self), (self.address, self.security))
@classmethod
def from_global_driver(cls):
"""Connect to the global driver."""
address, _ = _read_driver()
if address is None:
raise DriverNotRunningError("No driver currently running")
security = Security.from_default()
return Client(address=address, security=security)
@staticmethod
def start_global_driver(keytab=None, principal=None, log=None,
log_level=None, java_options=None):
"""Start the global driver.
No-op if the global driver is already running.
Parameters
----------
keytab : str, optional
Path to a keytab file to use when starting the driver. If not
provided, the driver will login using the ticket cache instead.
principal : str, optional
The principal to use when starting the driver with a keytab.
log : str, bool, or None, optional
Sets | |
<filename>correction_tools/alignment.py
import os,time
import numpy as np
from .. import _allowed_colors, _image_size, _num_buffer_frames, _num_empty_frames, _image_dtype
from .. import _correction_folder
def _find_boundary(_ct, _radius, _im_size):
_bds = []
for _c, _sz in zip(_ct, _im_size):
_bds.append([max(_c-_radius, 0), min(_c+_radius, _sz)])
return np.array(_bds, dtype=int)
def generate_drift_crops(single_im_size=_image_size, coord_sel=None, drift_size=None):
"""Function to generate drift crop from a selected center and given drift size
keywards:
single_im_size: single image size to generate crops, np.ndarray like;
coord_sel: selected center coordinate to split image into 4 rectangles, np.ndarray like;
drift_size: size of drift crop, int or np.int;
returns:
crops: 4x3x2 np.ndarray.
"""
# check inputs
_single_im_size = np.array(single_im_size)
if coord_sel is None:
coord_sel = np.array(_single_im_size/2, dtype=int)
if coord_sel[-2] >= _single_im_size[-2] or coord_sel[-1] >= _single_im_size[-1]:
raise ValueError(f"wrong input coord_sel:{coord_sel}, should be smaller than single_im_size:{single_im_size}")
if drift_size is None:
drift_size = int(np.max(_single_im_size)/3)
# generate crop centers
crop_cts = [
np.array([coord_sel[-3]/2,
coord_sel[-2]/2,
coord_sel[-1]/2,]),
np.array([coord_sel[-3]/2,
(coord_sel[-2]+_single_im_size[-2])/2,
(coord_sel[-1]+_single_im_size[-1])/2,]),
np.array([coord_sel[-3]/2,
(coord_sel[-2]+_single_im_size[-2])/2,
coord_sel[-1]/2,]),
np.array([coord_sel[-3]/2,
coord_sel[-2]/2,
(coord_sel[-1]+_single_im_size[-1])/2,]),
np.array([coord_sel[-3]/2,
coord_sel[-2],
coord_sel[-1]/2,]),
np.array([coord_sel[-3]/2,
coord_sel[-2],
(coord_sel[-1]+_single_im_size[-1])/2,]),
np.array([coord_sel[-3]/2,
coord_sel[-2]/2,
coord_sel[-1],]),
np.array([coord_sel[-3]/2,
(coord_sel[-2]+_single_im_size[-2])/2,
coord_sel[-1],]),
]
# generate boundaries
crops = [_find_boundary(_ct, _radius=drift_size/2, _im_size=single_im_size) for _ct in crop_cts]
return np.array(crops)
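# Hypothetical usage sketch (image size and drift_size assumed for illustration):
#   crops = generate_drift_crops(single_im_size=np.array([30, 2048, 2048]),
#                                drift_size=500)
#   crops.shape   # -> (8, 3, 2); each crop is [[zmin, zmax], [ymin, ymax], [xmin, xmax]]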
def align_beads(tar_cts, ref_cts,
tar_im=None, ref_im=None,
use_fft=True, fft_filt_size=0,
match_distance_th=2.,
check_paired_cts=True,
outlier_sigma=1.5,
return_paired_cts=True,
verbose=True):
"""Align single bead image to return drifts
with two options:
not use_fft: slow: enumerate all possible combinations of center pairs, keep good ones, calculate drift between pairs.
use_fft: use FFT to get pixel-level translation, then calculate finer drift.
Inputs:
tar_cts: target centers from target image, 2d-numpy array
ref_cts: reference centers from reference image, 2d-numpy array
tar_im: target image, np.ndarray, required if use_fft
ref_im: reference image, np.ndarray, required if use_fft
fft_filt_size: blurring filter size before FFT, int(default: 0)
match_distance_th: threshold to uniquely match centers, float (default: 2.)
check_paired_cts: do extra checking for a bead pair, whether it is close to its neighboring drifts, bool (default: True)
outlier_sigma: gaussian sigma for outlier bead shift, float (default: 1.5 (over this threshold))
return_paired_cts: whether return paired centers, bool (default: True)
verbose: say something!, bool (default: True)
Outputs:
_mean_shift: mean drift calculated from paired centers, 1d numpy array of length dim,
conditional outputs:
_paired_tar_cts: paired target centers, 2d numpy array of n_spots*dim
_paired_ref_cts: paired reference centers, 2d numpy array of n_spots*dim
"""
# convert inputs
_tar_cts = np.array(tar_cts)
_ref_cts = np.array(ref_cts)
_distance_th = float(match_distance_th)
from ..alignment_tools import fft3d_from2d
from ..spot_tools.matching import find_paired_centers, check_paired_centers
# case 1: directly align centers by brute force
if not use_fft:
from ..alignment_tools import translation_align_pts
# calculate drift
_drift, _paired_ref_cts, _paired_tar_cts = translation_align_pts(
_ref_cts, _tar_cts,
cutoff=_distance_th, return_pts=True,
verbose=verbose,
)
# case 2: fft align images and match centers
else:
if tar_im is None or ref_im is None:
raise ValueError(f"both tar_im and ref_im should be given if use FFT!")
if np.shape(tar_im) != np.shape(ref_im):
raise IndexError(f"tar_im shape:{np.shape(tar_im)} should match ref_im shape:{np.shape(ref_im)}")
# do rough alignment
_rough_drift = fft3d_from2d(tar_im, ref_im,
gb=fft_filt_size,
max_disp=np.max(np.shape(tar_im))/2)
# match centers
_drift, _paired_tar_cts, _paired_ref_cts = find_paired_centers(
_tar_cts, _ref_cts, _rough_drift,
cutoff=_distance_th, return_paired_cts=True,
verbose=verbose,
)
print("before check:", _drift, len(_paired_ref_cts))
# check paired centers
if check_paired_cts and len(_paired_ref_cts) > 3:
_drift, _paired_tar_cts, _paired_ref_cts = check_paired_centers(
_paired_tar_cts, _paired_ref_cts,
outlier_sigma=outlier_sigma,
return_paired_cts=True,
verbose=verbose,
)
# return
_return_args = [_drift]
if return_paired_cts:
_return_args.append(_paired_tar_cts)
_return_args.append(_paired_ref_cts)
return tuple(_return_args)
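# Hypothetical usage sketch (center arrays assumed to be n_spots x 3 and the two
# bead images assumed to share the same shape):
#   drift, paired_tar, paired_ref = align_beads(tar_cts, ref_cts,
#                                               tar_im=tar_im, ref_im=ref_im,
#                                               use_fft=True,
#                                               match_distance_th=2.)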
# basic function to align single image
def align_single_image(filename, crop_list, bead_channel='488',
all_channels=_allowed_colors,
single_im_size=_image_size,
num_buffer_frames=_num_buffer_frames,
num_empty_frames=_num_empty_frames,
illumination_corr=True,
correction_folder=_correction_folder,
ref_filename=None, ref_all_channels=None,
ref_centers=None, ref_ims=None,
th_seed=100, th_seed_per=98, use_percentile=False,
max_num_seeds=None, min_num_seeds=50,
fitting_kwargs={},
use_fft=True, fft_filt_size=0,
match_distance_th=2.,
check_paired_cts=True,
outlier_sigma=1.5,
good_drift_th=1.,
return_target_ims=False,
return_paired_cts=False,
verbose=False,
):
"""Function to align one single image
Inputs:
Outputs:
"""
from scipy.spatial.distance import cdist, pdist, squareform
from ..io_tools.load import correct_fov_image, old_correct_fov_image  # old_correct_fov_image is used for the reference image below
from ..alignment_tools import fft3d_from2d
from ..spot_tools.fitting import get_centers
## check inputs
# check crop_list:
if len(crop_list) < 2:
raise IndexError(f"crop_list should at least have 2 elements")
elif len(crop_list[0]) != len(single_im_size):
raise IndexError("dimension of crop_list should match single_im_size")
# check channels:
_all_channels = [str(_ch) for _ch in all_channels]
# check bead_channel
_bead_channel = str(bead_channel)
if _bead_channel not in all_channels:
raise ValueError(f"bead channel {_bead_channel} not exist in all channels given:{_all_channels}")
# check ref_all_channels
if ref_all_channels is None:
_ref_all_channels = _all_channels
else:
_ref_all_channels = [str(_ch) for _ch in ref_all_channels]
# check filename file type
if isinstance(filename, np.ndarray):
if verbose:
print(f"-- start aligning given image to", end=' ')
_bead_im = filename
if np.shape(_bead_im) != tuple(single_im_size):
raise IndexError(f"shape of target image:{np.shape(_bead_im)} and single_im_size:{single_im_size} doesn't match!")
elif isinstance(filename, str):
if verbose:
print(f"-- start aligning file {filename} to", end=' ')
if not os.path.isfile(filename) or filename.split('.')[-1] != 'dax':
raise IOError(f"input filename: {filename} should be a .dax file!")
_bead_im = correct_fov_image(filename, [_bead_channel],
single_im_size=single_im_size,
all_channels=all_channels,
num_buffer_frames=num_buffer_frames,
num_empty_frames=num_empty_frames,
calculate_drift=False,
correction_folder=correction_folder,
illumination_corr=illumination_corr,
bleed_corr=False, chromatic_corr=False,
z_shift_corr=False, hot_pixel_corr=True,
normalization=False, return_drift=False,
verbose=False,
)[0]
else:
raise IOError(f"Wrong input file type, {filename} should be .dax file or np.ndarray")
# crop target image:
_tar_ims = [_bead_im[tuple([slice(_s[0], _s[-1]) for _s in _c])] for _c in crop_list]
# get centers
_tar_ct_list = [get_centers(_im, th_seed=th_seed,
th_seed_per=th_seed_per,
use_percentile=use_percentile,
max_num_seeds=max_num_seeds,
min_num_seeds=min_num_seeds,
**fitting_kwargs,
) for _im in _tar_ims]
## acquire references
# case 1: ref_centers and ref_ims are given:
if ref_centers is not None and ref_ims is not None:
if verbose:
print(f"given ref_centers and images, n={len(ref_centers)}")
if len(ref_centers) != len(ref_ims):
raise IndexError(f"length of ref_centers:{len(ref_centers)} should match length of ref_ims:{len(ref_ims)}")
elif len(crop_list) != len(ref_centers):
raise IndexError(f"length of crop_list:{len(crop_list)} should match length of ref_centers:{len(ref_centers)}")
_ref_ims = ref_ims
_ref_ct_list = ref_centers
# case 2: ref_filename is given:
elif ref_filename is not None:
if isinstance(ref_filename, np.ndarray):
if verbose:
print(f"ref image directly given")
_ref_bead_im = ref_filename
elif isinstance(ref_filename, str):
if verbose:
print(f"ref_file: {ref_filename}")
_ref_bead_im = old_correct_fov_image(ref_filename, [_bead_channel],
single_im_size=single_im_size,
all_channels=all_channels,
num_buffer_frames=num_buffer_frames,
num_empty_frames=num_empty_frames,
calculate_drift=False,
correction_folder=correction_folder,
illumination_corr=illumination_corr,
warp_image=False,
bleed_corr=False,
chromatic_corr=False,
z_shift_corr=False,
hot_pixel_corr=True,
normalization=False,
return_drift=False,
verbose=False,
)[0][0]
_ref_ims = []
for _c in crop_list:
_crop = tuple([slice(int(_s[0]), int(_s[-1])) for _s in _c])
_ref_ims.append(_ref_bead_im[_crop])
# collect ref_ct_list
from ..spot_tools.fitting import select_sparse_centers
_ref_ct_list = []
for _im in _ref_ims:
_cand_cts = get_centers(_im, th_seed=th_seed,
th_seed_per=th_seed_per,
use_percentile=use_percentile,
max_num_seeds=max_num_seeds,
min_num_seeds=min_num_seeds,
**fitting_kwargs,
)
_ref_ct_list.append(select_sparse_centers(_cand_cts,
distance_th=match_distance_th))
else:
raise ValueError(f"ref_filename or ref_centers+ref_ims should be given!")
# Do alignment
_drift_list = []
_paired_tar_ct_list = []
_paired_ref_ct_list = []
# iterate until find good drifts or calculated all cropped images
while len(_drift_list) < len(crop_list):
# get image
_cid = len(_drift_list)
# calculate drift
_drift, _paired_tar_cts, _paired_ref_cts = align_beads(
_tar_ct_list[_cid], _ref_ct_list[_cid],
_tar_ims[_cid], _ref_ims[_cid],
use_fft=use_fft,
fft_filt_size=fft_filt_size,
match_distance_th=match_distance_th,
check_paired_cts=check_paired_cts,
outlier_sigma=outlier_sigma,
return_paired_cts=True,
verbose=verbose,
)
# judge whether this matching is successful
if len(_paired_tar_cts) == 0:
_drift = np.inf * np.ones(len(single_im_size))
# append
_drift_list.append(_drift)
_paired_tar_ct_list.append(_paired_tar_cts)
_paired_ref_ct_list.append(_paired_ref_cts)
# check if matched well:
if len(_drift_list) >=2:
if (cdist(_drift[np.newaxis,:], _drift_list[:-1])[0] < good_drift_th).any():
break
## select drifts
_dists = squareform(pdist(_drift_list))
_dists[np.arange(len(_dists)), np.arange(len(_dists))] = np.inf
_inds = np.unravel_index(np.argmin(_dists, axis=None), _dists.shape)
# get the two that are closest
if _dists[_inds] > good_drift_th:
_success_flag = False
print(f"-- Suspicious Failure: selcted drifts: {_drift_list[_inds[0]]}, {_drift_list[_inds[1]]} are not close enough.")
else:
_success_flag = True
# extract _final_drift and return
_final_drift = np.nanmean([_drift_list[_inds[0]], _drift_list[_inds[1]]], axis=0)
# return
_return_args = [_final_drift, _success_flag]
if return_target_ims:
_return_args.append(_tar_ims)
if return_paired_cts:
_return_args.append(_paired_tar_ct_list)
_return_args.append(_paired_ref_ct_list)
return tuple(_return_args)
# basic function to align single image
def cross_correlation_align_single_image(im, ref_im, precision_fold=100,
all_channels=_allowed_colors,
ref_all_channels=None, drift_channel='488',
single_im_size=_image_size,
num_buffer_frames=_num_buffer_frames,
num_empty_frames=_num_empty_frames,
correction_folder=_correction_folder,
correction_args={},
return_all=False,
verbose=True, detailed_verbose=False,
):
"""Function to align one single image by FFT
Inputs:
im:
Outputs:
"""
if verbose:
print(f"-- aligning image", end=' ')
if isinstance(im, str):
print(os.path.join(os.path.basename(os.path.dirname(im))), os.path.basename(im), end=' ')
if isinstance(ref_im, str):
print('to '+os.path.join(os.path.basename(os.path.dirname(ref_im))), os.path.basename(ref_im), end=' ')
# set default correction args
_correction_args = {
'hot_pixel_corr':True,
'hot_pixel_th':4,
'z_shift_corr':False,
'illumination_corr':True,
'illumination_profile':None,
'bleed_corr':False,
'chromatic_corr':False,
'normalization':False,
}
_correction_args.update(correction_args)
# check im file type
if isinstance(im, np.ndarray):
if verbose:
print(f"-> directly use image")
_im = im.copy()
if np.shape(_im) != tuple(np.array(single_im_size)):
raise IndexError(f"shape of im:{np.shape(_im)} and single_im_size:{single_im_size} doesn't match!")
elif isinstance(im, str):
if 'correct_fov_image' not in locals():
from ..io_tools.load import correct_fov_image
# load image
_im = correct_fov_image(im, [drift_channel],
single_im_size=single_im_size, all_channels=all_channels,
num_buffer_frames=num_buffer_frames, num_empty_frames=num_empty_frames,
drift=[0,0,0], | |
"""
Transform contains structures helpful for writing analysis and
transformation passes over blocks.
Most of the functions in this module are for advanced users only.
However, the following functions are prebuilt transformations
that everyone can use:
(As of 7/1/16 there are none in this folder).
Other user accessible transforms that are based on these function
can be found in the passes module.
PyRTL makes it easy to make your own transformation. However
in order to make your first transform, some knowledge about the
structure of PyRTL Internal Representation (IR) of the circuit
is necessary. Specifically, one must know what Block, LogicNet,
and WireVector are as well as how Blocks store the latter two
structures (through Block.logic, block.Wirevector_set, etc).
"""
import functools
from .pyrtlexceptions import PyrtlError
from .core import set_working_block, LogicNet, working_block
from .wire import Const, Input, Output, WireVector, Register
def net_transform(transform_func, block=None, **kwargs):
""" Maps nets to new sets of nets according to a custom function.
:param transform_func:
Function signature: func(orig_net (logicnet)) -> keep_orig_net (bool)
:param block: optional block to work on (defaults to working block)
:return:
If transform_func does not return True, the original net is removed from
the block's logic set. The net's argument wire/destination wires are not removed.
"""
block = working_block(block)
with set_working_block(block, True):
for net in block.logic.copy():
keep_orig_net = transform_func(net, **kwargs)
if not keep_orig_net:
block.logic.remove(net)
def all_nets(transform_func):
""" Decorator that wraps a net transform function. """
@functools.wraps(transform_func)
def t_res(**kwargs):
net_transform(transform_func, **kwargs)
return t_res
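# A minimal sketch of a pass built with this decorator (hypothetical example,
# not part of the library): returning True keeps every net, so this transform
# is a no-op walk over the working block's logic.
#   @all_nets
#   def keep_all_nets(net):
#       return True
#   keep_all_nets()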
def wire_transform(transform_func, select_types=WireVector,
exclude_types=(Input, Output, Register, Const), block=None):
""" Maps Wires to new sets of nets and wires according to a custom function.
:param transform_func: The function you want to run on all wires.
Function signature: func(orig_wire (WireVector)) -> src_wire, dst_wire
src_wire is the src for the stuff you made in the transform func
and dst_wire is the sink. To indicate that the wire has not been changed,
make src_wire and dst_wire both the original wire.
:param select_types: Type or Tuple of types of WireVectors to replace
:param exclude_types: Type or Tuple of types of WireVectors to exclude from replacement
:param block: The Block to replace wires on
Note that if both new_src and new_dst don't equal orig_wire, orig_wire will
be removed from the block entirely.
"""
block = working_block(block)
src_nets, dst_nets = block.net_connections(include_virtual_nodes=False)
for orig_wire in block.wirevector_subset(select_types, exclude_types):
new_src, new_dst = transform_func(orig_wire)
replace_wire_fast(orig_wire, new_src, new_dst, src_nets, dst_nets, block)
def all_wires(transform_func):
""" Decorator that wraps a wire transform function. """
@functools.wraps(transform_func)
def t_res(**kwargs):
wire_transform(transform_func, **kwargs)
return t_res
def replace_wires(wire_map, block=None):
""" Replace all wires in a block.
:param {old_wire: new_wire} wire_map: mapping of old wires to new wires
:param block: block to operate over (defaults to working block)
"""
block = working_block(block)
src_nets, dst_nets = block.net_connections(include_virtual_nodes=False)
for old_w, new_w in wire_map.items():
replace_wire_fast(old_w, new_w, new_w, src_nets, dst_nets, block)
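# Hypothetical usage sketch: tie an Input pin to a constant by replacing the
# Input wire everywhere it is used (wire names assumed for illustration).
#   a, b = pyrtl.Input(1, 'a'), pyrtl.Input(1, 'b')
#   out = pyrtl.Output(1, 'out')
#   out <<= a & b
#   replace_wires({a: pyrtl.Const(1, 1)})   # 'a' is now a constant 1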
def replace_wire_fast(orig_wire, new_src, new_dst, src_nets, dst_nets, block=None):
""" Replace orig_wire with new_src and/or new_dst.
:param WireVector orig_wire: Wire to be replaced
:param WireVector new_src: Wire to replace orig_wire, anywhere orig_wire is the
destination of a net. Ignored if orig_wire equals new_src.
:param WireVector new_dst: Wire to replace orig_wire, anywhere orig_wire is an
argument of a net. Ignored if orig_wire equals new_dst.
:param {WireVector: LogicNet} src_nets: Maps a wire to the net where it is a dest
:param {WireVector: List[LogicNet]} dst_nets: Maps a wire to list of nets where it is an arg
:param Block block: The block on which to operate (defaults to working block)
The net that orig_wire originates from (its source net) will use new_src as its
destination wire. The nets that orig_wire went to (its destination nets) will now
have new_dst as one of their argument wires instead.
This removes and/or adds nets to the block's logic set. This also *updates* the
src_nets and dst_nets maps that are passed in, such that the following hold:
```
old_src_net = src_nets[orig_wire]
src_nets[new_src] = old_src_net (where old_src_net.dests = (new_src,))
```
and
```
old_dst_nets = dst_nets[orig_wire]
dst_nets[new_dst] = [old_dst_net (where old_dst_net.args replaces orig_wire with new_dst) foreach old_dst_net] # noqa
```
For example, given the graph on left, `replace_wire_fast(w1, w4, w1, ...)` produces on right:
```
a b c d a b c d
| | | | | | | |
net net net net
| | | |
w1 w2 ==> produces ==> w4 w1 w2
| | | |
net net
| |
w3 w3
```
And given the graph on the left, `replace_wire_fast(w1, w1, w4, ...)` produces on the right:
```
a b c d a b c d
| | | | | | | |
net net net net
| | | |
w1 w2 ==> produces ==> w1 w4 w2
| | | |
net net
| |
w3 w3
```
Calling `replace_wire_fast(w1, w4, w4, ...)`, then, fully replaces w1 with w4 in both
its argument and dest positions:
```
a b c d a b c d
| | | | | | | |
net net net net
| | | |
w1 w2 ==> produces ==> w4 w2
| | | |
net net
| |
w3 w3
```
"""
def remove_net(net_):
for arg in set(net_.args):
dst_nets[arg].remove(net_)
if not len(dst_nets[arg]):
del dst_nets[arg]
if len(net_.dests) == 1:
del src_nets[net_.dests[0]]
block.logic.remove(net_)
def add_net(net_):
for arg in set(net_.args):
if arg not in dst_nets:
dst_nets[arg] = [net_]
else:
dst_nets[arg].append(net_)
if len(net_.dests) == 1:
src_nets[net_.dests[0]] = net_
block.add_net(net_)
# src and dst in this function are all relative to wires
block = working_block(block)
if new_src is not orig_wire and orig_wire in src_nets:
# don't need to add the new_src and new_dst because they were made at creation
net = src_nets[orig_wire]
new_net = LogicNet(
op=net.op, op_param=net.op_param, args=net.args,
dests=tuple(new_src if w is orig_wire else w for w in net.dests))
remove_net(net)
add_net(new_net)
if new_dst is not orig_wire and orig_wire in dst_nets:
old_nets = tuple(dst_nets[orig_wire]) # need a copy bc the original will be modified
for net in old_nets:
new_net = LogicNet(
op=net.op, op_param=net.op_param, dests=net.dests,
args=tuple(new_dst if w is orig_wire else w for w in net.args))
remove_net(net)
add_net(new_net)
if new_dst is not orig_wire and new_src is not orig_wire:
block.remove_wirevector(orig_wire)
def clone_wire(old_wire, name=None):
""" Makes a copy of any existing wire.
:param old_wire: The wire to clone
:param name: A name for the new wire (required if the old wire
and newly cloned wire are part of the same block)
This function is mainly intended to be used when the two wires are from different
blocks. Making two wires with the same name in the same block is not allowed.
"""
if name is None:
if working_block() is old_wire._block:
raise PyrtlError("Must provide a name for the newly cloned wire "
"when cloning within the same block.")
name = old_wire.name
if name in working_block().wirevector_by_name:
raise PyrtlError("Cannot give a newly cloned wire the same name "
"as an existing wire.")
if isinstance(old_wire, Const):
return Const(old_wire.val, old_wire.bitwidth, name=name)
else:
return old_wire.__class__(old_wire.bitwidth, name=name)
def copy_block(block=None, update_working_block=True):
""" Makes a copy of an existing block.
:param block: The block to clone (defaults to the working block).
:return: The resulting block
"""
block_in = working_block(block)
block_out, temp_wv_map = _clone_block_and_wires(block_in)
mems = {}
for net in block_in.logic:
_copy_net(block_out, net, temp_wv_map, mems)
block_out.mem_map = mems
block_out.io_map = {io: w for io, w in temp_wv_map.items() if isinstance(io, (Input, Output))}
block_out.reg_map = {r: w for r, w in temp_wv_map.items() if isinstance(r, Register)}
if update_working_block:
set_working_block(block_out)
return block_out
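# Hypothetical usage sketch: snapshot the current design before a destructive
# transformation, keeping the original as the working block.
#   backup = copy_block(update_working_block=False)
#   # ... run transformations on the working block ...
#   set_working_block(backup)   # roll back if needed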
def _clone_block_and_wires(block_in):
""" This is a generic function to copy the WireVectors for another round of
synthesis. This does not split a WireVector with multiple wires.
:param block_in: The block to change
:return: the resulting block and a WireVector map
"""
block_in.sanity_check() # make sure that everything is valid
block_out = block_in.__class__()
temp_wv_map = {}
with set_working_block(block_out, no_sanity_check=True):
for wirevector in block_in.wirevector_subset():
new_wv = clone_wire(wirevector)
temp_wv_map[wirevector] = new_wv
return block_out, temp_wv_map
def _copy_net(block_out, net, temp_wv_net, mem_map):
""" This function makes a copy of all nets passed to it for synth uses.
"""
new_args = tuple(temp_wv_net[a_arg] | |
#!/usr/bin/env python
import tkinter
import tkinter.simpledialog
import tkinter.filedialog
from graphics import Pmw
import AUTOutil
from graphics import optionHandler
import math
import sys
import string
GrapherError="GrapherError"
Axes3D=None
class BasicGrapher(optionHandler.OptionHandler,tkinter.Canvas):
"""Documentation string for Basic Grapher
A simple graphing widget
By <NAME>."""
def __init__(self,parent=None,callback=None,**kw):
self.data = []
#Get the data from the arguments and then erase the
#ones which are not used by canvas
optionDefaults={}
optionDefaults["minx"] = (0,callback)
optionDefaults["maxx"] = (0,callback)
optionDefaults["miny"] = (0,callback)
optionDefaults["maxy"] = (0,callback)
optionDefaults["minz"] = (0,callback)
optionDefaults["maxz"] = (0,callback)
optionDefaults["azimuth"] = (None,callback)
optionDefaults["elevation"] = (None,callback)
optionDefaults["left_margin"] = (80,callback)
optionDefaults["right_margin"] = (40,callback)
optionDefaults["top_margin"] = (40,callback)
optionDefaults["bottom_margin"] = (40,callback)
optionDefaults["decorations"] = (True,callback)
optionDefaults["xlabel"] = (None,callback)
optionDefaults["xlabel_fontsize"] = (None,callback)
optionDefaults["ylabel"] = (None,callback)
optionDefaults["ylabel_fontsize"] = (None,callback)
optionDefaults["zlabel"] = (None,callback)
optionDefaults["zlabel_fontsize"] = (None,callback)
optionDefaults["xticks"] = (5,callback)
optionDefaults["yticks"] = (5,callback)
optionDefaults["zticks"] = (5,callback)
optionDefaults["grid"] = (True,callback)
optionDefaults["tick_label_template"] = ("%.2e",callback)
optionDefaults["tick_length"] = (0.2,callback)
optionDefaults["odd_tick_length"] = (0.4,callback)
optionDefaults["even_tick_length"] = (0.2,callback)
# background is handled by the Canvas widget
optionDefaults["foreground"] = ("black",callback)
optionDefaults["color_list"] = ("black red green blue",callback)
optionDefaults["symbol_font"] = ("-misc-fixed-*-*-*-*-*-*-*-*-*-*-*-*",callback)
optionDefaults["symbol_color"] = ("red",callback)
optionDefaults["smart_label"] = (True,callback)
optionDefaults["line_width"] = (2,callback)
optionDefaults["realwidth"] = (1,callback)
optionDefaults["realheight"] = (1,callback)
optionDefaults["use_labels"] = (True,callback)
optionDefaults["use_symbols"] = (True,callback)
optionDefaults["top_title"] = ("",callback)
optionDefaults["top_title_fontsize"] = (None,callback)
optionDefaults["dashes"] = ((6.0,6.0),callback)
optionDefaults["width"] = (600,callback)
optionDefaults["height"] = (480,callback)
optionAliases = {}
optionAliases["fg"] = "foreground"
# __parseOptions uses functions from the Canvas
# widget, so we need to initialize it first
if kw.get("hide") and 'graphics.grapher_mpl' in sys.modules:
optionHandler.OptionHandler.__init__(self)
else:
tkinter.Canvas.__init__(self,parent)
optionHandler.OptionHandler.__init__(self,tkinter.Canvas)
for key in list(kw):
if key not in optionDefaults:
del kw[key]
self.addOptions(**optionDefaults)
self.addAliases(**optionAliases)
BasicGrapher._configNoDraw(self,**kw)
def __len__(self):
return len(self.data)
def config(self,cnf=None,**kw):
rval = self._configNoDraw(cnf,**kw)
if isinstance(cnf, str) or (cnf is None and not kw):
return rval
self.clear()
self.draw()
configure=config
# This version can be used to increase efficiency
# for example, if you want to config, but know you
# will need to redraw later.
def _configNoDraw(self,cnf=None,**kw):
# note: reset xticks/yticks if min/max are set without ticks
if (cnf is not None or kw) and not isinstance(cnf, str):
dct = (cnf or {}).copy()
dct.update(kw)
for coord in ["x", "y", "z"]:
minc = "min" + coord
maxc = "max" + coord
ticks = coord + "ticks"
if (minc in dct or maxc in dct) and ticks not in dct:
dct[ticks] = None
return optionHandler.OptionHandler.config(self,**dct)
return optionHandler.OptionHandler.config(self,cnf,**kw)
_configureNoDraw = _configNoDraw
def _addData(self,data,newsect=None,color=None,stable=None):
for array in data:
if len(array[0]) != len(array[1]):
raise GrapherError("Array lengths must match")
new_array={}
new_array["x"]=array[0]
new_array["y"]=array[1]
if len(array) > 2:
new_array["z"]=array[2]
new_array["stable"]=stable
new_array["newsect"]=newsect
new_array["color"]=color
if len(array[0]) > 0:
new_array["minx"]=min(array[0])
new_array["maxx"]=max(array[0])
if len(array[1]) > 0:
new_array["miny"]=min(array[1])
new_array["maxy"]=max(array[1])
if "z" in new_array and len(array[2]) > 0:
new_array["minz"]=min(array[2])
new_array["maxz"]=max(array[2])
self.data.append(new_array)
def addData(self,data):
self._addData(data)
self.computeXRange()
self.computeYRange()
self.computeZRange()
self.draw()
def addArray(self,array):
self._addData((array,))
self.computeXRange()
self.computeYRange()
self.computeZRange()
self.draw()
def addDataNoDraw(self,data):
self._addData(data)
def addArrayNoDraw(self,array,newsect=None,color=None,stable=None):
self._addData((array,),newsect,color,stable)
def _delAllData(self):
self.data=[]
# check type for next data
try:
zcolumn = self.cget(self.cget("type")+"_z")
except tkinter.TclError: #in regression test
return
if zcolumn is not None:
self._configNoDraw({self.cget("type")+"_z":None})
print("\nWithout matplotlib 3D plots are not supported.")
print("Plotting only the first two coordinates.")
def delAllData(self):
self._delAllData()
self.clear()
def _delData(self,index):
del self.data[index]
def delData(self,index):
self._delData(index)
self.clear()
self.draw()
def _round(self,val,increment):
"This function returns the closest integer multiple to increment"
quotient = val/increment
remainder = quotient-math.floor(quotient)
if remainder < 0.5:
return math.floor(quotient)*increment
else:
return (math.floor(quotient)+1)*increment
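# e.g. (values illustrative): _round(7.3, 2) -> 8, since 7.3/2 = 3.65 rounds up
# to the next multiple; _round(7.3, 5) -> 5, since 7.3/5 = 1.46 rounds down.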
def _computeNiceRanges(self,minimum,maximum):
# This bit of code computes "nice" range values. Given a
# minimum and maximum it computes a new minimum, maximum,
# and number of divisions so that the number of digits in
# the values at each tick mark
# is minimized
therange = maximum - minimum
inc = math.pow(10,math.ceil(math.log10(therange) - 1.0))
if therange / inc <= 2:
inc = inc / 4
elif therange / inc <= 4:
inc = inc / 2
minimumr = self._round(minimum,inc)
if minimumr > minimum:
minimum = minimumr - inc
else:
minimum = minimumr
maximumr = self._round(maximum,inc)
if maximumr < maximum:
maximum = maximumr + inc
else:
maximum = maximumr
num = int(round(( maximum - minimum ) / inc))
return {"min": minimum, "max": maximum, "divisions": num + 1}
def computeRange(self,coordinate,guess_minimum=None,guess_maximum=None):
minc = "min"+coordinate
maxc = "max"+coordinate
if len(self.data) > 0 and minc not in self.data[0]:
return
if guess_minimum is None and len(self.data) > 0:
guess_minimum = min([entry[minc] for entry in self.data])
if guess_maximum is None and len(self.data) > 0:
guess_maximum = max([entry[maxc] for entry in self.data])
if guess_minimum != guess_maximum:
d = self._computeNiceRanges(guess_minimum,guess_maximum)
self._configNoDraw(**{minc:d["min"],maxc:d["max"],
coordinate+'ticks':d["divisions"]})
elif guess_maximum is not None:
self._configNoDraw(**{minc:guess_minimum-1,maxc:guess_maximum+1})
def computeXRange(self,guess_minimum=None,guess_maximum=None):
self.computeRange("x",guess_minimum,guess_maximum)
def computeYRange(self,guess_minimum=None,guess_maximum=None):
self.computeRange("y",guess_minimum,guess_maximum)
def computeZRange(self,guess_minimum=None,guess_maximum=None):
self.computeRange("z",guess_minimum,guess_maximum)
def getXRange(self):
return [self.cget("minx"),self.cget("maxx")]
def getYRange(self):
return [self.cget("miny"),self.cget("maxy")]
def getZRange(self):
return [self.cget("minz"),self.cget("maxz")]
def clear(self):
for x in self.find_all():
self.delete(x)
def plot(self):
pass
def draw(self):
color_list = self.cget("color_list").split()
minx=self.cget("minx")
maxx=self.cget("maxx")
miny=self.cget("miny")
maxy=self.cget("maxy")
top_margin = self.cget("top_margin")
bottom_margin = self.cget("bottom_margin")
left_margin = self.cget("left_margin")
right_margin = self.cget("right_margin")
width = int(self.cget("realwidth"))
height = int(self.cget("realheight"))
if self.cget("decorations"):
# border
self.create_polygon(left_margin,top_margin,
int(width)-right_margin,top_margin,
int(width)-right_margin,int(height)-bottom_margin,
left_margin,int(height)-bottom_margin,fill="",outline=self.cget("foreground"))
# data
line_width=self.cget("line_width")
adjwidth = width - (left_margin + right_margin)
adjheight = height - (top_margin + bottom_margin)
xscale = (float(maxx) - minx) / adjwidth
yscale = (float(maxy) - miny) / adjheight
i=-1
for d in self.data:
if d["newsect"] is None or d["newsect"]:
i = i+1
if d["color"] is None:
color = i
else:
color = d["color"]
fill=color_list[color%len(color_list)]
curve="curve:%d"%(i,)
n=len(d["x"])
[x,y]=self.__valueToCanvasFast([d["x"][0],d["y"][0]],minx,maxx,miny,maxy,
width,height,left_margin,right_margin,top_margin,bottom_margin)
# If we only have one point we draw a small circle
if n == 1:
self.create_oval(x-3,y-3,x+3,y+3,
tags=("data_point:%d"%(0,),"curve:%d"%(i,),"data"),
fill=fill)
else:
line = [x, y]
xs = d["x"]
ys = d["y"]
stable = d["stable"]
for j in range(1, n):
line.append((xs[j]-minx) / xscale + left_margin)
line.append((adjheight - (ys[j]-miny) / yscale + top_margin))
if stable is None or stable:
self.create_line(line,width=line_width,tags=(curve,"data"),fill=fill)
else:
self.create_line(line,width=line_width,tags=(curve,"data"),fill=fill,dash=(10,10))
if self.cget("decorations"):
# clip stuff outside box
self.create_polygon(0,0,
int(width),0,
int(width),top_margin-1,
0,top_margin-1,
fill=self["background"])
self.create_polygon(0,0,
0,int(height),
left_margin-1,int(height),
left_margin-1,0,
fill=self["background"])
self.create_polygon(int(width),int(height),
int(width),0,
int(width)-right_margin+1,0,
int(width)-right_margin+1,int(height),
fill=self["background"])
self.create_polygon(int(width),int(height),
0,int(height),
0,int(height)-bottom_margin+1,
int(width),int(height)-bottom_margin+1,
fill=self["background"])
# tick marks
xw=float(width) - (float(left_margin) + float(right_margin))
yw=float(height) - (float(top_margin) + float(bottom_margin))
tick_label_template=self.cget("tick_label_template")
tick_length=self.cget("tick_length")
odd_tick_length=self.cget("odd_tick_length")
even_tick_length=self.cget("even_tick_length")
xticks = self.cget("xticks")
if xticks is None:
xticks = self.config("xticks")[3]
else:
xticks=int(xticks)
tick_start_y=yw+bottom_margin
for i in range(xticks):
# The odd tick marks should be longer
if i%2==0:
tick_end_y=yw+bottom_margin*(1+even_tick_length)
else:
tick_end_y=yw+bottom_margin*(1+odd_tick_length)
tick_x=left_margin + float(i)*xw/float(xticks-1)
self.create_line(tick_x,tick_start_y,tick_x,tick_end_y,fill=self.cget("foreground"))
val = self.canvasToValue((tick_x,tick_start_y))
self.create_text(tick_x,tick_end_y,text=tick_label_template%(val[0],),anchor="n",fill=self.cget("foreground"))
if i != 0 and i != xticks - 1 and self.cget("grid") in ["yes",True]:
self.create_line(tick_x,tick_start_y,tick_x,tick_start_y-yw,
fill=self.cget("foreground"),stipple="gray50")
yticks = self.cget("yticks")
if yticks is None:
yticks = self.config("yticks")[3]
else:
yticks=int(yticks)
tick_start_x=left_margin
tick_end_x=left_margin*(1-tick_length)
for i in range(yticks):
tick_y=bottom_margin + float(i)*yw/float(yticks-1)
self.create_line(tick_start_x,tick_y,tick_end_x,tick_y,fill=self.cget("foreground"))
val = self.canvasToValue((tick_start_x,tick_y))
self.create_text(tick_end_x,tick_y,text=tick_label_template%(val[1],),anchor="e",fill=self.cget("foreground"))
if i != 0 and i != yticks - 1 and self.cget("grid") in ["yes",True]:
self.create_line(tick_start_x,tick_y,tick_start_x + xw,tick_y,
fill=self.cget("foreground"),stipple="gray50")
# Axis labels
self.create_text(left_margin*0.3,bottom_margin*0.3,
text=self.cget("ylabel"),anchor="nw",fill=self.cget("foreground"))
self.create_text(int(width)-left_margin*0.3,int(height)-bottom_margin*0.1,
text=self.cget("xlabel"),anchor="se",fill=self.cget("foreground"))
# Title
self.create_text((left_margin-right_margin+int(width))//2,
top_margin*0.1,text=self.cget("top_title"),anchor="n",
fill=self.cget("foreground"))
def valueToCanvas(self,val):
if len(val) != 2:
raise GrapherError("Illegal value choosen for coordinate transformation. Must be a tuple with 2 elements.")
# make a few constants shorter
minx=self.cget("minx")
maxx=self.cget("maxx")
miny=self.cget("miny")
maxy=self.cget("maxy")
width = int(self.cget("realwidth"))
height = int(self.cget("realheight"))
left_margin = self.cget("left_margin")
right_margin = self.cget("right_margin")
top_margin = self.cget("top_margin")
bottom_margin = self.cget("bottom_margin")
return self.__valueToCanvasFast(val,minx,maxx,miny,maxy,
width,height,left_margin,right_margin,top_margin,bottom_margin)
def __valueToCanvasFast(self,val,minx,maxx,miny,maxy,
width,height,left_margin,right_margin,top_margin,bottom_margin):
x = val[0]
y = val[1]
width = width - (left_margin + right_margin)
height = height - (top_margin + bottom_margin)
return [((x-minx)/(maxx-minx))*width + left_margin,
height - ((y-miny)/(maxy-miny))*height + top_margin]
def transform(self,val):
[x,y] = self.valueToCanvas(val)
return [x,self.cget("realheight") - y]
def transform_seq(self,seqs):
minx=self.cget("minx")
maxx=self.cget("maxx")
miny=self.cget("miny")
maxy=self.cget("maxy")
width = int(self.cget("realwidth"))
height = int(self.cget("realheight"))
left_margin = self.cget("left_margin")
right_margin = self.cget("right_margin")
top_margin = self.cget("top_margin")
bottom_margin = self.cget("bottom_margin")
valuetocanvasfast = self.__valueToCanvasFast
sp2 = 5 #fontsize
sp4 = 5
l = []
for i in range(len(seqs[0])):
val = [seqs[0][i],seqs[1][i]]
[x,y] = valuetocanvasfast(val,minx,maxx,miny,maxy,
width,height,left_margin,right_margin,top_margin,bottom_margin)
l.append(
[(x - left_margin) / sp2,((height - y) - bottom_margin) / sp4])
return l
def canvasToValue(self,val):
if len(val) != 2:
raise GrapherError("Illegal value choosen for coordinate transformation. Must be a tuple with 2 elements.")
x = val[0]
if x < self.cget("left_margin"):
x = self.cget("left_margin")
if x > int(self.cget("realwidth")) - self.cget("right_margin"):
x = int(self.cget("realwidth")) - self.cget("right_margin")
y = val[1]
if y < self.cget("top_margin"):
y = self.cget("top_margin")
if y > int(self.cget("realheight")) - self.cget("bottom_margin"):
| |
if dependency not in instance:
yield ValidationError(
"%r is a dependency of %r" % (dependency, property)
)
def validate_enum(self, enums, instance, schema):
if instance not in enums:
yield ValidationError("%r is not one of %r" % (instance, enums))
def validate_ref(self, ref, instance, schema):
with self.resolver.resolving(ref) as resolved:
for error in self.descend(instance, resolved):
yield error
@validates("draft3")
class Draft3Validator(ValidatorMixin, _Draft34CommonMixin, object):
"""
A validator for JSON Schema draft 3.
"""
def validate_type(self, types, instance, schema):
types = _list(types)
all_errors = []
for index, type in enumerate(types):
if type == "any":
return
if self.is_type(type, "object"):
errors = list(self.descend(instance, type, schema_path=index))
if not errors:
return
all_errors.extend(errors)
elif self.is_type(type, "string"):
if self.is_type(instance, type):
return
else:
yield ValidationError(
_types_msg(instance, types), context=all_errors,
)
def validate_properties(self, properties, instance, schema):
if not self.is_type(instance, "object"):
return
for property, subschema in iteritems(properties):
if property in instance:
for error in self.descend(
instance[property],
subschema,
path=property,
schema_path=property,
):
yield error
elif subschema.get("required", False):
error = ValidationError("%r is a required property" % property)
error._set(
validator="required",
validator_value=subschema["required"],
instance=instance,
schema=schema,
)
error.path.appendleft(property)
error.schema_path.extend([property, "required"])
yield error
def validate_disallow(self, disallow, instance, schema):
for disallowed in _list(disallow):
if self.is_valid(instance, {"type" : [disallowed]}):
yield ValidationError(
"%r is disallowed for %r" % (disallowed, instance)
)
def validate_extends(self, extends, instance, schema):
if self.is_type(extends, "object"):
for error in self.descend(instance, extends):
yield error
return
for index, subschema in enumerate(extends):
for error in self.descend(instance, subschema, schema_path=index):
yield error
validate_divisibleBy = _Draft34CommonMixin._validate_multipleOf
META_SCHEMA = {
"$schema" : "http://json-schema.org/draft-03/schema#",
"id" : "http://json-schema.org/draft-03/schema#",
"type" : "object",
"properties" : {
"type" : {
"type" : ["string", "array"],
"items" : {"type" : ["string", {"$ref" : "#"}]},
"uniqueItems" : True,
"default" : "any"
},
"properties" : {
"type" : "object",
"additionalProperties" : {"$ref" : "#", "type": "object"},
"default" : {}
},
"patternProperties" : {
"type" : "object",
"additionalProperties" : {"$ref" : "#"},
"default" : {}
},
"additionalProperties" : {
"type" : [{"$ref" : "#"}, "boolean"], "default" : {}
},
"items" : {
"type" : [{"$ref" : "#"}, "array"],
"items" : {"$ref" : "#"},
"default" : {}
},
"additionalItems" : {
"type" : [{"$ref" : "#"}, "boolean"], "default" : {}
},
"required" : {"type" : "boolean", "default" : False},
"dependencies" : {
"type" : ["string", "array", "object"],
"additionalProperties" : {
"type" : ["string", "array", {"$ref" : "#"}],
"items" : {"type" : "string"}
},
"default" : {}
},
"minimum" : {"type" : "number"},
"maximum" : {"type" : "number"},
"exclusiveMinimum" : {"type" : "boolean", "default" : False},
"exclusiveMaximum" : {"type" : "boolean", "default" : False},
"minItems" : {"type" : "integer", "minimum" : 0, "default" : 0},
"maxItems" : {"type" : "integer", "minimum" : 0},
"uniqueItems" : {"type" : "boolean", "default" : False},
"pattern" : {"type" : "string", "format" : "regex"},
"minLength" : {"type" : "integer", "minimum" : 0, "default" : 0},
"maxLength" : {"type" : "integer"},
"enum" : {"type" : "array", "minItems" : 1, "uniqueItems" : True},
"default" : {"type" : "any"},
"title" : {"type" : "string"},
"description" : {"type" : "string"},
"format" : {"type" : "string"},
"maxDecimal" : {"type" : "number", "minimum" : 0},
"divisibleBy" : {
"type" : "number",
"minimum" : 0,
"exclusiveMinimum" : True,
"default" : 1
},
"disallow" : {
"type" : ["string", "array"],
"items" : {"type" : ["string", {"$ref" : "#"}]},
"uniqueItems" : True
},
"extends" : {
"type" : [{"$ref" : "#"}, "array"],
"items" : {"$ref" : "#"},
"default" : {}
},
"id" : {"type" : "string", "format" : "uri"},
"$ref" : {"type" : "string", "format" : "uri"},
"$schema" : {"type" : "string", "format" : "uri"},
},
"dependencies" : {
"exclusiveMinimum" : "minimum", "exclusiveMaximum" : "maximum"
},
}
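# Hypothetical usage sketch for the draft 3 validator defined above (schema and
# instance assumed for illustration):
#   schema = {"type": "object",
#             "properties": {"age": {"type": "integer", "minimum": 0,
#                                    "required": True}}}
#   Draft3Validator(schema).validate({"age": 3})     # passes silently
#   Draft3Validator(schema).is_valid({"age": -1})    # -> False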
@validates("draft4")
class Draft4Validator(ValidatorMixin, _Draft34CommonMixin, object):
"""
A validator for JSON Schema draft 4.
"""
def validate_type(self, types, instance, schema):
types = _list(types)
if not any(self.is_type(instance, type) for type in types):
yield ValidationError(_types_msg(instance, types))
def validate_properties(self, properties, instance, schema):
if not self.is_type(instance, "object"):
return
for property, subschema in iteritems(properties):
if property in instance:
for error in self.descend(
instance[property],
subschema,
path=property,
schema_path=property,
):
yield error
def validate_required(self, required, instance, schema):
if not self.is_type(instance, "object"):
return
for property in required:
if property not in instance:
yield ValidationError("%r is a required property" % property)
def validate_minProperties(self, mP, instance, schema):
if self.is_type(instance, "object") and len(instance) < mP:
yield ValidationError("%r is too short" % (instance,))
def validate_maxProperties(self, mP, instance, schema):
if not self.is_type(instance, "object"):
return
if len(instance) > mP:
yield ValidationError("%r is too long" % (instance,))
def validate_allOf(self, allOf, instance, schema):
for index, subschema in enumerate(allOf):
for error in self.descend(instance, subschema, schema_path=index):
yield error
def validate_oneOf(self, oneOf, instance, schema):
subschemas = enumerate(oneOf)
all_errors = []
for index, subschema in subschemas:
errors = list(self.descend(instance, subschema, schema_path=index))
if not errors:
first_valid = subschema
break
all_errors.extend(errors)
else:
yield ValidationError(
"%r is not valid under any of the given schemas" % (instance,),
context=all_errors,
)
more_valid = [s for i, s in subschemas if self.is_valid(instance, s)]
if more_valid:
more_valid.append(first_valid)
reprs = ", ".join(repr(schema) for schema in more_valid)
yield ValidationError(
"%r is valid under each of %s" % (instance, reprs)
)
def validate_anyOf(self, anyOf, instance, schema):
all_errors = []
for index, subschema in enumerate(anyOf):
errors = list(self.descend(instance, subschema, schema_path=index))
if not errors:
break
all_errors.extend(errors)
else:
yield ValidationError(
"%r is not valid under any of the given schemas" % (instance,),
context=all_errors,
)
def validate_not(self, not_schema, instance, schema):
if self.is_valid(instance, not_schema):
yield ValidationError(
"%r is not allowed for %r" % (not_schema, instance)
)
validate_multipleOf = _Draft34CommonMixin._validate_multipleOf
META_SCHEMA = {
"id": "http://json-schema.org/draft-04/schema#",
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Core schema meta-schema",
"definitions": {
"schemaArray": {
"type": "array",
"minItems": 1,
"items": {"$ref": "#"}
},
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [
{"$ref": "#/definitions/positiveInteger"}, {"default": 0}
]
},
"simpleTypes": {
"enum": [
"array",
"boolean",
"integer",
"null",
"number",
"object",
"string",
]
},
"stringArray": {
"type": "array",
"items": {"type": "string"},
"minItems": 1,
"uniqueItems": True
}
},
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uri"
},
"$schema": {
"type": "string",
"format": "uri"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"default": {},
"multipleOf": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": True
},
"maximum": {
"type": "number"
},
"exclusiveMaximum": {
"type": "boolean",
"default": False
},
"minimum": {
"type": "number"
},
"exclusiveMinimum": {
"type": "boolean",
"default": False
},
"maxLength": {"$ref": "#/definitions/positiveInteger"},
"minLength": {"$ref": "#/definitions/positiveIntegerDefault0"},
"pattern": {
"type": "string",
"format": "regex"
},
"additionalItems": {
"anyOf": [
{"type": "boolean"},
{"$ref": "#"}
],
"default": {}
},
"items": {
"anyOf": [
{"$ref": "#"},
{"$ref": "#/definitions/schemaArray"}
],
"default": {}
},
"maxItems": {"$ref": "#/definitions/positiveInteger"},
"minItems": {"$ref": "#/definitions/positiveIntegerDefault0"},
"uniqueItems": {
"type": "boolean",
"default": False
},
"maxProperties": {"$ref": "#/definitions/positiveInteger"},
"minProperties": {"$ref": "#/definitions/positiveIntegerDefault0"},
"required": {"$ref": "#/definitions/stringArray"},
"additionalProperties": {
"anyOf": [
{"type": "boolean"},
{"$ref": "#"}
],
"default": {}
},
"definitions": {
"type": "object",
"additionalProperties": {"$ref": "#"},
"default": {}
},
"properties": {
"type": "object",
"additionalProperties": {"$ref": "#"},
"default": {}
},
"patternProperties": {
"type": "object",
"additionalProperties": {"$ref": "#"},
"default": {}
},
"dependencies": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"$ref": "#"},
{"$ref": "#/definitions/stringArray"}
]
}
},
"enum": {
"type": "array",
"minItems": 1,
"uniqueItems": True
},
"type": {
"anyOf": [
{"$ref": "#/definitions/simpleTypes"},
{
"type": "array",
"items": {"$ref": "#/definitions/simpleTypes"},
"minItems": 1,
"uniqueItems": True
}
]
},
"allOf": {"$ref": "#/definitions/schemaArray"},
"anyOf": {"$ref": "#/definitions/schemaArray"},
"oneOf": {"$ref": "#/definitions/schemaArray"},
"not": {"$ref": "#"}
},
"dependencies": {
"exclusiveMaximum": ["maximum"],
"exclusiveMinimum": ["minimum"]
},
"default": {}
}
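# Hypothetical usage sketch for the draft 4 validator defined above (schema and
# instance assumed for illustration):
#   schema = {"type": "array", "items": {"type": "number"}, "maxItems": 2}
#   errors = list(Draft4Validator(schema).iter_errors([1, "two", 3]))
#   [e.message for e in errors]   # one 'type' error and one 'maxItems' error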
class FormatChecker(object):
"""
A ``format`` property checker.
JSON Schema does not mandate that the ``format`` property actually do any
validation. If validation is desired however, instances of this class can
be hooked into validators to enable format validation.
:class:`FormatChecker` objects always return ``True`` when asked about
formats that they do not know how to validate.
To check a custom format using a function that takes an instance and
returns a ``bool``, use the :meth:`FormatChecker.checks` or
:meth:`FormatChecker.cls_checks` decorators.
:argument iterable formats: the known formats to validate. This argument
can be used to limit which formats will be used
during validation.
>>> checker = FormatChecker(formats=("date-time", "regex"))
"""
checkers = {}
def __init__(self, formats=None):
if formats is None:
| |
<filename>src/tests/core/test_scenario.py
from datetime import datetime
from unittest.mock import Mock, patch, call
from pytest import raises
from cicadad.core import scenario as scenario_module
from cicadad.services import datastore
from cicadad.util.constants import DOCKER_CONTAINER_MODE
@patch("cicadad.services.datastore.get_work")
def test_has_work(get_work_mock):
user_id = "abc"
address = "some address"
s = Mock()
get_work_mock.return_value = 1
uc = scenario_module.UserCommands(s, user_id, address)
assert uc.has_work()
assert uc.available_work == 0
get_work_mock.assert_called_with(user_id, address)
@patch("cicadad.services.datastore.get_work")
def test_has_more_work(get_work_mock):
user_id = "abc"
address = "some address"
s = Mock()
get_work_mock.return_value = 2
uc = scenario_module.UserCommands(s, user_id, address)
assert uc.has_work(500)
assert uc.has_work(500)
assert uc.available_work == 0
get_work_mock.assert_called_once_with(user_id, address)
@patch("cicadad.services.datastore.get_work")
def test_has_no_work(get_work_mock):
user_id = "abc"
address = "some address"
s = Mock()
get_work_mock.return_value = 0
uc = scenario_module.UserCommands(s, user_id, address)
assert not uc.has_work()
assert get_work_mock.call_count == 2
get_work_mock.assert_called_with(user_id, address)
def test_run_logs():
def test_fn():
print("foo")
user_id = "abc"
address = "some address"
s = Mock()
s.fn = test_fn
uc = scenario_module.UserCommands(s, user_id, address)
output, exception, logs = uc.run()
assert output is None
assert exception is None
assert logs == "foo\n"
def test_run_output():
def test_fn():
return 42
user_id = "abc"
address = "some address"
s = Mock()
s.fn = test_fn
uc = scenario_module.UserCommands(s, user_id, address)
output, exception, logs = uc.run()
assert output == 42
assert exception is None
assert logs == ""
def test_run_exception():
def test_fn():
raise ValueError("some error")
user_id = "abc"
address = "some address"
s = Mock()
s.fn = test_fn
uc = scenario_module.UserCommands(s, user_id, address)
output, exception, logs = uc.run(log_traceback=False)
assert output is None
assert isinstance(exception, ValueError)
assert str(exception) == "some error"
assert logs == ""
def test_scale_users_up():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
sc.start_users = Mock()
sc.stop_users = Mock()
sc.scale_users(10)
sc.start_users.assert_called_once_with(10)
def test_scale_users_down():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
sc.start_users = Mock()
sc.stop_users = Mock()
sc.num_users = 20
sc.scale_users(10)
sc.stop_users.assert_called_once_with(10)
@patch("cicadad.core.scenario.container_service.start_docker_container")
@patch("cicadad.core.scenario.datastore.add_user_event")
def test_start_users(add_user_event_mock, start_container_mock):
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
s.name = "s"
s.users_per_container = 3
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
sc.buffered_work = 10
sc.add_work = Mock()
sc.start_users(5)
assert start_container_mock.call_count == 2
assert add_user_event_mock.call_count == 2
sc.add_work.assert_called_once_with(10)
assert sc.buffered_work == 0
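# Editor note: in the test above, 5 requested users with users_per_container = 3
# implies ceil(5 / 3) = 2 containers, which is why both the container-start and
# user-event mocks are expected to be called exactly twice.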
def test_start_users_negative():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
with raises(ValueError, match="Must supply a positive number of users to start"):
sc.start_users(-1)
@patch("cicadad.services.container_service.stop_docker_container")
@patch("cicadad.services.datastore.add_user_event")
def test_stop_users(add_user_event_mock, stop_container_mock):
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
add_user_event_mock.return_value = None
stop_container_mock.return_value = None
sc.num_users = 4
sc.user_ids = ["1", "2", "3", "4"]
sc.user_locations = {"1": "a", "2": "a", "3": "b", "4": "b"}
sc.user_manager_counts = {"a": 2, "b": 2}
sc.stop_users(3)
assert sc.num_users == 1
assert sc.user_ids == ["4"]
def test_stop_users_too_many():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
with raises(ValueError, match="Scenario currently has less than 3 users"):
sc.stop_users(3)
def test_stop_users_negative():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
with raises(ValueError, match="Must supply a positive number of users to stop"):
sc.stop_users(-1)
@patch("cicadad.services.datastore.distribute_work")
def test_add_work(distribute_work_mock):
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
distribute_work_mock.return_value = None
sc.num_users = 3
sc.user_ids = ["1", "2", "3"]
sc.add_work(11)
assert distribute_work_mock.call_count == 1
def test_has_work_buffered():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
sc.add_work(10)
assert sc.buffered_work == 10
def test_aggregate_results():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
def aggregator_fn(previous, latest_results):
if previous is None:
p = 0
else:
p = previous
return p + sum(latest_results)
s.result_aggregator = aggregator_fn
assert sc.aggregate_results([1]) == 1
assert sc.aggregate_results([2]) == 3
assert sc.aggregate_results([3]) == 6
assert sc.aggregated_results == 6
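# Editor note: the custom aggregator above keeps a running sum, so the three
# calls accumulate 1, then 1 + 2 = 3, then 3 + 3 = 6, which is why the final
# aggregated_results value is 6.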
def test_aggregate_results_default():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
s.result_aggregator = None
assert sc.aggregate_results([datastore.Result(output=1)]) == 1
assert sc.aggregate_results([datastore.Result(output=2)]) == 2
assert sc.aggregate_results([datastore.Result(output=3)]) == 3
assert sc.aggregated_results == 3
def test_verify_results():
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
sc = scenario_module.ScenarioCommands(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
def result_verifier(results):
return ["error" for r in results if not r]
s.result_verifier = result_verifier
assert sc.verify_results([False, True]) == ["error"]
def test_filter_scenarios():
s1 = Mock()
s2 = Mock()
s1.tags = ["foo", "bar"]
s2.tags = ["fizz", "buzz"]
assert scenario_module.filter_scenarios_by_tag([s1, s2], ["fizz", "bizz"]) == [s2]
def test_filter_scenarios_empty():
s1 = Mock()
s2 = Mock()
assert scenario_module.filter_scenarios_by_tag([s1, s2], []) == [s1, s2]
@patch("cicadad.services.datastore.move_scenario_result")
@patch("cicadad.services.datastore.add_test_event")
@patch("cicadad.services.container_service.start_docker_container")
def test_test_runner(
start_container_mock, add_test_event_mock, move_scenario_event_mock
):
s1 = Mock()
s2 = Mock()
s3 = Mock()
cmd_foo = Mock()
cmd_foo.return_value = "xyz"
cmds = {"foo": cmd_foo}
s1.name = "s1"
s2.name = "s2"
s3.name = "s3"
s1.console_metric_displays = cmds
s2.console_metric_displays = None
s3.console_metric_displays = None
s1.dependencies = []
s2.dependencies = [s1]
s3.dependencies = [s2]
ss = [s1, s2, s3]
tags = []
tid = "test-123"
img = "foo"
n = "bar"
namespace = "default"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
r1 = {
"output": "42",
"exception": None,
"logs": "",
"timestamp": str(datetime.now()),
"time_taken": 3,
}
r2 = {
"output": None,
"exception": "some error",
"logs": "",
"timestamp": str(datetime.now()),
"time_taken": 3,
}
move_scenario_event_mock.side_effect = [r1, r2]
scenario_module.test_runner(
ss,
tags,
tid,
img,
n,
namespace,
datastore_addr,
container_service_addr,
container_mode,
)
assert add_test_event_mock.call_count == 8
assert start_container_mock.call_count == 2
@patch("cicadad.services.datastore.set_scenario_result")
def test_run_scenario(set_scenario_result_mock):
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
def load_model(sc, c):
sc.aggregated_results = 42
sc.stop_users = Mock()
s.load_model = load_model
s.output_transformer = None
scenario_module.scenario_runner(
s,
tid,
image,
network,
namespace,
sid,
datastore_addr,
container_service_addr,
container_mode,
ctx,
)
# Get kwargs of first call
assert set_scenario_result_mock.mock_calls[0][2]["output"] == 42
@patch("cicadad.services.datastore.set_scenario_result")
def test_run_scenario_result_transformer(set_scenario_result_mock):
s = Mock()
tid = "t-123"
image = "foo"
network = "bar"
namespace = "default"
sid = "abc"
datastore_addr = "fizz"
container_service_addr = "buzz"
container_mode = DOCKER_CONTAINER_MODE
ctx = {}
def load_model(sc, c):
sc.aggregated_results = 42
sc.stop_users = Mock()
def double_result(ar):
return ar * 2
s.load_model = load_model
s.output_transformer = double_result
scenario_module.scenario_runner(
s,
tid,
image,
network,
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator
from django.db.models.signals import post_save
from django.dispatch import receiver
import datetime
import ipaddress
from ipaddress import ip_address
import re
from simple_history.models import HistoricalRecords
from solo.models import SingletonModel
import django_filters
from django.contrib.auth.models import User, Group
def findips(start, end):
start = ip_address(start)
end = ip_address(end)
result = []
while start <= end:
result.append(str(start))
start += 1
return result
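# Editor note (worked example): findips walks the range inclusively, so
# findips("10.0.0.1", "10.0.0.3") returns
# ['10.0.0.1', '10.0.0.2', '10.0.0.3'].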
"""
Model AceConfig where is registered system configuration
"""
class AceConfig(SingletonModel):
org = models.TextField(u'Nome da organização/empresa.', max_length=2000, default='Empresa Exemplo', blank=True, null=True)
email_to = models.EmailField(u'Endereço de destinatário.', max_length=75,
default='<EMAIL>',
help_text="Notificações de alteração de ramais/senhas: - Ex: <em><EMAIL></em>.")
email_from = models.EmailField(u'Endereço de remetente.', max_length=75,
default='<EMAIL>',
help_text="Email de senhas e notificações de alteração de ramais/senhas - Ex: <em><EMAIL></em>.")
email_co = models.EmailField(u'Email - Cópia: Endereço de destino.', max_length=75, default='<EMAIL>',
help_text="Campo do email para envio de cópia de mensagem. Ex: <em><EMAIL></em>.")
password_email_text = models.TextField(u'Mensagem adicional (Texto puro).', max_length=6000,
default='Texto padrão do email - modo texto',
help_text="Insira aqui o texto adicional que deseja que seja enviado no email de senhas - texto puro")
password_email_html_text = models.TextField(u'Mensagem adicional (HTML)', max_length=10000,
default='Texto padrão do email - HTML',
help_text="Insira aqui o texto adicional que deseja que seja enviado no email de senhas - em HTML")
phonelist_results = models.IntegerField(u'Quantidade de telefones a serem exibidos na listagem', default=10)
passwordlist_results = models.IntegerField(u'Quantidade de senhas a serem exibidas na listagem', default=10)
hostlist_results = models.IntegerField(u'Quantidade de hosts a serem exibidos na listagem', default=10)
iplist_results = models.IntegerField(u'Quantidade de ips a serem exibidos na listagem', default=10)
networklist_results = models.IntegerField(u'Quantidade de redes a serem exibidas na listagem', default=10)
owneridlist_results = models.IntegerField(u'Quantidade de patrimonios a serem exibidas na listagem', default=10)
placelist_results = models.IntegerField(u'Quantidade de locais a serem exibidos na listagem', default=10)
patchpanel_results = models.IntegerField(u'Quantidade de patchpanels a serem exibidos na listagem', default=10)
netpointlist_results = models.IntegerField(u'Quantidade de pontos de rede a serem exibidos na listagem', default=10)
racklist_results = models.IntegerField(u'Quantidade de racks a serem exibidos na listagem', default=10)
sectorlist_results = models.IntegerField(u'Quantidade de setores a serem exibidos na listagem', default=10)
    servicelist_results = models.IntegerField(u'Quantidade de serviços a serem exibidos na listagem', default=10)
stacklist_results = models.IntegerField(u'Quantidade de pilhas de switch a serem exibidos na listagem', default=10)
switchlist_results = models.IntegerField(u'Quantidade de switches a serem exibidos na listagem', default=10)
userlist_results = models.IntegerField(u'Quantidade de usuários a serem exibidos na listagem', default=10)
printerlist_results = models.IntegerField(u'Quantidade de impressoras a serem exibidos na listagem', default=10)
default_host_group = models.ForeignKey(Group, verbose_name='Grupo padrão para equipamentos', null=True, blank=True, related_name="default_hostgroup")
default_printer_group = models.ForeignKey(Group, verbose_name='Grupo padrão para impressoras', null=True, blank=True)
def __unicode__(self):
return u"Ace - Configurações"
class Meta:
verbose_name = u"Ace - Configurações"
class Log(models.Model):
record_name = models.CharField(max_length=200,editable=False,)
event = models.CharField(max_length=30,editable=False,)
event_date = models.DateTimeField(u'Data de modificação',editable=False,blank=True,null=True)
record_type = models.CharField(max_length=50,editable=False,blank=True,null=True)
actor = models.CharField(max_length=200,editable=False,)
object_id = models.CharField(max_length=30,editable=False,null=True)
def __unicode__(self):
return self.record_name
class Person(User):
class Meta:
proxy = True
ordering = ('first_name', )
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
class Manufactorer(models.Model):
class Meta:
verbose_name = u'Fabricante'
verbose_name_plural = u'Fabricantes'
ordering = ['name']
name = models.CharField(max_length=200, verbose_name='Nome')
def __unicode__(self):
return self.name
class Sector(models.Model):
class Meta:
verbose_name = u'Setor/Departamento'
verbose_name_plural = u'Setores/Departamentos'
ordering = ['name']
name = models.CharField(max_length=200, verbose_name='Nome', unique=True)
comments = models.TextField(u'Observações', max_length=2000, blank=True, null=True)
def __unicode__(self):
return self.name
class Floor(models.Model):
class Meta:
verbose_name = u'Andar'
verbose_name_plural = u'Andares'
ordering = ['name']
name = models.CharField(max_length=20, unique=True, help_text=u"Ex: <em>1º</em> ou <em>1º andar</em>.")
def __unicode__(self):
return self.name
class NetpointManager(models.Manager):
use_for_related_fields = True
def get_queryset(self):
query_set = super(NetpointManager, self).get_queryset()
return query_set.extra(
select={
'_netpoint_total': 'SELECT COUNT(*) FROM ace_netpoint where ace_netpoint.place_id = ace_place.id',
},
)
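# Editor note: NetpointManager annotates each Place row with a raw-SQL subquery
# (exposed as _netpoint_total) that counts the network points referencing it,
# so listings can show the total without issuing one COUNT query per place.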
class Place(models.Model):
class Meta:
verbose_name = u'Local'
verbose_name_plural = u'Locais'
ordering = ['name']
SALA = 'sala'
GABINETE = 'gabinete'
SALA_TECNICA = 'sala-tecnica'
SALA_COFRE = 'sala-cofre'
SALA_SEGURA = 'sala-segura'
AUDITORIO = 'auditorio'
OUTROS = 'outros'
TIPO_LOCAL_CHOICES = (
('', '---------'),
(SALA, 'Sala'),
(GABINETE, 'Gabinete'),
(SALA_TECNICA, 'Sala Técnica'),
(SALA_COFRE, 'Sala Cofre'),
(SALA_SEGURA, 'Sala Segura'),
(AUDITORIO, 'Auditório'),
(OUTROS, 'Outros'),
)
    name = models.CharField('Ident/Num', max_length=200, help_text=u"Identificação do local. Ex: <em>100</em> ou <em>A</em>.")
placetype = models.CharField(max_length=20, choices=TIPO_LOCAL_CHOICES, verbose_name='Tipo')
floor = models.ForeignKey(Floor, verbose_name='Andar')
sector = models.ForeignKey(Sector, verbose_name='Setor/Departamento', blank=True, null=True)
comments = models.TextField(u'Observações', max_length=2000, blank=True, null=True)
    # validation: reject a duplicate (name, floor) combination
    def clean(self):
        try:
            f = self.floor
            duplicates = Place.objects.filter(name=self.name, floor=f).exclude(pk=self.id)
        except Exception:
            raise ValidationError(u"Verifique o preenchimento dos campos abaixo")
        if duplicates:
            raise ValidationError(u"Já existe este local neste andar.")
def __unicode__(self):
return u'%s %s (%s)' % (self.placetype, self.name, self.floor)
class RackManager(models.Manager):
def get_queryset(self):
query_set = super(RackManager, self).get_queryset()
return query_set.extra(
select={
'_patchpanel_total': 'SELECT COUNT(*) FROM ace_patchpanel where ace_patchpanel.rack_id = ace_rack.id',
},
)
class Rack(models.Model):
class Meta:
verbose_name = 'Rack'
verbose_name_plural = 'Racks'
ordering = ['name']
name = models.CharField(verbose_name=u'Rack/Armário ', max_length=50)
place = models.ForeignKey(Place, verbose_name=u'Localização', blank=True, null=True)
objects = RackManager()
def patchpanel_total(self):
return self._patchpanel_total or 0
def __unicode__(self):
# return self.nome, self.localizacao
return u'%s - %s' % (self.name, self.place)
# return self.nome
class Vlan(models.Model):
class Meta:
verbose_name = 'VLAN'
verbose_name_plural = 'VLANs'
ordering = ['vlanid']
vlanid = models.CharField(verbose_name=u'Id da VLAN', max_length=10, help_text="Ex: 1 , 5, 100", unique=True,
default="1")
name = models.CharField(verbose_name=u'Nome da VLAN', max_length=50, help_text="Ex: Vlan dos servidores")
comments = models.TextField(u'Observações', max_length=2000, blank=True, null=True)
    def __unicode__(self):
        return u'%s (%s)' % (self.vlanid, self.name)
class Network(models.Model):
class Meta:
verbose_name = 'Rede'
verbose_name_plural = 'Redes'
ordering = ['name']
name = models.CharField(verbose_name=u'Nome da rede', max_length=200, help_text="Ex: Rede 1º andar")
address = models.GenericIPAddressField(unique=True, help_text="Ex: 10.0.0.0", verbose_name=u"Endereço da rede")
mask = models.IntegerField(u'Máscara', help_text="Ex: 24. Para mascara /24", default=24)
gateway = models.GenericIPAddressField(help_text="Ex: 10.0.0.1", verbose_name=u"Gateway da rede", blank=True,
null=True)
dhcp = models.BooleanField('Usa DHCP?', default=False)
dhcp_start = models.GenericIPAddressField(u'Primeiro endereço do range DHCP', help_text="Ex: 10.0.0.1", blank=True,
null=True)
dhcp_end = models.GenericIPAddressField(u'Último endereço do range DHCP', help_text="Ex: 10.0.0.254", blank=True,
null=True)
# vlan = models.CharField(verbose_name=u'VLAN', max_length=200,blank=True, null=True)
vln = models.ForeignKey(Vlan, verbose_name='VLAN', blank=True, null=True)
comments = models.TextField(u'Observações', max_length=2000, blank=True, null=True)
def clean(self):
redeid = self.id
dhcp = self.dhcp
r = self.address
m = str(self.mask)
        if r is not None and m is not None:
rm = r + "/" + m
ip1 = self.dhcp_start
ip2 = self.dhcp_end
            # check that the network address and mask form a valid network
try:
ipaddress.ip_network(rm)
except:
raise ValidationError("Rede e máscara inválidos")
ipsrede = list(ipaddress.ip_network(rm).hosts())
#qtdipsrede = len(list(ipaddress.ip_network(rm).hosts()))
qtdipsrede = len(ipsrede)
fixos = Ip.objects.filter(network=redeid)
#qtdfixos = Ip.objects.filter(network=redeid).count()
qtdfixos = fixos.count()
            gw = self.gateway
            if gw is not None:
                try:
                    gateway_in_network = ipaddress.ip_address(gw) in ipaddress.ip_network(rm)
                except ValueError:
                    raise ValidationError("Por favor preencha o gateway.")
                if not gateway_in_network:
                    raise ValidationError("Gateway informado não pertence à rede selecionada. Por favor, corrija.")
if m == None:
raise ValidationError("Campo Máscara deve ser preenchido")
masc = int(m)
if masc > 31:
raise ValidationError("Valor de máscara incorreto")
if dhcp == False:
if ip1:
raise ValidationError("Esta rede não utiliza DHCP. Primeiro IP do range não deve ser cadastrado")
if ip2:
raise ValidationError("Esta rede não utiliza DHCP. Segundo IP do range não deve ser cadastrado")
if dhcp == True:
if not ip1:
raise ValidationError("Esta rede utiliza DHCP, primeiro ip do range não pode ser vazio")
if not ip2:
raise ValidationError("Esta rede utiliza DHCP, segundo ip do range não pode ser vazio")
if ip1:
verificaip1 = ipaddress.ip_address(ip1) in ipaddress.ip_network(rm)
if verificaip1 == False:
raise ValidationError(
"Primeiro endereço do range DHCP não pertence à rede selecionada. Por favor, corrija.")
if ip2:
verificaip2 = ipaddress.ip_address(ip2) in ipaddress.ip_network(rm)
if verificaip2 == False:
raise ValidationError(
"Segundo endereço do range DHCP não pertence à rede selecionada. Por favor, corrija.")
if ip1 and ip2 :
start = re.split(r'(\.|/)', ip1)
end = re.split(r'(\.|/)', ip2)
ipstart = int(start[-1])
ipend = int(end[-1])
verificatamanho = ipstart < ipend
rede = ipaddress.ip_network(rm)
broadcast = rede.broadcast_address
e = self.address
if verificatamanho == False:
raise ValidationError("Primeiro IP do range DHCP deve ser menor que o Segundo")
if (ipaddress.ip_address(ip1) == broadcast) or (ipaddress.ip_address(ip2) == broadcast):
raise ValidationError("Endereço de broadcast não pode fazer parte do range de DHCP")
if (ipaddress.ip_address(ip1) == ipaddress.ip_address(e)) or (
ipaddress.ip_address(ip2) == ipaddress.ip_address(e)):
raise ValidationError("Endereço da rede não pode fazer parte do range de DHCP")
start = re.split(r'(\.|/)', ip1)
end = re.split(r'(\.|/)', ip2)
ipstart = int(start[-1])
ipend = int(end[-1])
qtddhcp = len(range(ipstart, ipend + 1))
if (qtddhcp + qtdfixos) > qtdipsrede:
raise ValidationError(
"Não é possível alocar mais IPs para DHCP nesta rede - Todos os IPs já estão em uso")
dhcprange = findips(self.dhcp_start, self.dhcp_end)
listaips = []
for i in fixos:
| |
<reponame>nakedible/vpnease-l2tp
"""L2TP UI specific helpers."""
import os
import re
import datetime
import textwrap
import formal
from twisted.internet import reactor, protocol, defer, error
from twisted.python.util import mergeFunctionMetadata
from nevow import inevow, url, appserver, tags as T, static
from twisted.names import client, dns
from twisted.mail import smtp
from zope.interface import implements
from codebay.common import rdf
from codebay.common import datatypes
from codebay.common import logger
from codebay.nevow.formalutils import formalutils
from codebay.l2tpserver import db
from codebay.l2tpserver import helpers
from codebay.l2tpserver import runcommand
from codebay.l2tpserver import constants
from codebay.l2tpserver import versioninfo
from codebay.l2tpserver import licensemanager
from codebay.l2tpserver.rdfconfig import ns, ns_ui
run_command = runcommand.run_command
_log = logger.get('l2tpserver.webui.uihelpers')
# --------------------------------------------------------------------------
def saferender(default='', silent=False):
"""Decorator for Nevow render_* and macro_*.
Wraps execution, catches and logs exceptions, and returns a default
value if rendering fails. Hopefully minimizes code clutter in
renderers.
"""
def _f(f):
def g(*args, **kw):
try:
return f(*args, **kw)
except:
if not silent:
_log.exception('render/macro failed, returning default value: \'%s\'' % default)
return default
mergeFunctionMetadata(f, g)
return g
return _f
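# Editor note (hedged usage sketch, not in the original source): saferender is
# meant to wrap Nevow render_*/macro_* methods, e.g.
#
#     @saferender(default='n/a')
#     def render_uptime(self, ctx, data):        # hypothetical renderer
#         return helper_that_may_raise()         # hypothetical helper
#
# so a failing renderer logs the exception and shows 'n/a' instead of breaking
# the whole page.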
# --------------------------------------------------------------------------
def _set_expiry_headers(ctx, exp_time):
request = inevow.IRequest(ctx)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21
expires = datetime.datetime.utcnow() + datetime.timedelta(0, int(exp_time), 0)
request.setHeader('expires', smtp.rfc822date(expires.utctimetuple()))
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.3
request.setHeader('cache-control', 'max-age=%d' % int(exp_time))
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
pass
def _set_nocache_headers(ctx):
request = inevow.IRequest(ctx)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.1
request.setHeader('cache-control', 'no-cache')
# extra safety
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21
# "HTTP/1.1 clients and caches MUST treat other invalid date formats,
# especially including the value "0", as in the past (i.e., "already expired")."
request.setHeader('expires', '-1')
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
request.setHeader('pragma', 'no-cache')
class ExpiringFile(static.File):
"""Nevow static.File but with expiration headers.
Default expiration time is 1 hour.
"""
expiration_time = 3600.0
def renderHTTP(self, ctx):
_set_expiry_headers(ctx, self.expiration_time)
return static.File.renderHTTP(self, ctx)
class UncachedFile(static.File):
"""Nevow static.File but without caching."""
def renderHTTP(self, ctx):
_set_nocache_headers(ctx)
return static.File.renderHTTP(self, ctx)
class ExpiringData(static.Data):
"""Nevow static.Data but with expiration headers.
Default expiration time is 1 hour.
"""
expiration_time = 3600.0
def renderHTTP(self, ctx):
_set_expiry_headers(ctx, self.expiration_time)
return static.Data.renderHTTP(self, ctx)
class UncachedData(static.Data):
"""Nevow static.Data but without caching."""
def renderHTTP(self, ctx):
_set_nocache_headers(ctx)
return static.Data.renderHTTP(self, ctx)
# --------------------------------------------------------------------------
class TimezoneHelper:
def __init__(self):
self.tz = None
def get_timezones(self):
import pytz
# XXX: common should be enough for everyone..
#return pytz.all_timezones
return pytz.common_timezones
def set_timezone(self, tzname):
self.tz = tzname
def get_timezone(self):
return self.tz
def render_datetime(self, dt, show_seconds=False, show_timezone=False):
if self.tz is None:
raise Exception('timezone not set')
fmt = '%Y-%m-%d %H:%M'
if show_seconds:
fmt = fmt + ':%S'
if show_timezone:
fmt = fmt + ' %Z%z'
loc_dt = convert_datetime_to_local_datetime(dt, self.tz)
return loc_dt.strftime(fmt)
def convert_datetime_to_local_datetime(self, dt, output_naive=False):
return convert_datetime_to_local_datetime(dt, self.tz, output_naive=output_naive)
def get_timezone_helper():
"""Get a TimezoneHelper initialized with current GUI settings."""
# XXX: timezone could be user-specific - change the lookup here if this is done
tz = TimezoneHelper()
try:
root = db.get_db().getRoot()
tzname = root.getS(ns_ui.uiConfig, rdf.Type(ns_ui.UiConfig)).getS(ns_ui.timezone, rdf.String)
tz.set_timezone(tzname)
except:
_log.warning('Cannot figure out timezone - using %s' % constants.DEFAULT_TIMEZONE)
tz.set_timezone(constants.DEFAULT_TIMEZONE)
return tz
def render_datetime(dt, show_seconds=False, show_timezone=False):
return get_timezone_helper().render_datetime(dt, show_seconds=show_seconds, show_timezone=show_timezone)
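# Editor note: with the format string used above, render_datetime() yields
# strings like '2018-04-02 13:45', optionally extended with ':SS' and the
# timezone name/offset when show_seconds / show_timezone are set.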
# --------------------------------------------------------------------------
# Shared form for user information pages
class UserInformationForm:
next_uri = None
prev_uri = None
next_label = 'Next'
prev_label = 'Back'
def user_information_form_next_uri(self, ctx):
request = inevow.IRequest(ctx)
return request.URLPath().sibling(self.next_uri)
def user_information_form_prev_uri(self, ctx):
request = inevow.IRequest(ctx)
return request.URLPath().sibling(self.prev_uri)
def form_user_information_form(self, ctx):
def _submitted_next(ctx, form, data):
fda = formalutils.FormDataAccessor(form, [], ctx)
fda.finalize_validation()
request = inevow.IRequest(ctx)
request.redirect(self.user_information_form_next_uri(ctx))
request.finish()
return ''
def _submitted_prev(ctx, form, data):
fda = formalutils.FormDataAccessor(form, [], ctx)
fda.finalize_validation()
request = inevow.IRequest(ctx)
request.redirect(self.user_information_form_prev_uri(ctx))
request.finish()
return ''
form = formal.Form()
sg = formalutils.SubmitFieldGroup('buttons')
if self.prev_uri is not None:
form.addAction(_submitted_prev, name='submitprev', validate=False)
sg.add(formalutils.SubmitField('submitprev', formal.String(), label=self.prev_label))
if self.next_uri is not None:
form.addAction(_submitted_next, name='submitnext', validate=False)
sg.add(formalutils.SubmitField('submitnext', formal.String(), label=self.next_label))
form.add(sg)
return form
# --------------------------------------------------------------------------
class RadauthuserProcessProtocol(protocol.ProcessProtocol):
def __init__(self, callback):
_log.info('RadauthuserProcessProtocol: __init__')
self.callback = callback
self.stdout = ''
self.badresp_re = re.compile(r'^BADRESP$')
self.error_re = re.compile(r'^ERROR$')
self.ok_re = re.compile(r'^OK$')
self.timeout_re = re.compile(r'^TIMEOUT$')
self.unknown_re = re.compile(r'^UNKNOWN$')
self.avp_re = re.compile(r'^AVP:\s(.*?):(.*?):(.*?):(.*?):(.*?)\s*$')
def outReceived(self, data):
_log.info('RadauthuserProcessProtocol: outReceived: %s' % data)
self.stdout += data
def processEnded(self, reason):
_log.info('RadauthuserProcessProtocol: processEnded: %s' % reason)
auth_ok = False
admin_privs = False
for line in self.stdout.split('\n'):
line = line.strip()
try:
m = self.badresp_re.match(line)
if m is not None:
pass
m = self.error_re.match(line)
if m is not None:
pass
m = self.ok_re.match(line)
if m is not None:
auth_ok = True
m = self.timeout_re.match(line)
if m is not None:
pass
m = self.unknown_re.match(line)
if m is not None:
pass
m = self.avp_re.match(line)
if m is not None:
avp_name, avp_attribute, avp_type, avp_lvalue, avp_strvalue = m.groups()
dec_name = avp_name.decode('hex')
dec_attribute = avp_attribute.decode('hex')
dec_type = avp_type.decode('hex')
dec_lvalue = avp_lvalue.decode('hex')
dec_strvalue = avp_strvalue.decode('hex')
_log.debug('parsed avp: name=%s, attribute=%s, type=%s, lvalue=%s, strvalue=%s' % (dec_name, dec_attribute, dec_type, dec_lvalue, dec_strvalue))
# NB: we're dependent on the radiusclient dictionary to get this name
# when parsing a vendor specific extension (see dictionary.vpnease).
if dec_name == 'VE-User-Administrator-Privileges':
if dec_lvalue == '\x00\x00\x00\x01': # dec 1
admin_privs = True
except:
_log.exception('failed to parse radauthuser output line: %s' % line)
self.callback(auth_ok, admin_privs)
def radius_authenticate(username, password):
d = defer.Deferred()
def _auth_done(auth_ok, admin_privs):
_log.debug('radius_authenticate: _auth_done -> %s, admin %s' % (auth_ok, admin_privs))
d.callback((auth_ok, admin_privs))
proc = RadauthuserProcessProtocol(_auth_done)
cmd = constants.CMD_RADAUTHUSER
args = [cmd, username, password]
reactor.spawnProcess(proc, executable=cmd, args=args, usePTY=1)
return d
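# Editor note (hedged usage sketch, not in the original source): callers are
# expected to consume the Deferred returned above, e.g.
#
#     d = radius_authenticate(username, password)
#
#     def _on_auth(result):
#         auth_ok, admin_privs = result
#         # ... grant or deny access (hypothetical follow-up) ...
#
#     d.addCallback(_on_auth)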
# --------------------------------------------------------------------------
class UserAgentHelpers:
"""Helpers to deduce platform from User-Agent.
This class attempts to produce a good guess of the platform (e.g. Windows
XP, Windows Vista, x86 vs x64) based on the User-Agent string. This is
fundamentally impossible, because browsers are free to fill User-Agent as
they please. We try to work as well as possible with IE and FF.
See:
* http://en.wikipedia.org/wiki/User_agent
* http://www.user-agents.org/
* http://msdn2.microsoft.com/en-us/library/ms537503.aspx
* http://blogs.msdn.com/astebner/archive/2007/02/05/how-to-workaround-install-problems-with-msn-remote-record-on-windows-vista-x64.aspx
* http://forums.mozillazine.org/viewtopic.php?t=563404
Safari / OSX:
* http://developer.apple.com/internet/safari/faq.html
* http://developer.apple.com/internet/safari/uamatrix.html
In short, it seems that OSX version cannot be detected based on
User-Agent string. Hence, no selection is applied for now.
"""
def detect_platform_from_user_agent(self, useragent):
platform = None
architecture = None
if ('Windows NT 6.0' in useragent) or ('Windows Vista' in useragent):
platform = 'vista'
if ('x64' in useragent) or ('Win64' in useragent) or ('WOW64' in useragent):
# WOW64 = 32-bit browser in a 64-bit platform; we still detect as 64-bit OS
architecture = 'x64'
else:
architecture = 'x86'
elif ('Windows NT 5.1' in useragent) or ('Windows XP' in useragent):
platform = 'winxp'
# this check is guesswork
if ('x64' in useragent) or ('Win64' in useragent) or ('WOW64' in useragent):
# WOW64 = 32-bit browser in a 64-bit platform; we still detect as 64-bit OS
architecture = 'x64'
else:
architecture = 'x86'
elif 'Windows NT 5.2' in useragent:
# winxp on x64 seems to use Windows NT 5.2 (unfortunately, so does Windows Server 2003)
platform = 'winxp'
architecture = 'x64'
elif ('Windows NT 5.0' in useragent) or ('Windows NT 5.01' in useragent) or ('Windows 2000' in useragent):
# win2k does not have an x64 version
platform = 'win2k'
architecture = 'x86'
return {'platform': platform, 'architecture': architecture}
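    # Editor note (worked example): a User-Agent containing
    # 'Windows NT 6.0; WOW64' is reported as
    # {'platform': 'vista', 'architecture': 'x64'}, since WOW64 indicates a
    # 32-bit browser on a 64-bit OS; an unrecognised string yields
    # {'platform': None, 'architecture': None}.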
@saferender()
def render_platform_from_user_agent(self, ctx, data):
request = inevow.IRequest(ctx)
useragent = request.getHeader('User-Agent')
return self.detect_platform_from_user_agent(useragent)
def get_platform_and_architecture_dropdown(self, useragent=None):
options = [
('vista-32', 'Windows Vista (32-bit)'),
('vista-64', 'Windows Vista (64-bit)'),
('winxp-32', 'Windows XP (32-bit)'),
('winxp-64', 'Windows XP (64-bit)'),
('win2k-32', 'Windows 2000 (32-bit)'),
# ('osx105-any', 'Mac Os X 10.5 (Leopard)'),
# ('osx104-any', 'Mac Os X 10.4 (Tiger)'),
]
selected = ''
if useragent is not None:
pi = self.detect_platform_from_user_agent(useragent)
tmp = (pi['platform'], pi['architecture'])
for platform, architecture, selvalue in [
('winxp', 'x86', 'winxp-32'),
('winxp', 'x64', 'winxp-64'),
('vista', 'x86', 'vista-32'),
('vista', 'x64', 'vista-64'),
('win2k', 'x86', 'win2k-32'),
]:
if tmp == (platform, architecture):
selected = selvalue
break
fld = formalutils.Field('platform_and_architecture', formal.String(required=True),
formal.widgetFactory(formal.SelectChoice, options=options),
label='Operating system')
return fld, selected
# --------------------------------------------------------------------------
# NB: requires UserAgentHelpers and CommonPage
class AutoconfigureHelpers:
def form_autoconfigure(self, ctx):
request = inevow.IRequest(ctx)
useragent = request.getHeader('User-Agent')
if useragent is None:
useragent = ''
form = formal.Form()
g = formalutils.CollapsibleGroup('platform', label='Autoconfiguration')
dropdown, selected = self.get_platform_and_architecture_dropdown(useragent)
g.add(dropdown)
sg = formalutils.SubmitFieldGroup('buttons')
sg.add(formalutils.SubmitField('submit', formal.String(), label='Autoconfigure'))
g.add(sg)
form.add(g)
form.addAction(self.submitted_autoconfigure, name='submit', validate=False)
fda = formalutils.FormDataAccessor(form, [], ctx)
fda = fda.descend('platform')
fda['platform_and_architecture'] = selected
return form
def render_form_autoconfigure_onsubmit_adder(self, ctx, data):
# Javascript code to add an onsubmit handler
#
# Quite dirty approach:
# * Followup page depends on selected value
# * Form and input names are fixed, based on prior Nevow knowledge
jscode = textwrap.dedent("""
<script type="text/javascript">
// <![CDATA[
function _autoconfig_onsubmit() {
var n = document.getElementById("autoconfigure-platform-platform_and_architecture");
if ((n == null) || (n == undefined)) {
return true;
}
var plat = n.value;
var | |
<reponame>moazzamwaheed2017/carparkapi<gh_stars>0
from arcgis._impl.common._mixins import PropertyMap
from arcgis.features import FeatureLayer, FeatureLayerCollection
from arcgis.features._version import Version, VersionManager
########################################################################
class ParcelFabricManager(object):
"""
The Parcel Fabric Server is responsible for exposing parcel management
capabilities to support a variety of workflows from different clients
and systems.
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
url Required String. The URI to the service endpoint.
-------------------- --------------------------------------------------------------------
gis Required GIS. The enterprise connection.
-------------------- --------------------------------------------------------------------
version Required Version. This is the version object where the modification
will occur.
-------------------- --------------------------------------------------------------------
flc Required FeatureLayerCollection. This is the parent container for
ParcelFabricManager.
==================== ====================================================================
"""
_con = None
_flc = None
_gis = None
_url = None
_version = None
_properties = None
#----------------------------------------------------------------------
def __init__(self,
url,
gis,
version,
flc):
"""Constructor"""
self._url = url
self._gis = gis
self._con = gis._portal.con
self._version = version
self._flc = flc
#----------------------------------------------------------------------
def __str__(self):
return "<ParcelFabricManager @ %s>" % self._url
#----------------------------------------------------------------------
def __repr__(self):
return self.__str__()
#----------------------------------------------------------------------
def __enter__(self):
return self
#----------------------------------------------------------------------
def __exit__(self, type, value, traceback):
return
#----------------------------------------------------------------------
@property
def layer(self):
"""returns the Parcel Layer for the service"""
if "controllerDatasetLayers" in self._flc.properties and \
"parcelLayerId" in self._flc.properties.controllerDatasetLayers:
url = "%s/%s" % (self._flc.url,
self._flc.properties.controllerDatasetLayers.parcelLayerId)
return FeatureLayer(url=url, gis=self._gis)
return None
#----------------------------------------------------------------------
@property
def properties(self):
"""returns the properties of the service"""
if self._properties is None:
res = self._con.get(self._url, {'f':'json'})
self._properties = PropertyMap(res)
return self._properties
#----------------------------------------------------------------------
def build(self,
extent=None,
moment=None,
return_errors=False):
"""
A `build` will fix known parcel fabric errors.
        For example, if a parcel polygon exists without lines, build will
        construct the missing lines; if lines exist without a polygon, the
        polygon row(s) are created. When constructing these objects, build
        attributes the related keys as appropriate. Build also maintains
        `lineage` and `record` features. The parcel fabric must have
        sufficient information for build to work correctly, i.e. a source
        reference document and connected lines.
Build provides options to increase performance. The process can just
work on specific parcels, geometry types or only respond to parcel point
movement in the case of an adjustment.
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
extent Optional Envelope. The extent to build.
Syntax: {"xmin":X min,"ymin": y min, "xmax": x max, "ymax": y max,
"spatialReference": <wkt of spatial reference>}
-------------------- --------------------------------------------------------------------
moment Optional String. This should only be specified by the client when
they do not want to use the current moment
-------------------- --------------------------------------------------------------------
return_errors Optional Boolean. If True, a verbose response will be given if errors
                             occurred. The default is False.
==================== ====================================================================
:return: Boolean
"""
url = "{base}/build".format(base=self._url)
params = {
"gdbVersion" : self._version.properties.versionName,
"sessionId" : self._version._guid,
"moment" : moment,
"buildExtent" : extent,
"returnErrors" : return_errors,
"f": "json"
}
return self._con.post(url, params)
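    # Editor note (hedged usage sketch, not in the original source): a typical
    # call runs build over the whole fabric for the current branch version,
    # e.g.
    #
    #     result = manager.build(return_errors=True)   # 'manager' is an assumed
    #                                                   # ParcelFabricManager
    #
    # The return value is the parsed JSON response from the REST endpoint.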
#----------------------------------------------------------------------
def clip(self,
parent_parcels,
clip_record=None,
clipping_parcels=None,
geometry=None,
moment=None,
):
"""
Clip cuts a new child parcel into existing parent parcels. Commonly
        it retires the parent parcel(s) it cuts into to generate a remainder
child parcel. This type of split is often part of a `parcel split
metes and bounds` record driven workflow.
======================= ====================================================================
**Argument** **Description**
----------------------- --------------------------------------------------------------------
parent_parcels parent parcels that will be clipped into.
Syntax: parentParcels= <parcel (guid)+layer (name)...>
----------------------- --------------------------------------------------------------------
clip_record Optional String. It is the GUID for the active legal record.
----------------------- --------------------------------------------------------------------
clipping_parcels Optional List. A list of child parcels that will be used to clip
into the parent parcels. Parcel lineage is created if the child
'clipping_parcels' and the parcels being clipped are of the same
parcel type.
                                Syntax: clippingParcels= < id : parcel guid, layerId: <layer id>...>
Example:
[{"id":"{D01D3F47-5FE2-4E39-8C07-E356B46DBC78}","layerId":"16"}]
**Either clipping_parcels or geometry is required.**
----------------------- --------------------------------------------------------------------
        geometry                Optional Polygon. Allows for clipping a parcel based on geometry instead of
'clippingParcels' geometry. No parcel lineage is created.
**Either clipping_parcels or geometry is required.**
----------------------- --------------------------------------------------------------------
moment Optional String. This should only be specified by the client when
they do not want to use the current moment
======================= ====================================================================
:returns: Dictionary
"""
gdb_version = self._version.properties.versionName
session_id = self._version._guid
url = "{base}/clip".format(base=self._url)
params = {
"gdbVersion": gdb_version,
"sessionId": session_id,
"parentParcels": parent_parcels,
"moment" : moment,
"clipRecord" : clip_record,
"clippingParcels" : clipping_parcels,
"clippingGeometry" : geometry,
"f": "json"
}
return self._con.post(url, params)
#----------------------------------------------------------------------
def merge(self,
parent_parcels,
target_parcel_type,
attribute_overrides=None,
child_name=None,
default_area_unit=None,
merge_record=None,
merge_into=None,
moment=None):
"""
        Merge combines 2 or more parent parcels into one new child parcel. Merge
sums up legal areas of parent parcels to the new child parcel legal
area (using default area units as dictated by client). The child parcel
        lines are composed from the outer boundaries of the parent parcels.
Merge can create multipart parcels as well as proportion lines (partial
overlap of parent parcels). Record footprint is updated to match the
child parcel.
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
parent_parcels Required String. It is the parcel(guid)+layer(name) identifiers to
merge.
-------------------- --------------------------------------------------------------------
target_parcel_type Required String. Layer where parcel is merged to. History is
created when parents and child are of the same parcel type
-------------------- --------------------------------------------------------------------
attribute_overrides Optional List. A list of attributes to set on the child parcel, if
they exist. Pairs of field name and value.
Syntax: attributeOverrides= [{ "type":"PropertySet","propertySetItems":[<field name>,<field value>]}]
* to set subtype, include subtype value in this list.
-------------------- --------------------------------------------------------------------
        child_name           Optional String. A description of the child layer.
-------------------- --------------------------------------------------------------------
default_area_unit Optional String. The area units of the child parcel.
-------------------- --------------------------------------------------------------------
merge_record Optional String. Record identifier (guid). If missing, no history
is created.
-------------------- --------------------------------------------------------------------
merge_into Optional String. A parcel identifier (guid). Invalid to have a
record id.
-------------------- --------------------------------------------------------------------
moment Optional String. This parameter represents the session moment (the
default is the version current moment). This should only be
specified by the client when they do not want to use the current
moment.
==================== ====================================================================
:return: Dictionary
"""
gdb_version = self._version.properties.versionName
session_id = self._version._guid
url = "{base}/merge".format(base=self._url)
params = {
"gdbVersion" : gdb_version,
"sessionId" : session_id,
"parentParcels" : parent_parcels,
"mergeRecord" : merge_record,
"moment" : moment,
"targetParcelType" : target_parcel_type,
"mergeInto" : merge_into,
"childName" : child_name,
"defaultAreaUnit" : default_area_unit,
"attributeOverrides" : attribute_overrides,
"f": "json"
}
return self._con.post(url, params)
#----------------------------------------------------------------------
def copy_lines_to_parcel_type(self,
parent_parcels,
record,
target_type,
moment=None,
mark_historic=False,
use_source_attributes=False,
attribute_overrides=None):
"""
Copy lines to parcel type is used when the construction of the
child parcel is based on parent parcel geometry. It creates a
copy of the parent parcels lines that the user can modify (insert,
delete, update) before they build the child parcels. If the source
parcel type and the target parcel type are identical (common)
parcel lineage is created.
======================= ====================================================================
**Argument** **Description**
----------------------- --------------------------------------------------------------------
parent_parcels Required String. Parcel parcels from which lines are copied.
----------------------- --------------------------------------------------------------------
record Required String. The unique identifier (guid) of the active legal
record.
----------------------- --------------------------------------------------------------------
target_type Required String. The target parcel layer to which the lines will be
copied to.
----------------------- --------------------------------------------------------------------
moment Optional String. This parameter represents the session moment (the
default is the version current moment). This should only be
specified by the client when they do not want to use the current
moment.
----------------------- --------------------------------------------------------------------
mark_historic Optional Boolean. Mark the parent parcels historic. The default is
False.
----------------------- --------------------------------------------------------------------
use_source_attributes Optional Boolean. If the source and the target line schema match,
attributes from the parent parcel lines will be copied to the new
child parcel lines when it is set to True. The default is False.
----------------------- --------------------------------------------------------------------
attribute_overrides Optional Dictionary. To set fields on the child parcel lines with a
specific value. Uses a key/value pair of FieldName/Value.
Example:
{'type' : "PropertySet", "propertySetItems" : []}
======================= ====================================================================
:returns: boolean
"""
gdb_version = self._version.properties.versionName
session_id = self._version._guid
url = "{base}/copyLinesToParcelType".format(base=self._url)
params = {
"gdbVersion": gdb_version,
"sessionId": session_id,
"parentFeatures": parent_parcels,
"record" : record,
"markParentAsHistoric" : mark_historic,
"useSourceAttributes": use_source_attributes,
"targetParcelType" : target_type,
"attributeOverrides": attribute_overrides,
"moment" : moment,
"f": "json"
}
return self._con.post(url, params)
#----------------------------------------------------------------------
def change_type(self,
parcels,
target_type,
parcel_subtype=0,
moment=None):
"""
Changes a set of parcels to a new parcel type. It creates new
polygons and lines and deletes them from the source type. This
is used when a parcel was | |
"""
Python Lexical Analyser
Regular Expressions
"""
import array
import string
import types
from sys import maxsize
from plex import errors
#
# Constants
#
BOL = 'bol'
EOL = 'eol'
EOF = 'eof'
nl_code = ord('\n')
#
# Helper functions
#
def chars_to_ranges(s):
"""
Return a list of character codes consisting of pairs
[code1a, code1b, code2a, code2b,...] which cover all
the characters in |s|.
"""
char_list = list(s)
char_list.sort()
i = 0
n = len(char_list)
result = []
while i < n:
code1 = ord(char_list[i])
code2 = code1 + 1
i = i + 1
while i < n and code2 >= ord(char_list[i]):
code2 = code2 + 1
i = i + 1
result.append(code1)
result.append(code2)
return result
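# Editor note (worked example): chars_to_ranges("abcx") sorts the characters,
# folds the consecutive codes for 'a'..'c' into one range and leaves 'x' alone,
# returning [97, 100, 120, 121], i.e. the half-open ranges [97,100) and
# [120,121).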
def uppercase_range(code1, code2):
"""
If the range of characters from code1 to code2-1 includes any
lower case letters, return the corresponding upper case range.
"""
code3 = max(code1, ord('a'))
code4 = min(code2, ord('z') + 1)
if code3 < code4:
d = ord('A') - ord('a')
return (code3 + d, code4 + d)
else:
return None
def lowercase_range(code1, code2):
"""
If the range of characters from code1 to code2-1 includes any
upper case letters, return the corresponding lower case range.
"""
code3 = max(code1, ord('A'))
code4 = min(code2, ord('Z') + 1)
if code3 < code4:
d = ord('a') - ord('A')
return (code3 + d, code4 + d)
else:
return None
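# Editor note (worked example): uppercase_range(ord('a'), ord('e')) covers the
# lower-case letters a-d and therefore returns (65, 69), the matching
# upper-case range A-D; lowercase_range is the mirror image and returns None
# when the input range contains no upper-case letters.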
def CodeRanges(code_list):
"""
Given a list of codes as returned by chars_to_ranges, return
an RE which will match a character in any of the ranges.
"""
re_list = []
for i in range(0, len(code_list), 2):
re_list.append(CodeRange(code_list[i], code_list[i + 1]))
return Alt(*tuple(re_list))
def CodeRange(code1, code2):
"""
CodeRange(code1, code2) is an RE which matches any character
with a code |c| in the range |code1| <= |c| < |code2|.
"""
if code1 <= nl_code < code2:
return Alt(RawCodeRange(code1, nl_code),
RawNewline,
RawCodeRange(nl_code + 1, code2))
else:
return RawCodeRange(code1, code2)
#
# Abstract classes
#
class RE:
"""RE is the base class for regular expression constructors.
The following operators are defined on REs:
re1 + re2 is an RE which matches |re1| followed by |re2|
re1 | re2 is an RE which matches either |re1| or |re2|
"""
nullable = 1 # True if this RE can match 0 input symbols
match_nl = 1 # True if this RE can match a string ending with '\n'
str = None # Set to a string to override the class's __str__ result
def build_machine(self, machine, initial_state, final_state,
match_bol, nocase):
"""
This method should add states to |machine| to implement this
RE, starting at |initial_state| and ending at |final_state|.
If |match_bol| is true, the RE must be able to match at the
beginning of a line. If nocase is true, upper and lower case
letters should be treated as equivalent.
"""
        raise NotImplementedError(
            "%s.build_machine not implemented" %
            self.__class__.__name__)
def build_opt(self, m, initial_state, c):
"""
        Given a state |initial_state| of machine |m|, return a new state
        reachable from it on character |c| or epsilon.
"""
s = m.new_state()
initial_state.link_to(s)
initial_state.add_transition(c, s)
return s
def __add__(self, other):
return Seq(self, other)
def __or__(self, other):
return Alt(self, other)
def __str__(self):
if self.str:
return self.str
else:
return self.calc_str()
def check_re(self, num, value):
if not isinstance(value, RE):
self.wrong_type(num, value, "Plex.RE instance")
def check_string(self, num, value):
if type(value) != type(''):
self.wrong_type(num, value, "string")
def check_char(self, num, value):
self.check_string(num, value)
if len(value) != 1:
raise errors.PlexValueError(
"Invalid value for argument %d of Plex.%s."
"Expected a string of length 1, got: %s" % (
num, self.__class__.__name__, repr(value)))
def wrong_type(self, num, value, expected):
if type(value) == types.InstanceType:
got = "%s.%s instance" % (
value.__class__.__module__, value.__class__.__name__)
else:
got = type(value).__name__
raise errors.PlexTypeError("Invalid type for argument %d of Plex.%s "
"(expected %s, got %s" % (
num, self.__class__.__name__, expected, got))
#
# Primitive RE constructors
# -------------------------
#
# These are the basic REs from which all others are built.
#
## class Char(RE):
## """
## Char(c) is an RE which matches the character |c|.
## """
## nullable = 0
## def __init__(self, char):
## self.char = char
## self.match_nl = char == '\n'
## def build_machine(self, m, initial_state, final_state, match_bol, nocase):
## c = self.char
## if match_bol and c <> BOL:
## s1 = self.build_opt(m, initial_state, BOL)
## else:
## s1 = initial_state
## if c == '\n' or c == EOF:
## s1 = self.build_opt(m, s1, EOL)
## if len(c) == 1:
## code = ord(self.char)
## s1.add_transition((code, code+1), final_state)
## if nocase and is_letter_code(code):
## code2 = other_case_code(code)
## s1.add_transition((code2, code2+1), final_state)
## else:
## s1.add_transition(c, final_state)
## def calc_str(self):
## return "Char(%s)" % repr(self.char)
def Char(c):
"""
Char(c) is an RE which matches the character |c|.
"""
if len(c) == 1:
result = CodeRange(ord(c), ord(c) + 1)
else:
result = SpecialSymbol(c)
result.str = "Char(%s)" % repr(c)
return result
class RawCodeRange(RE):
"""
RawCodeRange(code1, code2) is a low-level RE which matches any character
with a code |c| in the range |code1| <= |c| < |code2|, where the range
does not include newline. For internal use only.
"""
nullable = 0
match_nl = 0
range = None # (code, code)
uppercase_range = None # (code, code) or None
lowercase_range = None # (code, code) or None
def __init__(self, code1, code2):
self.range = (code1, code2)
self.uppercase_range = uppercase_range(code1, code2)
self.lowercase_range = lowercase_range(code1, code2)
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
if match_bol:
initial_state = self.build_opt(m, initial_state, BOL)
initial_state.add_transition(self.range, final_state)
if nocase:
if self.uppercase_range:
initial_state.add_transition(self.uppercase_range, final_state)
if self.lowercase_range:
initial_state.add_transition(self.lowercase_range, final_state)
def calc_str(self):
return "CodeRange(%d,%d)" % (self.code1, self.code2)
class _RawNewline(RE):
"""
RawNewline is a low-level RE which matches a newline character.
For internal use only.
"""
nullable = 0
match_nl = 1
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
if match_bol:
initial_state = self.build_opt(m, initial_state, BOL)
s = self.build_opt(m, initial_state, EOL)
s.add_transition((nl_code, nl_code + 1), final_state)
RawNewline = _RawNewline()
class SpecialSymbol(RE):
"""
SpecialSymbol(sym) is an RE which matches the special input
symbol |sym|, which is one of BOL, EOL or EOF.
"""
nullable = 0
match_nl = 0
sym = None
def __init__(self, sym):
self.sym = sym
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
# Sequences 'bol bol' and 'bol eof' are impossible, so only need
# to allow for bol if sym is eol
if match_bol and self.sym == EOL:
initial_state = self.build_opt(m, initial_state, BOL)
initial_state.add_transition(self.sym, final_state)
class Seq(RE):
"""Seq(re1, re2, re3...) is an RE which matches |re1| followed by
|re2| followed by |re3|..."""
def __init__(self, *re_list):
nullable = 1
for i in range(len(re_list)):
re = re_list[i]
self.check_re(i, re)
nullable = nullable and re.nullable
self.re_list = re_list
self.nullable = nullable
i = len(re_list)
match_nl = 0
while i:
i = i - 1
re = re_list[i]
if re.match_nl:
match_nl = 1
break
if not re.nullable:
break
self.match_nl = match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
re_list = self.re_list
if len(re_list) == 0:
initial_state.link_to(final_state)
else:
s1 = initial_state
n = len(re_list)
for i in range(n):
if i < n - 1:
s2 = m.new_state()
else:
s2 = final_state
re = re_list[i]
re.build_machine(m, s1, s2, match_bol, nocase)
s1 = s2
match_bol = re.match_nl or (match_bol and re.nullable)
def calc_str(self):
return "Seq(%s)" % string.join(list(map(str, self.re_list)), ",")
class Alt(RE):
"""Alt(re1, re2, re3...) is an RE which matches either |re1| or
|re2| or |re3|..."""
def __init__(self, *re_list):
self.re_list = re_list
nullable = 0
match_nl = 0
nullable_res = []
non_nullable_res = []
i = 1
for re in re_list:
self.check_re(i, re)
if re.nullable:
nullable_res.append(re)
nullable = 1
else:
non_nullable_res.append(re)
if re.match_nl:
match_nl = 1
i = i + 1
self.nullable_res = nullable_res
self.non_nullable_res = non_nullable_res
self.nullable = nullable
self.match_nl = match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
for re in self.nullable_res:
re.build_machine(m, initial_state, final_state, match_bol, nocase)
if self.non_nullable_res:
if match_bol:
initial_state = self.build_opt(m, initial_state, BOL)
for re in self.non_nullable_res:
re.build_machine(m, initial_state, final_state, 0, nocase)
def calc_str(self):
return "Alt(%s)" % string.join(list(map(str, self.re_list)), ",")
class Rep1(RE):
"""Rep1(re) is an RE which matches one or more repetitions of |re|."""
def __init__(self, re):
self.check_re(1, re)
self.re = re
self.nullable = re.nullable
self.match_nl = re.match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
s1 = m.new_state()
s2 = m.new_state()
initial_state.link_to(s1)
self.re.build_machine(m, s1, s2, match_bol or self.re.match_nl, nocase)
s2.link_to(s1)
s2.link_to(final_state)
def calc_str(self):
return "Rep1(%s)" % self.re
class SwitchCase(RE):
"""
SwitchCase(re, | |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import time
import json
import numpy as np
import numba as nb
from enum import Enum
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Core.snapshot_pf_data import compile_snapshot_circuit
from GridCal.Engine.Simulations.LinearFactors.linear_analysis import LinearAnalysis, make_worst_contingency_transfer_limits
from GridCal.Engine.Simulations.driver_types import SimulationTypes
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Simulations.results_table import ResultsTable
from GridCal.Engine.Simulations.results_template import ResultsTemplate
from GridCal.Engine.Simulations.driver_template import DriverTemplate
########################################################################################################################
# Optimal Power flow classes
########################################################################################################################
class AvailableTransferMode(Enum):
Generation = 0
InstalledPower = 1
Load = 2
GenerationAndLoad = 3
@nb.njit()
def compute_alpha(ptdf, P0, Pinstalled, idx1, idx2, bus_types, dT=1.0, mode=0):
"""
Compute all lines' ATC
:param ptdf: Power transfer distribution factors (n-branch, n-bus)
    :param P0: all bus injections [p.u.]
    :param Pinstalled: installed power per bus (same units as P0)
:param idx1: bus indices of the sending region
:param idx2: bus indices of the receiving region
:param bus_types: Array of bus types {1: pq, 2: pv, 3: slack}
:param dT: Exchange amount
:param mode: Type of power shift
0: shift generation based on the current generated power
1: shift generation based on the installed power
2: shift load
3 (or else): shift using generation and load
:return: Exchange sensitivity vector for all the lines
"""
nbr = ptdf.shape[0]
nbus = ptdf.shape[1]
# declare the bus injections increment due to the transference
dP = np.zeros(nbus)
if mode == 0: # move the generators based on the generated power --------------------
# set the sending power increment proportional to the current power (Area 1)
n1 = 0.0
for i in idx1:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
n1 += P0[i]
for i in idx1:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
dP[i] = dT * P0[i] / abs(n1)
# set the receiving power increment proportional to the current power (Area 2)
n2 = 0.0
for i in idx2:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
n2 += P0[i]
for i in idx2:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
dP[i] = -dT * P0[i] / abs(n2)
elif mode == 1: # move the generators based on the installed power --------------------
# set the sending power increment proportional to the current power (Area 1)
n1 = 0.0
for i in idx1:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
n1 += Pinstalled[i]
for i in idx1:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
dP[i] = dT * Pinstalled[i] / abs(n1)
# set the receiving power increment proportional to the current power (Area 2)
n2 = 0.0
for i in idx2:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
n2 += Pinstalled[i]
for i in idx2:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
dP[i] = -dT * Pinstalled[i] / abs(n2)
elif mode == 2: # move the load ------------------------------------------------------
# set the sending power increment proportional to the current power (Area 1)
n1 = 0.0
for i in idx1:
            if bus_types[i] == 1: # it is a PQ (load) node
n1 += P0[i]
for i in idx1:
            if bus_types[i] == 1: # it is a PQ (load) node
dP[i] = dT * P0[i] / abs(n1)
# set the receiving power increment proportional to the current power (Area 2)
n2 = 0.0
for i in idx2:
            if bus_types[i] == 1: # it is a PQ (load) node
n2 += P0[i]
for i in idx2:
            if bus_types[i] == 1: # it is a PQ (load) node
dP[i] = -dT * P0[i] / abs(n2)
else: # move all of it -----------------------------------------------------------------
# set the sending power increment proportional to the current power
n1 = 0.0
for i in idx1:
n1 += P0[i]
for i in idx1:
dP[i] = dT * P0[i] / abs(n1)
# set the receiving power increment proportional to the current power
n2 = 0.0
for i in idx2:
n2 += P0[i]
for i in idx2:
dP[i] = -dT * P0[i] / abs(n2)
# ----------------------------------------------------------------------------------------
# compute the line flow increments due to the exchange increment dT in MW
dflow = ptdf.dot(dP)
# compute the sensitivity
alpha = dflow / dT
return alpha
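# Hedged usage sketch (illustrative only, not part of GridCal): a 3-bus,
# 2-branch toy case exercising compute_alpha in mode 0 (shift by generated
# power). All numbers and the helper name _example_compute_alpha are
# assumptions made for the demo.
def _example_compute_alpha():
    ptdf = np.array([[0.7, -0.2, 0.0],
                     [0.3, 0.2, 0.0]])   # (n-branch, n-bus)
    P0 = np.array([0.9, 0.6, -1.5])      # bus injections [p.u.]
    Pinstalled = np.array([1.0, 1.0, 0.0])
    idx1 = np.array([0])                 # sending area: bus 0 (slack)
    idx2 = np.array([1])                 # receiving area: bus 1 (PV)
    bus_types = np.array([3, 2, 1])      # slack, pv, pq
    return compute_alpha(ptdf, P0, Pinstalled, idx1, idx2, bus_types, dT=1.0, mode=0)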
@nb.njit()
def compute_atc(br_idx, ptdf, lodf, alpha, flows, rates, contingency_rates, threshold=0.005):
"""
Compute all lines' ATC
:param br_idx: array of branch indices to analyze
:param ptdf: Power transfer distribution factors (n-branch, n-bus)
:param lodf: Line outage distribution factors (n-branch, n-outage branch)
:param alpha: Branch sensitivities to the exchange [p.u.]
:param flows: branches power injected at the "from" side [MW]
:param rates: all branches rates vector
:param contingency_rates: all branches contingency rates vector
:param threshold: value that determines if a line is studied for the ATC calculation
    :return:
             beta_mat: Matrix of beta values (branch, contingency_branch)
             beta_used: vector of the beta value actually used for each branch (n-branch)
             atc_n: vector of ATC values in "N" (n-branch)
             atc_mc: vector of ATC values in "N-1" (n-branch)
             atc_final: vector of ATC in "N" or "N-1", whichever is more limiting (n-branch)
             atc_limiting_contingency_branch: most limiting contingency branch index vector (n-branch)
             atc_limiting_contingency_flow: most limiting contingency flow vector (n-branch)
"""
nbr = len(br_idx)
# explore the ATC
atc_n = np.zeros(nbr)
atc_mc = np.zeros(nbr)
atc_final = np.zeros(nbr)
beta_mat = np.zeros((nbr, nbr))
beta_used = np.zeros(nbr)
atc_limiting_contingency_branch = np.zeros(nbr)
atc_limiting_contingency_flow = np.zeros(nbr)
# processed = list()
# mm = 0
for im, m in enumerate(br_idx): # for each branch
if abs(alpha[m]) > threshold and abs(flows[m]) < rates[m]: # if the branch is relevant enough for the ATC...
# compute the ATC in "N"
if alpha[m] == 0:
atc_final[im] = np.inf
elif alpha[m] > 0:
atc_final[im] = (rates[m] - flows[m]) / alpha[m]
else:
atc_final[im] = (-rates[m] - flows[m]) / alpha[m]
# remember the ATC in "N"
atc_n[im] = atc_final[im]
            # set to the current branch, since we don't know if there will be any contingency that makes the ATC worse
atc_limiting_contingency_branch[im] = m
# explore the ATC in "N-1"
for ic, c in enumerate(br_idx): # for each contingency
# compute the exchange sensitivity in contingency conditions
beta_mat[im, ic] = alpha[m] + lodf[m, c] * alpha[c]
if m != c:
# compute the contingency flow
contingency_flow = flows[m] + lodf[m, c] * flows[c]
# set the default values (worst contingency by itself, not comparing with the base situation)
if abs(contingency_flow) > abs(atc_limiting_contingency_flow[im]):
atc_limiting_contingency_flow[im] = contingency_flow # default
atc_limiting_contingency_branch[im] = c
# now here, do compare with the base situation
if abs(beta_mat[im, ic]) > threshold and abs(contingency_flow) <= contingency_rates[m]:
# compute the ATC in "N-1"
if beta_mat[im, ic] == 0:
atc_mc[im] = np.inf
elif beta_mat[im, ic] > 0:
atc_mc[im] = (contingency_rates[m] - contingency_flow) / beta_mat[im, ic]
else:
atc_mc[im] = (-contingency_rates[m] - contingency_flow) / beta_mat[im, ic]
# refine the ATC to the most restrictive value every time
if abs(atc_mc[im]) < abs(atc_final[im]):
atc_final[im] = atc_mc[im]
beta_used[im] = beta_mat[im, ic]
atc_limiting_contingency_flow[im] = contingency_flow
atc_limiting_contingency_branch[im] = c
return beta_mat, beta_used, atc_n, atc_mc, atc_final, atc_limiting_contingency_branch, atc_limiting_contingency_flow
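# Hedged usage sketch (illustrative only, not part of GridCal): tiny 2-branch
# arrays fed to compute_atc. All values and the helper name
# _example_compute_atc are made up for the demo.
def _example_compute_atc():
    br_idx = np.array([0, 1])
    ptdf = np.array([[0.7, -0.2, 0.0],
                     [0.3, 0.2, 0.0]])
    lodf = np.array([[0.0, 0.6],
                     [0.4, 0.0]])
    alpha = np.array([0.8, 0.2])                 # exchange sensitivities
    flows = np.array([40.0, 10.0])               # base branch flows [MW]
    rates = np.array([100.0, 50.0])
    contingency_rates = np.array([110.0, 55.0])
    return compute_atc(br_idx, ptdf, lodf, alpha, flows, rates, contingency_rates)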
class AvailableTransferCapacityResults(ResultsTemplate):
def __init__(self, n_bus, br_names, bus_names, bus_types, bus_idx_from, bus_idx_to, br_idx):
"""
:param n_bus:
:param br_names:
:param bus_names:
:param bus_types:
:param bus_idx_from:
:param bus_idx_to:
:param br_idx:
"""
ResultsTemplate.__init__(self,
name='ATC Results',
available_results=[ResultTypes.AvailableTransferCapacity,
ResultTypes.NetTransferCapacity,
ResultTypes.AvailableTransferCapacityN,
ResultTypes.AvailableTransferCapacityAlpha,
ResultTypes.AvailableTransferCapacityBeta,
ResultTypes.AvailableTransferCapacityReport
],
data_variables=['alpha',
'beta_mat',
'beta',
'atc',
'atc_n',
'atc_limiting_contingency_branch',
'atc_limiting_contingency_flow',
'base_flow',
'rates',
'contingency_rates',
'report',
'report_headers',
'report_indices',
'branch_names',
'bus_names',
| |
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.spatial import Delaunay
from quadtree.point import Point
from quadtree.utils import find_children, get_neighbor_of_greater_or_equal_size, Direction, deeper, DEBUG_MODE, is_inside_polygon, node_is_inside_polygon, contains
class Mesh():
def __init__(self, qtree):
self.qtree = qtree
self.triangles = []
self.pts_inside_contour = []
self.nodes_inside_polygon = []
def mesh_generation(self):
c = find_children(self.qtree.root)
for u in c:
d = u.depth
dN = deeper(get_neighbor_of_greater_or_equal_size(u, Direction.N))
dS = deeper(get_neighbor_of_greater_or_equal_size(u, Direction.S))
dW = deeper(get_neighbor_of_greater_or_equal_size(u, Direction.W))
dE = deeper(get_neighbor_of_greater_or_equal_size(u, Direction.E))
x, y = u.x0, u.y0
w_, h_ = u.width, u.height
#print(d, dN, dS, dW, dE)
            # Pattern #1 -------------------------
if d>=dN and d>=dS and d>=dW and d>=dE:
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_, y])
self.triangles.append([x, y + h_])
# triangle 2
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x, y + h_])
            # Pattern #2 -------------------------
if dW>d and d>=dN and d>=dS and d>=dE:
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_, y])
self.triangles.append([x, y + h_/2.])
# triangle 2
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x, y + h_/2.])
# triangle 3
self.triangles.append([x + w_, y + h_])
self.triangles.append([x, y + h_])
self.triangles.append([x, y + h_/2.])
if dE>d and d>=dN and d>=dS and d>=dW:
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_/2.])
# triangle 2
self.triangles.append([x, y])
self.triangles.append([x + w_, y + h_/2.])
self.triangles.append([x, y + h_])
# triangle 3
self.triangles.append([x + w_, y + h_/2.])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x, y + h_])
if dN>d and d>=dW and d>=dS and d>=dE:
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y])
self.triangles.append([x, y + h_])
# triangle 2
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x, y + h_])
# triangle 3
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_])
if dS>d and d>=dN and d>=dW and d>=dE:
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_/2., y + h_])
# triangle 2
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_])
# triangle 3
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x, y + h_])
            # Pattern #3 -------------------------
if dN>d and dW>d and d>=dS and d>=dE: #----1
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y])
self.triangles.append([x, y + h_/2.])
# triangle 2
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_/2., y + h_/2.])
self.triangles.append([x, y + h_/2.])
# triangle 3
self.triangles.append([x, y + h_/2.])
self.triangles.append([x + w_/2., y + h_/2.])
self.triangles.append([x, y + h_])
# triangle 4
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 5
self.triangles.append([x + w_/2., y + h_/2.])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x, y + h_])
# triangle 6
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_/2.])
if dN>d and dE>d and d>=dS and d>=dW: #----2
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 2
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_, y + h_/2.])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 3
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_/2.])
# triangle 4
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y+ h_/2.])
self.triangles.append([x, y + h_])
# triangle 5
self.triangles.append([x, y + h_])
self.triangles.append([x + w_/2., y+ h_/2.])
self.triangles.append([x + w_, y + h_])
# triangle 6
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y+ h_/2.])
self.triangles.append([x + w_, y + h_/2.])
if dS>d and dE>d and d>=dW and d>=dN: #----3
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y + h_/2.])
self.triangles.append([x, y + h_])
# triangle 2
self.triangles.append([x, y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 3
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_/2.])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 4
self.triangles.append([x + w_/2., y + h_/2.])
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x, y + h_])
# triangle 5
self.triangles.append([x + w_/2., y + h_/2.])
self.triangles.append([x + w_, y + h_/2.])
self.triangles.append([x + w_/2., y + h_])
                # triangle 6
self.triangles.append([x + w_, y + h_/2.])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_])
if dS>d and dW>d and d>=dN and d>=dE: #----4
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y + h_/2.])
self.triangles.append([x, y + h_/2.])
# triangle 2
self.triangles.append([x, y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 3
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 4
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 5
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x, y + h_/2.])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 6
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x, y + h_])
self.triangles.append([x, y + h_/2.])
            # Pattern #4 -------------------------
if dW>d and dE>d and d>=dS and d>=dN:
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 2
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_/2.])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 3
self.triangles.append([x + w_, y + h_/2.])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 4
self.triangles.append([x + w_, y + h_])
self.triangles.append([x, y + h_])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 5
self.triangles.append([x, y + h_])
self.triangles.append([x, y + h_/2.])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 6
self.triangles.append([x, y + h_/2.])
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y + h_/2.])
if dN>d and dS>d and d>=dW and d>=dE:
# triangle 1
self.triangles.append([x, y + h_])
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 2
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 3
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 4
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 5
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x + w_/2., y + h_/2.])
# triangle 6
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x, y + h_])
self.triangles.append([x + w_/2., y + h_/2.])
            # Pattern #5 -------------------------
if dW>d and dN>d and dS>d and d>=dE:
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y])
self.triangles.append([x, y + h_/2.])
# triangle 2
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x, y + h_/2.])
# triangle 3
self.triangles.append([x + w_, y])
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x + w_/2., y])
# triangle 4
self.triangles.append([x + w_, y + h_])
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x + w_, y])
# triangle 5
self.triangles.append([x, y + h_/2.])
self.triangles.append([x + w_/2., y + h_])
self.triangles.append([x, y + h_])
if dW>d and dN>d and dE>d and d>=dS:
# triangle 1
self.triangles.append([x, y])
self.triangles.append([x + w_/2., y])
self.triangles.append([x, y + h_/2.])
# triangle 2
self.triangles.append([x, y + h_/2.])
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_, y + h_/2.])
# triangle 3
self.triangles.append([x + w_/2., y])
self.triangles.append([x + w_, y])
self.triangles.append([x + w_, y + h_/2.])
# triangle 4
self.triangles.append([x, y + h_])
self.triangles.append([x, y + h_/2.])
self.triangles.append([x + w_, y + h_/2.])
# triangle 5
self.triangles.append([x + w_, y | |
from collections import defaultdict
from rdflib.namespace import RDF, RDFS, OWL
prefixes = {'rdf:type': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type',
'rdf:comment': 'http://www.w3.org/2000/01/rdf-schema#comment',
'rdfs:subClassOf': 'http://www.w3.org/2000/01/rdf-schema#subClassOf',
'rdfs:label': 'http://www.w3.org/2000/01/rdf-schema#label',
'rdfs:domain': 'http://www.w3.org/2000/01/rdf-schema#domain',
'rdfs:range': 'http://www.w3.org/2000/01/rdf-schema#range',
'owl:allValuesFrom': 'http://www.w3.org/2002/07/owl#allValuesFrom',
'owl:someValuesFrom': 'http://www.w3.org/2002/07/owl#someValuesFrom',
'owl:intersectionOf': 'http://www.w3.org/2002/07/owl#intersectionOf',
'rdf:first': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#first',
'rdf:rest': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#rest',
'owl:onProperty': 'http://www.w3.org/2002/07/owl#onProperty',
'owl:Restriction': 'http://www.w3.org/2002/07/owl#Restriction',
'rdfs:nil': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil',
'owl:qualifiedCardinality': 'http://www.w3.org/2002/07/owl#qualifiedCardinality',
'owl:minQualifiedCardinality': 'http://www.w3.org/2002/07/owl#minQualifiedCardinality',
'owl:maxQualifiedCardinality': 'http://www.w3.org/2002/07/owl#maxQualifiedCardinality',
'owl:onClass': 'http://www.w3.org/2002/07/owl#onClass',
'owl:ObjectProperty': 'http://www.w3.org/2002/07/owl#ObjectProperty',
'owl:DatatypeProperty': 'http://www.w3.org/2002/07/owl#DatatypeProperty',
'owl:Class': 'http://www.w3.org/2002/07/owl#Class',
'owl:Ontology': 'http://www.w3.org/2002/07/owl#Ontology',
'owl:versionIRI': 'http://www.w3.org/2002/07/owl#versionIRI',
'owl:onDataRange': 'http://www.w3.org/2002/07/owl#onDataRange',
'owl:equivalentClass': 'http://www.w3.org/2002/07/owl#equivalentClass'}
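# Hedged usage sketch (not part of the original module): load an OWL file into
# an rdflib graph and feed it to the Ontology class defined below. The helper
# name _example_build_ontology and the RDF/XML serialization assumption about
# the input file are ours; Ontology is resolved at call time, after the class
# definition that follows.
def _example_build_ontology(path):
    from rdflib import Graph
    graph = Graph()
    graph.parse(path, format="xml")  # assumes an RDF/XML serialized ontology
    onto = Ontology()
    onto.construct(graph)            # note: also writes 'all_triples.txt' as a side effect
    return onto.classes, onto.axioms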
class Ontology(object):
def __init__(self):
self.classes = list()
self.object_properties = list()
self.data_properties = list()
self.data_types = list()
self.domains_dict = defaultdict(list)
self.ranges_dict = defaultdict(list)
self.equivalent_dict = defaultdict(list)
# self.types_dict = defaultdict(list)
# self.labels_dict = defaultdict(list)
# self.comments_dict = defaultdict(list)
# self.subclasses_dict = defaultdict(list)
self.triples = list()
self.subsumption = list()
self.general_axioms_dict = defaultdict(list)
self.all_values_from_dict = defaultdict()
self.some_values_from_dict = defaultdict()
self.eqc_dict = defaultdict()
self.min_qc_dict = defaultdict()
self.max_qc_dict = defaultdict()
self.on_data_range_dict = defaultdict()
self.intersection_of_dict = defaultdict(list)
self.first_dict = defaultdict()
self.rest_dict = defaultdict()
self.restriction_list = list()
self.on_property_dist = defaultdict()
self.on_class_dist = defaultdict()
self.class2superclass = defaultdict(list)
self.class2subclass = defaultdict(list)
self.class2axioms = defaultdict(list)
self.axioms = list()
def construct(self, graph):
for s in graph.subjects(RDF.type, OWL.Class):
s = s.toPython()
self.classes.append(s)
for s in graph.subjects(RDF.type, OWL.ObjectProperty):
s = s.toPython()
self.object_properties.append(s)
for s in graph.subjects(RDF.type, OWL.DatatypeProperty):
s = s.toPython()
self.data_properties.append(s)
file = open('all_triples.txt', 'w')
for s, p, o in graph.triples((None, None, None)):
file.writelines('{} {} {}\n'.format(s.toPython(), p.toPython(), o.toPython()))
s = s.toPython()
p = p.toPython()
o = o.toPython()
'''
if p == prefixes['rdf:type']:
self.types_dict[s].append(o)
if p == prefixes['rdfs:label']:
self.labels_dict[s].append(o)
if p == prefixes['rdf:comment']:
self.comments_dict[s].append(o)
'''
if p == prefixes['rdfs:domain']:
self.domains_dict[s].append(o)
if p == prefixes['rdfs:range']:
self.ranges_dict[s].append(o)
if s in self.data_properties and o not in self.data_types:
self.data_types.append(o)
if p == prefixes['rdfs:subClassOf']:
# self.subclasses_dict[s].append(o)
if o[0:4] != 'http':
self.general_axioms_dict[s].append(o)
# self.classes2axioms[s].append(o)
else:
if [s, o] not in self.subsumption:
self.subsumption.append([s, o])
self.class2superclass[s].append(o)
self.class2subclass[o].append(s)
if p == prefixes['owl:intersectionOf']:
self.intersection_of_dict[s].append(o)
if p == prefixes['owl:allValuesFrom']:
self.all_values_from_dict[s] = o
if p == prefixes['owl:someValuesFrom']:
self.some_values_from_dict[s] = o
if p == prefixes['owl:qualifiedCardinality']:
self.eqc_dict[s] = o
if p == prefixes['owl:minQualifiedCardinality']:
self.min_qc_dict[s] = o
if p == prefixes['owl:maxQualifiedCardinality']:
self.max_qc_dict[s] = o
if p == prefixes['owl:onProperty']:
self.on_property_dist[s] = o
if p == prefixes['owl:onClass']:
self.on_class_dist[s] = o
if p == prefixes['owl:onDataRange']:
self.on_data_range_dict[s] = o
if p == prefixes['rdf:first']:
self.first_dict[s] = o
if p == prefixes['rdf:rest']:
self.rest_dict[s] = o
if p == prefixes['rdf:type'] and o == prefixes['owl:Restriction']:
self.restriction_list.append(s)
if p == prefixes['owl:equivalentClass']:
self.equivalent_dict[s].append(o)
if o[0:4] != 'http':
self.general_axioms_dict[s].append(o)
# self.classes2axioms[s].append(o)
else:
self.subsumption.append([s, o])
self.class2superclass[s].append(o)
self.class2subclass[o].append(s)
# self.class2superclass_flag = {k:-1 for k in self.class2superclass.keys()}
# self.class2subclass_flag = {k:-1 for k in self.class2subclass.keys()}
self.class2superclass_flag = {k: -1 for k in self.classes}
self.class2subclass_flag = {k: -1 for k in self.classes}
        # The order of the following function calls matters
self.__parse_tree()
self.__parse_general_axioms()
# self.__parse_tree()
# print(self.class2subclass_flag)
# print(self.class2superclass_flag)
for c in self.classes:
if c[0:4] != 'http':
for (key, value) in self.domains_dict.items():
if (key in self.data_properties or key in self.object_properties) and c in value:
self.classes.remove(c)
break
'''
for (key, value) in self.ranges_dict.items():
if (key in self.data_properties or key in self.object_properties) and c in value:
self.classes.remove(c)
break
'''
def __parse_general_axioms(self):
# self.__parse_anonymous_properties()
for (current_class, anonymous_classes) in self.general_axioms_dict.items():
for anonymous_class in anonymous_classes:
self.__parse_anonymous_class(current_class, anonymous_class)
# not intersection
for current_class in self.classes:
self.axioms.append([current_class, 'iri', 'http://www.w3.org/2001/XMLSchema#string', '=1'])
self.class2axioms[current_class].append(['iri', 'http://www.w3.org/2001/XMLSchema#string', '=1'])
    # parse properties that do not appear in any axiom but have both a domain and a range definition (treated as "for all")
def __parse_anonymous_properties(self):
for dp in self.data_properties:
if dp not in self.on_property_dist.values():
                # NOTE: may need updating in the future if a property has multiple domains
if len(self.domains_dict[dp]) > 0 and len(self.ranges_dict[dp]) > 0:
axiom = [self.domains_dict[dp][0], dp, self.ranges_dict[dp][0], '>=0']
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[self.domains_dict[dp][0]].append([dp, self.ranges_dict[dp][0], '>=0'])
for subclass in self.class2subclass[self.domains_dict[dp][0]]:
axiom = [subclass, dp, self.ranges_dict[dp][0], '>=0']
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append([dp, self.ranges_dict[dp][0], '>=0'])
for op in self.object_properties:
if op not in self.on_property_dist.values():
                # NOTE: may need updating in the future if a property has multiple domains
if len(self.domains_dict[op]) > 0 and len(self.ranges_dict[op]) > 0:
axiom = [self.domains_dict[op][0], op, self.ranges_dict[op][0], '>=0']
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[self.domains_dict[op][0]].append([op, self.ranges_dict[op][0], '>=0'])
for subclass in self.class2subclass[self.domains_dict[op][0]]:
axiom = [subclass, op, self.ranges_dict[op][0], '>=0']
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append([op, self.ranges_dict[op][0], '>=0'])
# to parse an expression
def __parse_anonymous_class(self, current_class, anonymous_class):
if anonymous_class in self.intersection_of_dict.keys():
for intersection_element in self.intersection_of_dict[anonymous_class]:
first_element = self.first_dict[intersection_element]
self.__parse_anonymous_class(current_class, first_element)
rest_element = self.rest_dict[intersection_element]
self.__parse_anonymous_class(current_class, rest_element)
else:
if anonymous_class in self.restriction_list:
if anonymous_class in self.all_values_from_dict.keys():
axiom = [current_class, self.on_property_dist[anonymous_class],
self.all_values_from_dict[anonymous_class], '>=0']
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[current_class].append(
[self.on_property_dist[anonymous_class], self.all_values_from_dict[anonymous_class], '>=0'])
# add axiom for current_class's sub-current_class
for subclass in self.class2subclass[current_class]:
axiom = [subclass, self.on_property_dist[anonymous_class],
self.all_values_from_dict[anonymous_class], '>=0']
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append(
[self.on_property_dist[anonymous_class], self.all_values_from_dict[anonymous_class],
'>=0'])
if self.all_values_from_dict[anonymous_class][0:4] != 'http':
new_anonymous_class = self.all_values_from_dict[anonymous_class]
self.classes.append(new_anonymous_class)
self.__parse_anonymous_class(new_anonymous_class, new_anonymous_class)
if anonymous_class in self.some_values_from_dict.keys():
axiom = [current_class, self.on_property_dist[anonymous_class],
self.some_values_from_dict[anonymous_class], '>=1']
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[current_class].append(
[self.on_property_dist[anonymous_class], self.some_values_from_dict[anonymous_class], '>=1'])
# add axiom for current_class's sub-current_class
for subclass in self.class2subclass[current_class]:
axiom = [subclass, self.on_property_dist[anonymous_class],
self.some_values_from_dict[anonymous_class], '>=1']
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append(
[self.on_property_dist[anonymous_class], self.some_values_from_dict[anonymous_class],
'>=1'])
if self.some_values_from_dict[anonymous_class][0:4] != 'http':
new_anonymous_class = self.some_values_from_dict[anonymous_class]
self.classes.append(new_anonymous_class)
self.__parse_anonymous_class(new_anonymous_class, new_anonymous_class)
if anonymous_class in self.eqc_dict.keys():
if anonymous_class in self.on_data_range_dict.keys():
axiom = [current_class, self.on_property_dist[anonymous_class],
self.on_data_range_dict[anonymous_class], '=' + str(self.eqc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[current_class].append(
[self.on_property_dist[anonymous_class], self.on_data_range_dict[anonymous_class],
'=' + str(self.eqc_dict[anonymous_class])])
# add axiom for current_class's sub-current_class
for subclass in self.class2subclass[current_class]:
axiom = [subclass, self.on_property_dist[anonymous_class],
self.on_data_range_dict[anonymous_class],
'=' + str(self.eqc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append(
[self.on_property_dist[anonymous_class], self.on_data_range_dict[anonymous_class],
'=' + str(self.eqc_dict[anonymous_class])])
else:
axiom = [current_class, self.on_property_dist[anonymous_class],
self.on_class_dist[anonymous_class], '=' + str(self.eqc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[current_class].append(
[self.on_property_dist[anonymous_class], self.on_class_dist[anonymous_class],
'=' + str(self.eqc_dict[anonymous_class])])
# add axiom for current_class's sub-current_class
for subclass in self.class2subclass[current_class]:
axiom = [subclass, self.on_property_dist[anonymous_class],
self.on_class_dist[anonymous_class], '=' + str(self.eqc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append(
[self.on_property_dist[anonymous_class], self.on_class_dist[anonymous_class],
'=' + str(self.eqc_dict[anonymous_class])])
if self.on_class_dist[anonymous_class][0:4] != 'http':
new_anonymous_class = self.on_class_dist[anonymous_class]
self.classes.append(new_anonymous_class)
self.__parse_anonymous_class(new_anonymous_class, new_anonymous_class)
if anonymous_class in self.min_qc_dict.keys():
if anonymous_class in self.on_data_range_dict.keys():
axiom = [current_class, self.on_property_dist[anonymous_class],
self.on_data_range_dict[anonymous_class], '>=' + str(self.min_qc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[current_class].append(
[self.on_property_dist[anonymous_class], self.on_data_range_dict[anonymous_class],
'>=' + str(self.min_qc_dict[anonymous_class])])
for subclass in self.class2subclass[current_class]:
axiom = [subclass, self.on_property_dist[anonymous_class],
self.on_data_range_dict[anonymous_class],
'>=' + str(self.min_qc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append(
[self.on_property_dist[anonymous_class], self.on_data_range_dict[anonymous_class],
'>=' + str(self.min_qc_dict[anonymous_class])])
else:
axiom = [current_class, self.on_property_dist[anonymous_class],
self.on_class_dist[anonymous_class], '>=' + str(self.min_qc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[current_class].append(
[self.on_property_dist[anonymous_class], self.on_class_dist[anonymous_class],
'>=' + str(self.min_qc_dict[anonymous_class])])
# add axiom for current_class's sub-current_class
for subclass in self.class2subclass[current_class]:
axiom = [subclass, self.on_property_dist[anonymous_class],
self.on_class_dist[anonymous_class],
'>=' + str(self.min_qc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append(
[self.on_property_dist[anonymous_class], self.on_class_dist[anonymous_class],
'>=' + str(self.min_qc_dict[anonymous_class])])
if self.on_class_dist[anonymous_class][0:4] != 'http':
new_anonymous_class = self.on_class_dist[anonymous_class]
self.classes.append(new_anonymous_class)
self.__parse_anonymous_class(new_anonymous_class, new_anonymous_class)
if anonymous_class in self.max_qc_dict.keys():
if anonymous_class in self.on_data_range_dict.keys():
axiom = [current_class, self.on_property_dist[anonymous_class],
self.on_data_range_dict[anonymous_class], '<=' + str(self.max_qc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[current_class].append(
[self.on_property_dist[anonymous_class], self.on_data_range_dict[anonymous_class],
'<=' + str(self.max_qc_dict[anonymous_class])])
# add axiom for current_class's sub-current_class
for subclass in self.class2subclass[current_class]:
axiom = [subclass, self.on_property_dist[anonymous_class],
self.on_data_range_dict[anonymous_class],
'<=' + str(self.max_qc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append(
[self.on_property_dist[anonymous_class], self.on_data_range_dict[anonymous_class],
'<=' + str(self.max_qc_dict[anonymous_class])])
else:
axiom = [current_class, self.on_property_dist[anonymous_class],
self.on_class_dist[anonymous_class], '<=' + str(self.max_qc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[current_class].append(
[self.on_property_dist[anonymous_class], self.on_class_dist[anonymous_class],
'<=' + str(self.max_qc_dict[anonymous_class])])
# add axiom for current_class's sub-current_class
for subclass in self.class2subclass[current_class]:
axiom = [subclass, self.on_property_dist[anonymous_class],
self.on_class_dist[anonymous_class],
'<=' + str(self.max_qc_dict[anonymous_class])]
if axiom not in self.axioms:
self.axioms.append(axiom)
self.class2axioms[subclass].append(
[self.on_property_dist[anonymous_class], self.on_class_dist[anonymous_class],
'<=' + str(self.max_qc_dict[anonymous_class])])
if self.on_class_dist[anonymous_class][0:4] != 'http':
new_anonymous_class = self.on_class_dist[anonymous_class]
self.classes.append(new_anonymous_class)
self.__parse_anonymous_class(new_anonymous_class, new_anonymous_class)
else:
if anonymous_class in self.first_dict.keys():
first_element = self.first_dict[anonymous_class]
rest_element = self.rest_dict[anonymous_class]
self.__parse_anonymous_class(current_class, first_element)
if rest_element != prefixes['rdfs:nil']:
self.__parse_anonymous_class(current_class, rest_element)
else:
# print(self.subsumption)
if [current_class, anonymous_class] not in self.subsumption:
self.subsumption.append([current_class, anonymous_class])
self.class2superclass[current_class].append(anonymous_class)
self.class2subclass[anonymous_class].append(current_class)
# print(self.class2subclass)
for subclass in self.class2subclass[current_class]:
if [subclass, anonymous_class] not in self.subsumption:
self.subsumption.append([subclass, anonymous_class])
self.class2superclass[subclass].append(anonymous_class)
self.class2subclass[subclass].append(current_class)
for axiom in self.class2axioms[anonymous_class]:
self.class2axioms[subclass].append(axiom)
self.axioms.append([subclass] + axiom)
else:
print('NO results')
# to parse a super-sub tree
def __parse_tree(self):
for c in self.classes:
if c not in self.class2superclass.keys() and c in self.class2subclass.keys():
self.__get_subclasses(c)
if c in self.class2superclass.keys() and c not in self.class2subclass.keys():
self.__get_superclasses(c)
'''
for (key, value) in self.class2subclass.items():
print(key, value, len(value))
print('------------------------------------------')
for (key, value) in self.class2superclass.items():
print(key, value, len(value))
'''
# print('TREE',self.class2subclass)
    # get all the superclasses of the current class
def __get_superclasses(self, current_class):
if current_class in self.class2superclass.copy().keys():
superclasses = self.class2superclass.copy()[current_class]
for superclass in superclasses:
temp = self.__get_superclasses(superclass)
superclasses | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Decorators for functions accepting Astropy Quantities."""
__author__ = "<NAME>"
__credit__ = "astropy"
__all__ = ["quantity_output", "QuantityInputOutput", "quantity_io"]
##############################################################################
# IMPORTS
import textwrap
import typing as T
from astropy.units import dimensionless_unscaled
from astropy.units.core import Unit, add_enabled_equivalencies
from astropy.units.decorators import _get_allowed_units, _validate_arg_value
from astropy.utils.decorators import format_doc
from astropy.utils.misc import isiterable
from utilipy.utils import functools, inspect
from utilipy.utils.typing import UnitableType
from .core import _doc_base_params, _doc_base_raises, quantity_return_
###############################################################################
# PARAMETERS
_aioattrs = (
"unit",
"to_value",
"equivalencies",
"decompose",
"assumed_units",
"assume_annotation_units",
)
# ----------------------------------------
_doc_quantity_output_examples: str = """
`quantity_output` decorated function
>>> from astronat.units.decorators import quantity_output
>>> @quantity_output(unit=u.m, to_value=True)
... def example_function(x):
... return x
>>> example_function(10 * u.km)
10000.0
>>> example_function(10)
10
>>> example_function(10 * u.km, to_value=False) # doctest: +FLOAT_CMP
<Quantity 10000. m>
"""
_doc_quantity_output_examples = _doc_quantity_output_examples[1:]
_doc_quantity_output_wrapped: str = """
Other Parameters
----------------
{parameters}
Raises
-------
{raises}
Examples
--------
{examples}
""".format(
parameters=_doc_base_params,
raises=_doc_base_raises,
examples=_doc_quantity_output_examples,
)
# ----------------------------------------
# QuantityInputOutput parameters, combine base and assumed_units
_doc_qio_params: str = """function: Callable
the function to decorate (default None)
{parameters}
assumed_units: dict
dictionary of default units
(default dict())
>>> from astronat.units.decorators import quantity_io
>>> dfu = dict(x=u.km)
>>> x = 10
>>> y = 20*u.km
>>> @quantity_io(assumed_units=dfu)
... def add(x, y):
... return x + y
>>> add(x, y) # doctest: +SKIP
<Quantity 30.0 km>
assume_annotation_units: bool, optional
whether to interpret function annotations as default units
(default False)
function annotations have lower precedence than `assumed_units`
""".format(
parameters=_doc_base_params,
)
_doc_qio_notes: str = """
Order of Precedence:
1. Function Arguments
2. Decorator Arguments
3. Function Annotation Arguments
Decorator Key-Word Arguments:
Unit specifications can be provided as keyword arguments
to the decorator, or by using function annotation syntax.
Arguments to the decorator take precedence
over any function annotations present.
**note**
decorator key-word arguments are NEVER interpreted as `assumed_units`
>>> from astronat.units.decorators import quantity_io
>>> @quantity_io(x=u.m, y=u.s)
... def func(x, y):
... pass
Function Annotation Arguments:
Unit specifications can be provided as keyword arguments
to the decorator, or by using function annotation syntax.
Arguments to the function and decorator take precedence
over any function annotations present.
>>> def func(x: u.m, y: u.s) -> u.m / u.s:
... pass
if `assume_annotation_units` is True (default False)
function annotations are interpreted as default units
function annotations have lower precedence than `assumed_units`
"""
# TODO replace
_funcdec: str = """
Other Parameters
----------------
{parameters}
""".format(
parameters=_doc_qio_params
)
###############################################################################
# CODE
###############################################################################
@format_doc(
None,
parameters=textwrap.indent(_doc_base_params, " " * 4)[4:],
raises=textwrap.indent(_doc_base_raises, " " * 4),
examples=textwrap.indent(_doc_quantity_output_examples, " " * 4),
# doc_quantity_output_wrapped=textwrap.indent(
# _doc_quantity_output_wrapped, " " * 12 + "| "
# ),
)
def quantity_output(
function: T.Callable = None,
*,
unit: UnitableType = None,
to_value: bool = False,
equivalencies: T.Sequence = [],
decompose: T.Union[bool, T.Sequence] = False,
):
r"""Decorate functions for unit output.
Any wrapped function accepts the additional key-word arguments
`unit`, `to_value`, `equivalencies`, `decompose`
Parameters
----------
{parameters}
Returns
-------
wrapper: Callable
wrapped function
with the unit operations performed by
:func:`~astronat.units.quantity_return_`
Raises
------
{raises}
Examples
--------
.. code-block:: python
@quantity_output
def func(x, y):
return x + y
is equivalent to
.. code-block:: python
def func(x, y, unit=None, to_value=False, equivalencies=[],
decompose=False):
result = x + y
return quantity_return_(result, unit, to_value, equivalencies,
decompose)
{examples}
"""
# allowing for optional arguments
if function is None:
return functools.partial(
quantity_output,
unit=unit,
to_value=to_value,
equivalencies=equivalencies,
decompose=decompose,
)
# making decorator
@functools.wraps(function)
@format_doc(
None,
# _doc_quantity_output_wrapped=textwrap.indent(
# _doc_quantity_output_wrapped, " " * 8
# ),
parameters=textwrap.indent(_doc_base_params, " " * 8)[8:],
raises=textwrap.indent(_doc_base_raises, " " * 8)[8:],
examples=textwrap.indent(_doc_quantity_output_examples, " " * 8)[8:],
)
def wrapper(
*args: T.Any,
unit: T.Type[Unit] = unit,
to_value: bool = to_value,
equivalencies: T.Sequence = equivalencies,
decompose: T.Union[bool, T.Sequence] = decompose,
**kwargs: T.Any,
):
"""Wrapper docstring.
Other Parameters
----------------
{parameters}
Raises
------
{raises}
Examples
--------
{examples}
"""
return quantity_return_(
function(*args, **kwargs), # evaluated function
unit=unit,
to_value=to_value,
equivalencies=equivalencies,
decompose=decompose,
)
# /def
return wrapper
# /def
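# Hedged usage sketch mirroring the docstring example above; the helper name
# _example_quantity_output and the inner function name are ours, not part of
# this module's API.
def _example_quantity_output():
    import astropy.units as u
    @quantity_output(unit=u.m, to_value=True)
    def double_length(x):
        return 2 * x
    # With unit=u.m and to_value=True this should come back as the plain
    # float 6000.0 (2 * 3 km expressed in metres).
    return double_length(3 * u.km)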
###############################################################################
class QuantityInputOutput:
"""Decorator for validating the units of arguments to functions."""
@format_doc(
None,
parameters=textwrap.indent(_doc_qio_params, " " * 8),
notes=textwrap.indent(_doc_qio_notes, " " * 8),
)
@classmethod
def as_decorator(
cls,
function: T.Callable = None,
unit: UnitableType = None,
to_value: bool = False,
equivalencies: T.Sequence = [],
decompose: T.Union[bool, T.Sequence] = False,
assumed_units: T.Dict = {},
assume_annotation_units: bool = False,
**decorator_kwargs,
):
"""Decorator for validating the units of arguments to functions.
Parameters
----------
{parameters}
See Also
--------
:class:`~astropy.units.quantity_input`
Notes
-----
{notes}
"""
# making instance from base class
self = super().__new__(cls)
# modifying docstring
_locals = locals()
self.__doc__ = __doc__.format(
**{k: _locals.get(k).__repr__() for k in set(_aioattrs)}
)
self.__init__(
unit=unit,
to_value=to_value,
equivalencies=equivalencies,
decompose=decompose,
assumed_units=assumed_units,
assume_annotation_units=assume_annotation_units,
**decorator_kwargs,
)
if function is not None:
return self(function)
return self
# /def
# ------------------------------------------
@format_doc(
None,
parameters=textwrap.indent(_doc_qio_params, " " * 4),
notes=textwrap.indent(_doc_qio_notes, " " * 4),
)
def __init__(
self,
function: T.Callable = None,
unit: UnitableType = None,
to_value: bool = False,
equivalencies: T.Sequence = [],
decompose: T.Union[bool, T.Sequence] = False,
assumed_units: dict = {},
assume_annotation_units: bool = False,
**decorator_kwargs,
):
"""Decorator for validating the units of arguments to functions.
Parameters
----------
{parameters}
See Also
--------
:class:`~astropy.units.quantity_input`
Notes
-----
{notes}
"""
super().__init__()
self.unit = unit
self.to_value = to_value
self.equivalencies = equivalencies
self.decompose = decompose
self.assumed_units = assumed_units
self.assume_annotation_units = assume_annotation_units
self.decorator_kwargs = decorator_kwargs
return
# /def
# ------------------------------------------
def __call__(self, wrapped_function: T.Callable):
"""Make decorator.
Parameters
----------
wrapped_function : Callable
function to wrap
Returns
-------
wrapped: Callable
wrapped function
"""
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
@functools.wraps(wrapped_function)
def wrapped(
*func_args: T.Any,
unit: UnitableType = self.unit,
to_value: bool = self.to_value,
equivalencies: T.Sequence = self.equivalencies,
decompose: T.Union[bool, T.Sequence] = self.decompose,
assumed_units: dict = self.assumed_units,
_skip_decorator: bool = False,
**func_kwargs: T.Any,
):
# skip the decorator
if _skip_decorator:
return wrapped_function(*func_args, **func_kwargs)
# make func_args editable
_func_args: list = list(func_args)
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*_func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for i, param in enumerate(wrapped_signature.parameters.values()):
# We do not support variable arguments (*args, **kwargs)
if param.kind in {
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
}:
continue
                # Catch the (never triggered) case where bind relied on a default value.
if (
param.name not in bound_args.arguments
and param.default is not param.empty
):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# +----------------------------------+
# Get default unit or physical type,
# either from decorator kwargs
# or annotations
if param.name in assumed_units:
dfunit = assumed_units[param.name]
elif self.assume_annotation_units is True:
dfunit = param.annotation
# elif not assumed_units:
# dfunit = param.annotation
else:
dfunit = inspect.Parameter.empty
adjargbydfunit = True
# If the dfunit is empty, then no target units or physical
# types were specified so we can continue to the next arg
if dfunit is inspect.Parameter.empty:
adjargbydfunit = False
# If the argument value is None, and the default value is None,
# pass through the None even if there is a dfunit unit
elif arg is None and param.default is None:
adjargbydfunit = False
# Here, we check whether multiple dfunit unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
elif isinstance(dfunit, str):
dfunit = _get_allowed_units([dfunit])[0]
elif not isiterable(dfunit):
pass
else:
raise ValueError("target must be one Unit, not list")
                if not hasattr(arg, "unit") and adjargbydfunit:
if i < len(_func_args):
# print(i, len(bound_args.args))
_func_args[i] *= dfunit
else:
func_kwargs[param.name] *= dfunit
arg *= dfunit
# +----------------------------------+
# Get target unit or physical type,
# from decorator kwargs or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
else:
targets = param.annotation
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isiterable(targets):
valid_targets = [targets]
# Check for None in the supplied list of allowed | |
"▁value": 22392,
":53": 22393,
"math": 22394,
"tulo": 22395,
"▁Lue": 22396,
"▁Zeland": 22397,
"▁Ziel": 22398,
"▁pool": 22399,
"▁today": 22400,
"cado": 22401,
"ffet": 22402,
"ionis": 22403,
"olan": 22404,
"▁versi": 22405,
"ZS": 22406,
"bide": 22407,
"gazi": 22408,
"shak": 22409,
"È": 22410,
"▁Bachelor": 22411,
"▁Fé": 22412,
"▁configure": 22413,
"NAD": 22414,
"hore": 22415,
"inc": 22416,
"zane": 22417,
"▁Arriva": 22418,
"▁Krom": 22419,
"▁geni": 22420,
"▁hell": 22421,
"▁pour": 22422,
"analys": 22423,
"casa": 22424,
"chou": 22425,
"lil": 22426,
"pasi": 22427,
"yey": 22428,
"▁Asad": 22429,
"▁contact": 22430,
"▁harmoni": 22431,
"▁kin": 22432,
"▁little": 22433,
"DZ": 22434,
"MAIL": 22435,
"pped": 22436,
"▁Anar": 22437,
"▁Canaria": 22438,
"▁Rabb": 22439,
"▁aby": 22440,
"▁chal": 22441,
"▁obo": 22442,
"lasse": 22443,
"oak": 22444,
"▁Kannada": 22445,
"▁Tempo": 22446,
"▁pad": 22447,
"▁unit": 22448,
"TEAM": 22449,
"guzi": 22450,
"tsiya": 22451,
"▁Menge": 22452,
"▁Ramón": 22453,
"▁Reddy": 22454,
"▁keta": 22455,
"▁vesti": 22456,
"dense": 22457,
"hotel": 22458,
"kkonen": 22459,
"lét": 22460,
"▁Wrocław": 22461,
"▁YOU": 22462,
"▁alternative": 22463,
"▁bando": 22464,
"▁find": 22465,
"▁tram": 22466,
"6-7": 22467,
"Mb": 22468,
"Test": 22469,
"fine": 22470,
"pomorskie": 22471,
"▁Ampli": 22472,
"▁Ağrı": 22473,
"▁Kongo": 22474,
"▁currency": 22475,
"▁minutu": 22476,
"▁pata": 22477,
"IGO": 22478,
"ief": 22479,
"tourism": 22480,
"▁Abs": 22481,
"▁always": 22482,
"▁delta": 22483,
"▁mach": 22484,
"成": 22485,
"gine": 22486,
"hlav": 22487,
"▁Ordu": 22488,
"▁costat": 22489,
"▁ker": 22490,
"▁neuro": 22491,
"▁slot": 22492,
"み": 22493,
"mano": 22494,
"ysis": 22495,
"▁Drift": 22496,
"▁Muhammed": 22497,
"▁Sv": 22498,
"▁Tod": 22499,
"▁Trabzonspor": 22500,
"▁alle": 22501,
"PAL": 22502,
"direct": 22503,
"pje": 22504,
"pone": 22505,
"viy": 22506,
"▁19:30": 22507,
"▁Kwe": 22508,
"▁Lumin": 22509,
"▁Pasqua": 22510,
"▁TAL": 22511,
"▁Thread": 22512,
"▁ag": 22513,
"▁heavy": 22514,
"KU": 22515,
"joy": 22516,
"él": 22517,
"▁Dach": 22518,
"▁Familia": 22519,
"▁Heide": 22520,
"▁Merdeka": 22521,
"▁Playstation": 22522,
"▁limba": 22523,
"福": 22524,
"plet": 22525,
"sorti": 22526,
"upa": 22527,
"yos": 22528,
"▁FUN": 22529,
"▁Recon": 22530,
"cã": 22531,
"trud": 22532,
"▁vina": 22533,
"陵": 22534,
"ayu": 22535,
"sourcing": 22536,
"veiro": 22537,
"zung": 22538,
"ḅ": 22539,
"▁Bitte": 22540,
"▁Mexican": 22541,
"▁Vec": 22542,
"▁tõ": 22543,
"7.00": 22544,
"dant": 22545,
"itse": 22546,
"mpu": 22547,
"sker": 22548,
"▁Greenpeace": 22549,
"▁Practic": 22550,
"▁cop": 22551,
"▁rugby": 22552,
"一": 22553,
"1400": 22554,
":52": 22555,
"GAT": 22556,
"styr": 22557,
"▁Client": 22558,
"▁Parallel": 22559,
"▁Pub": 22560,
"▁Webcam": 22561,
"▁future": 22562,
"▁negle": 22563,
"▁scor": 22564,
"▁tart": 22565,
"amas": 22566,
"america": 22567,
"argent": 22568,
"arta": 22569,
"hamn": 22570,
"logic": 22571,
"vou": 22572,
"▁November": 22573,
"▁Shenzhen": 22574,
"▁mam": 22575,
"▁synonym": 22576,
"evento": 22577,
"gib": 22578,
"gulat": 22579,
"oby": 22580,
"oms": 22581,
"seen": 22582,
"▁VU": 22583,
"▁candida": 22584,
"▁things": 22585,
"▁trap": 22586,
"ん": 22587,
"산": 22588,
"Bundes": 22589,
"ient": 22590,
"maan": 22591,
"mete": 22592,
"▁Committee": 22593,
"▁Putrajaya": 22594,
"ARI": 22595,
"NAR": 22596,
"▁Poznań": 22597,
"▁chap": 22598,
"▁compa": 22599,
"▁crush": 22600,
"▁til": 22601,
"侯": 22602,
"akin": 22603,
"▁Raha": 22604,
"▁pir": 22605,
"▁sweat": 22606,
"Est": 22607,
"dido": 22608,
"ksyon": 22609,
"leben": 22610,
"ème": 22611,
"▁Atelier": 22612,
"▁Maastricht": 22613,
"▁Prag": 22614,
"▁arcu": 22615,
"▁cookies": 22616,
"▁impress": 22617,
"▁ALT": 22618,
"▁Abba": 22619,
"▁Qe": 22620,
"▁Tv": 22621,
"▁rosa": 22622,
"花": 22623,
"2,000": 22624,
"atio": 22625,
"stur": 22626,
"▁Aha": 22627,
"▁October": 22628,
"▁action": 22629,
"▁barbat": 22630,
"▁viral": 22631,
"2,50": 22632,
"amar": 22633,
"jam": 22634,
"trol": 22635,
"xal": 22636,
"▁Düz": 22637,
"▁Poland": 22638,
"▁Raya": 22639,
"▁Scene": 22640,
"▁pero": 22641,
"IRE": 22642,
"MUS": 22643,
"Svobod": 22644,
"anu": 22645,
"deck": 22646,
"lih": 22647,
"▁BJP": 22648,
"▁Luki": 22649,
"▁Pita": 22650,
"▁RH": 22651,
"▁Regula": 22652,
"▁geo": 22653,
"Alba": 22654,
"UBE": 22655,
"tib": 22656,
"uara": 22657,
"uw": 22658,
"▁capita": 22659,
"▁motion": 22660,
"andre": 22661,
"▁Slobo": 22662,
"▁Thermo": 22663,
"▁haute": 22664,
"▁Blok": 22665,
"▁Fet": 22666,
"▁RTL": 22667,
"▁SIA": 22668,
":31": 22669,
"DIO": 22670,
"biti": 22671,
"liche": 22672,
"rank": 22673,
"▁Moj": 22674,
"▁Nov": 22675,
"Esp": 22676,
"Quest": 22677,
"kang": 22678,
"▁Erd": 22679,
"▁Fare": 22680,
"▁Gül": 22681,
"▁Lö": 22682,
"▁Srinivas": 22683,
"▁goal": 22684,
":22": 22685,
"▁Figur": 22686,
"▁ele": 22687,
":49": 22688,
"chte": 22689,
"dav": 22690,
"grade": 22691,
"ometr": 22692,
"uwa": 22693,
"vald": 22694,
"ák": 22695,
"▁(27)": 22696,
"▁21:30": 22697,
"▁Complete": 22698,
"▁Esto": 22699,
"▁Verme": 22700,
"▁calca": 22701,
"▁metres": 22702,
"▁simplici": 22703,
"林": 22704,
"onas": 22705,
"tweet": 22706,
"▁Fredrik": 22707,
"▁Present": 22708,
"▁hotel": 22709,
"▁layout": 22710,
"brze": 22711,
"gant": 22712,
"rette": 22713,
"scheid": 22714,
"▁Sinu": 22715,
"▁aqua": 22716,
"▁lave": 22717,
"DEF": 22718,
"Galaxy": 22719,
"INO": 22720,
"rêp": 22721,
"▁Prior": 22722,
"▁front": 22723,
"30,000": 22724,
":21": 22725,
"AVE": 22726,
"UAN": 22727,
"mell": 22728,
"▁Inf": 22729,
"▁Köln": 22730,
"▁Mineral": 22731,
"▁luci": 22732,
"▁where": 22733,
"flow": 22734,
"nita": 22735,
"tici": 22736,
"ére": 22737,
"▁cola": 22738,
"▁late": 22739,
"▁meridional": 22740,
"▁pla": 22741,
"Alt": 22742,
"GUE": 22743,
"Wer": 22744,
"▁Lifestyle": 22745,
"▁Zeit": 22746,
"▁creati": 22747,
"▁gradi": 22748,
"▁moc": 22749,
":48": 22750,
"bergen": 22751,
"liz": 22752,
"rmi": 22753,
"yy": 22754,
"▁Cultural": 22755,
"▁Movement": 22756,
"▁Princip": 22757,
"▁girls": 22758,
"ANC": 22759,
"Tool": 22760,
"Um": 22761,
"▁Government": 22762,
"▁Publisher": 22763,
"▁Sulaiman": 22764,
"▁Tariq": 22765,
"▁koi": 22766,
"▁rib": 22767,
"▁still": 22768,
":28": 22769,
":58": 22770,
"ONA": 22771,
"fjord": 22772,
"inchi": 22773,
"lte": 22774,
"md": 22775,
"▁lig": 22776,
"▁range": 22777,
"Azur": 22778,
"atul": 22779,
"follow": 22780,
"▁Md": 22781,
"▁Mida": 22782,
"▁Teri": 22783,
"▁android": 22784,
"herr": 22785,
"▁Debe": 22786,
"▁Leia": 22787,
"▁MIK": 22788,
"▁limon": 22789,
"永": 22790,
"OME": 22791,
"[13]": 22792,
"gud": 22793,
"kreis": 22794,
"tiya": 22795,
"tunt": 22796,
"▁Bogu": 22797,
"▁Ew": 22798,
"▁Haq": 22799,
"Nar": 22800,
"fru": 22801,
"hug": 22802,
"llu": 22803,
"nale": 22804,
"nyt": 22805,
"rza": 22806,
"yazı": 22807,
"▁15:30": 22808,
"▁Ege": 22809,
"▁Nama": 22810,
"▁Pahang": 22811,
"▁Relation": 22812,
"▁Sancti": 22813,
"▁Sanjay": 22814,
"▁ari": 22815,
"▁filter": 22816,
"▁said": 22817,
"VOD": 22818,
"alai": 22819,
"folio": 22820,
"mee": 22821,
"nok": 22822,
"toko": 22823,
"▁Hacı": 22824,
"▁Release": 22825,
"▁Visit": 22826,
"▁would": 22827,
"▁zir": 22828,
"LIT": 22829,
"LUS": 22830,
"kse": 22831,
"kuri": 22832,
"llari": 22833,
"lul": 22834,
"▁Sofa": 22835,
"▁Verdi": 22836,
"水": 22837,
":27": 22838,
"erant": 22839,
"iPad": 22840,
"rabia": 22841,
"shay": 22842,
"▁17:30": 22843,
"▁Bahar": 22844,
"▁Deutschland": 22845,
"▁Garmin": 22846,
"▁Ukrain": 22847,
"▁park": 22848,
"ORE": 22849,
"invest": 22850,
"laza": 22851,
"metal": 22852,
"▁Dho": 22853,
"▁Hér": 22854,
"▁Jau": 22855,
"▁Jest": 22856,
"▁SEP": 22857,
"▁taka": 22858,
"Bil": 22859,
"HIN": 22860,
"centi": 22861,
"gah": 22862,
"lgi": 22863,
"▁Antón": 22864,
"▁Dictionary": 22865,
"▁Sept": 22866,
"▁stock": 22867,
"▁Álvaro": 22868,
"CEN": 22869,
"carna": 22870,
"dessus": 22871,
"guli": 22872,
"kien": 22873,
"oire": 22874,
"▁Buddha": 22875,
"▁DỤNG": 22876,
"▁fel": 22877,
"▁lade": 22878,
"Demo": 22879,
"Om": 22880,
"rasa": 22881,
"sze": 22882,
"telo": 22883,
"▁Alber": 22884,
"▁Portugal": 22885,
"▁catalog": 22886,
"▁ever": 22887,
"▁woman": 22888,
"matta": 22889,
"mpang": 22890,
"plug": 22891,
"stige": 22892,
"▁Oer": 22893,
"▁Statist": 22894,
"▁allo": 22895,
"▁aus": 22896,
"▁local": 22897,
"STER": 22898,
"erade": 22899,
"safe": 22900,
"özü": 22901,
"▁Reason": 22902,
"▁These": 22903,
"▁field": 22904,
"▁peti": 22905,
"▁push": 22906,
"nnen": 22907,
"pm": 22908,
"uden": 22909,
"urra": 22910,
"▁CCM": 22911,
"▁Giresun": 22912,
"▁Gé": 22913,
"▁Knut": 22914,
"▁angol": 22915,
"▁mara": 22916,
"rapat": 22917,
"vv": 22918,
"▁Kalam": 22919,
"▁Later": 22920,
"▁Leadership": 22921,
"▁Todo": 22922,
"▁ble": 22923,
"▁swing": 22924,
":54": 22925,
"pell": 22926,
"▁Karimov": 22927,
"▁league": 22928,
"blas": 22929,
"krati": 22930,
"pach": 22931,
"that": 22932,
"▁16:30": 22933,
"▁Fahr": 22934,
"▁Telekom": 22935,
"▁gé": 22936,
"▁kami": 22937,
"▁mori": 22938,
"Click": 22939,
"Patri": 22940,
"[14]": 22941,
"▁Bazar": 22942,
"▁Fisch": 22943,
"▁Selim": 22944,
"CRA": 22945,
"Control": 22946,
"Page": 22947,
"liza": 22948,
"pere": 22949,
"▁Meeting": 22950,
"TTI": 22951,
"insa": 22952,
"thema": 22953,
"▁Diamant": 22954,
"▁bunge": 22955,
"▁hry": 22956,
"▁pac": 22957,
"聖": 22958,
"ADO": 22959,
"LOC": 22960,
"amos": 22961,
"brig": 22962,
"dv": 22963,
"▁Guarda": 22964,
"▁Mater": 22965,
"▁Qab": 22966,
":34": 22967,
":46": 22968,
"prat": 22969,
"tott": 22970,
"yv": 22971,
"▁Geri": 22972,
"▁OFF": 22973,
"▁İzmir": 22974,
"阮": 22975,
"hok": 22976,
"mpar": 22977,
"termin": 22978,
"website": 22979,
"▁Economy": 22980,
"▁Prac": 22981,
"▁Quel": 22982,
"▁bry": 22983,
"▁speed": 22984,
"FIN": 22985,
"®": 22986,
"▁Estudi": 22987,
"▁Hapa": 22988,
"▁kir": 22989,
"JET": 22990,
"nbsp": 22991,
"pendi": 22992,
"éra": 22993,
"▁Simba": 22994,
"▁render": 22995,
"Orient": 22996,
"odu": 22997,
"stos": 22998,
"sé": 22999,
"tram": 23000,
"ums": 23001,
"vc": 23002,
"′′": 23003,
"▁Janne": 23004,
"▁Kristo": 23005,
"▁macar": 23006,
"▁sinua": 23007,
"duce": 23008,
"nag": 23009,
"nike": 23010,
"wet": 23011,
"▁Dzi": 23012,
"▁Narayan": 23013,
"▁TAB": 23014,
"▁cun": 23015,
"▁half": 23016,
"▁lien": 23017,
"Aus": 23018,
"▁Fy": 23019,
"▁Pinang": 23020,
"▁Sensor": 23021,
"▁Wert": 23022,
"▁dark": 23023,
"nze": 23024,
"qay": 23025,
"rde": 23026,
"rile": 23027,
"▁Iqbal": 23028,
"▁Krieg": 23029,
"▁tips": 23030,
"▁Çanakkale": 23031,
| |
RECORD IOD': ['Treatment Record'],
},
# AnnotationPosition
0x20300010L: {
'BASIC ANNOTATION BOX IOD': ['Basic Annotation Box'],
None: ['Basic Annotation Box'],
},
# ScheduledHumanPerformersSequence
0x00404034L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
None: ['Unified Procedure Step', 'General Purpose Scheduled Procedure Step'],
},
# AgeCorrectedSensitivityDeviationAlgorithmSequence
0x00240065L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# FieldOfViewOrigin
0x00187030L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# SamplesPerPixelUsed
0x00280003L: {
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
None: ['Image'],
},
# KeratometryLeftEyeSequence
0x00460071L: {
'KERATOMETRY MEASUREMENTS IOD': ['Equipment'],
None: ['Equipment'],
},
# IssuerOfPatientID
0x00100021L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT ION MACHINE VERIFICATION IOD': ['Rt Ion Machine Verification'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'RT CONVENTIONAL MACHINE VERIFICATION IOD': ['Rt Conventional Machine Verification'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient', 'Rt Ion Machine Verification', 'Unified Procedure Step', 'Modality Performed Procedure Step', 'Rt Conventional Machine Verification', 'General Purpose Performed Procedure Step', 'General Purpose Scheduled Procedure Step'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# ScheduledProcedureStepModificationDateTime
0x00404010L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
None: ['Unified Procedure Step', 'General Purpose Scheduled Procedure Step'],
},
# StructuredDisplayBackgroundCIELabValue
0x00720420L: {
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
None: ['Presentation State'],
},
# DepthOfScanField
0x00185050L: {
'US MULTI-FRAME IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'US IMAGE IOD': ['Image'],
None: ['Image'],
},
# RTImageName
0x30020003L: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# CalciumScoringMassFactorDevice
0x00189352L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
},
# ViewingDistanceType
0x00460125L: {
'VISUAL ACUITY MEASUREMENTS IOD': ['Equipment'],
None: ['Equipment'],
},
# GeneralPurposeScheduledProcedureStepPriority
0x00404003L: {
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
None: ['General Purpose Scheduled Procedure Step'],
},
# ScheduledProcedureStepPriority
0x00741200L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
None: ['Unified Procedure Step'],
},
# ImagedVolumeDepth
0x00480003L: {
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
None: ['Image'],
},
# StructureSetName
0x30060004L: {
'RT STRUCTURE SET IOD': ['Structure Set'],
'RT DOSE IOD': ['Dose'],
None: ['Structure Set', 'Dose'],
},
# GridPeriod
0x00187048L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
},
# FrameNumbersOfInterest
0x00286020L: {
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# DetectorType
0x00187004L: {
'XRF IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
None: ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# DoseReferenceSequence
0x300A0010L: {
'RT ION PLAN IOD': ['Plan'],
'RT PLAN IOD': ['Plan'],
None: ['Plan'],
},
# AcquisitionDeviceProcessingDescription
0x00181400L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# SynchronizationFrameOfReferenceUID
0x00200200L: {
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Frame of Reference'],
None: ['Frame of Reference'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Frame of Reference'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Frame of Reference'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Frame of Reference'],
'12-LEAD ECG IOD': ['Frame of Reference'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Frame of Reference'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Frame of Reference'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Frame of Reference'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Frame of Reference'],
'BASIC CARDIAC EP IOD': ['Frame of Reference'],
'BASIC VOICE AUDIO IOD': ['Frame of Reference'],
'HEMODYNAMIC IOD': ['Frame of Reference'],
'US IMAGE IOD': ['Frame of Reference'],
'AMBULATORY ECG IOD': ['Frame of Reference'],
'GENERAL ECG IOD': ['Frame of Reference'],
'XRF IMAGE IOD': ['Frame of Reference'],
'RESPIRATORY WAVEFORM IOD': ['Frame of Reference'],
'X-RAY RADIATION DOSE SR IOD': ['Frame of Reference'],
'GENERAL AUDIO WAVEFORM IOD': ['Frame of Reference'],
'PROCEDURE LOG IOD': ['Frame of Reference'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Frame of Reference'],
'ARTERIAL PULSE WAVEFORM IOD': ['Frame of Reference'],
},
# CassetteOrientation
0x00181402L: {
'CR IMAGE IOD': ['Image'],
None: ['Image'],
},
# ReferencedFractionGroupNumber
0x300C0022L: {
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
None: ['Treatment Record', 'Rt Ion Machine Verification', 'Rt Conventional Machine Verification', 'Image'],
'RT ION MACHINE VERIFICATION IOD': ['Rt Ion Machine Verification'],
'RT BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'RT CONVENTIONAL MACHINE VERIFICATION IOD': ['Rt Conventional Machine Verification'],
'RT IMAGE IOD': ['Image'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
},
# ExposuresOnPlate
0x00181404L: {
'CR IMAGE IOD': ['Image'],
None: ['Image'],
},
# RelativeXRayExposure
0x00181405L: {
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
None: ['Image'],
},
# | |
notebook_type = notebook.get('type', 'notebook')
save_as = False
if notebook.get('parentSavedQueryUuid'): # We save into the original saved query, not into the query history
notebook_doc = Document2.objects.get_by_uuid(user=user, uuid=notebook['parentSavedQueryUuid'])
elif notebook.get('id'):
notebook_doc = Document2.objects.get(id=notebook['id'])
else:
notebook_doc = Document2.objects.create(name=notebook['name'], uuid=notebook['uuid'], type=notebook_type, owner=user)
Document.objects.link(
notebook_doc, owner=notebook_doc.owner, name=notebook_doc.name, description=notebook_doc.description, extra=notebook_type
)
save_as = True
if notebook.get('directoryUuid'):
notebook_doc.parent_directory = Document2.objects.get_by_uuid(user=user, uuid=notebook.get('directoryUuid'), perm_type='write')
else:
notebook_doc.parent_directory = Document2.objects.get_home_directory(user)
notebook['isSaved'] = True
notebook['isHistory'] = False
notebook['id'] = notebook_doc.id
_clear_sessions(notebook)
notebook_doc1 = notebook_doc._get_doc1(doc2_type=notebook_type)
if ENABLE_CONNECTORS.get():
notebook_doc.connector_id = int(notebook['snippets'][0]['connector']['type'])
notebook_doc.update_data(notebook)
notebook_doc.search = _get_statement(notebook)
notebook_doc.name = notebook_doc1.name = notebook['name']
notebook_doc.description = notebook_doc1.description = notebook['description']
notebook_doc.save()
notebook_doc1.save()
return notebook_doc, save_as
@api_error_handler
@require_POST
@check_document_modify_permission()
def save_notebook(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
notebook_doc, save_as = _save_notebook(notebook, request.user)
response['status'] = 0
response['save_as'] = save_as
response.update(notebook_doc.to_dict())
response['message'] = _('Query saved successfully') if request.POST.get('editorMode') == 'true' else _('Notebook saved successfully')
return JsonResponse(response)
def _clear_sessions(notebook):
notebook['sessions'] = [_s for _s in notebook['sessions'] if _s['type'] in ('scala', 'spark', 'pyspark', 'sparkr', 'r')]
def _historify(notebook, user):
query_type = 'query-%(dialect)s' % notebook if ENABLE_CONNECTORS.get() else notebook['type']
name = notebook['name'] if (notebook['name'] and notebook['name'].strip() != '') else DEFAULT_HISTORY_NAME
is_managed = notebook.get('isManaged') == True # Prevents None
if is_managed and Document2.objects.filter(uuid=notebook['uuid']).exists():
history_doc = Document2.objects.get(uuid=notebook['uuid'])
else:
history_doc = Document2.objects.create(
name=name,
type=query_type,
owner=user,
is_history=True,
is_managed=is_managed,
)
# Link history of saved query
if notebook['isSaved']:
# From previous history query or initial saved query
parent_doc = Document2.objects.get(uuid=notebook.get('parentSavedQueryUuid') or notebook['uuid'])
notebook['parentSavedQueryUuid'] = parent_doc.uuid
history_doc.dependencies.add(parent_doc)
if not is_managed:
Document.objects.link(
history_doc,
name=history_doc.name,
owner=history_doc.owner,
description=history_doc.description,
extra=query_type
)
notebook['uuid'] = history_doc.uuid
_clear_sessions(notebook)
if ENABLE_CONNECTORS.get():
history_doc.connector_id = int(notebook['type'].split('-')[1])
history_doc.update_data(notebook)
history_doc.search = _get_statement(notebook)
history_doc.save()
return history_doc
def _get_statement(notebook):
if notebook['snippets'] and len(notebook['snippets']) > 0:
snippet = notebook['snippets'][0]
try:
if snippet.get('executor', {}).get('executables', []): # With Connectors/Editor 2
executable = snippet['executor']['executables'][0]
if executable.get('handle'):
return executable['handle']['statement']
else:
return executable['parsedStatement']['statement']
return Notebook.statement_with_variables(snippet)
except KeyError as e:
LOG.warning('Could not get statement from query history: %s' % e)
return ''
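# Illustrative sketch (not part of the original module): the minimal notebook
# shape that _get_statement() above understands for the connectors / Editor 2
# path. The SQL text is hypothetical.
def _example_get_statement():
    notebook = {
        'snippets': [{
            'executor': {
                'executables': [{
                    'handle': {'statement': 'SELECT 1'},
                }],
            },
        }],
    }
    return _get_statement(notebook)  # -> 'SELECT 1'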
@require_GET
@api_error_handler
@check_document_access_permission
def get_history(request):
response = {'status': -1}
doc_type = request.GET.get('doc_type')
doc_text = request.GET.get('doc_text')
connector_id = request.GET.get('doc_connector')
page = min(int(request.GET.get('page', 1)), 100)
limit = min(int(request.GET.get('limit', 50)), 100)
is_notification_manager = request.GET.get('is_notification_manager', 'false') == 'true'
if is_notification_manager:
docs = Document2.objects.get_tasks_history(user=request.user)
else:
docs = Document2.objects.get_history(doc_type='query-%s' % doc_type, connector_id=connector_id, user=request.user)
if doc_text:
docs = docs.filter(Q(name__icontains=doc_text) | Q(description__icontains=doc_text) | Q(search__icontains=doc_text))
# Paginate
docs = docs.order_by('-last_modified')
response['count'] = docs.count()
docs = __paginate(page, limit, queryset=docs)['documents']
history = []
for doc in docs:
notebook = Notebook(document=doc).get_data()
if 'snippets' in notebook:
statement = notebook['description'] if is_notification_manager else _get_statement(notebook)
history.append({
'name': doc.name,
'id': doc.id,
'uuid': doc.uuid,
'type': doc.type,
'data': {
'statement': statement[:1001] if statement else '',
'lastExecuted': notebook['snippets'][0].get('lastExecuted', -1),
'status': notebook['snippets'][0].get('status', ''),
'parentSavedQueryUuid': notebook.get('parentSavedQueryUuid', '')
} if notebook['snippets'] else {},
'absoluteUrl': doc.get_absolute_url(),
})
else:
LOG.error('Incomplete History Notebook: %s' % notebook)
response['history'] = sorted(history, key=lambda row: row['data']['lastExecuted'], reverse=True)
response['message'] = _('History fetched')
response['status'] = 0
return JsonResponse(response)
@require_POST
@api_error_handler
@check_document_modify_permission()
def clear_history(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
doc_type = request.POST.get('doc_type')
is_notification_manager = request.POST.get('is_notification_manager', 'false') == 'true'
if is_notification_manager:
history = Document2.objects.get_tasks_history(user=request.user, allow_distinct=False)
else:
history = Document2.objects.get_history(doc_type='query-%s' % doc_type, user=request.user, allow_distinct=False)
response['updated'] = history.delete()
response['message'] = _('History cleared !')
response['status'] = 0
return JsonResponse(response)
@require_GET
@check_document_access_permission
def open_notebook(request):
response = {'status': -1}
notebook_id = request.GET.get('notebook')
notebook = Notebook(document=Document2.objects.get(id=notebook_id))
notebook = upgrade_session_properties(request, notebook)
response['status'] = 0
response['notebook'] = notebook.get_json()
response['message'] = _('Notebook loaded successfully')
return JsonResponse(response)
@require_POST
@check_document_access_permission
def close_notebook(request):
response = {'status': -1, 'result': []}
notebook = json.loads(request.POST.get('notebook', '{}'))
for session in [_s for _s in notebook['sessions']]:
try:
api = get_api(request, session)
if hasattr(api, 'close_session_idle'):
response['result'].append(api.close_session_idle(notebook, session))
else:
response['result'].append(api.close_session(session))
except QueryExpired:
pass
except Exception as e:
LOG.exception('Error closing session %s' % str(e))
return JsonResponse(response)
@require_POST
@check_document_access_permission
def close_statement(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
operation_id = request.POST.get('operationId')
if operation_id and not notebook.get('uuid'):
notebook['uuid'] = operation_id
try:
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-close_statement') as span:
response['result'] = get_api(request, snippet).close_statement(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
except QueryExpired:
response['message'] = _('Query already expired.')
except FilesystemException:
response['message'] = _('Query id could not be found.')
else:
response['message'] = _('Query closed.')
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def autocomplete(request, server=None, database=None, table=None, column=None, nested=None):
response = {'status': -1}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
action = request.POST.get('operation', 'schema')
try:
autocomplete_data = get_api(request, snippet).autocomplete(snippet, database, table, column, nested, action)
response.update(autocomplete_data)
except QueryExpired as e:
LOG.warning('Expired query seen: %s' % e)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def get_sample_data(request, server=None, database=None, table=None, column=None):
response = {'status': -1}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
is_async = json.loads(request.POST.get('async', 'false'))
operation = json.loads(request.POST.get('operation', '"default"'))
sample_data = get_api(request, snippet).get_sample_data(snippet, database, table, column, is_async=is_async, operation=operation)
response.update(sample_data)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def explain(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
response = get_api(request, snippet).explain(notebook, snippet)
return JsonResponse(response)
@require_POST
@api_error_handler
def format(request):
response = {'status': 0}
statements = request.POST.get('statements', '')
response['formatted_statements'] = sqlparse.format(statements, reindent=True, keyword_case='upper') # SQL only currently
return JsonResponse(response)
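# Illustrative sketch (not part of the original module): the effect of the
# sqlparse call used by format() above on a hypothetical statement.
def _example_format_statements():
    return sqlparse.format(
        'select id, name from users where active = 1',
        reindent=True,
        keyword_case='upper',
    )
    # Returns roughly:
    #   SELECT id,
    #          name
    #   FROM users
    #   WHERE active = 1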
@require_POST
@check_document_access_permission
@api_error_handler
def export_result(request):
response = {'status': -1, 'message': _('Success')}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
data_format = json.loads(request.POST.get('format', '"hdfs-file"'))
destination = urllib_unquote(json.loads(request.POST.get('destination', '""')))
overwrite = json.loads(request.POST.get('overwrite', 'false'))
is_embedded = json.loads(request.POST.get('is_embedded', 'false'))
start_time = json.loads(request.POST.get('start_time', '-1'))
api = get_api(request, snippet)
if data_format == 'hdfs-file': # Blocking operation, like downloading
if request.fs.isdir(destination):
if notebook.get('name'):
destination += '/%(name)s.csv' % notebook
else:
destination += '/%(type)s-%(id)s.csv' % notebook
if overwrite and request.fs.exists(destination):
request.fs.do_as_user(request.user.username, request.fs.rmtree, destination)
response['watch_url'] = api.export_data_as_hdfs_file(snippet, destination, overwrite)
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to HDFS destination: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format == 'hive-table':
if is_embedded:
sql, success_url = api.export_data_as_table(notebook, snippet, destination)
task = make_notebook(
name=_('Export %s query to table %s') % (snippet['type'], destination),
description=_('Query %s to %s') % (_get_snippet_name(notebook), success_url),
editor_type=snippet['type'],
statement=sql,
status='ready',
database=snippet['database'],
on_success_url=success_url,
last_executed=start_time,
is_task=True
)
response = task.execute(request)
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=save_as_table&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to Hive table: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format == 'hdfs-directory':
if destination.lower().startswith("abfs"):
destination = abfspath(destination)
if request.fs.exists(destination) and request.fs.listdir_stats(destination):
raise PopupException(_('The destination is not an empty directory!'))
if is_embedded:
sql, success_url = api.export_large_data_to_hdfs(notebook, snippet, destination)
task = make_notebook(
name=_('Export %s query to directory') % snippet['type'],
description=_('Query %s to %s') % (_get_snippet_name(notebook), success_url),
editor_type=snippet['type'],
statement=sql,
status='ready-execute',
database=snippet['database'],
on_success_url=success_url,
last_executed=start_time,
is_task=True
)
response = task.execute(request)
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=insert_as_query&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to HDFS directory: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format in ('search-index', 'dashboard'):
# Open the result in the Dashboard via a SQL sub-query or the Import wizard (quick vs scalable)
if is_embedded:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
if data_format == 'dashboard':
engine = notebook['type'].replace('query-', '')
response['watch_url'] = reverse(
'dashboard:browse',
kwargs={'name': notebook_id}
) + '?source=query&engine=%(engine)s' % {'engine': engine}
response['status'] = 0
else:
sample = get_api(request, snippet).fetch_result(notebook, snippet, rows=4, start_over=True)
for col in sample['meta']:
col['type'] = HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')
response['status'] = 0
response['id'] = notebook_id
response['name'] = _get_snippet_name(notebook)
response['source_type'] = 'query'
response['target_type'] = 'index'
response['target_path'] = destination
response['sample'] = list(sample['data'])
response['columns'] = [
Field(col['name'], col['type']).to_dict() for col in sample['meta']
]
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=index_query&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
if response.get('status') != 0:
response['message'] = _('Exporting result failed.')
return JsonResponse(response)
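# Illustrative sketch (not part of the original module): the POST fields read
# by export_result() above for the blocking 'hdfs-file' path. All values are
# hypothetical, and each field is a JSON-encoded string to match the
# json.loads() calls at the top of the view.
def _example_export_result_post_fields():
    return {
        'notebook': json.dumps({'id': 123, 'name': 'my query', 'type': 'query-hive', 'snippets': []}),
        'snippet': json.dumps({'type': 'hive', 'database': 'default'}),
        'format': json.dumps('hdfs-file'),
        'destination': json.dumps('/user/demo/results'),
        'overwrite': json.dumps(False),
        'is_embedded': json.dumps(False),
        'start_time': json.dumps(-1),
    }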
@require_POST
@check_document_access_permission
@api_error_handler
def statement_risk(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
interface = request.POST.get('interface', OPTIMIZER.INTERFACE.get())
api = get_api(request, snippet)
response['query_complexity'] = api.statement_risk(interface, notebook, snippet)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def statement_compatibility(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
interface = request.POST.get('interface', OPTIMIZER.INTERFACE.get())
source_platform = request.POST.get('sourcePlatform')
target_platform = request.POST.get('targetPlatform')
api = get_api(request, snippet)
response['query_compatibility'] = api.statement_compatibility(
interface,
notebook,
snippet,
source_platform=source_platform,
target_platform=target_platform
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def statement_similarity(request):
response = {'status': -1, 'message': ''}
| |
Optional["ResourceId"] = None,
application_sharing_policy: Optional[Union[str, "ApplicationSharingPolicy"]] = "Shared",
ssh_settings: Optional["ComputeInstanceSshSettings"] = None,
**kwargs
):
super(ComputeInstanceProperties, self).__init__(**kwargs)
self.vm_size = vm_size
self.subnet = subnet
self.application_sharing_policy = application_sharing_policy
self.ssh_settings = ssh_settings
self.connectivity_endpoints = None
self.applications = None
self.created_by = None
self.errors = None
self.state = None
self.last_operation = None
class ComputeInstanceSshSettings(msrest.serialization.Model):
"""Specifies policy and settings for SSH access.
Variables are only populated by the server, and will be ignored when sending a request.
:param ssh_public_access: State of the public SSH port. Possible values are: Disabled -
Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the
public ssh port is open and accessible according to the VNet/subnet policy if applicable.
Possible values include: "Enabled", "Disabled". Default value: "Disabled".
:type ssh_public_access: str or ~azure.mgmt.machinelearningservices.models.SshPublicAccess
:ivar admin_user_name: Describes the admin user name.
:vartype admin_user_name: str
:ivar ssh_port: Describes the port for connecting through SSH.
:vartype ssh_port: int
:param admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t
rsa -b 2048" to generate your SSH key pairs.
:type admin_public_key: str
"""
_validation = {
'admin_user_name': {'readonly': True},
'ssh_port': {'readonly': True},
}
_attribute_map = {
'ssh_public_access': {'key': 'sshPublicAccess', 'type': 'str'},
'admin_user_name': {'key': 'adminUserName', 'type': 'str'},
'ssh_port': {'key': 'sshPort', 'type': 'int'},
'admin_public_key': {'key': 'adminPublicKey', 'type': 'str'},
}
def __init__(
self,
*,
ssh_public_access: Optional[Union[str, "SshPublicAccess"]] = "Disabled",
admin_public_key: Optional[str] = None,
**kwargs
):
super(ComputeInstanceSshSettings, self).__init__(**kwargs)
self.ssh_public_access = ssh_public_access
self.admin_user_name = None
self.ssh_port = None
self.admin_public_key = admin_public_key
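# Illustrative sketch (not part of the generated SDK module): constructing the
# SSH settings defined above. The public key string is a placeholder;
# admin_user_name and ssh_port remain None locally because they are read-only,
# server-populated fields.
def _example_compute_instance_ssh_settings():
    settings = ComputeInstanceSshSettings(
        ssh_public_access="Enabled",
        admin_public_key="ssh-rsa AAAA... placeholder-key",
    )
    return settings.ssh_public_access, settings.admin_user_name, settings.ssh_port
    # -> ("Enabled", None, None)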
class Resource(msrest.serialization.Model):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.identity = identity
self.location = location
self.type = None
self.tags = tags
self.sku = sku
class ComputeResource(Resource):
"""Machine Learning compute object wrapped into ARM resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:param properties: Compute properties.
:type properties: ~azure.mgmt.machinelearningservices.models.Compute
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'properties': {'key': 'properties', 'type': 'Compute'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
properties: Optional["Compute"] = None,
**kwargs
):
super(ComputeResource, self).__init__(identity=identity, location=location, tags=tags, sku=sku, **kwargs)
self.properties = properties
class Databricks(Compute):
"""A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'DatabricksProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["DatabricksProperties"] = None,
**kwargs
):
super(Databricks, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'Databricks' # type: str
self.properties = properties
class DatabricksComputeSecrets(ComputeSecrets):
"""Secrets related to a Machine Learning compute based on Databricks.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param databricks_access_token: access token for databricks account.
:type databricks_access_token: str
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'databricks_access_token': {'key': 'databricksAccessToken', 'type': 'str'},
}
def __init__(
self,
*,
databricks_access_token: Optional[str] = None,
**kwargs
):
super(DatabricksComputeSecrets, self).__init__(**kwargs)
self.compute_type = 'Databricks' # type: str
self.databricks_access_token = databricks_access_token
class DatabricksProperties(msrest.serialization.Model):
"""DatabricksProperties.
:param databricks_access_token: Databricks access token.
:type databricks_access_token: str
"""
_attribute_map = {
'databricks_access_token': {'key': 'databricksAccessToken', 'type': 'str'},
}
def __init__(
self,
*,
databricks_access_token: Optional[str] = None,
**kwargs
):
super(DatabricksProperties, self).__init__(**kwargs)
self.databricks_access_token = databricks_access_token
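# Illustrative sketch (not part of the generated SDK module): how the models
# above compose when attaching an existing Databricks workspace as compute.
# The resource id, location and access token are placeholders.
def _example_databricks_compute_resource():
    databricks = Databricks(
        description="Attached Databricks workspace",
        resource_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/"
                    "Microsoft.Databricks/workspaces/<workspace>",
        properties=DatabricksProperties(databricks_access_token="<placeholder-token>"),
    )
    return ComputeResource(location="eastus", properties=databricks)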
class DataFactory(Compute):
"""A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': | |
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Provide accessors to enhance interoperability between XArray and MetPy."""
from __future__ import absolute_import
import functools
import logging
import re
import warnings
import xarray as xr
from xarray.core.accessors import DatetimeAccessor
from xarray.core.indexing import expanded_indexer
from xarray.core.utils import either_dict_or_kwargs, is_dict_like
from .units import DimensionalityError, units
__all__ = []
readable_to_cf_axes = {'time': 'T', 'vertical': 'Z', 'y': 'Y', 'x': 'X'}
cf_to_readable_axes = {readable_to_cf_axes[key]: key for key in readable_to_cf_axes}
log = logging.getLogger(__name__)
@xr.register_dataarray_accessor('metpy')
class MetPyAccessor(object):
"""Provide custom attributes and methods on XArray DataArray for MetPy functionality."""
def __init__(self, data_array):
"""Initialize accessor with a DataArray."""
self._data_array = data_array
self._units = self._data_array.attrs.get('units', 'dimensionless')
@property
def units(self):
return units(self._units)
@property
def unit_array(self):
"""Return data values as a `pint.Quantity`."""
return self._data_array.values * self.units
@unit_array.setter
def unit_array(self, values):
"""Set data values as a `pint.Quantity`."""
self._data_array.values = values
self._units = self._data_array.attrs['units'] = str(values.units)
def convert_units(self, units):
"""Convert the data values to different units in-place."""
self.unit_array = self.unit_array.to(units)
@property
def crs(self):
"""Provide easy access to the `crs` coordinate."""
if 'crs' in self._data_array.coords:
return self._data_array.coords['crs'].item()
raise AttributeError('crs attribute is not available.')
@property
def cartopy_crs(self):
"""Return the coordinate reference system (CRS) as a cartopy object."""
return self.crs.to_cartopy()
@property
def cartopy_globe(self):
"""Return the globe belonging to the coordinate reference system (CRS)."""
return self.crs.cartopy_globe
def _axis(self, axis):
"""Return the coordinate variable corresponding to the given individual axis type."""
if axis in readable_to_cf_axes:
for coord_var in self._data_array.coords.values():
if coord_var.attrs.get('_metpy_axis') == readable_to_cf_axes[axis]:
return coord_var
raise AttributeError(axis + ' attribute is not available.')
else:
raise AttributeError('\'' + axis + '\' is not an interpretable axis.')
def coordinates(self, *args):
"""Return the coordinate variables corresponding to the given axes types."""
for arg in args:
yield self._axis(arg)
@property
def time(self):
return self._axis('time')
@property
def vertical(self):
return self._axis('vertical')
@property
def y(self):
return self._axis('y')
@property
def x(self):
return self._axis('x')
def coordinates_identical(self, other):
"""Return whether or not the coordinates of other match this DataArray's."""
# If the number of coordinates do not match, we know they can't match.
if len(self._data_array.coords) != len(other.coords):
return False
# If same length, iterate over all of them and check
for coord_name, coord_var in self._data_array.coords.items():
if coord_name not in other.coords or not other[coord_name].identical(coord_var):
return False
# Otherwise, they match.
return True
def as_timestamp(self):
"""Return the data as unix timestamp (for easier time derivatives)."""
attrs = {key: self._data_array.attrs[key] for key in
{'standard_name', 'long_name', 'axis', '_metpy_axis'}
& set(self._data_array.attrs)}
attrs['units'] = 'seconds'
return xr.DataArray(self._data_array.values.astype('datetime64[s]').astype('int'),
name=self._data_array.name,
coords=self._data_array.coords,
dims=self._data_array.dims,
attrs=attrs)
def find_axis_name(self, axis):
"""Return the name of the axis corresponding to the given identifier.
The given indentifer can be an axis number (integer), dimension coordinate name
(string) or a standard axis type (string).
"""
if isinstance(axis, int):
# If an integer, use the corresponding dimension
return self._data_array.dims[axis]
elif axis not in self._data_array.dims and axis in readable_to_cf_axes:
# If not a dimension name itself, but a valid axis type, get the name of the
# coordinate corresponding to that axis type
return self._axis(axis).name
elif axis in self._data_array.dims and axis in self._data_array.coords:
# If this is a dimension coordinate name, use it directly
return axis
else:
# Otherwise, not valid
raise ValueError('Given axis is not valid. Must be an axis number, a dimension '
'coordinate name, or a standard axis type.')
class _LocIndexer(object):
"""Provide the unit-wrapped .loc indexer for data arrays."""
def __init__(self, data_array):
self.data_array = data_array
def expand(self, key):
"""Parse key using xarray utils to ensure we have dimension names."""
if not is_dict_like(key):
labels = expanded_indexer(key, self.data_array.ndim)
key = dict(zip(self.data_array.dims, labels))
return key
def __getitem__(self, key):
key = _reassign_quantity_indexer(self.data_array, self.expand(key))
return self.data_array.loc[key]
def __setitem__(self, key, value):
key = _reassign_quantity_indexer(self.data_array, self.expand(key))
self.data_array.loc[key] = value
@property
def loc(self):
"""Make the LocIndexer available as a property."""
return self._LocIndexer(self._data_array)
def sel(self, indexers=None, method=None, tolerance=None, drop=False, **indexers_kwargs):
"""Wrap DataArray.sel to handle units."""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'sel')
indexers = _reassign_quantity_indexer(self._data_array, indexers)
return self._data_array.sel(indexers, method=method, tolerance=tolerance, drop=drop)
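# Illustrative sketch (not part of the original module): what the DataArray
# accessor above provides. The coordinate name, sizes and values here are
# hypothetical; only the handling of the 'units' attribute is the point.
def _example_dataarray_accessor():
    import numpy as np
    pressure = xr.DataArray(
        np.linspace(1000., 100., 10),
        dims=('isobaric',),
        coords={'isobaric': np.linspace(1000., 100., 10)},
        name='pressure',
        attrs={'units': 'hPa'},
    )
    as_quantity = pressure.metpy.unit_array       # pint.Quantity carrying hPa
    pressure.metpy.convert_units('Pa')            # in-place conversion, updates attrs['units']
    dim_name = pressure.metpy.find_axis_name(0)   # axis number -> 'isobaric'
    return as_quantity, dim_name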
@xr.register_dataset_accessor('metpy')
class CFConventionHandler(object):
"""Provide custom attributes and methods on XArray Dataset for MetPy functionality."""
def __init__(self, dataset):
"""Initialize accessor with a Dataset."""
self._dataset = dataset
def parse_cf(self, varname=None, coordinates=None):
"""Parse Climate and Forecasting (CF) convention metadata."""
from .plots.mapping import CFProjection
# If no varname is given, parse the entire dataset
if varname is None:
return self._dataset.apply(lambda da: self.parse_cf(da.name,
coordinates=coordinates))
var = self._dataset[varname]
if 'grid_mapping' in var.attrs:
proj_name = var.attrs['grid_mapping']
try:
proj_var = self._dataset.variables[proj_name]
except KeyError:
log.warning(
'Could not find variable corresponding to the value of '
'grid_mapping: {}'.format(proj_name))
else:
var.coords['crs'] = CFProjection(proj_var.attrs)
self._fixup_coords(var)
# Trying to guess whether we should be adding a crs to this variable's coordinates
# First make sure it's missing CRS but isn't lat/lon itself
if not self.check_axis(var, 'lat', 'lon') and 'crs' not in var.coords:
# Look for both lat/lon in the coordinates
has_lat = has_lon = False
for coord_var in var.coords.values():
has_lat = has_lat or self.check_axis(coord_var, 'lat')
has_lon = has_lon or self.check_axis(coord_var, 'lon')
# If we found them, create a lat/lon projection as the crs coord
if has_lat and has_lon:
var.coords['crs'] = CFProjection({'grid_mapping_name': 'latitude_longitude'})
# Obtain a map of axis types to coordinate variables
if coordinates is None:
# Generate the map from the supplied coordinates
coordinates = self._generate_coordinate_map(var.coords.values())
else:
# Verify that coordinates maps to coordinate variables, not coordinate names
self._fixup_coordinate_map(coordinates, var)
# Overwrite previous axis attributes, and use the coordinates to label anew
self._assign_axes(coordinates, var)
return var
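# Illustrative usage sketch (comments only, not part of the original module):
# with the dataset accessor above registered, a CF-aware parse typically looks
# like the following; the file and variable names are hypothetical.
#
#     ds = xr.open_dataset('narr_example.nc')
#     temperature = ds.metpy.parse_cf('Temperature')
#     temperature.metpy.x          # coordinate labelled as the X axis
#     temperature.coords['crs']    # CFProjection built from the grid mapping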
# Define the criteria for coordinate matches
criteria = {
'standard_name': {
'time': 'time',
'vertical': {'air_pressure', 'height', 'geopotential_height', 'altitude',
'model_level_number', 'atmosphere_ln_pressure_coordinate',
'atmosphere_sigma_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'atmosphere_hybrid_height_coordinate', 'atmosphere_sleve_coordinate',
'height_above_geopotential_datum', 'height_above_reference_ellipsoid',
'height_above_mean_sea_level'},
'y': 'projection_y_coordinate',
'lat': 'latitude',
'x': 'projection_x_coordinate',
'lon': 'longitude'
},
'_CoordinateAxisType': {
'time': 'Time',
'vertical': {'GeoZ', 'Height', 'Pressure'},
'y': 'GeoY',
'lat': 'Lat',
'x': 'GeoX',
'lon': 'Lon'
},
'axis': readable_to_cf_axes,
'positive': {
'vertical': {'up', 'down'}
},
'units': {
'vertical': {
'match': 'dimensionality',
'units': 'Pa'
},
'lat': {
'match': 'name',
'units': {'degree_north', 'degree_N', 'degreeN', 'degrees_north', 'degrees_N',
'degreesN'}
},
'lon': {
'match': 'name',
'units': {'degree_east', 'degree_E', 'degreeE', 'degrees_east', 'degrees_E',
'degreesE'}
},
},
'regular_expression': {
'time': r'time[0-9]*',
'vertical': (r'(bottom_top|sigma|h(ei)?ght|altitude|depth|isobaric|pres|'
r'isotherm)[a-z_]*[0-9]*'),
'y': r'y',
'lat': r'x?lat[a-z0-9]*',
'x': r'x',
'lon': r'x?lon[a-z0-9]*'
}
}
@classmethod
def check_axis(cls, var, *axes):
"""Check if var satisfies the criteria for any of the given axes."""
for axis in axes:
# Check for
# - standard name (CF option)
# - _CoordinateAxisType (from THREDDS)
# - axis (CF option)
# - positive (CF standard for non-pressure vertical coordinate)
for criterion in ('standard_name', '_CoordinateAxisType', 'axis', 'positive'):
if (var.attrs.get(criterion, 'absent') in
cls.criteria[criterion].get(axis, set())):
return True
# Check for units, either by dimensionality or name
if (axis in cls.criteria['units'] and (
(
cls.criteria['units'][axis]['match'] == 'dimensionality'
and (units.get_dimensionality(var.attrs.get('units'))
== units.get_dimensionality(cls.criteria['units'][axis]['units']))
) or (
cls.criteria['units'][axis]['match'] == 'name'
and var.attrs.get('units') in cls.criteria['units'][axis]['units']
))):
return True
# Check if name matches regular expression (non-CF failsafe)
if re.match(cls.criteria['regular_expression'][axis], var.name.lower()):
return True
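# Illustrative sketch (comments only, not part of the original module): a
# hypothetical latitude coordinate named 'lat' with units 'degrees_north'
# satisfies the 'lat' axis through the units-name criterion above, e.g.
#
#     lat = xr.DataArray([10., 20.], dims=('lat',), name='lat',
#                        attrs={'units': 'degrees_north'})
#     CFConventionHandler.check_axis(lat, 'lat')   # -> True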
def _fixup_coords(self, var):
"""Clean up the units on the coordinate variables."""
for coord_name, data_array in var.coords.items():
if (self.check_axis(data_array, 'x', 'y')
and not self.check_axis(data_array, 'lon', 'lat')):
try:
var.coords[coord_name].metpy.convert_units('meters')
except DimensionalityError: # Radians!
if 'crs' in var.coords:
new_data_array = data_array.copy()
height = var.coords['crs'].item()['perspective_point_height']
scaled_vals = new_data_array.metpy.unit_array * (height * units.meters)
new_data_array.metpy.unit_array = scaled_vals.to('meters')
var.coords[coord_name] = new_data_array
def _generate_coordinate_map(self, coords):
"""Generate a coordinate map via CF conventions and other methods."""
# Parse all the coordinates, attempting to identify x, y, vertical, time
coord_lists = {'T': [], 'Z': [], 'Y': [], 'X': []}
for coord_var in coords:
# Identify the coordinate type using check_axis helper
axes_to_check = {
'T': ('time',),
'Z': ('vertical',),
'Y': ('y', 'lat'),
'X': ('x', 'lon')
}
for axis_cf, axes_readable in axes_to_check.items():
if self.check_axis(coord_var, *axes_readable):
coord_lists[axis_cf].append(coord_var)
# Resolve any coordinate conflicts
axis_conflicts = [axis for axis in coord_lists if len(coord_lists[axis]) > 1]
for axis in axis_conflicts:
self._resolve_axis_conflict(axis, coord_lists)
# Collapse the coord_lists to a coord_map
return {axis: (coord_lists[axis][0] if len(coord_lists[axis]) > 0 else None)
for axis in coord_lists}
@staticmethod
def _fixup_coordinate_map(coord_map, var):
"""Ensure sure we have coordinate variables in map, not coordinate names."""
for axis in coord_map:
if not isinstance(coord_map[axis], xr.DataArray):
coord_map[axis] = var[coord_map[axis]]
@staticmethod
def _assign_axes(coord_map, var):
"""Assign axis attribute to coordinates in var according to coord_map."""
| |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Find files on any machine in your environment with a Cybereason sensor installed"
class Input:
FILE_FILTER = "file_filter"
SERVER_FILTER = "server_filter"
class Output:
RESPONSE = "response"
class SearchForFilesInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"file_filter": {
"type": "string",
"title": "File Filter",
"description": "A fileFilters object for filtering by machine name, folder, file creation or modification time or file size with operator Equals, NotEquals, ContainsIgnoreCase, NotContainsIgnoreCase and others",
"order": 2
},
"server_filter": {
"type": "string",
"title": "Server Filter",
"description": "A Sensor filters string for filtering sensors by different criteria such as operating system",
"order": 1
}
},
"required": [
"file_filter"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
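# Illustrative sketch (not part of the generated plugin code): a plausible
# params dict for this action. The filter triplet mirrors the fieldName /
# operator / values shape defined in the output schema below; the file name is
# hypothetical and the sensor filter is a placeholder, so check the Cybereason
# API documentation for the exact server-side format.
def _example_search_for_files_params():
    return {
        Input.FILE_FILTER: json.dumps([
            {"fieldName": "fileName", "operator": "ContainsIgnoreCase", "values": ["example.exe"]}
        ]),
        Input.SERVER_FILTER: "<optional sensor filter string>",
    }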
class SearchForFilesOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"response": {
"$ref": "#/definitions/response",
"title": "Response",
"description": "Search file response",
"order": 1
}
},
"required": [
"response"
],
"definitions": {
"actionArguments": {
"type": "object",
"title": "actionArguments",
"properties": {
"fileSearchRequestConfiguration": {
"$ref": "#/definitions/fileSearchRequestConfiguration",
"title": "File Search Request Configuration",
"description": "File search request configuration",
"order": 4
},
"filters": {
"type": "array",
"title": "Filters",
"description": "Filters",
"items": {
"$ref": "#/definitions/filters"
},
"order": 3
},
"machines": {
"type": "array",
"title": "Machines",
"description": "Machines",
"items": {
"type": "string"
},
"order": 2
},
"yaraName": {
"type": "string",
"title": "Yara Name",
"description": "Yara name",
"order": 1
}
},
"definitions": {
"fileSearchRequestConfiguration": {
"type": "object",
"title": "fileSearchRequestConfiguration",
"properties": {
"cpuTrackingWindowMilli": {
"type": "integer",
"title": "CPU Tracking Window Milli",
"description": "CPU tracking window milli",
"order": 4
},
"diskRateBytesPerMilli": {
"type": "integer",
"title": "Disk Rate Bytes Per Milli",
"description": "Disk rate bytes per milli",
"order": 2
},
"maxConcurrentFileSearches": {
"type": "integer",
"title": "Max Concurrent File Searches",
"description": "Max concurrent file searches",
"order": 11
},
"maxDiskIOWindowMilli": {
"type": "integer",
"title": "Max Disk IOWindow Milli",
"description": "Max disk IO window milli",
"order": 9
},
"maxReadBytesPerFile": {
"type": "integer",
"title": "Max Read Bytes Per File",
"description": "Max read bytes per file",
"order": 14
},
"maxResults": {
"type": "integer",
"title": "Max Results",
"description": "Max results",
"order": 6
},
"maxYaraTimeouts": {
"type": "integer",
"title": "Max Yara timeouts",
"description": "Max yara timeouts",
"order": 7
},
"minFileReadPriceMilli": {
"type": "integer",
"title": "Min File Read Price Milli",
"description": "Min file read price milli",
"order": 3
},
"minThrottleAmountMilli": {
"type": "integer",
"title": "Min Throttle Amount Milli",
"description": "Min throttle amount milli",
"order": 1
},
"searchTimeoutDataScanSec": {
"type": "integer",
"title": "Search Timeout Data Scan Sec",
"description": "Search timeout data scan sec",
"order": 10
},
"searchTimeoutSec": {
"type": "integer",
"title": "Search Timeout Sec",
"description": "Search timeout sec",
"order": 12
},
"shouldUseNewAPI": {
"type": "boolean",
"title": "Should Use New API",
"description": "Should use new API",
"order": 13
},
"targetCpuPercentage": {
"type": "integer",
"title": "Target CPU Percentage",
"description": "Target CPU percentage",
"order": 5
},
"timoutPerFileScan": {
"type": "integer",
"title": "Timout Per File Scan",
"description": "Timout per file scan",
"order": 8
}
}
},
"filters": {
"type": "object",
"title": "filters",
"properties": {
"fieldName": {
"type": "string",
"title": "Field Name",
"description": "Field name",
"order": 3
},
"operator": {
"type": "string",
"title": "Operator",
"description": "Operator",
"order": 1
},
"values": {
"type": "array",
"title": "Values",
"description": "Values",
"items": {
"type": "string"
},
"order": 2
}
}
}
}
},
"fileSearchRequestConfiguration": {
"type": "object",
"title": "fileSearchRequestConfiguration",
"properties": {
"cpuTrackingWindowMilli": {
"type": "integer",
"title": "CPU Tracking Window Milli",
"description": "CPU tracking window milli",
"order": 4
},
"diskRateBytesPerMilli": {
"type": "integer",
"title": "Disk Rate Bytes Per Milli",
"description": "Disk rate bytes per milli",
"order": 2
},
"maxConcurrentFileSearches": {
"type": "integer",
"title": "Max Concurrent File Searches",
"description": "Max concurrent file searches",
"order": 11
},
"maxDiskIOWindowMilli": {
"type": "integer",
"title": "Max Disk IOWindow Milli",
"description": "Max disk IO window milli",
"order": 9
},
"maxReadBytesPerFile": {
"type": "integer",
"title": "Max Read Bytes Per File",
"description": "Max read bytes per file",
"order": 14
},
"maxResults": {
"type": "integer",
"title": "Max Results",
"description": "Max results",
"order": 6
},
"maxYaraTimeouts": {
"type": "integer",
"title": "Max Yara timeouts",
"description": "Max yara timeouts",
"order": 7
},
"minFileReadPriceMilli": {
"type": "integer",
"title": "Min File Read Price Milli",
"description": "Min file read price milli",
"order": 3
},
"minThrottleAmountMilli": {
"type": "integer",
"title": "Min Throttle Amount Milli",
"description": "Min throttle amount milli",
"order": 1
},
"searchTimeoutDataScanSec": {
"type": "integer",
"title": "Search Timeout Data Scan Sec",
"description": "Search timeout data scan sec",
"order": 10
},
"searchTimeoutSec": {
"type": "integer",
"title": "Search Timeout Sec",
"description": "Search timeout sec",
"order": 12
},
"shouldUseNewAPI": {
"type": "boolean",
"title": "Should Use New API",
"description": "Should use new API",
"order": 13
},
"targetCpuPercentage": {
"type": "integer",
"title": "Target CPU Percentage",
"description": "Target CPU percentage",
"order": 5
},
"timoutPerFileScan": {
"type": "integer",
"title": "Timout Per File Scan",
"description": "Timout per file scan",
"order": 8
}
}
},
"filters": {
"type": "object",
"title": "filters",
"properties": {
"fieldName": {
"type": "string",
"title": "Field Name",
"description": "Field name",
"order": 3
},
"operator": {
"type": "string",
"title": "Operator",
"description": "Operator",
"order": 1
},
"values": {
"type": "array",
"title": "Values",
"description": "Values",
"items": {
"type": "string"
},
"order": 2
}
}
},
"globalStats": {
"type": "object",
"title": "globalStats",
"properties": {
"stats": {
"$ref": "#/definitions/stats",
"title": "Stats",
"description": "Stats",
"order": 1
}
},
"definitions": {
"stats": {
"type": "object",
"title": "stats",
"properties": {
"AbortTimeout": {
"type": "integer",
"title": "Abort Timeout",
"description": "Abort timeout",
"order": 34
},
"Aborted": {
"type": "integer",
"title": "Aborted",
"description": "Aborted",
"order": 17
},
"Aborting": {
"type": "integer",
"title": "Aborting",
"description": "Aborting",
"order": 1
},
"AlreadyUpdated": {
"type": "integer",
"title": "Already Updated",
"description": "Already updated",
"order": 3
},
"BadArgument": {
"type": "integer",
"title": "Bad Argument",
"description": "Bad argument",
"order": 19
},
"ChunksRequired": {
"type": "integer",
"title": "Chunks Required",
"description": "Chunks required",
"order": 28
},
"Disconnected": {
"type": "integer",
"title": "Disconnected",
"description": "Disconnected",
"order": 24
},
"EndedWithInvalidParam": {
"type": "integer",
"title": "Ended With Invalid Param",
"description": "Ended with invalid param",
"order": 9
},
"EndedWithNoValidFolder": {
"type": "integer",
"title": "Ended With No Valid Folder",
"description": "Ended with no valid folder",
"order": 20
},
"EndedWithSensorTimeout": {
"type": "integer",
"title": "Ended With Sensor Timeout",
"description": "Ended with sensor timeout",
"order": 30
},
"EndedWithTooManyResults": {
"type": "integer",
"title": "Ended With Too Many Results",
"description": "Ended with too many results",
"order": 4
},
"EndedWithTooManySearches": {
"type": "integer",
"title": "Ended With Too Many Searches",
"description": "Ended with too many searches",
"order": 26
},
"EndedWithUnknownError": {
"type": "integer",
"title": "Ended With Unknown Error",
"description": "Ended with unknown error",
"order": 12
},
"EndedWithUnsupportedFilter": {
"type": "integer",
"title": "Ended With Unsupported Filter",
"description": "Ended with unsupported filter",
"order": 7
},
"EndedWithYaraCompileError": {
"type": "integer",
"title": "Ended With Yara Compile Error",
"description": "Ended with yara compile error",
"order": 2
},
"Failed": {
"type": "integer",
"title": "Failed",
"description": "Failed",
"order": 6
},
"FailedSending": {
"type": "integer",
"title": "Failed Sending",
"description": "Failed sending",
"order": 27
},
"FailedSendingToServer": {
"type": "integer",
"title": "Failed Sending To Server",
"description": "Failed sending to server",
"order": 37
},
"GettingChunks": {
"type": "integer",
"title": "Getting Chunks",
"description": "Getting chunks",
"order": 16
},
"InProgress": {
"type": "integer",
"title": "In Progress",
"description": "In progress",
"order": 10
},
"InvalidState": {
"type": "integer",
"title": "Invalid State",
"description": "Invalid state",
"order": 18
},
"MsiFileCorrupted": {
"type": "integer",
"title": "MSI File Corrupted",
"description": "MSI file corrupted",
"order": 13
},
"MsiSendFail": {
"type": "integer",
"title": "MSI Send Fail",
"description": "MSI Send Fail",
"order": 21
},
"NewerInstalled": {
"type": "integer",
"title": "Newer Installed",
"description": "Newer installed",
"order": 22
},
"None": {
"type": "integer",
"title": "None",
"description": "None",
"order": 23
},
"NotSupported": {
"type": "integer",
"title": "Not Supported",
"description": "Not supported",
"order": 32
},
"Pending": {
"type": "integer",
"title": "Pending",
"description": "Pending",
"order": 11
},
"Primed": {
"type": "integer",
"title": "Primed",
"description": "Primed",
"order": 36
},
"ProbeRemoved": {
"type": "integer",
"title": "Probe Removed",
"description": "Probe removed",
"order": 8
},
"SendingMsi": {
"type": "integer",
"title": "Sending MSI",
"description": "Sending MSI",
"order": 15
},
"SendingPlatform": {
"type": "integer",
import time

import cv2
import numpy as np
import torch
import torch.nn.functional as F


def blend_shapes(betas, shape_disps):
    ''' Calculates the per vertex displacement due to the blend shapes
Parameters
----------
betas : torch.tensor Bx(num_betas)
Blend shape coefficients
shape_disps: torch.tensor Vx3x(num_betas)
Blend shapes
Returns
-------
torch.tensor BxVx3
The per-vertex displacement due to shape deformation
'''
# Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
# i.e. Multiply each shape displacement by its corresponding beta and
# then sum them.
blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
return blend_shape
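# Minimal usage sketch (illustrative; the sizes below are arbitrary, not taken from any
# real model). It demonstrates the shapes the einsum above expects:
# betas (B, num_betas), shape_disps (V, 3, num_betas) -> displacement (B, V, 3).
def _demo_blend_shapes():
    betas = torch.randn(2, 10)              # B=2, num_betas=10
    shape_disps = torch.randn(100, 3, 10)   # V=100 vertices, kept small for the demo
    disp = blend_shapes(betas, shape_disps)
    assert disp.shape == (2, 100, 3)
    return disp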
def blend_shapes_np(betas, shape_disps):
''' Calculates the per vertex displacement due to the blend shapes
Parameters
----------
betas : torch.tensor Bx(num_betas)
Blend shape coefficients
shape_disps: torch.tensor Vx3x(num_betas)
Blend shapes
Returns
-------
torch.tensor BxVx3
The per-vertex displacement due to shape deformation
'''
# Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
# i.e. Multiply each shape displacement by its corresponding beta and
# then sum them.
# blend_shape = betas, shape_disps
b = betas.shape[0]
m, k, l = shape_disps.shape
shape_disps_r = np.tile(shape_disps.reshape(1, m, k, l), (b, 1, 1, 1))
betas_r = np.tile(betas.reshape(b, 1, -1, 1), (1, m, 1, 1))
blend_shape = np.matmul(shape_disps_r, betas_r)
return blend_shape.reshape((b, m, 3))
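# Consistency sketch (illustrative): for the same random inputs, the numpy version
# should agree with the torch einsum version above up to float32 precision.
def _demo_blend_shapes_np():
    betas = np.random.randn(2, 10).astype(np.float32)
    shape_disps = np.random.randn(100, 3, 10).astype(np.float32)
    out_np = blend_shapes_np(betas, shape_disps)
    out_pt = blend_shapes(torch.from_numpy(betas), torch.from_numpy(shape_disps)).numpy()
    assert np.allclose(out_np, out_pt, atol=1e-5)
    return out_np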
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
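# Sanity-check sketch (illustrative): the output of batch_rodrigues should be proper
# rotation matrices, i.e. R @ R^T ~ I and det(R) ~ 1, including for the zero vector.
def _demo_batch_rodrigues():
    rot_vecs = torch.tensor([[0.0, 0.0, 0.0], [0.3, -0.2, 0.1]])
    R = batch_rodrigues(rot_vecs)                                   # (2, 3, 3)
    eye = torch.eye(3).expand(2, 3, 3)
    assert torch.allclose(torch.bmm(R, R.transpose(1, 2)), eye, atol=1e-5)
    assert torch.allclose(torch.det(R), torch.ones(2), atol=1e-5)
    return R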
def rodnumba(p):
rot, jac = cv2.Rodrigues(p)
return rot, jac
def batch_rodrigues_np(pose_body):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: ndarray Nx3
array of N axis-angle vectors
Returns
-------
R: ndarray Nx3x3
The rotation matrices for the given axis-angle parameters
R_jac: ndarray Nx3x3x3
Jacobians of the rotation matrices
'''
batch_size = pose_body.shape[0]
n_j = int(pose_body.shape[1]/3)
dt = pose_body.dtype
rot_mats = np.zeros((batch_size, n_j, 3, 3), dtype=dt)
rot_mats_jac = np.zeros((batch_size, n_j, 3, 3, 3), dtype=dt)
for b in range(0, pose_body.shape[0]):
for i in range(0, n_j):
# rot, jac = cv2.Rodrigues(pose_body[b][3 * (i): 3 * (i + 1)].reshape(-1))
rot, jac = rodnumba(pose_body[b][3 * (i): 3 * (i + 1)].reshape(-1))
# print(numba.typeof(rot))
# print(numba.typeof(jac))
            rot_mats[b, i] = rot
            rot_mats_jac[b, i] = jac.reshape(3, 3, 3)
return rot_mats, rot_mats_jac
def transform_mat(R, t):
''' Creates a batch of transformation matrices
Args:
- R: Bx3x3 array of a batch of rotation matrices
- t: Bx3x1 array of a batch of translation vectors
Returns:
- T: Bx4x4 Transformation matrix
'''
# No padding left or right, only add an extra row
return torch.cat([F.pad(R, [0, 0, 0, 1]),
F.pad(t, [0, 0, 0, 1], value=1)], dim=2)
def transform_mat_np(R, t):
''' Creates a batch of transformation matrices
Args:
- R: Bx3x3 array of a batch of rotation matrices
- t: Bx3x1 array of a batch of translation vectors
Returns:
- T: Bx4x4 Transformation matrix
'''
# No padding left or right, only add an extra row
b = R.shape[0]
T = np.zeros((b, 4, 4), dtype=R.dtype)
T[:, :3, :3] = R
T[:, :3, 3] = t.reshape(b, 3)
T[:, 3, 3] = 1
return T
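# Usage sketch (illustrative, arbitrary values): build one 4x4 rigid transform from an
# identity rotation and a translation, then apply it to a homogeneous point.
def _demo_transform_mat_np():
    R = np.eye(3, dtype=np.float32).reshape(1, 3, 3)
    t = np.array([1.0, 2.0, 3.0], dtype=np.float32).reshape(1, 3, 1)
    T = transform_mat_np(R, t)                                      # (1, 4, 4)
    p = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)            # origin, homogeneous
    assert np.allclose(T[0] @ p, [1.0, 2.0, 3.0, 1.0])
    return T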
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
"""
Applies a batch of rigid transformations to the joints
Parameters
----------
rot_mats : torch.tensor BxNx3x3
Tensor of rotation matrices
joints : torch.tensor BxNx3
Locations of joints
parents : torch.tensor BxN
The kinematic tree of each object
dtype : torch.dtype, optional:
The data type of the created tensors, the default is torch.float32
Returns
-------
posed_joints : torch.tensor BxNx3
The locations of the joints after applying the pose rotations
rel_transforms : torch.tensor BxNx4x4
The relative (with respect to the root joint) rigid transformations
for all the joints
"""
joints = torch.unsqueeze(joints, dim=-1)
rel_joints = joints.clone()
rel_joints[:, 1:] -= joints[:, parents[1:]]
transforms_mat = transform_mat(
rot_mats.reshape(-1, 3, 3),
rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
# print(transforms_mat[0][0])
# print(transforms_mat[0][1])
transform_chain = [transforms_mat[:, 0]]
for i in range(1, parents.shape[0]):
# Subtract the joint location at the rest pose
# No need for rotation, since it's identity when at rest
curr_res = torch.matmul(transform_chain[parents[i]],
transforms_mat[:, i])
transform_chain.append(curr_res)
transforms = torch.stack(transform_chain, dim=1)
# print(transforms[0][1])
# The last column of the transformations contains the posed joints
posed_joints = transforms[:, :, :3, 3]
joints_homogen = F.pad(joints, [0, 0, 0, 1])
rel_transforms = transforms - F.pad(
torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
return posed_joints, rel_transforms
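# Sanity-check sketch (illustrative): on a tiny 3-joint chain with identity rotations,
# the posed joints equal the rest joints and the relative transforms carry no
# translation. The chain layout (parents = [0, 0, 1]) is an arbitrary assumption.
def _demo_batch_rigid_transform():
    joints = torch.tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]]])  # (1, 3, 3)
    rot_mats = torch.eye(3).expand(1, 3, 3, 3).contiguous()                        # identity rotations
    parents = torch.tensor([0, 0, 1])
    posed_joints, rel_transforms = batch_rigid_transform(rot_mats, joints, parents)
    assert torch.allclose(posed_joints, joints, atol=1e-6)
    assert torch.allclose(rel_transforms[..., :3, 3], torch.zeros(1, 3, 3), atol=1e-6)
    return posed_joints, rel_transforms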
# @jit
def batch_rigid_transform_diff(rot_mats, rot_mats_jac, transform_jac_chain, joints, parents, is_jac=True):
"""
Applies a batch of rigid transformations to the joints
Parameters
----------
rot_mats : ndarray BxNx3x3
Tensor of rotation matrices
rot_mats_jac : ndarray BxNx3x3x3
Tensor of rotation matrix Jacobians
joints : ndarray BxNx3
Locations of joints
parents : ndarray BxN
The kinematic tree of each object
Returns
-------
posed_joints : ndarray BxNx3
The locations of the joints after applying the pose rotations
pose_joints_jac : ndarray BxNxNx3x3
Jacobian of pose_joints w.r.t. pose
rel_transforms : ndarray BxNx4x4
The relative (with respect to the root joint) rigid transformations
for all the joints
rel_transforms_jac : jacobian w.r.t. pose
"""
# joints = joints.reshape()
# joints = torch.unsqueeze(joints, dim=-1)
t_0 = time.time()
rel_joints = joints.copy()
rel_joints[:, 1:] -= joints[:, parents[1:]]
transforms_mat = transform_mat_np(
rot_mats.reshape(-1, 3, 3),
rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
b = joints.shape[0]
n_j = joints.shape[1]
t_1 = time.time()
if is_jac:
ainds = np.arange(0, parents.shape[0])
transform_jac_chain.fill(0)
transform_jac_chain[:, ainds, ainds, :, 0:3, 0:3] = rot_mats_jac[:, ainds]
transform_chain = np.zeros((b, parents.shape[0], 4, 4), dtype=rot_mats.dtype)
transform_chain[:, 0] = transforms_mat[:, 0]
for i in range(1, parents.shape[0]):
transform_chain[:, i] = np.matmul(transform_chain[:, parents[i]],
transforms_mat[:, i])
if is_jac:
inds = np.arange(1, len(parents))
trans_parent = transform_chain[:, parents[inds]].reshape(b, len(inds), 1, 1, 4, 4)
trans_jac = transform_jac_chain[:, inds, inds, :].reshape(1, len(inds), 1, 3, 4, 4)
transform_jac_chain[:, inds, inds, :] = np.matmul(trans_parent, trans_jac).reshape(len(inds), 3, 4, 4)
        m = np.eye(parents.shape[0], dtype=bool)
b = transforms_mat.shape[0]
for i in range(1, parents.shape[0]):
m[i] = m[i] | m[parents[i]]
# transform_jac_chain[:, i, :, :] += np.matmul(transform_jac_chain[:, parents[i], :, :], transforms_mat[:, i])
tr_jac_ch_sel = transform_jac_chain[:, parents[i], m[i], :]
transform_jac_chain[:, i, m[i], :] += np.matmul(tr_jac_ch_sel, transforms_mat[:, i]).reshape(b, -1, 3, 4, 4)
t_2 = time.time()
transforms = transform_chain
posed_joints = transforms[:, :, :3, 3]
joints_rot = np.matmul(transforms[:, :, 0:3, 0:3], joints.reshape(b, n_j, 3, 1)).reshape((b, n_j, 3))
rel_transforms = transforms.copy()
rel_transforms[:, :, 0:3, 3] = rel_transforms[:, :, 0:3, 3] - joints_rot
if is_jac:
transforms_jac = np.transpose(transform_jac_chain, (0, 2, 3, 1, 4, 5))
posed_joints_jac = transforms_jac[:, :, :, :, :3, 3]
tjhj = np.matmul(transforms_jac[:, :, :, :, 0:3, 0:3], joints.reshape((b, 1, 1, n_j, 3, 1))).reshape(
(b, n_j, 3, n_j, 3))
rel_transforms_jac = transforms_jac.copy()
rel_transforms_jac[:, :, :, :, 0:3, 3] = rel_transforms_jac[:, :, :, :, 0:3, 3] - tjhj
else:
posed_joints_jac = None
rel_transforms_jac = None
t_3 = time.time()
# print('brgd breakdown {} {} {} '.format(t_1-t_0, t_2-t_1, t_3-t_2))
return posed_joints, posed_joints_jac, rel_transforms, rel_transforms_jac
def batch_rigid_transform_fast_diff(rot_mats, rot_mats_jac, joints, parents):
"""
Applies a batch of rigid transformations to the joints
Parameters
----------
rot_mats : ndarray BxNx3x3
Tensor of rotation matrices
rot_mats_jac : ndarray BxNx3x3x3
Tensor of rotation matrix Jacobians
joints : ndarray BxNx3
Locations of joints
parents : ndarray BxN
The kinematic tree of each object
Returns
-------
posed_joints : ndarray BxNx3
The locations of the joints after applying the pose rotations
pose_joints_jac : ndarray BxNxNx3x3
Jacobian of pose_joints w.r.t. pose
rel_transforms : ndarray BxNx4x4
The relative (with respect to the root joint) rigid transformations
for all the joints
rel_transforms_jac : jacobian w.r.t. pose
"""
# joints = joints.reshape()
# joints = torch.unsqueeze(joints, dim=-1)
t_0 = time.time()
rel_joints = joints.copy()
    rel_joints[:, 1:] -= joints[:, parents[1:]]
<reponame>alejandroviegener/copy-test<filename>muttlib/forecast.py<gh_stars>10-100
"""Module to give FBProphet a common interface to Sklearn and general utilities
for forecasting problems like limiting the datasets to the last n days,
allowing wider grid search for hyperparameters not available using standard
FBProphet and Sklearn libraries.
Classes:
- SkProphet: a wrapper around FBProphet to provide a scikit learn compatible
API.
- StepsSelectorEstimator: a scikit learn metaestimator to limit the amount of
days used to fit a forecast. Wraps another estimator.
These two classes can be combined to perform gridsearch using FBProphet while
also exploring the amount of training days to use in the dataset.
The most relevant docstrings are on:
- SkProphet.__init__
- SkProphet.fit
- StepsSelectorEstimator.__init__
Simple examples can be taken from the tests.
A complex example doing a grid search can be seen here:
.. code-block:: python
import pandas as pd
from sklearn.model_selection import GridSearchCV, ParameterGrid
from muttlib.forecast import SkProphet, StepsSelectorEstimator
# The grid has to be turned into a list if used in a StepsSelectorEstimator
# as it has to be copyable for get / set params
prophet_grid = list(ParameterGrid({
'sk_date_column': ['date'],
'sk_yhat_only': [True],
'sk_extra_regressors': [
[],
[{'name': 'b'}],
],
'prophet_kwargs': [
dict(daily_seasonality='auto'),
dict(daily_seasonality=True),
],
}))
days_selector_grid = {
'estimator_class': [SkProphet],
'amount_of_steps': [90, 120],
'sort_col': ['date'],
'estimator_kwargs': prophet_grid,
}
# To instance GridSearchCV, we need to pass an initialized estimator
# (for example, a `StepsSelectorEstimator`)
initial_estimator = StepsSelectorEstimator(
SkProphet,
days_selector_grid['amount_of_steps'][0],
prophet_grid[0])
cv = GridSearchCV(
initial_estimator,
days_selector_grid,
cv=2,
scoring='r2')
X = pd.DataFrame({'date': [0, 2, 3, 4, 5], 'b': [1, 4, 5, 0, 9]})
y = pd.Series([1, 1, 0, 1, 0])
cv.fit(X, y)
TODO:
- At the moment, given FBProphet's current version we have that the model's
parameter for *extra_regressors* is not set on initialization but rather it
is set by using a specific prophet method. Thus, we have that our current
SKProphet class handles this parameter by setting it manually and knowing
about this implicitly. If, for some future reason, prophet's API changes to
include a variety of other/new parameters that are added _not-on-init _,
then it'ld be probably a good idea to keep an internal dictionary of the
parameter's dtype and prophet's method used to set it, so as to iterate and
set these in a "programatic" way.
- Evaluate if SkProphet.fit and SkProphet.copy default value should be False
to save memory and cpu by default, risking to modifying the input data as a
side effect of the function.
"""
from copy import deepcopy
from inspect import isclass, signature
import numpy as np
import pandas as pd
from fbprophet import Prophet
from sklearn.base import BaseEstimator
class SkProphet(Prophet):
DS = 'ds'
def __init__(
self,
sk_date_column=DS,
sk_yhat_only=True,
sk_extra_regressors=None,
prophet_kwargs=None,
):
"""Scikit learn compatible interface for FBProphet.
Parameters
----------
sk_date_column: str
Name of the column to use as date in Prophet.
sk_yhat_only: Boolean
True to return only the yhat from Prophet predictions.
False to return everything.
sk_extra_regressors: [] or [str] or [dict()]
List with extra regressors to use. The list can have:
* strings: column names (default prophet arguments for extra
regressors will be used).
* dicts: {name: *column_name*, prior_scale: _, standardize: _,
mode: _}
For more information see Prophet.add_regressors.
prophet_kwargs: dict
Keyword arguments to forward to Prophet.
"""
if sk_extra_regressors is None:
sk_extra_regressors = []
if prophet_kwargs is None:
prophet_kwargs = {}
super().__init__(**prophet_kwargs)
self.sk_date_column = sk_date_column
self.sk_yhat_only = sk_yhat_only
self.sk_extra_regressors = sk_extra_regressors
self.prophet_kwargs = prophet_kwargs
self._set_my_extra_regressors()
def fit(
self, X, y=None, copy=True, **fit_params
): # pylint: disable=arguments-differ
"""Scikit learn's like fit on the Prophet model.
Parameters
----------
X: pd.DataFrame
A dataframe with the data to fit.
It is expected to have a column with datetime values named as
*self.sk_date_column*.
y: None or str or (list, tuple, numpy.ndarray, pandas.Series/DataFrame)
The label values to fit. If y is:
- None: the column 'y' should be contained in X.
- str: the name of the column to use in X.
- list, tuple, ndarray, etc: the values to fit.
If the values have two dimensions (a matrix instead of a vector)
the first column will be used.
E.g.: [1, 3] -> [1, 3] will be used.
E.g.: [[1], [3]] -> [1, 3] will be used.
E.g.: [[1, 2], [3, 4]] -> [1, 3] will be used.
copy: Boolean
True to copy the input dataframe before working with it to avoid
modifying the original one.
            If False is set, X should already contain the `ds` and `y` columns
            named as prophet expects, since the original dataframe may be
            modified in place when columns are renamed or added.
fit_params: keyword arguments
Keyword arguments to forward to Prophet's fit.
"""
if not isinstance(X, pd.DataFrame):
raise TypeError('Arg "X" passed can only be of pandas.DataFrame type.')
if copy:
X = X.copy()
if self.sk_date_column != self.DS and self.sk_date_column in X.columns:
X = X.rename({self.sk_date_column: self.DS}, axis=1)
if y is not None:
if isinstance(y, str) and y in X.columns:
X = X.rename({y: 'y'}, axis=1)
else:
X['y'] = self._as_np_vector(y)
return super().fit(X, **fit_params)
def predict(self, X, copy=True): # pylint: disable=arguments-differ
"""Scikit learn's predict (returns predicted values).
Parameters
----------
X: pandas.DataFrame
Input data for predictions.
copy: Boolean
True to copy the input dataframe before working with it to avoid
modifying the original one.
            If False is set, X should already contain the `ds` column named as
            prophet expects, since the original dataframe may be modified in
            place when columns are renamed.
"""
if copy:
X = X.copy()
if self.sk_date_column != self.DS and self.sk_date_column in X.columns:
X = X.rename({self.sk_date_column: self.DS}, axis=1)
predictions = super().predict(X)
if self.sk_yhat_only:
predictions = predictions.yhat.values
return predictions
def get_params(self, deep=True):
"""Scikit learn's get_params (returns the estimator's params)."""
prophet_attrs = [
attr for attr in signature(Prophet.__init__).parameters if attr != 'self'
]
sk_attrs = [
attr for attr in signature(self.__init__).parameters if attr != 'self'
]
prophet_params = {a: getattr(self, a, None) for a in prophet_attrs}
sk_params = {a: getattr(self, a, None) for a in sk_attrs}
if deep:
sk_params = deepcopy(sk_params)
prophet_params = deepcopy(prophet_params)
sk_params['prophet_kwargs'].update(prophet_params)
return sk_params
def set_params(self, **params):
"""Scikit learn's set_params (sets the parameters provided).
        Precedence of prophet keyword arguments:
        - If an argument is passed explicitly, that value is kept.
        - Otherwise, if it is provided inside the 'prophet_kwargs' dict, that value is kept.
        - Otherwise, if it is already set on the instance, the current value is not erased.
"""
sk_kws = [
attr for attr in signature(self.__init__).parameters if attr != 'self'
]
current_prophet_kws = getattr(self, 'prophet_kwargs', {})
explicit_prophet_kws = {}
args_passed_prophet_kws = {}
for attr, value in params.items():
if attr == 'prophet_kwargs':
explicit_prophet_kws = value
elif attr not in sk_kws:
args_passed_prophet_kws[attr] = value
else:
setattr(self, attr, value)
prophet_kws = current_prophet_kws
prophet_kws.update(explicit_prophet_kws)
prophet_kws.update(args_passed_prophet_kws)
for attr, value in prophet_kws.items():
setattr(self, attr, value)
setattr(self, 'prophet_kwargs', prophet_kws)
self._set_my_extra_regressors()
return self
def _set_my_extra_regressors(self):
"""Adds the regressors defined in self.sk_extra_regressors.
It is meant to be used at initialization.
"""
if self.extra_regressors:
self.extra_regressors = self.extra_regressors.__class__()
for regressor in self.sk_extra_regressors:
if isinstance(regressor, str):
self.add_regressor(regressor)
elif isinstance(regressor, dict):
self.add_regressor(**regressor)
else:
raise TypeError(
'Invalid extra_regressor in SkProphet.'
'Extra regressors must be strings or dicts with '
'{name: *column_name*, prior_scale: _, standardize: _, '
'mode: _}'
)
def _as_np_vector(self, y):
"""Ensures a list, tuple, pandas.Series, pandas.DataFrame
or numpy.ndarray is returned as a numpy.ndarray of dimension 1.
Parameters
----------
y: list, tuple, numpy.ndarray, pandas.Series, pandas.DataFrame
The object containing the y values to fit.
If y is multidimensional, e.g.: [[1, 2], [3, 4]], the first column
will be returned as y value, continuining the example: [1, 3].
Returns
-------
numpy.ndarray of dimension 1
The values as a numpy array of dimension 1.
"""
if isinstance(y, (list, tuple)):
y = np.asarray(y)
elif isinstance(y, (pd.Series, pd.DataFrame)):
y = y.values
if isinstance(y, np.ndarray):
if len(y.shape) > 1:
y = y[:, 0]
return y
def __repr__(self):
"""Text representation of the object to look it nicely in the
interpreter.
"""
return (
f'{self.__class__.__name__}('
f'sk_date_column="{self.sk_date_column}", '
f'sk_yhat_only={self.sk_yhat_only}, '
            f'sk_extra_regressors={self.extra_regressors}, '
f'prophet_kwargs={self.prophet_kwargs})'
)
__str__ = __repr__
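# Minimal usage sketch (illustrative; column names and values are made up, and the
# series is far shorter than anything Prophet is normally fit on).
def _skprophet_example():
    X = pd.DataFrame({'date': pd.date_range('2021-01-01', periods=30), 'b': range(30)})
    y = pd.Series(range(30), dtype=float)
    model = SkProphet(sk_date_column='date', sk_extra_regressors=[{'name': 'b'}])
    model.fit(X, y)
    return model.predict(X)  # numpy array of yhat values, since sk_yhat_only=True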
class StepsSelectorEstimator(BaseEstimator):
def __init__(
self, estimator_class, amount_of_steps, estimator_kwargs=None, sort_col='date'
):
"""An estimator | |
# Copyright (C) 2021 <NAME>
# MIT License
# Generate an abstract syntax tree via a recusive descent parser
from src.error import ErrorHandler, ParseError
from src.tokenizer import Token, TokenType
from src.node import Binary, Unary, Variable, Literal, Grouping, Assignment
from src.node import Logical
from src.node import Generic, Printer, VariableDeclaration, Block, Branch
from src.node import Loop
class Parser():
def __init__(self):
"""\
convert a list of tokens to an abstract syntax tree
@i: tokens index
@ast: abstract syntax tree generated by self.expression()
"""
self.err = ErrorHandler()
self.tokens = []
self.i = 0
self.ast = None
def parse(self, tokens, limit = 10):
"""\
recursive descent entry point
@tokens: list of tokens provided by lexical analysis, tokens[-1] == EOF
@limit: internal ErrorHandler limit
Returns: abstract syntax tree, top level is a list of statements
"""
self.err = ErrorHandler(limit)
self.tokens = tokens
self.i = 0
try:
return (self.program(), self.err)
except ParseError:
return (None, self.err)
def curr_type(self):
"""\
helper function: returns token type of token at current list index
"""
token = self.tokens[self.i]
return token.type
def curr_token(self):
"""\
helper function: syntactic sugar to fetch current token
"""
return self.tokens[self.i]
def advance(self):
"""\
helper function: syntactic sugar for iteration over the tokens list
"""
assert(self.i + 1 < len(self.tokens))
self.i += 1
def prev_token(self):
"""\
helper function: syntactic sugar to fetch previous token
"""
assert(self.i - 1 >= 0)
return self.tokens[self.i - 1]
def check_semicolon(self):
if self.curr_type() == TokenType.SEMICOLON:
self.advance()
return True
else:
tok = self.curr_token()
if tok.type == TokenType.EOF:
self.trap("expected ';' at end of file")
else:
self.trap("expected ';' before {}".format(tok.lexeme))
return False
def program(self):
"""\
<program> := <declaration>* EOF
"""
tree = []
while self.curr_type() != TokenType.EOF:
tree.append(self.declaration())
assert(self.curr_type() == TokenType.EOF)
return tree
def declaration(self):
"""\
<declaration> := <variable declaration> | <statement>
"""
if self.curr_type() == TokenType.VAR:
self.advance()
return self.var_declaration()
return self.statement()
def var_declaration(self):
"""\
<var_declaration> := "var" IDENTIFIER ("=" <expression>)? ";"
"""
name = None
#if no initializer is present, assume that there was actually
        #an initializer to nil, i.e., var x = nil; instead of var x;
initializer = Literal(Token(TokenType.NIL, -1, "nil", None))
if self.curr_type() == TokenType.IDENTIFIER:
name = self.curr_token()
self.advance()
if self.curr_type() == TokenType.EQUAL:
self.advance()
initializer = self.expression()
self.check_semicolon()
else:
self.trap("missing variable identifier")
return VariableDeclaration(name, initializer)
def statement(self):
"""\
<statement> := <expression statement> | <print statement> |
<block statement> | <if statement> | <while statement> |
<for statement>
"""
# this isn't the world's fastest code, a jump table or dictionary-based
# switch would be better, but hey we're writing an interpreter in
# python! This is hardly the bottleneck!
if self.curr_type() == TokenType.PRINT:
self.advance()
stmt = self.print_stmt()
elif self.curr_type() == TokenType.LEFT_BRACE:
self.advance()
stmt_list = self.block_stmt()
stmt = Block(stmt_list)
elif self.curr_type() == TokenType.IF:
self.advance()
stmt = self.branch_stmt()
elif self.curr_type() == TokenType.WHILE:
self.advance()
stmt = self.while_stmt()
elif self.curr_type() == TokenType.FOR:
self.advance()
stmt = self.for_stmt()
else:
stmt = self.generic_stmt()
return stmt
def print_stmt(self):
"""\
<print statement> := "print" <expression> ";"
"""
stmt = Printer(self.expression())
self.check_semicolon()
return stmt
def block_stmt(self):
"""\
<block statement> := "{" <declaration>* "}"
this method returns a list of statements rather than a block node, b/c
it is used for both generic block statements and function blocks. The
caller must wrap the list into the appropriate node class.
"""
stmt_list = []
while self.curr_type() != TokenType.RIGHT_BRACE:
expr = self.declaration()
stmt_list.append(expr)
if self.curr_type() == TokenType.EOF:
self.trap("expected '}' at end of file")
elif self.curr_type() != TokenType.RIGHT_BRACE:
tok = self.curr_token()
self.trap("expected '}' at {}".format(tok.lexeme))
else:
self.advance()
return stmt_list
def branch_stmt(self):
"""\
<branch> := "if" "(" <expr> ")" <stmt> ("else" <stmt>)?
"""
if self.curr_type() != TokenType.LEFT_PAREN:
self.trap("expected open parenthesis after 'if'")
return Branch(None, None, None)
self.advance()
condition = self.expression()
if self.curr_type() != TokenType.RIGHT_PAREN:
self.trap("expected close parenthesis after condition")
return Branch(None, None, None)
self.advance()
then_branch = self.statement()
else_branch = None
if self.curr_type() == TokenType.ELSE:
self.advance()
else_branch = self.statement()
return Branch(condition, then_branch, else_branch)
def while_stmt(self):
"""
<while> := "while" "(" <expression> ")" <statement>
"""
if self.curr_type() != TokenType.LEFT_PAREN:
self.trap("expected open parenthesis after 'if'")
return Loop(None, None)
self.advance()
condition = self.expression()
if self.curr_type() != TokenType.RIGHT_PAREN:
self.trap("expected close parenthesis after condition")
return Loop(None, None)
self.advance()
body = self.statement()
return Loop(condition, body)
def for_stmt(self):
"""\
<for statement> := "for" "(" (<var decl> | <expr stmt> | ";")
<expression>? ";" <expression>? ")" <statement>
for statements are desugared into an equivalent while statement.
"""
if self.curr_type() != TokenType.LEFT_PAREN:
self.trap("expected '(' after 'for'")
return Loop(None, None)
self.advance()
initializer = None
if self.curr_type() == TokenType.SEMICOLON:
self.advance()
elif self.curr_type() == TokenType.VAR:
self.advance()
initializer = self.var_declaration()
else:
initializer = self.generic_stmt()
condition = None
if self.curr_type() == TokenType.SEMICOLON:
self.advance()
else:
condition = self.expression()
if not self.check_semicolon():
return Loop(None, None)
increment = None
if self.curr_type() != TokenType.RIGHT_PAREN:
increment = self.expression()
if self.curr_type() == TokenType.RIGHT_PAREN:
self.advance()
else:
self.trap("expected ')' after a for loop clause")
return Loop(None, None)
body = self.statement()
#desugar the for loop by nesting the disjoint nodes into blocks
if increment is not None:
body = Block([body, increment])
if condition is not None:
body = Loop(condition, body)
else:
#a little different from standard lox
#if you want an infinite loop, make it clear with a while(true)
#so in reality, this isn't a by-the-book lox implementation
self.trap("infinite loop detected, use while(true) instead")
return Loop(None, None)
if initializer is not None:
body = Block([initializer, body])
return body
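    # Illustrative desugaring example (rough sketch of the node structure built above):
    #
    #     for (var i = 0; i < 3; i = i + 1) print i;
    #
    # becomes, roughly,
    #
    #     Block([VariableDeclaration(i, 0),
    #            Loop(i < 3,
    #                 Block([Printer(i), (i = i + 1)]))])
    #
    # i.e. the increment is appended to the loop body and the initializer wraps the
    # resulting while-style loop.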
def generic_stmt(self):
"""\
<expression statement> := <expression> ";"
"""
stmt = Generic(self.expression())
self.check_semicolon()
return stmt
def expression(self):
"""\
dummy method used to encode the lox grammar explicity in the source.
<expression> := <assignment>
"""
return self.assignment()
def assignment(self):
"""\
assign rvalue to lvalue
<assignment> := (IDENTIFIER "=" <assignment>) | <logical or>
"""
lval = self.logical_or()
if self.curr_type() == TokenType.EQUAL:
self.advance()
rval = self.assignment()
if isinstance(lval, Variable):
#extract token from variable node as a valid lvalue
return Assignment(lval.name, rval)
self.trap("assignment target is not an l-value")
#if trap was initiated, this return node is just a dummy.
#trap synchronized to the next statement anyways so its no risk.
#on the other hand, if the branch was skipped entirely, then lval
#is just some expression.
return lval
def logical_or(self):
"""\
<logical or> := <logical and> ("or" <logical and>)*
"""
expr = self.logical_and()
if self.curr_type() == TokenType.OR:
self.advance()
left = expr
tok = self.prev_token()
            right = self.logical_or()
return Logical(left, tok, right)
return expr
def logical_and(self):
"""\
<logical and> := <equality> ("and" <equality>)*
"""
expr = self.equality()
if self.curr_type() == TokenType.AND:
self.advance()
left = expr
tok = self.prev_token()
right = self.logical_and()
return Logical(left, tok, right)
return expr
def equality(self):
"""\
<equality> := <comparison> (("==" | "!=") <comparison>)*
"""
expr = self.comparison()
types = set([TokenType.EQUAL_EQUAL, TokenType.BANG_EQUAL])
while self.curr_type() in types:
self.advance()
left = expr
operator = self.prev_token()
right = self.comparison()
expr = Binary(left, operator, right)
return expr
def comparison(self):
"""\
<comparison> := <term> ((">" | "<" | "<=" | ">=") <term>)*
"""
expr = self.term()
types = set([TokenType.GREATER, TokenType.GREATER_EQUAL, \
TokenType.LESS, TokenType.LESS_EQUAL])
while self.curr_type() in types:
self.advance()
left = expr
operator = self.prev_token()
right = self.term()
expr = Binary(left, operator, right)
return expr
def term(self):
"""\
<term> := <factor> (("+" | "-") <factor>)*
"""
expr = self.factor()
while self.curr_type() in set([TokenType.PLUS, TokenType.MINUS]):
self.advance()
left = expr
operator = self.prev_token()
right = self.factor()
expr = Binary(left, operator, right)
return expr
def factor(self):
"""\
<factor> := <unary> (("*" | "/") <unary>)*
"""
expr = self.unary()
while self.curr_type() in set([TokenType.STAR, TokenType.SLASH]):
self.advance()
left = expr
operator = self.prev_token()
right = self.unary()
expr = Binary(left, operator, right)
return expr
def unary(self):
"""\
<unary> := ("!" | "-") <unary> | <primary>
"""
if self.curr_type() in set([TokenType.BANG, TokenType.MINUS]):
self.advance()
return Unary(self.prev_token(), self.unary())
return self.primary()
def primary(self):
"""\
<primary> := NUMBER | STRING | "true" | "false" | "nil"
<primary> := "(" <expression> ")"
"""
types = set([TokenType.NUMBER, TokenType.STRING, TokenType.NIL, \
TokenType.TRUE, TokenType.FALSE])
if self.curr_type() in types:
expr = Literal(self.curr_token())
self.advance()
elif self.curr_type() == TokenType.LEFT_PAREN:
self.advance()
expr = Grouping(self.expression())
if self.curr_type() == TokenType.RIGHT_PAREN:
self.advance()
else:
self.trap("missing right parenthesis for grouped expression")
elif self.curr_type() == TokenType.IDENTIFIER:
            expr = Variable(self.curr_token())
            self.advance()
<reponame>balazsgaspar/whoville
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V1ldapApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def delete_ldap(self, id, **kwargs):
"""
delete LDAP config by id
LDAP server integration enables the user to provide a central place to store usernames and passwords for the users of his/her clusters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_ldap(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_ldap_with_http_info(id, **kwargs)
else:
(data) = self.delete_ldap_with_http_info(id, **kwargs)
return data
def delete_ldap_with_http_info(self, id, **kwargs):
"""
delete LDAP config by id
LDAP server integration enables the user to provide a central place to store usernames and passwords for the users of his/her clusters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_ldap_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_ldap" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_ldap`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/ldap/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_private_ldap(self, name, **kwargs):
"""
delete private LDAP config by name
LDAP server integration enables the user to provide a central place to store usernames and passwords for the users of his/her clusters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_ldap(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_private_ldap_with_http_info(name, **kwargs)
else:
(data) = self.delete_private_ldap_with_http_info(name, **kwargs)
return data
def delete_private_ldap_with_http_info(self, name, **kwargs):
"""
delete private LDAP config by name
LDAP server integration enables the user to provide a central place to store usernames and passwords for the users of his/her clusters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_ldap_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_private_ldap" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_private_ldap`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/ldap/user/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_public_ldap(self, name, **kwargs):
"""
delete public (owned) or private LDAP config by name
LDAP server integration enables the user to provide a central place to store usernames and passwords for the users of his/her clusters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_ldap(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_public_ldap_with_http_info(name, **kwargs)
else:
(data) = self.delete_public_ldap_with_http_info(name, **kwargs)
return data
def delete_public_ldap_with_http_info(self, name, **kwargs):
"""
delete public (owned) or private LDAP config by name
LDAP server integration enables the user to provide a central place to store usernames and passwords for the users of his/her clusters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_ldap_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_public_ldap" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_public_ldap`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/ldap/account/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_ldap(self, id, **kwargs):
"""
retrieve LDAP config by id
LDAP server integration enables the user to provide a central place to store usernames and passwords for the users of his/her | |
import argparse
import pickle
import numpy as np
import os
import pandas as pd
import scipy.sparse as sp
import torch
from torch import nn
import torch.nn.functional as F
from scipy.sparse import linalg
from torch.autograd import Variable
from torch.utils.data.dataset import Dataset
from pygsp import graphs, filters
import networkx as nx
from scipy import sparse
from texttable import Texttable
from sklearn.preprocessing import normalize
import time
import util
import torch.optim as optim
import matplotlib.pyplot as plt
import pywt
class Sampling(nn.Module):
def __init__(self, args, seq_len):
super(Sampling, self).__init__()
self.conv = nn.Conv1d(seq_len, args.att_out_channel, kernel_size=1)
def forward(self, x):
"""
:param x: (batch, N=1, channel, wavelet_seq)
        :return: (batch, att_out_channel) -- the Conv1d output at the last position
"""
x = x.squeeze()
conv_out = self.conv(x)
return conv_out[..., -1]
class WaveAttention(nn.Module):
def __init__(self, args, seq_len):
super(WaveAttention, self).__init__()
self.args = args
self.conv = nn.Conv1d(seq_len - 1, args.att_out_channel, kernel_size=1)
self.Att_W = nn.Parameter(torch.FloatTensor(self.args.att_out_channel, args.rnn_output_channel))
self.weight_init()
def forward(self, x):
"""
:param x: (batch, N, channel, wavelet_seq)
:return: (batch, N, att_out_channel + channel)
"""
#batch, N, channel, seq_len = x.shape
#x = x.reshape(batch * N, channel, seq_len).transpose(1, 2)
batch, seq_len, N, in_channel = x.shape
x = x.squeeze()
if len(x.shape) < 3:
# this is health condition:
x = x.unsqueeze(dim=1)
#print("x shape", x.shape)
att_x = x[:, :-1, :]
#print("att_x shape", att_x.shape) #[256,44,3]
h_t = x[:, -1, :].unsqueeze(dim=2)
#print("h_t shape", h_t.shape) #[256,3,1]
conv_out = self.conv(att_x).transpose(1, 2)
#print("conv_out shape", conv_out.shape) #[256,3,20]
#print("self.Att_W shape", self.Att_W.shape) #[20,64]
# batch x N, out_channel, seq_len
att1 = torch.einsum('bos, si -> boi', conv_out, self.Att_W)
#print("att1 shape", att1.shape) #[256,3,64]
att2 = torch.einsum('boi, bij -> boj', att1.transpose(1, 2), h_t).squeeze()
#print("att2 shape", att2.shape) #[256,64]
#a = torch.sigmoid(att2).unsqueeze(dim=1)
#print("a shape", a.shape) #[256,1,64]
#v = torch.einsum('biv, bvk -> bik', a, conv_out.transpose(1, 2)).squeeze()
#print("v shape", v.shape)
#out = torch.cat([v, h_t.squeeze()], dim=1).reshape(batch, N, -1)
#print("att out shape", out.shape)
out = torch.sigmoid(att2)
#print("out shape", out.shape)
return out
def weight_init(self):
nn.init.normal_(self.Att_W, mean=0.0, std=0.001)
nn.init.xavier_uniform_(self.conv.weight)
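# Shape-check sketch (illustrative). The argparse fields below are assumptions based on
# how WaveAttention uses `args` (att_out_channel, rnn_output_channel); tensor sizes are
# arbitrary.
def _demo_wave_attention():
    import argparse
    args = argparse.Namespace(att_out_channel=20, rnn_output_channel=64)
    att = WaveAttention(args, seq_len=45)
    x = torch.randn(8, 45, 1, 3)            # (batch, seq_len, N=1, in_channel)
    out = att(x)
    assert out.shape == (8, 64)             # sigmoid scores, one per rnn_output_channel
    return out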
class FFNN(nn.Module):
def __init__(self, args):
super(FFNN, self).__init__()
self.args = args
self.factor_num = args.factor_num
print('FFNN :',args.rnn_output_channel)
self.encoder0 = EncoderLSTM(args, args.feature_len, args.rnn_hidden_len, args.rnn_output_channel, args.rnn_layer_num)
self.encoder1 = EncoderLSTM(args, args.feature_len, args.rnn_hidden_len, args.rnn_output_channel, args.rnn_layer_num)
self.encoder2 = EncoderLSTM(args, args.feature_len, args.rnn_hidden_len, args.rnn_output_channel, args.rnn_layer_num)
self.encoder3 = EncoderLSTM(args, args.feature_len, args.rnn_hidden_len, args.rnn_output_channel, args.rnn_layer_num)
self.encoder4 = EncoderLSTM(args, args.feature_len, args.rnn_hidden_len, args.rnn_output_channel, args.rnn_layer_num)
self.seq_len0 = args.seq_len0
self.seq_len1 = args.seq_len1
self.seq_len2 = args.seq_len2
self.seq_len3 = args.seq_len3
self.seq_len4 = args.seq_len4
self.simple0 = SimpleModel(args, args.seq_len0, input_feature=args.feature_len, output_feature=32)
self.simple1 = SimpleModel(args, args.seq_len1, input_feature=args.feature_len, output_feature=32)
self.simple2 = SimpleModel(args, args.seq_len2, input_feature=args.feature_len, output_feature=32)
self.simple3 = SimpleModel(args, args.seq_len3, input_feature=args.feature_len, output_feature=32)
self.simple4 = SimpleModel(args, args.seq_len4, input_feature=args.feature_len, output_feature=32)
self.waveconvatt0 = WaveConvAtt(args, args.seq_len0, input_feature=args.feature_len, output_feature=32)
self.waveconvatt1 = WaveConvAtt(args, args.seq_len1, input_feature=args.feature_len, output_feature=32)
self.waveconvatt2 = WaveConvAtt(args, args.seq_len2, input_feature=args.feature_len, output_feature=32)
self.waveconvatt3 = WaveConvAtt(args, args.seq_len3, input_feature=args.feature_len, output_feature=32)
self.waveconvatt4 = WaveConvAtt(args, args.seq_len4, input_feature=args.feature_len, output_feature=32)
self.attention_x0 = WaveAttention(args, seq_len=args.seq_len0)
self.attention_xA3 = WaveAttention(args, seq_len=args.seq_len1)
self.attention_xD3 = WaveAttention(args, seq_len=args.seq_len2)
self.attention_xD2 = WaveAttention(args, seq_len=args.seq_len3)
self.attention_xD1 = WaveAttention(args, seq_len=args.seq_len4)
self.fc_x0_1 = nn.Linear( 5* args.rnn_hidden_len, 256)
self.fc_x0_2 = nn.Linear(256, args.rnn_hidden_len)
self.fc_a3_1 = nn.Linear( 5* args.rnn_hidden_len, 256)
self.fc_a3_2 = nn.Linear(256, args.rnn_hidden_len)
self.fc_d3_1 = nn.Linear( 5* args.rnn_hidden_len, 256)
self.fc_d3_2 = nn.Linear(256, args.rnn_hidden_len)
self.fc_d2_1 = nn.Linear( 5* args.rnn_hidden_len, 256)
self.fc_d2_2 = nn.Linear(256, args.rnn_hidden_len)
self.fc_d1_1 = nn.Linear( 5* args.rnn_hidden_len, 256)
self.fc_d1_2 = nn.Linear(256, args.rnn_hidden_len)
self.fc_cat_1 = nn.Linear( 4* args.rnn_hidden_len, 256)
self.fc_cat_2 = nn.Linear(256, 4 * args.rnn_hidden_len)
self.fc1 = nn.Linear(4*args.rnn_hidden_len, args.predict_label_num)
self.simple_last = SimpleModel(args, 1, input_feature=4 * 32, output_feature=args.predict_label_num)
self.simple_last_x0 = SimpleModel(args, args.seq_len0, input_feature=args.feature_len, output_feature=args.predict_label_num)
self.weight_init()
def forward(self, x):
"""
:param x: (batch, seq_len, N, in_channel)
"""
x0, xA3, xD3, xD2, xD1 = x
batch, seq_len_xA3, N, in_channel = xA3.shape
# print("wavelet_trans input shape:", x.shape)
xA3_lstm = self.encoder1(xA3)
xD3_lstm = self.encoder2(xD3)
xD2_lstm = self.encoder3(xD2)
xD1_lstm = self.encoder4(xD1)
x0_lstm = self.encoder0(x0)
xA3_out = xA3
xD3_out = xD3
xD2_out = xD2
xD1_out = xD1
x0_out = x0
xA3_conv = self.waveconvatt1(xA3_out)
xD3_conv = self.waveconvatt2(xD3_out)
xD2_conv = self.waveconvatt3(xD2_out)
xD1_conv = self.waveconvatt4(xD1_out)
x0_conv = self.waveconvatt0(x0_out)
catted = torch.cat([xA3_conv, xD3_conv, xD2_conv, xD1_conv], dim=1)
a3_att = torch.softmax(F.relu(self.fc_a3_2(F.relu(self.fc_a3_1(torch.cat([xA3_lstm, catted], dim=1))))),dim=1)
a3_out = a3_att * x0_conv
d3_att = torch.softmax(F.relu(self.fc_d3_2(F.relu(self.fc_d3_1(torch.cat([xD3_lstm, catted], dim=1))))),dim=1)
d3_out = d3_att * x0_conv
d2_att = torch.softmax(F.relu(self.fc_d2_2(F.relu(self.fc_d2_1(torch.cat([xD2_lstm, catted], dim=1))))),dim=1)
d2_out = d2_att * x0_conv
d1_att = torch.softmax(F.relu(self.fc_d1_2(F.relu(self.fc_d1_1(torch.cat([xD1_lstm, catted], dim=1))))),dim=1)
d1_out = d1_att * x0_conv
out = torch.cat([a3_out, d3_out, d2_out, d1_out], dim=1)
out = F.relu(self.fc1(out))
return out
class EncoderLSTM(nn.Module):
def __init__(self, args, feature_len, hidden_len, rnn_output_channel, num_layers=1, bidirectional=False):
super(EncoderLSTM, self).__init__()
self.feature_len = feature_len
self.hidden_len = hidden_len
self.num_layers = num_layers
self.rnn_output_channel = rnn_output_channel
# RNN layer
self.rnn = nn.LSTM(
input_size=feature_len,
hidden_size=hidden_len,
num_layers=num_layers,
batch_first=True,
bidirectional=bidirectional
)
if bidirectional:
self.conv = nn.Conv1d(2 * hidden_len, hidden_len, kernel_size=1)
else:
self.conv = nn.Conv1d(hidden_len, hidden_len, kernel_size=1)
#self.lbn = nn.LayerNorm([hidden_len, args.origin_seq_len])
self.bn1 = nn.BatchNorm1d(hidden_len)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
"""
x = (batch, seq, N, channel)
to x = (batch x N, sequence, channel)
:return:输出out(batch, N, rnn_output_channel, sequence)
"""
batch, seq, N, channel = x.shape
x = x.transpose(1, 2)
x = x.reshape(batch * N, seq, channel)
out, _ = self.rnn(x)
out = out[:,-1,:].unsqueeze(dim=1)
# out = batch*N, seq, hidden_num
out = out.transpose(1, 2)
out = self.bn1(out)
out = self.dropout(out)
out = out.reshape(batch, self.hidden_len)
return out
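# Illustrative shape walk-through for EncoderLSTM (a hedged sketch; the concrete
# sizes below are made-up examples, not values from the training config). With a
# single node dimension N=1 the final reshape in forward() is consistent:
#   enc = EncoderLSTM(args, feature_len=8, hidden_len=64, rnn_output_channel=64, num_layers=1)
#   x = torch.randn(16, 50, 1, 8)    # (batch, seq, N, channel)
#   out = enc(x)                     # (16, 50, 1, 8) -> LSTM last step -> (16, 64)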
class SimpleModel(nn.Module):
def __init__(self, args, seq_len, input_feature, output_feature):
super(SimpleModel, self).__init__()
self.args = args
self.factor_num = args.factor_num
self.conv1 = nn.Conv1d(input_feature, 16, kernel_size=1)
self.bn1 = nn.BatchNorm1d(16)
self.dropout1 = nn.Dropout(0.2)
self.conv2 = nn.Conv1d(16, 32, kernel_size=1)
self.bn2 = nn.BatchNorm1d(32)
self.dropout2 = nn.Dropout(0.2)
# self.encoder = EncoderLSTM(args.feature_len, args.rnn_hidden_len, args.rnn_output_channel, args.rnn_layer_num)
self.encoder = EncoderLSTM(args, 4, args.rnn_hidden_len, args.rnn_output_channel,
args.rnn_layer_num, bidirectional=args.bidirectional)
# health:
self.fc1 = nn.Linear(seq_len * args.rnn_output_channel, args.predict_label_num)
self.fc2 = nn.Linear(seq_len * 32, output_feature)
self.dropout3 = nn.Dropout(0.2)
# self.fc1 = nn.Linear(4 * (args.rnn_output_channel + args.att_out_channel), 5)
# add a sigmoid to generate probability
# self.fc2 = nn.Linear()
self.weight_init()
def forward(self, x):
"""
:param x: (batch, sequence, N, channel)
returns : (batch, channel)
"""
# print("SimpleModel input: ", x.shape)
batch, seq_len, N, in_channel = x.shape
x = x.squeeze()
if len(x.shape) < 3:
# this is health condition:
x = x.unsqueeze(dim=1)
x = x.transpose(1, 2)
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.dropout1(x)
x = self.conv2(x)
x = F.relu(self.bn2(x))
x = self.dropout2(x)
x = x.squeeze()
x = x.reshape(batch, -1)
out = self.fc2(x)
out = F.relu(out)
out = self.dropout3(out)
# out = F.relu(self.fc2(out))
return out
def weight_init(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
class Trainer:
def __init__(self, args, model, optimizer, criterion=nn.BCELoss()):
self.model = model
self.args = args
self.criterion = criterion
self.optimizer = optimizer
self.clip = args.clip
self.lr_decay_rate = args.lr_decay_rate
self.epochs = args.epochs
self.scheduler = optim.lr_scheduler.LambdaLR(
self.optimizer, lr_lambda=lambda epochs: self.lr_decay_rate ** epochs)
def train(self, input_data, target, need_print=False):
self.model.train()
self.optimizer.zero_grad()
# train
output = self.model(input_data)
output = output.squeeze()
loss, acc = util.calc_metrics_multi_class(output, target, criterion=self.criterion, cuda=self.args.cuda, need_print=need_print)
regularization_loss = 0
# for param in self.model.parameters():
# regularization_loss += torch.sum(abs(param))
# loss = loss + 0.001 * regularization_loss
#loss, acc = util.calc_metrics_multi_class(output, target, criterion=self.criterion, cuda=self.args.cuda, need_print=need_print)
loss.backward(retain_graph=True)
# loss.backward()
self.optimizer.step()
return loss.item(), acc.item()
def eval(self, input_data, target, need_print=False):
self.model.eval()
output = self.model(input_data) # [batch_size,seq_length,num_nodes]
output = output.squeeze()
mae, acc = util.calc_metrics_multi_class(output, target, criterion=self.criterion, cuda=self.args.cuda, need_print=need_print)
return mae.item(), acc.item()
def predict(self, input_data):
self.model.eval()
return self.model(input_data)
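# Hedged usage sketch for Trainer. The optimizer construction and the args fields
# used here (args.lr, args.epochs) and the batch tensors are assumptions for
# illustration only, not values defined in this module:
#   model = SimpleModel(args, args.seq_len0, input_feature=args.feature_len,
#                       output_feature=args.predict_label_num)
#   optimizer = optim.Adam(model.parameters(), lr=args.lr)
#   trainer = Trainer(args, model, optimizer, criterion=nn.CrossEntropyLoss())
#   for epoch in range(args.epochs):
#       loss, acc = trainer.train(batch_x, batch_y)
#       trainer.scheduler.step()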
class WaveConvAtt(nn.Module):
def __init__(self, args, seq_len, input_feature, output_feature):
super(WaveConvAtt, self).__init__()
self.args = args
self.factor_num = args.factor_num
self.conv1 = nn.Conv1d(input_feature, 16, kernel_size=1)
self.bn1 = nn.BatchNorm1d(16)
self.dropout1 = nn.Dropout(0.2)
self.conv2 = nn.Conv1d(16, 32, kernel_size=1)
self.bn2 = nn.BatchNorm1d(32)
self.dropout2 = nn.Dropout(0.2)
self.conv3 = nn.Conv1d(input_feature, 32, kernel_size=seq_len)
self.bn3 = nn.BatchNorm1d(32)
self.encoder = EncoderLSTM(args, 4, args.rnn_hidden_len, args.rnn_output_channel,
args.rnn_layer_num, bidirectional=args.bidirectional)
self.fc1 = nn.Linear(seq_len * 32, 256)  # the best model so far used 32, 128 here
self.fc2 = nn.Linear(256, args.rnn_hidden_len)
self.fc3 = nn.Linear(seq_len * 32, args.rnn_hidden_len)
self.fc4 = nn.Linear(32, 256)
self.fc5 = nn.Linear(256, args.rnn_hidden_len)
self.dropout3 = nn.Dropout(0.2)
# self.fc1 = nn.Linear(4 * (args.rnn_output_channel + args.att_out_channel), 5)
# add a sigmoid to generate probability
# self.fc2 = nn.Linear()
self.weight_init()
def forward(self, x):
"""
:param x: (batch, sequence, N, channel)
returns : (batch, channel)
"""
# print("SimpleModel input: ", x.shape)
batch, seq_len, N, in_channel = x.shape
x = x.squeeze()
if len(x.shape) < 3:
# this is health condition:
x = x.unsqueeze(dim=1)
xx = x.transpose(1, 2)
xx = self.conv1(xx)
xx = F.relu(self.bn1(xx))
xx = self.dropout1(xx)
xx = self.conv2(xx)
xx = F.relu(self.bn2(xx))
xx = self.dropout2(xx)
| |
<reponame>iceprincefounder/selected-sources<filename>USD-Scripts/python/scenegraphLIB/lcShotgun.py
# coding=utf-8
"""
lcShotgun.py
get and store shot gun info from project and shot name
"""
import sys
import os
import shutil
global ET
#we cannot use lxml in maya2013. use default xmltree
try:
import maya.cmds as cmds
except:
pass
try:
from lxml import etree as ET
except:
import xml.etree.ElementTree as ET
import string
import re
import production.pipeline.lcProdProj as clpp
import traceback
import tempfile
import production.pipeline.utils as cu
import production.pipeline.color_log as ppcl
import logging
import production.pipeline.fastCache as ppfc
reload(ppfc)
if os.environ.has_key('LCTOOLSET'):
sys.path.append(os.path.join(os.environ['LCTOOLSET'],'lib/production'))
else:
sys.path.append('/mnt/utility/toolset/lib/production')
global sg
sg=None
def init_shotgun():
global sg
if not sg:
from production.shotgun_connection import Connection
sg = Connection('get_project_info').get_sg()
def uninit_shotgun():
if sg:sg.close()
def localMsgOut(msg,level=0):
prefix=['Info: ','Warn: ','Error: ']
print (prefix[level]+msg)
if level==2:
raise Exception(msg)
def outMsg(msg,level=0):
try:
import lcKatanaUtils as lcku
lcku.katanaPrint(msg,level)
except:
localMsgOut(msg,level)
def removeAssetDigtal(asset):
res=re.search(r"\d*$",asset)
return asset[:res.start()].lower()
def getRealNamespace(nameList,originalName):
oldAssetName = removeAssetDigtal( string.split(originalName,'.')[-1] )
realAssetName=oldAssetName
i=1
while True:
if realAssetName not in nameList:
nameList.append(realAssetName)
return realAssetName
else:
realAssetName=oldAssetName+str(i)
i+=1
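# Hedged examples for the two helpers above (expected results inferred from the
# regex/loop logic, not from production asset names):
#   removeAssetDigtal('TreeBig02')              -> 'treebig'
#   getRealNamespace([], 'shot01.TreeBig02')    -> 'treebig' (and appends it to the list)
#   getRealNamespace(['treebig'], 'a.TreeBig3') -> 'treebig1'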
#-----------------------------------------shotgun functions
def findShotgunProjects(text=False):
init_shotgun()
shotgunProj = sg.find('Project',[['sg_status','is','Active']],['name'])
if text==False:
return sorted(shotgunProj)
else:
result=[]
for i in range(1,len(shotgunProj)):
result.append(shotgunProj[i]['name'])
return sorted(result)
def findShotgunSequence(proj,text=False):
init_shotgun()
filter = [
['name', 'is', proj]
]
projInfo = sg.find_one('Project', filter)
seqFilter=[
['project', 'is', {'type': 'Project', 'id': projInfo['id']}],
]
sequences=sg.find('Sequence', seqFilter, ['id','code'])
if text==False:
return sorted(sequences)
else:
result=[]
for p in sequences:
result.append(p['code'])
return sorted(result)
def findShotgunShots(proj=None,seq=None,text=False):
init_shotgun()
filter = [
['name', 'is', proj]
]
projInfo = sg.find_one('Project', filter)
shotFilter=[]
if seq is None:
shotFilter=[
['project', 'is', {'type': 'Project', 'id': projInfo['id']}],
]
else:
allSeq=findShotgunSequence(proj)
seqId=None
for s in allSeq:
if s['code']==seq:
seqId=s
if seqId is not None:
shotFilter=[
['project', 'is', {'type': 'Project', 'id': projInfo['id']}],
['sg_sequence', 'is', {'type': 'Sequence', 'id': seqId['id']}]
]
shots=sg.find('Shot', shotFilter, ['id','code'])
if shots is None:
return None
if text==False:
return sorted(shots)
else:
result=[]
for p in shots:
result.append(p['code'])
return sorted(result)
def getShotgunPathVersion(path):
if path is None or path=='':
return ''
sep=string.split(path,'/')
i=0
for k in sep:
if k=='publish':
break
i+=1
if (i+1)<len(sep):
sepp=string.split(sep[i+1],'.')
if len(sepp)==4:
return sepp[2]+'.'+sepp[3]
return ''
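# Hedged example for getShotgunPathVersion. The path below is invented to show the
# ".../publish/<step>.<asset>.<version>.<sub>/..." layout this parser assumes:
#   getShotgunPathVersion('/proj/x/publish/srf.hero.v012.0003/hero.xml')  -> 'v012.0003'
#   getShotgunPathVersion('/proj/x/other/srf.hero.v012.0003/hero.xml')    -> ''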
#-----------------------------------------shotgun functions
class lcGunBase:
"""
lcshotgun operation base class
"""
lcBuildVersion=0.2
def __init__(self):
self.logger=ppcl.init_logger(__name__)
return None
def sortFolderInfoByCode(self,foldInfo):
"""
sort the fold path by code
"""
sortFolder = {}
for folder in foldInfo:
sortFolder[folder['code']] = folder
#sort the key
sortedkey = []
for key in sorted(sortFolder.iterkeys()):
sortedkey.append(key)
return sortedkey
class lcShotgun(lcGunBase):
"""
Give me a project and shot, i will collect relevant info for you.
You can query the sequence, assets, in time, out time .etc
And make a new xml file from the layout scene graph xml file.
The new xml file will contains the latest assets and their lookfiles, and project,shot, in and out time info.
Use lcMakeKatanaScene.py to make a katana scene from the new xml file.
Example:
sgInfo=lcShotgun('GTA','a10010')
sgInfo.initShotgunInfo()
"""
UNUSED_ATTRS_IN_INSTANCE=['fullPath','hidden','angle','distance','asb_wform']
def __init__(self, project=None, shot=None):
lcGunBase.__init__(self)
if not project:
return
init_shotgun()
self.set(project=project, shot=shot)
self.fast_cache=ppfc.FastCache(projInfo=self.projInfo)
self.fast_cache.set_shot(shot)
self.assets_pool={}
self.log_txt=[]
self.instance_parent_filter=[]
self.xml_update_type='ani'
def __del__(self):
uninit_shotgun()
def __getLatestVersion(self):
#sort the key
sortedkey = lcGunBase.sortFolderInfoByCode(self,self.shotgunFolderInfo)
self.shotgunFolderkey=[]
self.shotgunFolderPath={}
for key in sortedkey:
splitKey = string.split(key, '.')
self.shotgunFolderPath[splitKey[1]]=[]
for key in sortedkey:
splitKey = string.split(key, '.')
for folderHandle in self.shotgunFolderInfo:
if folderHandle['code'] == key and folderHandle['sg_version_folder'] and folderHandle['sg_version_folder'].has_key('local_path'):
self.shotgunFolderPath[splitKey[1]].append( folderHandle['sg_version_folder']['local_path'] )
def __initProj(self):
#####get project handle
if self.proj is not None:
projFilter = [
['name', 'is', self.proj]
]
self.shotgunProjInfo = sg.find_one('Project', projFilter)
if self.shotgunProjInfo is None:
outMsg( 'lcShotgun : '+ 'Cannot find project ' + self.proj,2)
else:
outMsg( 'No project name is given',2)
def __initShot(self,shot=None):
if shot is not None:
self.shot=shot
if not self.shot:
return
outMsg( 'lcShotgun: Init shotgun info for Project: '+ self.proj+ ' Shot: '+self.shot)
#####get shot handle
shotFilter = [
['code', 'is', self.shot],
['project', 'is', {'type': 'Project', 'id': self.shotgunProjInfo['id']}]
]
self.shotgunShotInfo = sg.find_one('Shot', shotFilter, ['sg_sequence', 'sg_cut_in', 'sg_cut_out', 'sg_ani_cut_in', 'sg_ani_cut_out'])
if self.shotgunShotInfo is None:
outMsg('lcShotgun : '+'Cannot find shot ' +self.proj+' ' +self.shot,2)
#####get shot folder handle
folderInfoFilter = [
['entity', 'is', self.shotgunShotInfo],
['project', 'is', {'type': 'Project', 'id': self.shotgunProjInfo['id']}]
]
self.shotgunFolderInfo = sg.find('Version', folderInfoFilter, ['code', 'sg_version_folder'])
# print '------',self.shotgunFolderInfo
if self.shotgunFolderInfo is None:
outMsg('lcShotgun : '+ 'Cannot find version folders for ' + self.shot,2)
self.__getLatestVersion()
#---------------------------------------xml remake
@staticmethod
def get_node_arbitrary_attrs(node):
arbitrary_attr = {}
for rr in node.getiterator('attribute'):
if rr.attrib.get('name',''):
arbitrary_attr[rr.attrib['name']]= rr.attrib.get('value', '')
return arbitrary_attr
@staticmethod
def get_asset_xml_arbitrary_attrs(file):
tree = ET.parse(file)
root = tree.getroot()
arbitrary_attr = None
for r in root.getiterator('arbitraryList'):
arbitrary_attr = lcShotgun.get_node_arbitrary_attrs(r)
return arbitrary_attr
@staticmethod
def add_mod_level_info(node):
ref_file=node.attrib.get('refFile','')
abc_info=clpp.lcProdProj.get_cache_asset(ref_file)
if abc_info:
level_info=abc_info.keys()
if len(level_info)>1:
for r in node.getiterator('arbitraryList'):
#add modLevel
attr_node=None
current_mod_level='md'
for rr in r.getiterator('attribute'):
if rr.attrib.get('name', '') == 'modLevel':
attr_node=rr
if rr.attrib.get('name','')=='fullPath' and 'plt_proxy_grp' in rr.attrib.get('value',''):
current_mod_level='lo'
if not attr_node:
attr_node=ET.SubElement(r,'attribute')
attr_node.set('name','modLevel')
attr_node.set('type','string')
attr_node.set('value',','.join(level_info))
#add currentModLevel
current_modlevel_node = None
for rr in r.getiterator('attribute'):
if rr.attrib.get('name', '') == 'currentModLevel':
current_modlevel_node = rr
if not current_modlevel_node:
current_modlevel_node = ET.SubElement(r,'attribute')
current_modlevel_node.set('name','currentModLevel')
current_modlevel_node.set('type','string')
current_modlevel_node.set('value',current_mod_level if current_mod_level in level_info else level_info[0])
@staticmethod
def set_node_arbitrary(node,arbit):
if arbit:
for r in node.getiterator('arbitraryList'):
for rr in r.getiterator('attribute'):
if rr.attrib.get('name', '') == 'modVersion':
rr.set('value', arbit.get('modVersion',''))
if rr.attrib.get('name', '') == 'rigVersion':
rr.set('value', arbit.get('rigVersion',''))
def __set_cache_attributes(self,node,cache_asset):
if cache_asset.get('isCache',False) or cache_asset.get('isTransCache',False):
cache_status=self.projInfo.getAniAssetStatus(self.shot,node.attrib['name'])
lcShotgun.xmlSetXformDefault(node,
cache_status,
cache_asset.get('isCache',False))
if cache_asset.get('isCache',False):
lcShotgun.xmlRemoveNodeBounds(node)
self.assets_pool[node.attrib['name']]= cache_asset.get('data','')
def __set_refFile_path(self,node,cache_asset):
newAssetPath =cache_asset.get('data','')
#set 'name' to asset namespace,
#set 'refFile' to ani asset or srf asset
if newAssetPath:
node.attrib['refFile']=str(newAssetPath)
node.attrib['type']='reference'
if '/mod/publish/' in node.attrib['refFile']:
lcShotgun.add_mod_level_info(node)
a = lcShotgun.get_asset_xml_arbitrary_attrs(newAssetPath)
lcShotgun.set_node_arbitrary(node,a)
#replace the node refFile with srf refFile.
def __xmlResetAsset_elementTree_Srf(self,node):
# lcShotgun.xmlRemoveNodeBounds(node)
srfAssetPath=None
srfSearch=self.fast_cache.get_fast_srf_data(node)
realAssetName = removeAssetDigtal(string.split(node.attrib['name'], '.')[-1])
# search in 'srf'
if srfSearch and srfSearch.get('xml') :
srfAssetPath=srfSearch['xml']
else:
msg='Can not find srf xml file for '+realAssetName
if srfAssetPath is None:
srfAssetPath=self.projInfo.getModAsset(realAssetName)
if srfAssetPath is not None:
msg+=', Use mod xml file '+getShotgunPathVersion(srfAssetPath)
else:
msg+=', Use asset in layout xml.'
outMsg(msg,1)
if srfAssetPath :
node.attrib['refFile']=srfAssetPath
node.attrib['type']='reference'
a = lcShotgun.get_asset_xml_arbitrary_attrs(srfAssetPath)
lcShotgun.set_node_arbitrary(node,a)
# else:
# node.attrib['type']='group'
def __xmlResetAsset_elementTree(self,node):
mod_type=re.findall('/projects/[a-z]{3}/asset/([a-z]{3})/', node.attrib.get('refFile',''))
if mod_type:
lcShotgun.set_arbitrary_attrs(node, {'modelType':mod_type[0]})
# get cache data
cache_asset=self.fast_cache.get_fast_cache_data(node,ani_type=self.xml_update_type)
if not cache_asset:
outMsg( 'Cannot find asset data'+node.attrib['name'],1)
return
self.__set_cache_attributes(node,cache_asset)
self.__set_refFile_path(node, cache_asset)
@staticmethod
def rollbackInstance(node,new_name,old_name):
its=node.findall(old_name)
if its:
key_value={}
for it in its:
for k,v in it.attrib.items():
key_value[k]=v
its = node.findall(new_name)
new_node=None
if its:
for it in its:
new_node=it
else:
new_node = ET.SubElement(node,new_name)
for k,v in key_value.items():
new_node.set(k,v)
return new_node
@staticmethod
def storeOldInstanceValue(node,name,key_value):
its=node.findall(name)
if not its:
new_ele = ET.SubElement(node,name)
for k,v in key_value.items():
new_ele.set(k,v)
return new_ele
@staticmethod
def xmlRemoveNodeBounds(node):
its=node.findall('bounds')
bounds_old={}
for t in its:
for k,v in t.attrib.items():
bounds_old[k]=v
node.remove(t)
lcShotgun.storeOldInstanceValue(node,'bounds_old',bounds_old)
@staticmethod
def xmlSetXformDefault(node,cache_status='',is_cache=True):
its=node.findall('xform')
xform_old={}
for it in its:
xform_old['value']=it.attrib['value']
it.attrib['value']= '1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1'
lcShotgun.storeOldInstanceValue(node,'xform_old',xform_old)
lcShotgun.set_is_cache(node,is_cache,status=cache_status)
@staticmethod
def set_arbitrary_attrs(node,attr_dict):
arb_list = node.find('arbitraryList')
if arb_list is None:
arb_list = ET.SubElement(node,'arbitraryList')
arb_attr = arb_list.findall('attribute')
attr_nodes={}
for a in arb_attr:
attr_nodes[a.attrib.get('name')]=a
for k,v in attr_dict.items():
if k in attr_nodes.keys():
attr_nodes[k].set(k,v)
else:
cache_node=ET.SubElement(arb_list,'attribute')
cache_node.set('name',k)
cache_node.set('type','string')
cache_node.set('value',v)
@staticmethod
def set_is_cache(node,is_cache,status=''):
if not is_cache:
lcShotgun.add_mod_level_info(node)
lcShotgun.set_arbitrary_attrs(node, {'isCache':['no','yes'][is_cache]})
if status: lcShotgun.set_arbitrary_attrs(node, {'cacheStatus':status})
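# Hedged example for set_arbitrary_attrs on a minimal in-memory element (not a
# real scenegraph xml node):
#   node = ET.fromstring('<instance name="a"/>')
#   lcShotgun.set_arbitrary_attrs(node, {'isCache': 'yes'})
#   # node now holds <arbitraryList><attribute name="isCache" type="string" value="yes"/></arbitraryList>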
#---------------------------------------xml remake
def set(self, project=None, shot=None):
if project is None :
return
self.proj = project
self.projInfo=clpp.lcProdProj()
self.projInfo.setProj(self.proj,context=shot)
#in shot mode
self.shot = shot
#shotgun data
self.shotgunProjInfo = None
self.shotgunShotInfo = None
self.shotgunFolderInfo = None
#shot path
self.shotgunFolderkey = None
#list of path{'lay':'...','ani':'...'}
self.shotgunFolderPath = None
self.assetTableList=None
self.lay_xml=None
def initShotgunInfo(self):
self.__initProj()
self.__initShot()
def getAniCutIn(self):
if self.shotgunShotInfo is not None:
return self.shotgunShotInfo['sg_ani_cut_in']
def getAniCutOut(self):
if self.shotgunShotInfo is not None:
return self.shotgunShotInfo['sg_ani_cut_out']
#get cut in time
def getTimeIn(self):
if self.shotgunShotInfo is not None:
cut_in=self.shotgunShotInfo['sg_cut_in']
return cut_in
#get cut out time
def getTimeOut(self):
if self.shotgunShotInfo is not None:
cut_out=self.shotgunShotInfo['sg_cut_out']
return cut_out
#get cut in time
def getCutIn(self):
if self.shotgunShotInfo is not None:
cut_in=self.shotgunShotInfo['sg_cut_in']
return cut_in
#get cut out time
def getCutOut(self):
if self.shotgunShotInfo is not None:
cut_out=self.shotgunShotInfo['sg_cut_out']
return cut_out
def __is_instance_under_group(self,all_attributes):
for at in all_attributes:
if at.attrib.get('name') =='fullPath':
full_p=at.attrib.get('value')
if full_p:
for c in self.instance_parent_filter:
if c in full_p:return True
break
return False
def instance_xml(self,inst_num,exclude=[],exclude_reverse=False):
tree = ET.parse(self.lay_xml[0])
root = tree.getroot()
self.fast_cache.asset_mod_xml_node={}
for r in root.getiterator('instance'):
if r.attrib.get('type', '')=='reference':
self.fast_cache.get_fast_cache_data(r)
instance_source_xml_node = self.__add_instance_source(root)
for k,v in self.fast_cache.asset_mod_xml_node.items():
if len(v)<inst_num :
continue
if not exclude_reverse and k in exclude:
continue
if exclude_reverse and k not in exclude:
continue
mod_type=self.projInfo.getAssetType(k)
ast_count=0
for vv in v:
arb_list = vv.find('arbitraryList')
attr_set=arb_list.findall('attribute')
if self.instance_parent_filter and not self.__is_instance_under_group(attr_set):
continue
vv.attrib['groupType']='instance'
vv.attrib['type']='group'
del vv.attrib['refFile']
del vv.attrib['refType']
for at in attr_set:
| |
+
14*np.log(1 - mckin/mbkin)))/(567*mbkin**6) +
(810496*mckin**7*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(189*mbkin**7) - (3241984*mckin**8*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(729*mbkin**8) +
(810496*mckin**9*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(243*mbkin**9) - (1620992*mckin**10*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(891*mbkin**10) +
(5673472*mckin**11*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(8019*mbkin**11) - (6483968*mckin**12*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(34749*mbkin**12) +
(810496*mckin**13*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/
(27027*mbkin**13) - (1620992*mckin**14*(1 + 14*np.log(2) +
14*np.log(1 - mckin/mbkin)))/(729729*mbkin**14) +
(108544*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/8019 -
(108544*mbkin*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(104247*mckin) - (2713600*mckin*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(34749*mbkin) +
(26593280*mckin**2*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(104247*mbkin**2) - (3799040*mckin**3*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(8019*mbkin**3) +
(759808*mckin**4*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(2673*mbkin**4) + (759808*mckin**5*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(729*mbkin**5) -
(2713600*mckin**6*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(729*mbkin**6) + (542720*mckin**7*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(81*mbkin**7) -
(5969920*mckin**8*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(729*mbkin**8) + (5318656*mckin**9*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(729*mbkin**9) -
(12916736*mckin**10*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(2673*mbkin**10) + (18995200*mckin**11*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(8019*mbkin**11) -
(87377920*mckin**12*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(104247*mbkin**12) + (542720*mckin**13*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(2673*mbkin**13) -
(3147776*mckin**14*(1 + 15*np.log(2) + 15*np.log(1 - mckin/mbkin)))/
(104247*mbkin**14) + (217088*mckin**15*(1 + 15*np.log(2) +
15*np.log(1 - mckin/mbkin)))/(104247*mbkin**15) +
(1428992*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/104247 -
(714496*mbkin*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(729729*mckin) - (5715968*mckin*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(66339*mbkin) +
(228638720*mckin**2*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(729729*mbkin**2) - (71449600*mckin**3*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(104247*mbkin**3) +
(5715968*mckin**4*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(8019*mbkin**4) + (5715968*mckin**5*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(8019*mbkin**5) -
(22863872*mckin**6*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(5103*mbkin**6) + (7144960*mckin**7*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(729*mbkin**7) -
(71449600*mckin**8*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(5103*mbkin**8) + (74307584*mckin**9*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(5103*mbkin**9) -
(91455488*mckin**10*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(8019*mbkin**10) + (54301696*mckin**11*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(8019*mbkin**11) -
(28579840*mckin**12*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(9477*mbkin**12) + (714496000*mckin**13*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(729729*mbkin**13) -
(22863872*mckin**14*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(104247*mbkin**14) + (22149376*mckin**15*(1 + 16*np.log(2) +
16*np.log(1 - mckin/mbkin)))/(729729*mbkin**15) -
(1428992*mckin**16*(1 + 16*np.log(2) + 16*np.log(1 - mckin/mbkin)))/
(729729*mbkin**16) + (57281920*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/4135131 -
(11456384*mbkin*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(12405393*mckin) - (22912768*mckin*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(243243*mbkin) +
(91651072*mckin**2*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(243243*mbkin**2) - (229127680*mckin**3*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(243243*mbkin**3) +
(45825536*mckin**4*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(34749*mbkin**4) - (91651072*mckin**6*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(18711*mbkin**6) +
(22912768*mckin**7*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(1701*mbkin**7) - (114563840*mckin**8*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(5103*mbkin**8) +
(45825536*mckin**9*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(1701*mbkin**9) - (458255360*mckin**10*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(18711*mbkin**10) +
(45825536*mckin**11*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(2673*mbkin**11) - (320778752*mckin**12*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(34749*mbkin**12) +
(916510720*mckin**13*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(243243*mbkin**13) - (91651072*mckin**14*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(81081*mbkin**14) +
(57281920*mckin**15*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(243243*mbkin**15) - (11456384*mckin**16*(1 + 17*np.log(2) +
17*np.log(1 - mckin/mbkin)))/(375921*mbkin**16) +
(22912768*mckin**17*(1 + 17*np.log(2) + 17*np.log(1 - mckin/mbkin)))/
(12405393*mbkin**17) + (173382656*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/12405393 -
(10836416*mbkin*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(12405393*mckin) - (10836416*mckin*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(106029*mbkin) +
(108364160*mckin**2*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(243243*mbkin**2) - (43345664*mckin**3*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(34749*mbkin**3) +
(173382656*mckin**4*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(81081*mbkin**4) - (43345664*mckin**5*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(34749*mbkin**5) -
(86691328*mckin**6*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(18711*mbkin**6) + (108364160*mckin**7*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(6237*mbkin**7) -
(173382656*mckin**8*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(5103*mbkin**8) + (238401152*mckin**9*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(5103*mbkin**9) -
(43345664*mckin**10*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(891*mbkin**10) + (736876288*mckin**11*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(18711*mbkin**11) -
(866913280*mckin**12*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(34749*mbkin**12) + (996950272*mckin**13*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(81081*mbkin**13) -
(86691328*mckin**14*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(18711*mbkin**14) + (314256064*mckin**15*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(243243*mbkin**15) -
(346765312*mckin**16*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(1378377*mbkin**16) + (54182080*mckin**17*(1 + 18*np.log(2) +
18*np.log(1 - mckin/mbkin)))/(1772199*mbkin**17) -
(21672832*mckin**18*(1 + 18*np.log(2) + 18*np.log(1 - mckin/mbkin)))/
(12405393*mbkin**18) + (27895936*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/1980693 -
(27895936*mbkin*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(33671781*mckin) - (195271552*mckin*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(1772199*mbkin) +
(27895936*mckin**2*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(53703*mbkin**2) - (55791872*mckin**3*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(34749*mbkin**3) +
(111583744*mckin**4*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(34749*mbkin**4) - (111583744*mckin**5*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(34749*mbkin**5) -
(111583744*mckin**6*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(34749*mbkin**6) + (55791872*mckin**7*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(2673*mbkin**7) -
(390543104*mckin**8*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(8019*mbkin**8) + (55791872*mckin**9*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(729*mbkin**9) -
(725294336*mckin**10*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(8019*mbkin**10) + (223167488*mckin**11*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(2673*mbkin**11) -
(2120091136*mckin**12*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(34749*mbkin**12) + (111583744*mckin**13*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(3159*mbkin**13) -
(557918720*mckin**14*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(34749*mbkin**14) + (195271552*mckin**15*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(34749*mbkin**15) -
(864774016*mckin**16*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(590733*mbkin**16) + (27895936*mckin**17*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(104247*mbkin**17) -
(1032149632*mckin**18*(1 + 19*np.log(2) + 19*np.log(1 - mckin/mbkin)))/
(33671781*mbkin**18) + (55791872*mckin**19*(1 + 19*np.log(2) +
19*np.log(1 - mckin/mbkin)))/(33671781*mbkin**19) +
(4822336*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/340119 -
(2411168*mbkin*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(3061071*mckin) - (120558400*mckin*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(1020357*mbkin) +
(96446720*mckin**2*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(161109*mbkin**2) - (12055840*mckin**3*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(5967*mbkin**3) +
(4822336*mckin**4*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(1053*mbkin**4) - (19289344*mckin**5*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(3159*mbkin**5) +
(24111680*mckin**7*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(1053*mbkin**7) - (48223360*mckin**8*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(729*mbkin**8) +
(9644672*mckin**9*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(81*mbkin**9) - (38578688*mckin**10*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(243*mbkin**10) +
(120558400*mckin**11*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(729*mbkin**11) - (48223360*mckin**12*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(351*mbkin**12) +
(96446720*mckin**13*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(1053*mbkin**13) - (154314752*mckin**14*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(3159*mbkin**14) +
(2411168*mckin**15*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(117*mbkin**15) - (120558400*mckin**16*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(17901*mbkin**16) +
(265228480*mckin**17*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(161109*mbkin**17) - (96446720*mckin**18*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(340119*mbkin**18) +
(2411168*mckin**19*(1 + 20*np.log(2) + 20*np.log(1 - mckin/mbkin)))/
(78489*mbkin**19) - (4822336*mckin**20*(1 + 20*np.log(2) +
20*np.log(1 - mckin/mbkin)))/(3061071*mbkin**20) +
(4*(-260123404758497280*(1 - mckin/mbkin)**5 + 390185107137745920*
(1 - mckin/mbkin)**6 - 388690823028777984*(1 - mckin/mbkin)**7 +
195377647247557632*(1 - mckin/mbkin)**8 - 34216429928650112*
(1 - mckin/mbkin)**9 - 6097700477192896*(1 - mckin/mbkin)**10 -
3945134197513600*(1 - mckin/mbkin)**11 - 3335963793491680*
(1 - mckin/mbkin)**12 - 2990240473256960*(1 - mckin/mbkin)**13 -
2738659018760960*(1 - mckin/mbkin)**14 - 2537989658886624*
(1 - mckin/mbkin)**15 - 2370644934651168*(1 - mckin/mbkin)**16 -
2227287774097176*(1 - mckin/mbkin)**17 - 2102198391233484*
(1 - mckin/mbkin)**18 - 1991562479412482*(1 - mckin/mbkin)**19 -
1892686294969085*(1 - mckin/mbkin)**20 + 132126173845585920*
(1 - mckin/mbkin)**7*(np.log(2) + np.log(1 - mckin/mbkin)) -
66063086922792960*(1 - mckin/mbkin)**8*(np.log(2) +
np.log(1 - mckin/mbkin)) + 11010514487132160*(1 - mckin/mbkin)**9*
(np.log(2) + np.log(1 - mckin/mbkin)) + 5505257243566080*
(1 - mckin/mbkin)**10*(np.log(2) + np.log(1 - mckin/mbkin)) +
4754540346716160*(1 - mckin/mbkin)**11*(np.log(2) +
np.log(1 - mckin/mbkin)) + 4379181898291200*(1 - mckin/mbkin)**12*
(np.log(2) + np.log(1 - mckin/mbkin)) + 4076007766871040*
(1 - mckin/mbkin)**13*(np.log(2) + np.log(1 - mckin/mbkin)) +
3808925793953280*(1 - mckin/mbkin)**14*(np.log(2) +
np.log(1 - mckin/mbkin)) + 3570717547837440*(1 - mckin/mbkin)**15*
(np.log(2) + np.log(1 - mckin/mbkin)) + 3357773812673280*
(1 - mckin/mbkin)**16*(np.log(2) + np.log(1 - mckin/mbkin)) +
3167016139647360*(1 - mckin/mbkin)**17*(np.log(2) +
np.log(1 - mckin/mbkin)) + 2995631463464640*(1 - mckin/mbkin)**18*
(np.log(2) + np.log(1 - mckin/mbkin)) + 2841110280328320*
(1 - mckin/mbkin)**19*(np.log(2) + np.log(1 - mckin/mbkin)) +
2701265736929760*(1 - mckin/mbkin)**20*(np.log(2) +
np.log(1 - mckin/mbkin))))/3429361293202845 + 496*np.log(2/mus) +
(512*mbkin*np.log(2/mus))/(3*mckin) + (7168*mckin*np.log(2/mus))/
(3*mbkin) - (1920*mckin**2*np.log(2/mus))/mbkin**2 -
(3200*mckin**3*np.log(2/mus))/mbkin**3 + (128*mckin**4*np.log(2/mus))/
mbkin**4 + (2560*mckin**5*np.log(2/mus))/(3*mbkin**5) +
(1408*mckin**6*np.log(2/mus))/mbkin**6 - (640*mckin**7*np.log(2/mus))/
(3*mbkin**7) - (112*mckin**8*np.log(2/mus))/mbkin**8 +
(1536*mckin*np.log(mckin**2/mbkin**2)*np.log(2/mus))/mbkin +
(512*mckin**2*np.log(mckin**2/mbkin**2)*np.log(2/mus))/mbkin**2 +
(768*mckin**3*np.log(mckin**2/mbkin**2)*np.log(2/mus))/mbkin**3 -
(2624*mckin**4*np.log(mckin**2/mbkin**2)*np.log(2/mus))/mbkin**4 -
(640*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 -
mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4)*
np.log(2/mus))/3 | |
<filename>fhirclient/models/paymentreconciliation.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/PaymentReconciliation) on 2019-01-22.
# 2019, SMART Health IT.
from . import domainresource
class PaymentReconciliation(domainresource.DomainResource):
"""
P
a
y
m
e
n
t
R
e
c
o
n
c
i
l
i
a
t
i
o
n
r
e
s
o
u
r
c
e
.
T
h
i
s
r
e
s
o
u
r
c
e
p
r
o
v
i
d
e
s
t
h
e
d
e
t
a
i
l
s
i
n
c
l
u
d
i
n
g
a
m
o
u
n
t
o
f
a
p
a
y
m
e
n
t
a
n
d
a
l
l
o
c
a
t
e
s
t
h
e
p
a
y
m
e
n
t
i
t
e
m
s
b
e
i
n
g
p
a
i
d
.
"""
resource_type = "PaymentReconciliation"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.created = None
"""
C
r
e
a
t
i
o
n
d
a
t
e
.
Type `FHIRDate` (represented as `str` in JSON). """
self.detail = None
"""
S
e
t
t
l
e
m
e
n
t
p
a
r
t
i
c
u
l
a
r
s
.
List of `PaymentReconciliationDetail` items (represented as `dict` in JSON). """
self.disposition = None
"""
D
i
s
p
o
s
i
t
i
o
n
m
e
s
s
a
g
e
.
Type `str`. """
self.formCode = None
"""
P
r
i
n
t
e
d
f
o
r
m
i
d
e
n
t
i
f
i
e
r
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.identifier = None
"""
B
u
s
i
n
e
s
s
I
d
e
n
t
i
f
i
e
r
f
o
r
a
p
a
y
m
e
n
t
r
e
c
o
n
c
i
l
i
a
t
i
o
n
.
List of `Identifier` items (represented as `dict` in JSON). """
self.outcome = None
"""
q
u
e
u
e
d
|
c
o
m
p
l
e
t
e
|
e
r
r
o
r
|
p
a
r
t
i
a
l
.
Type `str`. """
self.paymentAmount = None
"""
T
o
t
a
l
a
m
o
u
n
t
o
f
P
a
y
m
e
n
t
.
Type `Money` (represented as `dict` in JSON). """
self.paymentDate = None
"""
W
h
e
n
p
a
y
m
e
n
t
i
s
s
u
e
d
.
Type `FHIRDate` (represented as `str` in JSON). """
self.paymentIdentifier = None
"""
B
u
s
i
n
e
s
s
i
d
e
n
t
i
f
i
e
r
f
o
r
t
h
e
p
a
y
m
e
n
t
.
Type `Identifier` (represented as `dict` in JSON). """
self.paymentIssuer = None
"""
P
a
r
t
y
g
e
n
e
r
a
t
i
n
g
p
a
y
m
e
n
t
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.period = None
"""
P
e
r
i
o
d
c
o
v
e
r
e
d
.
Type `Period` (represented as `dict` in JSON). """
self.processNote = None
"""
N
o
t
e
c
o
n
c
e
r
n
i
n
g
p
r
o
c
e
s
s
i
n
g
.
List of `PaymentReconciliationProcessNote` items (represented as `dict` in JSON). """
self.request = None
"""
R
e
f
e
r
e
n
c
e
t
o
r
e
q
u
e
s
t
i
n
g
r
e
s
o
u
r
c
e
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.requestor = None
"""
R
e
s
p
o
n
s
i
b
l
e
p
r
a
c
t
i
t
i
o
n
e
r
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.status = None
"""
a
c
t
i
v
e
|
c
a
n
c
e
l
l
e
d
|
d
r
a
f
t
|
e
n
t
e
r
e
d
-
i
n
-
e
r
r
o
r
.
Type `str`. """
super(PaymentReconciliation, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(PaymentReconciliation, self).elementProperties()
js.extend([
("created", "created", fhirdate.FHIRDate, False, None, True),
("detail", "detail", PaymentReconciliationDetail, True, None, False),
("disposition", "disposition", str, False, None, False),
("formCode", "formCode", codeableconcept.CodeableConcept, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("outcome", "outcome", str, False, None, False),
("paymentAmount", "paymentAmount", money.Money, False, None, True),
("paymentDate", "paymentDate", fhirdate.FHIRDate, False, None, True),
("paymentIdentifier", "paymentIdentifier", identifier.Identifier, False, None, False),
("paymentIssuer", "paymentIssuer", fhirreference.FHIRReference, False, None, False),
("period", "period", period.Period, False, None, False),
("processNote", "processNote", PaymentReconciliationProcessNote, True, None, False),
("request", "request", fhirreference.FHIRReference, False, None, False),
("requestor", "requestor", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, True),
])
return js
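# Hedged usage sketch: constructing a resource from a JSON dict. The field values
# below are illustrative only and not taken from a real FHIR payload:
#   pr = PaymentReconciliation({
#       "resourceType": "PaymentReconciliation",
#       "status": "active",
#       "created": "2019-01-22",
#       "paymentDate": "2019-01-25",
#       "paymentAmount": {"value": 100.0, "currency": "USD"},
#   })
#   print(pr.status, pr.paymentAmount.value)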
from . import backboneelement
class PaymentReconciliationDetail(backboneelement.BackboneElement):
"""
S
e
t
t
l
e
m
e
n
t
p
a
r
t
i
c
u
l
a
r
s
.
D
i
s
t
r
i
b
u
t
i
o
n
o
f
t
h
e
p
a
y
m
e
n
t
a
m
o
u
n
t
f
o
r
a
p
r
e
v
i
o
u
s
l
y
a
c
k
n
o
w
l
e
d
g
e
d
p
a
y
a
b
l
e
.
"""
resource_type = "PaymentReconciliationDetail"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.amount = None
"""
A
m
o
u
n
t
a
l
l
o
c
a
t
e
d
t
o
t
h
i
s
p
a
y
a
b
l
e
.
Type `Money` (represented as `dict` in JSON). """
self.date = None
"""
D
a
t
e
o
f
c
o
m
m
i
t
m
e
n
t
t
o
p
a
y
.
Type `FHIRDate` (represented as `str` in JSON). """
self.identifier = None
"""
B
u
s
i
n
e
s
s
i
d
e
n
t
i
f
i
e
r
o
f
t
h
e
p
a
y
m
e
n
t
d
e
t
a
i
l
.
Type `Identifier` (represented as `dict` in JSON). """
self.payee = None
"""
R
e
c
i
p
i
e
n
t
o
f
t
h
e
p
a
y
m
e
n
t
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.predecessor = None
"""
B
u
s
i
n
e
s
s
i
d
e
n
t
i
f
i
e
r
o
f
t
h
e
p
r
i
o
r
p
a
y
m
e
n
t
d
e
t
a
i
l
.
Type `Identifier` (represented as `dict` in JSON). """
self.request = None
"""
R
e
q
u
e
s
t
g
i
v
i
n
g
r
i
s
e
t
o
t
h
e
p
a
y
m
e
n
| |
"/queries/string/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if string_query is not None:
query_parameters["stringQuery"] = _SERIALIZER.query("string_query", string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_enum_valid_request(*, enum_query: Optional[str] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/enum/green%20color"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if enum_query is not None:
query_parameters["enumQuery"] = _SERIALIZER.query("enum_query", enum_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
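# Hedged usage sketch: each builder only assembles an azure.core HttpRequest; it is
# sent through whatever pipeline/client the generated SDK provides (the `client`
# object below is assumed, not defined in this module):
#   request = build_queries_enum_valid_request(enum_query="green color")
#   # response = client.send_request(request)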
def build_queries_enum_null_request(*, enum_query: Optional[str] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/enum/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if enum_query is not None:
query_parameters["enumQuery"] = _SERIALIZER.query("enum_query", enum_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_byte_multi_byte_request(*, byte_query: Optional[bytearray] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/byte/multibyte"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if byte_query is not None:
query_parameters["byteQuery"] = _SERIALIZER.query("byte_query", byte_query, "bytearray")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_byte_empty_request(**kwargs: Any) -> HttpRequest:
byte_query = kwargs.pop("byte_query", bytearray("", encoding="utf-8")) # type: bytearray
accept = "application/json"
# Construct URL
url = "/queries/byte/empty"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["byteQuery"] = _SERIALIZER.query("byte_query", byte_query, "bytearray")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_byte_null_request(*, byte_query: Optional[bytearray] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/byte/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if byte_query is not None:
query_parameters["byteQuery"] = _SERIALIZER.query("byte_query", byte_query, "bytearray")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_date_valid_request(**kwargs: Any) -> HttpRequest:
date_query = kwargs.pop("date_query", "2012-01-01") # type: datetime.date
accept = "application/json"
# Construct URL
url = "/queries/date/2012-01-01"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["dateQuery"] = _SERIALIZER.query("date_query", date_query, "date")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_date_null_request(*, date_query: Optional[datetime.date] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/date/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if date_query is not None:
query_parameters["dateQuery"] = _SERIALIZER.query("date_query", date_query, "date")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_date_time_valid_request(**kwargs: Any) -> HttpRequest:
date_time_query = kwargs.pop("date_time_query", "2012-01-01T01:01:01Z") # type: datetime.datetime
accept = "application/json"
# Construct URL
url = "/queries/datetime/2012-01-01T01%3A01%3A01Z"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters["dateTimeQuery"] = _SERIALIZER.query("date_time_query", date_time_query, "iso-8601")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_date_time_null_request(
*, date_time_query: Optional[datetime.datetime] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/datetime/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if date_time_query is not None:
query_parameters["dateTimeQuery"] = _SERIALIZER.query("date_time_query", date_time_query, "iso-8601")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_csv_valid_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/csv/string/valid"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=",")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_csv_null_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/csv/string/null"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=",")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_csv_empty_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/csv/string/empty"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=",")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_no_collection_format_empty_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/none/string/empty"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=",")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_ssv_valid_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/ssv/string/valid"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=" ")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_tsv_valid_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/tsv/string/valid"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div=" ")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_queries_array_string_pipes_valid_request(
*, array_query: Optional[List[str]] = None, **kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/queries/array/pipes/string/valid"
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters["arrayQuery"] = _SERIALIZER.query("array_query", array_query, "[str]", div="|")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_path_items_get_all_with_values_request(
path_item_string_path: str,
global_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
global_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/pathItemStringQuery/localStringQuery"
path_format_arguments = {
"pathItemStringPath": _SERIALIZER.url("path_item_string_path", path_item_string_path, "str"),
"globalStringPath": _SERIALIZER.url("global_string_path", global_string_path, "str"),
"localStringPath": _SERIALIZER.url("local_string_path", local_string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters["pathItemStringQuery"] = _SERIALIZER.query(
"path_item_string_query", path_item_string_query, "str"
)
if global_string_query is not None:
query_parameters["globalStringQuery"] = _SERIALIZER.query("global_string_query", global_string_query, "str")
if local_string_query is not None:
query_parameters["localStringQuery"] = _SERIALIZER.query("local_string_query", local_string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_path_items_get_global_query_null_request(
path_item_string_path: str,
global_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
global_string_query: Optional[str] = None,
local_string_query: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
accept = "application/json"
# Construct URL
url = "/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/localStringQuery"
path_format_arguments = {
"pathItemStringPath": _SERIALIZER.url("path_item_string_path", path_item_string_path, "str"),
"globalStringPath": _SERIALIZER.url("global_string_path", global_string_path, "str"),
"localStringPath": _SERIALIZER.url("local_string_path", local_string_path, "str"),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters["pathItemStringQuery"] = _SERIALIZER.query(
"path_item_string_query", path_item_string_query, "str"
)
if global_string_query is not None:
query_parameters["globalStringQuery"] = _SERIALIZER.query("global_string_query", global_string_query, "str")
if local_string_query is not None:
query_parameters["localStringQuery"] = _SERIALIZER.query("local_string_query", local_string_query, "str")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_path_items_get_global_and_local_query_null_request(
path_item_string_path: str,
global_string_path: str,
local_string_path: str,
*,
path_item_string_query: Optional[str] = None,
global_string_query: Optional[str] = None,
| |
# Copyright 2015-2016 Mirantis, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import datetime
import json
import logging
import os.path
import subprocess
import traceback
from oslo_config import cfg
from mcv_consoler.common.config import DEFAULT_CIRROS_IMAGE
from mcv_consoler.common.config import MOS_TEMPEST_MAP
from mcv_consoler.common.config import TIMES_DB_PATH
from mcv_consoler.common.errors import TempestError
from mcv_consoler.plugins.rally import runner as rrunner
from mcv_consoler import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
tempest_additional_conf = {
'compute':
{'fixed_network_name': CONF.networking.network_ext_name},
'object-storage':
{'operator_role': 'admin',
'reseller_admin_role': 'admin'},
'auth': {}
}
class TempestOnDockerRunner(rrunner.RallyOnDockerRunner):
failure_indicator = TempestError.NO_RUNNER_ERROR
identity = 'tempest'
def __init__(self, ctx):
super(TempestOnDockerRunner, self).__init__(ctx)
self.path = self.ctx.work_dir.base_dir
self.container = None
self.failed_cases = 0
self.home = '/mcv'
self.homedir = '/home/mcv/toolbox/tempest'
def _verify_rally_container_is_up(self):
self.verify_container_is_up("tempest")
def create_cirros_image(self):
i_list = self.glanceclient.images.list()
for im in i_list:
if im.name == 'mcv-test-functional-cirros':
return im.id
img_fp = None
try:
img_fp = open(DEFAULT_CIRROS_IMAGE)
except IOError as e:
LOG.debug('Cannot open file {path}: {err}'.format(
path=DEFAULT_CIRROS_IMAGE,
err=str(e)))
return
im = self.glanceclient.images.create(name='mcv-test-functional-cirros',
disk_format="qcow2",
is_public=True,
container_format="bare",
data=img_fp)
def cleanup_cirros_image(self):
self.cleanup_image('mcv-test-functional-cirros')
def start_container(self):
LOG.debug("Bringing up Tempest container with credentials")
add_host = ""
# TODO(albartash): Refactor this place!
if self.access_data["auth_fqdn"] != '':
add_host = "--add-host={fqdn}:{endpoint}".format(
fqdn=self.access_data["auth_fqdn"],
endpoint=self.access_data["public_endpoint_ip"])
res = subprocess.Popen(
["docker", "run", "-d", "-P=true"] +
[add_host] * (add_host != "") +
["-p", "6001:6001",
"-e", "OS_AUTH_URL=" + self.access_data["auth_url"],
"-e", "OS_TENANT_NAME=" + self.access_data["tenant_name"],
"-e", "OS_REGION_NAME" + self.access_data["region_name"],
"-e", "OS_USERNAME=" + self.access_data["username"],
"-e", "OS_PASSWORD=" + self.access_data["password"],
"-e", "KEYSTONE_ENDPOINT_TYPE=publicUrl",
"-v", '%s:/home/rally/.rally/tempest' % self.homedir,
"-v", "%s:%s" % (self.homedir, self.home), "-w", self.home,
"-t", "mcv-tempest"],
stdout=subprocess.PIPE,
preexec_fn=utils.ignore_sigint).stdout.read()
        LOG.debug('Finished bringing up Tempest container. '
                  'ID = %s' % str(res))
self.verify_container_is_up()
self._patch_rally()
        # Hotfix: set the rally user's permissions for the .rally/ folder.
        # Please remove this and use `sudo -u rally docker run` once the
        # rally user gets permission to start docker containers.
cmd = 'docker exec -t {cid} sudo chown rally:rally /home/rally/.rally'
utils.run_cmd(cmd.format(cid=self.container_id))
self.copy_config()
self.install_tempest()
def _patch_rally(self):
dist = '/tempest/requirements.txt'
LOG.debug('Patching tempest requirements')
tempest_patch = '/mcv/custom_patches/requirements.patch'
self._os_patch(dist, tempest_patch, self.container_id)
git_commit_cmd = (
'cd /tempest && git config --global user.name \"mcv-team\" && '
'git config --global user.email '
'\"<EMAIL>\" && '
'sudo git add . && sudo git commit -m \"added markupsafe to '
'requirements, which is needed for pbr\"')
utils.run_cmd('docker exec -t {cid} sh -c "{cmd}"'.format(
cid=self.container_id,
cmd=git_commit_cmd))
def make_detailed_report(self, task):
LOG.debug('Generating detailed report')
details_dir = os.path.join(self.home, 'reports/details/')
details_file = os.path.join(details_dir, task + '.txt')
cmd = "docker exec -t %(cid)s " \
"rally deployment list | grep existing | awk \'{print $2}\'" \
% dict(cid=self.container_id)
deployment_id = utils.run_cmd(cmd, quiet=True).strip()
cmd = 'docker exec -t {cid} mkdir -p {out_dir}'
utils.run_cmd(cmd.format(cid=self.container_id, out_dir=details_dir),
quiet=True)
# store tempest.conf
self.store_config(os.path.join(self.homedir,
"for-deployment-{ID}/tempest.conf"
.format(ID=deployment_id)))
self.store_config(os.path.join(self.homedir, "conf/existing.json"))
        # Note(ogrytsenko): the subunit2pyunit tool returns exit code '1' if
        # at least one test failed in a test suite. It also returns exit
        # code '1' if some error occurred while processing a file, e.g.
        # "Permission denied".
        # We force 'exit 0' here and check the real status later by
        # calling 'test -e <details_file>'
cmd = 'docker exec -t {cid} /bin/sh -c \" ' \
'subunit2pyunit /mcv/for-deployment-{ID}/subunit.stream ' \
'2> {out_file}\"; ' \
'exit 0'.format(cid=self.container_id,
ID=deployment_id,
out_file=details_file)
out = utils.run_cmd(cmd, quiet=True)
cmd = 'docker exec -t {cid} test -e {out_file} ' \
'&& echo -n yes || echo -n no'.format(cid=self.container_id,
out_file=details_file)
exists = utils.run_cmd(cmd)
if exists == 'no':
LOG.debug('ERROR: Failed to create detailed report for '
'{task} set. Output: {out}'.format(task=task, out=out))
return
cmd = 'mkdir -p {path}/details'.format(path=self.path)
utils.run_cmd(cmd, quiet=True)
reports_dir = os.path.join(self.homedir, 'reports')
cmd = 'cp {reports}/details/{task}.txt {path}/details'
utils.run_cmd(
cmd.format(reports=reports_dir, task=task, path=self.path),
quiet=True
)
LOG.debug(
"Finished creating detailed report for '{task}'. "
"File: {details_file}".format(task=task, details_file=details_file)
)
def fill_additional_conf(self):
if CONF.rally.existing_users:
tempest_additional_conf['auth'].update(
test_accounts_file=os.path.join(
self.home, 'additional_users.yaml'),
use_dynamic_credentials=False)
def install_tempest(self):
LOG.debug("Searching for installed tempest")
super(TempestOnDockerRunner, self)._rally_deployment_check()
self.fill_additional_conf()
LOG.debug("Generating additional config")
path_to_conf = os.path.join(self.homedir, 'additional.conf')
with open(path_to_conf, 'wb') as conf_file:
config = ConfigParser.ConfigParser()
config._sections = tempest_additional_conf
config.write(conf_file)
LOG.debug("Installing tempest...")
version = MOS_TEMPEST_MAP.get(self.access_data['mos_version'])
if not version:
cmd = ("docker exec -t {cid} "
"rally verify install --system-wide "
"--deployment existing --source /tempest ").format(
cid=self.container_id)
else:
cmd = ("docker exec -t {cid} "
"rally verify install --system-wide "
"--deployment existing --source /tempest "
"--version {version} ").format(
cid=self.container_id,
version=version)
utils.run_cmd(cmd, quiet=True)
cmd = "docker exec -t %(container)s rally verify genconfig " \
"--add-options %(conf_path)s" % \
{"container": self.container_id,
"conf_path": os.path.join(self.home, 'additional.conf')}
utils.run_cmd(cmd, quiet=True)
def _run_tempest_on_docker(self, task, *args, **kwargs):
LOG.debug("Starting verification")
if CONF.rally.existing_users:
concurr = 1
else:
concurr = 0
run_by_name = kwargs.get('run_by_name')
if run_by_name:
cmd = ("docker exec -t {cid} rally "
"--log-file {home}/log/tempest.log --rally-debug"
" verify start --system-wide "
"--regex {_set} --concurrency {con}").format(
cid=self.container_id,
home=self.home,
_set=task,
con=concurr)
else:
cmd = ("docker exec -t {cid} rally "
"--log-file {home}/log/tempest.log --rally-debug"
" verify start --system-wide "
"--set {_set} --concurrency {con}").format(
cid=self.container_id,
home=self.home,
_set=task,
con=concurr)
utils.run_cmd(cmd, quiet=True)
cmd = "docker exec -t {cid} rally verify list".format(
cid=self.container_id)
# TODO(ogrytsenko): double-check this approach
try:
p = utils.run_cmd(cmd)
except subprocess.CalledProcessError as e:
LOG.error("Task %s failed with: %s" % (task, e))
return ''
run = p.split('\n')[-3].split('|')[8]
if run == 'failed':
LOG.error('Verification failed, unable to generate report')
return ''
LOG.debug('Generating html report')
cmd = ("docker exec -t {cid} rally verify results --html "
"--out={home}/reports/{task}.html").format(
cid=self.container_id, home=self.home, task=task)
utils.run_cmd(cmd, quiet=True)
reports_dir = os.path.join(self.homedir, 'reports')
cmd = "cp {reports}/{task}.html {path} ".format(
reports=reports_dir, task=task, path=self.path)
utils.run_cmd(cmd, quiet=True)
try:
self.make_detailed_report(task)
except Exception:
LOG.debug('ERROR: \n' + traceback.format_exc())
cmd = "docker exec -t {cid} /bin/sh -c " \
"\"rally verify results --json 2>/dev/null\" "\
.format(cid=self.container_id)
return utils.run_cmd(cmd, quiet=True)
def parse_results(self, res, task):
LOG.debug("Parsing results")
if res == '':
LOG.debug("Results of test set '%s': FAILURE" % task)
self.failure_indicator = TempestError.VERIFICATION_FAILED
self.test_failures.append(task)
LOG.info(" * FAILED")
return False
try:
self.task = json.loads(res)
except ValueError:
LOG.debug("Results of test set '%s': "
"FAILURE, gotten not-JSON object. "
"Please see logs" % task)
LOG.debug("Not-JSON object: %s", res)
self.test_failures.append(task)
LOG.info(" * FAILED")
return False
time_of_tests = float(self.task.get('time', '0'))
time_of_tests = str(round(time_of_tests, 3)) + 's'
self.time_of_tests[task] = {'duration': time_of_tests}
if self.task.get('tests', 0) == 0:
self.test_failures.append(task)
LOG.debug("Task '%s' was skipped. Perhaps the service "
"is not working" % task)
LOG.info(" * FAILED")
return False
failures = self.task.get('failures')
success = self.task.get('success')
self.failed_cases += failures
LOG.debug("Results of test set '%s': "
"SUCCESS: %d FAILURES: %d" % (task, success, failures))
if not failures:
self.test_success.append(task)
LOG.info(" * PASSED")
return True
else:
self.test_failures.append(task)
self.failure_indicator = TempestError.TESTS_FAILED
LOG.info(" * FAILED")
return False
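    # Illustrative sketch of the `rally verify results --json` payload consumed by
    # parse_results() above -- only the fields actually read here are shown; the
    # real output contains more keys:
    #   {"time": "842.3", "tests": 120, "failures": 2, "success": 118, ...}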
def cleanup_toolbox(self):
LOG.info('Uninstalling tempest ...')
cmd = ('docker exec -t {cid} ' 'rally verify uninstall '
'--deployment existing'.format(cid=self.container_id))
utils.run_cmd(cmd, quiet=True)
def run_batch(self, tasks, *args, **kwargs):
with self.store('rally.log', 'tempest.log'):
tool_name = kwargs["tool_name"]
all_time = kwargs["all_time"]
elapsed_time = kwargs["elapsed_time"]
            # Note (ayasakov): the DB stores the execution time of each test.
            # On the first run of each test tool we calculate the multiplier,
            # which captures the difference in execution time between testing
            # on our reference cloud and the current cloud.
db = kwargs.get('db')
first_run = True
multiplier = 1.0
test_time = 0
all_time -= elapsed_time
self.create_cirros_image()
self._setup_rally_on_docker()
# NOTE(ogrytsenko): only test-suites are discoverable for tempest
if not kwargs.get('run_by_name'):
cid = self.container_id
tasks, missing = self.discovery(cid).match(tasks)
self.test_not_found.extend(missing)
t = []
tempest_task_results_details = {}
LOG.info("Time start: %s UTC\n" % str(datetime.datetime.utcnow()))
for task in tasks:
LOG.info("-" * 60)
task = task.replace(' ', '')
if kwargs.get('event').is_set():
LOG.info("Keyboard interrupt. Set %s won't start" % task)
break
time_start = datetime.datetime.utcnow()
LOG.info('Running %s tempest set' % task)
LOG.debug("Time start: %s UTC" % str(time_start))
if not CONF.times.update:
try:
test_time = db[tool_name][task]
except KeyError:
test_time = 0
exp_time = utils.seconds_to_humantime(test_time *
multiplier)
msg = "Expected time to complete %s: %s"
if not test_time:
LOG.debug(msg, task, exp_time)
else:
LOG.info(msg, task, exp_time)
self.run_individual_task(task, *args, **kwargs)
time_end = datetime.datetime.utcnow()
time = time_end - time_start
LOG.debug("Time end: %s UTC" % str(time_end))
if CONF.times.update:
if tool_name in db.keys():
db[tool_name].update({task: time.seconds})
else:
db.update({tool_name: {task: time.seconds}})
else:
if first_run:
#!/usr/bin/env python
# encoding: utf-8
"""
dm.py
Created by <NAME> on 2012-08-22.
Copyright (c) 2012 University of Strathclyde. All rights reserved.
"""
import csv
import functools
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import re
import simulator.errors as errors
import simulator.modules.sim as sim
import unittest
import warnings
class BidderHelper:
"""
Helper class which implements:
- bidding behaviors;
- reputation rating update mechanisms.
"""
def __init__(self):
"""
Constructs BidderHelper instance.
"""
self.implemented_methods = {
'lebodic': {
'method': self.lebodics_reputation_update,
'params': ['window_size']
},
'mcdiarmid': {
'method': self.mcdiarmids_reputation_update,
'params': ['commitment']
},
'myopic': {
'method': self.myopic_bidding,
'params': []
},
}
def method(self, params):
"""
Returns a method inferred from the specified
params.
Arguments:
params -- Passed in params as dict
"""
if 'method' not in params:
raise errors.UnknownMethodError(params)
elif params['method'] not in self.implemented_methods:
raise errors.UnknownMethodError(params)
else:
method_name = params['method']
for param in self.implemented_methods[method_name]['params']:
if param not in params:
raise errors.UnknownMethodError(params)
args = [params[p] for p in self.implemented_methods[method_name]['params']]
return functools.partial(self.implemented_methods[method_name]['method'], *args)
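    # Hedged examples of the `params` dicts accepted by method() above, derived from
    # implemented_methods: {'method': 'myopic'},
    # {'method': 'lebodic', 'window_size': 5}, or
    # {'method': 'mcdiarmid', 'commitment': 0.8}. The returned partial is later
    # called with the remaining positional arguments of the underlying function.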
def myopic_bidding(self, price_weight, cost, reputation, enemy_reputation):
"""
Returns bid calculated using myopic bidding approach.
Arguments:
price_weight -- Subscriber's price weight
cost -- Network operator's cost
reputation -- Network operator's reputation
enemy_reputation -- Other network operator's reputation
"""
def estimate_bid_hat_function(w, reps, granularity=1000):
warnings.simplefilter('error', RuntimeWarning)
# Calculate params
v1 = [(1-w)*reps[0], (1-w)*reps[0] + w]
v2 = [(1-w)*reps[1], (1-w)*reps[1] + w]
            # Account for numerical imprecision
my_round = lambda x: round(x, 6)
v1 = list(map(my_round, v1))
v2 = list(map(my_round, v2))
# Check whether nontrivial NE
if (v2[1] >= v1[1]):
if (v1[1] <= 2*v2[0] - v2[1]):
graph_vf = np.linspace(v1[0], v1[1], granularity)
bids = list(map(lambda x: v2[0], graph_vf))
else:
# Bid bounds
b = [(4 * v1[0] * v2[0] - (v1[1] + v2[1])**2) / (4 * (v1[0] - v1[1] + v2[0] - v2[1])), (v1[1] + v2[1]) / 2]
# Constants of integration
c1 = ((v2[1]-v1[1])**2 + 4*(b[0]-v2[1])*(v1[0]-v1[1])) / (-2*(b[0]-b[1])*(v1[0]-v1[1])) * np.exp((v2[1]-v1[1]) / (2*(b[0]-b[1])))
c2 = ((v1[1]-v2[1])**2 + 4*(b[0]-v1[1])*(v2[0]-v2[1])) / (-2*(b[0]-b[1])*(v2[0]-v2[1])) * np.exp((v1[1]-v2[1]) / (2*(b[0]-b[1])))
# Inverse bid function
def vf(x):
try:
return v1[1] + (v2[1]-v1[1])**2 / (c1*(v2[1]+v1[1]-2*x)*np.exp((v2[1]-v1[1])/(v2[1]+v1[1]-2*x)) + 4*(v2[1]-x))
except RuntimeWarning as e:
if (re.search('.*overflow encountered in exp.*', str(e)) or
re.search('.*divide by zero encountered in double_scalars.*', str(e))):
return v1[1]
else:
raise RuntimeWarning(e)
# Sampling
bids = np.linspace(b[0], b[1], granularity)
graph_vf = list(map(vf, bids))
else:
if (v2[1] <= 2*v1[0] - v1[1]):
graph_vf = np.linspace(v1[0], v1[1], granularity)
bids = graph_vf
else:
# Bid bounds
b = [(4 * v1[0] * v2[0] - (v1[1] + v2[1])**2) / (4 * (v1[0] - v1[1] + v2[0] - v2[1])), (v1[1] + v2[1]) / 2]
# Constants of integration
c1 = ((v2[1]-v1[1])**2 + 4*(b[0]-v2[1])*(v1[0]-v1[1])) / (-2*(b[0]-b[1])*(v1[0]-v1[1])) * np.exp((v2[1]-v1[1]) / (2*(b[0]-b[1])))
c2 = ((v1[1]-v2[1])**2 + 4*(b[0]-v1[1])*(v2[0]-v2[1])) / (-2*(b[0]-b[1])*(v2[0]-v2[1])) * np.exp((v1[1]-v2[1]) / (2*(b[0]-b[1])))
# Inverse bid functions
vf = lambda x: (v1[1] + (v2[1]-v1[1])**2 / (c1*(v2[1]+v1[1]-2*x)*np.exp((v2[1]-v1[1])/(v2[1]+v1[1]-2*x)) + 4*(v2[1]-x))
if x <= b[1] else x)
# Sampling
bids = np.linspace(b[0], v1[1], granularity)
graph_vf = list(map(vf, bids))
return bids, graph_vf
if price_weight != 0.0 and price_weight != 1.0 and reputation != enemy_reputation:
# Estimate equilibrium bidding strategy functions (bids-hat)
bids_hat, costs_hat = estimate_bid_hat_function(price_weight, [reputation, enemy_reputation])
# Calculate bid
dist = list(map(lambda x: np.abs(x - ((1-price_weight)*reputation + cost*price_weight)), costs_hat))
return (bids_hat[dist.index(min(dist))] - (1-price_weight)*reputation) / price_weight
elif price_weight == 0.0:
# Return the highest possible bid
return float('inf')
else:
# Calculate bid
return (1 + cost) / 2
def lebodics_reputation_update(self, window_size, reputation, success_list):
"""
Returns reputation rating update calculated according to
LeBodic's algorithm.
Arguments:
window_size -- Window size
reputation -- Current reputation rating
success_list -- Current user's success report list
"""
if len(success_list) >= window_size:
return 1 - (sum(success_list[len(success_list)-window_size:]) / window_size)
else:
return reputation
def mcdiarmids_reputation_update(self, commitment, reputation, success_list):
"""
Returns reputation rating update calculated according to
McDiarmid's algorithm.
Arguments:
commitment -- Commitment of network operator (ranges from 0.0 to 1.0)
reputation -- Current reputation rating
success_list -- Current user's success report list
"""
if success_list[-1]:
return reputation - 0.01 if reputation >= 0.01 else 0.0
else:
penalty = commitment / 100 / (1-commitment)
return reputation + penalty if reputation + penalty <= 1.0 else 1.0
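    # Worked example (a sketch with assumed numbers, not from the original source):
    # with commitment=0.8 and reputation=0.5, a failed last report adds a penalty of
    # 0.8/100/(1-0.8) = 0.04, i.e. the rating moves to 0.54, while a successful last
    # report lowers it to 0.49; lebodics_reputation_update(3, 0.5, [1, 0, 1]) yields
    # 1 - 2/3 ≈ 0.33.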
class Bidder:
"""
Represents network operator in the Digital Marketplace.
"""
# ID counter
_id_counter = 0
def __init__(self, total_bitrate=None, costs=None, bidding_params=None,
reputation=None, reputation_params=None):
"""
Constructs Bidder instance.
Keyword arguments:
total_bitrate -- Total available bit-rate
costs -- Costs per service type
bidding_params -- Bidding parameters
reputation -- Initial reputation value
reputation_params -- Reputation update specific params
"""
# Check if arguments were specified
if None in (total_bitrate, costs, bidding_params, reputation, reputation_params):
raise errors.UninitializedArgumentError()
# Create ID for this instance
self._id = Bidder._id_counter
# Increment ID counter
Bidder._id_counter += 1
# Initialize costs dictionary (key: service type)
self._costs = costs
# Assign bidding method
self._bidder_helper = BidderHelper()
self._bidding_method = self._bidder_helper.method(bidding_params)
# Initialize reputation
self._reputation = reputation
# Assign total available bitrate of the network operator
self._total_bitrate = total_bitrate
# Initialize available bitrate
self._available_bitrate = total_bitrate
# Assign reputation rating update method
self._reputation_update_method = self._bidder_helper.method(reputation_params)
# Initialize reputation history list
self._reputation_history = []
# Initialize winnings history list
self._winning_history = []
# Initialize user success report list
self._success_list = []
# Initialize dictionary of service dedicated bitrates
self._dedicated_bitrates = {}
def __str__(self):
"""
Returns string representation of the object.
"""
return "Bidder_" + str(self._id)
@property
def id(self):
"""
Returns unique ID of the object.
"""
return self._id
@property
def costs(self):
"""
Returns dictionary of costs (key: service type).
"""
return self._costs
@property
def reputation(self):
"""
Returns current reputation.
"""
return self._reputation
@property
def reputation_history(self):
"""
Returns reputation history.
"""
return self._reputation_history
@property
def winning_history(self):
"""
Returns winning history.
"""
return self._winning_history
@property
def total_bitrate(self):
"""
Returns total bit-rate.
"""
return self._total_bitrate
@property
def available_bitrate(self):
"""
Returns available bit-rate.
"""
return self._available_bitrate
@property
def success_list(self):
"""
Returns user success list.
"""
return self._success_list
def _generate_cost(self, service_type):
"""
Generates cost for each requested service type.
Arguments:
service_type -- Type of requested service
"""
# Check if service type already exists in dict
if service_type not in self._costs:
# Get SimulationEngine instance
se = sim.SimulationEngine()
# Generate new cost for service type
self._costs[service_type] = se.prng.uniform(0,1)
def submit_bid(self, service_type, price_weight, enemy_reputation):
"""
Returns bid for the specified parameters.
Arguments:
service_type -- Type of requested service
price_weight -- Price weight requested by the buyer
enemy_reputation -- Reputation of the other bidder
"""
# Generate cost for service type
self._generate_cost(service_type)
# Save current reputation
self._reputation_history += [self._reputation]
# Submit bid
return self._bidding_method(price_weight, self.costs[service_type], self.reputation, enemy_reputation)
def update_winning_history(self, has_won):
"""
Updates winning history list.
Arguments:
has_won -- True if won current auction; false otherwise
"""
value = 1 if has_won else 0
if self._winning_history:
self._winning_history += [self._winning_history[-1] + value]
else:
self._winning_history += [value]
def _update_available_bitrate(self, sr_number, service_type=None):
"""
Updates available bitrate.
Arguments:
sr_number -- Auction (SR) number
Keyword arguments:
service_type -- Type of the requested service
"""
if service_type:
sr_bitrate = DMEventHandler.BITRATES[service_type]
if self._available_bitrate >= sr_bitrate:
self._dedicated_bitrates[sr_number] = sr_bitrate
self._available_bitrate -= sr_bitrate
else:
self._dedicated_bitrates[sr_number] = self._available_bitrate
self._available_bitrate = 0
logging.debug("{} => service no. {} dedicated bit-rate: {}".format(self, sr_number, self._dedicated_bitrates[sr_number]))
else:
sr_bitrate = self._dedicated_bitrates[sr_number]
del self._dedicated_bitrates[sr_number]
self._available_bitrate += sr_bitrate
logging.debug("{} => service no. {} dedicated bit-rate: {}".format(self, sr_number, sr_bitrate))
logging.debug("{} => available bit-rate: {}".format(self, self._available_bitrate))
def _update_success_list(self, service_type):
"""
Updates user success report list.
Arguments:
service_type -- Type of the requested service
"""
if self._available_bitrate >= DMEventHandler.BITRATES[service_type]:
self._success_list += [1]
else:
self._success_list += [0]
logging.debug("{} => latest user success report: {}".format(self, self._success_list[-1]))
logging.debug("{} => user success report list: {}".format(self, self._success_list))
def service_request(self, sr_number, service_type):
"""
Updates params as if network operator has serviced buyer's service request.
Arguments:
sr_number -- Auction (SR) number
service_type -- Type of the requested service
"""
logging.debug("{} => service type: {}".format(self, service_type))
# Store user success report
self._update_success_list(service_type)
# Update available bitrate
self._update_available_bitrate(sr_number, service_type=service_type)
# Compute reputation rating update
self._reputation = self._reputation_update_method(self._reputation, self._success_list)
logging.debug("{} => reputation: {}".format(self, self._reputation))
def finish_servicing_request(self, sr_number):
"""
        Updates params when finished servicing a buyer's service request.
        """
import re
import regex as re2
from regex import sub as sub2
from collections import defaultdict
from datetime import datetime
from ngrams.ngrams import corrections, Pw
import nltk
# ===========================================================================================
# #
## # #### ##### # # ## # # ###### ## ##### # #### # #
# # # # # # # ## ## # # # # # # # # # # # ## #
# # # # # # # # ## # # # # # # # # # # # # # # #
# # # # # ##### # # ###### # # # ###### # # # # # # #
# ## # # # # # # # # # # # # # # # # # # ##
# # #### # # # # # # ###### # ###### # # # # #### # #
def preprocess_message( statement):
sentences = nltk.sent_tokenize( statement)
return [
cleanup_sentence(
remove_fluff(
corrections(
expand_contractions(
sentence.lower()
))))
for sentence
in sentences
]
def stemmer( word, pos):
if pos=="NOUN" and word[-1]=="s":
return word[:-1]
elif pos=="VERB" and word[-1]=="s":
return word[:-1]
elif pos=="VERB" and word[-2:]=="ed" and word[-3]!="e":
return word[:-2]+"ing"
else:
return word
def capitalize_sentence(sentence):
sentence = sentence[:1].upper() + sentence[1:]
sentence = re.sub(r"(^|\W)i($|\W)",r"\1I\2",sentence)
names = extract_named_entities(sentence.title())
for name in names:
sentence = re.sub(name.lower(),name,sentence)
for token in nltk.tokenize.word_tokenize(sentence)[1:]:
if re.match(r"[A-Z]\w*",token):
if Pw(token.lower())>1e-06 and token not in firstnames.words():
sentence = re.sub(token,token.lower(),sentence)
return sentence
def capitalize_fragment(sentence):
sentence = re.sub(r"(^|\W)i($|\W)",r"\1I\2",sentence)
names = extract_named_entities(sentence.title())
for name in names:
sentence = re.sub(name.lower(),name,sentence)
for token in nltk.tokenize.word_tokenize(sentence):
if re.match(r"[A-Z]\w*",token):
if Pw(token.lower())>1e-06 and token not in firstnames.words():
sentence = re.sub(token,token.lower(),sentence)
return sentence
##### # ######
# # #### #### ##### # # # ## #####
# # # # # # # # # # # # # #
# #### # # # # # # # ###### # # # #
# # # # # # # # # # # ###### # #
# # # # # # # # # # # # # # #
##### #### #### ##### # ###### # # #####
intensifiers = "|".join([
r"(?:pretty(?: much)?",
r"quite",
r"so",
r"very",
r"absolutely",
r"total?ly",
r"real?ly",
r"somewhat",
r"kind of",
r"perfectly",
r"incredibly",
r"positively",
r"definitely",
r"completely",
r"propably",
r"just",
r"rather",
r"almost",
r"entirely",
r"fully",
r"highly",
r"a bit)"
])
positives = "|".join([
r"(good",
r"better",
r"best",
r"finer?",
r"nicer?",
r"lovel(y|ier)",
r"great(er)?",
r"amazing",
r"super",
r"smashing",
r"fantastic",
r"stunning",
r"groovy",
r"wonderful?l",
r"superb",
r"marvel?lous",
r"neat",
r"terrific",
r"swell",
r"dandy",
r"tremendous",
r"excellent",
r"dope",
r"well",
r"elat(ed|ing)",
r"enthusiastic",
r"looking forward to",
r"engag(ed|ing)",
r"thrill(ed|ing)",
r"excit(ed|ing)",
r"happ(y|ier)",
r"joyful",
r"joyous",
r"delight(ed|ing)",
r"curious",
r"eager",
r"ok",
r"alright)"
])
negatives = "|".join([
r"(bad",
r"terrible",
r"awful",
r"mad",
r"horrible",
r"horrid",
r"sad",
r"blue",
r"down",
r"unhappy",
r"unwell",
r"miserable",
r"dissatisfied",
r"unsatisfied",
r"sick",
r"ill",
r"tired",
r"jealous",
r"envious",
r"afraid",
r"scared",
r"converned",
r"worried",
r"uneasy",
r"so-so",
r"medium",
r"negative",
r"troubled)"
])
def is_positive( sentence):
if(
(
re.search( positives, sentence)
and not has_negation( sentence)
)or(
re.search( negatives, sentence)
and has_negation( sentence)
)
):
return True
else:
return False
def is_negative( sentence):
if(
(
re.search( negatives, sentence)
and not has_negation( sentence)
)or(
re.search( positives, sentence)
and has_negation( sentence)
)
):
return True
else:
return False
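# Illustrative behaviour of the two classifiers above (derived from the pattern
# lists, not from the original source): is_positive("that sounds really great")
# and is_negative("this is not good") both return True, because a matched
# positive/negative word is inverted whenever has_negation() fires on the sentence.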
#####
# # ##### #### ##### # #
# # # # # # # #
##### # # # # # #
# # # # ##### #
# # # # # # # #
##### # #### # # #
def has_story( sentence):
if(
re.search( r"(^|\W)(i|me|mine|myself|my|we|our|oursel(f|v)(es)?|us)(\W|$)", sentence)
and not re.search( r"\?$", sentence)
):
return True
else:
return False
story_negatives = r"|".join([
r"(too late",
r"(lost|missed|broke|hurt|killed|failed|misplaced|forgot) (.* )?(my|ours?|mine|hers?|his|theirs?)",
r"failed (\w+ )?at)"
])
def has_story_negative( sentence):
if(
re.search( r"(^|\W)(i|we|my|our|me) ", sentence)
and not re.search( r"\?$", sentence)
and re.search( story_negatives, sentence)
):
return True
else:
return False
#####
# # # # ## # # ##### # ##### # ##### # #
# # # # # # ## # # # # # # # #
# # # # # # # # # # # # # # #
# # # # # ###### # # # # # # # # #
# # # # # # # ## # # # # # #
#### # #### # # # # # # # # # #
quantifier_much = "|".join([
r"(a [^\.\;]*lot",
r"lots",
r"enough",
r"(?:^|\s)sufficient",
r"great [^\.\;]*deal of",
r"some",
r"extensively",
r"several",
r"a few",
r"a [^\.\;]*couple of",
r"a [^\.\;]*bit of",
r"several",
r"multiple",
r"various",
r"fold",
r"numerous",
r"plent[iy]",
r"copious",
r"abundant",
r"ample",
r"any",
r"many",
r"much)"
])
quantifier_insufficient = "|".join([
r"(insufficient",
r"lack of",
r"lacked",
r"defici",
r"(?<!a\s)few", # match only if not preceded by "a "
r"(?<!a\s)little",
r"scant",
r"miss)"
])
def has_quantifier_much(sentence):
if re.search(r"not[^\.\;]+" + quantifier_much,sentence):
return False
if re.search(quantifier_much,sentence):
return True
elif re.search(r"no[^\.\;]+(complain|lack|miss|defici|insufficient)",sentence):
return True
else:
return False
def has_quantifier_insufficient(sentence):
if re.search(r"no[^\.\;]+" + quantifier_insufficient,sentence):
return False
if re.search(quantifier_insufficient,sentence):
return True
elif re.search(r"not[^\.\;]+"+quantifier_much,sentence):
return True
else:
return False
def has_quantifier_excessive(sentence):
if re.search(r"(too much|overmuch)",sentence):
return True
else:
return False
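# Illustrative behaviour of the quantifier checks above (derived from the patterns,
# not from the original source): has_quantifier_much("we had a lot of fun") is True,
# has_quantifier_insufficient("there was a lack of time") is True, and a negated
# amount such as "not enough sleep" flips sides -- the "much" check returns False
# while the "insufficient" check returns True via its `not ... enough` branch.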
# # # # #
# # ###### #### # ## # ####
# # # # # # # # # #
# ##### #### # # # # # #
# # # # # # # # #
# # # # # # ## # #
# ###### #### # # # ####
# Maybe intensifiers can be viewed as a subset of affirmations?
affirmations = "|".join([
r"(yes",
r"yeah",
r"aye",
r"absolutely",
r"total?ly",
r"certainly",
r"probably",
r"definitely",
r"maybe",
r"right",
r"correct",
r"true",
r"possible",
r"possibly",
r"sure",
r"almost",
r"entirely",
r"fully",
r"highly",
r"ok",
r"okay",
r"agree",
r"alright)"
])
negations_short = "|".join([
r"(no",
r"not",
r"nay",
r"nope)"
])
negations_pronoun = "|".join([
r"(never",
r"no?one",
r"nobody",
r"nowhere",
r"nothing)"
])
negations_adjective = "|".join([
r"(impossible",
r"wrong",
r"false",
r"bullshit",
r"incorrect)"
])
negations = r"(("+negations_short+r"(\W|$))|" + negations_pronoun + "|" + negations_adjective + ")"
def has_negation(sentence):
if re.search( negations_short + r"[^\.\,\;(is)]+" + negations_adjective,sentence):
return False
elif re.search( negations,sentence):
return True
else:
return False
def has_affirmation( sentence):
if(
re.search( affirmations+r"(\W|$)",sentence)
and not has_negation( sentence)
):
return True
elif(
re.search( r"why not(\?|\!)", sentence)
or re.search( intensifiers + r" so(\W|$)", sentence)
or (
re.search( r"(\W|^)i (.* )?(think|say|hope) so(\W|$)", sentence)
and not has_negation(sentence)
)
or(
re.search( r"(\W|^)(sounds|feels) (" + intensifiers + " )?" + positives, sentence)
)
):
return True
else:
return False
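# Illustrative behaviour (derived from the patterns, not from the original source):
# has_negation("nobody came") is True via negations_pronoun, has_affirmation("that's
# wrong") is False, and has_affirmation("yeah, why not?") is True because the
# rhetorical "why not?" branch overrides the literal negation in the sentence.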
def has_elaboration(sentences):
text = "".join(sentences)
for pattern in [ positives, negatives, intensifiers, affirmations, negations]:
text=re.sub(pattern,"",text)
if len(text) > 20:
return True
else:
return False
#######
# # ##### ##### # #### # # ####
# # # # # # # # ## # #
# # # # # # # # # # # ####
# # ##### # # # # # # # #
# # # # # # # # ## # #
####### # # # #### # # ####
action_verbs = r"|".join([
r"(ask(ing)?",
r"go(ing)?",
r"demand(ing)?",
r"driv(e|ing)",
r"chang(e|ing)",
r"behav(e|ing)",
r"perform(ing)?",
r"work(ing)?",
r"meet(ing)?",
r"prepar(e|ing)",
r"smil(e|ing)",
r"help(ing)?",
r"support(ing)?",
r"aid(ing)?",
r"consult(ing)?",
r"coach(ing)?",
r"car(e|ing)",
r"bring(ing)?",
r"tak(e|ing)",
r"get(ting)?",
r"carry(ing)?",
r"solv(e|ing)",
r"creat(e|ing)",
r"initiat(e|ing)?",
r"engag(e|ing)",
r"set(ting)?",
r"motivat(e|ing)",
r"inspir(e|ing)",
r"eat(ing)?",
r"drink(ing)?",
r"consum(e|ing)",
r"sleep(ing)?",
r"see(ing)?",
r"invent(ing)?",
r"rehears(e|ing)",
r"dress(ing)?",
r"break(ing)?",
r"fill(ing)?",
r"fulfill(ing)?",
r"develop(ing)?",
r"rest(ing)?",
r"stop(ing)?",
r"increas(e|ing)",
r"decreas(e|ing)",
r"listen(ing)?",
r"meditat(e|ing)",
r"us(e|ing)",
r"spen.(ing)?",
r"wast(e|ing)",
r"organiz(e|ing)",
r"plan(ing)?",
r"invest(ing)?",
r"learn(ing)?",
r"join(ing)?",
r"practi.(e|ing)",
r"play(ing)?",
r"hik(e|ing)",
r"climb(ing)?",
r"walk(ing)?",
r"bik(e|ing)",
r"sail(ing)?",
r"jump(ing)?",
r"laugh(ing)?",
r"surf(ing)?",
r"swim(ing)?",
r"fly(ing)?",
r"writ(e|ing)",
r"reply(ing)?",
r"send(ing)?",
r"fight(ing)?",
r"buy(ing)?",
r"repair(ing)?",
r"continu(e|ing)",
r"lower(ing)?",
r"rais(e|ing)",
r"improv(e|ing)",
r"read(ing)?",
r"explor(ing)?",
r"travel(ing)?",
r"exchang(e|ing)",
r"invest(ing)?",
r"transfer(ing)?",
r"balanc(ing)?",
r"danc(e|ing)",
r"wear(ing)?",
r"mak(e|ing)",
r"keep(ing)?",
r"writ(e|ing)",
r"jump(ing)?",
r"stand(ing)?",
r"pos(e|ing)",
r"fake(e|ing)?",
r"pretend(ing)?",
r"tell(ing)?",
r"nap(ping)?",
r"research(ing)?",
r"find(ing)?",
r"discuss(ing)?",
r"argue(ing)?",
r"provoc(e|ing)",
r"suggest(ing)?",
r"start(ing)?",
r"apply(ing)?",
r"connect(ing)?",
r"(out|crowd)?sourc(e|ing)",
r"fun(ing)?",
r"found(ing)",
r"shar(e|ing)",
r"tap(ping)?",
r"invit(e|ing)",
r"investigat(e|ing)",
r"giv(e|ing)",
r"donat(e|ing)",
r"lov(e|ing)?)",
r"ignor(e|ing)",
r"deal(ing)?",
r"mind(ing)?",
r"do(ing)"
])
def has_option( sentence):
if(
re2.search( action_verbs, sentence)
and (
not has_negation( sentence)
or re.search( r"no matter (what)?", sentence))
and not has_quantifier_excessive( sentence)
and not has_quantifier_insufficient( sentence)
and not re.search( r"(too late)", sentence)
):
return True
else:
return False
    SaveFrugalInt = staticmethod(SaveFrugalInt)
def LoadFrugalInt(*args):
"""
LoadFrugalInt(char * pSrc, int & i) -> char *
Parameters:
pSrc: char *
i: int &
"""
return _snap.TInt_LoadFrugalInt(*args)
LoadFrugalInt = staticmethod(LoadFrugalInt)
def TestFrugalInt():
"""TestFrugalInt()"""
return _snap.TInt_TestFrugalInt()
TestFrugalInt = staticmethod(TestFrugalInt)
def SaveFrugalIntV(*args):
"""
SaveFrugalIntV(TSOut SOut, TIntV IntV)
Parameters:
SOut: TSOut &
IntV: TVec< TInt,int > const &
"""
return _snap.TInt_SaveFrugalIntV(*args)
SaveFrugalIntV = staticmethod(SaveFrugalIntV)
def LoadFrugalIntV(*args):
"""
LoadFrugalIntV(TSIn SIn, TIntV IntV, bool ClrP=True)
Parameters:
SIn: TSIn &
IntV: TVec< TInt,int > &
ClrP: bool
LoadFrugalIntV(TSIn SIn, TIntV IntV)
Parameters:
SIn: TSIn &
IntV: TVec< TInt,int > &
"""
return _snap.TInt_LoadFrugalIntV(*args)
LoadFrugalIntV = staticmethod(LoadFrugalIntV)
__swig_destroy__ = _snap.delete_TInt
TInt.Load = new_instancemethod(_snap.TInt_Load,None,TInt)
TInt.Save = new_instancemethod(_snap.TInt_Save,None,TInt)
TInt.LoadXml = new_instancemethod(_snap.TInt_LoadXml,None,TInt)
TInt.SaveXml = new_instancemethod(_snap.TInt_SaveXml,None,TInt)
TInt.__eq__ = new_instancemethod(_snap.TInt___eq__,None,TInt)
TInt.__ne__ = new_instancemethod(_snap.TInt___ne__,None,TInt)
TInt.__lt__ = new_instancemethod(_snap.TInt___lt__,None,TInt)
TInt.__call__ = new_instancemethod(_snap.TInt___call__,None,TInt)
TInt.__iadd__ = new_instancemethod(_snap.TInt___iadd__,None,TInt)
TInt.__isub__ = new_instancemethod(_snap.TInt___isub__,None,TInt)
TInt.GetMemUsed = new_instancemethod(_snap.TInt_GetMemUsed,None,TInt)
TInt.GetPrimHashCd = new_instancemethod(_snap.TInt_GetPrimHashCd,None,TInt)
TInt.GetSecHashCd = new_instancemethod(_snap.TInt_GetSecHashCd,None,TInt)
TInt_swigregister = _snap.TInt_swigregister
TInt_swigregister(TInt)
TInt.Mn = _snap.cvar.TInt_Mn
TInt.Mx = _snap.cvar.TInt_Mx
TInt.Kilo = _snap.cvar.TInt_Kilo
TInt.Mega = _snap.cvar.TInt_Mega
TInt.Giga = _snap.cvar.TInt_Giga
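# Hedged usage sketch (assuming the SWIG-generated module is imported as `snap`;
# the exact string formatting of the helpers comes from the underlying TStr code):
#   snap.TInt_GetMx(3, 7)        # -> 7
#   snap.TInt_IsEven(4)          # -> True
#   snap.TInt_GetKiloStr(2048)   # human-readable size string built from TInt.Kilo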
def TInt_Abs(*args):
"""
TInt_Abs(int const & Int) -> int
Parameters:
Int: int const &
"""
return _snap.TInt_Abs(*args)
def TInt_Sign(*args):
"""
TInt_Sign(int const & Int) -> int
Parameters:
Int: int const &
"""
return _snap.TInt_Sign(*args)
def TInt_Swap(*args):
"""
TInt_Swap(int & Int1, int & Int2)
Parameters:
Int1: int &
Int2: int &
"""
return _snap.TInt_Swap(*args)
def TInt_GetRnd(Range=0):
"""
GetRnd(int const & Range=0) -> int
Parameters:
Range: int const &
TInt_GetRnd() -> int
"""
return _snap.TInt_GetRnd(Range)
def TInt_IsOdd(*args):
"""
TInt_IsOdd(int const & Int) -> bool
Parameters:
Int: int const &
"""
return _snap.TInt_IsOdd(*args)
def TInt_IsEven(*args):
"""
TInt_IsEven(int const & Int) -> bool
Parameters:
Int: int const &
"""
return _snap.TInt_IsEven(*args)
def TInt_GetMn(*args):
"""
GetMn(int const & Int1, int const & Int2) -> int
Parameters:
Int1: int const &
Int2: int const &
GetMn(int const & Int1, int const & Int2, int const & Int3) -> int
Parameters:
Int1: int const &
Int2: int const &
Int3: int const &
TInt_GetMn(int const & Int1, int const & Int2, int const & Int3, int const & Int4) -> int
Parameters:
Int1: int const &
Int2: int const &
Int3: int const &
Int4: int const &
"""
return _snap.TInt_GetMn(*args)
def TInt_GetMx(*args):
"""
GetMx(int const & Int1, int const & Int2) -> int
Parameters:
Int1: int const &
Int2: int const &
GetMx(int const & Int1, int const & Int2, int const & Int3) -> int
Parameters:
Int1: int const &
Int2: int const &
Int3: int const &
TInt_GetMx(int const & Int1, int const & Int2, int const & Int3, int const & Int4) -> int
Parameters:
Int1: int const &
Int2: int const &
Int3: int const &
Int4: int const &
"""
return _snap.TInt_GetMx(*args)
def TInt_GetInRng(*args):
"""
TInt_GetInRng(int const & Val, int const & Mn, int const & Mx) -> int
Parameters:
Val: int const &
Mn: int const &
Mx: int const &
"""
return _snap.TInt_GetInRng(*args)
def TInt_GetHexStr(*args):
"""
GetHexStr(int const & Val) -> TStr
Parameters:
Val: int const &
TInt_GetHexStr(TInt Int) -> TStr
Parameters:
Int: TInt const &
"""
return _snap.TInt_GetHexStr(*args)
def TInt_GetKiloStr(*args):
"""
TInt_GetKiloStr(int const & Val) -> TStr
Parameters:
Val: int const &
"""
return _snap.TInt_GetKiloStr(*args)
def TInt_GetMegaStr(*args):
"""
TInt_GetMegaStr(int const & Val) -> TStr
Parameters:
Val: int const &
"""
return _snap.TInt_GetMegaStr(*args)
def TInt_SaveFrugalInt(*args):
"""
TInt_SaveFrugalInt(char * pDest, int i) -> char *
Parameters:
pDest: char *
i: int
"""
return _snap.TInt_SaveFrugalInt(*args)
def TInt_LoadFrugalInt(*args):
"""
TInt_LoadFrugalInt(char * pSrc, int & i) -> char *
Parameters:
pSrc: char *
i: int &
"""
return _snap.TInt_LoadFrugalInt(*args)
def TInt_TestFrugalInt():
"""TInt_TestFrugalInt()"""
return _snap.TInt_TestFrugalInt()
def TInt_SaveFrugalIntV(*args):
"""
TInt_SaveFrugalIntV(TSOut SOut, TIntV IntV)
Parameters:
SOut: TSOut &
IntV: TVec< TInt,int > const &
"""
return _snap.TInt_SaveFrugalIntV(*args)
def TInt_LoadFrugalIntV(*args):
"""
LoadFrugalIntV(TSIn SIn, TIntV IntV, bool ClrP=True)
Parameters:
SIn: TSIn &
IntV: TVec< TInt,int > &
ClrP: bool
TInt_LoadFrugalIntV(TSIn SIn, TIntV IntV)
Parameters:
SIn: TSIn &
IntV: TVec< TInt,int > &
"""
return _snap.TInt_LoadFrugalIntV(*args)
class TUInt(object):
"""Proxy of C++ TUInt class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
Val = _swig_property(_snap.TUInt_Val_get, _snap.TUInt_Val_set)
Rnd = _swig_property(_snap.TUInt_Rnd_get, _snap.TUInt_Rnd_set)
def __init__(self, *args):
"""
__init__(TUInt self) -> TUInt
__init__(TUInt self, uint const & _Val) -> TUInt
Parameters:
_Val: uint const &
__init__(TUInt self, TSIn SIn) -> TUInt
Parameters:
SIn: TSIn &
"""
_snap.TUInt_swiginit(self,_snap.new_TUInt(*args))
def Load(self, *args):
"""
Load(TUInt self, TSIn SIn)
Parameters:
SIn: TSIn &
"""
return _snap.TUInt_Load(self, *args)
def Save(self, *args):
"""
Save(TUInt self, TSOut SOut)
Parameters:
SOut: TSOut &
"""
return _snap.TUInt_Save(self, *args)
def LoadXml(self, *args):
"""
LoadXml(TUInt self, PXmlTok const & XmlTok, TStr Nm)
Parameters:
XmlTok: PXmlTok const &
Nm: TStr const &
"""
return _snap.TUInt_LoadXml(self, *args)
def SaveXml(self, *args):
"""
SaveXml(TUInt self, TSOut SOut, TStr Nm)
Parameters:
SOut: TSOut &
Nm: TStr const &
"""
return _snap.TUInt_SaveXml(self, *args)
def __call__(self, *args):
"""
__call__(TUInt self) -> uint
__call__(TUInt self) -> uint &
Parameters:
self: TUInt *
"""
return _snap.TUInt___call__(self, *args)
def __invert__(self):
"""
__invert__(TUInt self) -> TUInt
Parameters:
self: TUInt *
"""
return _snap.TUInt___invert__(self)
def __iand__(self, *args):
"""
__iand__(TUInt self, TUInt UInt) -> TUInt
Parameters:
UInt: TUInt const &
"""
return _snap.TUInt___iand__(self, *args)
def __ior__(self, *args):
"""
__ior__(TUInt self, TUInt UInt) -> TUInt
Parameters:
UInt: TUInt const &
"""
return _snap.TUInt___ior__(self, *args)
def __ixor__(self, *args):
"""
__ixor__(TUInt self, TUInt UInt) -> TUInt
Parameters:
UInt: TUInt const &
"""
return _snap.TUInt___ixor__(self, *args)
def __irshift__(self, *args):
"""
__irshift__(TUInt self, int const & ShiftBits) -> TUInt
Parameters:
ShiftBits: int const &
"""
return _snap.TUInt___irshift__(self, *args)
def __ilshift__(self, *args):
"""
__ilshift__(TUInt self, int const & ShiftBits) -> TUInt
Parameters:
ShiftBits: int const &
"""
return _snap.TUInt___ilshift__(self, *args)
def GetMemUsed(self):
"""
GetMemUsed(TUInt self) -> int
Parameters:
self: TUInt const *
"""
return _snap.TUInt_GetMemUsed(self)
def GetPrimHashCd(self):
"""
GetPrimHashCd(TUInt self) -> int
Parameters:
self: TUInt const *
"""
return _snap.TUInt_GetPrimHashCd(self)
def GetSecHashCd(self):
"""
GetSecHashCd(TUInt self) -> int
Parameters:
self: TUInt const *
"""
return _snap.TUInt_GetSecHashCd(self)
def GetRnd(Range=0):
"""
GetRnd(uint const & Range=0) -> uint
Parameters:
Range: uint const &
GetRnd() -> uint
"""
return _snap.TUInt_GetRnd(Range)
GetRnd = staticmethod(GetRnd)
def GetKiloStr(*args):
"""
GetKiloStr(uint const & Val) -> TStr
Parameters:
Val: uint const &
"""
return _snap.TUInt_GetKiloStr(*args)
GetKiloStr = staticmethod(GetKiloStr)
def GetMegaStr(*args):
"""
GetMegaStr(uint const & Val) -> TStr
Parameters:
Val: uint const &
"""
return _snap.TUInt_GetMegaStr(*args)
GetMegaStr = staticmethod(GetMegaStr)
def JavaUIntToCppUInt(*args):
"""
JavaUIntToCppUInt(uint const & JavaUInt) -> uint
Parameters:
JavaUInt: uint const &
"""
return _snap.TUInt_JavaUIntToCppUInt(*args)
JavaUIntToCppUInt = staticmethod(JavaUIntToCppUInt)
def IsIpStr(*args):
"""
IsIpStr(TStr IpStr, uint & Ip, char const & SplitCh='.') -> bool
Parameters:
IpStr: TStr const &
Ip: uint &
SplitCh: char const &
IsIpStr(TStr IpStr, uint & Ip) -> bool
Parameters:
IpStr: TStr const &
Ip: uint &
IsIpStr(TStr IpStr, char const & SplitCh='.') -> bool
Parameters:
IpStr: TStr const &
SplitCh: char const &
IsIpStr(TStr IpStr) -> bool
Parameters:
IpStr: TStr const &
"""
return _snap.TUInt_IsIpStr(*args)
IsIpStr = staticmethod(IsIpStr)
def GetUIntFromIpStr(*args):
"""
GetUIntFromIpStr(TStr IpStr, char const & SplitCh='.') -> uint
Parameters:
IpStr: TStr const &
SplitCh: char const &
GetUIntFromIpStr(TStr IpStr) -> uint
Parameters:
IpStr: TStr const &
"""
return _snap.TUInt_GetUIntFromIpStr(*args)
GetUIntFromIpStr = staticmethod(GetUIntFromIpStr)
def GetStrFromIpUInt(*args):
"""
GetStrFromIpUInt(uint const & Ip) -> TStr
Parameters:
Ip: uint const &
"""
return _snap.TUInt_GetStrFromIpUInt(*args)
GetStrFromIpUInt = staticmethod(GetStrFromIpUInt)
def IsIpv6Str(*args):
"""
IsIpv6Str(TStr IpStr, char const & SplitCh=':') -> bool
Parameters:
IpStr: TStr const &
SplitCh: char const &
IsIpv6Str(TStr IpStr) -> bool
Parameters:
IpStr: TStr const &
"""
return _snap.TUInt_IsIpv6Str(*args)
IsIpv6Str = staticmethod(IsIpv6Str)
__swig_destroy__ = _snap.delete_TUInt
TUInt.Load = new_instancemethod(_snap.TUInt_Load,None,TUInt)
TUInt.Save = new_instancemethod(_snap.TUInt_Save,None,TUInt)
TUInt.LoadXml = new_instancemethod(_snap.TUInt_LoadXml,None,TUInt)
TUInt.SaveXml = new_instancemethod(_snap.TUInt_SaveXml,None,TUInt)
TUInt.__call__ = new_instancemethod(_snap.TUInt___call__,None,TUInt)
TUInt.__invert__ = new_instancemethod(_snap.TUInt___invert__,None,TUInt)
TUInt.__iand__ = new_instancemethod(_snap.TUInt___iand__,None,TUInt)
TUInt.__ior__ = new_instancemethod(_snap.TUInt___ior__,None,TUInt)
TUInt.__ixor__ = new_instancemethod(_snap.TUInt___ixor__,None,TUInt)
TUInt.__irshift__ = new_instancemethod(_snap.TUInt___irshift__,None,TUInt)
TUInt.__ilshift__ = new_instancemethod(_snap.TUInt___ilshift__,None,TUInt)
TUInt.GetMemUsed = new_instancemethod(_snap.TUInt_GetMemUsed,None,TUInt)
TUInt.GetPrimHashCd = new_instancemethod(_snap.TUInt_GetPrimHashCd,None,TUInt)
TUInt.GetSecHashCd = new_instancemethod(_snap.TUInt_GetSecHashCd,None,TUInt)
TUInt_swigregister = _snap.TUInt_swigregister
TUInt_swigregister(TUInt)
TUInt.Mn = _snap.cvar.TUInt_Mn
TUInt.Mx = _snap.cvar.TUInt_Mx
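# Hedged usage sketch (assuming the SWIG-generated module is imported as `snap`;
# the byte ordering packed into the uint is whatever the underlying SNAP code uses):
#   if snap.TUInt_IsIpStr("192.168.0.1"):
#       ip = snap.TUInt_GetUIntFromIpStr("192.168.0.1")
#       ip_str = snap.TUInt_GetStrFromIpUInt(ip)   # dotted-quad string back from the uint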
def TUInt_GetRnd(Range=0):
"""
GetRnd(uint const & Range=0) -> uint
Parameters:
Range: uint const &
TUInt_GetRnd() -> uint
"""
return _snap.TUInt_GetRnd(Range)
def TUInt_GetKiloStr(*args):
"""
TUInt_GetKiloStr(uint const & Val) -> TStr
Parameters:
Val: uint const &
"""
return _snap.TUInt_GetKiloStr(*args)
def TUInt_GetMegaStr(*args):
"""
TUInt_GetMegaStr(uint const & Val) -> TStr
Parameters:
Val: uint const &
"""
return _snap.TUInt_GetMegaStr(*args)
def TUInt_JavaUIntToCppUInt(*args):
"""
TUInt_JavaUIntToCppUInt(uint const & JavaUInt) -> uint
Parameters:
JavaUInt: uint const &
"""
return _snap.TUInt_JavaUIntToCppUInt(*args)
def TUInt_IsIpStr(*args):
"""
    IsIpStr(TStr IpStr, uint & Ip, char const & SplitCh='.') -> bool
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
.. currentmodule:: thelma.tools.metadata.generation
This is the handler for experiment design parsers. It is a component of the
:class:`ExperimentMetadataGenerator`.
"""
from thelma.tools.handlers.base \
import MoleculeDesignPoolLayoutParserHandler
from thelma.tools.parsers.experimentdesign \
import ExperimentDesignParser
from thelma.tools.semiconstants import EXPERIMENT_SCENARIOS
from thelma.tools.semiconstants import get_positions_for_shape
from thelma.tools.metadata.base import TransfectionParameters
from thelma.tools.metadata.base import TransfectionPosition
from thelma.tools.utils.base import add_list_map_element
from thelma.tools.utils.base import is_valid_number
from thelma.tools.utils.layouts import MOCK_POSITION_TYPE
from thelma.entities.experiment import ExperimentDesign
from thelma.entities.experiment import ExperimentDesignRack
from thelma.entities.experiment import ExperimentMetadataType
from thelma.entities.racklayout import RackLayout
from thelma.entities.tagging import TaggedRackPositionSet
from thelma.entities.user import User
__docformat__ = 'reStructuredText en'
__all__ = ['_SUPPORTED_SCENARIOS',
'_SCENARIO_PARAMETERS',
'ExperimentDesignParserHandler',
]
class _SUPPORTED_SCENARIOS(object):
"""
Scenario supported by the experiment design parser handler.
"""
#: A list of all supported scenarios.
ALL = [EXPERIMENT_SCENARIOS.SCREENING, EXPERIMENT_SCENARIOS.OPTIMISATION,
EXPERIMENT_SCENARIOS.MANUAL, EXPERIMENT_SCENARIOS.ISO_LESS,
EXPERIMENT_SCENARIOS.LIBRARY]
class _SCENARIO_PARAMETERS(object):
"""
Mandatory and forbidden parameters for the supported scenarios.
"""
    #: Parameters (factors) that are potentially found in an experiment
#: design.
POTENTIAL_PARAMETERS = [TransfectionParameters.MOLECULE_DESIGN_POOL,
TransfectionParameters.REAGENT_NAME,
TransfectionParameters.REAGENT_DIL_FACTOR,
TransfectionParameters.FINAL_CONCENTRATION]
#: Potential parameters that are numerical.
NUMERICAL_PARAMETERS = [TransfectionParameters.FINAL_CONCENTRATION,
TransfectionParameters.REAGENT_DIL_FACTOR]
#: Transfection parameters that need to be specified in the layout.
MANDATORY_PARAMETERS = {
EXPERIMENT_SCENARIOS.SCREENING : [],
EXPERIMENT_SCENARIOS.LIBRARY : [],
EXPERIMENT_SCENARIOS.ISO_LESS : [],
EXPERIMENT_SCENARIOS.OPTIMISATION : [
TransfectionParameters.MOLECULE_DESIGN_POOL,
TransfectionParameters.FINAL_CONCENTRATION],
EXPERIMENT_SCENARIOS.MANUAL : POTENTIAL_PARAMETERS
}
#: Transfection parameters that must *not* be specified in the layout.
FORBIDDEN_PARAMETERS = {
EXPERIMENT_SCENARIOS.SCREENING : POTENTIAL_PARAMETERS,
EXPERIMENT_SCENARIOS.ISO_LESS : POTENTIAL_PARAMETERS,
EXPERIMENT_SCENARIOS.LIBRARY : POTENTIAL_PARAMETERS,
EXPERIMENT_SCENARIOS.OPTIMISATION : [],
EXPERIMENT_SCENARIOS.MANUAL : []
}
    #: Scenarios that additionally require a sheet called "Transfection".
TRANSFECTION_SHEET_SCENARIOS = [EXPERIMENT_SCENARIOS.OPTIMISATION,
EXPERIMENT_SCENARIOS.MANUAL]
class ExperimentDesignParserHandler(MoleculeDesignPoolLayoutParserHandler):
"""
This tool obtains a valid experiment design from an experiment metadata
file. There are different subclasses for the different experiment
scenarios.
**Return Value:** Experiment Design
(:class:`thelma.entities.experiment.ExperimentDesign`)
"""
NAME = 'Experiment Design Parser Handler'
_PARSER_CLS = ExperimentDesignParser
TAG_DOMAIN = ExperimentDesign.DOMAIN
#: The names of the sheet to parsed in any scenario.
BASIC_SHEET_NAMES = ['Seeding', 'Treatment', 'Assay']
    #: The name of the transfection sheet (for non-screening cases).
TRANSFECTION_SHEET_NAME = 'Transfection'
def __init__(self, stream, requester, scenario, parent=None):
"""
Constructor.
:param requester: the user uploading the experiment design
:type requester: :class:`thelma.entities.user.User`
:param scenario: The scenario (experiment metadata types) defines the
mandatory and forbidden parameters for a design rack layout and the
names of the sheets to be parsed.
:type scenario: :class:`thelma.entities.experiment.ExperimentMetadataType`
"""
MoleculeDesignPoolLayoutParserHandler.__init__(self, stream,
parent=parent)
#: The user uploading the file.
self.requester = requester
#: A :class:`ExperimentMetadataType` supported by this handler subclass.
self.scenario = scenario
#: Transfection parameters that need to be specified in the layout.
self.__mandatory_parameters = None
#: Transfection parameters that must *not* be specified in the layout.
self.__forbidden_parameters = None
#: Stores the presence of parameters (a parameter has to be
#: specified for each non-empty well or not at all).
self.__parameter_presence = dict()
#: The designs racks for the experiment design.
self.__design_racks = []
def _initialize_parser_keys(self):
"""
        Initialises parser values and sets the names of the sheets to be
        parsed for the chosen scenario.
"""
MoleculeDesignPoolLayoutParserHandler._initialize_parser_keys(self)
sheet_names = set()
        for sheet_name in self.BASIC_SHEET_NAMES:
            sheet_names.add(sheet_name)
if isinstance(self.scenario, ExperimentMetadataType) and \
self.scenario.id in \
_SCENARIO_PARAMETERS.TRANSFECTION_SHEET_SCENARIOS:
sheet_names.add(self.TRANSFECTION_SHEET_NAME)
self.parser.sheet_names = sheet_names
def _convert_results_to_entity(self):
"""
        Assembles an experiment design from the parsed sheets.
"""
self.add_info('Start experiment design generation ...')
self._check_input()
if not self.has_errors():
self.__set_scenario_values()
self._determine_rack_shape()
if not self.has_errors():
self.__create_design_racks()
self.__check_design_racks()
if not self.has_errors():
self.return_value = ExperimentDesign(rack_shape=self._rack_shape,
experiment_design_racks=self.__design_racks)
self.add_info('Experiment design creation completed.')
def _check_input(self):
"""
Checks the validity of the initialisation values.
"""
self.add_debug('Check input values ...')
self._check_input_class('requester', self.requester, User)
if self._check_input_class('experiment metadata type', self.scenario,
ExperimentMetadataType):
if not self.scenario.id in _SUPPORTED_SCENARIOS.ALL:
d_names = EXPERIMENT_SCENARIOS.get_displaynames(
_SUPPORTED_SCENARIOS.ALL)
msg = 'Unknown scenario: "%s". Allowed scenarios: %s.' \
% (self.scenario.display_name, ', '.join(d_names))
self.add_error(msg)
def __set_scenario_values(self):
"""
Sets the mandatory and forbidden parameters for the chosen scenario.
"""
self.add_debug('Set scenario values ...')
self.__mandatory_parameters = _SCENARIO_PARAMETERS.MANDATORY_PARAMETERS[
self.scenario.id]
self.__forbidden_parameters = _SCENARIO_PARAMETERS.FORBIDDEN_PARAMETERS[
self.scenario.id]
for parameter in _SCENARIO_PARAMETERS.POTENTIAL_PARAMETERS:
if parameter == TransfectionParameters.MOLECULE_DESIGN_POOL:
continue
self.__parameter_presence[parameter] = False
def __create_design_racks(self):
"""
Creates an experiment design object from the parsed data.
"""
for rack_container in self.parser.rack_map.values():
label = str(rack_container.rack_label)
trp_sets = self.__create_tagged_position_sets(rack_container)
rack_layout = RackLayout(shape=self._rack_shape,
tagged_rack_position_sets=trp_sets)
design_rack = ExperimentDesignRack(label=label,
rack_layout=rack_layout,
experiment_design=None, worklist_series=None)
self.__design_racks.append(design_rack)
def __create_tagged_position_sets(self, rack_container):
"""
Creates :class:`TaggedRackPositionSets` for a design rack layout.
"""
self.add_debug('Create tagged rack positions sets ...')
tagged_rack_positions = []
position_set_map = {} # maps rack position sets on hash values
tag_set_map = {} # maps tag lists on hash values
for layout_key in rack_container.layout_container_keys:
layout_container = self.parser.layout_map[layout_key]
for tag_container, pos_containers in layout_container.\
tag_data.iteritems():
pos_set = self._convert_to_rack_position_set(pos_containers)
hash_value = pos_set.hash_value
tag = self._convert_to_tag(tag_container)
if position_set_map.has_key(hash_value):
tag_set_map[hash_value].append(tag)
else:
position_set_map[hash_value] = pos_set
tag_set_map[hash_value] = [tag]
for hash_value in position_set_map.keys():
rps = position_set_map[hash_value]
tags = set(tag_set_map[hash_value])
trps = TaggedRackPositionSet(tags, rps, self.requester)
tagged_rack_positions.append(trps)
return tagged_rack_positions
def __check_design_racks(self):
"""
Checks the presence of parameters for each rack design rack.
"""
self.add_debug('Check design racks ...')
validators = dict()
for parameter in _SCENARIO_PARAMETERS.POTENTIAL_PARAMETERS:
validator = TransfectionParameters.create_validator_from_parameter(
parameter)
validators[parameter] = validator
for design_rack in self.__design_racks:
value_maps = self.__get_values_for_rack_layout(validators,
design_rack.rack_layout)
if self.__check_for_molecule_designs(value_maps, design_rack.label):
self.__check_numerical_values(value_maps, design_rack.label)
self.__check_reagent_name(value_maps, design_rack.label)
self.__check_value_presence(value_maps, design_rack.label)
def __get_values_for_rack_layout(self, validators, rack_layout):
"""
Finds the parameters values for each position in a design rack layout.
"""
shape_positions = get_positions_for_shape(self._rack_shape)
# Initialise the value maps
value_maps = dict()
for parameter in validators.keys():
rack_pos_dict = dict()
for rack_pos in shape_positions:
rack_pos_dict[rack_pos] = None
value_maps[parameter] = rack_pos_dict
for trps in rack_layout.tagged_rack_position_sets:
for tag in trps.tags:
for parameter, validator in validators.iteritems():
if validator.has_alias(tag.predicate):
value_map = value_maps[parameter]
for rack_pos in trps.rack_position_set:
value_map[rack_pos] = tag.value
return value_maps
def __check_for_molecule_designs(self, value_maps, label):
"""
Checks whether there are molecule designs in the layout.
"""
pool_parameter = TransfectionParameters.MOLECULE_DESIGN_POOL
has_pools = False
for value in value_maps[pool_parameter].values():
if not value is None:
has_pools = True
break
if not has_pools and pool_parameter in self.__mandatory_parameters:
msg = 'There are no molecule design pools in the layout for ' \
'design rack %s.' % (label)
self.add_error(msg)
return False
elif has_pools and pool_parameter in self.__forbidden_parameters:
msg = 'There are molecule design pools in the layout for design ' \
'rack %s. This is not allowed for the current scenario (%s).' \
% (label, self.scenario.display_name)
self.add_error(msg)
return False
return True
def __check_numerical_values(self, value_maps, label):
"""
Checks the values of the numerical parameters.
"""
invalid_numericals = dict()
invalid_mock = dict()
invalid_untreated = dict()
pool_map = value_maps[TransfectionParameters.MOLECULE_DESIGN_POOL]
for parameter, value_map in value_maps.iteritems():
if not parameter in _SCENARIO_PARAMETERS.NUMERICAL_PARAMETERS:
continue
for rack_pos, value in value_map.iteritems():
if value is None: continue
pool = pool_map[rack_pos]
if (pool == MOCK_POSITION_TYPE):
if not TransfectionParameters.is_valid_mock_value(value,
parameter):
add_list_map_element(invalid_mock, parameter,
rack_pos.label)
elif TransfectionParameters.is_untreated_type(pool):
if parameter in (TransfectionParameters.FINAL_CONCENTRATION,
TransfectionParameters.REAGENT_DIL_FACTOR) \
and not TransfectionPosition.\
is_valid_untreated_value(value):
add_list_map_element(invalid_untreated, parameter,
rack_pos.label)
elif not is_valid_number(value):
info = '%s (%s)' % (rack_pos.label, value)
add_list_map_element(invalid_numericals, parameter, info)
if len(invalid_numericals) > 0:
records_str = self.__get_error_record_string(invalid_numericals)
msg = 'The levels of some factors must be positive numbers. The ' \
'following positions in design rack %s have invalid ' \
'values: %s.' % (label, records_str)
self.add_error(msg)
if len(invalid_mock) > 0:
records_str = self.__get_error_record_string(invalid_mock)
msg = 'The levels of some factors for mock positions allow only ' \
'for the values "None" or "mock" (or no level). Some ' \
'positions in design rack "%s" have invalid levels. ' \
'Affected positions: %s.' % (label, records_str)
self.add_error(msg)
if len(invalid_untreated) > 0:
records_str = self.__get_error_record_string(invalid_untreated)
msg = 'The levels of some factors for untreated positions allow ' \
'only for the values "None" and "untreated" (or no level). ' \
                  'Some positions in design rack "%s" have invalid levels. ' \
'Affected positions: %s.' % (label, records_str)
self.add_error(msg)
def __check_reagent_name(self, value_maps, label):
"""
Checks the reagent name for each rack position in a layout
(if this parameter is an allowed one). The reagent name must have a
special value for untreated positions and be at least two character
long for other positions.
"""
        if TransfectionParameters.REAGENT_NAME \
                        not in self.__forbidden_parameters:
pool_map = value_maps[TransfectionParameters.MOLECULE_DESIGN_POOL]
name_map = value_maps[TransfectionParameters.REAGENT_NAME]
invalid_untreated = []
invalid_others = []
for rack_pos, reagent_name in name_map.iteritems():
if reagent_name is None: continue
pool = pool_map[rack_pos]
if TransfectionParameters.is_untreated_type(pool):
if not TransfectionPosition.is_valid_untreated_value(
reagent_name):
invalid_untreated.append(rack_pos.label)
                elif len(reagent_name) < 2:
invalid_others.append(rack_pos.label)
            if len(invalid_untreated) > 0:
                msg = 'Untreated positions must only have the reagent names ' \
                      '"None", "untreated" or no reagent name at all.'
# Copyright 2020 Open Reaction Database Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers validating specific Message types."""
import dataclasses
import math
import os
import re
from typing import Any, List, Mapping, Optional, Set, Tuple
import warnings
from absl import logging
from dateutil import parser
from rdkit import Chem
from rdkit import __version__ as RDKIT_VERSION
import ord_schema
from ord_schema import message_helpers
from ord_schema.proto import dataset_pb2
from ord_schema.proto import reaction_pb2
# pylint: disable=too-many-branches
@dataclasses.dataclass
class ValidationOptions:
"""Options for message validation."""
# Check that Dataset and Reaction IDs are well-formed.
validate_ids: bool = False
# Require ReactionProvenance for Reactions.
require_provenance: bool = False
# Allow reactions with valid reaction SMILES and nothing else.
allow_reaction_smiles_only: bool = True
@dataclasses.dataclass
class ValidationOutput:
"""Validation output: errors and warnings."""
errors: List[str] = dataclasses.field(default_factory=list)
warnings: List[str] = dataclasses.field(default_factory=list)
def extend(self, other):
self.errors.extend(other.errors)
self.warnings.extend(other.warnings)
def validate_datasets(datasets: Mapping[str, dataset_pb2.Dataset],
write_errors: bool = False,
options: Optional[ValidationOptions] = None):
"""Runs validation for a set of datasets.
Args:
datasets: Dict mapping text filenames to Dataset protos.
write_errors: If True, errors are written to disk.
options: ValidationOptions.
Raises:
ValidationError: if any Dataset does not pass validation.
"""
all_errors = []
for filename, dataset in datasets.items():
basename = os.path.basename(filename)
errors = _validate_datasets(dataset, label=basename, options=options)
if errors:
for error in errors:
all_errors.append(f'{filename}: {error}')
if write_errors:
with open(f'{filename}.error', 'w') as f:
for error in errors:
f.write(f'{error}\n')
# NOTE(kearnes): We run validation for all datasets before exiting if there
# are errors.
if all_errors:
error_string = '\n'.join(all_errors)
raise ValidationError(f'validation encountered errors:\n{error_string}')
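# Minimal usage sketch (hypothetical filename; the empty Dataset below is expected
# to fail validation, which shows how the collected errors surface as the
# ValidationError raised at the end of validate_datasets):
#
#     dataset = dataset_pb2.Dataset()
#     try:
#         validate_datasets({'empty.pb': dataset})
#     except ValidationError as error:
#         print(error)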
def _validate_datasets(
dataset: dataset_pb2.Dataset,
label: str = 'dataset',
options: Optional[ValidationOptions] = None) -> List[str]:
"""Validates Reaction messages and cross-references in a Dataset.
Args:
dataset: dataset_pb2.Dataset message.
label: string label for logging purposes only.
options: ValidationOptions.
Returns:
List of validation error messages.
"""
errors = []
# Reaction-level validation.
num_bad_reactions = 0
for i, reaction in enumerate(dataset.reactions):
reaction_output = validate_message(reaction,
raise_on_error=False,
options=options)
if reaction_output.errors:
num_bad_reactions += 1
for error in reaction_output.errors:
errors.append(error)
logging.warning('Validation error for %s[%d]: %s', label, i, error)
logging.info('Validation summary for %s: %d/%d successful (%d failures)',
label,
len(dataset.reactions) - num_bad_reactions,
len(dataset.reactions), num_bad_reactions)
# Dataset-level validation of cross-references.
dataset_output = validate_message(dataset,
raise_on_error=False,
recurse=False,
options=options)
for error in dataset_output.errors:
errors.append(error)
logging.warning('Validation error for %s: %s', label, error)
return errors
def validate_message(
message: ord_schema.Message,
recurse: bool = True,
raise_on_error: bool = True,
options: Optional[ValidationOptions] = None,
trace: Optional[Tuple[str, ...]] = None) -> ValidationOutput:
"""Template function for validating custom messages in the reaction_pb2.
Messages are not validated to check enum values, since these are enforced
by the schema. Instead, we only check for validity of items that cannot be
enforced in the schema (e.g., non-negativity of certain measurements,
consistency of cross-referenced keys).
Note that the message may be modified in-place with any unambiguous changes
needed to ensure validity.
Args:
message: A message to validate.
recurse: A boolean that controls whether submessages of message (i.e.,
fields that are messages) should also be validated. Defaults to
True.
raise_on_error: If True, raises a ValidationError exception when errors
are encountered. If False, the user must manually check the return
value to identify validation errors.
options: ValidationOptions.
trace: Tuple containing a string "stack trace" to track the position of
the current message relative to the recursion root.
Returns:
ValidationOutput.
Raises:
ValidationError: If any fields are invalid.
"""
if trace is None:
trace = (message.DESCRIPTOR.name,)
output = ValidationOutput()
# Recurse through submessages
if recurse:
for field, value in message.ListFields():
if field.type == field.TYPE_MESSAGE: # need to recurse
_validate_message(field=field,
value=value,
output=output,
raise_on_error=raise_on_error,
options=options,
trace=trace)
# Message-specific validation
if not isinstance(message, tuple(_VALIDATOR_SWITCH.keys())):
# NOTE(ccoley): I made the conscious decision to raise an error here,
# rather than assume that the message is valid. If a message does not
# require any message-level checks (not uncommon), then it should still
            # be listed in the dictionary switch above with pass. This will force
# us to think about what is necessary if/when new messages are added.
raise NotImplementedError(f"Don't know how to validate {type(message)}")
with warnings.catch_warnings(record=True) as tape:
if isinstance(message, (reaction_pb2.Reaction, dataset_pb2.Dataset)):
_VALIDATOR_SWITCH[type(message)](message, options=options)
else:
_VALIDATOR_SWITCH[type(message)](message)
stack = '.'.join(trace)
for warning in tape:
message = f'{stack}: {warning.message}'
if issubclass(warning.category, ValidationError):
if raise_on_error:
raise ValidationError(message)
output.errors.append(message)
else:
output.warnings.append(message)
return output
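# Minimal usage sketch (the Reaction below is an empty, hypothetical message and is
# expected to produce validation errors; shown only to illustrate the return value):
#
#     reaction = reaction_pb2.Reaction()
#     output = validate_message(reaction, raise_on_error=False)
#     print(output.errors)
#     print(output.warnings)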
def _validate_message(field: ord_schema.FieldDescriptor, value: Any,
output: ValidationOutput, raise_on_error: bool,
options: ValidationOptions, trace: Tuple[str, ...]):
"""Validates a single message field and its children.
Args:
field: FieldDescriptor instance.
value: The value of the current message field.
output: ValidationOutput.
raise_on_error: If True, raises a ValidationError exception when errors
are encountered. If False, the user must manually check the return
value to identify validation errors.
options: ValidationOptions.
trace: Tuple containing a string "stack trace" to track the position of
the current message relative to the recursion root.
"""
if field.label == field.LABEL_REPEATED:
if field.message_type.GetOptions().map_entry: # map
# value is message
if field.message_type.fields_by_name['value'].type == \
field.TYPE_MESSAGE:
for key, submessage in value.items():
this_trace = trace + (f'{field.name}["{key}"]',)
this_output = validate_message(
submessage,
raise_on_error=raise_on_error,
options=options,
trace=this_trace)
output.extend(this_output)
else: # value is a primitive
pass
else: # Just a repeated message
for index, submessage in enumerate(value):
this_trace = trace + (f'{field.name}[{index}]',)
this_output = validate_message(submessage,
raise_on_error=raise_on_error,
options=options,
trace=this_trace)
output.extend(this_output)
else: # no recursion needed
this_trace = trace + (field.name,)
this_output = validate_message(value,
raise_on_error=raise_on_error,
options=options,
trace=this_trace)
output.extend(this_output)
class ValidationError(Warning):
pass
class ValidationWarning(Warning):
pass
def is_empty(message: ord_schema.Message):
"""Returns whether the given message is empty."""
empty = type(message)().SerializeToString()
return message.SerializeToString(deterministic=True) == empty
# pylint: disable=missing-function-docstring
def ensure_float_nonnegative(message: ord_schema.Message, field: str):
if getattr(message, field) < 0:
warnings.warn(
f'Field {field} of message '
f'{type(message).DESCRIPTOR.name} must be'
' non-negative', ValidationError)
def ensure_float_range(message: ord_schema.Message,
field: str,
min_value: float = -math.inf,
max_value: float = math.inf):
if (getattr(message, field) < min_value or
getattr(message, field) > max_value):
warnings.warn(
f'Field {field} of message '
f'{type(message).DESCRIPTOR.name} must be between'
f' {min_value} and {max_value}', ValidationError)
def check_value_and_units(message: ord_schema.UnitMessage):
"""Checks that value/units messages are complete."""
if not message.HasField('value'):
warnings.warn(f'{type(message)} requires `value` to be set',
ValidationError)
if message.units == message.UNSPECIFIED:
warnings.warn(f'{type(message)} requires `units` to be set',
ValidationError)
def check_type_and_details(message: ord_schema.TypeDetailsMessage):
"""Checks that type/details messages are complete."""
if is_empty(message):
return
if message.type == message.UNSPECIFIED:
warnings.warn(f'{type(message)} requires `type` to be set',
ValidationError)
if message.type == message.CUSTOM and not message.details:
warnings.warn(
f'{type(message)} has type CUSTOM but details field is empty',
ValidationError)
def reaction_has_internal_standard(message: reaction_pb2.Reaction) -> bool:
"""Whether any reaction component uses the internal standard role."""
for reaction_input in message.inputs.values():
for compound in reaction_input.components:
if (compound.reaction_role ==
reaction_pb2.ReactionRole.INTERNAL_STANDARD):
return True
for workup in message.workups:
if workup.input:
for compound in workup.input.components:
if (compound.reaction_role ==
reaction_pb2.ReactionRole.INTERNAL_STANDARD):
return True
return False
def reaction_has_limiting_component(message: reaction_pb2.Reaction) -> bool:
"""Whether any reaction input compound is limiting."""
for reaction_input in message.inputs.values():
for compound in reaction_input.components:
if compound.is_limiting:
return True
return False
def reaction_needs_internal_standard(message: reaction_pb2.Reaction) -> bool:
"""Whether any analysis uses an internal standard."""
for outcome in message.outcomes:
for product in outcome.products:
for measurement in product.measurements:
if measurement.uses_internal_standard:
return True
return False
def get_referenced_reaction_ids(message: reaction_pb2.Reaction) -> Set[str]:
"""Return the set of reaction IDs that are referenced in a Reaction."""
referenced_ids = set()
for reaction_input in message.inputs.values():
for component in reaction_input.components:
for preparation in component.preparations:
if preparation.reaction_id:
referenced_ids.add(preparation.reaction_id)
for crude_component in reaction_input.crude_components:
referenced_ids.add(crude_component.reaction_id)
return referenced_ids
def is_valid_reaction_id(reaction_id: str) -> bool:
match = re.fullmatch('^ord-[0-9a-f]{32}$', reaction_id)
return bool(match)
def is_valid_dataset_id(dataset_id: str) -> bool:
match = re.fullmatch('^ord_dataset-[0-9a-f]{32}$', dataset_id)
return bool(match)
def validate_dataset(message: dataset_pb2.Dataset,
options: Optional[ValidationOptions] = None):
# pylint: disable=too-many-branches,too-many-nested-blocks
if options is None:
options = ValidationOptions()
if not message.reactions and not message.reaction_ids:
warnings.warn('Dataset requires reactions or reaction_ids',
ValidationError)
elif message.reactions and message.reaction_ids:
warnings.warn('Dataset requires reactions or reaction_ids, not both',
ValidationError)
if message.reaction_ids:
for reaction_id in message.reaction_ids:
if not is_valid_reaction_id(reaction_id):
warnings.warn('Reaction ID is malformed', ValidationError)
if options.validate_ids:
# The dataset_id is a 32-character uuid4 hex string.
if not is_valid_dataset_id(message.dataset_id):
warnings.warn('Dataset ID is malformed', ValidationError)
# Check cross-references
    dataset_referenced_ids
            # may contain NULL after sort()
if site == 'NULL':
continue
if prevIsJEDI:
foundRelease = True
winv = 1
else:
# get SiteSpec
if siteMapper.checkSite(site):
tmpSiteSpec = siteMapper.getSite(site)
else:
tmpLog.debug(" skip: %s doesn't exist in DB" % site)
continue
# ignore test sites
if (prevManualPreset is False) and (site.endswith('test') or \
site.endswith('Test') or site.startswith('Test')):
continue
# ignore analysis queues
if (not forAnalysis) and (not tmpSiteSpec.runs_production()):
continue
# check status
if tmpSiteSpec.status in ['offline','brokeroff'] and computingSite in ['NULL',None,'']:
if forAnalysis and tmpSiteSpec.status == 'brokeroff' and tmpSiteSpec.accesscontrol == 'grouplist':
# ignore brokeroff for grouplist site
pass
elif forAnalysis and prevProType in ['hammercloud','gangarobot','gangarobot-squid']:
# ignore site status for HC
pass
else:
tmpLog.debug(' skip: status %s' % tmpSiteSpec.status)
resultsForAnal['status'].append(site)
continue
if tmpSiteSpec.status == 'test' and (prevProType not in ['prod_test','hammercloud','gangarobot','gangarobot-squid']) \
and prevSourceLabel not in ['test','prod_test']:
tmpLog.debug(' skip: status %s for %s' % (tmpSiteSpec.status,prevProType))
resultsForAnal['status'].append(site)
continue
tmpLog.debug(' status=%s' % tmpSiteSpec.status)
# check core count
if tmpSiteSpec.coreCount > 1:
# use multi-core queue for MP jobs
if prevCoreCount not in [None,'NULL'] and prevCoreCount > 1:
pass
else:
tmpLog.debug(' skip: MP site (%s core) for job.coreCount=%s' % (tmpSiteSpec.coreCount,
prevCoreCount))
resultsForAnal['cpucore'].append(site)
continue
else:
# use single core for non-MP jobs
if prevCoreCount not in [None,'NULL'] and prevCoreCount > 1:
tmpLog.debug(' skip: single core site (%s core) for job.coreCount=%s' % (tmpSiteSpec.coreCount,
prevCoreCount))
resultsForAnal['cpucore'].append(site)
continue
# check max memory
if tmpSiteSpec.memory != 0 and prevMemory not in [None,0,'NULL']:
try:
if int(tmpSiteSpec.memory) < int(prevMemory):
tmpLog.debug(' skip: site memory shortage %s<%s' % (tmpSiteSpec.memory,prevMemory))
resultsForAnal['memory'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("max memory check : %s %s" % (errtype,errvalue))
# check maxcpucount
if tmpSiteSpec.maxtime != 0 and prevMaxCpuCount not in [None,0,'NULL']:
try:
if int(tmpSiteSpec.maxtime) < int(prevMaxCpuCount):
tmpLog.debug(' skip: insufficient maxtime %s<%s' % (tmpSiteSpec.maxtime,prevMaxCpuCount))
resultsForAnal['maxtime'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("maxtime check : %s %s" % (errtype,errvalue))
if tmpSiteSpec.mintime != 0 and prevMaxCpuCount not in [None,0,'NULL']:
try:
if int(tmpSiteSpec.mintime) > int(prevMaxCpuCount):
tmpLog.debug(' skip: insufficient job maxtime %s<%s' % (prevMaxCpuCount,tmpSiteSpec.mintime))
resultsForAnal['maxtime'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("mintime check : %s %s" % (errtype,errvalue))
# check max work dir size
if tmpSiteSpec.maxwdir != 0 and (prevDiskCount not in [None,0,'NULL']):
try:
if int(tmpSiteSpec.maxwdir) < int(prevDiskCount):
tmpLog.debug(' skip: not enough disk %s<%s' % (tmpSiteSpec.maxwdir, prevDiskCount))
resultsForAnal['scratch'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("disk check : %s %s" % (errtype,errvalue))
tmpLog.debug(' maxwdir=%s' % tmpSiteSpec.maxwdir)
# reliability
if forAnalysis and isinstance(siteReliability, (int, long)):
if tmpSiteSpec.reliabilityLevel is not None and tmpSiteSpec.reliabilityLevel > siteReliability:
tmpLog.debug(' skip: insufficient reliability %s > %s' % (tmpSiteSpec.reliabilityLevel,siteReliability))
resultsForAnal['reliability'].append(site)
continue
# change NULL cmtconfig to slc3/4
if prevCmtConfig in ['NULL','',None]:
if forAnalysis:
tmpCmtConfig = 'i686-slc4-gcc34-opt'
else:
tmpCmtConfig = 'i686-slc3-gcc323-opt'
else:
tmpCmtConfig = prevCmtConfig
# set release
releases = tmpSiteSpec.releases
origReleases = releases
if prevProType in ['reprocessing']:
# use validated releases for reprocessing
releases = tmpSiteSpec.validatedreleases
if not useCacheVersion:
tmpLog.debug(' %s' % str(releases))
if origReleases == ['ANY']:
# doesn't check releases for catch all
tmpLog.debug(' no release check due to releases=%s' % origReleases)
foundRelease = True
elif forAnalysis and (tmpSiteSpec.cloud in ['ND'] or prevRelease==''):
# doesn't check releases for analysis
tmpLog.debug(' no release check')
pass
elif forAnalysis and useCacheVersion:
# cache matching
if site not in siteListWithCache:
tmpLog.debug(' skip: cache %s/%s not found' % (prevRelease.replace('\n',' '),prevCmtConfig))
if trustIS:
resultsForAnal['rel'].append(site)
continue
elif prevRelease is not None and \
(useCacheVersion and tmpSiteSpec.cloud not in ['ND'] and site not in ['CERN-RELEASE']) and \
(prevProType not in ['reprocessing']) and \
(site not in siteListWithCache):
tmpLog.debug(' skip: cache %s/%s not found' % (prevHomePkg.replace('\n',' '), prevCmtConfig))
# send message to logger
try:
if prevSourceLabel in ['managed','test']:
resultsForAnal['rel'].append(site)
# make message
message = '%s - cache %s/%s not found' % (site,prevHomePkg.replace('\n',' '),prevCmtConfig)
if message not in loggerMessages:
loggerMessages.append(message)
except Exception:
pass
continue
elif prevRelease is not None and \
((not useCacheVersion and releases != [] and tmpSiteSpec.cloud not in ['ND'] and site not in ['CERN-RELEASE']) or prevProType in ['reprocessing']) and \
(((not _checkRelease(prevRelease,releases) and prevManualPreset is False) or site not in siteListWithCache) and tmpSiteSpec.cloud not in ['ND'] and site not in ['CERN-RELEASE']):
# release matching
if not useCacheVersion:
tmpLog.debug(' skip: release %s/%s not found' % (prevRelease.replace('\n',' '),prevCmtConfig))
else:
tmpLog.debug(' skip: repro cache %s/%s not found' % (prevHomePkg.replace('\n',' '),prevCmtConfig))
resultsForAnal['rel'].append(site)
continue
elif not foundRelease:
# found at least one site has the release
foundRelease = True
# get pilot statistics
nPilotsGet = 0
nPilotsUpdate = 0
if nWNmap == {}:
nWNmap = taskBuffer.getCurrentSiteData()
if site in nWNmap:
nPilots = nWNmap[site]['getJob'] + nWNmap[site]['updateJob']
nPilotsGet = nWNmap[site]['getJob']
nPilotsUpdate = nWNmap[site]['updateJob']
elif site.split('/')[0] in nWNmap:
tmpID = site.split('/')[0]
nPilots = nWNmap[tmpID]['getJob'] + nWNmap[tmpID]['updateJob']
nPilotsGet = nWNmap[tmpID]['getJob']
nPilotsUpdate = nWNmap[tmpID]['updateJob']
else:
nPilots = 0
tmpLog.debug(' original nPilots:%s get:%s update:%s' % (nPilots,nPilotsGet,nPilotsUpdate))
# limit on (G+1)/(U+1)
limitOnGUmax = 1.1
limitOnGUmin = 0.9
guRatio = float(1+nPilotsGet)/float(1+nPilotsUpdate)
if guRatio > limitOnGUmax:
nPilotsGet = limitOnGUmax * float(1+nPilotsUpdate) - 1.0
elif guRatio < limitOnGUmin:
nPilotsGet = limitOnGUmin * float(1+nPilotsUpdate) - 1.0
tmpLog.debug(' limited nPilots:%s get:%s update:%s' % (nPilots,nPilotsGet,nPilotsUpdate))
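            # e.g. (hypothetical pilot counts) getJob=30, updateJob=9 gives
            # (1+30)/(1+9)=3.1 > 1.1, so nPilotsGet is clamped to 1.1*(1+9)-1=10.0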
# if no pilots
if nPilots == 0 and nWNmap != {}:
tmpLog.debug(" skip: %s no pilot" % site)
resultsForAnal['pilot'].append(site)
continue
# if no jobs in jobsActive/jobsDefined
jobStatistics.setdefault(site,
{'assigned':0,'activated':0,'running':0,'transferring':0})
# set nRunning
if forAnalysis:
nRunningMap.setdefault(site, 0)
# check space
if specialWeight != {}:
# for PD2P
if site in sizeMapForCheck:
# threshold for PD2P max(5%,3TB)
thrForThisSite = long(sizeMapForCheck[site]['total'] * 5 / 100)
if thrForThisSite < diskThresholdPD2P:
thrForThisSite = diskThresholdPD2P
remSpace = sizeMapForCheck[site]['total'] - sizeMapForCheck[site]['used']
tmpLog.debug(' space available=%s remain=%s thr=%s' % (sizeMapForCheck[site]['total'],
remSpace,thrForThisSite))
if remSpace-datasetSize < thrForThisSite:
tmpLog.debug(' skip: disk shortage %s-%s< %s' % (remSpace,datasetSize,thrForThisSite))
if getWeight:
weightUsedByBrokerage[site] = "NA : disk shortage"
continue
else:
if tmpSiteSpec.space:
# production
if not forAnalysis:
# take assigned/activated/running jobs into account for production
nJobsIn = float(jobStatistics[site]['assigned'])
nJobsOut = float(jobStatistics[site]['activated']+jobStatistics[site]['running'])
# get remaining space and threshold
if site == siteMapper.getCloud(previousCloud)['source']:
# T1
remSpace = float(tmpSiteSpec.space) - 0.2 * nJobsOut
remSpace = int(remSpace)
diskThreshold = diskThresholdT1
else:
# T2
remSpace = float(tmpSiteSpec.space) - 0.2 * nJobsOut - 2.0 * nJobsIn
remSpace = int(remSpace)
diskThreshold = diskThresholdT2
else:
# analysis
remSpace = tmpSiteSpec.space
diskThreshold = diskThresholdAna
tmpLog.debug(' space available=%s remain=%s' % (tmpSiteSpec.space,remSpace))
if remSpace < diskThreshold:
tmpLog.debug(' skip: disk shortage < %s' % diskThreshold)
resultsForAnal['disk'].append(site)
# keep message to logger
try:
if prevSourceLabel in ['managed','test']:
# make message
message = '%s - disk %s < %s' % (site,remSpace,diskThreshold)
if message not in loggerMessages:
loggerMessages.append(message)
except Exception:
pass
continue
# get the process group
tmpProGroup = ProcessGroups.getProcessGroup(prevProType)
if prevProType in skipBrokerageProTypes:
                # use the original processingType since prod_test is in the test category and would otherwise be interfered with by validations
tmpProGroup = prevProType
# production share
skipDueToShare = False
try:
if not forAnalysis and prevSourceLabel in ['managed'] and site in faresharePolicy:
for tmpPolicy in faresharePolicy[site]['policyList']:
# ignore priority policy
if tmpPolicy['priority'] is not None:
continue
# only zero share
if tmpPolicy['share'] != '0%':
continue
# check group
if tmpPolicy['group'] is not None:
if '*' in tmpPolicy['group']:
# wildcard
tmpPatt = '^' + tmpPolicy['group'].replace('*','.*') + '$'
if re.search(tmpPatt,prevWorkingGroup) is None:
continue
else:
# normal definition
if prevWorkingGroup != tmpPolicy['group']:
continue
else:
# catch all except WGs used by other policies
groupInDefList = faresharePolicy[site]['groupList']
usedByAnother = False
# loop over all groups
for groupInDefItem in groupInDefList:
if '*' in groupInDefItem:
# wildcard
tmpPatt = '^' + groupInDefItem.replace('*','.*') + '$'
if re.search(tmpPatt,prevWorkingGroup) is not None:
usedByAnother = True
break
else:
# normal definition
if prevWorkingGroup == groupInDefItem:
usedByAnother = True
break
if usedByAnother:
continue
# check type
if tmpPolicy['type'] is not None:
if tmpPolicy['type'] == tmpProGroup:
skipDueToShare = True
break
else:
# catch all except PGs used by other policies
typeInDefList = faresharePolicy[site]['typeList'][tmpPolicy['group']]
usedByAnother = False
for typeInDefItem in typeInDefList:
if typeInDefItem == tmpProGroup:
usedByAnother = True
break
if not usedByAnother:
skipDueToShare = True
break
# skip
if skipDueToShare:
tmpLog.debug(" skip: %s zero share" % site)
resultsForAnal['share'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("share check : %s %s" % (errtype,errvalue))
# the number of assigned and activated
            if not
# Source: xinranzhu/GPTune-1 -- examples/GCN/gcn_MB.py
#! /usr/bin/env python
# GPTune Copyright (c) 2019, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S.Dept. of Energy) and the University of
# California, Berkeley. All rights reserved.
#
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Intellectual Property Office at <EMAIL>.
#
# NOTICE. This Software was developed under funding from the U.S. Department
# of Energy and the U.S. Government consequently retains certain rights.
# As such, the U.S. Government has been granted for itself and others acting
# on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in
# the Software to reproduce, distribute copies to the public, prepare
# derivative works, and perform publicly and display publicly, and to permit
# other to do so.
#
################################################################################
"""
Example of invocation of this script:
mpirun -n 1 python gcn_MB.py -dataset 'cora-citeseer' -nprocmin_pernode 1 -ntask 2 -nrun 10
where:
-nprocmin_pernode minimum number of MPIs per node for launching the application code
-ntask number of different tasks to be tuned
-nrun number of calls per task
    -dataset name of dataset to be tuned on
Description of the parameters of GCN:
Task space:
-dataset
Input space:
lr: learning rate
hidden: number of hidden layers
weight_decay: the L2 loss on GCN parameters
dropout: dropout rate
"""
import sys, os
# add GPTune path in front of all python pkg paths
from autotune.search import *
from autotune.space import *
from autotune.problem import *
from gptune import * # import all
sys.path.insert(0, os.path.abspath(__file__ + "/../GCN-driver/"))
from GCNdriver import GCNdriver
sys.path.insert(0, os.path.abspath(__file__ + "/../../../GPTune/"))
import re
import numpy as np
import time
import argparse
import pickle
from random import *
from callopentuner import OpenTuner
from callhpbandster import HpBandSter, HpBandSter_bandit
import math
import functools
import scipy
import openturns as ot  # needed below for ot.RandomGenerator.SetSeed (assumed not re-exported by gptune)
def objectives(point):
bmin = point['bmin']
bmax = point['bmax']
eta = point['eta']
params = [(point['dataset'], point["lr"],
point["hidden"], point["dropout"],
point["weight_decay"])]
max_epoch=500
min_epoch=100
# map budget to fidelity, i.e., percentage of training data
def budget_map(b, nmin=min_epoch, nmax=max_epoch):
k = (nmax-nmin)/(bmax-bmin)
m = nmax-bmax*k
if b == bmin:
return nmin
elif b == bmax:
return nmax
else:
return k * b + m
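    # Worked example of the linear budget->epoch mapping above, assuming
    # (hypothetically) bmin=1 and bmax=8: budget 1 -> 100 epochs,
    # budget 8 -> 500 epochs, budget 4 -> about 271 epochs.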
try:
budget = budget_map(int(point["budget"]))
except:
budget = None
validation_loss = GCNdriver(params, budget=budget, max_epoch=max_epoch, device=point["device"], seed=41)
    print(params, ' validation loss: ', validation_loss)
return validation_loss
def main():
(machine, processor, nodes, cores) = GetMachineConfiguration()
print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
# Parse command line arguments
args = parse_args()
bmin = args.bmin
device = args.device
bmax = args.bmax
eta = args.eta
nrun = args.nrun
npernode = args.npernode
ntask = args.ntask
Nloop = args.Nloop
restart = args.restart
TUNER_NAME = args.optimization
ot.RandomGenerator.SetSeed(args.seed)
TLA = False
print(args)
print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
os.environ['MACHINE_NAME'] = machine
os.environ['TUNER_NAME'] = TUNER_NAME
dataset = Categoricalnorm(['cora', 'citeseer'], transform="onehot", name="dataset")
lr = Real(1e-5, 1e-2, name="lr")
hidden = Integer(4, 64, transform="normalize", name="hidden")
weight_decay = Real(1e-5, 1e-2, name="weight_decay")
dropout = Real(0.1, 0.9, name="dropout")
validation_loss = Real(0., 1., name="validation_loss")
IS = Space([dataset])
PS = Space([weight_decay, hidden, lr, dropout])
OS = Space([validation_loss])
constraints = {}
constants={"nodes":nodes,"cores":cores,"npernode":npernode,"bmin":bmin,"bmax":bmax,"eta":eta, "device":device}
print(IS, PS, OS, constraints)
problem = TuningProblem(IS, PS, OS, objectives, constraints, constants=constants)
computer = Computer(nodes=nodes, cores=cores, hosts=None)
options = Options()
options['model_processes'] = 4 # parallel cholesky for each LCM kernel
# options['model_threads'] = 1
# options['model_restarts'] = args.Nrestarts
# options['distributed_memory_parallelism'] = False
# parallel model restart
options['model_restarts'] = restart
options['distributed_memory_parallelism'] = False
options['shared_memory_parallelism'] = False
# options['mpi_comm'] = None
options['model_class'] = 'Model_LCM' # Model_GPy_LCM or Model_LCM
options['verbose'] = False
options['sample_class'] = 'SampleOpenTURNS'
options['budget_min'] = bmin
options['budget_max'] = bmax
options['budget_base'] = eta
smax = int(np.floor(np.log(options['budget_max']/options['budget_min'])/np.log(options['budget_base'])))
budgets = [options['budget_max'] /options['budget_base']**x for x in range(smax+1)]
NSs = [int((smax+1)/(s+1))*options['budget_base']**s for s in range(smax+1)]
NSs_all = NSs.copy()
budget_all = budgets.copy()
for s in range(smax+1):
for n in range(s):
NSs_all.append(int(NSs[s]/options['budget_base']**(n+1)))
budget_all.append(int(budgets[s]*options['budget_base']**(n+1)))
Ntotal = int(sum(NSs_all) * Nloop)
Btotal = int(np.dot(np.array(NSs_all), np.array(budget_all))/options['budget_max'] * Nloop) # total number of evaluations at highest budget -- used for single-fidelity tuners
print(f"bmin = {bmin}, bmax = {bmax}, eta = {eta}, smax = {smax}")
print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
print("total number of samples: ", Ntotal)
print("total number of evaluations at highest budget: ", Btotal)
print()
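    # e.g. with bmin=1, bmax=8, eta=2 (hypothetical settings): smax=3,
    # budgets=[8,4,2,1], NSs=[4,4,4,8]; the inner loop then expands NSs_all to
    # 10 bracket sizes summing to 28, so Ntotal=28*Nloop and Btotal=15*Nloop.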
options.validate(computer = computer)
data = Data(problem)
# giventask = [[0.2, 0.5]]
giventask = []
dataset_list = args.dataset.split('-')
for dataset in dataset_list:
giventask.append([dataset])
NI=len(giventask)
assert NI == ntask # make sure number of tasks match
if(TUNER_NAME=='GPTune'):
gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
""" Building MLA with the given list of tasks """
NS = Btotal
if args.nrun > 0:
NS = args.nrun
NS1 = max(NS//2, 1)
(data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid])
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
if(TUNER_NAME=='opentuner'):
NS = Btotal
(data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid])
print(' Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
# results_file.write(f" Ps {data.P[tid][:NS]}\n")
results_file.write(f" Os {data.O[tid][:NS].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
# single-fidelity version of hpbandster
if(TUNER_NAME=='TPE'):
NS = Btotal
(data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="HpBandSter", niter=1)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
if(TUNER_NAME=='GPTuneBand'):
data = Data(problem)
gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
(data, stats, data_hist)=gt.MB_LCM(NS = Nloop, Igiven = giventask)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
nth = np.argmin(data.O[tid])
Popt = data.P[tid][nth]
# find which arm and which sample the optimal param is from
for arm in range(len(data_hist.P)):
try:
idx = (data_hist.P[arm]).index(Popt)
arm_opt = arm
except ValueError:
pass
print(' Popt ', Popt, 'Oopt ', min(data.O[tid])[0], 'nth ', nth)
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" dataset = {data.I[tid][0]}\n")
# results_file.write(f" Ps {data.P[tid]}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
results_file.close()
# multi-fidelity version
if(TUNER_NAME=='hpbandster'):
NS = Ntotal
(data,stats)=HpBandSter_bandit(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" dataset = {data.I[tid][0]}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
# print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
max_budget = 0.
Oopt = 99999
Popt = None
nth = None
for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
for subout in out[0]:
budget_cur = subout[0]
if budget_cur > max_budget:
max_budget = budget_cur
Oopt = subout[1]
Popt = config
nth = idx
elif budget_cur == max_budget:
if subout[1] < Oopt:
Oopt = subout[1]
Popt = config
nth = idx
                print(' Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)
from copy import deepcopy
import numpy as np
from numpy.lib.npyio import _savez
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from seispy.trace import Trace, FourierDomainTrace
from seispy.errors import EmptyStreamError, DataTypeError, \
SamplingError, SamplingRateError, NptsError, StarttimeError
from timetools.timetick import timetick
# from seispy.time.timetick import timetick
def readseispystream(npzfilename):
st = Stream()
st.from_npz(npzfilename=npzfilename)
return st
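# Minimal usage sketch (hypothetical filename; Stream.savez below enforces the
# '.seispystream.npz' suffix):
#
#     st = readseispystream("example.seispystream.npz")
#     print(st)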
class Stream(list):
def __init__(self, traces: list = None):
"""
initiate the instance with the stream (obspy or obsmax4)
or nothing : see self.from_obspy or self.from_npz"""
if traces is None:
super().__init__([])
else:
for trace in traces:
if not isinstance(trace, Trace):
raise TypeError(type(traces))
super().__init__(traces)
def copy(self):
return deepcopy(self)
def __str__(self):
return "\n".join([str(tr) for tr in self])
def __repr__(self):
return self.__str__()
# ============ convertion from or to obspy
def from_obspy(self, stream):
"""populate the objects with an obspy stream
use it to convert obspy into a seispy object
"""
for obspy_trace in stream:
trace = Trace()
trace.from_obspy(obspy_trace)
self.append(trace)
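    # Minimal conversion sketch (assumes obspy is installed; the file name is hypothetical):
    #
    #     import obspy
    #     st = Stream()
    #     st.from_obspy(obspy.read("example.mseed"))
    #     print(st)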
def to_obspy(self):
        # warning: this module must stay independent from obspy; here I just assume that the user
        # trying to convert this object to obspy has obspy installed
try:
from obspy.core.stream import Stream as ObspyStream
except ImportError as e:
e.args = ('obspy not installed', )
raise e
obspy_traces = []
for seispy_trace in self:
obspy_trace = seispy_trace.to_obspy()
obspy_traces.append(obspy_trace)
return ObspyStream(obspy_traces)
# ============
def check_data_types(self):
if not len(self):
raise EmptyStreamError()
dtype = self[0].data.dtype
for trace in self[1:]:
if dtype != trace.data.dtype:
raise DataTypeError
return dtype
def check_stream_sampling_regularization(self):
"""
verifies that all traces have the same time vector
:return:
"""
if not len(self):
raise EmptyStreamError()
msg = 'the stream is not regularized, please resample {}, ({}, {})'
nptss = np.asarray([tr.npts for tr in self], int)
deltas = np.asarray([tr.delta for tr in self], float)
starttimes = np.asarray([tr.starttime for tr in self], float)
npts = self[0].npts
delta = self[0].delta
starttime = self[0].starttime
is_npts = nptss == npts
is_delta = deltas == delta
is_start = starttimes == starttime
if not is_npts.all():
raise NptsError(msg.format("npts", npts, nptss[~is_npts][0]))
elif not is_delta.all():
raise SamplingRateError(msg.format("delta", delta, deltas[~is_delta][0]))
elif not is_start.all():
raise StarttimeError(msg.format("starttime", starttime, starttimes[~is_start][0]))
return npts, delta, starttime
def regularize(self, fill_value: float = 0.0, qc: bool = True):
if not len(self):
raise EmptyStreamError()
starttimes = np.asarray([tr.starttime for tr in self], float)
endtimes = np.asarray([tr.endtime for tr in self], float)
deltas = np.asarray([tr.delta for tr in self], float)
delta = np.min(deltas)
start = np.min(starttimes)
end = np.max(endtimes)
new_npts = int(np.floor((end - start) / delta))
new_time = np.arange(new_npts) * delta + start
for n, tr in enumerate(self):
tr: Trace
if (tr.delta == delta) and \
(tr.starttime == start) and \
(tr.npts == new_npts):
# no need to interpolate the signal
continue
old_time = tr.atime()
old_data = tr.data
tr.data = np.interp(
new_time, xp=old_time, fp=old_data,
left=fill_value, right=fill_value)
tr.starttime = start
tr.delta = delta
        if qc:
            try:
                self.check_stream_sampling_regularization()
            except (EmptyStreamError, SamplingError) as e:
                e.args = ("the regularization failed, {}".format(str(e)), )
                raise e
def mean(self):
nptss = np.asarray([tr.npts for tr in self], float)
sum = np.sum([tr.data.sum() for tr in self])
mean = sum / nptss.sum()
return mean
def pseudo_std(self):
"""
std is evaluated by means of deviations relative to the mean of each trace
and not relative to the ensemble mean as in self.std
"""
nptss = np.asarray([tr.npts for tr in self], float)
covariances = np.asarray([tr.data.std() ** 2.0 for tr in self], float) # E((Xi - E(Xi))^2)
return ((nptss * covariances).sum() / nptss.sum()) ** 0.5
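    # i.e. pseudo_std = sqrt( sum_i(npts_i * var_i) / sum_i(npts_i) ), with var_i the
    # variance of trace i about its own mean, so a constant offset between traces
    # does not inflate the estimate, unlike std() below which uses the ensemble mean.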
def std(self):
# return np.concatenate([tr.data for tr in self]).std()
# same as above without concatenating arrays
nptss = np.asarray([tr.npts for tr in self], float)
means = np.asarray([tr.data.mean() for tr in self], float)
mean = (nptss * means).sum() / nptss.sum()
deviations = np.array([((tr.data - mean) ** 2.0).sum() for tr in self])
return (deviations.sum() / nptss.sum()) ** 0.5
def clip(self, nstd=10.0):
"""
remove outliers above a certain threshold given in number of times the pseudo_std
:param nstd:
:return:
"""
means = np.asarray([tr.data.mean() for tr in self], float)
pseudo_std = self.pseudo_std()
for tr, m in zip(self, means):
tr.data = tr.data.clip(m - nstd * pseudo_std, m + nstd * pseudo_std)
def show(self, ax, gain=0.1, color="k", alpha=0.4,
seedticks=False, linewidth=2, linestyle="-",
obspy_decim=False, obspy_decim_nwin=1000):
"""
show many traces on same plot with vertical offset 1 per trace
:param ax:
:param gain:
:param color:
:param alpha:
:param seedticks:
:param linewidth:
:param linestyle:
:param obspy_decim:
:return:
"""
if len(self) <= 1:
raise ValueError('too few items for st.show, ')
fourier_domain = np.all([isinstance(tr, FourierDomainTrace) for tr in self])
xmin, xmax = np.inf, -np.inf
edge_segments = []
assert 0 < alpha <= 1.0
i = 0
if fourier_domain:
fs, dats = [], []
for i, tr in enumerate(self):
f, dat = tr.side(sign=1, zero=False, copy=False)
fs.append(f)
dats.append(np.abs(dat))
k = gain / np.std(np.hstack(dats))
xmin = np.hstack(fs).min()
xmax = np.hstack(fs).max()
for i, (f, dat) in enumerate(zip(fs, dats)):
edge_segments.append(np.column_stack((f, k * dat + i)))
else:
k = gain / self.std()
for i, tr in enumerate(self):
if obspy_decim:
t, dat = tr.obspy_like_decim(nwin=obspy_decim_nwin)
dat = np.column_stack((t, k * dat + i))
else:
dat = np.column_stack((tr.atime(), k * tr.data + i))
edge_segments.append(dat)
if tr.starttime < xmin:
xmin = tr.starttime
if tr.endtime > xmax:
xmax = tr.endtime
coll = LineCollection(
edge_segments, colors=color, alpha=alpha,
linewidths=linewidth, linestyles=linestyle)
ax.add_collection(coll)
if seedticks:
yticks = np.arange(len(self))
yticklabels = [_.seedid for _ in self]
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels)
ax.set_xlim(xmin, xmax)
ax.set_ylim(-1., i + 1.)
if fourier_domain:
pass
else:
timetick(ax=ax, axis="x", major=True, minor=True)
return coll
def shade(self, ax, cmap=None, vmin=None, vmax=None, powergain=1., seedticks=False, **kwargs):
"""
:param ax: obsmax4.graphictools.gutils.myax object, use obsmax4.graphictools.gca
:param cmap: colormap
:param vmin: float, lowest value, or None
:param vmax: float, highest value, or None
:param powergain: float, > 0, apply power gain to the plotted amplitudes
:param cticks:
:param args:
:param kwargs:
:return:
"""
assert len(self)
kwargs.setdefault('rasterized', True)
fourier_domain = np.all([isinstance(tr, FourierDomainTrace) for tr in self])
if cmap is None:
if fourier_domain:
cmap = plt.get_cmap('nipy_spectral')
else:
cmap = plt.get_cmap('gray')
nmax = np.max([len(tr.data) for tr in self])
T, I, D = [], [], []
dmin, dmax = np.inf, -np.inf
for n, tr in enumerate(self):
if fourier_domain:
f, d = tr.side(sign=1, zero=False, copy=False)
d = np.abs(d)
else:
d = tr.data[:]
if powergain != 1.:
d = np.sign(d) * np.abs(d) ** powergain
# all items in D must be the same length
d = np.concatenate((d, np.nan * np.zeros(nmax - len(d))))
d = np.ma.masked_where(np.isnan(d) | np.isinf(d), d)
dmin = np.min([dmin, d.min()])
dmax = np.max([dmax, d.max()])
# -----
D.append(d)
if n <= len(self) - 2:
D.append(d * 0.)
# -----
if fourier_domain:
df = f[1] - f[0]
f = -.5 * df + np.hstack((f, (f[-1] + df) * np.ones(nmax + 1 - len(f))))
T.append(f)
T.append(f)
else:
dt = tr.delta
t = -.5 * dt + tr.starttime + np.arange(nmax+1) * dt
T.append(t)
T.append(t)
# -----
I.append(n - .5 * np.ones(len(d) + 1))
I.append(n + .5 * np.ones(len(d) + 1))
T, I, D = [np.asarray(_) for _ in [T, I, D]]
if vmin is None and vmax is None:
vmax = np.max([abs(dmin), abs(dmax)])
vmin = -vmax
if vmax is None:
vmax = dmax
if vmin is None:
vmin = dmin
if fourier_domain:
vmin=0.
vmax=vmax
# print(T.shape, I.shape, D.shape)
coll = ax.pcolormesh(
T, I, D,
cmap=cmap,
vmin=vmin, vmax=vmax,
**kwargs)
if seedticks:
yticks = np.arange(len(self))
yticklabels = [_.seedid for _ in self]
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels)
ax.set_xlim((T.min(), T.max()))
ax.set_ylim((0, I.max()))
cbarwidth = 0.008
cbarheight = 0.5
cbardist = 0.012
p = ax.get_position()
cax = ax.figure.add_axes((p.x1 + cbardist * p.width,
p.y0 + 0.5 * (1. - cbarheight) * p.height,
cbarwidth, cbarheight * p.height))
ax.figure.colorbar(coll, cax=cax, ticks=[vmin, 0, vmax])
cax.set_yticklabels(["-", "0", "+"])
if not fourier_domain:
timetick(ax=ax, axis="x", major=True, minor=True)
return coll, cax
def savez(self, npzfilename):
"""
write the stream under npz format
the filename must end with .seispystream.npz
:param npzfilename:
:return:
"""
if not len(self):
raise EmptyStreamError
if not npzfilename.endswith('.seispystream.npz'):
raise ValueError('npzfilename does not end with .seispystream.npz')
# == put the metadata into lists, one per item
kwargs = {
"npts": np.array([trace.npts for trace in self], np.dtype('uint32')),
"delta": np.array([trace.delta for trace in self], np.dtype('float64')),
"starttime": np.array([trace.starttime for trace in self], np.dtype('float64')),
"seedid": np.array([trace.seedid | |
}, {
"stepBegin": 55,
"stepEnd": 60,
"vector": {
"x": 0,
"y": 0.25,
"z": 0
}
}],
"rotates": [{
"stepBegin": 1,
"stepEnd": 2,
"vector": {
"x": 0,
"y": 45,
"z": 0
}
}, {
"stepBegin": 11,
"stepEnd": 12,
"vector": {
"x": 0,
"y": -45,
"z": 0
}
}, {
"stepBegin": 55,
"stepEnd": 56,
"vector": {
"x": 0,
"y": 45,
"z": 0
}
}]
}, {
"id": "occluder_pole_",
"type": "cylinder",
"kinematic": True,
"structure": True,
"mass": 100,
"materials": ["AI2-THOR/Materials/Walls/DrywallBeige"],
"shows": [{
"stepBegin": 0,
"position": {
"x": 0,
"y": 2.25,
"z": 1
},
"scale": {
"x": 0.1,
"y": 1,
"z": 0.1
}
}],
"moves": [{
"stepBegin": 1,
"stepEnd": 6,
"vector": {
"x": 0,
"y": 0.25,
"z": 0
}
}, {
"stepBegin": 7,
"stepEnd": 12,
"vector": {
"x": 0,
"y": -0.25,
"z": 0
}
}, {
"stepBegin": 55,
"stepEnd": 60,
"vector": {
"x": 0,
"y": 0.25,
"z": 0
}
}]
}]
OCCLUDER_INSTANCE_SIDEWAYS = [{
"id": "occluder_wall_",
"type": "cube",
"kinematic": True,
"structure": True,
"mass": 100,
"materials": ["AI2-THOR/Materials/Walls/DrywallBeige"],
"shows": [{
"stepBegin": 0,
"position": {
"x": 0,
"y": 0.75,
"z": 1
},
"scale": {
"x": 1,
"y": 1.5,
"z": 0.1
}
}],
"moves": [{
"stepBegin": 1,
"stepEnd": 4,
"vector": {
"x": 0,
"y": 0.25,
"z": 0
}
}, {
"stepBegin": 9,
"stepEnd": 12,
"vector": {
"x": 0,
"y": -0.25,
"z": 0
}
}, {
"stepBegin": 35,
"stepEnd": 38,
"vector": {
"x": 0,
"y": 0.25,
"z": 0
}
}],
"rotates": [{
"stepBegin": 5,
"stepEnd": 6,
"vector": {
"x": 45,
"y": 0,
"z": 0
}
}, {
"stepBegin": 7,
"stepEnd": 8,
"vector": {
"x": -45,
"y": 0,
"z": 0
}
}, {
"stepBegin": 39,
"stepEnd": 40,
"vector": {
"x": 45,
"y": 0,
"z": 0
}
}]
}, {
"id": "occluder_pole_",
"type": "cylinder",
"kinematic": True,
"structure": True,
"mass": 100,
"materials": ["AI2-THOR/Materials/Walls/DrywallBeige"],
"shows": [{
"stepBegin": 0,
"position": {
"x": 0,
"y": 0.75,
"z": 1
},
"rotation": {
"x": 0,
"y": 0,
"z": 90
},
"scale": {
"x": 0.1,
"y": 3,
"z": 0.1
}
}],
"moves": [{
"stepBegin": 1,
"stepEnd": 4,
"vector": {
"x": 0.25,
"y": 0,
"z": 0
}
}, {
"stepBegin": 9,
"stepEnd": 12,
"vector": {
"x": -0.25,
"y": 0,
"z": 0
}
}, {
"stepBegin": 35,
"stepEnd": 38,
"vector": {
"x": 0.25,
"y": 0,
"z": 0
}
}]
}]
OBJECTS_INTPHYS: List[Dict[str, Any]] = [{
"type": "sphere",
"info": ["medium", "ball"],
"mass": 0.75,
"choose": [{
"materialCategory": ["plastic"],
"salientMaterials": ["plastic", "hollow"]
}, {
"materialCategory": ["rubber"],
"salientMaterials": ["rubber"]
}, {
"materialCategory": ["wood"],
"salientMaterials": ["wood"]
}, {
"materialCategory": ["metal"],
"salientMaterials": ["metal"]
}],
"attributes": ["moveable", "pickupable"],
"dimensions": {
"x": 0.75,
"y": 0.75,
"z": 0.75
},
"position_y": 0.375,
"scale": {
"x": 0.75,
"y": 0.75,
"z": 0.75
},
"intphys_options": [{
"y": 0,
"force": {
"x": 300 * 0.75,
"y": 0,
"z": 0
},
"position_by_step": [
0.38,
0.759,
1.139,
1.518,
1.898,
2.278,
2.657,
3.037,
3.416,
3.796,
4.176,
4.555,
4.935,
5.314,
5.694,
6.074,
6.453,
6.833,
7.213,
7.592,
7.972,
8.351,
8.731,
9.111,
9.49,
9.87,
10.249,
10.629
]
}, {
"y": 0,
"force": {
"x": 450 * 0.75,
"y": 0,
"z": 0
},
"position_by_step": [
0.604,
1.209,
1.813,
2.417,
3.022,
3.626,
4.231,
4.835,
5.439,
6.044,
6.648,
7.252,
7.857,
8.461,
9.066,
9.67,
10.274,
10.879
]
}, {
"y": 0,
"force": {
"x": 600 * 0.75,
"y": 0,
"z": 0
},
"position_by_step": [
0.829,
1.659,
2.488,
3.317,
4.147,
4.976,
5.806,
6.635,
7.464,
8.294,
9.123,
9.952,
10.782
]
}, {
"y": 1.5,
"force": {
"x": 300 * 0.75,
"y": 0,
"z": 0
},
"position_by_step": [
0.45,
0.9,
1.35,
1.8,
2.25,
2.7,
3.15,
3.6,
4.05,
4.5,
4.95,
5.4,
5.832,
6.212,
6.591,
6.971,
7.35,
7.73,
8.11,
8.489,
8.869,
9.248,
9.628,
10.008,
10.387,
10.767
]
}, {
"y": 1.5,
"force": {
"x": 450 * 0.75,
"y": 0,
"z": 0
},
"position_by_step": [
0.675,
1.35,
2.025,
2.7,
3.375,
4.05,
4.725,
5.4,
6.075,
6.75,
7.425,
8.1,
8.757,
9.361,
9.966,
10.57
]
}, {
"y": 1.5,
"force": {
"x": 600 * 0.75,
"y": 0,
"z": 0
},
"position_by_step": [
0.9,
1.8,
2.7,
3.6,
4.5,
5.4,
6.3,
7.2,
8.1,
9,
9.9,
11.8
]
}]
}, {
"type": "sphere",
"info": ["small", "ball"],
"mass": 0.5,
"choose": [{
"materialCategory": ["plastic"],
"salientMaterials": ["plastic", "hollow"]
}, {
"materialCategory": ["rubber"],
"salientMaterials": ["rubber"]
}, {
"materialCategory": ["wood"],
"salientMaterials": ["wood"]
}, {
"materialCategory": ["metal"],
"salientMaterials": ["metal"]
}],
"attributes": ["moveable", "pickupable"],
"dimensions": {
"x": 0.5,
"y": 0.5,
"z": 0.5
},
"position_y": 0.25,
"scale": {
"x": 0.5,
"y": 0.5,
"z": 0.5
},
"intphys_options": [{
"y": 0,
"force": {
"x": 300 * 0.5,
"y": 0,
"z": 0
},
"position_by_step": [
0.379,
0.759,
1.138,
1.517,
1.897,
2.276,
2.656,
3.035,
3.414,
3.794,
4.173,
4.552,
4.932,
5.311,
5.691,
6.07,
6.449,
6.829,
7.208,
7.587,
7.967,
8.346,
8.725,
9.105,
9.484,
9.864,
10.243,
10.622
]
}, {
"y": 0,
"force": {
"x": 450 * 0.5,
"y": 0,
"z": 0
},
"position_by_step": [
0.604,
1.209,
1.813,
2.417,
3.022,
3.626,
4.231,
4.835,
5.439,
6.044,
6.648,
7.252,
7.857,
8.461,
9.066,
9.67,
10.274,
10.879
]
}, {
"y": 0,
"force": {
"x": 600 * 0.5,
"y": 0,
"z": 0
},
"position_by_step": [
0.829,
1.659,
2.488,
3.317,
4.147,
4.976,
5.806,
6.635,
7.464,
8.294,
9.123,
9.952,
10.782
]
}, {
"y": 1.5,
"force": {
"x": 300 * 0.5,
"y": 0,
"z": 0
},
"position_by_step": [
0.45,
0.9,
1.35,
1.8,
2.25,
2.7,
3.15,
3.6,
4.05,
4.5,
4.95,
5.4,
5.832,
6.211,
6.591,
6.97,
7.349,
7.729,
8.108,
8.487,
8.867,
9.246,
9.625,
10.005,
10.384
]
}, {
"y": 1.5,
"force": {
"x": 450 * 0.5,
"y": 0,
"z": 0
},
"position_by_step": [
0.675,
1.35,
2.025,
2.7,
3.375,
4.05,
4.725,
5.4,
6.075,
6.75,
7.425,
8.1,
8.757,
9.361,
9.966,
10.57
]
}, {
"y": 1.5,
"force": {
"x": 600 * 0.5,
"y": 0,
"z": 0
},
"position_by_step": [
0.9,
1.8,
2.7,
3.6,
4.5,
5.4,
6.3,
7.2,
8.1,
9,
9.9,
11.8
]
}]
}, {
"type": "sphere",
"info": ["tiny", "ball"],
"mass": 0.25,
"choose": [{
"materialCategory": ["plastic"],
"salientMaterials": ["plastic", "hollow"]
}, {
"materialCategory": ["rubber"],
"salientMaterials": ["rubber"]
}, {
"materialCategory": ["wood"],
"salientMaterials": ["wood"]
}, {
"materialCategory": ["metal"],
"salientMaterials": ["metal"]
}],
"attributes": ["moveable", "pickupable"],
"dimensions": {
"x": 0.25,
"y": 0.25,
"z": 0.25
},
"position_y": 0.125,
"scale": {
"x": 0.25,
"y": 0.25,
"z": 0.25
},
"intphys_options": [{
"y": 0,
"force": {
"x": 300 * 0.25,
"y": 0,
"z": 0
},
"position_by_step": [
0.379,
0.759,
1.138,
1.517,
1.897,
2.276,
2.656,
3.035,
3.414,
3.794,
4.173,
4.552,
4.932,
5.311,
5.691,
6.07,
6.449,
6.829,
7.208,
7.587,
7.967,
8.346,
8.725,
9.105,
9.484,
9.864,
10.243
]
}, {
"y": 0,
"force": {
"x": 450 * 0.25,
"y": 0,
"z": 0
},
"position_by_step": [
0.604,
1.209,
1.813,
2.417,
3.022,
3.626,
4.231,
4.835,
5.439,
6.044,
6.648,
7.252,
7.857,
8.461,
9.066,
9.67,
10.274
]
}, {
"y": 0,
"force": {
"x": 600 * 0.25,
"y": 0,
"z": 0
},
"position_by_step": [
0.829,
1.659,
2.488,
3.317,
4.147,
4.976,
5.806,
6.635,
7.464,
8.294,
9.123,
9.952,
10.782
]
}, {
"y": 1.5,
"force": {
"x": 300 * 0.25,
"y": 0,
"z": 0
},
"position_by_step": [
0.45,
0.9,
1.35,
1.8,
2.25,
2.7,
3.15,
3.6,
4.05,
4.5,
4.95,
5.4,
5.832,
6.211,
6.591,
6.97,
7.349,
7.729,
8.108,
8.487,
8.867,
9.246,
9.625,
10.005,
10.384
]
}, {
"y": 1.5,
"force": {
"x": 450 * 0.25,
"y": 0,
"z": 0
},
"position_by_step": [
0.675,
1.35,
2.025,
2.7,
3.375,
4.05,
4.725,
5.4,
6.075,
6.75,
7.425,
8.1,
8.757,
9.361,
9.966,
10.57
]
}, {
"y": 1.5,
"force": {
"x": 600 * 0.25,
"y": 0,
"z": 0
},
"position_by_step": [
0.9,
1.8,
2.7,
3.6,
4.5,
5.4,
6.3,
7.2,
8.1,
9,
9.9,
11.8
]
}]
}, {
"type": "cube",
"info": ["medium", "cube"],
"mass": 0.75,
"choose": [{
"materialCategory": ["plastic"],
"salientMaterials": ["plastic", "hollow"]
}, {
"materialCategory": ["rubber"],
"salientMaterials": ["rubber"]
}, {
"materialCategory": ["wood"],
"salientMaterials": ["wood"]
}, {
"materialCategory": ["metal"],
"salientMaterials": ["metal"]
}],
"attributes": ["moveable", "pickupable"],
"dimensions": {
"x": 0.75,
"y": 0.75,
"z": 0.75
},
"position_y": 0.375,
"scale": {
"x": 0.75,
"y": 0.75,
"z": 0.75
},
"intphys_options": [{
"y": 0,
"force": {
"x": 300 * 0.75,
"y": 0,
"z": 0
},
"position_by_step": [
0.245,
0.484,
0.807,
1.208,
1.564,
1.794,
| |
return ret
# NOTE: Uncomment this code if PSSH should be parsed as a box container
@property
def childpos(self):
return self.offset+32
class saiz_box(full_box):
def __init__(self, *args):
full_box.__init__(self, *args)
@property
def default_sample_info_size(self):
offset = 12
        if self.flags & 1:
offset += 8
return struct.unpack('>B', self.fmap[self.offset+offset:self.offset+offset+1])[0]
@property
def sample_count(self):
offset = 13
        if self.flags & 1:
offset += 8
return struct.unpack('>I', self.fmap[self.offset+offset:self.offset+offset+4])[0]
def sample_info_size(self, index):
if self.default_sample_info_size != 0:
return self.default_sample_info_size
info_offset_base = 17
        if self.flags & 1:
info_offset_base += 8
sample_offset = self.offset + info_offset_base + index
return struct.unpack('>B', self.fmap[sample_offset:sample_offset+1])[0]
@property
def decoration(self):
base = '#samples: {0} default size: {1}'.format(self.sample_count, self.default_sample_info_size)
entries = ['\n']
if VERBOSE > 1:
if self.default_sample_info_size == 0:
for i in range(self.sample_count):
sample_info_size = self.sample_info_size(i)
entries.append(' - #{index:03d} sample info size:{0:3d}\n'.format(sample_info_size, index=i))
return base + ''.join(entries)[:-1]
class saio_box(full_box):
def __init__(self, *args):
full_box.__init__(self, *args)
#print dump_hex(self.fmap[self.offset:self.offset+self.size])
@property
def entry_count(self):
offset = 12
        if self.flags & 1:
offset += 8
return struct.unpack('>I', self.fmap[self.offset+offset:self.offset+offset+4])[0]
    def entry_offset(self, index):
        offset = 16
        if self.flags & 1:
            offset += 8
        if self.version == 1:
            # 64-bit entry offsets when the saio box version is 1
            offset += index * 8
            return struct.unpack('>Q', self.fmap[self.offset+offset:self.offset+offset+8])[0]
        else:
            # 32-bit entry offsets for version 0
            offset += index * 4
            return struct.unpack('>I', self.fmap[self.offset+offset:self.offset+offset+4])[0]
@property
def decoration(self):
base = '#entries: {0}'.format(self.entry_count)
entries = ['\n']
if VERBOSE > 1:
for i in range(self.entry_count):
offset = self.entry_offset(i)
entries.append(' - #{0:03d} offset:{1:d}\n'.format(i, offset))
return base + ''.join(entries)[:-1]
class sbgp_box(full_box):
def __init__(self, *args):
full_box.__init__(self, *args)
#print 'sbgp'
#print dump_hex(self.fmap[self.offset:self.offset+self.size])
@property
def grouping_type(self):
return self.fmap[self.offset+12:self.offset+16]
@property
def entries(self):
return struct.unpack('>I', self.fmap[self.offset+16:self.offset+20])[0]
def group_entry(self, index):
base_offset = 20 + (self.version and 4 or 0)
entry_offset = base_offset + 8 * index
if entry_offset > self.size:
return 0, 0
offset = self.offset + entry_offset
sample_count = struct.unpack('>I', self.fmap[offset:offset+4])[0]
group_description_index = struct.unpack('>I', self.fmap[offset+4:offset+8])[0]
return sample_count, group_description_index
@property
def decoration(self):
base = 'grouping:%s #entries:%d' % (self.grouping_type, self.entries)
entries = ['\n']
if VERBOSE > 1:
for i in range(self.entries):
data = self.group_entry(i)
entries.append(' - #{index:03d} sample count:{0:3d} group descr index:{1:3d}\n'.format(*data, index=i))
return base + ''.join(entries)[:-1]
class sgpd_box(full_box):
def __init__(self, *args):
full_box.__init__(self, *args)
#print 'sgpd'
#print dump_hex(self.fmap[self.offset:self.offset+self.size])
@property
def grouping_type(self):
return self.fmap[self.offset+12:self.offset+16]
@property
def entries(self):
o = (self.version and 4 or 0)
return struct.unpack('>I', self.fmap[self.offset+o+16:self.offset+o+20])[0]
def entry(self, index):
base_offset = 20 + (self.version and 4 or 0)
entry_offset = base_offset + 20 * index
if entry_offset > self.size:
return 0, 0, ''
offset = self.offset + entry_offset
is_encrypted = struct.unpack('>i', '\x00'+self.fmap[offset:offset+3])[0]
iv_size = struct.unpack('>b', self.fmap[offset+3:offset+4])[0]
kid = self.fmap[offset+4:offset+20]
return is_encrypted, iv_size, kid
def entry_data(self, index):
base_offset = 20 + (self.version and 4 or 0)
entry_offset = base_offset + 20 * index
if entry_offset > self.size:
return ''
offset = self.offset + entry_offset
return self.fmap[offset:offset+20]
@property
def decoration(self):
base = 'grouping:%s #entries:%d' % (self.grouping_type, self.entries)
entries = ['\n']
if VERBOSE > 1:
for i in range(self.entries):
is_enc, iv_size, kid = self.entry(i)
entry = ' - #{0:03d} enc:{1}'.format(i, is_enc)
if is_enc != 0:
entry = entry + ' iv size:{0} kid:{1}'.format(iv_size, ''.join(["%02X"%ord(x) for x in kid]))
entries.append(entry + '\n')
return base + ''.join(entries)[:-1]
class senc_box(full_box):
def __init__(self, *args):
full_box.__init__(self, *args)
i = parse_generator(self.fmap[self.offset+12:self.offset+self.size])
i.next() # prime
self.sample_count = i.send('>I')[0]
self.samples = []
def_iv_size = 8
for j in range(0, self.sample_count):
iv = i.send('>Q')[0]
iv_2 = hex(iv)
self.samples.append(iv_2)
# TODO: subsamples
@property
def decoration(self):
base = '#samples: {0}'.format(self.sample_count)
entries = ['\n']
if VERBOSE > 1:
for i in range(self.sample_count):
sample = self.samples[i]
entries.append(' - #{index:03d} iv:{0}\n'.format(sample, index=i))
return base + ''.join(entries)[:-1]
class genc_box(full_box):
def __init__(self, *args):
full_box.__init__(self, *args)
self._sample_map = {}
#print dump_hex(self.fmap[self.offset:self.offset+self.size])
def _init_sample_map_from_sbgp(self, tenc):
sbgp = self.get_sibling('sbgp')
if not sbgp:
return
sgpd = self.get_sibling('sgpd')
entry_index = 0
for i in range(sbgp.entries):
count, group_index = sbgp.group_entry(i)
if group_index == 0:
# No group. Use default tenc values
enc = tenc.is_encrypted
iv_size = tenc.iv_size
kid = tenc.key_id
else:
# group defined. use values from sgpd
enc, iv_size, kid = sgpd.entry(group_index-1)
for sample_index in range(count):
self._sample_map[entry_index + sample_index] = (enc, iv_size, kid)
entry_index += count
def _init_sample_map(self):
self._sample_map = {}
tfhd = self.get_sibling('tfhd')
tenc = self.get_tenc_for_track_id(tfhd.track_id)
self._init_sample_map_from_sbgp(tenc)
saiz = self.get_sibling('saiz')
#saio = self.get_sibling('saio')
#moof = self.get_ancestor('moof')
#sample_offset = moof.offset + saio.entry_offset(0)
for i in range(saiz.sample_count):
#sample_info_size = saiz.sample_info_size(i)
if not i in self._sample_map:
self._sample_map[i] = (tenc.is_encrypted, tenc.iv_size, tenc.key_id)
#sample_offset += sample_info_size
def sample_encrypted_info(self, index):
if index in self._sample_map:
is_enc, iv_size, kid = self._sample_map[index]
return is_enc, iv_size, kid
return (0, 0, '')
def get_sibling(self, type_):
box_list = self.parent.children
#pindex = self.parent.children.index(self)
# print('my index of parent: %d' % (pindex))
for box_ in box_list:
if box_.type == type_:
return box_
return None
def get_ancestor(self, type_):
p = self
while p.parent:
p = p.parent
if p.type == type_:
return p
return None
def get_tenc_for_track_id(self, track_id):
trak_boxes = self.root.find_all('moov.trak')
for box_ in trak_boxes:
tkhd = box_.find('tkhd')
if tkhd.track_id == track_id:
return box_.find('mdia.minf.stbl.stsd.sinf.schi.tenc')
return None
@property
def decoration(self):
self._init_sample_map()
saiz = self.get_sibling('saiz')
saio = self.get_sibling('saio')
tfhd = self.get_sibling('tfhd')
#tenc = self.get_tenc_for_track_id(tfhd.track_id)
moof = self.get_ancestor('moof')
sample_offset = moof.offset + saio.entry_offset(0)
base = ' #aux data: {0}'.format(saiz.sample_count)
entries = ['\n']
if VERBOSE > 1:
for i in range(saiz.sample_count):
sample_info_size = saiz.sample_info_size(i)
#sample_data = self.fmap[sample_offset:sample_offset+sample_info_size]
is_encrypted, iv_size, kid = self.sample_encrypted_info(i)
entry = ' - index:{0:03d} enc: {1}'.format(i, is_encrypted)
if is_encrypted != 0:
iv = self.fmap[sample_offset:sample_offset+iv_size]
entry = entry + ' iv:0x{0} kid:{1}'.format(''.join(["%02X"%ord(x) for x in iv]), \
''.join(["%02X"%ord(x) for x in kid]))
if sample_info_size > iv_size:
a = sample_offset + iv_size
b = a + 2
sub_sample_count = struct.unpack('>h', self.fmap[a : b])[0]
entry = entry + ' #sub samples:{0}'.format(sub_sample_count)
for s in range(sub_sample_count):
sub_sample_offset = sample_offset+iv_size+2+s*6
off = sub_sample_offset
clear_data_size = struct.unpack('>H', self.fmap[off:off + 2])[0]
encrypted_data_size = struct.unpack('>I', self.fmap[off + 2 : off + 6])[0]
entry = entry + '\n - - sub sample:{0:03d} clear chunk:{1} encrypted chunk:{2}'\
.format(s, clear_data_size, encrypted_data_size)
entries.append(entry + '\n')
sample_offset += sample_info_size
return base + ''.join(entries)[:-1]
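# Summary of the CENC-related boxes above: 'saiz' holds per-sample auxiliary-data sizes,
# 'saio' holds the offset of that data (resolved here relative to the enclosing 'moof'),
# 'tenc' supplies the track defaults (encrypted flag, IV size, KID), and 'sbgp'/'sgpd'
# sample groups override those defaults for runs of samples. genc_box combines them to
# print each sample's IV and optional (clear size, encrypted size) subsample pairs when
# VERBOSE > 1.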
class SampleEntry(box):
def __init__(self, *args):
box.__init__(self, *args)
@property
def data_reference_index(self):
return struct.unpack('>H', self.fmap[self.offset+14:self.offset+16])[0]
def getDescriptorLen(i):
tmp = i.send('>B')[0]
len_ = 0
while tmp & 0x80:
len_ = ((len_ << 7) | (tmp & 0x7f))
tmp = i.send('>B')[0]
len_ = ((len_ << 7) | (tmp & 0x7f))
return len_
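# Illustrative sketch, not used by the parser above: the same MPEG-4 "expandable size"
# coding as getDescriptorLen, but over a plain list of byte values so it can be tried in
# isolation. Each byte contributes its low 7 bits; a set high bit means another byte
# follows. The helper name is ours, not part of the original tool.
def decode_descriptor_len_demo(byte_values):
    # e.g. [0x81, 0x22] -> (0x01 << 7) | 0x22 == 162, and [0x05] -> 5
    length = 0
    for b in byte_values:
        length = (length << 7) | (b & 0x7f)
        if not b & 0x80:
            break
    return length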
class esds_box(box):
def __init__(self, *args):
box.__init__(self, *args)
self.cfg = ''
i = parse_generator(self.fmap[self.offset+8:self.offset+self.size])
i.next() # prime
vf = i.send('>I')[0]
tag1 = i.send('>B')[0]
if tag1 == 3:
l = getDescriptorLen(i)
i.send('>B')[0]
i.send('>B')[0]
i.send('>B')[0]
tag2 = i.send('>B')[0]
if tag2 == 4:
l = getDescriptorLen(i)
obj_type = i.send('>B')[0]
stream_type = i.send('>B')[0]
i.send('>B')[0]
i.send('>B')[0]
i.send('>I')[0]
i.send('>I')[0]
i.send('>B')[0]
tag3 = i.send('>B')[0]
if tag3 == 5:
l = getDescriptorLen(i)
cfg = []
for p in range(0, l):
X = i.send('>B')[0]
cfg.append(X)
cfg_str = '0x' + ''.join(['%02x' % c for c in cfg])
self.cfg = cfg_str
self.decoration = 'cfg={0}, obj_type={1}, stream_type={2}'\
.format(cfg_str, obj_type, stream_type)
class mp4a_box(SampleEntry):
def __init__(self, *args):
SampleEntry.__init__(self, *args)
self.channels = struct.unpack('>h', self.fmap[self.offset+24:self.offset+26])[0]
self.sample_size = struct.unpack('>h', self.fmap[self.offset+26:self.offset+28])[0]
self.sample_rate = struct.unpack('>I', self.fmap[self.offset+32:self.offset+36])[0] >> 16
self.decoration = 'index:{0} channels:{1} sample size:{2} sample rate:{3}'\
.format(self.data_reference_index, self.channels, self.sample_size, self.sample_rate)
@property
def childpos(self):
return self.offset+36
class ac_3_box(SampleEntry):
def __init__(self, *args):
SampleEntry.__init__(self, *args)
channels = struct.unpack('>h', self.fmap[self.offset+24:self.offset+26])[0]
sample_size = struct.unpack('>h', self.fmap[self.offset+26:self.offset+28])[0]
sample_rate = struct.unpack('>I', self.fmap[self.offset+32:self.offset+36])[0] >> 16
self.decoration = 'index:{0} channels:{1} sample size:{2} sample rate:{3}'\
.format(self.data_reference_index, channels, sample_size, sample_rate)
@property
def childpos(self):
return self.offset+36
class ec_3_box(SampleEntry):
def __init__(self, *args):
SampleEntry.__init__(self, *args)
channels = struct.unpack('>h', self.fmap[self.offset+24:self.offset+26])[0]
sample_size = struct.unpack('>h', self.fmap[self.offset+26:self.offset+28])[0]
sample_rate = struct.unpack('>I', self.fmap[self.offset+32:self.offset+36])[0] >> 16
self.decoration = 'index:{0} channels:{1} sample size:{2} sample rate:{3}'\
.format(self.data_reference_index, channels, sample_size, sample_rate)
@property
def childpos(self):
return self.offset+36
class dac3_box(SampleEntry):
def __init__(self, *args):
SampleEntry.__init__(self, *args)
self.dec_info = self.fmap[self.offset+8:self.offset+self.size]
self.dec_info_hex = ''.join(['%02x' % ord(c) for c in self.dec_info])
self.decoration = 'dec_info={0}'.format(self.dec_info_hex)
class dec3_box(SampleEntry):
def __init__(self, *args):
SampleEntry.__init__(self, *args)
self.dec_info = self.fmap[self.offset+8:self.offset+self.size]
self.dec_info_hex = ''.join(['%02x' % ord(c) for c in self.dec_info])
self.decoration = 'dec_info={0}'.format(self.dec_info_hex)
class enca_box(mp4a_box):
def __init__(self, *args):
mp4a_box.__init__(self, *args)
class mp4v_box(SampleEntry):
def __init__(self, *args):
SampleEntry.__init__(self, *args)
width = struct.unpack('>h', self.fmap[self.offset+32:self.offset+34])[0]
height = struct.unpack('>h', self.fmap[self.offset+34:self.offset+36])[0]
self.decoration = 'index:{0} width:{1} height:{2}'\
.format(self.data_reference_index, width, height)
class avcx_box(SampleEntry):
def __init__(self, *args):
SampleEntry.__init__(self, *args)
#print dump_hex(self.fmap[self.offset:self.offset+self.size])
self.width = struct.unpack('>h', self.fmap[self.offset+32:self.offset+34])[0]
self.height = struct.unpack('>h', self.fmap[self.offset+34:self.offset+36])[0]
res_hori = struct.unpack('>I', self.fmap[self.offset+36:self.offset+40])[0]
res_vert = struct.unpack('>I', self.fmap[self.offset+40:self.offset+44])[0]
frame_count = struct.unpack('>h', self.fmap[self.offset+48:self.offset+50])[0]
compressor = str(self.fmap[self.offset+50:self.offset+82])
depth = struct.unpack('>h', self.fmap[self.offset+82:self.offset+84])[0]
| |
<filename>lsteamclient/gen_wrapper.py
#!/usr/bin/env python3
#NOTE: If you make modifications here, consider whether they should
#be duplicated in ../vrclient/gen_wrapper.py
from __future__ import print_function
CLANG_PATH='/usr/lib/clang/13.0.0'
from clang.cindex import CursorKind, Index, Type, TypeKind
import pprint
import sys
import os
import re
import math
sdk_versions = [
"153a",
"152",
"151",
"150",
"149",
"148a",
"147",
"146",
"145",
"144",
"143y",
"143x",
"143",
"142",
"141",
"140",
"139",
"138a",
"138",
"137",
"136",
"135a",
"135",
"134",
"133x",
"133b",
"133a",
"133",
"132x",
"132",
"131",
"130x",
"130",
"129a",
"129",
"128x",
"128",
"127",
"126a",
"126",
"125",
"124",
"123a",
"123",
"122",
"121x",
"121",
"120",
"119x",
"119",
"118",
"117",
"116x",
"116",
"115",
"114",
"113",
"112x",
"112",
"111x",
"111",
"110",
"109",
"108",
"107",
"106",
"105",
"104",
"103",
"102x",
"102",
"101x",
"101",
"100",
"099y",
"099x",
"099w",
"099v",
"099u",
]
files = [
("steam_api.h", [
"ISteamApps",
"ISteamAppList",
"ISteamClient",
"ISteamController",
"ISteamGameSearch",
"ISteamFriends",
"ISteamHTMLSurface",
"ISteamHTTP",
"ISteamInput",
"ISteamInventory",
"ISteamMatchmaking",
"ISteamMatchmakingServers",
"ISteamMusic",
"ISteamMusicRemote",
"ISteamNetworking",
"ISteamParties",
"ISteamRemotePlay",
"ISteamRemoteStorage",
"ISteamScreenshots",
"ISteamUGC",
"ISteamUnifiedMessages",
"ISteamUser",
"ISteamUserStats",
"ISteamUtils",
"ISteamVideo"
]),
("isteamappticket.h", [
"ISteamAppTicket"
]),
("isteamgameserver.h", [
"ISteamGameServer"
]),
("isteamgameserverstats.h", [
"ISteamGameServerStats"
]),
("isteamgamestats.h", [
"ISteamGameStats"
]),
("isteammasterserverupdater.h", [
"ISteamMasterServerUpdater"
]),
("isteamgamecoordinator.h", [
"ISteamGameCoordinator"
]),
("isteamparentalsettings.h", [
"ISteamParentalSettings"
]),
("isteamnetworkingmessages.h", [
"ISteamNetworkingMessages"
]),
("isteamnetworkingsockets.h", [
"ISteamNetworkingSockets"
]),
("isteamnetworkingsocketsserialized.h", [
"ISteamNetworkingSocketsSerialized"
]),
("isteamnetworkingutils.h", [
"ISteamNetworkingUtils"
]),
("steamnetworkingfakeip.h", [
"ISteamNetworkingFakeUDPPort"
]),
]
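# 'files' maps each Steam SDK header to the interface classes whose wrappers are generated
# from it; 'sdk_versions' above lists the SDK releases the generator runs against, newest
# first.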
aliases = {
#these interfaces are undocumented and binary compatible
#"target interface": ["alias 1", "alias 2"],
"SteamUtils004":["SteamUtils003"],
"SteamUtils002":["SteamUtils001"],
"SteamGameServer008":["SteamGameServer007","SteamGameServer006"],
"SteamNetworkingSocketsSerialized002":["SteamNetworkingSocketsSerialized001"],
"STEAMAPPS_INTERFACE_VERSION001":["SteamApps001"],
"STEAMAPPS_INTERFACE_VERSION001":["SteamApps001"],
"SteamNetworkingSockets002":["SteamNetworkingSockets003"],
}
# these structs are manually confirmed to be equivalent
exempt_structs = [
"CSteamID",
"CGameID",
"CCallbackBase",
"SteamPS3Params_t",
"ValvePackingSentinel_t"
]
# we have converters for these written by hand because they're too complicated to generate
manually_handled_structs = [
"SteamNetworkingMessage_t"
]
manually_handled_methods = {
#TODO: 001
"cppISteamNetworkingSockets_SteamNetworkingSockets002": [
"ReceiveMessagesOnConnection",
"ReceiveMessagesOnListenSocket"
],
# 003 never appeared in a public SDK, but is an alias for 002 (the version in SDK 1.45 is actually 004 but incorrectly versioned as 003)
"cppISteamNetworkingSockets_SteamNetworkingSockets004": [
"ReceiveMessagesOnConnection",
"ReceiveMessagesOnListenSocket",
],
#TODO: 005
"cppISteamNetworkingSockets_SteamNetworkingSockets006": [
"ReceiveMessagesOnConnection",
"ReceiveMessagesOnListenSocket",
"SendMessages"
],
#TODO: 007
"cppISteamNetworkingSockets_SteamNetworkingSockets008": [
"ReceiveMessagesOnConnection",
"ReceiveMessagesOnPollGroup",
"SendMessages"
],
"cppISteamNetworkingSockets_SteamNetworkingSockets009": [
"ReceiveMessagesOnConnection",
"ReceiveMessagesOnPollGroup",
"SendMessages"
],
"cppISteamNetworkingSockets_SteamNetworkingSockets012": [
"ReceiveMessagesOnConnection",
"ReceiveMessagesOnPollGroup",
"SendMessages",
"CreateFakeUDPPort"
],
"cppISteamNetworkingUtils_SteamNetworkingUtils003": [
"AllocateMessage",
"SetConfigValue",
],
"cppISteamNetworkingUtils_SteamNetworkingUtils004": [
"AllocateMessage",
"SetConfigValue",
],
"cppISteamNetworkingMessages_SteamNetworkingMessages002": [
"ReceiveMessagesOnChannel"
],
"cppISteamInput_SteamInput001": [
"GetGlyphForActionOrigin",
"GetGlyphForXboxOrigin"
],
"cppISteamInput_SteamInput002": [
"GetGlyphForActionOrigin",
"GetGlyphForXboxOrigin"
],
"cppISteamInput_SteamInput005": [
"EnableActionEventCallbacks",
"GetGlyphPNGForActionOrigin",
"GetGlyphSVGForActionOrigin",
"GetGlyphForActionOrigin_Legacy",
"GetGlyphForXboxOrigin"
],
"cppISteamController_SteamController005": [
"GetGlyphForActionOrigin"
],
"cppISteamController_SteamController006": [
"GetGlyphForActionOrigin"
],
"cppISteamController_SteamController007": [
"GetGlyphForActionOrigin",
"GetGlyphForXboxOrigin"
],
"cppISteamController_SteamController008": [
"GetGlyphForActionOrigin",
"GetGlyphForXboxOrigin"
],
"cppISteamNetworkingFakeUDPPort_SteamNetworkingFakeUDPPort001": [
"DestroyFakeUDPPort",
"ReceiveMessages"
],
}
# manual converters for simple types (function pointers)
manual_type_converters = [
"FSteamNetworkingSocketsDebugOutput",
"SteamAPI_CheckCallbackRegistered_t"
]
# manual converters for specific parameters
manual_param_converters = [
"nNativeKeyCode"
]
#struct_conversion_cache = {
# '142': {
# 'SteamUGCDetails_t': True,
# 'SteamUGCQueryCompleted_t': False
# }
#}
struct_conversion_cache = {}
converted_structs = []
# callback classes for which we have a linux wrapper
wrapped_classes = [
"ISteamMatchmakingServerListResponse",
"ISteamMatchmakingPingResponse",
"ISteamMatchmakingPlayersResponse",
"ISteamMatchmakingRulesResponse",
"ISteamNetworkingFakeUDPPort",
]
print_sizes = []
class_versions = {}
path_conversions = [
{
"parent_name": "GetAppInstallDir",
"l2w_names": ["pchDirectory"],
"l2w_lens": ["cchNameMax"],
"l2w_urls": [False],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": True
},
{
"parent_name": "GetAppInstallDir",
"l2w_names": ["pchFolder"],
"l2w_lens": ["cchFolderBufferSize"],
"l2w_urls": [False],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": True
},
{
"parent_name": "GetFileDetails",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pszFileName"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": True
},
### ISteamGameServer::SetModDir - "Just the folder name, not the whole path. I.e. "Spacewar"."
{
"parent_name": "LoadURL",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchURL"],
"w2l_arrays": [False],
"w2l_urls": [True],
"return_is_size": False
},
{
"parent_name": "FileLoadDialogResponse",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchSelectedFiles"],
"w2l_arrays": [True],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "HTML_StartRequest_t",
"l2w_names": ["pchURL"],
"l2w_lens": [None],
"l2w_urls": [True],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "HTML_URLChanged_t",
"l2w_names": ["pchURL"],
"l2w_lens": [None],
"l2w_urls": [True],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "HTML_FinishedRequest_t",
"l2w_names": ["pchURL"],
"l2w_lens": [None],
"l2w_urls": [True],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "HTML_OpenLinkInNewTab_t",
"l2w_names": ["pchURL"],
"l2w_lens": [None],
"l2w_urls": [True],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "HTML_LinkAtPosition_t",
"l2w_names": ["pchURL"],
"l2w_lens": [None],
"l2w_urls": [True],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "HTML_FileOpenDialog_t",
"l2w_names": ["pchInitialFile"],
"l2w_lens": [None],
"l2w_urls": [True],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "HTML_NewWindow_t",
"l2w_names": ["pchURL"],
"l2w_lens": [None],
"l2w_urls": [True],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "PublishWorkshopFile",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchFile", "pchPreviewFile"],
"w2l_arrays": [False, False],
"w2l_urls": [False, False],
"return_is_size": False
},
{
"parent_name": "UpdatePublishedFileFile",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchFile"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "UpdatePublishedFilePreviewFile",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchPreviewFile"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "PublishVideo",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchPreviewFile"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "AddScreenshotToLibrary",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchFilename", "pchThumbnailFilename"],
"w2l_arrays": [False, False],
"w2l_urls": [False, False],
"return_is_size": False
},
{
"parent_name": "AddVRScreenshotToLibrary",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchFilename", "pchVRFilename"],
"w2l_arrays": [False, False],
"w2l_urls": [False, False],
"return_is_size": False
},
{
"parent_name": "UGCDownloadToLocation",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchLocation"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "GetQueryUGCAdditionalPreview",
"l2w_names": ["pchURLOrVideoID"],
"l2w_lens": ["cchURLSize"],
"l2w_urls": [True],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "SetItemContent",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pszContentFolder"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "SetItemPreview",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pszPreviewFile"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "AddItemPreviewFile",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pszPreviewFile"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "UpdateItemPreviewFile",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pszPreviewFile"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "GetItemInstallInfo",
"l2w_names": ["pchFolder"],
"l2w_lens": ["cchFolderSize"],
"l2w_urls": [False],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "BInitWorkshopForGameServer",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pszFolder"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "GetUserDataFolder",
"l2w_names": ["pchBuffer"],
"l2w_lens": ["cubBuffer"],
"l2w_urls": [False],
"w2l_names": [],
"w2l_arrays": [],
"w2l_urls": [],
"return_is_size": False
},
{
"parent_name": "CheckFileSignature",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["szFileName"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "Init",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchAbsolutePathToControllerConfigVDF"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
{
"parent_name": "SetInputActionManifestFilePath",
"l2w_names": [],
"l2w_lens": [],
"l2w_urls": [],
"w2l_names": ["pchInputActionManifestAbsolutePath"],
"w2l_arrays": [False],
"w2l_urls": [False],
"return_is_size": False
},
]
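# Each path_conversions entry above describes, for one method or callback struct, the
# string parameters that carry filesystem paths (or URLs) and must be converted between
# the Linux and Windows representations: 'w2l_*' entries are inputs converted
# Windows-to-Linux before the native call (optionally NUL-separated arrays), 'l2w_*'
# entries are output buffers converted back Linux-to-Windows along with their length
# parameters, and 'return_is_size' marks methods whose return value reports the written
# string length.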
def strip_const(typename):
return typename.replace("const ", "", 1)
windows_structs32 = {}
def find_windows_struct(struct):
return windows_structs32.get(strip_const(struct.spelling), None)
windows_structs64 = {}
def find_windows64_struct(struct):
return windows_structs64.get(strip_const(struct.spelling), None)
linux_structs64 = {}
def find_linux64_struct(struct):
return linux_structs64.get(strip_const(struct.spelling), None)
def struct_needs_conversion_nocache(struct):
if strip_const(struct.spelling) in exempt_structs:
return False
if strip_const(struct.spelling) in manually_handled_structs:
return True
#check 32-bit compat
windows_struct = find_windows_struct(struct)
if windows_struct is None:
print("Couldn't find windows struct for " + struct.spelling)
assert(not windows_struct is None) #must find windows_struct
for field in struct.get_fields():
if struct.get_offset(field.spelling) != windows_struct.get_offset(field.spelling):
return True
if field.type.kind == TypeKind.RECORD and \
struct_needs_conversion(field.type):
return True
#check 64-bit compat
windows_struct = find_windows64_struct(struct)
assert(not windows_struct is None) #must find windows_struct
lin64_struct = find_linux64_struct(struct)
assert(not lin64_struct is None) #must find lin64_struct
for field in lin64_struct.get_fields():
if lin64_struct.get_offset(field.spelling) != windows_struct.get_offset(field.spelling):
return True
if field.type.kind == TypeKind.RECORD and \
struct_needs_conversion(field.type):
return True
#check if any members need path conversion
path_conv = get_path_converter(struct)
if path_conv:
return True
return False
def struct_needs_conversion(struct):
if not sdkver in struct_conversion_cache:
struct_conversion_cache[sdkver] = {}
if not strip_const(struct.spelling) in struct_conversion_cache[sdkver]:
struct_conversion_cache[sdkver][strip_const(struct.spelling)] = struct_needs_conversion_nocache(struct)
return struct_conversion_cache[sdkver][strip_const(struct.spelling)]
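# struct_needs_conversion caches the clang field-offset comparison per (SDK version,
# struct name), since the same struct can change layout between the sdk_versions listed
# above and the check would otherwise be repeated many times.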
def handle_destructor(cfile, classname, winclassname, method):
cfile.write(f"DEFINE_THISCALL_WRAPPER({winclassname}_destructor, 4)\n")
cfile.write(f"void __thiscall {winclassname}_destructor({winclassname} *_this)\n{{/* never called */}}\n\n")
return "destructor"
def get_path_converter(parent):
for conv in path_conversions:
if conv["parent_name"] in parent.spelling:
if None in conv["l2w_names"]:
return conv
if type(parent) == Type:
children = list(parent.get_fields())
else:
children = list(parent.get_children())
for child in children:
if child.spelling in conv["w2l_names"] or \
child.spelling in conv["l2w_names"]:
return conv
return None
class DummyWriter(object):
def write(self, s):
#noop
pass
def to_c_bool(b):
if b:
return "1"
return "0"
dummy_writer = DummyWriter()
def handle_method(cfile, classname, winclassname, cppname, method, cpp, cpp_h, existing_methods):
used_name = method.spelling
if used_name in existing_methods:
number = '2'
while used_name in existing_methods:
idx = existing_methods.index(used_name)
used_name = f"{method.spelling}_{number}"
number = chr(ord(number) + 1)
existing_methods.insert(idx, used_name)
else:
existing_methods.append(used_name)
returns_record = method.result_type.get_canonical().kind == TypeKind.RECORD
if returns_record:
parambytes = 8 #_this + return pointer
else:
parambytes = 4 #_this
for param in list(method.get_children()):
if param.kind == CursorKind.PARM_DECL:
if param.type.kind == | |
'type': 'bool'},
'status': {'key': 'properties.status', 'type': 'str'},
'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
location: Optional[str] = None,
auto_delete_on_idle: Optional[str] = None,
default_message_time_to_live: Optional[str] = None,
dead_lettering_on_filter_evaluation_exceptions: Optional[bool] = None,
dead_lettering_on_message_expiration: Optional[bool] = None,
enable_batched_operations: Optional[bool] = None,
entity_availability_status: Optional[Union[str, "EntityAvailabilityStatus"]] = None,
is_read_only: Optional[bool] = None,
lock_duration: Optional[str] = None,
max_delivery_count: Optional[int] = None,
requires_session: Optional[bool] = None,
status: Optional[Union[str, "EntityStatus"]] = None,
**kwargs
):
super(SubscriptionResource, self).__init__(location=location, **kwargs)
self.accessed_at = None
self.auto_delete_on_idle = auto_delete_on_idle
self.count_details = None
self.created_at = None
self.default_message_time_to_live = default_message_time_to_live
self.dead_lettering_on_filter_evaluation_exceptions = dead_lettering_on_filter_evaluation_exceptions
self.dead_lettering_on_message_expiration = dead_lettering_on_message_expiration
self.enable_batched_operations = enable_batched_operations
self.entity_availability_status = entity_availability_status
self.is_read_only = is_read_only
self.lock_duration = lock_duration
self.max_delivery_count = max_delivery_count
self.message_count = None
self.requires_session = requires_session
self.status = status
self.updated_at = None
class TopicCreateOrUpdateParameters(msrest.serialization.Model):
"""Parameters supplied to the Create Or Update Topic operation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Topic name.
:type name: str
:param location: Required. Location of the resource.
:type location: str
:ivar accessed_at: Last time the message was sent, or a request was received, for this topic.
:vartype accessed_at: ~datetime.datetime
:param auto_delete_on_idle: TimeSpan idle interval after which the topic is automatically
deleted. The minimum duration is 5 minutes.
:type auto_delete_on_idle: str
:param entity_availability_status: Entity availability status for the topic. Possible values
include: "Available", "Limited", "Renaming", "Restoring", "Unknown".
:type entity_availability_status: str or
~azure.mgmt.servicebus.v2015_08_01.models.EntityAvailabilityStatus
:ivar created_at: Exact time the message was created.
:vartype created_at: ~datetime.datetime
:ivar count_details: Message Count Details.
:vartype count_details: ~azure.mgmt.servicebus.v2015_08_01.models.MessageCountDetails
:param default_message_time_to_live: Default message time to live value. This is the duration
after which the message expires, starting from when the message is sent to Service Bus. This is
the default value used when TimeToLive is not set on a message itself.
:type default_message_time_to_live: str
:param duplicate_detection_history_time_window: TimeSpan structure that defines the duration of
the duplicate detection history. The default value is 10 minutes.
:type duplicate_detection_history_time_window: str
:param enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:param enable_express: Value that indicates whether Express Entities are enabled. An express
topic holds a message in memory temporarily before writing it to persistent storage.
:type enable_express: bool
:param enable_partitioning: Value that indicates whether the topic to be partitioned across
multiple message brokers is enabled.
:type enable_partitioning: bool
:param filtering_messages_before_publishing: Whether messages should be filtered before
publishing.
:type filtering_messages_before_publishing: bool
:param is_anonymous_accessible: Value that indicates whether the message is accessible
anonymously.
:type is_anonymous_accessible: bool
:param is_express:
:type is_express: bool
:param max_size_in_megabytes: Maximum size of the topic in megabytes, which is the size of the
memory allocated for the topic.
:type max_size_in_megabytes: long
:param requires_duplicate_detection: Value indicating if this topic requires duplicate
detection.
:type requires_duplicate_detection: bool
:ivar size_in_bytes: Size of the topic, in bytes.
:vartype size_in_bytes: long
:param status: Enumerates the possible values for the status of a messaging entity. Possible
values include: "Active", "Creating", "Deleting", "Disabled", "ReceiveDisabled", "Renaming",
"Restoring", "SendDisabled", "Unknown".
:type status: str or ~azure.mgmt.servicebus.v2015_08_01.models.EntityStatus
:ivar subscription_count: Number of subscriptions.
:vartype subscription_count: int
:param support_ordering: Value that indicates whether the topic supports ordering.
:type support_ordering: bool
:ivar updated_at: The exact time the message was updated.
:vartype updated_at: ~datetime.datetime
"""
_validation = {
'location': {'required': True},
'accessed_at': {'readonly': True},
'created_at': {'readonly': True},
'count_details': {'readonly': True},
'size_in_bytes': {'readonly': True},
'subscription_count': {'readonly': True},
'updated_at': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'accessed_at': {'key': 'properties.accessedAt', 'type': 'iso-8601'},
'auto_delete_on_idle': {'key': 'properties.autoDeleteOnIdle', 'type': 'str'},
'entity_availability_status': {'key': 'properties.entityAvailabilityStatus', 'type': 'str'},
'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
'count_details': {'key': 'properties.countDetails', 'type': 'MessageCountDetails'},
'default_message_time_to_live': {'key': 'properties.defaultMessageTimeToLive', 'type': 'str'},
'duplicate_detection_history_time_window': {'key': 'properties.duplicateDetectionHistoryTimeWindow', 'type': 'str'},
'enable_batched_operations': {'key': 'properties.enableBatchedOperations', 'type': 'bool'},
'enable_express': {'key': 'properties.enableExpress', 'type': 'bool'},
'enable_partitioning': {'key': 'properties.enablePartitioning', 'type': 'bool'},
'filtering_messages_before_publishing': {'key': 'properties.filteringMessagesBeforePublishing', 'type': 'bool'},
'is_anonymous_accessible': {'key': 'properties.isAnonymousAccessible', 'type': 'bool'},
'is_express': {'key': 'properties.isExpress', 'type': 'bool'},
'max_size_in_megabytes': {'key': 'properties.maxSizeInMegabytes', 'type': 'long'},
'requires_duplicate_detection': {'key': 'properties.requiresDuplicateDetection', 'type': 'bool'},
'size_in_bytes': {'key': 'properties.sizeInBytes', 'type': 'long'},
'status': {'key': 'properties.status', 'type': 'str'},
'subscription_count': {'key': 'properties.subscriptionCount', 'type': 'int'},
'support_ordering': {'key': 'properties.supportOrdering', 'type': 'bool'},
'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
location: str,
name: Optional[str] = None,
auto_delete_on_idle: Optional[str] = None,
entity_availability_status: Optional[Union[str, "EntityAvailabilityStatus"]] = None,
default_message_time_to_live: Optional[str] = None,
duplicate_detection_history_time_window: Optional[str] = None,
enable_batched_operations: Optional[bool] = None,
enable_express: Optional[bool] = None,
enable_partitioning: Optional[bool] = None,
filtering_messages_before_publishing: Optional[bool] = None,
is_anonymous_accessible: Optional[bool] = None,
is_express: Optional[bool] = None,
max_size_in_megabytes: Optional[int] = None,
requires_duplicate_detection: Optional[bool] = None,
status: Optional[Union[str, "EntityStatus"]] = None,
support_ordering: Optional[bool] = None,
**kwargs
):
super(TopicCreateOrUpdateParameters, self).__init__(**kwargs)
self.name = name
self.location = location
self.accessed_at = None
self.auto_delete_on_idle = auto_delete_on_idle
self.entity_availability_status = entity_availability_status
self.created_at = None
self.count_details = None
self.default_message_time_to_live = default_message_time_to_live
self.duplicate_detection_history_time_window = duplicate_detection_history_time_window
self.enable_batched_operations = enable_batched_operations
self.enable_express = enable_express
self.enable_partitioning = enable_partitioning
self.filtering_messages_before_publishing = filtering_messages_before_publishing
self.is_anonymous_accessible = is_anonymous_accessible
self.is_express = is_express
self.max_size_in_megabytes = max_size_in_megabytes
self.requires_duplicate_detection = requires_duplicate_detection
self.size_in_bytes = None
self.status = status
self.subscription_count = None
self.support_ordering = support_ordering
self.updated_at = None
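# Minimal usage sketch (values are placeholders, not defaults):
#
#   parameters = TopicCreateOrUpdateParameters(
#       location="westus",
#       enable_partitioning=True,
#       enable_batched_operations=True,
#   )
#
# Only 'location' is required; the read-only fields (accessed_at, created_at,
# count_details, size_in_bytes, subscription_count, updated_at) are populated by the
# service and stay None on the client side.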
class TopicListResult(msrest.serialization.Model):
"""The response to the List Topics operation.
:param value: Result of the List Topics operation.
:type value: list[~azure.mgmt.servicebus.v2015_08_01.models.TopicResource]
:param next_link: Link to the next set of results. Not empty if Value contains incomplete list
of topics.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[TopicResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["TopicResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(TopicListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class TopicResource(Resource):
"""Description of topic resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:param location: Resource location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:ivar accessed_at: Last time the message was sent, or a request was received, for this topic.
:vartype accessed_at: ~datetime.datetime
:param auto_delete_on_idle: TimeSpan idle interval after which the topic is automatically
deleted. The minimum duration is 5 minutes.
:type auto_delete_on_idle: str
:param entity_availability_status: Entity availability status for the topic. Possible values
include: "Available", "Limited", "Renaming", "Restoring", "Unknown".
:type entity_availability_status: str or
~azure.mgmt.servicebus.v2015_08_01.models.EntityAvailabilityStatus
:ivar created_at: Exact time the message was created.
:vartype created_at: ~datetime.datetime
:ivar count_details: Message Count Details.
:vartype count_details: ~azure.mgmt.servicebus.v2015_08_01.models.MessageCountDetails
:param default_message_time_to_live: Default message time to live value. This is the duration
after which the message expires, starting from when the message is sent to Service Bus. This is
the default value used when TimeToLive is not set on a message itself.
:type default_message_time_to_live: str
:param duplicate_detection_history_time_window: TimeSpan structure that defines the duration of
the duplicate detection history. The default value is 10 minutes.
:type duplicate_detection_history_time_window: str
:param enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:param enable_express: Value that indicates whether Express Entities are enabled. An express
topic holds a message in memory temporarily before writing it to persistent storage.
:type enable_express: bool
:param enable_partitioning: Value that indicates whether the topic to be partitioned across
multiple message brokers is enabled.
:type enable_partitioning: bool
:param filtering_messages_before_publishing: Whether messages should be filtered before
publishing.
:type filtering_messages_before_publishing: bool
:param is_anonymous_accessible: Value that indicates whether the message is accessible
anonymously.
:type is_anonymous_accessible: bool
:param is_express:
:type is_express: bool
:param max_size_in_megabytes: Maximum size of the topic in megabytes, which is the size of the
memory allocated for the topic.
:type max_size_in_megabytes: long
:param requires_duplicate_detection: Value indicating if this topic requires duplicate
detection.
:type requires_duplicate_detection: bool
:ivar size_in_bytes: Size of the topic, in bytes.
:vartype size_in_bytes: long
:param status: Enumerates the possible values for the status of a messaging entity. Possible
values include: "Active", "Creating", "Deleting", "Disabled", "ReceiveDisabled", "Renaming",
"Restoring", "SendDisabled", "Unknown".
:type status: str or ~azure.mgmt.servicebus.v2015_08_01.models.EntityStatus
:ivar subscription_count: Number of subscriptions.
:vartype subscription_count: int
:param support_ordering: Value that indicates whether the topic supports ordering.
:type support_ordering: bool
:ivar updated_at: The exact time the message was updated.
:vartype updated_at: ~datetime.datetime
"""
_validation = | |
<reponame>amdhacks/Cirq
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import numpy as np
from cirq.linalg import predicates
from cirq.linalg.tolerance import Tolerance
def test_is_diagonal():
assert predicates.is_diagonal(np.empty((0, 0)))
assert predicates.is_diagonal(np.empty((1, 0)))
assert predicates.is_diagonal(np.empty((0, 1)))
assert predicates.is_diagonal(np.array([[1]]))
assert predicates.is_diagonal(np.array([[-1]]))
assert predicates.is_diagonal(np.array([[5]]))
assert predicates.is_diagonal(np.array([[3j]]))
assert predicates.is_diagonal(np.array([[1, 0]]))
assert predicates.is_diagonal(np.array([[1], [0]]))
assert not predicates.is_diagonal(np.array([[1, 1]]))
assert not predicates.is_diagonal(np.array([[1], [1]]))
assert predicates.is_diagonal(np.array([[5j, 0], [0, 2]]))
assert predicates.is_diagonal(np.array([[1, 0], [0, 1]]))
assert not predicates.is_diagonal(np.array([[1, 0], [1, 1]]))
assert not predicates.is_diagonal(np.array([[1, 1], [0, 1]]))
assert not predicates.is_diagonal(np.array([[1, 1], [1, 1]]))
assert not predicates.is_diagonal(np.array([[1, 0.1], [0.1, 1]]))
assert predicates.is_diagonal(np.array([[1, 1e-11], [1e-10, 1]]))
def test_is_diagonal_tolerance():
tol = Tolerance(atol=0.5)
# Pays attention to specified tolerance.
assert predicates.is_diagonal(np.array([[1, 0], [-0.5, 1]]), tol)
assert not predicates.is_diagonal(np.array([[1, 0], [-0.6, 1]]), tol)
# Error isn't accumulated across entries.
assert predicates.is_diagonal(np.array([[1, 0.5], [-0.5, 1]]), tol)
assert not predicates.is_diagonal(np.array([[1, 0.5], [-0.6, 1]]), tol)
def test_is_hermitian():
assert predicates.is_hermitian(np.empty((0, 0)))
assert not predicates.is_hermitian(np.empty((1, 0)))
assert not predicates.is_hermitian(np.empty((0, 1)))
assert predicates.is_hermitian(np.array([[1]]))
assert predicates.is_hermitian(np.array([[-1]]))
assert predicates.is_hermitian(np.array([[5]]))
assert not predicates.is_hermitian(np.array([[3j]]))
assert not predicates.is_hermitian(np.array([[0, 0]]))
assert not predicates.is_hermitian(np.array([[0], [0]]))
assert not predicates.is_hermitian(np.array([[5j, 0], [0, 2]]))
assert predicates.is_hermitian(np.array([[5, 0], [0, 2]]))
assert predicates.is_hermitian(np.array([[1, 0], [0, 1]]))
assert not predicates.is_hermitian(np.array([[1, 0], [1, 1]]))
assert not predicates.is_hermitian(np.array([[1, 1], [0, 1]]))
assert predicates.is_hermitian(np.array([[1, 1], [1, 1]]))
assert predicates.is_hermitian(np.array([[1, 1j], [-1j, 1]]))
assert predicates.is_hermitian(np.array([[1, 1j], [-1j, 1]]) * np.sqrt(0.5))
assert not predicates.is_hermitian(np.array([[1, 1j], [1j, 1]]))
assert not predicates.is_hermitian(np.array([[1, 0.1], [-0.1, 1]]))
assert predicates.is_hermitian(
np.array([[1, 1j + 1e-11], [-1j, 1 + 1j * 1e-9]]))
def test_is_hermitian_tolerance():
tol = Tolerance(atol=0.5)
# Pays attention to specified tolerance.
assert predicates.is_hermitian(np.array([[1, 0], [-0.5, 1]]), tol)
assert predicates.is_hermitian(np.array([[1, 0.25], [-0.25, 1]]), tol)
assert not predicates.is_hermitian(np.array([[1, 0], [-0.6, 1]]), tol)
assert not predicates.is_hermitian(np.array([[1, 0.25], [-0.35, 1]]), tol)
# Error isn't accumulated across entries.
assert predicates.is_hermitian(
np.array([[1, 0.5, 0.5], [0, 1, 0], [0, 0, 1]]), tol)
assert not predicates.is_hermitian(
np.array([[1, 0.5, 0.6], [0, 1, 0], [0, 0, 1]]), tol)
assert not predicates.is_hermitian(
np.array([[1, 0, 0.6], [0, 1, 0], [0, 0, 1]]), tol)
def test_is_unitary():
assert predicates.is_unitary(np.empty((0, 0)))
assert not predicates.is_unitary(np.empty((1, 0)))
assert not predicates.is_unitary(np.empty((0, 1)))
assert predicates.is_unitary(np.array([[1]]))
assert predicates.is_unitary(np.array([[-1]]))
assert predicates.is_unitary(np.array([[1j]]))
assert not predicates.is_unitary(np.array([[5]]))
assert not predicates.is_unitary(np.array([[3j]]))
assert not predicates.is_unitary(np.array([[1, 0]]))
assert not predicates.is_unitary(np.array([[1], [0]]))
assert not predicates.is_unitary(np.array([[1, 0], [0, -2]]))
assert predicates.is_unitary(np.array([[1, 0], [0, -1]]))
assert predicates.is_unitary(np.array([[1j, 0], [0, 1]]))
assert not predicates.is_unitary(np.array([[1, 0], [1, 1]]))
assert not predicates.is_unitary(np.array([[1, 1], [0, 1]]))
assert not predicates.is_unitary(np.array([[1, 1], [1, 1]]))
assert not predicates.is_unitary(np.array([[1, -1], [1, 1]]))
assert predicates.is_unitary(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert predicates.is_unitary(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not predicates.is_unitary(
np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert predicates.is_unitary(
np.array([[1, 1j + 1e-11], [1j, 1 + 1j * 1e-9]]) * np.sqrt(0.5))
def test_is_unitary_tolerance():
tol = Tolerance(atol=0.5)
# Pays attention to specified tolerance.
assert predicates.is_unitary(np.array([[1, 0], [-0.5, 1]]), tol)
assert not predicates.is_unitary(np.array([[1, 0], [-0.6, 1]]), tol)
# Error isn't accumulated across entries.
assert predicates.is_unitary(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), tol)
assert not predicates.is_unitary(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]), tol)
def test_is_orthogonal():
assert predicates.is_orthogonal(np.empty((0, 0)))
assert not predicates.is_orthogonal(np.empty((1, 0)))
assert not predicates.is_orthogonal(np.empty((0, 1)))
assert predicates.is_orthogonal(np.array([[1]]))
assert predicates.is_orthogonal(np.array([[-1]]))
assert not predicates.is_orthogonal(np.array([[1j]]))
assert not predicates.is_orthogonal(np.array([[5]]))
assert not predicates.is_orthogonal(np.array([[3j]]))
assert not predicates.is_orthogonal(np.array([[1, 0]]))
assert not predicates.is_orthogonal(np.array([[1], [0]]))
assert not predicates.is_orthogonal(np.array([[1, 0], [0, -2]]))
assert predicates.is_orthogonal(np.array([[1, 0], [0, -1]]))
assert not predicates.is_orthogonal(np.array([[1j, 0], [0, 1]]))
assert not predicates.is_orthogonal(np.array([[1, 0], [1, 1]]))
assert not predicates.is_orthogonal(np.array([[1, 1], [0, 1]]))
assert not predicates.is_orthogonal(np.array([[1, 1], [1, 1]]))
assert not predicates.is_orthogonal(np.array([[1, -1], [1, 1]]))
    assert predicates.is_orthogonal(
        np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert not predicates.is_orthogonal(
np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not predicates.is_orthogonal(
np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert predicates.is_orthogonal(np.array([[1, 1e-11], [0, 1 + 1e-11]]))
def test_is_orthogonal_tolerance():
tol = Tolerance(atol=0.5)
# Pays attention to specified tolerance.
assert predicates.is_orthogonal(np.array([[1, 0], [-0.5, 1]]), tol)
assert not predicates.is_orthogonal(np.array([[1, 0], [-0.6, 1]]), tol)
# Error isn't accumulated across entries.
assert predicates.is_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), tol)
assert not predicates.is_orthogonal(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]), tol)
def test_is_special_orthogonal():
assert predicates.is_special_orthogonal(np.empty((0, 0)))
assert not predicates.is_special_orthogonal(np.empty((1, 0)))
assert not predicates.is_special_orthogonal(np.empty((0, 1)))
assert predicates.is_special_orthogonal(np.array([[1]]))
assert not predicates.is_special_orthogonal(np.array([[-1]]))
assert not predicates.is_special_orthogonal(np.array([[1j]]))
assert not predicates.is_special_orthogonal(np.array([[5]]))
assert not predicates.is_special_orthogonal(np.array([[3j]]))
assert not predicates.is_special_orthogonal(np.array([[1, 0]]))
assert not predicates.is_special_orthogonal(np.array([[1], [0]]))
assert not predicates.is_special_orthogonal(np.array([[1, 0], [0, -2]]))
assert not predicates.is_special_orthogonal(np.array([[1, 0], [0, -1]]))
assert predicates.is_special_orthogonal(np.array([[-1, 0], [0, -1]]))
assert not predicates.is_special_orthogonal(np.array([[1j, 0], [0, 1]]))
assert not predicates.is_special_orthogonal(np.array([[1, 0], [1, 1]]))
assert not predicates.is_special_orthogonal(np.array([[1, 1], [0, 1]]))
assert not predicates.is_special_orthogonal(np.array([[1, 1], [1, 1]]))
assert not predicates.is_special_orthogonal(np.array([[1, -1], [1, 1]]))
assert predicates.is_special_orthogonal(
np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert not predicates.is_special_orthogonal(
np.array([[1, 1], [1, -1]]) * np.sqrt(0.5))
assert not predicates.is_special_orthogonal(
np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not predicates.is_special_orthogonal(
np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert predicates.is_special_orthogonal(
np.array([[1, 1e-11], [0, 1 + 1e-11]]))
def test_is_special_orthogonal_tolerance():
tol = Tolerance(atol=0.5)
# Pays attention to specified tolerance.
assert predicates.is_special_orthogonal(
np.array([[1, 0], [-0.5, 1]]), tol)
assert not predicates.is_special_orthogonal(
np.array([[1, 0], [-0.6, 1]]), tol)
# Error isn't accumulated across entries, except for determinant factors.
assert predicates.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]), tol)
assert not predicates.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), tol)
assert not predicates.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]), tol)
def test_is_special_unitary():
assert predicates.is_special_unitary(np.empty((0, 0)))
assert not predicates.is_special_unitary(np.empty((1, 0)))
assert not predicates.is_special_unitary(np.empty((0, 1)))
assert predicates.is_special_unitary(np.array([[1]]))
assert not predicates.is_special_unitary(np.array([[-1]]))
assert not predicates.is_special_unitary(np.array([[5]]))
assert not predicates.is_special_unitary(np.array([[3j]]))
assert not predicates.is_special_unitary(np.array([[1, 0], [0, -2]]))
assert not predicates.is_special_unitary(np.array([[1, 0], [0, -1]]))
assert predicates.is_special_unitary(np.array([[-1, 0], [0, -1]]))
assert not predicates.is_special_unitary(np.array([[1j, 0], [0, 1]]))
assert predicates.is_special_unitary(np.array([[1j, 0], [0, -1j]]))
assert not predicates.is_special_unitary(np.array([[1, 0], [1, 1]]))
assert not predicates.is_special_unitary(np.array([[1, 1], [0, 1]]))
assert not predicates.is_special_unitary(np.array([[1, 1], [1, 1]]))
assert not predicates.is_special_unitary(np.array([[1, -1], [1, 1]]))
assert predicates.is_special_unitary(
np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert predicates.is_special_unitary(
np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not predicates.is_special_unitary(
np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert predicates.is_special_unitary(
np.array([[1, 1j + 1e-11], [1j, 1 + 1j * 1e-9]]) * np.sqrt(0.5))
def test_is_special_unitary_tolerance():
tol = Tolerance(atol=0.5)
# Pays attention to specified tolerance.
assert predicates.is_special_unitary(np.array([[1, 0], [-0.5, 1]]), tol)
assert not predicates.is_special_unitary(np.array([[1, 0], [-0.6, 1]]), tol)
assert predicates.is_special_unitary(
np.array([[1, 0], [0, 1]]) * cmath.exp(1j * 0.1), tol)
assert not predicates.is_special_unitary(
np.array([[1, 0], [0, 1]]) * cmath.exp(1j * 0.3), tol)
# Error isn't accumulated across entries, except for determinant factors.
assert predicates.is_special_unitary(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]), tol)
assert not predicates.is_special_unitary(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), tol)
assert not predicates.is_special_unitary(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]), tol)
def test_commutes():
assert predicates.commutes(
np.empty((0, 0)),
np.empty((0, 0)))
assert not predicates.commutes(
np.empty((1, 0)),
np.empty((0, 1)))
assert not predicates.commutes(
np.empty((0, 1)),
np.empty((1, 0)))
assert not predicates.commutes(
np.empty((1, 0)),
np.empty((1, 0)))
assert not predicates.commutes(
np.empty((0, 1)),
np.empty((0, 1)))
assert predicates.commutes(np.array([[1]]), np.array([[2]]))
assert predicates.commutes(np.array([[1]]), np.array([[0]]))
x = np.array([[0, 1], [1, 0]])
y = np.array([[0, -1j], [1j, 0]])
z = np.array([[1, 0], [0, -1]])
xx = np.kron(x, x)
zz = np.kron(z, z)
assert predicates.commutes(x, x)
assert predicates.commutes(y, y)
assert predicates.commutes(z, z)
assert not predicates.commutes(x, y)
assert not predicates.commutes(x, z)
assert not predicates.commutes(y, z)
assert predicates.commutes(xx, zz)
assert predicates.commutes(xx, np.diag([1, -1, -1, 1 + 1e-9]))
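# Note: commutes(a, b) checks that a.dot(b) == b.dot(a) within tolerance, i.e. that the
# commutator ab - ba vanishes; the single-qubit Pauli pairs above anticommute and so fail,
# while kron(x, x) and kron(z, z) commute because the two per-factor anticommutations
# cancel.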
def test_commutes_tolerance():
tol = Tolerance(atol=0.5)
x = np.array([[0, 1], [1, 0]])
z = np.array([[1, 0], [0, -1]])
# Pays attention to specified tolerance.
assert predicates.commutes(x, x + z * 0.1, tol)
assert not predicates.commutes(x, x + z * 0.5, tol)
def test_allclose_up_to_global_phase():
assert predicates.allclose_up_to_global_phase(
np.array([1]),
np.array([1j]))
assert predicates.allclose_up_to_global_phase(
np.array([[1]]),
np.array([[1]]))
assert predicates.allclose_up_to_global_phase(
np.array([[1]]),
np.array([[-1]]))
assert predicates.allclose_up_to_global_phase(
np.array([[0]]),
np.array([[0]]))
assert predicates.allclose_up_to_global_phase(
np.array([[1, | |
entity = self.entity_wikid2id[rows[i][j]['surfaceLinks'][0]['target']['id']]
entity_cells[i,j] = entity
tmp_entities.append([(i,j), entity])
tmp_entities_text.append(rows[i][j]['text'])
except:
entity_cells[i,j] = 0
else:
entity_cells[i,j] = 0
if len(tmp_entities) == 0:
continue
if self.mode == 0:
if i == 0 or not (entity_cells[i] == entity_cells[:i]).all(axis=1).any():
has_core = True if any([z[0][1]==subject for z in tmp_entities]) else False
if has_core or self.src == "train":
for (index, entity), entity_text in zip(tmp_entities, tmp_entities_text):
if self.mode == 0 and self.src != "train" and index[1]!=subject:
entities.append([index, self.entity_wikid2id['[ENT_MASK]']])
entities_text.append('')
tmp_entity_num += 1
entities.append([index, entity])
entities_text.append(entity_text)
all_entities.add(entity)
tmp_entity_num += 1
if index[1] == subject:
core_entities.append(entity)
core_entities_text.append(entities_text)
if tmp_entity_num >= self.max_cell:
split.append(len(entities))
tmp_entity_num = 0
elif self.mode == 1 or self.mode == 2:
for (index, entity), entity_text in zip(tmp_entities, tmp_entities_text):
entities.append([index, entity])
entities_text.append(entity_text)
tmp_entity_num += 1
core_entities.append(entity)
core_entities_text.append(entity_text)
if tmp_entity_num >= self.max_cell:
split.append(len(entities))
tmp_entity_num = 0
# pdb.set_trace()
if split[-1]!=len(entities):
split.append(len(entities))
if len(core_entities) < 5:
if self.mode!=2:
if self.src!="train" or len(core_entities) == 0 or (self.mode == 1 and len(core_entities) < 3):
table_removed += 1
continue
if split[-2]!=0 and split[-1]-split[-2]<5:
split[-2] = split[-1]
split = split[:-1]
for i in range(len(split)-1):
actual_tables.append([
table_id,
subject,
pgEnt,
pgTitle,
secTitle,
caption,
headers,
core_entities,
core_entities_text,
all_entities,
entities[split[i]:split[i+1]],
entities_text[split[i]:split[i+1]],
entity_cand
])
actual_table_num = len(actual_tables)
print('%d original tables, actual %d tables in total\n%d tables removed because of extra entity filtering'%(origin_table_num, actual_table_num, table_removed))
pool = Pool(processes=4)
if self.mode == 0:
processed_data = list(tqdm(pool.imap(partial(process_single_hybrid_table,config=self), actual_tables, chunksize=2000),total=len(actual_tables)))
elif self.mode == 1 or self.mode == 2:
processed_data = list(tqdm(pool.imap(partial(process_single_hybrid_table_CER,config=self), actual_tables, chunksize=2000),total=len(actual_tables)))
# elif self.mode == 2:
else:
raise Exception
pool.close()
# pdb.set_trace()
with open(preprocessed_filename, 'wb') as f:
pickle.dump(processed_data, f)
# pdb.set_trace()
return processed_data
def __init__(self, data_dir, entity_vocab, max_cell=100, max_input_tok=350, max_input_ent=150, src="train", max_length = [50, 10, 10], force_new=False, tokenizer = None, mode=0):
if tokenizer is not None:
self.tokenizer = tokenizer
else:
self.tokenizer = BertTokenizer.from_pretrained('data/pre-trained_models/bert-base-uncased')
self.src = src
self.mode = mode #{0:pretrain,1:core entity retrieval,2:cell filling}
self.max_cell = float(max_cell)
self.max_title_length = max_length[0]
self.max_header_length = max_length[1]
self.max_cell_length = max_length[2]
self.force_new = force_new
self.max_input_tok = max_input_tok
self.max_input_ent = max_input_ent
self.entity_vocab = entity_vocab
self.entity_wikid2id = {self.entity_vocab[x]['wiki_id']:x for x in self.entity_vocab}
self.data = self._preprocess(data_dir)
# pdb.set_trace()
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def mask_ent(inputs_origin, inputs_local_id, core_entity_mask, entity_wikid2id, mlm_probability=0.15, mall_probability=0.5, is_train=False):
""" Prepare masked entities inputs/labels for masked entity modeling: 80% MASK, 10% random, 10% original. """
labels = inputs_local_id.clone()
inputs = inputs_origin.clone()
input_ent_mask_type = torch.zeros_like(inputs)
if is_train:
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, mlm_probability)
special_tokens_mask = (inputs<len(RESERVED_ENT_VOCAB))#[list(map(lambda x: 1 if x == entity_wikid2id['[PAD]'] else 0, val)) for val in labels.tolist()]
# special_tokens_mask[:, 1] = True
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -1 # We only compute loss on masked tokens
        # 50% of the time, we mask input ent&text, the model needs to recover the entity
indices_mask_both = torch.bernoulli(torch.full(labels.shape, mall_probability)).bool() & masked_indices
pg_ent_mask = torch.zeros(labels.shape)
pg_ent_mask[:,0] = 1
inputs[indices_mask_both] = 0
input_ent_mask_type[indices_mask_both] = entity_wikid2id['[ENT_MASK]']
input_ent_mask_type[indices_mask_both & pg_ent_mask.bool()] = entity_wikid2id['[PG_ENT_MASK]']
input_ent_mask_type[indices_mask_both & core_entity_mask] = entity_wikid2id['[CORE_ENT_MASK]']
# 50% of the time, we only mask ent, the task would be like entity linking
# 40% of the time, we mask ent directly
indices_mask_single = masked_indices & ~indices_mask_both
inputs[indices_mask_single] = 0
# 5% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.1)).bool() & indices_mask_single
random_words = torch.randint(low=RESERVED_ENT_VOCAB_NUM,high=len(entity_wikid2id), size=labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
inputs[:, 1] = 0
input_ent_mask_type[:, 1] = entity_wikid2id['[CORE_ENT_MASK]']
indices_unchanged = torch.bernoulli(torch.full(labels.shape, 0.1)).bool() & masked_indices
inputs[indices_unchanged] = inputs_origin[indices_unchanged]
input_ent_mask_type[indices_unchanged] = 0
# 10% of the time, we keep the masked input tokens unchanged
else:
ent_mask = inputs==entity_wikid2id['[ENT_MASK]']
core_ent_mask = inputs==entity_wikid2id['[CORE_ENT_MASK]']
labels[~ent_mask] = -1
input_ent_mask_type[ent_mask] = entity_wikid2id['[ENT_MASK]']
input_ent_mask_type[core_ent_mask] = entity_wikid2id['[CORE_ENT_MASK]']
inputs[ent_mask|core_ent_mask] = 0
return inputs, input_ent_mask_type, labels
class pretrain_hybrid_table_collate_fn:
def __init__(self, tokenizer, entity_wikid2id, mlm_probability, ent_mlm_probability, mall_probability=0.5, max_entity_candidate=1000, is_train=True, candidate_distribution=None, use_cand=True, random_sample=True, use_visibility=True):
self.tokenizer = tokenizer
self.entity_wikid2id = entity_wikid2id
self.mlm_probability = mlm_probability
self.ent_mlm_probability = ent_mlm_probability
self.mall_probability = mall_probability
self.max_entity_candidate = max_entity_candidate
self.is_train = is_train
self.candidate_distribution = candidate_distribution
self.use_cand = use_cand
self.random_sample = random_sample
self.use_visibility = use_visibility
def generate_random_candidate(self, batch_size, indice_mask):
random_shifts = np.random.random((batch_size, len(self.entity_wikid2id)))
random_shifts[indice_mask] = 10
return np.argpartition(random_shifts, self.max_entity_candidate, axis=1)[:, :self.max_entity_candidate]
def generate_random_candidate_v2(self, batch_size, masked_entity, candidate_distribution=None, ent_cand=None):
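        # Output sketch (as read from the code below): a [batch_size, max_entity_candidate] int
        # array. Each row starts with that sample's gold (masked) entities, followed by its own
        # candidates when use_cand is set; with random_sample during training, the remaining
        # slots hold random negatives shared across the whole batch.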
if self.random_sample and self.is_train:
random_shifts = np.random.random(len(self.entity_wikid2id))
if candidate_distribution is not None:
random_shifts /= np.sum(random_shifts)
random_shifts -= candidate_distribution
all_masked = list(itertools.chain(*masked_entity))
random_shifts[all_masked] = 10
all_masked = set(all_masked)
final_candidates = np.tile(np.argpartition(random_shifts, self.max_entity_candidate)[:self.max_entity_candidate],[batch_size, 1])
else:
final_candidates = np.zeros([batch_size, self.max_entity_candidate])
for i, masked in enumerate(masked_entity):
final_candidates[i, :len(masked)] = masked
if self.use_cand:
cand_i = ent_cand[i]
if self.random_sample and self.is_train:
if len(cand_i)+len(masked) > self.max_entity_candidate/2:
cand_i = random.sample(cand_i, int(self.max_entity_candidate/2-len(masked)))
else:
if len(cand_i)+len(masked) > self.max_entity_candidate:
cand_i = random.sample(cand_i, int(self.max_entity_candidate-len(masked)))
final_candidates[i, len(masked):len(masked)+len(cand_i)] = cand_i
else:
remain = list(all_masked-set(masked))
final_candidates[i, len(masked):len(masked)+len(remain)] = remain
return final_candidates
def __call__(self, raw_batch):
batch_table_id,batch_input_tok,batch_input_tok_type,batch_input_tok_pos,batch_input_tok_mask,batch_input_tok_length, \
batch_input_ent,batch_input_ent_text,batch_input_ent_cell_length,batch_input_ent_local_id,batch_input_ent_type,batch_input_ent_mask,batch_input_ent_length, \
batch_core_entity_mask,batch_core_ent_local_id,batch_all_entity_set,batch_entity_cand,batch_exclusive_ent_mask = zip(*raw_batch)
if batch_entity_cand[0] is None and self.use_cand:
raise Exception
max_input_tok_length = max(batch_input_tok_length)
max_input_ent_length = max(batch_input_ent_length)
max_input_cell_length = max([z.shape[-1] for z in batch_input_ent_text])
batch_size = len(batch_input_tok_length)
batch_input_tok_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_type_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_pos_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_mask_padded = np.zeros([batch_size, max_input_tok_length, max_input_tok_length+max_input_ent_length], dtype=int)
batch_input_ent_padded = np.zeros([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_text_padded = np.zeros([batch_size, max_input_ent_length, max_input_cell_length], dtype=int)
batch_input_ent_text_length = np.ones([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_type_padded = np.zeros([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_mask_padded = np.zeros([batch_size, max_input_ent_length, max_input_tok_length+max_input_ent_length], dtype=int)
batch_core_entity_mask_padded = np.zeros([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_local_id_padded = np.zeros([batch_size, max_input_ent_length], dtype=int)
if self.is_train:
max_input_col_ent_num = max([z.shape[1] for z in batch_exclusive_ent_mask])
batch_exclusive_ent_mask_padded = np.full([batch_size, max_input_ent_length, max_input_col_ent_num], 1000, dtype=int)
for i, (tok_l, ent_l) in enumerate(zip(batch_input_tok_length, batch_input_ent_length)):
batch_input_tok_padded[i, :tok_l] = batch_input_tok[i]
batch_input_tok_type_padded[i, :tok_l] = batch_input_tok_type[i]
batch_input_tok_pos_padded[i, :tok_l] = batch_input_tok_pos[i]
if self.use_visibility or not self.is_train:
batch_input_tok_mask_padded[i, :tok_l, :tok_l] = batch_input_tok_mask[i][0]
batch_input_tok_mask_padded[i, :tok_l, max_input_tok_length:max_input_tok_length+ent_l] = batch_input_tok_mask[i][1]
else:
batch_input_tok_mask_padded[i, :tok_l, :tok_l] = 1
batch_input_tok_mask_padded[i, :tok_l, max_input_tok_length:max_input_tok_length+ent_l] = 1
batch_input_ent_padded[i, :ent_l] = batch_input_ent[i]
batch_input_ent_text_padded[i, :ent_l, :batch_input_ent_text[i].shape[-1]] = batch_input_ent_text[i]
batch_input_ent_text_length[i, :ent_l] = batch_input_ent_cell_length[i]
batch_input_ent_type_padded[i, :ent_l] = batch_input_ent_type[i]
if self.use_visibility or not self.is_train:
batch_input_ent_mask_padded[i, :ent_l, :tok_l] = batch_input_ent_mask[i][0]
batch_input_ent_mask_padded[i, :ent_l, max_input_tok_length:max_input_tok_length+ent_l] = batch_input_ent_mask[i][1]
else:
batch_input_ent_mask_padded[i, :ent_l, :tok_l] = 1
batch_input_ent_mask_padded[i, :ent_l, max_input_tok_length:max_input_tok_length+ent_l] = 1
batch_core_entity_mask_padded[i, :ent_l] = batch_core_entity_mask[i]
batch_input_ent_local_id_padded[i, :ent_l] = batch_input_ent_local_id[i]
if self.is_train:
batch_exclusive_ent_mask_padded[i, :ent_l, :batch_exclusive_ent_mask[i].shape[1]] = batch_exclusive_ent_mask[i]
batch_input_tok_padded = torch.LongTensor(batch_input_tok_padded)
batch_input_tok_type_padded = torch.LongTensor(batch_input_tok_type_padded)
batch_input_tok_pos_padded = torch.LongTensor(batch_input_tok_pos_padded)
batch_input_tok_mask_padded = torch.LongTensor(batch_input_tok_mask_padded)
batch_input_ent_padded = torch.LongTensor(batch_input_ent_padded)
batch_input_ent_text_padded = torch.LongTensor(batch_input_ent_text_padded)
batch_input_ent_text_length = torch.LongTensor(batch_input_ent_text_length)
batch_input_ent_type_padded = torch.LongTensor(batch_input_ent_type_padded)
batch_input_ent_mask_padded = torch.LongTensor(batch_input_ent_mask_padded)
batch_core_entity_mask_padded = torch.BoolTensor(batch_core_entity_mask_padded)
batch_input_ent_local_id_padded = torch.LongTensor(batch_input_ent_local_id_padded)
if self.is_train:
batch_exclusive_ent_mask_padded = torch.LongTensor(batch_exclusive_ent_mask_padded)
else:
batch_exclusive_ent_mask_padded = None
batch_input_tok_final, batch_input_tok_labels = mask_tokens(batch_input_tok_padded, self.tokenizer, mlm_probability=self.mlm_probability)
batch_input_ent_final, batch_input_ent_mask_type, batch_input_ent_labels = mask_ent(batch_input_ent_padded, batch_input_ent_local_id_padded, batch_core_entity_mask_padded, self.entity_wikid2id, mlm_probability=self.ent_mlm_probability, mall_probability=self.mall_probability, is_train=self.is_train)
#random sample candidate
# indice_mask = (list(itertools.chain(*[[i]*len(x) for i,x in enumerate(batch_all_entity_set)])), list(itertools.chain(*batch_all_entity_set)))
# batch_candidate_entity_set = self.generate_random_candidate(batch_size, indice_mask)
batch_candidate_entity_set = self.generate_random_candidate_v2(batch_size, batch_all_entity_set, self.candidate_distribution, ent_cand=batch_entity_cand)
batch_candidate_entity_set = torch.LongTensor(batch_candidate_entity_set)
if not self.is_train:
batch_core_entity_label = np.zeros([batch_size, self.max_entity_candidate], dtype=bool)
for i in range(batch_size):
batch_core_entity_label[i, batch_core_ent_local_id[i]] = True
batch_core_entity_label = torch.BoolTensor(batch_core_entity_label)
else:
batch_core_entity_label = None
return batch_table_id,batch_input_tok_final, batch_input_tok_type_padded, batch_input_tok_pos_padded, batch_input_tok_labels, batch_input_tok_mask_padded, \
batch_input_ent_text_padded, batch_input_ent_text_length, batch_input_ent_mask_type, batch_input_ent_final, batch_input_ent_type_padded, batch_input_ent_labels, batch_input_ent_mask_padded, batch_candidate_entity_set, batch_core_entity_label, batch_exclusive_ent_mask_padded, batch_core_entity_mask_padded
class pretrain_hybrid_table_collate_fn_CER(pretrain_hybrid_table_collate_fn):
def __init__(self, tokenizer, entity_wikid2id, max_entity_candidate=1000, is_train=True, candidate_distribution=None, use_cand=True, seed_num=1, random_sample=True):
self.tokenizer = tokenizer
self.entity_wikid2id = entity_wikid2id
self.max_entity_candidate = max_entity_candidate
self.is_train = is_train
self.candidate_distribution = candidate_distribution
self.use_cand = use_cand
self.seed = seed_num
self.random_sample = random_sample
def __call__(self, raw_batch):
batch_table_id,batch_input_tok,batch_input_tok_type,batch_input_tok_pos,batch_input_tok_length, \
batch_input_ent,batch_input_ent_text,batch_input_ent_cell_length,batch_input_ent_length,batch_input_ent_local_id,batch_core_entities, \
batch_entity_cand = zip(*raw_batch)
if batch_entity_cand[0] is None and self.use_cand:
raise Exception
max_input_tok_length = max(batch_input_tok_length)
max_input_ent_length = max(batch_input_ent_length)
max_input_cell_length = max([z.shape[-1] for z in batch_input_ent_text])
batch_size = len(batch_input_tok_length)
batch_input_tok_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_type_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_tok_pos_padded = np.zeros([batch_size, max_input_tok_length], dtype=int)
batch_input_ent_padded = np.zeros([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_text_padded = np.zeros([batch_size, max_input_ent_length, max_input_cell_length], dtype=int)
batch_input_ent_text_length = np.ones([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_type_padded = np.zeros([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_type_padded[:, 0] = 2
batch_input_ent_ent_mask_padded = np.zeros([batch_size, max_input_ent_length], dtype=int)
batch_input_ent_ent_mask_padded[:, 1] = 1
batch_input_mask_padded = np.zeros([batch_size, 1, max_input_tok_length+max_input_ent_length], dtype=int)
batch_seed_ent = []
batch_target_ent = np.full([batch_size, self.max_entity_candidate], 0, dtype=int)
for i, (tok_l, ent_l) in enumerate(zip(batch_input_tok_length, batch_input_ent_length)):
batch_input_tok_padded[i, :tok_l] = batch_input_tok[i]
batch_input_tok_type_padded[i, :tok_l] = batch_input_tok_type[i]
batch_input_tok_pos_padded[i, :tok_l] = batch_input_tok_pos[i]
batch_input_ent_padded[i, :ent_l] = batch_input_ent[i]
batch_input_ent_text_padded[i, :ent_l, :batch_input_ent_text[i].shape[-1]] = batch_input_ent_text[i]
batch_input_ent_text_length[i, :ent_l] = batch_input_ent_cell_length[i]
batch_input_ent_type_padded[i, 1:ent_l] = 3
if self.seed >0:
tmp_cand_core = set(range(ent_l-2))
tmp_selected_core = random.sample(tmp_cand_core,self.seed)
batch_seed_ent.append(batch_input_ent_local_id[i][tmp_selected_core])
tmp_cand_core = list(tmp_cand_core-set(tmp_selected_core))
# batch_target_ent[i,:len(tmp_cand_core)] = batch_input_ent_local_id[i][tmp_cand_core]
                batch_target_ent[i,batch_input_ent_local_id[i][tmp_cand_core]] = 1  # assumption: the remaining core entities are the positive targets
<gh_stars>1-10
import attr
from functools import total_ordering
from uuid import uuid4
from sqlalchemy import Float, String, and_, between, case, cast, func, or_, text
from recipe.exceptions import BadIngredient
from recipe.utils import AttrDict, filter_to_string
from recipe.utils.datatype import (
convert_date,
convert_datetime,
determine_datatype,
datatype_from_column_expression,
)
ALLOWED_OPERATORS = set(
[
"eq",
"ne",
"lt",
"lte",
"gt",
"gte",
"is",
"isnot",
"like",
"ilike",
"quickselect",
"in",
"notin",
"between",
]
)
@total_ordering
class Ingredient(object):
"""Ingredients combine to make a SQLAlchemy query.
Any unknown keyword arguments provided to an Ingredient
during initialization are stored in a meta object.
.. code:: python
# icon is an unknown keyword argument
m = Metric(func.sum(MyTable.sales), icon='cog')
print(m.meta.icon)
>>> 'cog'
This meta storage can be used to add new capabilities to
ingredients.
Args:
id (:obj:`str`):
An id to identify this Ingredient. If ingredients are
added to a Shelf, the id is automatically set as the key in
the shelf.
columns (:obj:`list` of :obj:`ColumnElement`):
A list of SQLAlchemy columns to use in a query select.
filters (:obj:`list` of :obj:`BinaryExpression`):
A list of SQLAlchemy BinaryExpressions to use in the
.filter() clause of a query.
havings (:obj:`list` of :obj:`BinaryExpression`):
A list of SQLAlchemy BinaryExpressions to use in the
.having() clause of a query.
        group_by (:obj:`list` of :obj:`ColumnElement`):
A list of SQLAlchemy columns to use in the `group_by` clause
of a query.
formatters: (:obj:`list` of :obj:`callable`):
A list of callables to apply to the result values.
If formatters exist, property `{ingredient.id}_raw` will
exist on each result row containing the unformatted
value.
cache_context (:obj:`str`):
Extra context when caching this ingredient. DEPRECATED
ordering (`string`, 'asc' or 'desc'):
One of 'asc' or 'desc'. 'asc' is the default value.
The default ordering of this ingredient if it is
used in a ``recipe.order_by``.
This is added to the ingredient when the ingredient is
used in a ``recipe.order_by``.
group_by_strategy (:obj:`str`):
A strategy to use when preparing group_bys for the query
"labels" is the default strategy which will use the labels assigned to
each column.
"direct" will use the column expression directly. This alternative is
useful when there might be more than one column with the same label
being used in the query.
quickselects (:obj:`list` of named filters):
A list of named filters that can be accessed through
``build_filter``. Named filters are dictionaries with
a ``name`` (:obj:str) property and a ``condition`` property
(:obj:`BinaryExpression`)
datatype (:obj:`str`):
The identified datatype (num, str, date, bool, datetime) of
the parsed expression
datatype_by_role (:obj:`dict`):
The identified datatype (num, str, date, bool, datetime) for each
role.
Returns:
An Ingredient object.
"""
def __init__(self, **kwargs):
self.id = kwargs.pop("id", uuid4().hex[:12])
self.columns = kwargs.pop("columns", [])
self.filters = kwargs.pop("filters", [])
self.havings = kwargs.pop("havings", [])
self.group_by = kwargs.pop("group_by", [])
self.formatters = kwargs.pop("formatters", [])
self.quickselects = kwargs.pop("quickselects", [])
self.column_suffixes = kwargs.pop("column_suffixes", None)
self.cache_context = kwargs.pop("cache_context", "")
self.datatype = kwargs.pop("datatype", None)
self.datatype_by_role = kwargs.pop("datatype_by_role", dict())
self.anonymize = False
self.roles = {}
self._labels = []
self.error = kwargs.pop("error", None)
# What order should this be in
self.ordering = kwargs.pop("ordering", "asc")
self.group_by_strategy = kwargs.pop("group_by_strategy", "labels")
if not isinstance(self.formatters, (list, tuple)):
raise BadIngredient(
"formatters passed to an ingredient must be a list or tuple"
)
# If explicit suffixes are passed in, there must be one for each column
if self.column_suffixes is not None and len(self.column_suffixes) != len(
self.columns
):
raise BadIngredient("column_suffixes must be the same length as columns")
# Any remaining passed properties are available in self.meta
self.meta = AttrDict(kwargs)
def __hash__(self):
return hash(self.describe())
def __repr__(self):
return self.describe()
def _stringify(self):
"""Return a relevant string based on ingredient type for repr and
ordering. Ingredients with the same classname, id and _stringify
value are considered the same."""
return " ".join(str(col) for col in self.columns)
def describe(self):
"""A string representation of the ingredient."""
return u"({}){} {}".format(self.__class__.__name__, self.id, self._stringify())
def _format_value(self, value):
"""Formats value using any stored formatters."""
for f in self.formatters:
value = f(value)
return value
def make_column_suffixes(self):
"""Make sure we have the right column suffixes. These will be appended
to `id` when generating the query.
Developers note: These are generated when the query runs because the
recipe may be run with anonymization on or off, which will inject
a formatter.
"""
if self.column_suffixes:
return self.column_suffixes
if len(self.columns) == 0:
return ()
elif len(self.columns) == 1:
if self.formatters:
return ("_raw",)
else:
return ("",)
else:
raise BadIngredient(
"column_suffixes must be supplied if there is " "more than one column"
)
@property
def query_columns(self):
"""Yield labeled columns to be used as a select in a query."""
self._labels = []
for column, suffix in zip(self.columns, self.make_column_suffixes()):
self._labels.append(self.id + suffix)
yield column.label(self.id + suffix)
@property
def order_by_columns(self):
"""Yield columns to be used in an order by using this ingredient. Column
ordering is in reverse order of columns
"""
# Ensure the labels are generated
if not self._labels:
list(self.query_columns)
if self.group_by_strategy == "labels":
if self.ordering == "desc":
suffix = " DESC"
else:
suffix = ""
return [
text(lbl + suffix)
for col, lbl in reversed(list(zip(self.columns, self._labels)))
]
else:
if self.ordering == "desc":
return [col.desc() for col in reversed(self.columns)]
else:
return reversed(self.columns)
@property
def cauldron_extras(self):
"""Yield extra tuples containing a field name and a callable that takes
a row.
"""
if self.formatters:
raw_property = self.id + "_raw"
yield self.id, lambda row: self._format_value(getattr(row, raw_property))
def _order(self):
"""Ingredients are sorted by subclass then by id."""
if isinstance(self, Dimension):
return (0, self.id)
elif isinstance(self, Metric):
return (1, self.id)
elif isinstance(self, Filter):
return (2, self.id)
elif isinstance(self, Having):
return (3, self.id)
else:
return (4, self.id)
def __lt__(self, other):
"""Make ingredients sortable."""
return self._order() < other._order()
def __eq__(self, other):
"""Make ingredients sortable."""
return self._order() == other._order()
def __ne__(self, other):
"""Make ingredients sortable."""
return not (self._order() == other._order())
def _build_scalar_filter(self, value, operator=None, target_role=None):
"""Build a Filter given a single value.
Args:
value (a string, number, boolean or None):
operator (`str`)
A valid scalar operator. The default operator
is `eq`
target_role (`str`)
An optional role to build the filter against
Returns:
A Filter object
"""
# Developer's note: Valid operators should appear in ALLOWED_OPERATORS
# This is used by the AutomaticFilter extension.
if operator is None:
operator = "eq"
if target_role and target_role in self.roles:
filter_column = self.roles.get(target_role)
datatype = determine_datatype(self, target_role)
else:
filter_column = self.columns[0]
datatype = determine_datatype(self)
# Ensure that the filter_column and value have compatible data types
# Support passing ILIKE in Paginate extensions
if datatype == "date":
value = convert_date(value)
elif datatype == "datetime":
value = convert_datetime(value)
if isinstance(value, str) and datatype != "str":
filter_column = cast(filter_column, String)
if operator == "eq":
# Default operator is 'eq' so if no operator is provided, handle
# like an 'eq'
if value is None:
return filter_column.is_(value)
else:
return filter_column == value
if operator == "ne":
return filter_column != value
elif operator == "lt":
return filter_column < value
elif operator == "lte":
return filter_column <= value
elif operator == "gt":
return filter_column > value
elif operator == "gte":
return filter_column >= value
elif operator == "is":
return filter_column.is_(value)
elif operator == "isnot":
return filter_column.isnot(value)
elif operator == "like":
value = str(value)
return filter_column.like(value)
elif operator == "ilike":
value = str(value)
return filter_column.ilike(value)
elif operator == "quickselect":
for qs in self.quickselects:
if qs.get("name") == value:
return qs.get("condition")
raise ValueError(
"quickselect {} was not found in "
"ingredient {}".format(value, self.id)
)
else:
raise ValueError("Unknown operator {}".format(operator))
def _build_vector_filter(self, value, operator=None, target_role=None):
"""Build a Filter given a list of values.
Args:
value (a list of string, number, boolean or None):
operator (:obj:`str`)
A valid vector operator. The default operator is
`in`.
target_role (`str`)
An optional role to build the filter against
Returns:
A Filter object
"""
# Developer's note: Valid operators should appear in ALLOWED_OPERATORS
# This is used by the AutomaticFilter extension.
if operator is None:
operator = "in"
if target_role and target_role in self.roles:
filter_column = self.roles.get(target_role)
datatype = determine_datatype(self, target_role)
else:
filter_column = self.columns[0]
datatype = determine_datatype(self)
if datatype == "date":
value = list(map(convert_date, value))
elif datatype == "datetime":
value = list(map(convert_datetime, value))
if operator == "in":
            # Default operator is 'in' so if no operator is provided, handle
            # like an 'in'
        for source, convolved in zip(self.sources, self.convolved):
source[yslice, xslice] = value
# Reconvolve the changed region.
reconv = scipy.signal.convolve(
source[ylos:yhis, xlos:xhis], self.kernel, mode='same')
# Copy the changed region into the full convolution.
convolved[conv_slice] = reconv[y1:y2, x1:x2]
return conv_slice
def prepare(D, W=None, invgain=1.6, smoothing=3, saturation=None):
"""Prepare image data for analysis.
The input data D is converted to float32, if necessary, and an
estimate of the mean background will be subtracted.
If no inverse variance map W is provided, it will be estimated
from D, including both background and signal contributions.
Otherwise, just return W converted to float32.
Parameters
----------
D : array
2D array of pixel values in ADU units. An estimate of the
mean background will be subtracted.
W : array or None
2D array of inverse variance weights in ADU units, or None
if this should be estimated from D.
invgain : float
Inverse gain in units of e/ADU to assume for estimating
the signal variance contribution.
smoothing : int
Number of pixels for median filtering of D used to estimate
the signal variance contribution. Must be odd.
saturation : int or None
Pixel values >= this level are considered saturated and masked.
        Nothing is masked when saturation is None.
Returns
-------
tuple
Tuple D, W of 2D numpy float32 arrays.
"""
    # When no saturation level is given, nothing is masked.
    if saturation is None:
        saturated = np.zeros(D.shape, bool)
    else:
        saturated = (D >= saturation)
        logging.info('Found {0} pixels saturated (>={1}).'
                     .format(np.count_nonzero(saturated), saturation))
# Convert to a float32 array.
D = np.array(D, np.float32)
# Select background pixels using sigma clipping.
clipped, _, _ = scipy.stats.sigmaclip(D[~saturated])
# Subtract the clipped mean from the data.
bgmean = np.mean(clipped)
logging.info('Subtracted background mean {0:.1f} ADU.'.format(bgmean))
D -= bgmean
if W is None:
# Use the clipped pixels for the background variance estimate.
bgvar = np.var(clipped)
logging.info('Estimated background RMS {0:.1f} ADU.'.format(np.sqrt(bgvar)))
var = bgvar * np.ones_like(D)
# Estimate additional variance due to median-filtered signal.
Dsmoothed = scipy.signal.medfilt2d(D, smoothing)
#smoother = np.ones((smoothing, smoothing)) / smoothing ** 2
#Dsmoothed = scipy.signal.convolve(D, smoother, mode='same')
var += np.maximum(0., Dsmoothed) / invgain
# Build an inverse variance image with zeros where var is zero.
W = np.divide(1., var, out=np.zeros_like(var, dtype=np.float32), where=var > 0)
else:
W = np.asarray(W, dtype=np.float32)
# Zero ivar for any saturated pixels.
W[saturated] = 0.
return D, W
def sobelfilter(D, W):
"""Estimate the magnitude of the 2D gradient of D.
Uses Sobel filters in x and y modified to account for ivar weights W.
"""
here, plus, minus = slice(1, -1), slice(2, None), slice(None, -2)
# Estimate slopes along each axis at each pixel.
Dx = 0.5 * (D[:, plus] - D[:, minus])
Dy = 0.5 * (D[plus, :] - D[minus, :])
# Calculate the corresponding inverse variances.
Wp, Wm = W[:, plus], W[:, minus]
Wx = 0.25 * np.divide(Wp * Wm, Wp + Wm, out=np.zeros_like(Wp), where=Wp + Wm > 0)
Wp, Wm = W[plus, :], W[minus, :]
Wy = 0.25 * np.divide(Wp * Wm, Wp + Wm, out=np.zeros_like(Wp), where=Wp + Wm > 0)
# Average slope estimates along the other axis with weights (1, 2, 1).
WDx = Wx[minus, :] * Dx[minus, :] + 2 * Wx[here, :] * Dx[here, :] + Wx[plus, :] * Dx[plus, :]
Wxsum = Wx[minus, :] + 2 * Wx[here, :] + Wx[plus, :]
Dx = np.divide(WDx, Wxsum, out=np.zeros_like(WDx), where=Wxsum > 0)
WDy = Wy[:, minus] * Dy[:, minus] + 2 * Wy[:, here] * Dy[:, here] + Wy[:, plus] * Dy[:, plus]
Wysum = Wy[:, minus] + 2 * Wy[:, here] + Wy[:, plus]
Dy = np.divide(WDy, Wysum, out=np.zeros_like(WDy), where=Wysum > 0)
# Estimate the 2D gradient magnitude.
Dg = np.zeros_like(D)
Dg[here, here] = np.hypot(Dx, Dy)
return Dg
def mask_defects(D, W, chisq_max=5e3, kernel_size=3, min_neighbors=7, inplace=False):
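    # Overview (inferred from the code): iteratively zero the inverse variance of "defect"
    # pixels. Each pixel is compared against the ivar-weighted prediction from its
    # kernel_size x kernel_size neighborhood; while any residual chi-square exceeds chisq_max,
    # the worst offender has its weight set to zero (pixels with too few unmasked neighbors,
    # or borderline chi-square with an incomplete neighborhood, are left unmasked).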
if not inplace:
W = W.copy()
# Initialize the kernel.
if kernel_size % 2 == 0:
raise ValueError('Kernel size must be odd.')
kernel = np.ones((kernel_size, kernel_size), np.float32)
nby2 = kernel_size // 2
max_neighbors = kernel_size ** 2 - 1
kernel[nby2, nby2] = 0.
# Calculate the ivar-weighted image.
WD = np.array(W * D, np.float32)
# Convolve with the kernel.
C = Convolutions([WD, W], kernel)
WD, W = C.sources
WDf, Wf = C.convolved
# Calculate the Wf weighted residuals.
res = Wf * D - WDf
# Calculate residual chisq.
denom = (W + Wf) * Wf
Wratio = np.divide(W, denom, out=np.zeros_like(W), where=denom != 0)
chisq = res ** 2 * Wratio
# Iteratively remove defects.
nmasked = 0
ny, nx = D.shape
while np.any(chisq > chisq_max):
# Find the next largest chisq.
iy, ix = np.unravel_index(np.argmax(chisq), (ny, nx))
# Count the number of surrounding pixels with nonzero ivar.
xlo, ylo = max(0, ix - nby2), max(0, iy - nby2)
xhi, yhi = min(nx, ix + nby2 + 1), min(ny, iy + nby2 + 1)
# Subtract 1 since chisq > 0 means that W > 0 for the central pixel.
num_neighbors = np.count_nonzero(W[ylo:yhi, xlo:xhi]) - 1
if num_neighbors < min_neighbors or ((num_neighbors < max_neighbors) and (chisq[iy, ix] < 2 * chisq_max)):
# Zero this pixel's chisq without changing its weight.
chisq[iy, ix] = 0.
continue
# Set this pixel's ivar to zero.
changed = C.set_source(iy, ix, 0)
# Update the chisq.
res[changed] = Wf[changed] * D[changed] - WDf[changed]
denom = (W[changed] + Wf[changed]) * Wf[changed]
Wratio[changed] = 0
np.divide(
W[changed], denom, out=Wratio[changed], where=denom != 0)
chisq[changed] = res[changed] ** 2 * Wratio[changed]
nmasked += 1
return W, nmasked
def get_data(name, must_exist=False):
"""Return the absolute path to a named data file associated with this package.
Relative paths refer to the desietcimg/data/ folder of this installation.
Use an absolute path to override this behavior.
"""
if os.path.isabs(name):
path = name
else:
import desietcimg
root = os.path.abspath(os.path.dirname(desietcimg.__file__))
path = os.path.join(root, 'data', name)
if must_exist and not os.path.exists(path):
raise RuntimeError('Non-existent data file: {0}'.format(path))
return path
def find_files(pattern, min=None, max=None, check_parent=True, partial_match_is_error=True):
"""Find files matching a pattern with a sequence number.
The sequence number is represented using {N} in the input pattern,
which can be repeated.
Parameters
----------
pattern : str or pathlib.Path
File pattern using {N} to represent the sequence number.
min : int or None
Only values of N >= min will be returned.
max : int or None
Only values of N <= max will be returned.
check_parent : bool
Raise an exception when True and the parent directory does
not exist.
partial_match_is_error : bool
Raise an exception when True for any paths that match the
first {N} but not all subsequent {N}'s in the input pattern.
Returns
-------
list
List of filenames matching the pattern and filtered by
any min/max cuts.
"""
if not isinstance(pattern, pathlib.Path):
pattern = pathlib.Path(pattern)
# Find which parts of the pattern contain {N}.
parts = pattern.parts
part_has_N = ['{N}' in part for part in parts]
if not any(part_has_N):
# Nothing to match. Return the input if it exists.
return [str(pattern)] if pattern.exists() else []
first_N = part_has_N.index(True)
# Build the parent path to search.
parent_path = pathlib.Path(*parts[:first_N])
if check_parent and not parent_path.exists():
raise FileNotFoundError(parent_path)
    # Build the suffix pattern if there is one.
remaining = first_N + 1
suffix_pattern = str(pathlib.Path(*parts[remaining:])) if remaining < len(parts) else None
# Look for paths matching the first {N} in the path using * as a glob pattern.
first_N_pattern = parts[first_N]
paths = sorted([str(P) for P in parent_path.glob(first_N_pattern.format(N='*'))])
# Check for integer matches to N.
regexp = re.compile(first_N_pattern.format(N='([0-9]+)') + '$')
selected = []
suffix = ''
for path in paths:
found = regexp.search(path)
if found:
N = int(found.group(1))
if min is not None and N < min:
continue
if max is not None and N > max:
continue
if suffix_pattern:
# Build the full path for this value of N.
# Use the regexp string match rather than the integer N to preserve formatting.
suffix = suffix_pattern.format(N=found.group(1))
            full_path = pathlib.Path(path) / suffix  # assumption: join the per-N suffix built above
        # Expecting a variable's name.
if self.peek()[0][0] == "NAME":
# Save the variable's name.
var = self.peek()[0][1]
self.discard()
# Make sure this variable isn't already declared.
if var in self.names:
self.error(Error.REDFINED_VAR)
# Check to see if we need to allocate a vector on the stack.
if self.peek()[0][0] == "SB":
self.discard()
# Expecting a number.
if self.peek()[0][0] != "NUMBER":
self.error()
# Save the vector size.
v = int(self.peek()[0][1])
self.discard()
# Set the variable's size on the stack.
self.var_l -= v*self.word
# If a vector was allocated, point to it. Otherwise set the vector as null.
if v:
self.add("lea {}, [{}{}]".format(self.a(), self.bp(), self.var_l))
else:
self.add("xor {}, {}".format(self.a(), self.a()))
# Set the size of the pointer on the stack.
self.var_l -= self.word
# Point the pointer at the vector.
self.add("mov [{}{}], {}".format(self.bp(), self.var_l, self.a()))
# Allocate space on the stack for the vector and pointer.
self.add("sub {}, {}".format(self.sp(), (v+1)*self.word))
# Expecting a ending bracket.
if self.peek()[0][0] != "EB":
self.error()
self.discard()
else:
# Set the variable's size on the stack.
self.var_l -= self.word
# Allocate space on the stack for the variable.
self.add("sub {}, {}".format(self.sp(), self.word))
# Add the variable to the list of names.
self.names.append(var)
# Set the variable's location on the stack.
self.var[var] = self.var_l
self.add("; {} @ [{}{}]".format(var, self.bp(), self.var_l))
else:
self.error()
if self.peek()[0][0] == "COMMA":
self.discard()
elif self.peek()[0][0] == "SEMICOLON":
self.discard()
break
else:
self.error()
def do_func(self):
# A temporary dictionary to hold the functions properties.
_func = {"name": "",
"call": "CDECL",
"prototype": False,
"param": 0,
"params": []}
# If there's no statement, then this is a function prototype.
_func["prototype"] = not self.findinline("SC")
# A function cannot be declared inside another function!
if self.in_func:
self.error(123)
# See if this function has a calling convention keyword.
if self.peek()[0][0] in ("STDCALL", "CDECL"):
_func["call"] = self.peek()[0][0]
self.discard()
# Check to see if this name exists already!
if self.peek()[0][1] in self.names:
self.error(Error.REDFINED_FUNC)
# Save the function's name.
_func["name"] = self.peek()[0][1]
self.discard()
if _func["name"] in self.funcs.keys():
if not (not _func["prototype"] and self.funcs[_func["name"]]["prototype"]):
self.error(554)
# Save space for BP and IP.
self.param_l = self.word*2
self.var_l = 0
# Make sure there's a (
if self.peek()[0][0] == "SP":
self.discard()
else:
self.error(125)
while True:
if self.peek()[0][0] == "NAME":
if self.peek()[0][1] in self.names:
self.error(126)
if not _func["prototype"]:
self.names.append(self.peek()[0][1])
self.param[self.peek()[0][1]] = self.param_l
self.param_l += self.word
_func["params"].append(self.peek()[0][1])
self.discard()
elif self.peek()[0][0] == "EP":
pass
else:
self.error(127)
if self.peek()[0][0] == "COMMA":
self.discard()
elif self.peek()[0][0] == "EP":
self.discard()
break
else:
self.error(128)
# Set param
_func["param"] = len(_func["params"])
if not _func["prototype"] and self.peek()[0][0] == "SC":
self.discard()
elif _func["prototype"] and self.peek()[0][0] == "SEMICOLON":
self.discard()
else:
self.error(129)
# Create function's true name used in assembly.
if _func["call"] == "CDECL":
_func["tname"] = "_{}".format(_func["name"])
elif _func["call"] == "STDCALL":
_func["tname"] = "_{}@{}".format(_func["name"], _func["param"]*self.word)
else:
self.error(130)
if not _func["prototype"]:
# Add the function to the list of compound statements.
com = self.push_compound(before="xor {0}, {0}".format(self.a()),
after="mov {0}, {1}\npop {1}\nret".format(self.sp(), self.bp()),
func=True)
## NOTE: Fix this function!!!
# Adjust the start and end points of the function.
com["end"] = com["start"]
self.l -= 1
com["start"] = _func["tname"]
# Add the function to the output.
self.names.append(_func["name"])
self.in_func = True
#self.add_pretty()
self.add("{}:".format(com["start"]))
self.add("push {}".format(self.bp()))
self.add("mov {}, {}".format(self.bp(), self.sp()))
self.add_pretty()
# Make sure we save how many parameters this function has.
self.funcs[_func["name"]] = _func
def do_return(self):
        # Get rid of the return token.
self.discard()
# Create a list of tokens for math.
math_list = []
# Get all the math tokens.
while self.peek()[0][0] not in ("SEMICOLON", "EC"):
math_list.append(self.peek()[0])
self.discard()
# If this is a semicolon then get rid of it.
if self.peek()[0][0] == "SEMICOLON":
self.discard()
# Do the math!
a = self.math(math_list)
# Add some some space in the output assembly.
#self.add()
if a == None:
self.add("xor {}, {}".format(self.a(), self.a()))
elif a[0] == "NAME":
self.add("mov {}, {}".format(self.a(), self.get_var(a[1])))
elif a[0] == "NUMBER":
self.add("mov {}, {}".format(self.a(), a[1]))
elif a[0] == "REGISTER":
if a[1] != self.a():
self.add("mov {}, {}".format(self.a(), a[1]))
else:
self.error(54)
# Get the function's end label.
com = self.get_func()
# Make sure we're in a function.
if com:
self.add("jmp {}".format(com["end"]))
else:
self.error(Error.RETURN_OUTSIDE_FUNC)
def do_break(self):
        # Get rid of the break token.
self.discard()
# If this is a semicolon then get rid of it.
if self.peek()[0][0] == "SEMICOLON":
self.discard()
else:
self.error(Error.EXPECT_SC)
# Get the end of the inner most loop statement.
com = self.get_loop()
# Can't break if you're not inside a loop statement!
if not com:
self.error(Error.BREAK_OUTSIDE_LOOP)
self.add("jmp {}".format(com["end"]))
def do_next(self):
        # Get rid of the next token.
self.discard()
# If this is a semicolon then get rid of it.
if self.peek()[0][0] == "SEMICOLON":
self.discard()
else:
self.error(Error.EXPECT_SC)
# Get the end of the inner most loop statement.
com = self.get_loop()
        # Can't use 'next' if you're not inside a loop statement!
if not com:
self.error(Error.NEXT_OUTSIDE_LOOP)
self.add("jmp {}".format(com["start"]))
def do_repeat(self):
# Get rid of the repeat token.
self.discard()
# Add the repeat loop to the list of compound statements.
com = self.push_compound(before="jmp .L{}".format(self.l+1), loop=True)
# Add a starting comment.
self.add("; REPEAT loop")
# Add the repeat loop's label.
self.add("{}:".format(com["start"]))
# Check to see if this is a simple statement.
if self.peek()[0][0] == "SC":
self.discard()
else:
self.next_simple = True
def do_while(self):
        # Get rid of the while token.
self.discard()
# Make sure there's a (
if self.peek()[0][0] != "SP":
self.error(Error.EXPECT_SP)
# Discard (
self.discard()
# Set a () counter.
c = 1
# Create a list of tokens for math.
math_list = []
while c:
if self.peek()[0][0] == "SP":
c += 1
elif self.peek()[0][0] == "EP":
c -= 1
elif self.peek()[0][0] == "SEMICOLON":
self.error()
            # Don't append a closing )
if c:
math_list.append(self.peek()[0])
self.discard()
# Add the while loop to the list of compound statements.
com = self.push_compound(before="jmp .L{}".format(self.l+1), loop=True)
# Add a starting comment.
self.add("; WHILE loop")
# Add while loop label.
self.add("{}:".format(com["start"]))
r = self.math(math_list)
# Check to see if this is a simple statement.
if self.peek()[0][0] == "SC":
self.discard()
else:
self.next_simple = True
if r[0] == "NUMBER":
self.add("mov {}, {}".format(self.a(), r[1]))
r = ("REGISTER", self.a(), -1, -1)
# Skip loop if the expression is false.
self.add("test {0}, {0}".format(r[1]))
self.add("je {}".format(com["end"]))
#self.add()
def do_if(self):
# Get rid of the if token.
self.discard()
# Make sure there's a (
if self.peek()[0][0] != "SP":
self.error(Error.EXPECT_SP)
# Discard (
self.discard()
# Set a () counter.
c = 1
# Create a list of tokens for math.
math_list = []
while c:
if self.peek()[0][0] == "SP":
c += 1
elif self.peek()[0][0] == "EP":
c -= 1
elif self.peek()[0][0] == "SEMICOLON":
self.error()
            # Don't append a closing )
if c:
math_list.append(self.peek()[0])
self.discard()
# Add the if condition to the list of compound statements.
com = self.push_compound(_if=True)
# Add a starting comment.
self.add("; IF conditional")
# Add if condition start label.
self.add("{}:".format(com["start"]))
r = self.math(math_list)
# Check to see if this is a simple statement.
if self.peek()[0][0] == "SC":
self.discard()
else:
self.next_simple = True
if r[0] == "NUMBER":
self.add("mov {}, {}".format(self.a(), r[1]))
r = ("REGISTER", self.a(), -1, -1)
if r[1][0] == "[" and r[1][-1] == "]":
self.add("mov {}, {} {}".format(self.a(), self.sys_prefix(), r[1]))
r = ("REGISTER", self.a(), -1, -1)
# Skip this statement if the test is false.
self.add("test {0}, {0}".format(r[1]))
self.add("je {}".format(com["end"]))
#self.add()
def do_else(self):
# Get rid of the else token.
self.discard()
# Add the else condition to the list of compound statements.
com = self.push_compound()
# Add a starting comment.
self.add("; ELSE conditional")
# Add else condition start label.
self.add("{}:".format(com["start"]))
# Check to see if this is a simple statement.
if self.peek()[0][0] == "SC":
self.discard()
else:
self.next_simple = True
def do_math(self):
# Create a list of tokens for math.
math_list = []
while True:
if self.peek()[0][0] == "SEMICOLON":
self.discard()
break
elif self.peek()[0][0] == "EC":
break
math_list.append(self.peek()[0])
self.discard()
self.math(math_list)
def math(self, math_list):
# The comment string for the equation. For debug purposes.
s = "; INFIX:"
# Dict to str.
for i in math_list:
s = "{} {}".format(s, i[1])
s = s.lstrip()
        # Add the comment string to the output.
        if self.ALIGNABLE_ANNOTATION is not None:
self.ALIGNABLE_ANNOTATION.export(outfile, level, namespace_, name_='ALIGNABLE_ANNOTATION', pretty_print=pretty_print)
if self.REF_ANNOTATION is not None:
self.REF_ANNOTATION.export(outfile, level, namespace_, name_='REF_ANNOTATION', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ALIGNABLE_ANNOTATION':
obj_ = alignableType.factory()
obj_.build(child_)
self.ALIGNABLE_ANNOTATION = obj_
obj_.original_tagname_ = 'ALIGNABLE_ANNOTATION'
elif nodeName_ == 'REF_ANNOTATION':
obj_ = refAnnoType.factory()
obj_.build(child_)
self.REF_ANNOTATION = obj_
obj_.original_tagname_ = 'REF_ANNOTATION'
# end class annotationType
class alignableType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, TIME_SLOT_REF1=None, TIME_SLOT_REF2=None, SVG_REF=None, ANNOTATION_ID=None, EXT_REF=None, LANG_REF=None, CVE_REF=None, ANNOTATION_VALUE=None):
self.original_tagname_ = None
self.TIME_SLOT_REF1 = _cast(None, TIME_SLOT_REF1)
self.TIME_SLOT_REF2 = _cast(None, TIME_SLOT_REF2)
self.SVG_REF = _cast(None, SVG_REF)
self.ANNOTATION_ID = _cast(None, ANNOTATION_ID)
self.EXT_REF = _cast(None, EXT_REF)
self.LANG_REF = _cast(None, LANG_REF)
self.CVE_REF = _cast(None, CVE_REF)
self.ANNOTATION_VALUE = ANNOTATION_VALUE
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, alignableType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if alignableType.subclass:
return alignableType.subclass(*args_, **kwargs_)
else:
return alignableType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ANNOTATION_VALUE(self): return self.ANNOTATION_VALUE
def set_ANNOTATION_VALUE(self, ANNOTATION_VALUE): self.ANNOTATION_VALUE = ANNOTATION_VALUE
def get_TIME_SLOT_REF1(self): return self.TIME_SLOT_REF1
def set_TIME_SLOT_REF1(self, TIME_SLOT_REF1): self.TIME_SLOT_REF1 = TIME_SLOT_REF1
def get_TIME_SLOT_REF2(self): return self.TIME_SLOT_REF2
def set_TIME_SLOT_REF2(self, TIME_SLOT_REF2): self.TIME_SLOT_REF2 = TIME_SLOT_REF2
def get_SVG_REF(self): return self.SVG_REF
def set_SVG_REF(self, SVG_REF): self.SVG_REF = SVG_REF
def get_ANNOTATION_ID(self): return self.ANNOTATION_ID
def set_ANNOTATION_ID(self, ANNOTATION_ID): self.ANNOTATION_ID = ANNOTATION_ID
def get_EXT_REF(self): return self.EXT_REF
def set_EXT_REF(self, EXT_REF): self.EXT_REF = EXT_REF
def get_LANG_REF(self): return self.LANG_REF
def set_LANG_REF(self, LANG_REF): self.LANG_REF = LANG_REF
def get_CVE_REF(self): return self.CVE_REF
def set_CVE_REF(self, CVE_REF): self.CVE_REF = CVE_REF
def hasContent_(self):
if (
self.ANNOTATION_VALUE is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='alignableType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('alignableType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='alignableType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='alignableType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='alignableType'):
if self.TIME_SLOT_REF1 is not None and 'TIME_SLOT_REF1' not in already_processed:
already_processed.add('TIME_SLOT_REF1')
outfile.write(' TIME_SLOT_REF1=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.TIME_SLOT_REF1), input_name='TIME_SLOT_REF1')), ))
if self.TIME_SLOT_REF2 is not None and 'TIME_SLOT_REF2' not in already_processed:
already_processed.add('TIME_SLOT_REF2')
outfile.write(' TIME_SLOT_REF2=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.TIME_SLOT_REF2), input_name='TIME_SLOT_REF2')), ))
if self.SVG_REF is not None and 'SVG_REF' not in already_processed:
already_processed.add('SVG_REF')
outfile.write(' SVG_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.SVG_REF), input_name='SVG_REF')), ))
if self.ANNOTATION_ID is not None and 'ANNOTATION_ID' not in already_processed:
already_processed.add('ANNOTATION_ID')
outfile.write(' ANNOTATION_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.ANNOTATION_ID), input_name='ANNOTATION_ID')), ))
if self.EXT_REF is not None and 'EXT_REF' not in already_processed:
already_processed.add('EXT_REF')
outfile.write(' EXT_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.EXT_REF), input_name='EXT_REF')), ))
if self.LANG_REF is not None and 'LANG_REF' not in already_processed:
already_processed.add('LANG_REF')
outfile.write(' LANG_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LANG_REF), input_name='LANG_REF')), ))
if self.CVE_REF is not None and 'CVE_REF' not in already_processed:
already_processed.add('CVE_REF')
outfile.write(' CVE_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.CVE_REF), input_name='CVE_REF')), ))
def exportChildren(self, outfile, level, namespace_='', name_='alignableType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ANNOTATION_VALUE is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ANNOTATION_VALUE>%s</ANNOTATION_VALUE>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ANNOTATION_VALUE), input_name='ANNOTATION_VALUE')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('TIME_SLOT_REF1', node)
if value is not None and 'TIME_SLOT_REF1' not in already_processed:
already_processed.add('TIME_SLOT_REF1')
self.TIME_SLOT_REF1 = value
value = find_attr_value_('TIME_SLOT_REF2', node)
if value is not None and 'TIME_SLOT_REF2' not in already_processed:
already_processed.add('TIME_SLOT_REF2')
self.TIME_SLOT_REF2 = value
value = find_attr_value_('SVG_REF', node)
if value is not None and 'SVG_REF' not in already_processed:
already_processed.add('SVG_REF')
self.SVG_REF = value
value = find_attr_value_('ANNOTATION_ID', node)
if value is not None and 'ANNOTATION_ID' not in already_processed:
already_processed.add('ANNOTATION_ID')
self.ANNOTATION_ID = value
value = find_attr_value_('EXT_REF', node)
if value is not None and 'EXT_REF' not in already_processed:
already_processed.add('EXT_REF')
self.EXT_REF = value
value = find_attr_value_('LANG_REF', node)
if value is not None and 'LANG_REF' not in already_processed:
already_processed.add('LANG_REF')
self.LANG_REF = value
value = find_attr_value_('CVE_REF', node)
if value is not None and 'CVE_REF' not in already_processed:
already_processed.add('CVE_REF')
self.CVE_REF = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ANNOTATION_VALUE':
ANNOTATION_VALUE_ = child_.text
ANNOTATION_VALUE_ = self.gds_validate_string(ANNOTATION_VALUE_, node, 'ANNOTATION_VALUE')
self.ANNOTATION_VALUE = ANNOTATION_VALUE_
# end class alignableType
class refAnnoType(GeneratedsSuper):
"""This is in fact a reference to the parent annotation."""
subclass = None
superclass = None
def __init__(self, ANNOTATION_REF=None, PREVIOUS_ANNOTATION=None, ANNOTATION_ID=None, EXT_REF=None, LANG_REF=None, CVE_REF=None, ANNOTATION_VALUE=None):
self.original_tagname_ = None
self.ANNOTATION_REF = _cast(None, ANNOTATION_REF)
self.PREVIOUS_ANNOTATION = _cast(None, PREVIOUS_ANNOTATION)
self.ANNOTATION_ID = _cast(None, ANNOTATION_ID)
self.EXT_REF = _cast(None, EXT_REF)
self.LANG_REF = _cast(None, LANG_REF)
self.CVE_REF = _cast(None, CVE_REF)
self.ANNOTATION_VALUE = ANNOTATION_VALUE
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, refAnnoType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if refAnnoType.subclass:
return refAnnoType.subclass(*args_, **kwargs_)
else:
return refAnnoType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ANNOTATION_VALUE(self): return self.ANNOTATION_VALUE
def set_ANNOTATION_VALUE(self, ANNOTATION_VALUE): self.ANNOTATION_VALUE = ANNOTATION_VALUE
def get_ANNOTATION_REF(self): return self.ANNOTATION_REF
def set_ANNOTATION_REF(self, ANNOTATION_REF): self.ANNOTATION_REF = ANNOTATION_REF
def get_PREVIOUS_ANNOTATION(self): return self.PREVIOUS_ANNOTATION
def set_PREVIOUS_ANNOTATION(self, PREVIOUS_ANNOTATION): self.PREVIOUS_ANNOTATION = PREVIOUS_ANNOTATION
def get_ANNOTATION_ID(self): return self.ANNOTATION_ID
def set_ANNOTATION_ID(self, ANNOTATION_ID): self.ANNOTATION_ID = ANNOTATION_ID
def get_EXT_REF(self): return self.EXT_REF
def set_EXT_REF(self, EXT_REF): self.EXT_REF = EXT_REF
def get_LANG_REF(self): return self.LANG_REF
def set_LANG_REF(self, LANG_REF): self.LANG_REF = LANG_REF
def get_CVE_REF(self): return self.CVE_REF
def set_CVE_REF(self, CVE_REF): self.CVE_REF = CVE_REF
def hasContent_(self):
if (
self.ANNOTATION_VALUE is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='refAnnoType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('refAnnoType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='refAnnoType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='refAnnoType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='refAnnoType'):
if self.ANNOTATION_REF is not None and 'ANNOTATION_REF' not in already_processed:
already_processed.add('ANNOTATION_REF')
outfile.write(' ANNOTATION_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.ANNOTATION_REF), input_name='ANNOTATION_REF')), ))
if self.PREVIOUS_ANNOTATION is not None and 'PREVIOUS_ANNOTATION' not in already_processed:
already_processed.add('PREVIOUS_ANNOTATION')
outfile.write(' PREVIOUS_ANNOTATION=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.PREVIOUS_ANNOTATION), input_name='PREVIOUS_ANNOTATION')), ))
if self.ANNOTATION_ID is not None and 'ANNOTATION_ID' not in already_processed:
already_processed.add('ANNOTATION_ID')
outfile.write(' ANNOTATION_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.ANNOTATION_ID), input_name='ANNOTATION_ID')), ))
if self.EXT_REF is not None and 'EXT_REF' not in already_processed:
already_processed.add('EXT_REF')
outfile.write(' EXT_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.EXT_REF), input_name='EXT_REF')), ))
if self.LANG_REF is not None and 'LANG_REF' not in already_processed:
already_processed.add('LANG_REF')
outfile.write(' LANG_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LANG_REF), input_name='LANG_REF')), ))
if self.CVE_REF is not None and 'CVE_REF' not in already_processed:
already_processed.add('CVE_REF')
outfile.write(' CVE_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.CVE_REF), input_name='CVE_REF')), ))
def exportChildren(self, outfile, level, namespace_='', name_='refAnnoType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ANNOTATION_VALUE is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ANNOTATION_VALUE>%s</ANNOTATION_VALUE>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ANNOTATION_VALUE), input_name='ANNOTATION_VALUE')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('ANNOTATION_REF', node)
if value is not None and 'ANNOTATION_REF' not in already_processed:
already_processed.add('ANNOTATION_REF')
self.ANNOTATION_REF = value
value = find_attr_value_('PREVIOUS_ANNOTATION', node)
if value is not None and 'PREVIOUS_ANNOTATION' not in already_processed:
already_processed.add('PREVIOUS_ANNOTATION')
self.PREVIOUS_ANNOTATION = value
value = find_attr_value_('ANNOTATION_ID', node)
if value is not None and 'ANNOTATION_ID' not in already_processed:
already_processed.add('ANNOTATION_ID')
self.ANNOTATION_ID = value
value = find_attr_value_('EXT_REF', node)
if value is not None and 'EXT_REF' not in already_processed:
already_processed.add('EXT_REF')
self.EXT_REF = value
value = find_attr_value_('LANG_REF', node)
if value is not None and 'LANG_REF' not in already_processed:
already_processed.add('LANG_REF')
self.LANG_REF = value
value = find_attr_value_('CVE_REF', node)
if value is not None and 'CVE_REF' not in already_processed:
already_processed.add('CVE_REF')
self.CVE_REF = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ANNOTATION_VALUE':
ANNOTATION_VALUE_ = child_.text
ANNOTATION_VALUE_ = self.gds_validate_string(ANNOTATION_VALUE_, node, 'ANNOTATION_VALUE')
self.ANNOTATION_VALUE = ANNOTATION_VALUE_
# end class refAnnoType
class lingType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, LINGUISTIC_TYPE_ID=None, TIME_ALIGNABLE=None, CONSTRAINTS=None, GRAPHIC_REFERENCES=None, CONTROLLED_VOCABULARY_REF=None, EXT_REF=None, LEXICON_REF=None):
self.original_tagname_ = None
self.LINGUISTIC_TYPE_ID = _cast(None, LINGUISTIC_TYPE_ID)
self.TIME_ALIGNABLE = _cast(bool, TIME_ALIGNABLE)
self.CONSTRAINTS = _cast(None, CONSTRAINTS)
self.GRAPHIC_REFERENCES = _cast(bool, GRAPHIC_REFERENCES)
self.CONTROLLED_VOCABULARY_REF = _cast(None, CONTROLLED_VOCABULARY_REF)
self.EXT_REF = _cast(None, EXT_REF)
        self.LEXICON_REF = _cast(None, LEXICON_REF)
<gh_stars>1-10
"""
Sentinel-1 EW reader.
The script provides Sentinel1Product and Sentinel1Band classes.
Sentinel1Product class describes the product and consists of two Sentinel1Band classes, landmask
information (and function to find it), location of borders (x_min and x_max).
Sentinel1Band class describes a band of Sentinel-1 product.
In addition to band data, the class includes information about noise, calibration parameters,
geolocation grid and functions to calculate these parameters.
NOTE: currently incidence angle correction can not be turned off for HH band.
@author: <NAME>
"""
import os
import zipfile
from xml.etree import ElementTree
from datetime import datetime
from multiprocessing.pool import ThreadPool
from io import BytesIO
from functools import partial
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import griddata
from scipy.interpolate import interp1d
from PIL import Image
from .utils import scene_time
Image.MAX_IMAGE_PIXELS = None # turn off the warning about large image size
class Sentinel1Band(object):
""" Represents a Sentinel-1 band of a Sentinel-1 product.
It is initialized with paths for data, annotation, calibration and noise files as well as with the band name.
It has the following attributes:
des - band designator or short name: 'hh' or 'hv'
data_path - path to the tiff file that contains band data (or a ZipExtFile instance)
noise_path - path to the xml file that contains noise LUT (or a ZipExtFile instance)
calibration_path - path to the xml file that contains the calibration parameters LUT (or a ZipExtFile instance)
annotation_path - path to the xml file with annotation (or a ZipExtFile instance)
denoised - flag, showing if data has been denoised (to prevent double noise removal)
Image band max and min values are taken from kmeans cluster analysis of a set of images.
For more information look into 'gray_level_reduction.py'
The following methods are available:
read_data() -- should be executed first
read_noise()
read_calibration()
subtract_noise()
incidence_angle_correction(elevation_angle)
"""
def __init__(self, data_path, annotation_path, calibration_path, noise_path, band_name):
self.des = band_name.lower()
self.img_max = 4 if self.des == 'hh' else -15
self.img_min = -29 if self.des == 'hh' else -32
self.incidence_angle_correction_coefficient = 0.213 if self.des == 'hh' else 0.053
self.data_path = data_path
self.noise_path = noise_path
self.calibration_path = calibration_path
self.annotation_path = annotation_path
self.denoised = False
def read_data(self):
if type(self.data_path) is str:
data = Image.open(self.data_path)
else:
unziped_bytes = BytesIO(self.data_path.read())
data = Image.open(unziped_bytes)
self.data = np.array(data, dtype=np.float32)
self.denoised = False
self.X, self.Y = self.data.shape
self.nodata_mask = np.where(self.data == 0, True, False)
def read_noise(self, azimuth_noise=True):
""" Read noise table from the band noise file, interpolate it for the entire image.
self.noise has same shape as self.data
"""
if not hasattr(self, 'X') or not hasattr(self, 'Y'):
print('Read data first.')
return False
""" First, deal with noise in the range direction. """
noise_file = ElementTree.parse(self.noise_path).getroot()
noise = np.array([j for i in noise_file[1] for j in i[3].text.split(' ')], dtype=np.float32)
noise_y = np.array([j for i in noise_file[1] for j in i[2].text.split(' ')], dtype=np.int16)
noise_x = np.array([i[1].text for i in noise_file[1] for j in range(int(i[2].get('count')))], dtype=np.int16)
"""
2D interpolation:
RectBivariateSpline can be used for a regular grid only, which is not an option for
Sentinel-1 since the noise data can contain a different number of values for each row.
interp2d introduces horizontal stripes into the noise data
griddata seems to be the best solution
"""
x_new = np.arange(0, self.X, 1, dtype=np.int16)
y_new = np.arange(0, self.Y, 1, dtype=np.int16)
xx, yy = np.meshgrid(y_new, x_new)
self.noise = griddata(np.vstack((noise_y, noise_x)).transpose(), noise, (xx, yy),
method='linear', fill_value=0).astype(np.float32)
""" if noise data has incorrect units (before July 2015) than scale it:
noise_scaled = noise * k_noise * DN
where k_noise is 56065.87 (given at a ESA document),
DN is given in the band calibration file (index 6)
"""
if self.noise.max() < 1:
cf = ElementTree.parse(self.calibration_path).getroot()
DN = float(cf[2][0][6].text.split(' ')[0])
self.noise *= 56065.87 * DN
""" Second, take into account noise in the azimuth direction (if possible).
According to https://qc.sentinel1.eo.esa.int/ipf/, only products taken after 13 March 2018 contain this information. """
if azimuth_noise:
try:
self._read_azimuth_noise(noise_file)
self.noise *= self.azimuth_noise
except Exception:
print('Failed to read azimuth noise for {0} (this is normal for Sentinel-1 scenes taken before 13 March 2018).'.format(self.noise_path))
def _read_azimuth_noise(self, noise_file):
""" Read scalloping noise data.
The noise file should be passed here for support of zip-archives.
If .SAFE folder is used as input for the Sentinel1Product then noise_file can be taken from self.noise_file.
"""
self.scalloping_lut = [{'line_min': int(i[1].text), 'line_max': int(i[3].text), 'sample_min': int(i[2].text), 'sample_max': int(i[4].text),
'lines': np.array(i[5].text.split(' '), dtype=np.int16), 'noise': np.array(i[6].text.split(' '), dtype=np.float32)} for i in noise_file[2]]
""" Interpolate scalloping noise """
self.azimuth_noise = np.zeros((self.X, self.Y), dtype=np.float32)
for patch in self.scalloping_lut:
scalloping = interp1d(patch['lines'], patch['noise'], kind='linear', fill_value='extrapolate')
noise_line = scalloping(np.arange(patch['line_min'], patch['line_max'] + 1))
self.azimuth_noise[patch['line_min']:patch['line_max'] + 1, patch['sample_min']:patch['sample_max'] + 1] = noise_line[:, np.newaxis]
def read_calibration(self):
""" Read calibration table from product folder.
cal_par - calibration parameter number: 3 - SigmaNought, 4 - BetaNought,
5 - gamma, 6 - dn. These parameters are given in the band calibration file
self.calibration has same shape as self.data
All 4 parameters are read, then only the gamma LUT is interpolated for the entire image.
"""
if not hasattr(self, 'X') or not hasattr(self, 'Y'):
print('Read data first.')
return False
calibration_file = ElementTree.parse(self.calibration_path).getroot()
calibration_x = int(calibration_file[2].get('count'))
calibration_y = int(calibration_file[2][0][2].get('count'))
result = []
for cal_par in [3, 4, 5, 6]:
calibration = np.array([i[cal_par].text.split(' ') for i in calibration_file[2]], dtype=np.float32).ravel()
result.append(np.array(calibration).reshape(calibration_x, calibration_y))
self.sigma0, self.beta0, self.gamma, self.dn = result
self.calibration_azimuth_list = [int(i) for i in calibration_file[2][0][2].text.split(' ')]
self.calibration_range_list = [int(i) for i in [j[1].text for j in calibration_file[2]]]
gamma_interp = RectBivariateSpline(self.calibration_range_list, self.calibration_azimuth_list, self.gamma, kx=1, ky=1)
x_new = np.arange(0, self.X, 1, dtype=np.int16)
y_new = np.arange(0, self.Y, 1, dtype=np.int16)
self.calibration = gamma_interp(x_new, y_new).astype(np.float32)
def subtract_noise(self):
""" Calibrated and denoised data is equal to
(data**2 - Noise) / Calibration**2
"""
if not hasattr(self, 'data'):
print('Read data first.')
return False
elif not hasattr(self, 'noise'):
print('Read noise first.')
return False
elif not hasattr(self, 'calibration'):
print('Read calibration first.')
return False
if not self.denoised:
self.data = self.data**2 - self.noise
self.data = self.data / self.calibration**2
threshold = 1 / self.calibration.max()
self.data[self.data < threshold] = threshold
self.data = np.log10(self.data) * 10
self.denoised = True
else:
print('Product is already denoised.')
def normalize(self, output_range=[0, 1], extend=True):
""" Scale data to output_range.
"""
""" Normalize """
if extend:
self.data -= self.data.min()
self.data /= self.data.max()
else:
self.data -= self.img_min
self.data /= self.img_max - self.img_min
""" Scale to output_range """
self.data = self.data * (output_range[1] - output_range[0]) + output_range[0]
def clip_normalize(self, output_range=[0, 1], extend=True):
""" Clip data and normalize it
"""
self.clip()
self.normalize(output_range=output_range, extend=extend)
def clip(self):
self.data[self.data > self.img_max] = self.img_max
self.data[self.data < self.img_min] = self.img_min
def extend(self):
""" Return normalized band data to clipped or original
"""
self.data *= (self.img_max - self.img_min)
self.data += self.img_min
def incidence_angle_correction(self, elevation_angle):
self.data = self.data + self.incidence_angle_correction_coefficient * (elevation_angle - elevation_angle.min())
def remove_useless_data(self):
self.calibration = None
self.noise = None
self.elevation_angle = None
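# Illustrative usage sketch of the workflow listed in the Sentinel1Band
# docstring above. The file paths are placeholders, and elevation_angle is
# assumed to be a 2D array derived from the product annotation (normally
# provided by Sentinel1Product); this is a sketch, not part of the API:
#
#     band = Sentinel1Band('measurement/hh.tiff', 'annotation/hh.xml',
#                          'annotation/calibration/calibration-hh.xml',
#                          'annotation/calibration/noise-hh.xml', 'HH')
#     band.read_data()
#     band.read_calibration()
#     band.read_noise()
#     band.subtract_noise()                               # backscatter in dB after this call
#     band.incidence_angle_correction(elevation_angle)    # elevation_angle: 2D array
#     band.clip_normalize()                               # scale to [0, 1] for visualization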
class Sentinel1Product(object):
""" The main class that represents a Sentinel-1 EW product.
It contains information about the scene and band data in Sentinel1Band objects (one object per band).
Input is expected to be a path to a Sentinel-1 scene (both *.SAFE and *.zip are supported).
"""
def __init__(self, product_path):
""" Set paths to auxilary data.
Create Sentinel1Band object for each band in the product.
Parse date and time of the product into self.timestamp
"""
""" If *product_path* is a folder, set path to data and auxilary data,
otherwise unpack it first (create tmp_folder if it does not exist)
"""
try:
self.product_name = os.path.basename(product_path).split('.')[0]
print(self.product_name)
except Exception:
pass
def _band_number(x):
""" Function expects a .xml filename from Sentinel-1 product folder.
It returns the band number (the last character before the file extension, *00<band_num>.xml or *00<band_num>.tiff)
"""
return int(os.path.split(x)[1].split('.')[0][-1])
if os.path.isdir(product_path):
self.zip = False
self.product_path = os.path.abspath(product_path)
self.data_files = sorted([os.path.join(self.product_path, 'measurement', item) for item in os.listdir(os.path.join(self.product_path, 'measurement'))], key=_band_number)
self.annotation_files = sorted([os.path.join(self.product_path, 'annotation', item) for item in os.listdir(os.path.join(self.product_path, 'annotation')) if '.xml' in item], key=_band_number)
self.noise_files = sorted([os.path.join(self.product_path, 'annotation', 'calibration', item) for item in os.listdir(os.path.join(self.product_path, 'annotation', 'calibration')) if 'noise' in item], key=_band_number)
self.calibration_files = sorted([os.path.join(self.product_path, 'annotation', 'calibration', item) for item in os.listdir(os.path.join(self.product_path, 'annotation', 'calibration')) if 'calibration' in item], key=_band_number)
elif not os.path.isfile(product_path):
print('File {0} does not exist.'.format(product_path))
return False
else:
if not zipfile.is_zipfile(product_path):
print('File {0} | |
None:
# ax.set_xlim(xlim)
# if ylim is not None:
# ax.set_ylim(ylim)
episode_length = data["experiment"]["environment"]["steps_per_episode"]
# Plot with the standard error
for i in range(len(ind)):
timesteps, mean, std = exp.get_mean_err(data, type_, ind[i],
smooth_over, exp.stderr,
keep_shape=keep_shape)
timesteps = np.array(timesteps[:last_ind]) * timestep_multiply[i]
mean = mean[:last_ind] / episode_length
std = std[:last_ind] / episode_length
# Plot based on colours
label = name
if colours is not None:
_plot_shaded(ax, timesteps, mean, std, colours[i], label, alpha)
else:
_plot_shaded(ax, timesteps, mean, std, None, label, alpha)
ax.legend()
fig.show()
return fig, ax
def _plot_mean_with_stderr_episodic(data, type_, ind, smooth_over, fig=None,
ax=None, figsize=(12, 6), name="",
last_ind=-1, xlabel="Episodes",
ylabel="Average Return", xlim=None,
ylim=None, alpha=0.2, colours=None,
keep_shape=False):
"""
Plots the average training or evaluation return over all runs for a
single data dictionary on an episodic environment. Plots shaded regions
as standard error.
Parameters
----------
data : dict
The Python data dictionary generated from running main.py
type_ : str
Which type of data to plot, one of "eval" or "train"
ind : iter of int
The list of hyperparameter settings indices to plot
smooth_over : int
The number of previous data points to smooth over. Note that this
is *not* the number of timesteps to smooth over, but rather the number
of data points to smooth over. For example, if you save the return
every 1,000 timesteps, then setting this value to 15 will smooth
over the last 15 readings, or 15,000 timesteps.
fig : plt.figure
The figure to plot on, by default None. If None, creates a new figure
ax : plt.Axes
The axis to plot on, by default None, If None, creates a new axis
figsize : tuple(int, int)
The size of the figure to plot
name : str, optional
The name of the agent, used for the legend
last_ind : int, optional
The index of the last element to plot in the returns list,
by default -1. This is useful if you want to plot many things on the
same axis, but all of which have a different number of elements. This
way, we can plot the first last_ind elements of each returns for each
agent.
xlim : float, optional
The x limit for the plot, by default None
ylim : float, optional
The y limit for the plot, by default None
alpha : float, optional
The alpha channel for the plot, by default 0.2
colours : list of str
The colours to use for each plot of each hyperparameter setting
env_type : str, optional
The type of environment, one of 'continuing', 'episodic'. By default
'continuing'
Returns
-------
plt.figure, plt.Axes
The figure and axes of the plot
Raises
------
ValueError
When an axis is passed but no figure is passed
When an appropriate number of colours is not specified to cover all
hyperparameter settings
"""
if colours is not None and len(colours) != len(ind):
raise ValueError("must have one colour for each hyperparameter " +
"setting")
if ax is not None and fig is None:
raise ValueError("must pass figure when passing axis")
if colours is None:
colours = _get_default_colours(ind)
# Set up figure
if ax is None and fig is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
# Plot with the standard error
for i in range(len(ind)):
data = exp.reduce_episodes(data, ind[i], type_=type_)
# data has consistent # of episodes, so treat as env_type="continuing"
_, mean, std = exp.get_mean_err(data, type_, ind[i], smooth_over,
exp.stderr, keep_shape=keep_shape)
mean = mean[:last_ind]
std = std[:last_ind]
episodes = np.arange(mean.shape[0])
# Plot based on colours
label = name
if colours is not None:
_plot_shaded(ax, episodes, mean, std, colours[i], label, alpha)
else:
_plot_shaded(ax, episodes, mean, std, None, label, alpha)
ax.legend()
ax.set_title(f"Average {type_.title()} Return per Run with Standard Error")
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
fig.show()
return fig, ax
def hyperparam_sensitivity_plot(dir, type_, hp_name, combo, env_config,
agent_config, figsize=(12, 6), fig=None,
ax=None, annotate=False, colour=None,
env_type="continuing", conf=False,
significance=0.1):
"""
Plots the hyperparameter sensitivity for hp_name, where the combo argument
determines which hyperparameters are held constant.
Given hyperparameters a, b, and c, let's say we want to get all
hyperparameter settings indices where a varies, and b and c are constant.
if a, b, and c can each be 1 or 2, then there are four ways that a can
vary with b and c remaining constant:
[
((a=1, b=1, c=1), (a=2, b=1, c=1)), combo = 0
((a=1, b=2, c=1), (a=2, b=2, c=1)), combo = 1
((a=1, b=1, c=2), (a=2, b=1, c=2)), combo = 2
((a=1, b=2, c=2), (a=2, b=2, c=2)) combo = 3
]
The combo argument indexes into this list of hyperparameter settings
Parameters
----------
dir : str
The directory containing all data dictionaries generated from running
main.py, separated into one file per HP index
type_ : str
Which type of data to plot, one of "eval" or "train"
combo : int
Determines the values of the constant hyperparameters. Given that
only one hyperparameter may vary, there are many different sets
having this hyperparameter varying with all others remaining constant
since each constant hyperparameter may take on many values. This
argument cycles through all sets of hyperparameter settings indices
that have only one hyperparameter varying and all others constant.
env_config : dict
The environment configuration file as a Python dictionary
agent_config : dict
The agent configuration file as a Python dictionary
figsize : tuple(int, int)
The size of the figure
fig : plt.figure, optional
The figure to plot on, by default None. If None, a new figure is
created
ax : plt.Axes, optional
The axis to plot on, by default None. If None, a new axis is created.
annotate : bool, optional
Whether or not to annotate each point drawn with its y-value
colour : str
The colour to use in the plot, by default None. If None, then default
values will be used.
Returns
-------
tuple of plt.figure, plt.Axes
The figure and axis that was plotted on
Raises
------
ValueError
If only one of fig, ax are specified
"""
if (fig is None and ax is not None) or (ax is None and fig is not None):
raise ValueError("fig and ax must both be specified if one is "
"specified")
if not conf:
stderr_fn = exp.get_mean_returns_with_stderr_hp_varying
hp_values, mean_returns, error = \
stderr_fn(dir, type_, hp_name, combo, env_config, agent_config,
after=0, env_type=env_type)
else:
conf_fn = exp.get_mean_returns_with_conf_hp_varying
hp_values, mean_returns, error = \
conf_fn(dir, type_, hp_name, combo, env_config, agent_config,
after=0, env_type=env_type, significance=significance)
# Set up the figure
if fig is None and ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
ax.set_title(f"{hp_name.title()} Hyperparameter Sensitivity Plot " +
f"over {type_.title()} Data")
ax.set_xlabel("Hyperparameter Values")
if env_type == "episodic":
ax.set_ylabel("Average Return")
elif env_type == "continuing":
ax.set_ylabel("Average Reward")
if colour is None:
colour = DEFAULT_COLOURS[0]
if env_type == "continuing":
ep_steps = env_config["steps_per_episode"]
mean_returns /= ep_steps
error /= ep_steps
_plot_shaded(ax, hp_values, mean_returns, error, colour, "",
alpha=0.25)
if annotate:
for coords in zip(hp_values, mean_returns):
ax.annotate(str(coords[1]), xy=coords)
return fig, ax
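# Conceptual sketch of the "combo" enumeration described in the docstring of
# hyperparam_sensitivity_plot above. This is NOT the implementation used by
# the exp module; it only illustrates how the settings where one
# hyperparameter varies (with the rest held fixed) can be enumerated. The
# dictionary of hyperparameter values is hypothetical:
#
#     from itertools import product
#     hps = {"a": [1, 2], "b": [1, 2], "c": [1, 2]}    # hypothetical sweep
#     varying = "a"
#     fixed_names = [k for k in hps if k != varying]
#     combos = list(product(*(hps[k] for k in fixed_names)))
#     # combos[combo] gives the constant values of b and c; the plotted curve
#     # then sweeps hps[varying] with those values held fixed.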
def hyperparam_sensitivity_plot_by_settings_index(dir, type_, hp_name, ind,
env_config, agent_config,
figsize=(12, 6),
annotate=False,
env_type="continuing",
conf=False,
significance=0.1):
"""
Plot hyperparameter sensitivity by hyperparameter settings index.
Plots the hyperparameter sensitivity plot where all
constant hyperparameters are equal to those defined by the
argument hyperparameter settings index. The only hyperparameter that
is allowed to vary is the one corresponding to hp_name.
Parameters
----------
dir : str
The directory containing all data dictionaries generated by running
main.py
type_ : str
Whether to plot the training or evaluation data, one of
'train', 'eval'
hp_name : str
The name of the hyperparameter to plot the sensitivity of
config : dict
A Python dictionary of the agent configuration file used to run
the experiment
ind : int
The hyperparameter settings index that should define the
values of all constant hyperparameters. Only the hyperparameter
defined by hp_name is allowed to vary, all others must be equal to
the values of those defined by the settings index ind.
env_config : dict
The environment | |
def sort_by_prim_type(self, to_sort=None):
by_prim_type = [[], [], []]
if to_sort is None:
to_sort = list()
for item in to_sort:
len_ = len(item)
by_prim_type[len_-2].append(item)
return by_prim_type
def set_primitive_indices(self, define_prims=None):
stretches, bends, dihedrals = self.sort_by_prim_type(define_prims)
self.set_bond_indices(stretches)
self.set_bending_indices(bends)
self.set_dihedral_indices(dihedrals)
def calculate(self, coords, attr=None):
coords3d = coords.reshape(-1, 3)
def per_type(func, ind):
val, grad = func(coords3d, ind, True)
return PrimitiveCoord(ind, val, grad)
self.bonds = list()
self.bends = list()
self.dihedrals = list()
for ind in self.bond_indices:
bonds = per_type(self.calc_stretch, ind)
self.bonds.append(bonds)
for ind in self.bending_indices:
bend = per_type(self.calc_bend, ind)
self.bends.append(bend)
for ind in self.dihedral_indices:
dihedral = per_type(self.calc_dihedral, ind)
self.dihedrals.append(dihedral)
int_coords = self.bonds + self.bends + self.dihedrals
if attr:
return np.array([getattr(ic,attr) for ic in int_coords])
return int_coords
def calculate_val_diffs(self, coords1, coords2):
vals1 = np.array(self.calculate(coords1, attr="val"))
vals2 = np.array(self.calculate(coords2, attr="val"))
return vals1-vals2
def calc_stretch(self, coords3d, bond_ind, grad=False):
n, m = bond_ind
bond = coords3d[m] - coords3d[n]
bond_length = np.linalg.norm(bond)
if grad:
bond_normed = bond / bond_length
row = np.zeros_like(coords3d)
# 1 / -1 correspond to the sign factor [1] Eq. 18
row[m,:] = bond_normed
row[n,:] = -bond_normed
row = row.flatten()
return bond_length, row
return bond_length
def calc_bend(self, coords3d, angle_ind, grad=False):
m, o, n = angle_ind
u_dash = coords3d[m] - coords3d[o]
v_dash = coords3d[n] - coords3d[o]
u_norm = np.linalg.norm(u_dash)
v_norm = np.linalg.norm(v_dash)
u = u_dash / u_norm
v = v_dash / v_norm
angle_rad = np.arccos(u.dot(v))
if grad:
# Eq. (24) in [1]
if self.are_parallel(u, v, angle_ind):
tmp_vec = np.array((1, -1, 1))
par = self.are_parallel(u, tmp_vec) and self.are_parallel(v, tmp_vec)
tmp_vec = np.array((-1, 1, 1)) if par else tmp_vec
w_dash = np.cross(u, tmp_vec)
else:
w_dash = np.cross(u, v)
w_norm = np.linalg.norm(w_dash)
w = w_dash / w_norm
uxw = np.cross(u, w)
wxv = np.cross(w, v)
row = np.zeros_like(coords3d)
# | m | n | o |
# -----------------------------------
# sign_factor(amo) | 1 | 0 | -1 | first_term
# sign_factor(ano) | 0 | 1 | -1 | second_term
first_term = uxw / u_norm
second_term = wxv / v_norm
row[m,:] = first_term
row[o,:] = -first_term - second_term
row[n,:] = second_term
row = row.flatten()
return angle_rad, row
return angle_rad
def calc_dihedral(self, coords3d, dihedral_ind, grad=False, cos_tol=1e-9):
m, o, p, n = dihedral_ind
u_dash = coords3d[m] - coords3d[o]
v_dash = coords3d[n] - coords3d[p]
w_dash = coords3d[p] - coords3d[o]
u_norm = np.linalg.norm(u_dash)
v_norm = np.linalg.norm(v_dash)
w_norm = np.linalg.norm(w_dash)
u = u_dash / u_norm
v = v_dash / v_norm
w = w_dash / w_norm
phi_u = np.arccos(u.dot(w))
phi_v = np.arccos(-w.dot(v))
uxw = np.cross(u, w)
vxw = np.cross(v, w)
cos_dihed = uxw.dot(vxw)/(np.sin(phi_u)*np.sin(phi_v))
# Restrict cos_dihed to [-1, 1]
if cos_dihed >= 1 - cos_tol:
dihedral_rad = 0
elif cos_dihed <= -1 + cos_tol:
dihedral_rad = np.arccos(-1)
else:
dihedral_rad = np.arccos(cos_dihed)
if dihedral_rad != np.pi:
# wxv = np.cross(w, v)
# if wxv.dot(u) < 0:
if vxw.dot(u) < 0:
dihedral_rad *= -1
if grad:
row = np.zeros_like(coords3d)
# | m | n | o | p |
# ------------------------------------------
# sign_factor(amo) | 1 | 0 | -1 | 0 | 1st term
# sign_factor(apn) | 0 | -1 | 0 | 1 | 2nd term
# sign_factor(aop) | 0 | 0 | 1 | -1 | 3rd term
# sign_factor(apo) | 0 | 0 | -1 | 1 | 4th term
sin2_u = np.sin(phi_u)**2
sin2_v = np.sin(phi_v)**2
first_term = uxw/(u_norm*sin2_u)
second_term = vxw/(v_norm*sin2_v)
third_term = uxw*np.cos(phi_u)/(w_norm*sin2_u)
fourth_term = -vxw*np.cos(phi_v)/(w_norm*sin2_v)
row[m,:] = first_term
row[n,:] = -second_term
row[o,:] = -first_term + third_term - fourth_term
row[p,:] = second_term - third_term + fourth_term
row = row.flatten()
return dihedral_rad, row
return dihedral_rad
def update_internals(self, new_cartesians, prev_internals):
new_internals = self.calculate(new_cartesians, attr="val")
internal_diffs = np.array(new_internals - prev_internals)
bond, bend, dihedrals = self.prim_indices
dihedral_diffs = internal_diffs[-len(dihedrals):]
# Find differences that are shifted by 2*pi
shifted_by_2pi = np.abs(np.abs(dihedral_diffs) - 2*np.pi) < np.pi/2
org = dihedral_diffs.copy()
new_dihedrals = new_internals[-len(dihedrals):]
new_dihedrals[shifted_by_2pi] -= 2*np.pi * np.sign(dihedral_diffs[shifted_by_2pi])
new_internals[-len(dihedrals):] = new_dihedrals
return new_internals
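# Worked example of the 2*pi wrap handled above (values in radians, purely
# illustrative): if a dihedral moves from +3.10 to -3.12, the raw difference
# is -6.22, which is within pi/2 of -2*pi, so the new value is shifted by
# +2*pi to 3.16. The reported change is then a continuous +0.06 instead of an
# apparent jump of -6.22.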
def transform_int_step(self, dq_in, ensure_convergence=True):
"""
This is always done in primitive internal coordinates so care
has to be taken that the supplied step is given in primitive internal
coordinates.
"""
logging.info('\n\tBack-transformation to cartesian coordinates...')
q_orig = self.prim_coords.copy()
geom_orig = self.cart_coords.copy()
q_target = q_orig + dq_in
dq = dq_in.copy()
conv = False # is back-transformation converged?
if ensure_convergence:
cnt = -1
while not conv:
cnt += 1
if cnt > 0:
logging.info("\tReducing step-size by a factor of {:d}.".format(2 * cnt))
dq[:] = dq_in / (2.0 * cnt)
conv, dx = self.back_transformation(dq)
if not conv:
self._prim_coords = q_orig
self.cart_coords = geom_orig
if cnt == 5:
logging.warning(
"\tUnable to back-transform even 1/10th of the desired step rigorously."
+ "\tQuitting with previous geometry.")
conv, dx = self.back_transformation(dq)
break
if conv and cnt > 0: # We were able to take a modest step. Try to complete it.
logging.info(
"\tAble to take a small step; trying another partial back-transformations.\n")
for j in range(1, 2 * cnt):
logging.info("\tMini-step {:d} of {:d}.\n".format(j + 1, 2 * cnt))
dq[:] = dq_in / (2 * cnt)
conv, mdx = self.back_transformation(dq)
dx += mdx
if not conv:
self._prim_coords = q_orig
self.cart_coords = geom_orig
if cnt == 5:
logging.warning(
"\tCouldn't converge this mini-step; quitting with previous geometry.\n")
# raise SamplingError('Couldn\'t converge to targeted internal coordinate even with 1/10th of the desired step.')
dq = dq_in.copy()
conv, dx = self.back_transformation(dq)
conv = True
break
else: # try to back-transform, but continue even if desired dq is not achieved
conv, dx = self.back_transformation(dq)
intco_lbls, qShow_orig, qShow_target, dqShow, qShow_final = [], [], [], [], []
bonds, bends, dihedrals = self.prim_indices
for i, bond in enumerate(bonds):
q = self.prim_coords[i]
intco_lbls.append('R' + str(tuple(bond + 1)).replace(" ", ""))
qShow_orig.append(q_orig[i])
qShow_target.append(q_target[i])
dqShow.append(q - qShow_orig[i])
qShow_final.append(q)
for i, bend in enumerate(bends):
q = self.prim_coords[len(bonds) + i] * 180 / np.pi
intco_lbls.append('B' + str(tuple(bend + 1)).replace(" ", ""))
qShow_orig.append(q_orig[len(bonds) + i] * 180 / np.pi)
qShow_target.append(q_target[len(bonds) + i] * 180 / np.pi)
dqShow.append(q - qShow_orig[len(bonds) + i])
qShow_final.append(q)
for i, dihedral in enumerate(dihedrals):
q = self.prim_coords[len(bonds) + len(bends) + i] * 180 / np.pi
intco_lbls.append('D' + str(tuple(dihedral + 1)).replace(" ", ""))
qShow_orig.append(q_orig[len(bonds) + len(bends) + i] * 180 / np.pi)
qShow_target.append(q_target[len(bonds) + len(bends) + i] * 180 / np.pi)
dqShow.append(q - qShow_orig[len(bonds) + len(bends) + i])
qShow_final.append(q)
# Make sure final Dq is actual change
frag_report = "\tReport of back-transformation: (au)\n"
frag_report += "\n\t int q_final q_target Error\n"
frag_report += "\t -------------------------------------------------------------\n"
for i in range(len(dq_in)):
frag_report += ("\t %-16s=%16.6f%14.6f%14.6f\n"
% (intco_lbls[i], qShow_final[i], qShow_target[i], (qShow_final[i] - qShow_target[i])))
frag_report += "\t -------------------------------------------------------------\n"
logging.debug(frag_report)
coordinate_change_report = (
"\n\t---Internal Coordinate Step in ANG or DEG, aJ/ANG or AJ/DEG ---\n")
coordinate_change_report += (
"\t -------------------------------------------------------------\n")
coordinate_change_report += (
"\t Coordinate Previous Change New\n")
coordinate_change_report += (
"\t ---------- -------- ------ ------\n")
for i in range(len(dq_in)):
coordinate_change_report += ("\t %-16s=%16.6f%14.6f%14.6f\n"
% (intco_lbls[i], qShow_orig[i], dqShow[i], qShow_final[i]))
coordinate_change_report += (
"\t -------------------------------------------------------------\n")
logging.info(coordinate_change_report)
return dx
def back_transformation(self, dq, bt_dx_conv=1.0e-6, bt_max_iter=100):
dx_rms_last = -1
q_orig = self.prim_coords.copy()
q_target = q_orig + dq
prev_geom = self.cart_coords.copy() # cart geometry to start each iter
geom = self.cart_coords.copy()
bond, bend, dihedrals = self.prim_indices
# for i in set(self.shift_pi):
# step[len(bond)+i] *= -1
target_bends = q_target[len(bond):-(len(dihedrals))]
for i, target_bend in enumerate(target_bends):
bendi = tuple(bend[i] + 1)
if target_bend > np.pi:
# TODO solve target_bend > np.pi situation
# target_bends[i] = 2*np.pi - target_bends[i]
# self.shift_pi.append(i)
raise Exception('A sampling bending angle of {} is over 180°.'.format(bendi))
elif target_bend <= 0:
raise Exception('A sampling bending angle of {} is below 0°.'.format(bendi))
B_prim = self.B_prim
Bt_inv_prim = np.linalg.pinv(B_prim.dot(B_prim.T)).dot(B_prim)
prev_q = q_orig
bt_iter_continue = True
bt_converged = False
bt_iter_cnt = 0
while bt_iter_continue:
dx = Bt_inv_prim.T.dot(dq)
# Freeze the positions of dummy atoms and hydrogen caps of the QM/MM system
if self.nHcap != 0:
dx[-(self.nHcap * 3):] = 0
# Update cartesian coordinates
geom += dx
dx_rms = np.sqrt(np.mean(dx ** 2))
# Met convergence thresholds
if dx_rms < bt_dx_conv:
bt_converged = True
bt_iter_continue = False
# No further progress toward convergence
elif (np.absolute(dx_rms | |
<reponame>govtmirror/PriceHistoryGUI<filename>PPBottleApp.py
from bottle import Bottle, run, template,request,TEMPLATE_PATH,static_file,HeaderDict,BaseResponse,response,redirect
import time
import urllib
import ast
import sys
import LogFeedback
import LogActivity
import requests
import os
import PriceHistoryAuth.LogActivity
import PriceHistoryAuth.pycas
from ppGuiConfig import URLToPPSearchApiSolr,GoogleAnalyticsInclusionScript,FEEDBACK_EMAIL,\
LocalURLToRecordFeedback,CAS_SERVER,CAS_PROXY,CAS_RETURN_SERVICE_URL,CAS_LEVEL_OF_ASSURANCE,CAS_LEVEL_OF_ASSURANCE_PREDICATE
import PriceHistoryAuth.auth
import cPickle as pickle
from cStringIO import StringIO
# I am duplicating this because I don't really know how to organize Python
# classes. Probably it should be removed.
import morris_config
import os
import cgi
import md5
import time
import urllib
import urlparse
URL_TO_MORRIS_PORTFOLIOS_API = "http://localhost:" + str(morris_config.BOTTLE_DEORATOR_PORTFOLIOS_API_PORT)
URL_TO_MORRIS_TAGS_API = "http://localhost:" + str(morris_config.BOTTLE_DEORATOR_TAGS_API_PORT)
PathToBottleWebApp = "./"
PathToExternalFiles = "../"
PathToCSSFiles=PathToExternalFiles+"css/"
app = Bottle()
PricesPaidAPIUsername=None
PricesPaidAPIPassword=None
PricesPaidAPIBasicAuthUsername=None
PricesPaidAPIBasicAuthPassword=<PASSWORD>
P3APISALT = None
PYCAS_SECRET = None
def readCredentials():
global PricesPaidAPIUsername
if (PricesPaidAPIUsername is None):
global PricesPaidAPIPassword
global PricesPaidAPIBasicAuthUsername
global PricesPaidAPIBasicAuthPassword
global P3APISALT
global PYCAS_SECRET
PricesPaidAPIUsername=os.environ.get("PricesPaidAPIUsername")
PricesPaidAPIPassword=os.environ.get("PricesPaidAPIPassword")
PricesPaidAPIBasicAuthUsername=os.environ.get("PricesPaidAPIBasicAuthUsername")
PricesPaidAPIBasicAuthPassword=os.environ.get("PricesPaidAPIBasicAuthPassword")
P3APISALT=os.environ.get("P3APISALT")
PYCAS_SECRET=os.environ.get("PYCAS_SECRET")
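# Local development sketch (assumption: the variable names match the ones read
# above; the values shown are placeholders, never real credentials). The
# credentials can be injected into the environment before the app starts, e.g.:
#
#     import os
#     os.environ.setdefault("PricesPaidAPIUsername", "apiuser")
#     os.environ.setdefault("PricesPaidAPIPassword", "not-a-real-password")
#     os.environ.setdefault("PricesPaidAPIBasicAuthUsername", "basicauthuser")
#     os.environ.setdefault("PricesPaidAPIBasicAuthPassword", "not-a-real-password")
#     os.environ.setdefault("P3APISALT", "some-salt")
#     os.environ.setdefault("PYCAS_SECRET", "some-secret")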
# Begin Common Template Strings
DATASET_DESCRIPTION = template('DataSetDescriptionPreamble')
FOOTER_HTML = template('Footer',feedback_email=FEEDBACK_EMAIL)
COLUMN_DROPDOWN_HTML = template('ColumnDropdown')
EXTRA_LOGIN_METHODS = template('ExtraLoginMethods')
PORTFOLIO_PANEL = template('PortfolioPanel')
MAINJS_INCLUDES = template('MainJSIncludes')
SLICKGRID_INCLUDES = template('SlickGridIncludes')
JQPLOT_INCLUDES = template('JQPlotIncludes')
# End Common Template Strings
@app.route('/theme/<path:path>')
def server_static(path):
return static_file(path, root=PathToBottleWebApp+"theme/")
@app.route('/css/<filename>')
def server_static(filename):
return static_file(filename, root=PathToCSSFiles)
@app.route('/js/<filename>')
def server_static(filename):
return static_file(filename, root=PathToBottleWebApp+"js/")
@app.route('/MorrisDataDecorator/js/<filename>')
def server_static(filename):
return static_file(filename, root="../MorrisDataDecorator/js/")
@app.route('/MorrisDataDecorator/imgs/<filename>')
def server_static(filename):
return static_file(filename, root="../MorrisDataDecorator/imgs/")
@app.route('/MorrisDataDecorator/css/<filename>')
def server_static(filename):
return static_file(filename, root="../MorrisDataDecorator/css/")
from bottle import template
@app.route('/')
def legalNotice():
PriceHistoryAuth.LogActivity.logPageTurn("nosession","LegalNotice")
return template('LegalNotice',goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/SearchHelp')
def searchHelp():
PriceHistoryAuth.LogActivity.logPageTurn("nosession","SearchHelp")
return template('SearchHelp',goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/Logout',method='POST')
def logoutViaPost():
PriceHistoryAuth.LogActivity.logPageTurn("nosession","Logout")
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
PriceHistoryAuth.auth.del_session(ses_id)
return template('Logout',goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/Logout',method='GET')
def logoutViaGet():
PriceHistoryAuth.LogActivity.logPageTurn("nosession","Logout")
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
PriceHistoryAuth.auth.del_session(ses_id)
return template('Logout',goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/Login')
def login():
PriceHistoryAuth.LogActivity.logPageTurn("nosession","LoginPage")
return template('Login',message='',
footer_html=FOOTER_HTML,
feedback_email=FEEDBACK_EMAIL,
extra_login_methods=EXTRA_LOGIN_METHODS,
goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/UploadData')
def upload():
# For the hackathon, I'm not going to worry about security
# acsrf = request.query['antiCSRF']
# ses_id = request.query['session_id']
# if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
# return template('Login',message='Improper Credentials or Timeout.',
# extra_login_methods=EXTRA_LOGIN_METHODS,
# feedback_email=FEEDBACK_EMAIL,
# footer_html=FOOTER_HTML,
# goog_anal_script=GoogleAnalyticsInclusionScript)
# PriceHistoryAuth.auth.update_acsrf(ses_id)
readCredentials()
search_string = request.forms.get('search_string')
search_string = search_string if search_string is not None else ""
commodity_id = request.forms.get('commodity_id')
return template('UploadData',message='',
feedback_url=LocalURLToRecordFeedback,\
footer_html=FOOTER_HTML,
feedback_email=FEEDBACK_EMAIL,
goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/UploadData',method='POST')
def upload():
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
return template('Login',message='Improper Credentials or Timeout.',
extra_login_methods=EXTRA_LOGIN_METHODS,
feedback_email=FEEDBACK_EMAIL,
footer_html=FOOTER_HTML,
goog_anal_script=GoogleAnalyticsInclusionScript)
PriceHistoryAuth.auth.update_acsrf(ses_id)
search_string = request.forms.get('search_string')
search_string = search_string if search_string is not None else ""
commodity_id = request.forms.get('commodity_id')
return template('UploadData',message='',
acsrf=PriceHistoryAuth.auth.get_acsrf(ses_id),\
session_id=ses_id,\
feedback_url=LocalURLToRecordFeedback,\
footer_html=FOOTER_HTML,
feedback_email=FEEDBACK_EMAIL,
goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/UploadCSVFile',method='POST')
def upload():
# acsrf = request.forms.get('antiCSRF')
# ses_id = request.forms.get('session_id')
# if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
# return template('Login',message='Improper Credentials or Timeout.',
# extra_login_methods=EXTRA_LOGIN_METHODS,
# feedback_email=FEEDBACK_EMAIL,
# footer_html=FOOTER_HTML,
# goog_anal_script=GoogleAnalyticsInclusionScript)
# PriceHistoryAuth.auth.update_acsrf(ses_id)
readCredentials()
csv_file = request.forms.get('csv_file')
payload = { 'username' : PricesPaidAPIUsername,\
'password' : <PASSWORD>,\
'csv_file' : csv_file
}
r = requests.post(URLToPPSearchApiSolr+"/AddCSVFile", data=payload, \
auth=(PricesPaidAPIBasicAuthUsername, PricesPaidAPIBasicAuthPassword), verify=False)
return r.text;
@app.route('/LoginViaMax')
def loginViaMax():
PriceHistoryAuth.LogActivity.logPageTurn("nosession","MaxLoginPage")
response.status = 303
domain,path = urlparse.urlparse(CAS_RETURN_SERVICE_URL)[1:3]
secure=1
setCookieCommand = PriceHistoryAuth.pycas.make_pycas_cookie("gateway",domain,path,secure)
strip = setCookieCommand[12:]
response.set_header('Set-Cookie', strip)
opt=""
location = PriceHistoryAuth.pycas.get_url_redirect_as_string(CAS_SERVER,CAS_RETURN_SERVICE_URL,opt,secure)
response.set_header('Location',location)
return "You will be redirected."+strip+location
@app.route('/ReturnLoginViaMax')
def returnLoginViaMax():
PriceHistoryAuth.LogActivity.logPageTurn("nosession","ReturnMaxLoginPage")
PYCAS_SECRET=os.environ.get("PYCAS_SECRET")
PriceHistoryAuth.LogActivity.logDebugInfo("PYCAS_SECRET:"+PYCAS_SECRET)
ticket = request.query['ticket']
PriceHistoryAuth.LogActivity.logDebugInfo("MAX AUTHENTICATED ticket :"+ticket)
status, id, cookie = PriceHistoryAuth.pycas.check_authenticated_p(CAS_LEVEL_OF_ASSURANCE_PREDICATE,ticket,CAS_SERVER,CAS_PROXY,
PYCAS_SECRET, CAS_RETURN_SERVICE_URL, lifetime=None, secure=1, protocol=2, path="/", opt="")
maxAuthenticatedProperly = (status == PriceHistoryAuth.pycas.CAS_OK);
PriceHistoryAuth.LogActivity.logDebugInfo("MAX AUTHENTICATED WITH ID:"+id)
username = "billybob"
if (maxAuthenticatedProperly):
return doStartPageAuthenticated(username)
else:
PriceHistoryAuth.LogActivity.logBadCredentials(username+":failed to Authenticate with Max")
# It would be better to make this message configurable in the same way that CAS_LEVEL_OF_ASSURANCE_PREDICATE is...
# But that is for another day.
return template('Login',message='Improper Credentials returned by MAX. Possibly you authenticated without using a physical PIV/CAC card, or MAX did not return a high enough Level of Assurance. Trying logging out of MAX at http://max.omb.gov and re-authenticating here.',
footer_html=FOOTER_HTML,
feedback_email=FEEDBACK_EMAIL,
extra_login_methods=EXTRA_LOGIN_METHODS,
goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/StartPage',method='POST')
def pptriv():
username = request.forms.get('username')
password = request.forms.get('password')
# just a little throttle to slow down any denial of service attack..
time.sleep(1.0);
readCredentials()
if (not PriceHistoryAuth.auth.does_authenticate(username,password,P3APISALT)):
PriceHistoryAuth.LogActivity.logBadCredentials(username)
return template('Login',message='Improper Credentials.',
footer_html=FOOTER_HTML,
feedback_email=FEEDBACK_EMAIL,
extra_login_methods=EXTRA_LOGIN_METHODS,
goog_anal_script=GoogleAnalyticsInclusionScript)
return doStartPageAuthenticated(username)
def doStartPageAuthenticated(username):
search_string = request.forms.get('search_string')
search_string = search_string if search_string is not None else ""
psc_pattern = request.forms.get('psc_pattern')
ses_id = PriceHistoryAuth.auth.create_session_id()
PriceHistoryAuth.LogActivity.logSessionBegin(username,ses_id)
PriceHistoryAuth.LogActivity.logPageTurn(ses_id,"StartPage")
return template('StartPage',search_string=search_string,\
acsrf=PriceHistoryAuth.auth.get_acsrf(ses_id),\
username=username, \
session_id=ses_id,\
footer_html=FOOTER_HTML,\
dataset_description_preamble=DATASET_DESCRIPTION,\
psc_pattern=psc_pattern,goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/StartPageReturned',method='POST')
def StartPageReturned():
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
return template('Login',message='Improper Credentials or Timeout.',
extra_login_methods=EXTRA_LOGIN_METHODS,
feedback_email=FEEDBACK_EMAIL,
footer_html=FOOTER_HTML,
goog_anal_script=GoogleAnalyticsInclusionScript)
search_string = request.forms.get('search_string')
search_string = search_string if search_string is not None else ""
psc_pattern = request.forms.get('psc_pattern')
ses_id = PriceHistoryAuth.auth.create_session_id()
PriceHistoryAuth.LogActivity.logPageTurn(ses_id,"StartPageReturned")
return template('StartPage',search_string=search_string,\
acsrf=PriceHistoryAuth.auth.get_acsrf(ses_id),\
session_id=ses_id,\
footer_html=FOOTER_HTML,\
dataset_description_preamble=DATASET_DESCRIPTION,\
psc_pattern=psc_pattern,goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/PricesPaid',method='GET')
def swallow():
acsrf = request.query['antiCSRF']
ses_id = request.query['session_id']
return render_main_page(acsrf,ses_id)
@app.route('/PricesPaid',method='POST')
def pptriv():
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
return render_main_page(acsrf,ses_id)
def render_main_page(acsrf,ses_id):
if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
return template('Login',message='Improper Credentials or Timeout.',
extra_login_methods=EXTRA_LOGIN_METHODS,
feedback_email=FEEDBACK_EMAIL,
footer_html=FOOTER_HTML,
goog_anal_script=GoogleAnalyticsInclusionScript)
PriceHistoryAuth.auth.update_acsrf(ses_id)
search_string = request.forms.get('search_string')
search_string = search_string if search_string is not None else ""
commodity_id = request.forms.get('commodity_id')
PriceHistoryAuth.LogActivity.logPageTurn(ses_id,"MainPage")
return template('MainPage',search_string=search_string,\
acsrf=PriceHistoryAuth.auth.get_acsrf(ses_id),\
session_id=ses_id,\
feedback_url=LocalURLToRecordFeedback,\
footer_html=FOOTER_HTML,\
portfolio_panel=PORTFOLIO_PANEL,\
column_dropdown=COLUMN_DROPDOWN_HTML,\
mainjs_includes=MAINJS_INCLUDES,\
slickgrid_includes=SLICKGRID_INCLUDES,\
jqplot_includes=JQPLOT_INCLUDES,\
commodity_id=commodity_id,goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/PortfolioPage',method='POST')
def render_portfolio():
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
return template('Login',message='Improper Credentials or Timeout.',
extra_login_methods=EXTRA_LOGIN_METHODS,
feedback_email=FEEDBACK_EMAIL,
footer_html=FOOTER_HTML,
goog_anal_script=GoogleAnalyticsInclusionScript)
PriceHistoryAuth.auth.update_acsrf(ses_id)
PriceHistoryAuth.LogActivity.logPageTurn(ses_id,"Portfolio")
portfolio = request.forms.get('portfolio')
return template('Portfolio',acsrf=PriceHistoryAuth.auth.get_acsrf(ses_id),\
session_id=ses_id,\
portfolio=portfolio,\
feedback_url=LocalURLToRecordFeedback,\
footer_html=FOOTER_HTML,\
portfolio_panel=PORTFOLIO_PANEL,\
column_dropdown=COLUMN_DROPDOWN_HTML,\
mainjs_includes=MAINJS_INCLUDES,\
slickgrid_includes=SLICKGRID_INCLUDES,\
jqplot_includes=JQPLOT_INCLUDES,\
goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/returnPortfolio',method='POST')
def apisolr():
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
dict = {0: {"status": "BadAuthentication"}}
return dict;
portfolio = request.forms.get('portfolio')
print "portfolio = "+portfolio
r = requests.get(URL_TO_MORRIS_PORTFOLIOS_API+"/decoration/"+portfolio)
content = r.text
d = ast.literal_eval(r.text)
p3ids = d['data']
readCredentials()
payload = { 'username' : PricesPaidAPIUsername,\
'password' : <PASSWORD>,\
'p3ids' : pickle.dumps(p3ids)
}
readCredentials()
r = requests.post(URLToPPSearchApiSolr+"/fromIds", data=payload, \
auth=(PricesPaidAPIBasicAuthUsername, PricesPaidAPIBasicAuthPassword), verify=False)
PriceHistoryAuth.LogActivity.logDebugInfo("Got Past Post to :"+URLToPPSearchApiSolr)
content = r.text
# This is inefficient, but I can't seem to get Bottle to
# let me procure a correct JSON response without using a dictionary.
# I tried using BaseResponse. This could be my weakness
# with Python or confusion in Bottle.
d = ast.literal_eval(content)
return d
@app.route('/SimpleHTML',method='GET')
def apisolr():
acsrf = request.query['antiCSRF']
ses_id = request.query['session_id']
if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
dict = {0: {"status": "BadAuthentication"}}
return dict;
portfolio = request.query['portfolio']
print "portfolio = "+portfolio
r = requests.get(URL_TO_MORRIS_PORTFOLIOS_API+"/decoration/"+portfolio)
content = r.text
d = ast.literal_eval(r.text)
p3ids = d['data']
readCredentials()
payload = { 'username' : PricesPaidAPIUsername,\
'password' : <PASSWORD>,\
'p3ids' : pickle.dumps(p3ids)
}
r = requests.post(URLToPPSearchApiSolr+"/fromIds", data=payload, \
auth=(PricesPaidAPIBasicAuthUsername, PricesPaidAPIBasicAuthPassword), verify=False)
PriceHistoryAuth.LogActivity.logDebugInfo("Got Past Post to :"+URLToPPSearchApiSolr)
content = r.text
d = ast.literal_eval(content)
html = ""
for key, vdict in d.iteritems():
# Turn this into a function!
html = html + produceHTML(vdict)
# Actually, here we need to loop over a template, but I will try this first!
return html
def produceHTML(valuesdict):
html = ""
if "productDescription" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"Product Description: "+"</h3>"+ "<p style='display:inline'>"+ valuesdict["productDescription"]+"</p>"+"<br />"
if "unitPrice" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"Unit Price: "+"</h3>"+ "<p style='display:inline'>"+"$"+ valuesdict["unitPrice"]+"</p>"+"<br />"
if "longDescription" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"Description: "+"</h3>"+ "<p style='display:inline'>"+ valuesdict["longDescription"]+"</p>"+"<br />"
if "unitsOrdered" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"Units Ordered: "+"</h3>"+ "<p style='display:inline'>"+ valuesdict["unitsOrdered"]+"</p>"+"<br />"
if "vendor" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"Vendor: "+"</h3>"+ "<p style='display:inline'>"+ valuesdict["vendor"]+"</p>"+"<br />"
if "orderDate" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"Date: "+"</h3>"+ "<p style='display:inline'>"+ valuesdict["orderDate"]+"</p>"+"<br />"
if "awardIdv" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"Vehicle/Schedule Bought From: "+"</h3>"+ "<p style='display:inline'>"+ valuesdict["awardIdIdv"]+"</p>"+"<br />"
if "psc" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"PSC Code: "+"</h3>"+ "<p style='display:inline'>"+ valuesdict["psc"]+"</p>"+"<br />"
if "contractingAgency" in valuesdict:
html = html + "<h3 style= 'display:inline'>"+"Contracting Agency: "+"</h3>"+ "<p style='display:inline'>"+ valuesdict["contractingAgency"]+"</p>"+"<br />"
for k,v in valuesdict.iteritems():
if k not in ("unitPrice","longDescription" ,"productDescription" , "unitsOrdered" , "vendor", "score", "orderDate", "p3id", 'awardIdIdv', 'psc', "contractingAgency"):
html = html + "<h3 style= 'display:inline'>" +k+ ":" +"</h3>"+ "<p style='display:inline'>"+str(v) + "</p>" +"<br />"+ "\n"
html = html + "<p></p>" + "\n"
return html
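# Illustrative call of produceHTML (the field names below are among the ones
# the function checks for; the record itself is made up):
#
#     record = {"productDescription": "Widget", "unitPrice": "9.99",
#               "unitsOrdered": "12", "vendor": "ACME", "orderDate": "2014-01-01"}
#     snippet = produceHTML(record)    # returns an HTML fragment as a string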
@app.route('/search',method='POST')
def apisolr():
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
dict = {0: {"status": "BadAuthentication"}}
return dict;
readCredentials()
search_string = request.forms.get('search_string')
psc_pattern = request.forms.get('psc_pattern')
max_results = request.forms.get('numRows')
PriceHistoryAuth.LogActivity.logSearchBegun(ses_id,psc_pattern,search_string)
payload = { 'username' : PricesPaidAPIUsername,\
'password' : <PASSWORD>,\
'search_string': search_string,\
'psc_pattern': psc_pattern,\
'numRows': max_results }
r = requests.post(URLToPPSearchApiSolr, data=payload, \
auth=(PricesPaidAPIBasicAuthUsername, PricesPaidAPIBasicAuthPassword), verify=False)
PriceHistoryAuth.LogActivity.logDebugInfo("Got Past Post to :"+URLToPPSearchApiSolr)
content = r.text
# This is inefficient, but I can't seem to get Bottle to
# let me procure a correct JSON response without using a dictionary.
# I tried using BaseResponse. This could be my weakness
# with Python or confusion in Bottle.
d = ast.literal_eval(content)
PriceHistoryAuth.LogActivity.logSearchDone(ses_id,psc_pattern,search_string)
return d
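# A commonly used alternative to the literal_eval round-trip above (a sketch,
# not what this app currently does): decode the upstream response as JSON and
# return a serialized body with an explicit content type, e.g.
#
#     import json
#     d = r.json()                               # requests parses the JSON body
#     response.content_type = 'application/json'
#     return json.dumps(d)
#
# If the upstream API returns a Python-literal payload rather than strict
# JSON, ast.literal_eval (as used above) remains necessary.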
@app.route('/record_feedback',method='POST')
def feedback():
acsrf = request.forms.get('antiCSRF')
ses_id = request.forms.get('session_id')
PriceHistoryAuth.LogActivity.logDebugInfo("acsrf ses_d :"+acsrf+ses_id)
if (not PriceHistoryAuth.auth.is_valid_acsrf(ses_id,acsrf)):
dict = {0: {"status": "BadAuthentication"}}
return dict;
PriceHistoryAuth.LogActivity.logDebugInfo("authenticated !")
PriceHistoryAuth.LogActivity.logFeedback(ses_id)
message = request.forms.get('message')
name = request.forms.get('name')
radio_list_value = request.forms.get('radio_list_value')
LogFeedback.logFeedback(name,message,radio_list_value);
return "true";
# This file is a direct copy from the MorrisDataDecorator. It
# ought to be possible to avoid this duplication, but I don't really
# know how to do that in Python. I will have to spend the
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance Proxy
"""
import json
from oslo_log import log
from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume.drivers.zfssa import restclient
from cinder.volume.drivers.zfssa import webdavclient
LOG = log.getLogger(__name__)
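# Illustrative usage sketch for the proxy class below (host, credentials, pool
# and group names are placeholders; in practice the ZFSSA Cinder driver
# orchestrates these calls):
#
#     zfssa = ZFSSAApi()
#     zfssa.set_host('zfssa.example.com', timeout=60)
#     zfssa.login('<base64 user:password>')
#     avail, total = zfssa.get_pool_stats('mypool')
#     zfssa.create_project('mypool', 'cinder-project')
#     iqn = zfssa.create_target('tgt-alias')
#     zfssa.add_to_targetgroup(iqn, 'cinder-targetgroup')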
class ZFSSAApi(object):
"""ZFSSA API proxy class"""
def __init__(self):
self.host = None
self.url = None
self.rclient = None
def __del__(self):
if self.rclient and self.rclient.islogin():
self.rclient.logout()
def _is_pool_owned(self, pdata):
"""returns True if the pool's owner is the
same as the host.
"""
svc = '/api/system/v1/version'
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error getting version: '
'svc: %(svc)s.'
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'svc': svc,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
vdata = json.loads(ret.data)
return vdata['version']['asn'] == pdata['pool']['asn'] and \
vdata['version']['nodename'] == pdata['pool']['owner']
def set_host(self, host, timeout=None):
self.host = host
self.url = "https://" + self.host + ":215"
self.rclient = restclient.RestClientURL(self.url, timeout=timeout)
def login(self, auth_str):
"""Login to the appliance"""
if self.rclient and not self.rclient.islogin():
self.rclient.login(auth_str)
def get_pool_stats(self, pool):
"""Get space available and total properties of a pool
returns (avail, total).
"""
svc = '/api/storage/v1/pools/' + pool
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting Pool Stats: '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.InvalidVolume(reason=exception_msg)
val = json.loads(ret.data)
if not self._is_pool_owned(val):
LOG.error(_LE('Error Pool ownership: Pool %(pool)s is not owned '
'by %(host)s.'),
{'pool': pool, 'host': self.host})
raise exception.InvalidInput(reason=pool)
avail = val['pool']['usage']['available']
total = val['pool']['usage']['total']
return avail, total
def create_project(self, pool, project, compression=None, logbias=None):
"""Create a project on a pool
Check first whether the pool exists.
"""
self.verify_pool(pool)
svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/storage/v1/pools/' + pool + '/projects'
arg = {
'name': project
}
if compression and compression != '':
arg.update({'compression': compression})
if logbias and logbias != '':
arg.update({'logbias': logbias})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Project: '
'%(project)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_initiator(self, initiator, alias, chapuser=None,
chapsecret=None):
"""Create an iSCSI initiator."""
svc = '/api/san/v1/iscsi/initiators/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/initiators'
arg = {
'initiator': initiator,
'alias': alias
}
if chapuser and chapuser != '' and chapsecret and chapsecret != '':
arg.update({'chapuser': chapuser,
'chapsecret': chapsecret})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Initiator: '
'%(initiator)s on '
'Alias: %(alias)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def add_to_initiatorgroup(self, initiator, initiatorgroup):
"""Add an iSCSI initiator to initiatorgroup"""
svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/initiator-groups'
arg = {
'name': initiatorgroup,
'initiators': [initiator]
}
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Adding Initiator: '
'%(initiator)s on group'
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
else:
val = json.loads(ret.data)
inits = val['group']['initiators']
if inits is None:
exception_msg = (_('Error Getting Initiators: '
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
if initiator in inits:
return
inits.append(initiator)
svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
arg = {
'initiators': inits
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Adding Initiator: '
'%(initiator)s on group'
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_target(self, alias, interfaces=None, tchapuser=None,
tchapsecret=None):
"""Create an iSCSI target.
interfaces: an array with network interfaces
tchapuser, tchapsecret: target's chapuser and chapsecret
returns target iqn
"""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/targets'
arg = {
'alias': alias
}
if tchapuser and tchapuser != '' and tchapsecret and \
tchapsecret != '':
arg.update({'targetchapuser': tchapuser,
'targetchapsecret': tchapsecret,
'auth': 'chap'})
if interfaces is not None and len(interfaces) > 0:
arg.update({'interfaces': interfaces})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Target: '
'%(alias)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['target']['iqn']
def get_target(self, alias):
"""Get an iSCSI target iqn."""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting Target: '
'%(alias)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['target']['iqn']
def add_to_targetgroup(self, iqn, targetgroup):
"""Add an iSCSI target to targetgroup."""
svc = '/api/san/v1/iscsi/target-groups/' + targetgroup
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svccrt = '/api/san/v1/iscsi/target-groups'
arg = {
'name': targetgroup,
'targets': [iqn]
}
ret = self.rclient.post(svccrt, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating TargetGroup: '
                                   '%(targetgroup)s with '
                                   'IQN: %(iqn)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'targetgroup': targetgroup,
'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
return
arg = {
'targets': [iqn]
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Adding to TargetGroup: '
                               '%(targetgroup)s with '
                               'IQN: %(iqn)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'targetgroup': targetgroup,
'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_pool(self, pool):
"""Checks whether pool exists."""
svc = '/api/storage/v1/pools/' + pool
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying Pool: '
'%(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_project(self, pool, project):
"""Checks whether project exists."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Project: %(project)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_initiator(self, iqn):
"""Check whether initiator iqn exists."""
svc = '/api/san/v1/iscsi/initiators/' + iqn
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Initiator: %(iqn)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
                             % {'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_target(self, alias):
"""Check whether target alias exists."""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Target: %(alias)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_lun(self, pool, project, lun, volsize, targetgroup, specs):
"""Create a LUN.
        specs - contains volume properties (e.g. blocksize, compression).
"""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns'
arg = {
'name': lun,
'volsize': volsize,
'targetgroup': targetgroup,
'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'
}
if specs:
arg.update(specs)
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Volume: %(lun)s '
'Size: %(size)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'lun': lun,
'size': volsize,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val
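    # Illustrative provisioning sketch (hypothetical names, not from the
    # original source): callers would normally validate the pool and project
    # before creating the LUN, e.g.
    #   zfssa.verify_pool('pool-0')
    #   zfssa.verify_project('pool-0', 'cinder')
    #   lun = zfssa.create_lun('pool-0', 'cinder', 'volume-1', volsize,
    #                          'cinder-targetgroup', specs=None)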
def get_lun(self, pool, project, lun):
"""return iscsi lun properties."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + "/luns/" + lun
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting '
'Volume: %(lun)s on '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'lun': lun,
'pool': pool,
'project': project,
                                'ret.status':
#!/usr/bin/env python2.7
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""Convert (to and) from rdflib graphs to other well known graph libraries.
Currently the following libraries are supported:
- networkx: MultiDiGraph, DiGraph, Graph
- graph_tool: Graph
Doctests in this file are all skipped, as we can't run them conditionally if
networkx or graph_tool are available and they would err otherwise.
see ../../test/test_extras_external_graph_libs.py for conditional tests
"""
import logging
logger = logging.getLogger(__name__)
def _identity(x): return x
def _rdflib_to_networkx_graph(
graph,
nxgraph,
calc_weights,
edge_attrs,
transform_s=_identity, transform_o=_identity):
"""Helper method for multidigraph, digraph and graph.
Modifies nxgraph in-place!
Arguments:
graph: an rdflib.Graph.
nxgraph: a networkx.Graph/DiGraph/MultiDigraph.
calc_weights: If True adds a 'weight' attribute to each edge according
to the count of s,p,o triples between s and o, which is meaningful
for Graph/DiGraph.
edge_attrs: Callable to construct edge data from s, p, o.
'triples' attribute is handled specially to be merged.
'weight' should not be generated if calc_weights==True.
(see invokers below!)
transform_s: Callable to transform node generated from s.
transform_o: Callable to transform node generated from o.
"""
assert callable(edge_attrs)
assert callable(transform_s)
assert callable(transform_o)
import networkx as nx
for s, p, o in graph:
ts, to = transform_s(s), transform_o(o) # apply possible transformations
data = nxgraph.get_edge_data(ts, to)
if data is None or isinstance(nxgraph, nx.MultiDiGraph):
# no edge yet, set defaults
data = edge_attrs(s, p, o)
if calc_weights:
data['weight'] = 1
nxgraph.add_edge(ts, to, **data)
else:
# already have an edge, just update attributes
if calc_weights:
data['weight'] += 1
if 'triples' in data:
d = edge_attrs(s, p, o)
data['triples'].extend(d['triples'])
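# Added commentary: for non-multigraph targets, repeated (s, o) pairs collapse
# onto a single edge -- e.g. the triples (a, p, b) and (a, q, b) produce one
# a->b edge whose 'weight' becomes 2 (when calc_weights is True) and whose
# 'triples' list, if edge_attrs provides one, accumulates both triples.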
def rdflib_to_networkx_multidigraph(
graph,
edge_attrs=lambda s, p, o: {'key': p},
**kwds):
"""Converts the given graph into a networkx.MultiDiGraph.
    The subjects and objects become the nodes of the resulting MultiDiGraph.
The predicates are used as edge keys (to identify multi-edges).
Arguments:
graph: a rdflib.Graph.
        edge_attrs: Callable to construct the edge attributes. It receives
3 variables (s, p, o) and should construct a dictionary that is
passed to networkx's add_edge(s, o, **attrs) function.
By default this will include setting the MultiDiGraph key=p here.
If you don't want to be able to re-identify the edge later on, you
can set this to `lambda s, p, o: {}`. In this case MultiDiGraph's
default (increasing ints) will be used.
Returns:
networkx.MultiDiGraph
>>> from rdflib import Graph, URIRef, Literal
>>> g = Graph()
>>> a, b, l = URIRef('a'), URIRef('b'), Literal('l')
>>> p, q = URIRef('p'), URIRef('q')
>>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
>>> for t in edges:
... g.add(t)
...
>>> mdg = rdflib_to_networkx_multidigraph(g)
>>> len(mdg.edges())
4
>>> mdg.has_edge(a, b)
True
>>> mdg.has_edge(a, b, key=p)
True
>>> mdg.has_edge(a, b, key=q)
True
>>> mdg = rdflib_to_networkx_multidigraph(g, edge_attrs=lambda s,p,o: {})
>>> mdg.has_edge(a, b, key=0)
True
>>> mdg.has_edge(a, b, key=1)
True
"""
import networkx as nx
mdg = nx.MultiDiGraph()
_rdflib_to_networkx_graph(graph, mdg, False, edge_attrs, **kwds)
return mdg
def rdflib_to_networkx_digraph(
graph,
calc_weights=True,
edge_attrs=lambda s, p, o: {'triples': [(s, p, o)]},
**kwds):
"""Converts the given graph into a networkx.DiGraph.
As an rdflib.Graph() can contain multiple edges between nodes, by default
    adds a 'triples' attribute to the single DiGraph edge with a list of
all triples between s and o.
Also by default calculates the edge weight as the length of triples.
Args:
graph: a rdflib.Graph.
calc_weights: If true calculate multi-graph edge-count as edge 'weight'
        edge_attrs: Callable to construct the edge attributes. It receives
3 variables (s, p, o) and should construct a dictionary that is
passed to networkx's add_edge(s, o, **attrs) function.
By default this will include setting the 'triples' attribute here,
which is treated specially by us to be merged. Other attributes of
multi-edges will only contain the attributes of the first edge.
If you don't want the 'triples' attribute for tracking, set this to
`lambda s, p, o: {}`.
Returns:
networkx.DiGraph
>>> from rdflib import Graph, URIRef, Literal
>>> g = Graph()
>>> a, b, l = URIRef('a'), URIRef('b'), Literal('l')
>>> p, q = URIRef('p'), URIRef('q')
>>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
>>> for t in edges:
... g.add(t)
...
>>> dg = rdflib_to_networkx_digraph(g)
>>> dg[a][b]['weight']
2
>>> sorted(dg[a][b]['triples']) == [(a, p, b), (a, q, b)]
True
>>> len(dg.edges())
3
>>> dg.size()
3
>>> dg.size(weight='weight')
4.0
    >>> dg = rdflib_to_networkx_digraph(g, False, edge_attrs=lambda s,p,o:{})
>>> 'weight' in dg[a][b]
False
>>> 'triples' in dg[a][b]
False
"""
import networkx as nx
dg = nx.DiGraph()
_rdflib_to_networkx_graph(graph, dg, calc_weights, edge_attrs, **kwds)
return dg
def rdflib_to_networkx_graph(
graph,
calc_weights=True,
edge_attrs=lambda s, p, o: {'triples': [(s, p, o)]},
**kwds):
"""Converts the given graph into a networkx.Graph.
As an rdflib.Graph() can contain multiple directed edges between nodes, by
    default adds a 'triples' attribute to the single (undirected) Graph edge with a
list of triples between s and o in graph.
Also by default calculates the edge weight as the len(triples).
Args:
graph: a rdflib.Graph.
calc_weights: If true calculate multi-graph edge-count as edge 'weight'
        edge_attrs: Callable to construct the edge attributes. It receives
3 variables (s, p, o) and should construct a dictionary that is
passed to networkx's add_edge(s, o, **attrs) function.
By default this will include setting the 'triples' attribute here,
which is treated specially by us to be merged. Other attributes of
multi-edges will only contain the attributes of the first edge.
If you don't want the 'triples' attribute for tracking, set this to
`lambda s, p, o: {}`.
Returns:
networkx.Graph
>>> from rdflib import Graph, URIRef, Literal
>>> g = Graph()
>>> a, b, l = URIRef('a'), URIRef('b'), Literal('l')
>>> p, q = URIRef('p'), URIRef('q')
>>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
>>> for t in edges:
... g.add(t)
...
>>> ug = rdflib_to_networkx_graph(g)
>>> ug[a][b]['weight']
3
>>> sorted(ug[a][b]['triples']) == [(a, p, b), (a, q, b), (b, p, a)]
True
>>> len(ug.edges())
2
>>> ug.size()
2
>>> ug.size(weight='weight')
4.0
>>> ug = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s,p,o:{})
>>> 'weight' in ug[a][b]
False
>>> 'triples' in ug[a][b]
False
"""
import networkx as nx
g = nx.Graph()
_rdflib_to_networkx_graph(graph, g, calc_weights, edge_attrs, **kwds)
return g
def rdflib_to_graphtool(
graph,
v_prop_names=[str('term')],
e_prop_names=[str('term')],
transform_s=lambda s, p, o: {str('term'): s},
transform_p=lambda s, p, o: {str('term'): p},
transform_o=lambda s, p, o: {str('term'): o},
):
"""Converts the given graph into a graph_tool.Graph().
    The subjects and objects become the vertices of the resulting Graph.
The predicates become edges.
Arguments:
graph: a rdflib.Graph.
v_prop_names: a list of names for the vertex properties. The default is
set to ['term'] (see transform_s, transform_o below).
e_prop_names: a list of names for the edge properties.
transform_s: callable with s, p, o input. Should return a dictionary
containing a value for each name in v_prop_names. By default is set
to {'term': s} which in combination with v_prop_names = ['term']
adds s as 'term' property to the generated vertex for s.
transform_p: similar to transform_s, but wrt. e_prop_names. By default
returns {'term': p} which adds p as a property to the generated
edge between the vertex for s and the vertex for o.
transform_o: similar to transform_s.
Returns:
graph_tool.Graph()
>>> from rdflib import Graph, URIRef, Literal
>>> g = Graph()
>>> a, b, l = URIRef('a'), URIRef('b'), Literal('l')
>>> p, q = URIRef('p'), URIRef('q')
>>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
>>> for t in edges:
... g.add(t)
...
>>> mdg = rdflib_to_graphtool(g)
>>> len(list(mdg.edges()))
4
>>> from graph_tool import util as gt_util
>>> vpterm = mdg.vertex_properties['term']
>>> va = gt_util.find_vertex(mdg, vpterm, a)[0]
>>> vb = gt_util.find_vertex(mdg, vpterm, b)[0]
>>> vl = gt_util.find_vertex(mdg, vpterm, l)[0]
>>> (va, vb) in [(e.source(), e.target()) for e in list(mdg.edges())]
True
>>> epterm = mdg.edge_properties['term']
>>> len(list(gt_util.find_edge(mdg, epterm, p))) == 3
True
>>> len(list(gt_util.find_edge(mdg, epterm, q))) == 1
True
>>> mdg = rdflib_to_graphtool(
... g,
... e_prop_names=[str('name')],
... transform_p=lambda s, p, o: {str('name'): unicode(p)})
>>> epterm = mdg.edge_properties['name']
>>> len(list(gt_util.find_edge(mdg, epterm, unicode(p)))) == 3
True
    >>> len(list(gt_util.find_edge(mdg, epterm, unicode(q))))
89); }\n"
""))
self.gb_pb_sell_volume_row_5_6.setFlat(True)
self.gb_pb_sell_volume_row_5_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_5_6"))
self.gb_pb_sell_volume_row_minus_5_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_minus_5_6.setGeometry(QtCore.QRect(180, 40, 31, 23))
self.gb_pb_sell_volume_row_minus_5_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_minus_5_6"))
self.gb_pb_sell_volume_row_minus_4_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_minus_4_6.setGeometry(QtCore.QRect(180, 60, 31, 23))
self.gb_pb_sell_volume_row_minus_4_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_minus_4_6"))
self.gb_pb_sell_volume_row_minus_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_minus_3_6.setGeometry(QtCore.QRect(180, 80, 31, 23))
self.gb_pb_sell_volume_row_minus_3_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_minus_3_6"))
self.gb_pb_sell_volume_row_minus_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_minus_2_6.setGeometry(QtCore.QRect(180, 100, 31, 23))
self.gb_pb_sell_volume_row_minus_2_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_minus_2_6"))
self.gb_pb_sell_volume_row_minus_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_minus_1_6.setGeometry(QtCore.QRect(180, 120, 31, 23))
self.gb_pb_sell_volume_row_minus_1_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_minus_1_6"))
self.gb_pb_sell_volume_row_4_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_4_6.setGeometry(QtCore.QRect(210, 60, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_4_6.setFont(font)
self.gb_pb_sell_volume_row_4_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_row_4_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_row_4_6.setFlat(True)
self.gb_pb_sell_volume_row_4_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_4_6"))
self.gb_pb_sell_volume_row_8 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_8.setGeometry(QtCore.QRect(210, 80, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_8.setFont(font)
self.gb_pb_sell_volume_row_8.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_row_8.setText(_fromUtf8(""))
self.gb_pb_sell_volume_row_8.setFlat(True)
self.gb_pb_sell_volume_row_8.setObjectName(_fromUtf8("gb_pb_sell_volume_row_8"))
self.gb_pb_sell_volume_row_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_2_6.setGeometry(QtCore.QRect(210, 100, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_2_6.setFont(font)
self.gb_pb_sell_volume_row_2_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_row_2_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_row_2_6.setFlat(True)
self.gb_pb_sell_volume_row_2_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_2_6"))
self.gb_pb_sell_volume_row_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_1_6.setGeometry(QtCore.QRect(210, 120, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_1_6.setFont(font)
self.gb_pb_sell_volume_row_1_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_row_1_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_row_1_6.setFlat(True)
self.gb_pb_sell_volume_row_1_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_1_6"))
self.gb_pb_sell_volume_5_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_5_1_6.setGeometry(QtCore.QRect(270, 40, 41, 23))
self.gb_pb_sell_volume_5_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_5_1_6.setObjectName(_fromUtf8("gb_pb_sell_volume_5_1_6"))
self.gb_pb_sell_volume_5_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_5_2_6.setGeometry(QtCore.QRect(310, 40, 41, 23))
self.gb_pb_sell_volume_5_2_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_5_2_6.setObjectName(_fromUtf8("gb_pb_sell_volume_5_2_6"))
self.gb_pb_sell_volume_5_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_5_3_6.setGeometry(QtCore.QRect(350, 40, 41, 23))
self.gb_pb_sell_volume_5_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_5_3_6.setObjectName(_fromUtf8("gb_pb_sell_volume_5_3_6"))
self.gb_pb_sell_volume_4_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_4_3_6.setGeometry(QtCore.QRect(350, 60, 41, 23))
self.gb_pb_sell_volume_4_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_4_3_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_4_3_6.setObjectName(_fromUtf8("gb_pb_sell_volume_4_3_6"))
self.gb_pb_sell_volume_4_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_4_2_6.setGeometry(QtCore.QRect(310, 60, 41, 23))
self.gb_pb_sell_volume_4_2_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_4_2_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_4_2_6.setObjectName(_fromUtf8("gb_pb_sell_volume_4_2_6"))
self.gb_pb_sell_volume_4_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_4_1_6.setGeometry(QtCore.QRect(270, 60, 41, 23))
self.gb_pb_sell_volume_4_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_4_1_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_4_1_6.setObjectName(_fromUtf8("gb_pb_sell_volume_4_1_6"))
self.gb_pb_sell_volume_3_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_3_1_6.setGeometry(QtCore.QRect(270, 80, 41, 23))
self.gb_pb_sell_volume_3_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_3_1_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_3_1_6.setObjectName(_fromUtf8("gb_pb_sell_volume_3_1_6"))
self.gb_pb_sell_volume_3_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_3_2_6.setGeometry(QtCore.QRect(310, 80, 41, 23))
self.gb_pb_sell_volume_3_2_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_3_2_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_3_2_6.setObjectName(_fromUtf8("gb_pb_sell_volume_3_2_6"))
self.gb_pb_sell_volume_3_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_3_3_6.setGeometry(QtCore.QRect(350, 80, 41, 23))
self.gb_pb_sell_volume_3_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_3_3_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_3_3_6.setObjectName(_fromUtf8("gb_pb_sell_volume_3_3_6"))
self.gb_pb_sell_volume_2_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_2_3_6.setGeometry(QtCore.QRect(350, 100, 41, 23))
self.gb_pb_sell_volume_2_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_2_3_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_2_3_6.setObjectName(_fromUtf8("gb_pb_sell_volume_2_3_6"))
self.gb_pb_sell_volume_2_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_2_2_6.setGeometry(QtCore.QRect(310, 100, 41, 23))
self.gb_pb_sell_volume_2_2_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_2_2_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_2_2_6.setObjectName(_fromUtf8("gb_pb_sell_volume_2_2_6"))
self.gb_pb_sell_volume_2_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_2_1_6.setGeometry(QtCore.QRect(270, 100, 41, 23))
self.gb_pb_sell_volume_2_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_2_1_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_2_1_6.setObjectName(_fromUtf8("gb_pb_sell_volume_2_1_6"))
self.gb_pb_sell_volume_1_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_1_1_6.setGeometry(QtCore.QRect(270, 120, 41, 23))
self.gb_pb_sell_volume_1_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_1_1_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_1_6.setObjectName(_fromUtf8("gb_pb_sell_volume_1_1_6"))
self.gb_pb_sell_volume_1_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_1_3_6.setGeometry(QtCore.QRect(350, 120, 41, 23))
self.gb_pb_sell_volume_1_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_1_3_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_3_6.setObjectName(_fromUtf8("gb_pb_sell_volume_1_3_6"))
self.gb_pb_sell_volume_1_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_1_2_6.setGeometry(QtCore.QRect(310, 120, 41, 23))
self.gb_pb_sell_volume_1_2_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_1_2_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_2_6.setObjectName(_fromUtf8("gb_pb_sell_volume_1_2_6"))
self.label_6 = QtGui.QLabel(self.gb_ETFOrder_6)
self.label_6.setGeometry(QtCore.QRect(10, 140, 381, 20))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gb_pb_buy_volume_2_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_2_2_6.setGeometry(QtCore.QRect(310, 180, 41, 23))
self.gb_pb_buy_volume_2_2_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_2_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_2_6.setObjectName(_fromUtf8("gb_pb_buy_volume_2_2_6"))
self.gb_pb_buy_volume_row_minus_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_row_minus_2_6.setGeometry(QtCore.QRect(180, 180, 31, 23))
self.gb_pb_buy_volume_row_minus_2_6.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_2_6"))
self.gb_pb_b5_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b5_6.setGeometry(QtCore.QRect(10, 240, 51, 20))
self.gb_pb_b5_6.setFlat(True)
self.gb_pb_b5_6.setObjectName(_fromUtf8("gb_pb_b5_6"))
self.gb_pb_b5_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b5_price_6.setGeometry(QtCore.QRect(60, 240, 51, 20))
self.gb_pb_b5_price_6.setStyleSheet(_fromUtf8(""))
self.gb_pb_b5_price_6.setFlat(True)
self.gb_pb_b5_price_6.setObjectName(_fromUtf8("gb_pb_b5_price_6"))
self.gb_pb_b4_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b4_1_6.setGeometry(QtCore.QRect(10, 220, 51, 20))
self.gb_pb_b4_1_6.setFlat(True)
self.gb_pb_b4_1_6.setObjectName(_fromUtf8("gb_pb_b4_1_6"))
self.gb_pb_buy_volume_row_minus_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_row_minus_1_6.setGeometry(QtCore.QRect(180, 160, 31, 23))
self.gb_pb_buy_volume_row_minus_1_6.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_1_6"))
self.gb_pb_b3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b3_6.setGeometry(QtCore.QRect(10, 200, 51, 20))
self.gb_pb_b3_6.setFlat(True)
self.gb_pb_b3_6.setObjectName(_fromUtf8("gb_pb_b3_6"))
self.gb_pb_b2_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b2_volume_6.setGeometry(QtCore.QRect(110, 180, 71, 20))
self.gb_pb_b2_volume_6.setFlat(True)
self.gb_pb_b2_volume_6.setObjectName(_fromUtf8("gb_pb_b2_volume_6"))
self.gb_pb_buy_volume_1_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_1_3_6.setGeometry(QtCore.QRect(350, 160, 41, 23))
self.gb_pb_buy_volume_1_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_3_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_3_6.setObjectName(_fromUtf8("gb_pb_buy_volume_1_3_6"))
self.gb_pb_buy_volume_row_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_row_3_6.setGeometry(QtCore.QRect(210, 200, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_3_6.setFont(font)
self.gb_pb_buy_volume_row_3_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_3_6.setFlat(True)
self.gb_pb_buy_volume_row_3_6.setObjectName(_fromUtf8("gb_pb_buy_volume_row_3_6"))
self.gb_pb_buy_volume_5_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_5_1_6.setGeometry(QtCore.QRect(270, 240, 41, 23))
self.gb_pb_buy_volume_5_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_1_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_1_6.setObjectName(_fromUtf8("gb_pb_buy_volume_5_1_6"))
self.gb_pb_buy_volume_4_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_4_1_6.setGeometry(QtCore.QRect(270, 220, 41, 23))
self.gb_pb_buy_volume_4_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_1_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_1_6.setObjectName(_fromUtf8("gb_pb_buy_volume_4_1_6"))
self.gb_pb_buy_volume_row_4_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_row_4_6.setGeometry(QtCore.QRect(210, 220, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_4_6.setFont(font)
self.gb_pb_buy_volume_row_4_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_4_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_4_6.setFlat(True)
self.gb_pb_buy_volume_row_4_6.setObjectName(_fromUtf8("gb_pb_buy_volume_row_4_6"))
self.gb_pb_buy_volume_3_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_3_1_6.setGeometry(QtCore.QRect(270, 200, 41, 23))
self.gb_pb_buy_volume_3_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_1_6.setObjectName(_fromUtf8("gb_pb_buy_volume_3_1_6"))
self.gb_pb_b1_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b1_price_6.setGeometry(QtCore.QRect(60, 160, 51, 20))
self.gb_pb_b1_price_6.setFlat(True)
self.gb_pb_b1_price_6.setObjectName(_fromUtf8("gb_pb_b1_price_6"))
self.gb_pb_buy_volume_3_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_3_2_6.setGeometry(QtCore.QRect(310, 200, 41, 23))
self.gb_pb_buy_volume_3_2_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_2_6.setObjectName(_fromUtf8("gb_pb_buy_volume_3_2_6"))
self.gb_pb_b3_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b3_volume_6.setGeometry(QtCore.QRect(110, 200, 71, 20))
self.gb_pb_b3_volume_6.setFlat(True)
self.gb_pb_b3_volume_6.setObjectName(_fromUtf8("gb_pb_b3_volume_6"))
self.gb_pb_buy_volume_row_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_row_2_6.setGeometry(QtCore.QRect(210, 180, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_2_6.setFont(font)
self.gb_pb_buy_volume_row_2_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_2_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_2_6.setFlat(True)
self.gb_pb_buy_volume_row_2_6.setObjectName(_fromUtf8("gb_pb_buy_volume_row_2_6"))
self.gb_pb_b2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b2_6.setGeometry(QtCore.QRect(10, 180, 51, 20))
self.gb_pb_b2_6.setFlat(True)
self.gb_pb_b2_6.setObjectName(_fromUtf8("gb_pb_b2_6"))
self.gb_pb_buy_volume_3_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_3_3_6.setGeometry(QtCore.QRect(350, 200, 41, 23))
self.gb_pb_buy_volume_3_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_3_6.setObjectName(_fromUtf8("gb_pb_buy_volume_3_3_6"))
self.gb_pb_b2_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b2_price_6.setGeometry(QtCore.QRect(60, 180, 51, 20))
self.gb_pb_b2_price_6.setFlat(True)
self.gb_pb_b2_price_6.setObjectName(_fromUtf8("gb_pb_b2_price_6"))
self.gb_pb_buy_volume_row_minus_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_row_minus_3_6.setGeometry(QtCore.QRect(180, 200, 31, 23))
self.gb_pb_buy_volume_row_minus_3_6.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_3_6"))
self.gb_pb_b3_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b3_price_6.setGeometry(QtCore.QRect(60, 200, 51, 20))
self.gb_pb_b3_price_6.setFlat(True)
self.gb_pb_b3_price_6.setObjectName(_fromUtf8("gb_pb_b3_price_6"))
self.gb_pb_b4_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b4_volume_6.setGeometry(QtCore.QRect(110, 220, 71, 20))
self.gb_pb_b4_volume_6.setFlat(True)
self.gb_pb_b4_volume_6.setObjectName(_fromUtf8("gb_pb_b4_volume_6"))
self.gb_pb_buy_volume_1_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_1_1_6.setGeometry(QtCore.QRect(270, 160, 41, 23))
self.gb_pb_buy_volume_1_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_1_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_1_6.setObjectName(_fromUtf8("gb_pb_buy_volume_1_1_6"))
self.gb_pb_buy_volume_row_minus_5_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_row_minus_5_6.setGeometry(QtCore.QRect(180, 240, 31, 23))
self.gb_pb_buy_volume_row_minus_5_6.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_5_6"))
self.gb_pb_buy_volume_5_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_5_3_6.setGeometry(QtCore.QRect(350, 240, 41, 23))
self.gb_pb_buy_volume_5_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_3_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_3_6.setObjectName(_fromUtf8("gb_pb_buy_volume_5_3_6"))
self.gb_pb_buy_volume_2_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_2_1_6.setGeometry(QtCore.QRect(270, 180, 41, 23))
self.gb_pb_buy_volume_2_1_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_1_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_1_6.setObjectName(_fromUtf8("gb_pb_buy_volume_2_1_6"))
self.gb_pb_buy_volume_1_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_1_2_6.setGeometry(QtCore.QRect(310, 160, 41, 23))
self.gb_pb_buy_volume_1_2_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_2_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_2_6.setObjectName(_fromUtf8("gb_pb_buy_volume_1_2_6"))
self.gb_pb_buy_volume_row_5_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_row_5_6.setGeometry(QtCore.QRect(210, 240, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_5_6.setFont(font)
self.gb_pb_buy_volume_row_5_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_5_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_5_6.setFlat(True)
self.gb_pb_buy_volume_row_5_6.setObjectName(_fromUtf8("gb_pb_buy_volume_row_5_6"))
self.gb_pb_buy_volume_4_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_buy_volume_4_3_6.setGeometry(QtCore.QRect(350, 220, 41, 23))
self.gb_pb_buy_volume_4_3_6.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_3_6.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_3_6.setObjectName(_fromUtf8("gb_pb_buy_volume_4_3_6"))
self.gb_pb_b4_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b4_price_6.setGeometry(QtCore.QRect(60, 220, 51, 20))
self.gb_pb_b4_price_6.setStyleSheet(_fromUtf8("\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_b4_price_6.setFlat(True)
self.gb_pb_b4_price_6.setObjectName(_fromUtf8("gb_pb_b4_price_6"))
self.gb_pb_b5_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_b5_volume_6.setGeometry(QtCore.QRect(110, 240, 71, 20))
self.gb_pb_b5_volume_6.setFlat(True)
self.gb_pb_b5_volume_6.setObjectName(_fromUtf8("gb_pb_b5_volume_6"))
        self.gb_pb_buy_volume_4_2_6
B*B*A + B*A*B + A*B*B
q = A*A*A + B*B*B
p1 = p.subs({"A": A+B, "B": A-B})
assert p1 == 4*A*A*A + 4*B*B*B
verbose = argv.verbose
r = argv.get("r", 1) # degree
m = argv.get("m", 3)
code = reed_muller(r, m)
code.dump()
p = code.weight_enum(A, B)
if verbose:
print("code:")
print(p)
dual = code.get_dual()
q = dual.weight_enum(A, B)
if verbose:
print("dual:")
print(q)
print("p==q:", p==q)
print("code.is_selfdual:", code.is_selfdual())
#r = p.subs({"A": A+B, "B": A-B})
r = code.weight_enum(A+B, A-B)
if verbose:
print("P(A+B, A-B)")
print(r)
coeff = 2**len(code.G)
print("MacWilliams:", r == coeff*q)
print("OK")
def test_dual():
from vec import Space, Hom, Map
import element
ring = element.Z
#ring = element.Q
one = ring.one
space = Space(2, ring)
hom = Hom(space, space)
I = Map.from_array([[1, 0], [0, 1]], hom)
X = Map.from_array([[0, 1], [1, 0]], hom)
Z = Map.from_array([[1, 0], [0, -1]], hom)
assert X+X == (2*X)
assert X*Z == -Z*X
assert (X@X) * (Z@Z) == (Z@Z) * (X@X)
assert (I@X@X) * (I@Z@Z) == (I@Z@Z) * (I@X@X)
if argv.code == "repitition":
G = parse("111")
H = parse("""
11.
.11""")
elif argv.code == "steane":
G = parse("""
1111...
.1.11.1
..11.11
""")
else:
return
code = Code(G)
if argv.dual:
code = code.get_dual()
dual = code.get_dual()
code.dump()
#W = lambda A, B : (A@A@A + B@B@B)
#WD = lambda A, B : (A@A@A + B@B@A + B@A@B + A@B@B)
W = code.tensor_enum
WD = dual.tensor_enum
a = 2**len(code.G)
b = 2**len(dual.G)
A = WD(I, X)
B = W(I, Z)
assert A*B == B*A
AA = W(I+X, I-X)
BB = WD(I+Z, I-Z)
assert AA == a*A
assert BB == b*B
assert AA*BB == a*b*A*B
#print(W(I+X, I-X))
#print(WD(I, X))
#print(W(I, Z))
src = Space(2, ring)
tgt = Space(3, ring)
hom = Hom(src, tgt)
A = Map.from_array([
[1, 2],
[5, 6],
[7, 8]], hom)
B = Map.from_array([
[7, 2],
[6, 3],
[5, 4]], hom)
assert W(A+B, A-B) == a*WD(A, B)
assert WD(A+B, A-B) == b*W(A, B)
def search():
# <NAME>, 1209.2426v1 sec IX.
# https://arxiv.org/pdf/1209.2426.pdf
verbose = argv.get("verbose")
m = argv.get("m", 6) # _number of rows
k = argv.get("k", None) # _number of odd-weight rows
# these are the variables N_x
xs = list(cross([(0, 1)]*m))
maxweight = argv.maxweight
minweight = argv.get("minweight", 1)
xs = [x for x in xs if minweight <= sum(x)]
if maxweight:
xs = [x for x in xs if sum(x) <= maxweight]
N = len(xs)
lhs = []
rhs = []
# bi-orthogonality
for a in range(m):
for b in range(a+1, m):
v = zeros2(N)
for i, x in enumerate(xs):
if x[a] == x[b] == 1:
v[i] = 1
if v.sum():
lhs.append(v)
rhs.append(0)
# tri-orthogonality
for a in range(m):
for b in range(a+1, m):
for c in range(b+1, m):
v = zeros2(N)
for i, x in enumerate(xs):
if x[a] == x[b] == x[c] == 1:
v[i] = 1
if v.sum():
lhs.append(v)
rhs.append(0)
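    # Added commentary: each constraint row forces the selected columns N_x
    # with x[a] = x[b] = 1 (or x[a] = x[b] = x[c] = 1) to sum to zero mod 2,
    # i.e. every pair (and triple) of rows of the generated matrix overlaps in
    # an even number of columns -- the bi-/tri-orthogonality conditions of the
    # triorthogonal-code construction cited above.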
    # # disallow columns with weight <= 1
# for i, x in enumerate(xs):
# if sum(x)<=1:
# v = zeros2(N)
# v[i] = 1
# lhs.append(v)
# rhs.append(0)
if k is not None:
        # constrain to k odd-weight rows
assert 0<=k<m
for a in range(m):
v = zeros2(N)
for i, x in enumerate(xs):
if x[a] == 1:
v[i] = 1
lhs.append(v)
if a<k:
rhs.append(1)
else:
rhs.append(0)
A = array2(lhs)
rhs = array2(rhs)
#print(shortstr(A))
B = pseudo_inverse(A)
soln = dot2(B, rhs)
if not eq2(dot2(A, soln), rhs):
print("no solution")
return
if verbose:
print("soln:")
print(shortstr(soln))
soln.shape = (N, 1)
rhs.shape = A.shape[0], 1
K = array2(list(find_kernel(A)))
#print(K)
#print( dot2(A, K.transpose()))
#sols = []
#for v in span(K):
best = None
density = 1.0
size = 99*N
trials = argv.get("trials", 1024)
count = 0
for trial in range(trials):
u = rand2(len(K), 1)
v = dot2(K.transpose(), u)
#print(v)
v = (v+soln)%2
assert eq2(dot2(A, v), rhs)
if v.sum() > size:
continue
size = v.sum()
Gt = []
for i, x in enumerate(xs):
if v[i]:
Gt.append(x)
if not Gt:
continue
Gt = array2(Gt)
G = Gt.transpose()
assert is_morthogonal(G, 3)
if G.shape[1]<m:
continue
if 0 in G.sum(1):
continue
if argv.strong_morthogonal and not strong_morthogonal(G, 3):
continue
#print(shortstr(G))
# for g in G:
# print(shortstr(g), g.sum())
# print()
_density = float(G.sum()) / (G.shape[0]*G.shape[1])
#if best is None or _density < density:
if best is None or G.shape[1] <= size:
best = G
size = G.shape[1]
density = _density
if 0:
#sols.append(G)
Gx = even_rows(G)
assert is_morthogonal(Gx, 3)
if len(Gx)==0:
continue
GGx = array2(list(span(Gx)))
assert is_morthogonal(GGx, 3)
count += 1
print("found %d solutions" % count)
if best is None:
return
G = best
#print(shortstr(G))
for g in G:
print(shortstr(g), g.sum())
print()
print("density:", density)
print("shape:", G.shape)
G = linear_independent(G)
A = list(span(G))
print(strong_morthogonal(A, 1))
print(strong_morthogonal(A, 2))
print(strong_morthogonal(A, 3))
#print(shortstr(dot2(G, G.transpose())))
if 0:
B = pseudo_inverse(A)
v = dot2(B, rhs)
print("B:")
print(shortstr(B))
print("v:")
print(shortstr(v))
assert eq2(dot2(B, v), rhs)
def build_toric(l=3, allgen=True):
keys = []
keymap = {}
for i in range(l):
for j in range(l):
for k in (0, 1):
m = len(keys)
keys.append((i, j, k))
for di in (-l, 0, l):
for dj in (-l, 0, l):
keymap[i+di, j+dj, k] = m
if l>2:
assert keys[keymap[2, 1, 0]] == (2, 1, 0)
if allgen:
m = l**2 # rows (constraints)
else:
m = l**2-1 # rows (constraints)
n = len(keys) # cols (bits)
assert n == 2*(l**2)
Lx = zeros2(2, n)
Lz = zeros2(2, n)
Hx = zeros2(m, n)
Tz = zeros2(m, n)
Hz = zeros2(m, n)
Tx = zeros2(m, n)
for i in range(l):
Lx[0, keymap[i, l-1, 1]] = 1
Lx[1, keymap[l-1, i, 0]] = 1
Lz[0, keymap[0, i, 1]] = 1
Lz[1, keymap[i, 0, 0]] = 1
row = 0
xmap = {}
for i in range(l):
for j in range(l):
if (i, j)==(0, 0) and not allgen:
continue
Hx[row, keymap[i, j, 0]] = 1
Hx[row, keymap[i, j, 1]] = 1
Hx[row, keymap[i-1, j, 0]] = 1
Hx[row, keymap[i, j-1, 1]] = 1
xmap[i, j] = row
i1 = i
while i1>0:
Tz[row, keymap[i1-1, j, 0]] = 1
i1 -= 1
j1 = j
while j1>0:
Tz[row, keymap[i1, j1-1, 1]] = 1
j1 -= 1
row += 1
row = 0
zmap = {}
for i in range(l):
for j in range(l):
if i==l-1 and j==l-1 and not allgen:
continue
Hz[row, keymap[i, j, 0]] = 1
Hz[row, keymap[i, j, 1]] = 1
Hz[row, keymap[i+1, j, 1]] = 1
Hz[row, keymap[i, j+1, 0]] = 1
zmap[i, j] = row
i1 = i
while i1<l-1:
Tx[row, keymap[i1+1, j, 1]] = 1
i1 += 1
j1 = j
while j1<l-1:
Tx[row, keymap[i1, j1+1, 0]] = 1
j1 += 1
row += 1
return Hx
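# Added commentary: with allgen=True, build_toric(l) returns the X-type check
# matrix Hx of the l x l toric code, of shape (l**2, 2*l**2) -- e.g.
# build_toric(3) yields a 9 x 18 binary matrix (one generator per site, two
# qubits per site).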
def search_extend():
# Extend the checks of a random code to make it triorthogonal.
# Based on the search function above.
verbose = argv.get("verbose")
m = argv.get("m", 6)
n = argv.get("n", m+2)
k = argv.get("k") # odd _numbered rows ( logical operators)
code = argv.get("code", "rand")
if code == "rand":
while 1:
G0 = rand2(m, n)
counts = G0.sum(0)
if min(counts)==2 and rank(G0) == m:
cols = set()
for i in range(n):
cols.add(tuple(G0[:, i]))
if len(cols) == n: # no repeated cols
break
elif code == "toric":
G0 = parse("""
11.11...
.111..1.
1...11.1
""") # l=2 toric code X logops + X stabs
l = argv.get("l", 3)
G0 = build_toric(l)
m, n = G0.shape
else:
return
code = Code(G0, check=False)
print(shortstr(G0))
print("is_triorthogonal:", code.is_triorthogonal())
# these are the variables N_x
xs = list(cross([(0, 1)]*m))
N = len(xs)
lookup = {}
for i, x in enumerate(xs):
lookup[x] = i
lhs = []
rhs = []
taken = set()
for i in range(n):
x = G0[:, i]
idx = lookup[tuple(x)]
assert idx not in taken
taken.add(idx)
if verbose:
for idx in range(N):
print(idx, xs[idx], "*" if idx in taken else "")
for idx in taken:
v = zeros2(N)
v[idx] = 1
lhs.append(v)
rhs.append(1)
# bi-orthogonality
for a in range(m):
for b in range(a+1, m):
v = zeros2(N)
            for i,
this risk level.
returned: on success
type: int
sample: 56
auditing_findings_count:
description:
- The number of findings in the Auditing category.
returned: on success
type: int
sample: 56
authorization_control_findings_count:
description:
- The number of findings in the Authorization Control category.
returned: on success
type: int
sample: 56
data_encryption_findings_count:
description:
- The number of findings in the Data Encryption category.
returned: on success
type: int
sample: 56
db_configuration_findings_count:
description:
- The number of findings in the Database Configuration category.
returned: on success
type: int
sample: 56
fine_grained_access_control_findings_count:
description:
- The number of findings in the Fine-Grained Access Control category.
returned: on success
type: int
sample: 56
privileges_and_roles_findings_count:
description:
- The number of findings in the Privileges and Roles category.
returned: on success
type: int
sample: 56
user_accounts_findings_count:
description:
- The number of findings in the User Accounts category.
returned: on success
type: int
sample: 56
low_risk:
description:
- ""
returned: on success
type: complex
contains:
targets_count:
description:
- The number of targets that contributed to the counts at this risk level.
returned: on success
type: int
sample: 56
auditing_findings_count:
description:
- The number of findings in the Auditing category.
returned: on success
type: int
sample: 56
authorization_control_findings_count:
description:
- The number of findings in the Authorization Control category.
returned: on success
type: int
sample: 56
data_encryption_findings_count:
description:
- The number of findings in the Data Encryption category.
returned: on success
type: int
sample: 56
db_configuration_findings_count:
description:
- The number of findings in the Database Configuration category.
returned: on success
type: int
sample: 56
fine_grained_access_control_findings_count:
description:
- The number of findings in the Fine-Grained Access Control category.
returned: on success
type: int
sample: 56
privileges_and_roles_findings_count:
description:
- The number of findings in the Privileges and Roles category.
returned: on success
type: int
sample: 56
user_accounts_findings_count:
description:
- The number of findings in the User Accounts category.
returned: on success
type: int
sample: 56
advisory:
description:
- ""
returned: on success
type: complex
contains:
targets_count:
description:
- The number of targets that contributed to the counts at this risk level.
returned: on success
type: int
sample: 56
auditing_findings_count:
description:
- The number of findings in the Auditing category.
returned: on success
type: int
sample: 56
authorization_control_findings_count:
description:
- The number of findings in the Authorization Control category.
returned: on success
type: int
sample: 56
data_encryption_findings_count:
description:
- The number of findings in the Data Encryption category.
returned: on success
type: int
sample: 56
db_configuration_findings_count:
description:
- The number of findings in the Database Configuration category.
returned: on success
type: int
sample: 56
fine_grained_access_control_findings_count:
description:
- The number of findings in the Fine-Grained Access Control category.
returned: on success
type: int
sample: 56
privileges_and_roles_findings_count:
description:
- The number of findings in the Privileges and Roles category.
returned: on success
type: int
sample: 56
user_accounts_findings_count:
description:
- The number of findings in the User Accounts category.
returned: on success
type: int
sample: 56
evaluate:
description:
- ""
returned: on success
type: complex
contains:
targets_count:
description:
- The number of targets that contributed to the counts at this risk level.
returned: on success
type: int
sample: 56
auditing_findings_count:
description:
- The number of findings in the Auditing category.
returned: on success
type: int
sample: 56
authorization_control_findings_count:
description:
- The number of findings in the Authorization Control category.
returned: on success
type: int
sample: 56
data_encryption_findings_count:
description:
- The number of findings in the Data Encryption category.
returned: on success
type: int
sample: 56
db_configuration_findings_count:
description:
- The number of findings in the Database Configuration category.
returned: on success
type: int
sample: 56
fine_grained_access_control_findings_count:
description:
- The number of findings in the Fine-Grained Access Control category.
returned: on success
type: int
sample: 56
privileges_and_roles_findings_count:
description:
- The number of findings in the Privileges and Roles category.
returned: on success
type: int
sample: 56
user_accounts_findings_count:
description:
- The number of findings in the User Accounts category.
returned: on success
type: int
sample: 56
_pass:
description:
- ""
returned: on success
type: complex
contains:
targets_count:
description:
- The number of targets that contributed to the counts at this risk level.
returned: on success
type: int
sample: 56
auditing_findings_count:
description:
- The number of findings in the Auditing category.
returned: on success
type: int
sample: 56
authorization_control_findings_count:
description:
- The number of findings in the Authorization Control category.
returned: on success
type: int
sample: 56
data_encryption_findings_count:
description:
- The number of findings in the Data Encryption category.
returned: on success
type: int
sample: 56
db_configuration_findings_count:
description:
- The number of findings in the Database Configuration category.
returned: on success
type: int
sample: 56
fine_grained_access_control_findings_count:
description:
- The number of findings in the Fine-Grained Access Control category.
returned: on success
type: int
sample: 56
privileges_and_roles_findings_count:
description:
- The number of findings in the Privileges and Roles category.
returned: on success
type: int
sample: 56
user_accounts_findings_count:
description:
- The number of findings in the User Accounts category.
returned: on success
type: int
sample: 56
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see
L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm)
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm)
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "System tags for this resource. Each key is predefined and scoped to a namespace. For more information, see Resource Tags.
Example: `{\\"orcl-cloud\\": {\\"free-tier-retained\\": \\"true\\"}}`"
returned: on success
type: dict
sample: {}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"target_ids": [],
"ignored_targets": [],
"ignored_assessment_ids": [],
"target_version": "target_version_example",
"is_baseline": true,
"is_deviated_from_baseline": true,
"last_compared_baseline_id": "ocid1.lastcomparedbaseline.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example",
"schedule_security_assessment_id": "ocid1.schedulesecurityassessment.oc1..xxxxxxEXAMPLExxxxxx",
"triggered_by": "USER",
"description": "description_example",
"schedule": "schedule_example",
"link": "link_example",
"type": "LATEST",
"statistics": {
"targets_count": 56,
"high_risk": {
"targets_count": 56,
"auditing_findings_count": 56,
"authorization_control_findings_count": 56,
"data_encryption_findings_count": 56,
"db_configuration_findings_count": 56,
"fine_grained_access_control_findings_count": 56,
"privileges_and_roles_findings_count": 56,
"user_accounts_findings_count": 56
},
"medium_risk": {
"targets_count": 56,
"auditing_findings_count": 56,
"authorization_control_findings_count": 56,
"data_encryption_findings_count": 56,
"db_configuration_findings_count": 56,
"fine_grained_access_control_findings_count": 56,
"privileges_and_roles_findings_count": 56,
"user_accounts_findings_count": 56
},
"low_risk": {
"targets_count": 56,
"auditing_findings_count": 56,
"authorization_control_findings_count": 56,
"data_encryption_findings_count": 56,
"db_configuration_findings_count": 56,
"fine_grained_access_control_findings_count": 56,
"privileges_and_roles_findings_count": 56,
"user_accounts_findings_count": 56
},
"advisory": {
"targets_count": 56,
"auditing_findings_count": 56,
"authorization_control_findings_count": 56,
"data_encryption_findings_count": 56,
"db_configuration_findings_count": 56,
"fine_grained_access_control_findings_count": 56,
"privileges_and_roles_findings_count": 56,
"user_accounts_findings_count": 56
},
"evaluate": {
"targets_count": 56,
"auditing_findings_count": 56,
"authorization_control_findings_count": 56,
"data_encryption_findings_count": 56,
"db_configuration_findings_count": 56,
"fine_grained_access_control_findings_count": 56,
"privileges_and_roles_findings_count": 56,
"user_accounts_findings_count": 56
},
"_pass": {
"targets_count": 56,
"auditing_findings_count": 56,
"authorization_control_findings_count": 56,
"data_encryption_findings_count": 56,
"db_configuration_findings_count": 56,
"fine_grained_access_control_findings_count": 56,
"privileges_and_roles_findings_count": 56,
"user_accounts_findings_count": 56
}
},
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.data_safe import DataSafeClient
from oci.data_safe.models import ChangeSecurityAssessmentCompartmentDetails
from oci.data_safe.models import SecurityAssessmentBaseLineDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataSafeSecurityAssessmentActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
change_compartment
set_security_assessment_baseline
unset_security_assessment_baseline
"""
@staticmethod
def get_module_resource_id_param():
return "security_assessment_id"
def get_module_resource_id(self):
return self.module.params.get("security_assessment_id")
def get_get_fn(self):
return self.client.get_security_assessment
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_security_assessment,
security_assessment_id=self.module.params.get("security_assessment_id"),
)
def change_compartment(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ChangeSecurityAssessmentCompartmentDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.change_security_assessment_compartment,
call_fn_args=(),
call_fn_kwargs=dict(
security_assessment_id=self.module.params.get("security_assessment_id"),
change_security_assessment_compartment_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
def set_security_assessment_baseline(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, SecurityAssessmentBaseLineDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.set_security_assessment_baseline,
call_fn_args=(),
call_fn_kwargs=dict(
security_assessment_id=self.module.params.get("security_assessment_id"),
base_line_details=action_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def unset_security_assessment_baseline(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.unset_security_assessment_baseline,
call_fn_args=(),
call_fn_kwargs=dict(
security_assessment_id=self.module.params.get("security_assessment_id"),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
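    # Illustrative note (an assumption, not shown in this file): the base
    # helper class is expected to dispatch the "action" module parameter to
    # the method of the same name, roughly
    #
    #     getattr(self, self.module.params.get("action"))()
    #
    # which is why the three methods above mirror the "choices" list in main().
    # change_compartment uses NONE_WAITER_KEY (returns without waiting), while
    # the two baseline actions wait on a work request via
    # WORK_REQUEST_WAITER_KEY until it reaches a completed state.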
DataSafeSecurityAssessmentActionsHelperCustom = get_custom_class(
"DataSafeSecurityAssessmentActionsHelperCustom"
)
class ResourceHelper(
DataSafeSecurityAssessmentActionsHelperCustom,
DataSafeSecurityAssessmentActionsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
security_assessment_id=dict(aliases=["id"], type="str", required=True),
compartment_id=dict(type="str"),
assessment_ids=dict(type="list", elements="str"),
action=dict(
type="str",
required=True,
choices=[
"change_compartment",
"set_security_assessment_baseline",
"unset_security_assessment_baseline",
],
),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="security_assessment",
service_client_class=DataSafeClient,
namespace="data_safe",
)
    result = resource_helper.perform_action(module.params.get("action"))
    module.exit_json(**result)


if __name__ == "__main__":
    main()
else:
self.gd['mapped_ica'][ica_lookup].update({'template' : map_itemWidget})
if icn_lookup:
if icn_lookup not in self.gd['mapped_icn'].keys():
self.gd['mapped_icn'].update({icn_lookup : {}})
if ica_lookup:
self.gd['mapped_icn'][icn_lookup].update({ica_lookup : map_itemWidget})
else:
self.gd['mapped_icn'][icn_lookup].update({'template' : map_itemWidget})
if updateGUI:
self.listWidget_ICAComponents.clearSelection()
self.listWidget_Classifications.setCurrentItem(map_itemWidget)
self.update_gui_classifications(map_itemWidget)
def delete_Classification(self):
"""Remove ICA > ICN mapping from list"""
last_i = len(self.listWidget_Classifications.selectedItems()) - 1
for i,item in enumerate(self.listWidget_Classifications.selectedItems()):
mapping_lookup = str(item.data(Qt.UserRole))
ica_lookup = self.gd['mapped'][mapping_lookup]['ica_lookup']
icn_lookup = self.gd['mapped'][mapping_lookup]['icn_lookup']
# Remove mapped widget item
self.listWidget_Classifications.takeItem(self.listWidget_Classifications.row(item))
self.listWidget_Classifications.clearSelection()
# Remove data storage for mapping
self.gd['mapped'].pop(mapping_lookup)
# Update dict of mapped ICs/ICNs
if icn_lookup in self.gd['mapped_ica'][ica_lookup].keys():
self.gd['mapped_ica'][ica_lookup].pop(icn_lookup)
if len(self.gd['mapped_ica'][ica_lookup].keys()) == 0:
self.gd['mapped_ica'].pop(ica_lookup)
if ica_lookup in self.gd['mapped_icn'][icn_lookup].keys():
self.gd['mapped_icn'][icn_lookup].pop(ica_lookup)
if len(self.gd['mapped_icn'][icn_lookup].keys()) == 0:
self.gd['mapped_icn'].pop(icn_lookup)
# Add ICA item back to listwidget
if ica_lookup in self.gd['ica'].keys():
ica_display_name = self.gd['ica'][ica_lookup]['display_name']
ica_matches = self.listWidget_ICAComponents.findItems(ica_display_name,
Qt.MatchExactly)
if len(ica_matches) == 0:
ica_item = QtWidgets.QListWidgetItem(ica_lookup)
self.listWidget_ICAComponents.addItem(ica_item)
ica_item.setData(Qt.UserRole, ica_lookup)
ica_item.setText(self.gd['ica'][ica_lookup]['display_name'])
self.gd['ica'][ica_lookup]['widget'] = ica_item
if i == last_i:
self.listWidget_ICAComponents.setCurrentItem(ica_item)
self.update_gui_ica(ica_item)
elif i == last_i:
self.update_plots()
def find_duplicate_mappings(self, duplicated_name='ica'):
"""Find ICA comps/ICN templates, etc. in multiple mappings/classifications,
without taking into account customized names"""
if duplicated_name not in ['ica', 'icn']: return #nothing to do
self.listWidget_Classifications.clearSelection()
self.listWidget_Classifications.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
if duplicated_name == 'ica':
mapped_list = 'mapped_ica'
dup_type = 'ICA comp.'
dup_type_other = 'ICNs'
            extras = []  # no extra items to exclude for ICA comps. (avoids 'in None' TypeError below)
elif duplicated_name == 'icn':
mapped_list = 'mapped_icn'
dup_type = 'ICN template'
dup_type_other = 'ICA comps.'
extras = [k for k in self.gd['icn'].keys() if re.match('\\.*noise', k,
flags=re.IGNORECASE)]
extras += self.config['icn']['extra_items']
extras += self.config['noise']['extra_items']
else:
dup_type = duplicated_name.capitalize()
custom_names = []
extras = None
mappings_dup_names = []
mappings_dup_items = []
found_duplicates = False
for lookup1 in self.gd[mapped_list].keys():
if ((lookup1 not in extras) and
(len(self.gd[mapped_list][lookup1]) >= 2)):
for lookup2 in self.gd[mapped_list][lookup1].keys():
found_duplicates = True
mapping_item = self.gd[mapped_list][lookup1][lookup2]
mapping_item.setSelected(True)
mappings_dup_items.append(mapping_item)
mappings_dup_names.append(str(mapping_item.text()))
if not found_duplicates:
title = "Searching for multiple "+ dup_type +" classifications:"
message = "No multiclassifications found, all current "
message += dup_type +" classifications are unique"
QtWidgets.QMessageBox.information(self, title, message)
else:
title = "Searching for multiple "+ dup_type +" classifications:"
message = "Found multiple classifications of "+ dup_type +"s to multiple "
message += dup_type_other + ":"
for i,mapping in enumerate(mappings_dup_names):
message += "\n " + mapping
message += "\n\n(Note: above may include duplicated " + dup_type
message += " items under multiple customized, unique names)"
message += "\n\nContinue to 'select classifications' window to remove & reset above items?"
if QtWidgets.QMessageBox.question(None, title, message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.Yes) == QtWidgets.QMessageBox.Yes:
self.clear_list_select(list_name='mapped',
listWidget=self.listWidget_Classifications,
list_subset = mappings_dup_items)
self.listWidget_Classifications.clearSelection()
self.listWidget_Classifications.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
def find_probable_classifications(self):
"""Find obvious, unambigous/undisputed possible mappings for non-classified ICA comps"""
warnflag = False
title = "Finding likely classifications:"
if not hasattr(self, 'corrs'):
warnflag = True
elif not self.corrs:
warnflag = True
if warnflag:
message = "Correlations need to be calculated in order to find likely classifications"
QtWidgets.QMessageBox.warning(self, title, message)
return
# Definitions of "Obvious" & "Undisputed" Criteria:
min_corr = 0.3 # minimum top corr. > 0.3
unambig_factor = 2 # top corr must be at least twice the corr for 2nd-highest match
criteria = "Criteria: \n1. ICA comp. strongly matches template (r > " + str(min_corr) + ")"
criteria += " \n2. ICN template currently without comparable alternatives"
criteria += " \n(defined as top match "+str(unambig_factor)+"x or more than all other corrs.):"
self.matches = map.Mapper.assign_matches(self.get_img_names('ica'),
self.corrs,
min_corr=min_corr,
unambigous_scaling_factor=unambig_factor)
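        # Hypothetical illustration of the criteria above (names and values are
        # made up): an ICA comp. with template correlations
        # {'DMN': 0.52, 'Visual': 0.20, 'Auditory': 0.15} gets 'DMN' as a likely
        # classification, since 0.52 > min_corr and 0.52 >= unambig_factor * 0.20.
        # A comp. whose top two correlations were 0.45 and 0.35 would be skipped
        # as ambiguous.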
# Select possible mappings to add
new_mappings = []
conflicting_mappings = []
redundant_mappings = []
mapping_info = {}
for ica_lookup, icn_lookup in self.matches.items():
if ica_lookup and icn_lookup:
ica_custom_name = self.gd['ica'][ica_lookup]['display_name']
icn_custom_name = self.gd['icn'][icn_lookup]['display_name']
potential_mapping = "%s > %s" %(ica_custom_name, icn_custom_name)
mapping_info.update({potential_mapping: {'ica_lookup': ica_lookup,
'icn_lookup': icn_lookup,
'ica_custom_name':ica_custom_name,
'icn_custom_name':icn_custom_name}})
# Sort potential new mappings
if potential_mapping in self.gd['mapped'].keys():
redundant_mappings.append(potential_mapping) #mapping already exists, ignore
else:
if ica_lookup in self.gd['mapped_ica'].keys():
if ((not self.config['ica']['allow_multiclassifications']) and
(icn_lookup not in self.gd['mapped_ica'][ica_lookup].keys())):
#ICA mappings must be unique, pairing w/ new ICN will overwrite existing mapping
conflicting_mappings.append(potential_mapping)
elif icn_lookup not in self.gd['mapped_ica'][ica_lookup].keys():
new_mappings.append(potential_mapping) #new ICA>ICN pair, consider adding
else:
redundant_mappings.append(potential_mapping) #ICA>ICN pair exists by another name
else:
new_mappings.append(potential_mapping) #unmapped ICA comp., consider adding
if len(new_mappings) + len(conflicting_mappings) == 0:
message = "No potential classifications found."
message += "\n\nAll currently unclassified ICA comps. "
message += "either feature weak (r < " + str(min_corr) + ") top matches,"
message += " or ambigous matching to multiple templates"
QtWidgets.QMessageBox.information(self, title, message)
return
message = ""
if len(new_mappings) > 0:
message += "Found likely new classifications:"
for mapping in new_mappings:
message += "\n " + mapping
message += "\n\n"
if len(conflicting_mappings) > 0:
message += "Found likely classifactions,"
message += " but will override existing classifications:"
for mapping in conflicting_mappings:
message += "\n " + mapping
message += "\n\n"
if len(new_mappings) + len(conflicting_mappings) > 0:
message += criteria
message += "\n\nContinue to 'select mappings' to add from above?"
if QtWidgets.QMessageBox.question(None, title, message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.Yes) == QtWidgets.QMessageBox.Yes:
new_mappings += conflicting_mappings
self.selectionWin = select.newSelectWin(self.listWidget_Classifications,
title="Select classifications to add:",
add_items = new_mappings,
list_subset=[]) #[] excludes current list items
if self.selectionWin.accept_result == QtWidgets.QDialog.Accepted:
for mapping_lookup in self.selectionWin.selected_display_names:
ica_lookup = mapping_info[mapping_lookup]['ica_lookup']
icn_lookup = mapping_info[mapping_lookup]['icn_lookup']
ica_custom_name = mapping_info[mapping_lookup]['ica_custom_name']
icn_custom_name = mapping_info[mapping_lookup]['icn_custom_name']
self.add_Classification(ica_icn_pair=(ica_lookup, icn_lookup),
ica_custom_name=ica_custom_name,
icn_custom_name=icn_custom_name,
updateGUI=False)
self.update_plots()
def find_questionable_classifications(self):
"""Find mappings between low-correlated ICA & ICN pairs,
Pull up GUI to allow rethinking/removal"""
min_corr = 0.3 # ...r < 0.3 is suspect
unambig_factor = 2 # ...top matches less than 2x as high as next highest are disputable
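        # Hypothetical examples of what gets flagged below: a mapping whose
        # ICA>ICN correlation is 0.25 (below min_corr), or one at 0.40 when some
        # other template correlates at 0.35 (0.40 < unambig_factor * 0.35).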
suspect_lookups = []
suspect_items = []
self.listWidget_Classifications.clearSelection()
self.listWidget_Classifications.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
for mapping_lookup in self.gd['mapped'].keys():
ica_lookup = self.gd['mapped'][mapping_lookup]['ica_lookup']
icn_lookup = self.gd['mapped'][mapping_lookup]['icn_lookup']
if ica_lookup in self.corrs.keys():
if icn_lookup in self.corrs[ica_lookup].keys():
if self.corrs[ica_lookup][icn_lookup] < min_corr:
suspect_lookups.append(mapping_lookup)
else:
other_corrs = self.corrs[ica_lookup].copy()
del other_corrs[icn_lookup]
best_other_corr = max(other_corrs.values())
if self.corrs[ica_lookup][icn_lookup] < best_other_corr * unambig_factor:
suspect_lookups.append(mapping_lookup)
if len(suspect_lookups) > 0:
title = "Searching for weak or debatable classifications in mappings:"
message = "Found weak (r < "+ str(min_corr) +") or depatable (r1 ~ r2) mappings:\n"
for i,mapping in enumerate(suspect_lookups):
message += "\n " + mapping
mapping_item = self.gd['mapped'][mapping]['mapped_item']
mapping_item.setSelected(True)
suspect_items.append(mapping_item)
message += "\n\nContinue to 'select mappings', to reset & re-classify?"
if QtWidgets.QMessageBox.question(None, title, message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.Yes) == QtWidgets.QMessageBox.Yes:
self.clear_list_select(list_name='mapped',
listWidget=self.listWidget_Classifications,
list_subset = suspect_items)
self.listWidget_Classifications.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.update_plots()
#--------------------------------------------------------
### Functions used to plot spatial maps & time series ###
#--------------------------------------------------------
def update_plots(self):
"""Update plots using global plotting options"""
ica_lookup, icn_lookup = self.get_current_networks()
if ica_lookup or icn_lookup:
options = self.get_plot_options(ica_lookup, icn_lookup, coords_from_sliders=False)
self.plot_vols(self.figure_x, **options)
self.canvas_x.draw()
self.plot_time(self.figure_t, **options)
self.canvas_t.draw()
def update_plots_from_sliders(self):
"""Updates plots after change in x,y,z slider bars,
without changing global plotting options"""
ica_lookup, icn_lookup = self.get_current_networks()
if ica_lookup or icn_lookup:
options = self.get_plot_options(ica_lookup, icn_lookup, coords_from_sliders=True)
self.plot_vols(self.figure_x, **options)
self.canvas_x.draw()
self.plot_time(self.figure_t, **options)
self.canvas_t.draw()
def get_current_networks(self):
"""Determine which ICs & ICN templates, or mappings are currently selected"""
if self.listWidget_Classifications.currentRow() != -1:
mapping_lookup = str(self.listWidget_Classifications.currentItem().data(Qt.UserRole))
ica_lookup = self.gd['mapped'][mapping_lookup]['ica_lookup']
if self.listWidget_ICNtemplates.currentRow() != -1:
icn_lookup = str(self.listWidget_ICNtemplates.currentItem().data(Qt.UserRole))
else:
icn_lookup = self.gd['mapped'][mapping_lookup]['icn_lookup']
elif self.listWidget_ICAComponents.currentRow() != -1:
ica_lookup = str(self.listWidget_ICAComponents.currentItem().data(Qt.UserRole))
if self.listWidget_ICNtemplates.currentRow() != -1:
icn_lookup = str(self.listWidget_ICNtemplates.currentItem().data(Qt.UserRole))
else:
icn_lookup = None
elif self.listWidget_ICNtemplates.currentRow() != -1:
ica_lookup = None
icn_lookup = str(self.listWidget_ICNtemplates.currentItem().data(Qt.UserRole))
else: # if both mapped networks & unmapped ICA lists are empty, reset GUI
ica_lookup = None
icn_lookup = None
self.reset_display()
return ica_lookup, icn_lookup
def get_plot_options(self, ica_lookup, icn_lookup, coords_from_sliders=False):
"""Get all plot options"""
displayLayout, coords = self.apply_slice_views(ica_lookup, icn_lookup, coords_from_sliders)
options = {'ica_lookup': ica_lookup,
'icn_lookup': icn_lookup,
'displayLayout': displayLayout,
'coords': coords}
options.update({'show_icn': self.mp['icn']['show_icn']})
if icn_lookup in self.gd['icn'].keys():
if not isinstance(self.gd['icn'][icn_lookup]['img'], (Nifti1Image, Nifti1Pair)):
options.update({'show_icn': False})
else:
options.update({'show_icn': False})
options.update({'show_time_series': self.tp['items']['show_time_series']})
options.update({'show_spectrum': self.tp['items']['show_spectrum']})
return options
def apply_slice_views(self, ica_lookup, icn_lookup, coords_from_sliders):
"""Determine what data display to use"""
if coords_from_sliders:
x, y, z = self.get_and_set_slice_coordinates()
else: x, y, z = (0, 0, 0)
if ica_lookup in self.gd['ica'].keys():
ica_img = self.gd['ica'][ica_lookup]['img']
else: ica_img = None
if (icn_lookup in self.gd['icn'].keys()) and self.gd['icn'][icn_lookup]['img']:
map_img = self.gd['icn'][icn_lookup]['img']
else: map_img = None
if (map_img is not None) and (ica_img is not None):
map_img = image.resample_to_img(source_img=map_img, | |
(numpy.ndarray):
An array containing a thresholded gridded truth at an
equivalent validity time to the forecast array.
Returns:
numpy.ndarray:
An array containing reliability table data for a single time
and threshold. The leading dimension corresponds to the rows
of a calibration table, the second dimension to the number of
probability bins, and the trailing dimensions are the spatial
dimensions of the forecast and truth cubes (which are
equivalent).
"""
observation_counts = []
forecast_probabilities = []
forecast_counts = []
for bin_min, bin_max in self.probability_bins:
observation_mask = (((forecast >= bin_min) & (forecast <= bin_max))
& (np.isclose(truth, 1))).astype(int)
forecast_mask = ((forecast >= bin_min) &
(forecast <= bin_max)).astype(int)
forecasts_probability_values = forecast * forecast_mask
observation_counts.append(observation_mask)
forecast_probabilities.append(forecasts_probability_values)
forecast_counts.append(forecast_mask)
reliability_table = np.stack([
np.stack(observation_counts),
np.stack(forecast_probabilities),
np.stack(forecast_counts)])
return reliability_table.astype(np.float32)
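    # Worked example with hypothetical values and a 1-D "spatial" dimension:
    # with probability_bins = [(0.0, 0.5), (0.5, 1.0)],
    #     forecast = np.array([0.2, 0.7, 0.9], dtype=np.float32)
    #     truth    = np.array([0.0, 1.0, 1.0], dtype=np.float32)
    # the returned table has shape (3, 2, 3):
    #     observation counts      [[0, 0, 0], [0, 1, 1]]
    #     forecast probabilities  [[0.2, 0, 0], [0, 0.7, 0.9]]
    #     forecast counts         [[1, 0, 0], [0, 1, 1]]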
def process(self, historic_forecasts, truths):
"""
Slice data over threshold and time coordinates to construct reliability
tables. These are summed over time to give a single table for each
threshold, constructed from all the provided historic forecasts and
truths.
.. See the documentation for an example of the resulting reliability
table cube.
.. include:: extended_documentation/calibration/
reliability_calibration/reliability_calibration_examples.rst
Note that the forecast and truth data used is probabilistic, i.e. has
already been thresholded relative to the thresholds of interest, using
the equality operator required. As such this plugin is agnostic as to
whether the data is thresholded below or above a given diagnostic
threshold.
Args:
historic_forecasts (iris.cube.Cube):
A cube containing the historical forecasts used in calibration.
These are expected to all have a consistent cycle hour, that is
the hour in the forecast reference time.
truths (iris.cube.Cube):
A cube containing the thresholded gridded truths used in
calibration.
Returns:
iris.cube.CubeList:
A cubelist of reliability table cubes, one for each threshold
in the historic forecast cubes.
Raises:
ValueError: If the forecast and truth cubes have differing
threshold coordinates.
"""
historic_forecasts, truths = filter_non_matching_cubes(
historic_forecasts, truths)
threshold_coord = find_threshold_coordinate(historic_forecasts)
truth_threshold_coord = find_threshold_coordinate(truths)
if not threshold_coord == truth_threshold_coord:
msg = "Threshold coordinates differ between forecasts and truths."
raise ValueError(msg)
time_coord = historic_forecasts.coord('time')
self._check_forecast_consistency(historic_forecasts)
reliability_cube = self._create_reliability_table_cube(
historic_forecasts, threshold_coord)
reliability_tables = iris.cube.CubeList()
threshold_slices = zip(historic_forecasts.slices_over(threshold_coord),
truths.slices_over(threshold_coord))
for forecast_slice, truth_slice in threshold_slices:
threshold_reliability = []
time_slices = zip(forecast_slice.slices_over(time_coord),
truth_slice.slices_over(time_coord))
for forecast, truth in time_slices:
reliability_table = (
self._populate_reliability_bins(
forecast.data, truth.data))
threshold_reliability.append(reliability_table)
# Stack and sum reliability tables for all times
table_values = np.stack(threshold_reliability)
table_values = np.sum(table_values, axis=0, dtype=np.float32)
reliability_entry = reliability_cube.copy(data=table_values)
reliability_entry.replace_coord(
forecast_slice.coord(threshold_coord))
reliability_tables.append(reliability_entry)
return MergeCubes()(reliability_tables)
class AggregateReliabilityCalibrationTables(BasePlugin):
"""This plugin enables the aggregation of multiple reliability calibration
tables, and/or the aggregation over coordinates in the tables."""
def __repr__(self):
"""Represent the configured plugin instance as a string."""
return '<AggregateReliabilityCalibrationTables>'
@staticmethod
def _check_frt_coord(cubes):
"""
Check that the reliability calibration tables do not have overlapping
forecast reference time bounds. If these coordinates overlap in time it
indicates that some of the same forecast data has contributed to more
than one table, thus aggregating them would double count these
contributions.
Args:
cubes (iris.cube.CubeList):
The list of reliability calibration tables for which the
forecast reference time coordinates should be checked.
Raises:
ValueError: If the bounds overlap.
"""
bounds = []
for cube in cubes:
bounds.extend(cube.coord('forecast_reference_time').bounds)
bounds = np.concatenate(bounds)
if not all(x < y for x, y in zip(bounds, bounds[1:])):
raise ValueError('Reliability calibration tables have overlapping '
'forecast reference time bounds, indicating that '
'the same forecast data has contributed to the '
'construction of both tables. Cannot aggregate.')
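    # Sketch of the check above with illustrative numbers: tables with bounds
    # [0, 10] and [11, 21] concatenate to [0, 10, 11, 21], every pair satisfies
    # x < y, and aggregation proceeds. Bounds of [0, 10] and [10, 20] fail on
    # the equal pair, so periods that merely touch are also rejected by the
    # strict comparison.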
def process(self, cubes, coordinates=None):
"""
Aggregate the input reliability calibration table cubes and return the
result.
Args:
cubes (list or iris.cube.CubeList):
The cube or cubes containing the reliability calibration tables
to aggregate.
coordinates (list or None):
A list of coordinates over which to aggregate the reliability
calibration table using summation. If the argument is None and
a single cube is provided, this cube will be returned
unchanged.
"""
coordinates = [] if coordinates is None else coordinates
try:
cube, = cubes
except ValueError:
cubes = iris.cube.CubeList(cubes)
self._check_frt_coord(cubes)
cube = cubes.merge_cube()
coordinates.append('forecast_reference_time')
else:
if not coordinates:
return cube
result = collapsed(cube, coordinates, iris.analysis.SUM)
frt = create_unified_frt_coord(cube.coord('forecast_reference_time'))
result.replace_coord(frt)
return result
class ApplyReliabilityCalibration(PostProcessingPlugin):
"""
A plugin for the application of reliability calibration to probability
forecasts.
References:
<NAME>. 2014. Calibrating ensemble reliability whilst preserving
spatial structure. Tellus, Ser. A Dyn. Meteorol. Oceanogr. 66.
"""
def __init__(self, minimum_forecast_count=200):
"""
Initialise class for applying reliability calibration.
Args:
minimum_forecast_count (int):
The minimum number of forecast counts in a forecast probability
bin for it to be used in calibration. If the reliability
table for a forecast threshold includes any bins with
insufficient counts that threshold will be returned unchanged.
The default value of 200 is that used in Flowerdew 2014.
"""
if minimum_forecast_count < 1:
raise ValueError(
"The minimum_forecast_count must be at least 1 as empty "
"bins in the reliability table are not handled.")
self.minimum_forecast_count = minimum_forecast_count
self.threshold_coord = None
def __repr__(self):
"""Represent the configured plugin instance as a string."""
result = '<ApplyReliabilityCalibration: minimum_forecast_count: {}>'
return result.format(self.minimum_forecast_count)
@staticmethod
def _threshold_coords_equivalent(forecast, reliability_table):
"""Ensure that the threshold coordinates are identical in the
reliability table and in the forecast cube. If not raise an
exception.
Args:
forecast (iris.cube.Cube):
The forecast to be calibrated.
reliability_table (iris.cube.Cube):
The reliability table to use for applying calibration.
Raises:
ValueError: If the threshold coordinates are different in the two
cubes.
"""
if not (forecast.coord(var_name='threshold') ==
reliability_table.coord(var_name='threshold')):
raise ValueError('Threshold coordinates do not match between '
'reliability table and forecast cube.')
def _ensure_monotonicity(self, cube):
"""
Ensures that probabilities change monotonically relative to thresholds
in the expected order, e.g. exceedance probabilities always remain the
same or decrease as the threshold values increase, below threshold
probabilities always remain the same or increase as the threshold
values increase.
Args:
cube (iris.cube.Cube):
The probability cube for which monotonicity is to be checked
and enforced. This cube is modified in place.
Raises:
ValueError: Threshold coordinate lacks the
spp__relative_to_threshold attribute.
Warns:
UserWarning: If the probabilities must be sorted to reinstate
expected monotonicity following calibration.
"""
threshold_dim, = cube.coord_dims(self.threshold_coord)
thresholding = self.threshold_coord.attributes.get(
"spp__relative_to_threshold", None)
if thresholding is None:
msg = ('Cube threshold coordinate does not define whether '
'thresholding is above or below the defined thresholds.')
raise ValueError(msg)
if (thresholding == 'above' and not
(np.diff(cube.data, axis=threshold_dim) <= 0).all()):
msg = ('Exceedance probabilities are not decreasing monotonically '
'as the threshold values increase. Forced back into order.')
warnings.warn(msg)
cube.data = np.sort(cube.data, axis=threshold_dim)[::-1]
if (thresholding == 'below' and not
(np.diff(cube.data, axis=threshold_dim) >= 0).all()):
msg = ('Below threshold probabilities are not increasing '
'monotonically as the threshold values increase. Forced '
'back into order.')
warnings.warn(msg)
cube.data = np.sort(cube.data, axis=threshold_dim)
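    # Hypothetical illustration: for an "above" (exceedance) cube whose values
    # along the threshold dimension are [0.9, 0.7, 0.8], np.diff gives
    # [-0.2, 0.1], which is not all <= 0, so the warning is raised and the data
    # are reordered to [0.9, 0.8, 0.7]. "Below" probabilities are handled
    # symmetrically with an ascending sort.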
def _calculate_reliability_probabilities(self, reliability_table):
"""
Calculates forecast probabilities and observation frequencies from the
reliability table. Where the forecast count is zero, Nones are
returned.
Args:
reliability_table (iris.cube.Cube):
A reliability table for a single threshold from which to
calculate the forecast probabilities and observation
frequencies.
Returns:
(tuple): tuple containing Nones or:
**forecast_probability** (numpy.ndarray):
Forecast probabilities calculated by dividing the sum of
forecast probabilities by the forecast count.
**observation_frequency** (numpy.ndarray):
Observation frequency calculated by dividing the
observation count by the forecast count.
"""
observation_count = reliability_table.extract(
iris.Constraint(table_row_name='observation_count')).data
forecast_count = reliability_table.extract(
iris.Constraint(table_row_name='forecast_count')).data
forecast_probability_sum = reliability_table.extract(
iris.Constraint(
table_row_name='sum_of_forecast_probabilities')).data
        # If any bin has insufficient forecast counts, return None to avoid
        # applying calibration.
valid_bins = np.where(forecast_count >= self.minimum_forecast_count)
if valid_bins[0].size != forecast_count.size:
return None, None
forecast_probability = np.array(
forecast_probability_sum / forecast_count)
observation_frequency = np.array(
observation_count / forecast_count)
return forecast_probability, observation_frequency
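    # Worked example (hypothetical numbers, minimum_forecast_count = 200): a
    # bin with observation_count = 120, forecast_count = 400 and
    # sum_of_forecast_probabilities = 180 yields
    # forecast_probability = 180 / 400 = 0.45 and
    # observation_frequency = 120 / 400 = 0.30. If any bin's forecast_count
    # fell below 200, (None, None) would be returned and the threshold left
    # uncalibrated.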
@staticmethod
def _interpolate(forecast_threshold, reliability_probabilities,
observation_frequencies):
"""
Perform interpolation of the forecast probabilities using the
reliability table data to produce the calibrated forecast. Where
necessary linear extrapolation will be applied. Any mask in place on
the forecast_threshold data is removed and reapplied after calibration.
Args:
forecast_threshold (numpy.ndarray):
The forecast probabilities to be calibrated.
reliability_probabilities (numpy.ndarray):
Probabilities taken from the reliability tables.
observation_frequencies (numpy.ndarray):
Observation frequencies that relate to the reliability
probabilities, taken from the reliability tables.
Returns:
numpy.ndarray:
The calibrated forecast probabilities. The final results are
clipped to ensure any extrapolation has not | |
<gh_stars>1-10
import base64
import hashlib
import hmac
import os
import random
import re
import string
import urllib.request
from datetime import datetime, timedelta
import pytz
from flask import request
from todoist import TodoistAPI
def process_webhook(req, user):
api = initiate_api(user.access_token)
if api is None:
return 'Request for Streaks with Todoist not authorized, exiting.'
local_time = str(get_now_user_timezone(api))
if req['event_name'] == 'item:completed':
item_id = int(req['event_data']['id'])
if api.items.get_by_id(item_id) is not None:
item = api.items.get_by_id(item_id)
content = item['content']
# Avoiding duplicates: if webhook callback content matches api content
if req['event_data']['content'] == content:
task_complete(api, item_id)
if req['event_name'] == 'item:uncompleted':
item_id = int(req['event_data']['id'])
if api.items.get_by_id(item_id) is not None:
item = api.items.get_by_id(item_id)
content = item['content']
# Avoiding duplicates: if webhook callback content matches api content
if req['event_data']['content'] == content:
task_uncomplete(api, item_id)
if req['event_name'] == 'reminder:fired':
item_id = int(req['event_data']['item_id'])
if api.items.get_by_id(item_id) is not None:
task = api.items.get_by_id(item_id)
print(local_time + ': Reminder fired: ' + task['content'])
reminder_fired(api, item_id)
if req['event_name'] == 'item:updated':
item_id = int(req['event_data']['id'])
if api.items.get_by_id(item_id) is not None:
item = api.items.get_by_id(item_id)
content = item['content']
task_updated(api, item_id)
item = api.items.get_by_id(item_id)
content = item['content']
if req['event_name'] == 'item:added':
item_id = int(req['event_data']['id'])
if api.items.get_by_id(item_id) is not None:
item = api.items.get_by_id(item_id)
content = item['content']
task_added(api, item_id)
api.commit()
def convert_time_str_datetime(due_date_utc, user_timezone):
"""Parse time string, convert to datetime object in user's timezone"""
if "Z" in due_date_utc:
try:
            # In format 2018-11-23T18:00:00Z
            datetime_obj = datetime.strptime(
                due_date_utc, '%Y-%m-%dT%H:%M:%SZ')
        except (ValueError, TypeError):
return None
dt_local = datetime_obj.astimezone(user_timezone)
return dt_local
elif "T" in due_date_utc:
try:
            # In format 2018-11-23T18:00:00 (no timezone designator)
            datetime_obj = datetime.strptime(due_date_utc, '%Y-%m-%dT%H:%M:%S')
        except (ValueError, TypeError):
return None
dt_local = datetime_obj.astimezone(user_timezone)
return dt_local
else:
try:
            # In format 2018-11-23 (date only)
            datetime_obj = datetime.strptime(due_date_utc, '%Y-%m-%d')
        except (ValueError, TypeError):
return None
dt_local = datetime_obj.astimezone(user_timezone)
return dt_local
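# Examples of due-date strings the parser above accepts (values are
# illustrative): "2018-11-23T18:00:00Z", "2018-11-23T18:00:00" and
# "2018-11-23". Anything else falls through the (ValueError, TypeError)
# handlers and returns None.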
def is_habit(text):
return re.search(r'\[streak\s(\d+)\]', text)
def is_count(text):
return re.search(r'\[(\d+)\]', text)
def update_streak(task, streak):
"""Update streak contents from text [streak n] to text [streak n+1]"""
streak_num = '[streak {}]'.format(streak)
new_content = re.sub(r'\[streak\s(\d+)\]', streak_num, task['content'])
task.update(content=new_content)
def update_count(task, count):
"""Update count contents from text [n] to text [n+1]"""
count_num = '[{}]'.format(count)
new_content = re.sub(r'\[(\d+)\]', count_num, task['content'])
task.update(content=new_content)
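# Illustrative only and never called: a minimal sketch of the substitutions
# performed by update_streak/update_count, applied to plain strings (the real
# functions operate on Todoist task objects).
def _example_streak_and_count_update():
    content = 'Meditate [streak 4] [12]'
    content = re.sub(r'\[streak\s(\d+)\]', '[streak {}]'.format(5), content)
    content = re.sub(r'\[(\d+)\]', '[{}]'.format(13), content)
    return content  # 'Meditate [streak 5] [13]'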
def get_now_user_timezone(api):
"""Get current time in user's timezone"""
user_timezone = get_user_timezone(api)
return datetime.now(tz=user_timezone)
def initiate_api(access_token):
"""Initiate and sync Todoist API"""
api = TodoistAPI(access_token)
api.sync()
if bool(api['user']):
return api
else:
return None
def compute_hmac():
"""Take payload and compute hmac--check if user-agent matches to todoist webhooks"""
if request.headers.get('USER-AGENT') == 'Todoist-Webhooks':
request_hmac = request.headers.get('X-Todoist-Hmac-SHA256')
calculated_hmac = base64.b64encode(hmac.new(bytes(os.getenv(
'CLIENT_SECRET'), encoding='utf-8'), msg=request.get_data(), digestmod=hashlib.sha256).digest()).decode("utf-8")
if request_hmac == calculated_hmac:
return 1
else:
return 0
def update_to_all_day(now):
"""Update due date to end of today (default for all day tasks)"""
return '"date" : "' + now.strftime('%Y-%m-%d') + '"'
# new_due_date = datetime(year=now.year,
# month=now.month,
# day=now.day,
# hour=23,
# minute=59,
# second=59).astimezone(pytz.utc)
# return new_due_date
def get_user_timezone(api):
"""Get user's timezone"""
todoist_tz = api.state["user"]["tz_info"]["timezone"]
match = re.search("GMT( ((\+|\-)(\d+)))?", todoist_tz)
if match:
if match.group(3) == '+':
operation = '-'
else:
operation = '+'
GMT_tz = 'Etc/GMT' + operation + match.group(4)
return pytz.timezone(GMT_tz)
else:
return pytz.timezone(api.state["user"]["tz_info"]["timezone"])
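# Note on the sign flip above: pytz's fixed-offset zones follow the POSIX
# convention, so 'Etc/GMT-3' is UTC+3 and 'Etc/GMT+5' is UTC-5. For example, a
# Todoist timezone string of "GMT +3:00" is therefore mapped to
# pytz.timezone('Etc/GMT-3').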
def convert_datetime_str(date):
"""Convert a datetime object into the todoist due date string format"""
return date.strftime('%Y-%m-%dT%H:%M:%S')
def convert_datetime_str_notime(date):
"""Convert a datetime object into the todoist due date string format"""
return date.strftime('%Y-%m-%d')
def create_url():
# Generate 6 random digits
state = (''.join(random.choices(string.ascii_uppercase + string.digits, k=6)))
url = 'https://todoist.com/oauth/authorize?state=' + state + \
'&client_id=' + os.getenv('CLIENT_ID') + '&scope=data:read_write'
return url
def task_updated(api, task_id):
"""TODO: Add logic for finding <> and replacing due time"""
task = api.items.get_by_id(task_id)
if task["due"] and is_recurrence_diff(task['content']):
new_due_time = is_recurrence_diff(task["content"]).group(1)
new_due_date_utc = replace_due_date_time(
new_due_time, task["due"]['date'], get_user_timezone(api))
new_due_date_utc_str = convert_datetime_str(new_due_date_utc)
task.update(content=re.sub(is_recurrence_diff(
task["content"]).group(0), '', task["content"]))
task.update(due_date_utc=new_due_date_utc_str)
# TODO: Priority/convenience tasks: Extend feature to others
user_email = api['user']['email']
if user_email == os.getenv('PRIMARY_EMAIL') or user_email == os.getenv('EMAIL1') or user_email == os.getenv('EMAIL2'):
if api.state['user']['is_premium']:
if task["due"]:
# Special behavior for return date filter
if task['content'] == 'return date' and api.projects.get_by_id(task['project_id'])['name'] == 'crt':
if 'last_due_date' in api.activity.get(object_id=task['id'], limit=1)['events'][0]['extra_data']:
last_due_date = api.activity.get(object_id=task['id'], limit=1)[
'events'][0]['extra_data']['last_due_date']
if last_due_date == None or last_due_date != task["due_date_utc"]:
for filter in api.filters.state['filters']:
if filter['name'] == 'Vacation':
return_date_label = task['due']['string']
return_date = task['due']['date']
# todo: convert to date, add to dates
filter.update(query="search:Return date - " + return_date_label + RIGHT_SPACER + " | ( search: _____ | due before: " + add_to_dtobject(api, return_date, 1) + " | (@ tDE & ! no due date) | (" + add_to_dtobject(
api, return_date, 1) + " & @t2D) | (due before: " + add_to_dtobject(api, return_date, 6) + " & @t5D) | (due before: " + add_to_dtobject(api, return_date, 8) + " & @tW) | (due before: " + add_to_dtobject(api, return_date, 32) + " & @tM) ) & ! ##crt")
api.commit()
# Regular behavior for date added
elif 'P4' not in task['content']:
if 'last_due_date' in api.activity.get(object_id=task['id'], limit=1)['events'][0]['extra_data']:
if api.activity.get(object_id=task['id'], limit=1)['events'][0]['extra_data']['last_due_date'] == None:
task.update(priority=3)
else:
content_no_P4 = task['content'].replace('P4', '')
task.update(content=content_no_P4)
# Remove date
else:
if 'last_due_date' in api.activity.get(object_id=task['id'], limit=1)['events'][0]['extra_data']:
if api.activity.get(object_id=task['id'], limit=1)['events'][0]['extra_data']['last_due_date'] != None:
task.update(priority=0)
def is_recurrence_diff(task_content):
"""Find hours, minutes and, optionally, seconds"""
return re.search(r'<(\d+:\d+:*\d*)*>', task_content)
def add_to_dtobject(api, date_str, add_num):
date = convert_time_str_datetime(date_str, get_user_timezone(api))
new_date = date + timedelta(days=add_num)
return convert_datetime_str_notime(new_date)
def replace_due_date_time(new_due_time, due_date_utc, user_timezone):
"""Replace with the user-entered hour, minute and, optionally, second, and convert to utc datetime object"""
due_date_localtz_date = convert_time_str_datetime(
due_date_utc, user_timezone)
if(new_due_time):
new_due_time_split = new_due_time.split(":")
new_due_date_localtz_date = due_date_localtz_date.replace(hour=int(new_due_time_split[0]),
minute=int(
new_due_time_split[1]),
second=int(0))
else:
new_due_date_localtz_date = due_date_localtz_date.replace(
hour=23, minute=23, second=59)
new_due_date_utc_date = new_due_date_localtz_date.astimezone(pytz.utc)
return new_due_date_utc_date
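# Illustrative example (values made up): task_updated() extracts "<18:30>" from
# a task's content via is_recurrence_diff(), and replace_due_date_time() then
# returns the UTC datetime corresponding to 18:30 local time on the task's
# existing due date. With no time captured, the due time defaults to the
# hard-coded 23:23:59 local before conversion to UTC.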
RIGHT_SPACER = "_________________________________"
L1_LABEL = "search:" + "Level 1" + RIGHT_SPACER + " | "
L2_LABEL = "search:" + "Level 2" + RIGHT_SPACER + " | "
L3_LABEL = "search:" + "Level 3" + RIGHT_SPACER + " | "
L1_CLEAN_LABEL = "search:" + "Level 1 - clean" + RIGHT_SPACER + " | "
L2_CLEAN_LABEL = "search:" + "Level 2 - clean" + RIGHT_SPACER + " | "
OOO_LABEL = "search:_OOO_ |"
OOO_ADD = " & !(tod & ##work & P4) & !(due after: tod & ##work) & !(no due date & ##work)"
NO_COMP_LABEL = "search:_NO COMP_ |"
NO_COMP_ADD = " & !@COMP"
CLEAN_ADD = " & !(search:Cleared L1 | search:Cleared L2)"
L1_BASE = "((due before: +0 hours | (due after: tod 23:59 & due before: tom 00:00)) & ! ##crt)"
L2_BASE = " | search:_____ | ((@tDE & ! no due date) | (tom & @t2D) | (next 5 days & @t5D) | (next 8 days & @tW) | (next 32 days & @tM))"
L3_BASE = "| ((no due date & !(@TG & no due date) & !##WF - & !##Someday/Maybe & !no labels & !@AGENDAS & !@oADDON & !@WF))"
L1 = '(' + L1_LABEL + L1_BASE + ')'
L2 = '(' + L2_LABEL + L1_BASE + L2_BASE + ')'
L3 = '(' + L3_LABEL + L1_BASE + L2_BASE + L3_BASE + ')'
L1_CLEAN = '(' + L1_CLEAN_LABEL + '(' + L1_BASE + ')' + CLEAN_ADD + ')'
L2_CLEAN = '(' + L2_CLEAN_LABEL + \
'(' + L1_BASE + L2_BASE + ')' + CLEAN_ADD + ')'
# TODO: Extend feature to other users
URL = os.getenv('AUTO_REMOTE_URL')
def reset_base_filters(api):
for filter in api.filters.state['filters']:
if filter['name'] == 'Level 1':
filter.update(query=L1)
if filter['name'] == 'Level 2':
filter.update(query=L2)
if filter['name'] == 'Level 3':
filter.update(query=L3)
if filter['name'] == 'Level 1 - clean':
filter.update(query=L1_CLEAN)
if filter['name'] == 'Level 2 - clean':
filter.update(query=L2_CLEAN)
api.commit()
def task_complete(api, task_id):
task = api.items.get_by_id(int(task_id))
if task:
# Disabling rarely used recurrence snooze due to recursion error
# if api.state['user']['is_premium'] and task['due']:
# if check_recurring_task(api, task) and check_regular_intervals(task['due']['string']): check_activity_log(api, task)
increment_streak(task)
increment_count(task)
# Turn on OOO
if task['content'] == 'ooo mode' and api.projects.get_by_id(task['project_id'])['name'] == 'crt':
print("OOO ON request")
for filter in api.filters.state['filters']:
query = filter['query']
if filter['name'] == 'Level 1':
add_label_add_query(filter, query, OOO_LABEL, OOO_ADD)
if filter['name'] == 'Level 2':
add_label_add_query(filter, query, OOO_LABEL, OOO_ADD)
if filter['name'] == 'Level 3':
add_label_add_query(filter, query, OOO_LABEL, OOO_ADD)
if filter['name'] == 'Level 1 - clean':
add_label_add_query(filter, query, OOO_LABEL, OOO_ADD)
if filter['name'] == 'Level 2 - clean':
add_label_add_query(filter, query, OOO_LABEL, OOO_ADD)
# TODO: Extend feature this to other users
urllib.request.urlopen(URL).read()
# Turn on no computer
if task['content'] == 'no computer' and api.projects.get_by_id(task['project_id'])['name'] == 'crt':
for filter in api.filters.state['filters']:
query = filter['query']
if filter['name'] == 'Level 1':
add_label_add_query(
filter, query, NO_COMP_LABEL, NO_COMP_ADD)
if filter['name'] == 'Level 2':
add_label_add_query(
filter, query, NO_COMP_LABEL, NO_COMP_ADD)
if filter['name'] == 'Level |