# Hge
def fdvar_t_hge(m, it, kt, ix, kx):
    if 0 < kt <= m.ncp_t and 0 < kx <= m.ncp_x:
return m.dHge_dt[it, kt, ix, kx] == \
sum(m.ldot_t[jt, kt] * m.Hge[it, jt, ix, kx] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Constraint.Skip
# Nse
def fdvar_t_nse(m, it, kt, ix, kx, c):
if 0 < kt <= m.ncp_t and 0 < kx <= m.ncp_x:
return m.dNse_dt[it, kt, ix, kx, c] == \
sum(m.ldot_t[jt, kt] * m.Nse[it, jt, ix, kx, c] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Constraint.Skip
# Hse
def fdvar_t_hse(m, it, kt, ix, kx):
if 0 < kt <= m.ncp_t and 0 < kx <= m.ncp_x:
return m.dHse_dt[it, kt, ix, kx] == \
sum(m.ldot_t[jt, kt] * m.Hse[it, jt, ix, kx] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Constraint.Skip
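# Note (a reading of the fdvar_t_* rules above, assuming ldot_t holds the time
# derivatives of the Lagrange interpolation polynomials at the collocation points,
# presumably scaled by the finite-element length): each rule equates the declared
# derivative variable, e.g. dHse_dt at point (it, kt, ix, kx), with the collocation
# expression sum_j ldot_t[jt, kt] * Hse[it, jt, ix, kx], i.e. the polynomial
# derivative reconstructed from the state values inside the finite element.
# Points with kt == 0 or kx == 0 are skipped; element-boundary values in time are
# tied together by the continuity expressions further below.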
# expr ================================================================================================
# Ngbi0
def fcp_t_ngb(m, it, ix, kx, c):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Ngb[it + 1, 0, ix, kx, c] - \
sum(m.l1_t[jt] * m.Ngb[it, jt, ix, kx, c] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Hgbi0
def fcp_t_hgb(m, it, ix, kx):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Hgb[it + 1, 0, ix, kx] - \
sum(m.l1_t[jt] * m.Hgb[it, jt, ix, kx] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Ngci0
def fcp_t_ngc(m, it, ix, kx, c):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Ngc[it + 1, 0, ix, kx, c] - \
sum(m.l1_t[jt] * m.Ngc[it, jt, ix, kx, c] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Hgci0
def fcp_t_hgc(m, it, ix, kx):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Hgc[it + 1, 0, ix, kx] - \
sum(m.l1_t[jt] * m.Hgc[it, jt, ix, kx] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Nsci0
def fcp_t_nsc(m, it, ix, kx, c):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Nsc[it + 1, 0, ix, kx, c] - \
sum(m.l1_t[jt] * m.Nsc[it, jt, ix, kx, c] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Hsci0
def fcp_t_hsc(m, it, ix, kx):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Hsc[it + 1, 0, ix, kx] - \
sum(m.l1_t[jt] * m.Hsc[it, jt, ix, kx] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Ngei0
def fcp_t_nge(m, it, ix, kx, c):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Nge[it + 1, 0, ix, kx, c] - \
sum(m.l1_t[jt] * m.Nge[it, jt, ix, kx, c] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Hgei0
def fcp_t_hge(m, it, ix, kx):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Hge[it + 1, 0, ix, kx] - \
sum(m.l1_t[jt] * m.Hge[it, jt, ix, kx] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Nsei0
def fcp_t_nse(m, it, ix, kx, c):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Nse[it + 1, 0, ix, kx, c] - \
sum(m.l1_t[jt] * m.Nse[it, jt, ix, kx, c] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
# Hsei0
def fcp_t_hse(m, it, ix, kx):
if it < m.nfe_t and 0 < kx <= m.ncp_x:
return m.Hse[it + 1, 0, ix, kx] - \
sum(m.l1_t[jt] * m.Hse[it, jt, ix, kx] for jt in m.cp_t if jt <= m.ncp_t)
else:
return Expression.Skip
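# Note (informal): the fcp_t_* expressions above encode the usual orthogonal-collocation
# continuity condition between adjacent finite elements in time. For a differential
# state z they build
#   z[it + 1, 0, ...] - sum_j l1_t[jt] * z[it, jt, ...],
# the value at the start of element it+1 minus the Lagrange extrapolation of the state
# from element it (l1_t presumably being the basis polynomials evaluated at the element
# end); this difference is presumably constrained to zero elsewhere in the model.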
# vg
# def a1_rule(m, it, jt, ix, jx):
def Gb_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.vg[it, jt, ix, jx] * m.Ax * sum(m.cb[it, jt, ix, jx, kx] for kx in m.sp) == m.Gb[it, jt, ix, jx]
else:
return Constraint.Skip
# hsc
def a4_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.ecwin[it, jt, ix, jx] == m.Jc[it, jt, ix, jx] * m.hsc[it, jt, ix, jx]
else:
return Constraint.Skip
# hse
def a5_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.eein[it, jt, ix, jx] == m.Je[it, jt, ix, jx] * m.hse[it, jt, ix, jx]
else:
return Constraint.Skip
# nc
def a8_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.ccwin[it, jt, ix, jx, k] == m.Jc[it, jt, ix, jx] * m.nc[it, jt, ix, jx, k]
else:
return Constraint.Skip
# ne
def a9_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.cein[it, jt, ix, jx, k] == m.Je[it, jt, ix, jx] * m.ne[it, jt, ix, jx, k]
else:
return Constraint.Skip
# Je
def a11_rule_2(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.z[it, jt, ix, jx] == m.Je[it, jt, ix, jx] - m.Jc[it, jt, ix, jx]
else:
return Constraint.Skip
# delta
def a13_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.Gb[it, jt, ix, jx] == m.vb[it, jt, ix, jx] * m.Ax * m.delta[it, jt, ix, jx] * sum(m.cb[it, jt, ix, jx, kx] for kx in m.sp)
else:
return Constraint.Skip
# Jc
def a14_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.Jc[it, jt, ix, jx] == \
m.fw * m.delta[it, jt, ix, jx] * m.rhos * (1 - m.ed[it, jt, ix, jx]) * m.vb[it, jt, ix, jx]
else:
return Constraint.Skip
# yb
def a15_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.cb[it, jt, ix, jx, k] == m.yb[it, jt, ix, jx, k] * sum(m.cb[it, jt, ix, jx, kx] for kx in m.sp)
else:
return Constraint.Skip
# yc
def a16_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.cc[it, jt, ix, jx, k] == m.yc[it, jt, ix, jx, k] * sum(m.cc[it, jt, ix, jx, kx] for kx in m.sp)
else:
return Constraint.Skip
# ye
def a17_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.ce[it, jt, ix, jx, k] == m.ye[it, jt, ix, jx, k] * sum(m.ce[it, jt, ix, jx, kx] for kx in m.sp)
else:
return Constraint.Skip
# D 'c'
def a22_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.D[it, jt, ix, jx, 'c'] == \
(0.1593 - 0.1282 * (m.P[it, jt, ix, jx] - 1.4) + 0.001 * (m.Tge[it, jt, ix, jx] - 60 - 273.16) + 0.0964 * (
(m.P[it, jt, ix, jx] - 1.4) ** 2) - 0.0006921 * (
(m.P[it, jt, ix, jx] - 1.4) * (m.Tge[it, jt, ix, jx] - 60 - 273.16)) -
3.3532e-06 * (m.Tge[it, jt, ix, jx] - 60 - 273.16) ** 2) * m.ye[it, jt, ix, jx, 'h'] / (
m.ye[it, jt, ix, jx, 'h'] + m.ye[it, jt, ix, jx, 'n']) + \
(
0.1495 - 0.1204 * (m.P[it, jt, ix, jx] - 1.4) + 0.0008896 * (m.Tge[it, jt, ix, jx] - 60 - 273.16) + 0.0906 * (
(m.P[it, jt, ix, jx] - 1.4) ** 2) -
0.0005857 * (m.P[it, jt, ix, jx] - 1.4) * (m.Tge[it, jt, ix, jx] - 60 - 273.16) -
3.559e-06 * (m.Tge[it, jt, ix, jx] - 60 - 273.16) ** 2) * m.ye[it, jt, ix, jx, 'n'] / (
m.ye[it, jt, ix, jx, 'h'] + m.ye[it, jt, ix, jx, 'n'])
else:
return Constraint.Skip
# D 'h'
def a23_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.D[it, jt, ix, jx, 'h'] == \
(0.1593 - 0.1282 * (m.P[it, jt, ix, jx] - 1.4) + 0.001 * (m.Tge[it, jt, ix, jx] - 60 - 273.16) +
0.0964 * ((m.P[it, jt, ix, jx] - 1.4) ** 2) - 0.0006921 * (
(m.P[it, jt, ix, jx] - 1.4)
class SUFile(object):
    """
    Convenience class that internally handles Seismic Unix data files. It
    currently can only read IEEE 4 byte float encoded SU data files.
    """
def __init__(self, file=None, endian=None, unpack_headers=False,
headonly=False, read_traces=True):
"""
:param file: A file like object with the file pointer set at the
beginning of the SEG Y file. If file is None, an empty SEGYFile
object will be initialized.
:param endian: The endianness of the file. If None, autodetection will
be used.
        :type unpack_headers: bool
        :param unpack_headers: Determines whether or not all headers will be
unpacked during reading the file. Has a huge impact on the memory
usage and the performance. They can be unpacked on-the-fly after
being read. Defaults to False.
:type headonly: bool
:param headonly: Determines whether or not the actual data records
will be read and unpacked. Has a huge impact on memory usage. Data
will not be unpackable on-the-fly after reading the file.
Defaults to False.
:type read_traces: bool
:param read_traces: Data traces will only be read if this is set to
``True``. The data will be completely ignored if this is set to
``False``.
"""
if file is None:
self._create_empty_su_file_object()
return
        self.file = file
        # If endian is None, autodetect it.
if not endian:
self._autodetect_endianness()
else:
self.endian = ENDIAN[endian]
if read_traces:
# Read the actual traces.
[i for i in self._read_traces(unpack_headers=unpack_headers,
headonly=headonly)]
def _autodetect_endianness(self):
"""
Tries to automatically determine the endianness of the file at hand.
"""
self.endian = autodetect_endian_and_sanity_check_su(self.file)
if self.endian is False:
msg = 'Autodetection of Endianness failed. Please specify it ' + \
'by hand or contact the developers.'
raise Exception(msg)
def _create_empty_su_file_object(self):
"""
Creates an empty SUFile object.
"""
self.traces = []
def __str__(self):
"""
Prints some information about the SU file.
"""
return '%i traces in the SU structure.' % len(self.traces)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def _read_traces(self, unpack_headers=False, headonly=False,
yield_each_trace=False):
"""
Reads the actual traces starting at the current file pointer position
to the end of the file.
        :type unpack_headers: bool
        :param unpack_headers: Determines whether or not all headers will be
unpacked during reading the file. Has a huge impact on the memory
usage and the performance. They can be unpacked on-the-fly after
being read. Defaults to False.
:type headonly: bool
:param headonly: Determines whether or not the actual data records
will be unpacked. Useful if one is just interested in the headers.
Data will not be unpackable on-the-fly after reading the file.
Defaults to False.
:type yield_each_trace: bool
:param yield_each_trace: If True, it will yield each trace after it
has been read. This enables a simple implementation of a
streaming interface to read SEG-Y files. Read traces will no
longer be collected in ``self.traces`` list if this is set to
``True``.
"""
self.traces = []
# Big loop to read all data traces.
while True:
# Read and as soon as the trace header is too small abort.
try:
# Always unpack with IEEE
trace = SEGYTrace(self.file, 5, self.endian,
unpack_headers=unpack_headers,
headonly=headonly)
if yield_each_trace:
yield trace
else:
self.traces.append(trace)
except SEGYTraceHeaderTooSmallError:
break
def write(self, file, endian=None):
"""
        Write a SU file to ``file``, which is either a file-like object with a
        write method or a filename string.
If endian is set it will be enforced.
"""
if not hasattr(file, 'write'):
with open(file, 'wb') as file:
self._write(file, endian=endian)
return
self._write(file, endian=endian)
def _write(self, file, endian=None):
"""
        Write a SU file to ``file``, which is either a file-like object with a
        write method or a filename string.
If endian is set it will be enforced.
"""
# Write all traces.
for trace in self.traces:
trace.write(file, data_encoding=5, endian=endian)
def _read_su(file, endian=None, unpack_headers=False, headonly=False):
"""
Reads a Seismic Unix (SU) file and returns a SUFile object.
:param file: Open file like object or a string which will be assumed to be
a filename.
:type endian: str
:param endian: String that determines the endianness of the file. Either
'>' for big endian or '<' for little endian. If it is None,
obspy.io.segy will try to autodetect the endianness. The endianness
is always valid for the whole file.
    :type unpack_headers: bool
    :param unpack_headers: Determines whether or not all headers will be
unpacked during reading the file. Has a huge impact on the memory usage
and the performance. They can be unpacked on-the-fly after being read.
Defaults to False.
:type headonly: bool
:param headonly: Determines whether or not the actual data records will be
unpacked. Useful if one is just interested in the headers. Defaults to
False.
"""
# Open the file if it is not a file like object.
if not hasattr(file, 'read') or not hasattr(file, 'tell') or not \
hasattr(file, 'seek'):
with open(file, 'rb') as open_file:
return _internal_read_su(open_file, endian=endian,
unpack_headers=unpack_headers,
headonly=headonly)
# Otherwise just read it.
return _internal_read_su(file, endian=endian,
unpack_headers=unpack_headers, headonly=headonly)
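# Example usage (a minimal sketch; 'shots.su' and 'copy.su' are hypothetical file names):
#
#   su = _read_su('shots.su', headonly=True)   # endianness is autodetected
#   print(su)                                  # "<N> traces in the SU structure."
#   su.write('copy.su', endian='>')            # re-write, forcing big endian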
def _internal_read_su(file, endian=None, unpack_headers=False, headonly=False):
"""
    Reads an open file object and returns a SUFile object.
:param file: Open file like object.
:type endian: str
:param endian: String that determines the endianness of the file. Either
'>' for big endian or '<' for little endian. If it is None,
obspy.io.segy will try to autodetect the endianness. The endianness
is always valid for the whole file.
    :type unpack_headers: bool
    :param unpack_headers: Determines whether or not all headers will be
unpacked during reading the file. Has a huge impact on the memory usage
and the performance. They can be unpacked on-the-fly after being read.
Defaults to False.
:type headonly: bool
:param headonly: Determines whether or not the actual data records will be
unpacked. Useful if one is just interested in the headers. Defaults to
False.
"""
return SUFile(file, endian=endian, unpack_headers=unpack_headers,
headonly=headonly)
def autodetect_endian_and_sanity_check_su(file):
"""
Takes an open file and tries to determine the endianness of a Seismic
Unix data file by doing some sanity checks with the unpacked header values.
Returns False if the sanity checks failed and the endianness otherwise.
It is assumed that the data is written as 32bit IEEE floating points in
either little or big endian.
The test currently can only identify SU files in which all traces have the
same length. It basically just makes a sanity check for various fields in
the Trace header.
"""
pos = file.tell()
if isinstance(file, io.BytesIO):
file.seek(0, 2)
size = file.tell()
file.seek(pos, 0)
else:
size = os.fstat(file.fileno())[6]
if size < 244:
return False
    # Also has to be a multiple of 4 in length because every header is 240
    # bytes long and every data value 4 bytes long.
elif (size % 4) != 0:
return False
# Jump to the number of samples field in the trace header.
file.seek(114, 0)
sample_count = file.read(2)
interval = file.read(2)
# Jump to the beginning of the year fields.
file.seek(156, 0)
year = file.read(2)
jul_day = file.read(2)
hour = file.read(2)
minute = file.read(2)
second = file.read(2)
# Jump to previous position.
file.seek(pos, 0)
# Unpack in little and big endian.
le_sample_count = unpack(b'<h', sample_count)[0]
be_sample_count = unpack(b'>h', sample_count)[0]
# Check if both work.
working_byteorders = []
if le_sample_count > 0:
length = 240 + (le_sample_count * 4)
if (size % length) == 0:
working_byteorders.append('<')
if be_sample_count > 0:
length = 240 + (be_sample_count * 4)
if (size % length) == 0:
working_byteorders.append('>')
# If None works return False.
if len(working_byteorders) == 0:
return False
# Check if the other header values make sense.
still_working_byteorders = []
for bo in working_byteorders:
fmt = ("%sh" % bo).encode('ascii', 'strict')
this_interval = unpack(fmt, interval)[0]
this_year = unpack(fmt, year)[0]
this_julday = unpack(fmt, jul_day)[0]
this_hour = unpack(fmt, hour)[0]
this_minute = unpack(fmt, minute)[0]
this_second = unpack(fmt, second)[0]
# Make a sanity check for each.
# XXX: The arbitrary maximum of the sample interval is 10 seconds.
if this_interval <= 0 or this_interval > 10E7:
continue
        # Some programs write two
#coding:utf-8
from datetime import datetime, date, time
import decimal
import re
import xlrd
import xlwt
from xlwt.Style import default_style
from simple_report.interface import ISpreadsheetSection
from simple_report.core.exception import XLSReportWriteException
from simple_report.core.spreadsheet_section import (
SpreadsheetSection, AbstractMerge
)
from simple_report.xls.cursor import CalculateNextCursorXLS
from simple_report.utils import FormulaWriteExcel
from xlwt import Formatting as xlwt_formatting
# for k, v in xlwt.ExcelMagic.all_funcs_by_name.items():
# xlwt.ExcelMagic.all_funcs_by_name[k] = list(v)
# xlwt.ExcelMagic.all_funcs_by_name[k][2] = 100
xlwt.ExcelMagic.all_funcs_by_name['SUM'] = (4, 1, 100, 'V', 'D+')
KEEP_TEXT_TYPE = False
FORMULA_XLS_TYPE = 'formula_xls'
EXCEL_IMAGE_TYPE = 'excel_image'
TEXT_CELL_FORMAT = '@'
def _get_out_cell(out_sheet, col_index, row_index):
""" HACK: Extract the internal xlwt cell representation. """
row = out_sheet._Worksheet__rows.get(row_index)
if not row:
return None
cell = row._Row__cells.get(col_index)
return cell
class Section(SpreadsheetSection, ISpreadsheetSection):
"""
Класс секции отчета в xls
"""
def __init__(self, sheet, name, begin, end, writer):
super(Section, self).__init__(sheet, name, begin, end)
self.sheet_data = sheet
self.writer = writer
def flush(self, params, oriented=ISpreadsheetSection.LEFT_DOWN,
used_formulas=None, keep_text_type=KEEP_TEXT_TYPE):
"""
Запись секции в отчет
:param params: словарь с параметрами подстановки
:param oriented: направление вывода секции
:param used_formulas: используемые формулы - нужны для записи
простых формул в отчет
:result: None
"""
for k, v in params.items():
if v is None:
params[k] = ''
if used_formulas is None:
used_formulas = {}
begin_row, begin_column = self.begin
end_row, end_column = self.end
book = self.sheet_data.sheet.book
current_col, current_row = self.calc_next_cursor(oriented=oriented)
for rdrowx in range(begin_row, end_row + 1):
            # the row index does not depend on the columns
wtrowx = current_row + rdrowx - begin_row
for rdcolx in range(begin_column, end_column + 1):
                # Compute the coordinates of the cell to write to.
wtcolx = current_col + rdcolx - begin_column
try:
cell = self.writer.rdsheet.cell(rdrowx, rdcolx)
except IndexError:
continue
val = cell.value
                # fetch the cell format
xf_index = cell.xf_index
xf = book.xf_list[xf_index]
format_key = xf.format_key
format_ = book.format_map[format_key]
format_str = format_.format_str
cty = cell.ctype
f_id = None
for key, value in params.items():
if unicode(cell.value).count(u''.join(['#', key, '#'])):
if used_formulas:
formula_id_list = used_formulas.get(key)
if formula_id_list:
for formula_id in formula_id_list:
self.sheet_data.formula_id_dict.setdefault(
formula_id, []
).append(
''.join([xlrd.colname(wtcolx),
str(wtrowx + 1)])
)
if isinstance(value, FormulaWriteExcel):
                        # If a formula arrives, replace it with
                        # its value, referencing the list of cells
formula = value.excel_function
f_id = value.formula_id
if formula is not None and f_id is not None:
formula_cells = self.sheet_data.formula_id_dict.get(
f_id
)
if formula_cells:
if value.ranged:
val = '%s(%s)' % (formula, ':'.join(
[formula_cells[0],
formula_cells[-1]]))
else:
val = '%s(%s)' % (formula, ','.join(
formula_cells))
self.sheet_data.formula_id_dict[f_id] = []
cty = FORMULA_XLS_TYPE
else:
val = ''
cty = xlrd.XL_CELL_TEXT
break
elif isinstance(value, XLSImage):
cty = EXCEL_IMAGE_TYPE
val = value
break
                    # Cell type
cty = self.get_value_type(value=value,
default_type=cell.ctype)
value = unicode(value)
val = val.replace(u'#%s#' % key, value)
if isinstance(val, basestring):
while u'#' in val:
val = re.sub(u'#.*#', '', val)
if len(val.split('#')) == 2:
break
                # Copy various properties from the template into the resulting report.
if (wtcolx not in self.writer.wtcols
and rdcolx in self.writer.rdsheet.colinfo_map):
rdcol = self.writer.rdsheet.colinfo_map[rdcolx]
wtcol = self.writer.wtsheet.col(wtcolx)
wtcol.width = rdcol.width
wtcol.set_style(self.writer.style_list[rdcol.xf_index])
wtcol.hidden = rdcol.hidden
wtcol.level = rdcol.outline_level
wtcol.collapsed = rdcol.collapsed
self.writer.wtcols.add(wtcolx)
if cty == xlrd.XL_CELL_EMPTY:
continue
                # XF indices
if cell.xf_index is not None:
style = self.writer.style_list[cell.xf_index]
else:
style = default_style
rdcoords2d = rdrowx, rdcolx
if rdcoords2d in self.writer.merged_cell_top_left_map:
rlo, rhi, clo, chi = self.writer.merged_cell_top_left_map[
rdcoords2d
]
assert (rlo, clo) == rdcoords2d
if isinstance(val, XLSImage):
self.writer.wtsheet.merge(
wtrowx,
wtrowx + rhi - rlo - 1,
wtcolx,
wtcolx + chi - clo - 1,
style
)
                        # TODO: move into the write method
self.writer.wtsheet.insert_bitmap(
val.path,
wtrowx,
wtcolx
)
continue
self.writer.wtsheet.write_merge(
wtrowx, wtrowx + rhi - rlo - 1,
wtcolx, wtcolx + chi - clo - 1,
val, style)
continue
if rdcoords2d in self.writer.merged_cell_already_set:
continue
                # if the field is text and the
                # "Keep text fields" setting is enabled,
                # do not convert the text to a number
if keep_text_type and format_str == TEXT_CELL_FORMAT:
pass
else:
try:
val1 = val
if isinstance(val1, float):
val1 = str(val1)
decimal.Decimal(val1)
cty = xlrd.XL_CELL_NUMBER
except (decimal.InvalidOperation, TypeError):
pass
runlist = self.writer.rdsheet.rich_text_runlist_map.get(
(rdrowx, rdcolx)
)
self.write_result((wtcolx, wtrowx),
val,
style,
cty,
(runlist, rdrowx, rdcolx))
            # also carry over the height of the current row
rdrow = self.writer.rdsheet.rowinfo_map.get(rdrowx)
wtrow = self.writer.wtsheet.rows.get(wtrowx)
if rdrow is not None and wtrow is not None:
wtrow.height = rdrow.height
                # height_mismatch is needed so that the height actually gets applied
wtrow.height_mismatch = rdrow.height_mismatch
def get_width(self):
"""
Получение ширины секции
"""
begin_row, begin_col = self.begin
end_row, end_col = self.end
return end_col - begin_col + 1
def calc_next_cursor(self, oriented=ISpreadsheetSection.LEFT_DOWN):
"""
Вычисляем следующее положение курсора.
"""
begin_row, begin_column = self.begin
end_row, end_column = self.end
current_col, current_row = CalculateNextCursorXLS().get_next_cursor(
self.sheet_data.cursor, (begin_column, begin_row),
(end_column, end_row), oriented, section=self)
return current_col, current_row
    # TODO: implement to support the ISpreadsheetSection interface
def get_all_parameters(self):
"""
Получение всех параметров секции.
:result: None
"""
def get_cell_final_type(self, value, cell_type):
"""
Окончательный тип значения ячейки. Нужна, для того, чтобы точно
определить, является ли ячейка числовой
"""
cty = cell_type
if KEEP_TEXT_TYPE and cell_type == xlrd.XL_CELL_TEXT:
return cty
try:
long(value)
cty = xlrd.XL_CELL_NUMBER
except ValueError:
pass
return cty
def get_value_type(self, value, default_type=xlrd.XL_CELL_TEXT):
"""
Возвращаем тип значения для выходного элемента
:param value: значение
:param default_type: тип по умолчанию
:result: тип ячейки
"""
if isinstance(value, basestring):
cty = xlrd.XL_CELL_TEXT
elif isinstance(value, (datetime, date, time)):
cty = xlrd.XL_CELL_DATE
elif isinstance(value, bool):
cty = xlrd.XL_CELL_BOOLEAN
elif value is None:
cty = xlrd.XL_CELL_EMPTY
# elif isinstance(value, numbers.Number):
# if default_type == xlrd.XL_CELL_TEXT and KEEP_TEXT_TYPE:
# return default_type
# cty = xlrd.XL_CELL_NUMBER
else:
cty = default_type
# if default_type == xlrd.XL_CELL_TEXT and KEEP_TEXT_TYPE:
# return cty
# try:
# long(value)
# cty = xlrd.XL_CELL_NUMBER
# except ValueError:
# cty = default_type
return cty
def get_rich_text_list(self, text, runlist, default_font):
"""
получение списка строк для rich_text
:param text:
:param runlist:
:param default_font:
:result:
"""
rtl = []
len_runlist = len(runlist)
counter = 0
        # the default font is used for the leading characters
if len_runlist:
rtl.append(
(text[:runlist[0][0]], default_font)
)
        # then the string is split into chunks
for char_num, font_id in runlist:
if char_num > len(text):
break
if counter == len_runlist - 1:
end_char_num = None
else:
end_char_num = runlist[counter + 1][0]
rtl.append(
(text[char_num:end_char_num], self.get_font(font_id))
)
counter += 1
return rtl
def get_font(self, font_index):
"""
Получение шрифта по индексу
:param font_index: индекс шрифта
:result: шрифт
"""
if not hasattr(self, 'fonts'):
self.fonts = {}
wt_font = self.fonts.get(font_index)
if not wt_font:
wt_font = self.create_font(font_index)
self.fonts[font_index] = wt_font
return wt_font
def create_font(self, rd_font_index):
"""
Создание шрифта
:param rd_font_index: индекс шрифта в исходном файле
:result: шрифт в выходном файле
"""
font_list = self.writer.rdbook.font_list
rdf = font_list[rd_font_index]
        # The following is copied from xlutils
wtf = xlwt_formatting.Font()
wtf.height = rdf.height
wtf.italic = rdf.italic
wtf.struck_out = rdf.struck_out
wtf.outline = rdf.outline
wtf.shadow = rdf.outline
wtf.colour_index = rdf.colour_index
wtf.bold = rdf.bold #### This attribute is redundant, should be driven by weight
wtf._weight = rdf.weight #### Why "private"?
wtf.escapement = rdf.escapement
wtf.underline = rdf.underline_type ####
# wtf.???? = rdf.underline #### redundant attribute, set on the fly when writing
wtf.family = rdf.family
wtf.charset = rdf.character_set
wtf.name = rdf.name
        # End of the copied block
return wtf
def write_result(
self, write_coords, value, style, cell_type, (runlist, rdrowx, rdcolx)
):
"""
Выводим в ячейку с координатами `write_coords`
значение `value`.
:param write_coords: координаты ячейки
:param value: значение
:param style: стиль вывода
:param cell_type: тип ячейки
:param runlist:
:param rdrowx: строка в исходном файле
:param rdcolx: колонка в исходном файле
"""
wtcolx, wtrowx = write_coords
if cell_type == EXCEL_IMAGE_TYPE:
self.writer.wtsheet.insert_bitmap(
value.path, wtrowx, wtcolx
)
return
# cell_type = self.get_cell_final_type(value, cell_type)
#cell = _get_out_cell(self.writer.wtsheet, wtcolx, wtrowx)
#xf_idx = cell.xf_idx
        # Output
wtrow = self.writer.wtsheet.row(wtrowx)
if cell_type == FORMULA_XLS_TYPE:
self.writer.wtsheet.write(wtrowx, wtcolx, xlwt.Formula(value),
style)
elif cell_type == xlrd.XL_CELL_TEXT or cell_type == xlrd.XL_CELL_EMPTY:
if runlist is not None:
rich_text_list = self.get_rich_text_list(value,
runlist,
style.font)
self.writer.wtsheet.write_rich_text(
wtrowx, wtcolx, rich_text_list, style=style)
else:
wtrow.set_cell_text(wtcolx, value, style)
elif cell_type == xlrd.XL_CELL_NUMBER:
wtrow.set_cell_number(wtcolx, value, style)
elif cell_type == xlrd.XL_CELL_DATE:
wtrow.set_cell_text(wtcolx, value, style)
elif cell_type == xlrd.XL_CELL_BLANK:
wtrow.set_cell_blank(wtcolx, style)
elif cell_type == xlrd.XL_CELL_BOOLEAN:
wtrow.set_cell_boolean(wtcolx, value, style)
elif cell_type == xlrd.XL_CELL_ERROR:
wtrow.set_cell_error(wtcolx, value, style)
else:
raise XLSReportWriteException
cell = _get_out_cell(self.writer.wtsheet, wtcolx, wtrowx)
#if xf_idx:
# cell.xf_idx = xf_idx
class MergeXLS(AbstractMerge):
"""
Конструкция Merge
"""
def _merge(self):
self.section.writer.wtsheet.merge(self.begin_row_merge,
self.end_row_merge,
self._begin_merge_col,
self._end_merge_col)
def _calculate_merge_column(self, column):
"""
Подсчет колонок слияния
:param column: текущая колонка
:result: (1 колонка секции, 2 колонка секции)
"""
first_section_column = column - self.section.get_width()
last_section_column = column - 1
return first_section_column, last_section_column
class XLSImage(object):
"""
    Image. Can be used when
posx + (width - offset) / 2, posy + (height - offset) / 2, width < height ? height : width);
var fgcolor = $.rgbToHex($.map($.hexToRgb(color), function(dataAndEvents) {
return dataAndEvents * 0.3 >> 0;
}));
gradient.addColorStop(0, color);
gradient.addColorStop(1, fgcolor);
ctx.fillStyle = gradient;
}
if (border) {
ctx.strokeStyle = border;
/** @type {number} */
ctx.lineWidth = 3;
}
ctx.fillRect(posx, posy, Math.max(0, width - offset), Math.max(0, height - offset));
if (border) {
ctx.strokeRect(pos.x, pos.y, width, height);
}
},
/**
* @param {?} opt_attributes
* @param {?} value
* @return {?}
*/
contains : function(opt_attributes, value) {
if (this.viz.clickedNode && !$jit.Graph.Util.isDescendantOf(opt_attributes, this.viz.clickedNode.id)) {
return false;
}
var offsetCoordinate = opt_attributes.pos.getc(true);
var actual = opt_attributes.getData("width");
var epsilon = opt_attributes.getData("height");
return this.nodeHelper.rectangle.contains({
x : offsetCoordinate.x + actual / 2,
y : offsetCoordinate.y + epsilon / 2
}, value, actual, epsilon);
}
}
});
$jit.Icicle.Plot.EdgeTypes = new Class({
/** @type {function (): undefined} */
none : $.empty
});
Layout.ForceDirected = new Class({
/**
* @param {?} $allOptions
* @return {?}
*/
getOptions : function($allOptions) {
var element = this.canvas.getSize();
var originalWidth = element.width;
var originalHeight = element.height;
/** @type {number} */
var count = 0;
this.graph.eachNode(function(dataAndEvents) {
count++;
});
/** @type {number} */
var variance = originalWidth * originalHeight / count;
/** @type {number} */
var mult = Math.sqrt(variance);
var ld = this.config.levelDistance;
return{
width : originalWidth,
height : originalHeight,
tstart : originalWidth * 0.1,
/**
* @param {number} v00
* @return {?}
*/
nodef : function(v00) {
return variance / (v00 || 1);
},
/**
* @param {?} value
* @return {?}
*/
edgef : function(value) {
return mult * (value - ld);
}
};
},
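  // Note (informal): this mirrors a Fruchterman-Reingold style force-directed layout.
  // With variance = area / nodeCount and mult = sqrt(variance), nodef(d) ~ k^2 / d is
  // the repulsive term applied between every node pair and edgef(d) ~ k * (d - levelDistance)
  // is the attractive term along edges; tstart is the initial "temperature" that caps the
  // per-iteration displacement in computePositionStep.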
/**
* @param {?} adj
* @param {?} type
* @return {undefined}
*/
compute : function(adj, type) {
var lab = $.splat(adj || ["current", "start", "end"]);
var coord = this.getOptions();
column.compute(this.graph, lab, this.config);
this.graph.computeLevels(this.root, 0, "ignore");
this.graph.eachNode(function(self) {
$.each(lab, function(prop) {
var p = self.getPos(prop);
if (p.equals(Vector.KER)) {
/** @type {number} */
p.x = coord.width / 5 * (Math.random() - 0.5);
/** @type {number} */
p.y = coord.height / 5 * (Math.random() - 0.5);
}
self.disp = {};
$.each(lab, function(timeoutKey) {
self.disp[timeoutKey] = getIndex(0, 0);
});
});
});
this.computePositions(lab, coord, type);
},
/**
* @param {?} node
* @param {Object} prop
* @param {Object} p
* @return {undefined}
*/
computePositions : function(node, prop, p) {
var len = this.config.iterations;
/** @type {number} */
var i = 0;
var jQuery = this;
if (p) {
(function play() {
var pl = p.iter;
/** @type {number} */
var j = 0;
for (;j < pl;j++) {
prop.t = prop.tstart;
if (len) {
prop.t *= 1 - i++ / (len - 1);
}
jQuery.computePositionStep(node, prop);
if (len && i >= len) {
p.onComplete();
return;
}
}
p.onStep(Math.round(i / (len - 1) * 100));
setTimeout(play, 1);
})();
} else {
for (;i < len;i++) {
/** @type {number} */
prop.t = prop.tstart * (1 - i / (len - 1));
this.computePositionStep(node, prop);
}
}
},
/**
* @param {?} attributes
* @param {Object} options
* @return {undefined}
*/
computePositionStep : function(attributes, options) {
var graph = this.graph;
/** @type {function (...[*]): number} */
var mn = Math.min;
/** @type {function (...[*]): number} */
var max = Math.max;
var pos = getIndex(0, 0);
graph.eachNode(function(node) {
$.each(attributes, function(y) {
/** @type {number} */
node.disp[y].x = 0;
/** @type {number} */
node.disp[y].y = 0;
});
graph.eachNode(function(n) {
if (n.id != node.id) {
$.each(attributes, function(prop) {
var p = node.getPos(prop);
var v = n.getPos(prop);
/** @type {number} */
pos.x = p.x - v.x;
/** @type {number} */
pos.y = p.y - v.y;
var x = pos.norm() || 1;
node.disp[prop].$add(pos.$scale(options.nodef(x) / x));
});
}
});
});
/** @type {boolean} */
var T = !!graph.getNode(this.root).visited;
graph.eachNode(function(node) {
node.eachAdjacency(function(adj) {
var child = adj.nodeTo;
if (!!child.visited === T) {
$.each(attributes, function(prop) {
var p = node.getPos(prop);
var v = child.getPos(prop);
/** @type {number} */
pos.x = p.x - v.x;
/** @type {number} */
pos.y = p.y - v.y;
var udataCur = pos.norm() || 1;
node.disp[prop].$add(pos.$scale(-options.edgef(udataCur) / udataCur));
child.disp[prop].$add(pos.$scale(-1));
});
}
});
/** @type {boolean} */
node.visited = !T;
});
var precision = options.t;
/** @type {number} */
var indents = options.width / 2;
/** @type {number} */
var minMargin = options.height / 2;
graph.eachNode(function(event) {
$.each(attributes, function(prop) {
var pos = event.disp[prop];
var I = pos.norm() || 1;
prop = event.getPos(prop);
prop.$add(getIndex(pos.x * mn(Math.abs(pos.x), precision) / I, pos.y * mn(Math.abs(pos.y), precision) / I));
/** @type {number} */
prop.x = mn(indents, max(-indents, prop.x));
/** @type {number} */
prop.y = mn(minMargin, max(-minMargin, prop.y));
});
});
}
});
$jit.ForceDirected = new Class({
Implements : [valid, Extras, Layout.ForceDirected],
/**
* @param {?} controller
* @return {undefined}
*/
initialize : function(controller) {
var $ForceDirected = $jit.ForceDirected;
var config = {
iterations : 50,
levelDistance : 50
};
this.controller = this.config = $.merge(Options("Canvas", "Node", "Edge", "Fx", "Tips", "NodeStyles", "Events", "Navigation", "Controller", "Label"), config, controller);
var canvasConfig = this.config;
if (canvasConfig.useCanvas) {
this.canvas = canvasConfig.useCanvas;
/** @type {string} */
this.config.labelContainer = this.canvas.id + "-label";
} else {
if (canvasConfig.background) {
canvasConfig.background = $.merge({
type : "Circles"
}, canvasConfig.background);
}
this.canvas = new Canvas(this, canvasConfig);
/** @type {string} */
this.config.labelContainer = (typeof canvasConfig.injectInto == "string" ? canvasConfig.injectInto : canvasConfig.injectInto.id) + "-label";
}
this.graphOptions = {
/** @type {function (number, (number|string)): undefined} */
klass : Vector,
Node : {
selected : false,
exist : true,
drawn : true
}
};
this.graph = new Graph(this.graphOptions, this.config.Node, this.config.Edge);
this.labels = new $ForceDirected.Label[canvasConfig.Label.type](this);
this.fx = new $ForceDirected.Plot(this, $ForceDirected);
this.op = new $ForceDirected.Op(this);
/** @type {null} */
this.json = null;
/** @type {boolean} */
this.busy = false;
this.initializeExtras();
},
/**
* @return {undefined}
*/
refresh : function() {
this.compute();
this.plot();
},
/**
* @return {undefined}
*/
reposition : function() {
this.compute("end");
},
/**
* @param {?} lab
* @return {undefined}
*/
computeIncremental : function(lab) {
lab = $.merge({
iter : 20,
property : "end",
/** @type {function (): undefined} */
onStep : $.empty,
/** @type {function (): undefined} */
onComplete : $.empty
}, lab || {});
this.config.onBeforeCompute(this.graph.getNode(this.root));
this.compute(lab.property, lab);
},
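  // Usage sketch (the instance name `fd` is hypothetical; the option names are the
  // ones merged in computeIncremental above):
  //
  //   fd.computeIncremental({
  //     iter: 40,
  //     property: 'end',
  //     onStep: function(percent) { console.log(percent + '% done'); },
  //     onComplete: function() { fd.animate({ modes: ['linear'] }); }
  //   });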
/**
* @return {undefined}
*/
plot : function() {
this.fx.plot();
},
/**
* @param {?} opt_attributes
* @return {undefined}
*/
animate : function(opt_attributes) {
this.fx.animate($.merge({
modes : ["linear"]
}, opt_attributes || {}));
}
});
/** @type {boolean} */
$jit.ForceDirected.$extend = true;
(function(Hypertree) {
Hypertree.Op = new Class({
Implements : Graph.Op
});
Hypertree.Plot = new Class({
Implements : Graph.Plot
});
Hypertree.Label = {};
Hypertree.Label.Native = new Class({
Implements : Graph.Label.Native
});
Hypertree.Label.SVG = new Class({
Implements : Graph.Label.SVG,
/**
* @param {?} viz
* @return {undefined}
*/
initialize : function(viz) {
this.viz = viz;
},
/**
* @param {?} from
* @param {?} lab
* @param {?} options
* @return {undefined}
*/
placeLabel : function(from, lab, options) {
var pos = lab.pos.getc(true);
var canvas = this.viz.canvas;
var ox = canvas.translateOffsetX;
var oy = canvas.translateOffsetY;
var sx = canvas.scaleOffsetX;
var sy = canvas.scaleOffsetY;
var $cont = canvas.getSize();
var tl = {
x : Math.round(pos.x * sx + ox + $cont.width / 2),
y : Math.round(pos.y * sy + oy + $cont.height / 2)
};
from.setAttribute("x", tl.x);
from.setAttribute("y", tl.y);
options.onPlaceLabel(from, lab);
}
});
Hypertree.Label.HTML = new Class({
Implements : Graph.Label.HTML,
/**
* @param {?} viz
* @return {undefined}
*/
initialize : function(viz) {
this.viz = viz;
},
/**
* @param {?} from
* @param {?} lab
* @param {?} options
* @return {undefined}
*/
placeLabel : function(from, lab, options) {
var pos = lab.pos.getc(true);
var canvas = this.viz.canvas;
var ox = canvas.translateOffsetX;
var oy = canvas.translateOffsetY;
var sx = canvas.scaleOffsetX;
var sy = canvas.scaleOffsetY;
var $cont = canvas.getSize();
var labelPos = {
x : Math.round(pos.x * sx + ox + $cont.width / 2),
y : Math.round(pos.y * sy + oy + $cont.height / 2)
};
var style
constants = np.array([atrial_cycle_length, conduction_constant])
constants = np.stack([constants]*signals.shape[1], axis=1)
x_i = np.concatenate([signals, constants], axis=0)
y_i[:(n_Rwaves-1)] = intervals[:(n_Rwaves-1)]
idx3 = np.where(signals[2] == 1)[0]
cond_i = y_to_cond(idx3, signals.shape[1], y_i)
idx, = np.where(np.array(x_shapes) == x_i.shape[1])
if len(idx) != 0:
x[idx[0]].append(x_i)
cond[idx[0]].append(cond_i)
else:
x.append([x_i])
x_shapes.append(x_i.shape[1])
cond.append([cond_i])
aa_mean = np.loadtxt(constants_folder / "aa_mean_est.csv")
aa_std = np.loadtxt(constants_folder / "aa_std_est.csv")
cc_mean = np.loadtxt(constants_folder / "cc_mean_est.csv")
cc_std = np.loadtxt(constants_folder / "cc_std_est.csv")
cond_mean = np.loadtxt(constants_folder / "cond_signals_mean_est.csv")
cond_std = np.loadtxt(constants_folder / "cond_signals_std_est.csv")
for i in range(len(x)):
x[i] = torch.tensor(x[i], dtype=torch.float32)
x[i][:, 0:-2, :] += 0.1 * \
torch.randn(x[i].shape[0], x[i].shape[1]-2, x[i].shape[2])
x[i][:, -2, :] = (x[i][:, -2, :] - aa_mean) / aa_std
x[i][:, -1, :] = (x[i][:, -1, :] - cc_mean) / cc_std
x[i] = torch.tensor(x[i], dtype=torch.float32)
cond[i] = torch.tensor(cond[i], dtype=torch.float32)
cond[i][:, 0, :] = (cond[i][:, 0, :] - cond_mean[0]) / cond_std[0]
cond[i][:, 1, :] = (cond[i][:, 1, :] - cond_mean[1]) / cond_std[1]
cond[i] = torch.tensor(cond[i], dtype=torch.float32)
assert(not np.any(np.isnan(np.array(x[i]))))
assert(not np.any(np.isnan(np.array(cond[i]))))
return x, cond
def get_signals_recurrent_batch(batch_size):
x = []
x_shapes = []
y = []
for i in range(batch_size):
y_i = np.zeros(24)
n_Rwaves = np.random.randint(6, 26)
atrial_cycle_length = np.random.randint(188, 401)
conduction_constant = np.random.randint(1, atrial_cycle_length + 1)
block_type = random.choice(["1", "2a", "2b", "2c", "3"])
block_pattern = generate_block_pattern_alt(block_type, n_Rwaves)
if block_type == "1":
intervals = simulate_type_1(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2a":
intervals = simulate_type_2a(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2b":
intervals = simulate_type_2b(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2c":
intervals = simulate_type_2c(block_pattern[0], block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "3":
intervals = simulate_type_3(block_pattern[0], block_pattern[1], block_pattern[2],
atrial_cycle_length, conduction_constant)
block_pattern = correct_bp(block_pattern, block_type, n_Rwaves)
signals = np.array(bp_to_signals(
block_pattern, block_type, n_Rwaves, fill=False))
constants = np.array([atrial_cycle_length, conduction_constant])
constants = np.stack([constants]*signals.shape[1], axis=1)
x_i = np.concatenate([signals, constants], axis=0)
y_i[:(n_Rwaves-1)] = intervals[:(n_Rwaves-1)]
idx, = np.where(np.array(x_shapes) == x_i.shape[1])
if len(idx) != 0:
x[idx[0]].append(x_i)
y[idx[0]].append(y_i)
else:
x.append([x_i])
x_shapes.append(x_i.shape[1])
y.append([y_i])
aa_mean = np.loadtxt(constants_folder / "aa_mean_est.csv")
aa_std = np.loadtxt(constants_folder / "aa_std_est.csv")
cc_mean = np.loadtxt(constants_folder / "cc_mean_est.csv")
cc_std = np.loadtxt(constants_folder / "cc_std_est.csv")
y_mean = np.loadtxt(constants_folder / "y_mean_est.csv")
y_std = np.loadtxt(constants_folder / "y_std_est.csv")
for i in range(len(x)):
x[i] = torch.tensor(x[i], dtype=torch.float32)
x[i][:, 0:-2, :] += 0.1 * \
torch.randn(x[i].shape[0], x[i].shape[1]-2, x[i].shape[2])
x[i][:, -2, :] = (x[i][:, -2, :] - aa_mean) / aa_std
x[i][:, -1, :] = (x[i][:, -1, :] - cc_mean) / cc_std
x[i] = torch.tensor(x[i], dtype=torch.float32)
y[i] = (y[i] - y_mean) / y_std
y[i] = np.stack([y[i]]*x[i].shape[2], axis=2)
y[i] = torch.tensor(y[i], dtype=torch.float32)
assert(not np.any(np.isnan(np.array(x[i]))))
assert(not np.any(np.isnan(np.array(y[i]))))
return x, y
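# Usage sketch: the batch is grouped by sequence length, so both returned values are
# lists of tensors rather than single tensors.
#
#   x, y = get_signals_recurrent_batch(64)
#   # x[i]: shape (n_i, 5, T_i) - three noisy block signals plus two normalized constants
#   # y[i]: shape (n_i, 24, T_i) - normalized interval targets repeated at every time step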
def get_signals_batch(batch_size, test=False, btype=0):
x = []
y = []
for i in range(batch_size):
x_i = np.zeros(602)
y_i = np.zeros(24)
n_Rwaves = np.random.randint(6, 26)
atrial_cycle_length = np.random.randint(188, 401)
conduction_constant = np.random.randint(1, atrial_cycle_length + 1)
block_type = random.choice(["1", "2a", "2b", "2c", "3"])
#block_type = btype
block_pattern = generate_block_pattern_alt(block_type, n_Rwaves)
block_pattern_extra = copy.deepcopy(block_pattern)
if block_type == "1":
intervals = simulate_type_1(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2a":
intervals = simulate_type_2a(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2b":
intervals = simulate_type_2b(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2c":
intervals = simulate_type_2c(block_pattern[0], block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "3":
intervals = simulate_type_3(block_pattern[0], block_pattern[1], block_pattern[2],
atrial_cycle_length, conduction_constant)
block_pattern = correct_bp(block_pattern, block_type, n_Rwaves)
signals = np.array(bp_to_signals(
block_pattern, block_type, n_Rwaves, fill=True))
x_i[0:200] = signals[0]
x_i[200:400] = signals[1]
x_i[400:600] = signals[2]
x_i[600] = atrial_cycle_length
x_i[601] = conduction_constant
y_i[:(n_Rwaves-1)] = intervals[:(n_Rwaves-1)]
x.append(x_i)
y.append(y_i)
x = torch.tensor(x, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32)
x[:, 0:-2] += 0.1 * torch.randn(x.shape[0], x.shape[1]-2)
aa_mean = np.loadtxt(constants_folder / "aa_mean_est.csv")
aa_std = np.loadtxt(constants_folder / "aa_std_est.csv")
cc_mean = np.loadtxt(constants_folder / "cc_mean_est.csv")
cc_std = np.loadtxt(constants_folder / "cc_std_est.csv")
y_mean = np.loadtxt(constants_folder / "y_mean_est.csv")
y_std = np.loadtxt(constants_folder / "y_std_est.csv")
if not test:
y = (y - y_mean) / y_std
x[:, -2] = (x[:, -2] - aa_mean) / aa_std
x[:, -1] = (x[:, -1] - cc_mean) / cc_std
x = torch.tensor(x, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32)
assert(not np.any(np.isnan(np.array(x))))
assert(not np.any(np.isnan(np.array(y))))
if test:
return y, intervals, n_Rwaves, atrial_cycle_length, conduction_constant, block_pattern_extra, block_type
else:
return x, y
def get_approx_stats(n):
aa = []
cc = []
y = []
y_single = []
cond_array = [[], []]
cond_signals_array = [[], []]
for i in range(n):
if i % 10000 == 0:
print(i)
n_Rwaves = np.random.randint(6, 26)
atrial_cycle_length = np.random.randint(188, 401)
conduction_constant = np.random.randint(1, atrial_cycle_length + 1)
aa.append(atrial_cycle_length)
cc.append(conduction_constant)
block_type = random.choice(["1", "2a", "2b", "2c", "3"])
block_pattern = generate_block_pattern_alt(block_type, n_Rwaves)
if block_type == "1":
intervals = simulate_type_1(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2a":
intervals = simulate_type_2a(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2b":
intervals = simulate_type_2b(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2c":
intervals = simulate_type_2c(block_pattern[0], block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "3":
intervals = simulate_type_3(block_pattern[0], block_pattern[1], block_pattern[2],
atrial_cycle_length, conduction_constant)
y_i = np.zeros(24)
y_i[:(n_Rwaves-1)] = intervals[:(n_Rwaves-1)]
y.append(y_i)
y_single.extend(intervals[:(n_Rwaves-1)])
seq, matching = block_pattern_to_seq(block_pattern)
cond_i = y_to_cond(matching, len(seq), y_i)
cond_array[0].extend(cond_i[0, :])
cond_array[1].extend(cond_i[1, :])
signals = np.array(bp_to_signals(
block_pattern, block_type, n_Rwaves, fill=False))
idx3 = np.where(signals[2] == 1)[0]
cond_signals = y_to_cond(idx3, signals.shape[1], y_i)
cond_signals_array[0].extend(cond_signals[0, :])
cond_signals_array[1].extend(cond_signals[1, :])
aa = torch.tensor(aa, dtype=torch.float32)
cc = torch.tensor(cc, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32)
    y_single = torch.tensor(y_single, dtype=torch.float32)
cond_array[0] = torch.tensor(cond_array[0], dtype=torch.float32)
cond_array[1] = torch.tensor(cond_array[1], dtype=torch.float32)
cond_signals_array[0] = torch.tensor(
cond_signals_array[0], dtype=torch.float32)
cond_signals_array[1] = torch.tensor(
cond_signals_array[1], dtype=torch.float32)
aa_mean = torch.mean(aa)
aa_std = torch.std(aa)
cc_mean = torch.mean(cc)
cc_std = torch.std(cc)
y_mean = torch.mean(y, axis=0)
y_std = torch.std(y, axis=0)
y_single_mean = torch.mean(y_single)
y_single_std = torch.std(y_single)
cond_mean = [torch.mean(cond_array[0]), torch.mean(cond_array[1])]
cond_std = [torch.std(cond_array[0]), torch.std(cond_array[1])]
cond_signals_mean = [torch.mean(
cond_signals_array[0]), torch.mean(cond_signals_array[1])]
cond_signals_std = [
torch.std(cond_signals_array[0]), torch.std(cond_signals_array[1])]
np.savetxt(constants_folder / "aa_mean_est.csv", np.array([aa_mean]))
np.savetxt(constants_folder / "aa_std_est.csv", np.array([aa_std]))
np.savetxt(constants_folder / "cc_mean_est.csv", np.array([cc_mean]))
np.savetxt(constants_folder / "cc_std_est.csv", np.array([cc_std]))
np.savetxt(constants_folder / "y_mean_est.csv", y_mean)
np.savetxt(constants_folder / "y_std_est.csv", y_std)
np.savetxt(constants_folder / "y_single_mean_est.csv",
np.array([y_single_mean]))
np.savetxt(constants_folder / "y_single_std_est.csv",
np.array([y_single_std]))
np.savetxt(constants_folder / 'cond_mean_est.csv', cond_mean)
np.savetxt(constants_folder / 'cond_std_est.csv', cond_std)
np.savetxt(constants_folder /
'cond_signals_mean_est.csv', cond_signals_mean)
np.savetxt(constants_folder / 'cond_signals_std_est.csv', cond_signals_std)
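# Note: get_approx_stats only estimates and saves normalization constants (means and
# standard deviations for the atrial cycle length, conduction constant, intervals and
# the conditioning signals) into constants_folder; the batch generators above and below
# reload these CSV files to standardize their outputs.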
def generate_rcINN_batch_old(batch_size):
x = []
x_shapes = []
y = []
for i in range(batch_size):
n_Rwaves = np.random.randint(6, 26)
atrial_cycle_length = np.random.randint(188, 401)
conduction_constant = np.random.randint(1, atrial_cycle_length + 1)
block_type = random.choice(["1", "2a", "2b", "2c", "3"])
block_pattern = generate_block_pattern_alt(block_type, n_Rwaves)
if block_type == "1":
intervals = simulate_type_1(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2a":
intervals = simulate_type_2a(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2b":
intervals = simulate_type_2b(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2c":
intervals = simulate_type_2c(block_pattern[0], block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "3":
intervals = simulate_type_3(block_pattern[0], block_pattern[1], block_pattern[2],
atrial_cycle_length, conduction_constant)
y_i = np.zeros(24)
y_i[:(n_Rwaves-1)] = intervals[:(n_Rwaves-1)]
seq, matching = block_pattern_to_seq(block_pattern)
one_hot = []
for seq_i in seq:
time_step = list(np.concatenate(block_pattern_to_one_hot(seq_i)))
time_step += [atrial_cycle_length, conduction_constant]
one_hot.append(time_step)
x_i = np.array(one_hot).T
idx, = np.where(np.array(x_shapes) == x_i.shape[1])
if len(idx) != 0:
x[idx[0]].append(x_i)
y[idx[0]].append(y_i)
else:
x.append([x_i])
x_shapes.append(x_i.shape[1])
y.append([y_i])
aa_mean = np.loadtxt(constants_folder / "aa_mean_est.csv")
aa_std = np.loadtxt(constants_folder / "aa_std_est.csv")
cc_mean = np.loadtxt(constants_folder / "cc_mean_est.csv")
cc_std = np.loadtxt(constants_folder / "cc_std_est.csv")
y_mean = np.loadtxt(constants_folder / "y_mean_est.csv")
y_std = np.loadtxt(constants_folder / "y_std_est.csv")
for i in range(len(x)):
x[i] = torch.tensor(x[i], dtype=torch.float32)
x[i][:, 0:-2, :] += 0.1 * \
torch.randn(x[i].shape[0], x[i].shape[1]-2, x[i].shape[2])
x[i][:, -2, :] = (x[i][:, -2, :] - aa_mean) / aa_std
x[i][:, -1, :] = (x[i][:, -1, :] - cc_mean) / cc_std
x[i] = torch.tensor(x[i], dtype=torch.float32)
y[i] = (y[i] - y_mean) / y_std
y[i] = np.stack([y[i]]*x[i].shape[2], axis=2)
y[i] = torch.tensor(y[i], dtype=torch.float32)
assert(not np.any(np.isnan(np.array(x[i]))))
assert(not np.any(np.isnan(np.array(y[i]))))
return x, y
def generate_rcINN_matching_batch_old(batch_size):
x = []
x_shapes = []
cond = []
for i in range(batch_size):
n_Rwaves = np.random.randint(6, 26)
atrial_cycle_length = np.random.randint(188, 401)
conduction_constant = np.random.randint(1, atrial_cycle_length + 1)
block_type = random.choice(["1", "2a", "2b", "2c", "3"])
block_pattern = generate_block_pattern_alt(block_type, n_Rwaves)
if block_type == "1":
intervals = simulate_type_1(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2a":
intervals = simulate_type_2a(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2b":
intervals = simulate_type_2b(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2c":
intervals = simulate_type_2c(block_pattern[0], block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "3":
intervals = simulate_type_3(block_pattern[0], block_pattern[1], block_pattern[2],
atrial_cycle_length, conduction_constant)
y_i = np.zeros(24)
y_i[:(n_Rwaves-1)] = intervals[:(n_Rwaves-1)]
seq, matching = block_pattern_to_seq(block_pattern)
one_hot = []
for seq_i in seq:
time_step = list(np.concatenate(block_pattern_to_one_hot(seq_i)))
time_step += [atrial_cycle_length, conduction_constant]
one_hot.append(time_step)
x_i = np.array(one_hot).T
cond_i = y_to_cond(matching, len(seq), y_i)
idx, = np.where(np.array(x_shapes) == x_i.shape[1])
if len(idx) != 0:
x[idx[0]].append(x_i)
cond[idx[0]].append(cond_i)
else:
x.append([x_i])
x_shapes.append(x_i.shape[1])
cond.append([cond_i])
aa_mean = np.loadtxt(constants_folder / "aa_mean_est.csv")
aa_std = np.loadtxt(constants_folder / "aa_std_est.csv")
cc_mean = np.loadtxt(constants_folder / "cc_mean_est.csv")
cc_std = np.loadtxt(constants_folder / "cc_std_est.csv")
cond_mean = np.loadtxt(constants_folder / "cond_mean_est.csv")
cond_std = np.loadtxt(constants_folder / "cond_std_est.csv")
for i in range(len(x)):
| |
adhere to user-defined boundaries
such as dist, az, baz, id, or component matches. Don't actually remove
the traces from the stream but rather just collect indices we can use
to skip when plotting.
TODO add distance, azi and backazi skip criteria
:rtype: np.array
:return: returns an indexing list which can be used to skip over
traces that don't adhere to certain criteria
"""
skip_idx = []
for idx in self.idx:
tr = self.st[idx]
# Component-wise removal
if tr.stats.component not in self.components:
skip_idx.append(idx)
# !!! Add more here
print(f"criteria check will remove "
f"{len(skip_idx)}/{len(self.st)} traces")
return np.array(skip_idx)
def get_parameters(self):
"""
Calculate parameters in a specific order and based on the user-defined
information.
.. note::
The order of function calls here is important! Some of the 'get'
functions require the results of other 'get' functions.
Calculated Parameters
::
np.array idx:
a linear indexing of all the traces in the stream
np.array station_ids:
an ordered list of station ids, used to get station names
that match the index defined in `idx`
np.array max_amplitudes:
abs max amplitudes of each trace, used for normalization
np.array amplitude_scaling:
An array to scale amplitudes based on user choices
np.array time_shift_s:
An array to time shift time series based on user choices
np.array y_axis:
Y-Axis values based on sorting algorithm, used for plotting
np.array distances:
source-receiver distances in `distance_units` units
np.array azimuths:
source-receiver azimuths in degrees
np.array backazimuths:
source-receiver backazimuths in degrees
np.array sorted_idx:
sorted indexing on all the traces of the stream based on the
chosen sorting algorithm
"""
# Extract basic information from the Stream
self.idx = np.arange(0, len(self.st), 1)
self.station_ids = np.array([tr.get_id() for tr in self.st])
self.time_shift_s = self.get_time_shifts() # !!! OVERWRITES user input
self.xlim = self.get_xlims()
        # Max amplitudes should be RELATIVE to what we're showing (e.g., if
        # zoomed in on the P-wave, max amplitude should NOT be the surface wave)
for tr, xlim in zip(self.st, self.xlim):
start, stop = xlim
self.max_amplitudes.append(max(abs(tr.data[start:stop])))
self.max_amplitudes = np.array(self.max_amplitudes)
# Figure out which indices we'll be plotting
sorted_idx = self.get_sorted_idx()
skip_idx = self.get_skip_idx()
# Remove skip indexes from sorted index to get the final ordered list
self.sorted_idx = np.array([_ for _ in sorted_idx if _ not in skip_idx])
# Figure out how to manipulate each of the traces in the Stream
self.y_axis = self.get_y_axis_positions()
self.amplitude_scaling = self.get_amplitude_scaling()
def get_xlims(self):
"""
The x-limits of each trace depend on the overall time shift (either
static or applied through move out), as well as the sampling rate of
each trace (which can vary). Retrieve an index-dependent list of
x-limits which can be used to truncate the time series during plotting.
.. note::
Requires that get_time_shifts() has already been run
:rtype: np.array
:return: an array of tuples defining the start and stop indices for EACH
trace to be used during plotting. Already includes time shift
information so xlim can be applied DIRECTLY to the time shifted data
"""
xlim = []
if self.xlim_s is None:
# None's will index the entire trace
xlim = np.array([(None, None) for _ in range(len(self.st))])
else:
# Looping to allow for delta varying among traces,
# AND apply the time shift so that indices can be used directly in
# the plotting function
for tr, tshift in zip(self.st, self.time_shift_s):
start, stop = [int(_/tr.stats.delta) for _ in self.xlim_s]
sshift = int(tshift / tr.stats.delta) # unit: samples
xlim.append((start-sshift, stop-sshift))
xlim = np.array(xlim)
return xlim
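    # Illustration (a sketch, assuming xlim_s=(50., 200.) and a trace with
    # delta=0.01 s shifted by time_shift_s=+10 s): start/stop become
    # int(50/0.01)=5000 and int(200/0.01)=20000 samples, then the 10 s shift
    # (1000 samples) is subtracted, giving the slice (4000, 19000) applied
    # directly to the time-shifted data.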
def get_srcrcv_stats(self):
"""
Get source receiver information such as min max values, and
count-related numbers (e.g., num stations) to be used mainly for print
statements and text information
Stats Arguments
::
np.array event_names:
unique event names taken from the SAC header
int nevents:
number of unique events in the stream
np.array unique_sta_ids:
unique station codes taken from trace stats
int nstation_ids:
number of unique station codes
np.array network_codes:
unique network codes taken from station ids
int nnetwork:
number of unique network codes
np.array station_codes:
unique station codes taken from station ids
int nstation:
number of unique station codes
np.array location_codes:
unique location codes taken from station ids
int nlocation:
number of unique location codes
np.array channel_codes:
unique channel codes taken from station ids
int nchannel:
number of unique channel codes
bool reverse_sort:
determine if the user wants to reverse their sort, they do this
by appending '_r' to the end of the `sort_by` argument
"""
print("getting source-receiver stats")
def _unique(list_):
"""return a unique numpy array derived from a list"""
return np.unique(np.array(list_, dtype=str))
stats = Dict()
stats.event_names = _unique([tr.stats.sac.kevnm for tr in self.st])
stats.nevents = len(stats.event_names)
stats.unique_sta_ids = _unique([tr.get_id() for tr in self.st])
stats.longest_id = max([len(_) for _ in stats.unique_sta_ids])
stats.nstation_ids = len(stats.unique_sta_ids)
# Get unique network, station, location and channel codes. Also numbers
for name in ["network", "station", "location", "channel", "component"]:
stats[f"{name}_codes"] = _unique(
[getattr(tr.stats, name) for tr in self.st]
)
stats[f"n{name}"] = len(stats[f"{name}_codes"])
# We use `not` in `reverse_sort` to make the top of the y-axis the
# starting point, which seems more intuitive for record sections, but
# is opposite the behavior when you increment from 0
stats.reverse_sort = not bool("_r" in self.sort_by)
# Initiate empty lists for _plot_trace() to fill with min and max data
# values which can be used for global plotting parameters like xlims
stats.xmin, stats.xmax, stats.ymin, stats.ymax = [], [], [], []
return stats
def get_time_shifts(self):
"""
Very simple function which allows float inputs for time shifts and
ensures that time shifts are always per-trace arrays
Applies the move out by calculating a time shift using src-rcv distance
:rtype: np.array
:return: a stream-lengthed array of time shifts that can be applied
per trace
"""
# No user input means time shifts will be 0, so nothing happens
time_shift_arr = np.zeros(len(self.st))
if self.time_shift_s is not None:
# User inputs a static time shift
if isinstance(self.time_shift_s, (int, float)):
time_shift_arr += self.time_shift_s
# User input an array which should have already been checked for len
else:
time_shift_arr = self.time_shift_s
time_shift_arr = np.array(time_shift_arr)
# Further change the time shift if we have move out input
if self.move_out:
print(f"apply {self.move_out} {self.distance_units}/s move out")
move_out_arr = self.distances / self.move_out
time_shift_arr -= move_out_arr
return time_shift_arr
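    # Illustration (a sketch, assuming move_out=8 with distance_units='km', i.e. 8 km/s):
    # a station 240 km from the source gets move_out_arr = 240 / 8 = 30, so its time
    # shift is reduced by 30 s, aligning arrivals that travel at 8 km/s.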
def get_srcrcv_dist_az_baz(self):
"""
Convenience function to wrap _get_srcrcv_dist_az_baz_trace into a loop
over the whole stream and return lists of distances, azimuths, and
backazimuths
:rtype distances: np.array
:return distances: source-receiver distances in user-defined units in
the original order of Stream
:rtype azimuths: np.array
:return azimuths: source-receiver azimuths (deg) in the original
order of Stream
:rtype backazimuths: np.array
:return backazimuths: source-receiver backazimuths (deg) in the original
order of Stream
"""
print("calculating source-receiver distance and (back)azimuths")
distances, azimuths, backazimuths = [], [], []
for tr in self.st:
gcd, az, baz = self._get_srcrcv_dist_az_baz_trace(tr=tr)
distances.append(gcd)
azimuths.append(az)
backazimuths.append(baz)
distances = np.array(distances)
azimuths = np.array(azimuths)
backazimuths = np.array(backazimuths)
return distances, azimuths, backazimuths
def _get_srcrcv_dist_az_baz_trace(self, tr=None, idx=0):
"""
Check the source-receiver characteristics such as src-rcv distance,
azimuth, backazimuth for a given trace.
.. note::
This function ASSUMES that SAC headers have been written to the
traces. Otherwise we will need more complicated ways to get
event lat and lon
:type tr: obspy.core.trace.Trace
:param tr: trace to get srcrcv information for. If None, will use `idx`
:type idx: int
:param idx: if `tr` is None, the user can provide an index into self.st
(Stream); defaults to index 0
:rtype gcdist: float
:return gcdist: great circle distance in units specified by
`distance_units`, can be 'km' or 'deg'
:rtype az: float
:return az: azimuth (degrees) between source and receiver
:rtype baz: float
:return baz: backazimuth (degrees) from receiver back to source
"""
# Default to the first trace in the Stream
if tr is None:
tr = self.st[int(idx)]
try:
dist = tr.stats.sac.dist # units: km
az = tr.stats.sac.az # units: deg
baz = tr.stats.sac.baz # units: deg
except AttributeError:
# If for whatever reason SAC headers don't contain this info
N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
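# Editorial note (an assumption about this generated table, not text from the
# original source): each (rot, trans_num, trans_den) tuple encodes one
# symmetry operation x' = rot.dot(x) + trans_num / trans_den, where rot is a
# 3x3 integer rotation matrix and the elementwise ratio trans_num / trans_den
# is the fractional translation; e.g. trans_num = [1,1,0], trans_den = [2,2,1]
# is a translation of (1/2, 1/2, 0).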
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num
'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('t_1', 'v_2'), ('v_5', 't_1'), ('t_1', 'c_1'), ('t_1', 'v_5'), ('t_3', 'v_3'), ('c_1', 't_3'),
('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'), ('c_1', 't_1'), ('v_6', 't_3'), ('v_2', 't_1'),
('t_2', 'c_1'), ('v_1', 't_2'), ('v_4', 't_2'), ('t_3', 'c_1'), ('t_3', 'v_6'), ('v_3', 't_3')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('t_2', 'v_6'), ('t_1', 'v_2'), ('v_5', 't_1'), ('t_3', 'v_4'), ('t_1', 'c_1'), ('t_1', 'v_5'),
('t_3', 'v_3'), ('c_1', 't_3'), ('c_1', 't_2'), ('t_2', 'v_1'), ('c_1', 't_1'), ('v_6', 't_2'),
('v_2', 't_1'), ('t_2', 'c_1'), ('v_1', 't_2'), ('v_4', 't_3'), ('t_3', 'c_1'), ('v_3', 't_3')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_2'), ('t_2', 'v_3'), ('v_5', 't_1'), ('t_3', 'v_1'), ('t_1', 'c_1'),
('t_1', 'v_5'), ('c_1', 't_3'), ('c_1', 't_2'), ('t_2', 'v_4'), ('v_6', 't_3'), ('c_1', 't_1'),
('v_2', 't_1'), ('t_2', 'c_1'), ('v_4', 't_2'), ('v_1', 't_3'), ('t_3', 'c_1'), ('t_3', 'v_6')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_2', 'v_6'), ('t_1', 'v_2'), ('t_2', 'v_3'), ('v_5', 't_1'), ('t_3', 'v_1'),
('t_1', 'c_1'), ('t_1', 'v_5'), ('t_3', 'v_4'), ('c_1', 't_3'), ('v_4', 't_3'), ('c_1', 't_2'),
('c_1', 't_1'), ('v_6', 't_2'), ('v_2', 't_1'), ('t_2', 'c_1'), ('v_1', 't_3'), ('t_3', 'c_1')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('t_2', 'v_6'), ('t_1', 'v_2'), ('v_5', 't_1'), ('t_3', 'v_1'), ('t_1', 'c_1'), ('t_1', 'v_5'),
('t_3', 'v_3'), ('c_1', 't_3'), ('c_1', 't_2'), ('t_2', 'v_4'), ('c_1', 't_1'), ('v_6', 't_2'),
('v_2', 't_1'), ('t_2', 'c_1'), ('v_4', 't_2'), ('v_1', 't_3'), ('t_3', 'c_1'), ('v_3', 't_3')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_2'), ('t_2', 'v_3'), ('t_3', 'v_4'), ('t_1', 'c_1'), ('v_5', 't_3'),
('c_1', 't_3'), ('t_1', 'v_6'), ('c_1', 't_2'), ('t_2', 'v_1'), ('v_6', 't_1'), ('c_1', 't_1'),
('v_2', 't_1'), ('t_2', 'c_1'), ('v_1', 't_2'), ('v_4', 't_3'), ('t_3', 'v_5'), ('t_3', 'c_1')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('t_1', 'v_2'), ('t_1', 'c_1'), ('t_3', 'v_3'), ('v_5', 't_3'), ('c_1', 't_3'), ('t_1', 'v_6'),
('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'), ('c_1', 't_1'), ('v_6', 't_1'), ('v_2', 't_1'),
('t_2', 'c_1'), ('v_1', 't_2'), ('v_4', 't_2'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('v_3', 't_3')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('t_1', 'v_2'), ('v_5', 't_2'), ('t_3', 'v_4'), ('t_1', 'c_1'), ('t_3', 'v_3'), ('v_4', 't_3'),
('c_1', 't_3'), ('t_1', 'v_6'), ('c_1', 't_2'), ('t_2', 'v_1'), ('v_6', 't_1'), ('c_1', 't_1'),
('t_2', 'v_5'), ('t_2', 'c_1'), ('v_1', 't_2'), ('v_2', 't_1'), ('t_3', 'c_1'), ('v_3', 't_3')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_2'), ('t_2', 'v_3'), ('t_3', 'v_1'), ('t_1', 'c_1'), ('v_5', 't_3'),
('c_1', 't_3'), ('t_1', 'v_6'), ('c_1', 't_2'), ('t_2', 'v_4'), ('v_6', 't_1'), ('c_1', 't_1'),
('v_2', 't_1'), ('t_2', 'c_1'), ('v_4', 't_2'), ('v_1', 't_3'), ('t_3', 'v_5'), ('t_3', 'c_1')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_2'), ('t_2', 'v_3'), ('v_5', 't_2'), ('t_3', 'v_1'), ('t_1', 'c_1'),
('t_3', 'v_4'), ('v_4', 't_3'), ('c_1', 't_3'), ('t_1', 'v_6'), ('c_1', 't_2'), ('v_6', 't_1'),
('c_1', 't_1'), ('t_2', 'v_5'), ('t_2', 'c_1'), ('v_2', 't_1'), ('v_1', 't_3'), ('t_3', 'c_1')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1', 'v_5'), ('t_2', 'v_1'), ('t_3', 'v_2'), ('t_1', 'v_3'),
('t_1', 'v_4'), ('t_2', 'v_6'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'v_6'), ('c_1', 't_1'),
('t_3', 'v_5'), ('t_1', 'v_2'), ('c_1', 'v_4'), ('t_3', 'v_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('c_1', 'v_6'), ('t_2', 'v_2'), ('t_3', 'v_6'), ('t_1', 'v_1'), ('c_1', 'v_2'), ('t_3', 'v_3'),
('c_1', 't_3'), ('c_1', 'v_3'), ('t_2', 'v_5')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 't_3', 'v_6', 'v_5', 'v_3'}, 'City': {'c_1'},
'Town': {'t_3', 't_2', 't_1'}, 'Village': {'v_4', 'v_1', 'v_2', 'v_6', 'v_5', 'v_3'},
'Road': {('t_1', 'v_2'), ('v_5', 't_2'), ('t_3', 'v_1'), ('t_1', 'c_1'), ('t_3', 'v_3'), ('c_1', 't_3'),
('t_1', 'v_6'), ('c_1', 't_2'), ('t_2', 'v_4'), ('v_6', 't_1'), ('c_1', 't_1'), ('t_2', 'v_5'),
('t_2', 'c_1'), ('v_2', 't_1'), ('v_1', 't_3'), ('v_4', 't_2'), ('t_3', 'c_1'), ('v_3', 't_3')},
'>': {('c_1', 'v_1'), ('t_3', 'v_4'), ('t_1',
# scripts/mgear/synoptic/widgets.py
import re
from mgear.vendor.Qt import QtCore, QtWidgets, QtGui
from . import utils
##################################################
# PROMOTED WIDGETS
##################################################
# They must be declared first because they are used in the widget.ui
class toggleCombo(QtWidgets.QComboBox):
def __init__(self, parent=None):
super(toggleCombo, self).__init__(parent)
self.firstUpdate = False
self.currentIndexChanged['QString'].connect(self.handleChanged)
def wheelEvent(self, event):
event.ignore()
# def focusInEvent(self, event):
def showEvent(self, event):
self.model = utils.getModel(self)
self.uihost_name = str(self.property("Object"))
self.combo_attr = str(self.property("Attr"))
self.ctl_name = str(self.property("ik_ctl"))
if not self.currentText():
list1 = utils.getComboKeys(
self.model, self.uihost_name, self.combo_attr)
self.addItems(list1)
self.setCurrentIndex(utils.getComboIndex(
self.model, self.uihost_name, self.combo_attr))
self.firstUpdate = True
def handleChanged(self):
if self.firstUpdate:
if self.currentIndex() == self.count() - 1:
print("Space Transfer")
self.setCurrentIndex(utils.getComboIndex(
self.model, self.uihost_name, self.combo_attr))
utils.ParentSpaceTransfer.showUI(self,
self.model,
self.uihost_name,
self.combo_attr,
self.ctl_name)
else:
utils.changeSpace(self.model,
self.uihost_name,
self.combo_attr,
self.currentIndex(),
self.ctl_name)
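# Editorial note (assumption): the "Object", "Attr" and "ik_ctl" values read
# above through self.property(...) are expected to be set as dynamic string
# properties on the promoted widget in the synoptic .ui file. A minimal
# sketch of the equivalent in code (property values are illustrative only):
#
#   combo = toggleCombo()
#   combo.setProperty("Object", "armUI_R0_ctl")
#   combo.setProperty("Attr", "arm_ikref")
#   combo.setProperty("ik_ctl", "arm_R0_ik_ctl")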
class bakeSprings(QtWidgets.QPushButton):
def mousePressEvent(self, event):
model = utils.getModel(self)
utils.bakeSprings(model)
class clearSprings(QtWidgets.QPushButton):
def mousePressEvent(self, event):
model = utils.getModel(self)
utils.clearSprings(model)
class ikfkMatchButton(QtWidgets.QPushButton):
MAXIMUM_TRY_FOR_SEARCHING_FK = 1000
def __init__(self, *args, **kwargs):
# type: (*str, **str) -> None
super(ikfkMatchButton, self).__init__(*args, **kwargs)
self.numFkControllers = None
def searchNumberOfFkControllers(self):
# type: () -> None
for i in range(self.MAXIMUM_TRY_FOR_SEARCHING_FK):
prop = self.property("fk{0}".format(str(i)))
if not prop:
self.numFkControllers = i
break
def mousePressEvent(self, event):
# type: (QtCore.QEvent) -> None
mouse_button = event.button()
model = utils.getModel(self)
ikfk_attr = str(self.property("ikfk_attr"))
uiHost_name = str(self.property("uiHost_name"))
if not self.numFkControllers:
self.searchNumberOfFkControllers()
fks = []
for i in range(self.numFkControllers):
label = "fk{0}".format(str(i))
prop = str(self.property(label))
fks.append(prop)
ik = str(self.property("ik"))
upv = str(self.property("upv"))
ikRot = str(self.property("ikRot"))
if ikRot == "None":
ikRot = None
if mouse_button == QtCore.Qt.RightButton:
utils.IkFkTransfer.showUI(
model, ikfk_attr, uiHost_name, fks, ik, upv, ikRot)
return
else:
utils.ikFkMatch(model, ikfk_attr, uiHost_name, fks, ik, upv, ikRot)
return
class SpineIkfkMatchButton(QtWidgets.QPushButton):
def __init__(self, *args, **kwargs):
# type: (*str, **str) -> None
super(SpineIkfkMatchButton, self).__init__(*args, **kwargs)
self.numFkControllers = None
def mousePressEvent(self, event):
# type: (QtCore.QEvent) -> None
uihost_name = str(self.property("Object"))
mouse_button = event.button()
model = utils.getModel(self)
fkControls = str(self.property("fkControls")).split(",")
ikControls = str(self.property("ikControls")).split(",")
if mouse_button == QtCore.Qt.LeftButton:
utils.SpineIkFkTransfer.showUI(model,
uihost_name,
fkControls,
ikControls)
return
class selGroup(QtWidgets.QPushButton):
def mousePressEvent(self, event):
model = utils.getModel(self)
group_suffix = str(self.property("groupSuffix"))
utils.selGroup(model, group_suffix)
class keyGroup(QtWidgets.QPushButton):
def mousePressEvent(self, event):
model = utils.getModel(self)
group_suffix = str(self.property("groupSuffix"))
utils.keyGroup(model, group_suffix)
class toggleAttrButton(QtWidgets.QPushButton):
def mousePressEvent(self, event):
model = utils.getModel(self)
object_name = str(self.property("Object"))
attr_name = str(self.property("Attr"))
utils.toggleAttr(model, object_name, attr_name)
class resetTransform(QtWidgets.QPushButton):
def mousePressEvent(self, event):
utils.resetSelTrans()
class resetBindPose(QtWidgets.QPushButton):
def mousePressEvent(self, event):
model = utils.getModel(self)
utils.bindPose(model)
class MirrorPoseButton(QtWidgets.QPushButton):
def mousePressEvent(self, event):
utils.mirrorPose()
class FlipPoseButton(QtWidgets.QPushButton):
def mousePressEvent(self, event):
utils.mirrorPose(True)
class QuickSelButton(QtWidgets.QPushButton):
def mousePressEvent(self, event):
model = utils.getModel(self)
channel = str(self.property("channel"))
mouse_button = event.button()
utils.quickSel(model, channel, mouse_button)
class SelectButton(QtWidgets.QWidget):
over = False
color_over = QtGui.QColor(255, 255, 255, 255)
def __init__(self, parent=None):
super(SelectButton, self).__init__(parent)
self.defaultBGColor = QtGui.QPalette().color(self.backgroundRole())
self.setBorderColor(self.defaultBGColor)
p = self.palette()
p.setColor(self.foregroundRole(), QtGui.QColor(000, 000, 000, 000))
p.setColor(self.backgroundRole(), QtGui.QColor(000, 000, 000, 000))
self.setPalette(p)
def enterEvent(self, event):
self.over = True
QtWidgets.QToolTip.showText(QtGui.QCursor.pos(),
str(self.property("object")))
self.repaint()
self.update()
def leaveEvent(self, event):
self.over = False
self.repaint()
self.update()
def rectangleSelection(self, event, firstLoop):
if firstLoop:
key_modifier = event.modifiers()
else:
if event.modifiers():
key_modifier = event.modifiers()
else:
key_modifier = (QtCore.Qt.ControlModifier
| QtCore.Qt.ShiftModifier)
model = utils.getModel(self)
object = str(self.property("object")).split(",")
mouse_button = event.button()
utils.selectObj(model, object, mouse_button, key_modifier)
def mousePressEvent(self, event):
model = utils.getModel(self)
object = str(self.property("object")).split(",")
mouse_button = event.button()
key_modifier = event.modifiers()
utils.selectObj(model, object, mouse_button, key_modifier)
def paintEvent(self, event):
painter = QtGui.QPainter()
painter.begin(self)
if self.over:
painter.setBrush(self.color_over)
else:
painter.setBrush(self.color)
self.drawShape(painter)
painter.end()
def paintSelected(self, paint=False):
if paint:
p = self.palette()
p.setColor(self.foregroundRole(), QtGui.QColor(255, 255, 255, 255))
self.setPalette(p)
self.setBorderColor(QtGui.QColor(255, 255, 255, 255))
else:
p = self.palette()
p.setColor(self.foregroundRole(),
QtGui.QColor(000, 000, 000, 0o10))
self.setPalette(p)
self.setBorderColor(self.defaultBGColor)
def setBorderColor(self, color):
self.borderColor = color
def drawPathWithBorder(self, painter, path, borderWidth):
painter.setRenderHint(QtGui.QPainter.Antialiasing)
pen = QtGui.QPen(self.borderColor, borderWidth)
painter.setPen(pen)
painter.fillPath(path, painter.brush())  # fill with the brush set in paintEvent
painter.drawPath(path)
class SelectButtonStyleSheet(QtWidgets.QFrame):
def __init__(self, parent=None):
""" This class allows you to charge a QFrame widget on your picker
that will maintain the StyleSheet compatibility coming from your UI
file.
"""
super(SelectButtonStyleSheet, self).__init__(parent)
def __get_background_color(self, control, stylesheet):
""" Returns the background color for the given control on the
style sheet provided
"""
current_style = ""
for i in stylesheet:
if (re.findall("control_style={}".format(control), i) and
not i.count("disabled") or re.findall("#{}".format(
control), i) and not i.count("disabled")):
current_style += i
return re.findall(r"\S+[a-z][-][a-z].+\W;", current_style)
def __create_new_style(self, control_object, control_style, paint):
""" Generates a simple qt style sheet update for the given control.
This needs to be done to force the Maya control selection to give the
effect of the button being hovered.
"""
# gets overall style sheet
style_sheet = self.parent().styleSheet()
widgets_styles = style_sheet.split("\n\n")
new_style = ""
# handles # type style sheet
if style_sheet.count(control_object):
bg_color, hover_color = self.__get_background_color(control_object,
widgets_styles)
for i in widgets_styles:
if i.count(control_object) and paint and not i.count("hover"):
new_style = "QFrame#%s{\n %s\n}" % (control_object,
hover_color)
elif i.count(control_object) and not paint:
new_style = "QFrame#%s{\n %s\n}" % (control_object,
bg_color)
new_style += "QFrame#%s:hover{\n %s\n}" % (
control_object, hover_color)
# handles property type style sheet
elif style_sheet.count(control_style):
bg_color, hover_color = self.__get_background_color(control_style,
widgets_styles)
for i in widgets_styles:
if i.count(control_style) and paint and not i.count("hover"):
new_style = "QFrame[control_style=%s]{\n %s\n}" % (
control_style, hover_color)
elif i.count(control_style) and not paint:
new_style = "QFrame[control_style=%s]{\n %s\n}" % (
control_style, bg_color)
new_style += "QFrame[control_style=%s]:hover{\n %s\n}"\
% (control_style, hover_color)
return new_style
def enterEvent(self, event):
if not self.isEnabled():
return
point = QtGui.QCursor.pos()
point.setX(point.x() + 10)
point.setY(point.y() - 20)
QtWidgets.QToolTip.showText(point, str(self.property("object")))
self.repaint()
self.update()
def leaveEvent(self, event):
self.repaint()
self.update()
def rectangleSelection(self, event, firstLoop):
if not self.isEnabled():
return
if firstLoop:
key_modifier = event.modifiers()
else:
if event.modifiers():
key_modifier = event.modifiers()
else:
key_modifier = (QtCore.Qt.ControlModifier
| QtCore.Qt.ShiftModifier)
model = utils.getModel(self)
control_object = str(self.property("object")).split(",")
mouse_button = event.button()
utils.selectObj(model, control_object, mouse_button, key_modifier)
def mousePressEvent(self, event):
if not self.isEnabled():
return
model = utils.getModel(self)
control_object = str(self.property("object")).split(",")
mouse_button = event.button()
key_modifier = event.modifiers()
utils.selectObj(model, control_object, mouse_button, key_modifier)
def paintSelected(self, paint=False):
""" This method is responsible of been able to have the hover state
been activated when the control is selected on Maya's viewport
"""
if not self.isEnabled():
return
# get control name and control_style properties from the widget
control_object = str(self.property("object")).split(",")[0]
control_style = str(self.property("control_style")).split(",")[0]
# gets new style sheet
try:
new_style = self.__create_new_style(control_object,
control_style,
paint)
self.setStyleSheet(new_style)
except Exception as error:
message = "Something is wrong with your current style-sheet. " \
"Contact the mGear development team with the following " \
"error... "
raise RuntimeError(message + str(error))
##############################################################################
# Classes for Mixin Color
##############################################################################
class SelectBtn_StyleSheet(SelectButtonStyleSheet):
pass
class SelectBtn_RFk(SelectButton):
color = QtGui.QColor(0, 0, 192, 255)
class SelectBtn_RIk(SelectButton):
color = QtGui.QColor(0, 128, 192, 255)
class SelectBtn_CFk(SelectButton):
color = QtGui.QColor(128, 0, 128, 255)
class SelectBtn_CIk(SelectButton):
color = QtGui.QColor(192, 64, 192, 255)
class SelectBtn_LFk(SelectButton):
color = QtGui.QColor(192, 0, 0, 255)
class SelectBtn_LIk(SelectButton):
color = QtGui.QColor(192, 128, 0, 255)
class SelectBtn_yellow(SelectButton):
color = QtGui.QColor(255, 192, 0, 255)
class SelectBtn_green(SelectButton):
color = QtGui.QColor(0, 192, 0, 255)
class SelectBtn_darkGreen(SelectButton):
color = QtGui.QColor(0, 100, 0, 255)
##############################################################################
# Classes for Mixin Drawing Shape
##############################################################################
class SelectBtn_StyleSheet_Draw(SelectButtonStyleSheet):
pass
class SelectBtn_Box(SelectButton):
def drawShape(self, painter):
borderWidth = 1
x = borderWidth / 2.0
y = borderWidth / 2.0
w = self.width() - borderWidth
h = self.height() - borderWidth
# round radius
if self.height() < self.width():
rr = self.height() * 0.20
else:
rr = self.width() * 0.20
path = QtGui.QPainterPath()
path.addRoundedRect(QtCore.QRectF(x, y, w, h), rr, rr)
self.drawPathWithBorder(painter, path, borderWidth)
class SelectBtn_OutlineBox(SelectButton):
def drawShape(self, painter):
borderWidth = 1
x = borderWidth / 2.0
y = borderWidth / 2.0
w = self.width() - borderWidth
h = self.height() - borderWidth
# round radius and outline width
if self.height() < self.width():
rr = self.height() * 0.20
ow = self.height() * 0.33
else:
rr = self.width() * 0.20
ow = self.width() * 0.33
pathOuter = QtGui.QPainterPath()
pathOuter.addRoundedRect(QtCore.QRectF(x, y, w, h), rr, rr)
innX = x + ow
innY = y + ow
innW = w - (ow * 2)
innH = h - (ow * 2)
innR = rr * 0.2
pathInner = QtGui.QPainterPath()
pathInner.addRoundedRect(QtCore.QRectF(innX, innY, innW, innH),
innR, innR)
self.drawPathWithBorder(painter, pathOuter - pathInner, borderWidth)
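# Illustrative example (editorial note): for a 60x30 px button, rr = 30*0.20
# = 6 px and ow = 30*0.33 = 9.9 px, so the drawn shape is the outer rounded
# rect minus an inner rect inset by roughly 10 px on every side, leaving only
# the outline ring filled.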
class SelectBtn_Circle(SelectButton):
def drawShape(self, painter):
borderWidth = 1
x = borderWidth / 2.0
y = borderWidth / 2.0
w = self.width() - borderWidth
h = self.height() - borderWidth
path = QtGui.QPainterPath()
path.addEllipse(QtCore.QRectF(x, y, w, h))
self.drawPathWithBorder(painter, path, borderWidth)
class SelectBtn_OutlineCircle(SelectButton):
def drawShape(self, painter):
borderWidth = 1
x = borderWidth / 2.0
y = borderWidth / 2.0
w = self.width() - borderWidth
h = self.height() - borderWidth
path = QtGui.QPainterPath()
path.addEllipse(QtCore.QRectF(x, y, w, h))
if self.height() < self.width():
ow = self.height() * 0.25
else:
ow = self.width() * 0.25
innX = x + ow
innY = y + ow
innW = w - (ow * 2)
| |
import pytest
from dlapp.collection import Element
from dlapp.collection import LookupCls
from dlapp.collection import List
from dlapp.collection import ListIndexError
@pytest.fixture
def dict_data():
obj = {
"widget": {
"debug": "on",
"window": {
"title": "ABC Widget",
"name": "window abc",
"width": 500,
"height": 500
},
"image": {
"src": "Images/abc.png",
"name": "image abc",
"width": 100,
"height": 100,
"alignment": "center"
},
"text": {
"data": "Click ABC",
"size": 36,
"style": "bold",
"name": "text abc",
"width": 300,
"height": 20,
"alignment": "center",
}
}
}
yield obj
@pytest.fixture
def list_data():
obj = [
{
"widget": {
"debug": "on",
"window": {
"title": "ABC Widget",
"name": "window abc",
"width": 500,
"height": 500
},
"image": {
"src": "Images/abc.png",
"name": "image abc",
"width": 100,
"height": 100,
"alignment": "center"
},
"text": {
"data": "Click ABC",
"size": 36,
"style": "bold",
"name": "text abc",
"width": 300,
"height": 20,
"alignment": "center",
}
}
},
{
"widget": {
"debug": "off",
"window": {
"title": "XYZ Widget",
"name": "window xyz",
"width": 599,
"height": 599
},
"image": {
"src": "Images/xyz.png",
"name": "image xyz",
"width": 199,
"height": 199,
"alignment": "right"
},
"text": {
"data": "Click XYZ",
"size": 96,
"style": "normal",
"name": "text abc",
"width": 399,
"height": 29,
"alignment": "left",
}
}
}
]
yield obj
class TestElement:
@pytest.mark.parametrize(
"lookup,select_statement,expected_result",
[
('title', '', ['ABC Widget']),
('alignment', '', ['center', 'center']),
]
)
def test_find_a_lookup(
self, dict_data, lookup, select_statement, expected_result
):
elm = Element(dict_data)
result = elm.find(lookup, select=select_statement)
assert result == expected_result
assert result.first == result[0]
assert result.first == expected_result[0]
assert result.last == result[-1]
assert result.last == expected_result[-1]
@pytest.mark.parametrize(
"lookup,select_statement,expected_result",
[
('name=_iwildcard(*abc*)', 'src', [{'src': 'Images/abc.png'}]),
('alignment=center', 'name where width eq 300', [{'name': 'text abc'}]),
('alignment', 'name where width eq 300 and_ data match (?i).+ abc', [{'name': 'text abc'}])
]
)
def test_find_a_lookup_and_validate_dict_obj(
self, dict_data, lookup, select_statement, expected_result
):
elm = Element(dict_data)
result = elm.find(lookup, select=select_statement)
assert result == expected_result
@pytest.mark.parametrize(
"lookup,select_statement,expected_result",
[
(
'debug=off', # lookup
'window', # select statement
[ # expected_result
{
"window": {
"title": "XYZ Widget",
"name": "window xyz",
"width": 599,
"height": 599
}
}
]
)
]
)
def test_find_a_lookup_and_validate_list_obj(
self, list_data, lookup, select_statement, expected_result
):
elm = Element(list_data)
result = elm.find(lookup, select=select_statement)
assert result == expected_result
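# Illustrative usage outside pytest (a sketch based on the assertions above,
# not part of the original test suite):
#
#   from dlapp.collection import Element
#   elm = Element({'widget': {'window': {'title': 'ABC Widget'}}})
#   result = elm.find('title', select='')
#   assert result == ['ABC Widget'] and result.first == 'ABC Widget'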
class TestLookupCls:
@pytest.mark.parametrize(
"lookup,expected_left,expected_right",
[
(
'full_name', # lookup only has left expression
'^full_name$', # expected left pattern
None, # expected right pattern
),
(
'full++name', # lookup only has left expression
'^full\\+\\+name$', # expected left pattern
None, # expected right pattern
),
(
'full**name', # lookup only has left expression
'^full\\*\\*name$', # expected left pattern
None, # expected right pattern
),
(
'_text(full_name)', # lookup only has left expression
'^full_name$', # expected left pattern
None, # expected right pattern
),
(
'full_name=<NAME>', # lookup has left and right expr
'^full_name$', # expected left pattern
'^David\\ M\\.\\ James$', # expected right pattern
),
(
'_itext(full_name)=<NAME>', # lookup has left and right expr
'(?i)^full_name$', # expected left pattern
'^David\\ M\\.\\ James$', # expected right pattern
),
(
'_itext(full_name)=_itext(<NAME>)', # lookup has left and right expr
'(?i)^full_name$', # expected left pattern
'(?i)^David\\ M\\.\\ James$', # expected right pattern
),
(
'full_itext(+name)=_itext(<NAME>)', # lookup has left and right expr
'(?i)^full\\+name$', # expected left pattern
'(?i)^David\\ M\\.\\ James$', # expected right pattern
),
]
)
def test_lookup_text(self, lookup, expected_left, expected_right):
lkup_obj = LookupCls(lookup)
assert lkup_obj.left == expected_left
assert lkup_obj.right == expected_right
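# Editorial note (inferred from the parametrized cases above): a lookup string
# has the form "left" or "left=right"; each side may embed _text()/_itext(),
# _wildcard()/_iwildcard() or _regex()/_iregex() fragments, and LookupCls
# compiles both sides to anchored regex patterns, the i-variants adding the
# (?i) flag.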
@pytest.mark.parametrize(
"lookup,left_data,right_data",
[
(
'full_name', # lookup
[ # left data
['full_name'], # matched
[ # unmatched
'the full_name',
'full_names'
]
],
[ # right data
[None], # matched
[None] # unmatched
]
),
(
'_itext(full_name)=_itext(<NAME>)', # lookup
[ # left data
[ # matched
'full_name',
'Full_Name',
'FULL_NAME'
],
[ # unmatched
'the full_name',
'full_names'
]
],
[ # right data
[ # matched
'<NAME>',
'<NAME>'
],
[ # unmatched
'<NAME>',
'my friend name is <NAME>'
]
]
),
(
'_itext(full_name)=David _itext(<NAME>)', # lookup
[ # left data
[ # matched
'full_name',
'Full_Name',
'FULL_NAME'
],
[ # unmatched
'the full_name',
'full_names'
]
],
[ # right data
[ # matched
'<NAME>',
'<NAME>'
],
[ # unmatched
'<NAME>',
'is <NAME>'
]
]
),
]
)
def test_lookup_text_and_verify(self, lookup, left_data, right_data):
lkup_obj = LookupCls(lookup)
left_matched_data, left_unmatched_data = left_data
for data in left_matched_data:
is_match = lkup_obj.is_left_matched(data)
assert is_match is True
for data in left_unmatched_data:
is_match = lkup_obj.is_left_matched(data)
assert is_match is False
right_matched_data, right_unmatched_data = right_data
for data in right_matched_data:
if lkup_obj.is_right:
is_match = lkup_obj.is_right_matched(data)
assert is_match is True
for data in right_unmatched_data:
if lkup_obj.is_right:
is_match = lkup_obj.is_right_matched(data)
assert is_match is False
@pytest.mark.parametrize(
"lookup,expected_left,expected_right",
[
(
'_wildcard(full?name)', # lookup only has left expression
'^full.name$', # expected left pattern
None, # expected right pattern
),
(
'_iwildcard(full?name)', # lookup only has left expression
'(?i)^full.name$', # expected left pattern
None, # expected right pattern
),
(
'ful_iwildcard(l?n)ame', # lookup only has left expression
'(?i)^full.name$', # expected left pattern
None, # expected right pattern
),
(
'_iwildcard(*name)=_iwildcard(David *James)', # lookup has left and right expr
'(?i)^.*name$', # expected left pattern
'(?i)^David .*James$', # expected right pattern
),
(
'full_name=David_wildcard( [MTW]. )James', # lookup has left and right expr
'^full_name$', # expected left pattern
'^David [MTW]\\. James$', # expected right pattern
),
(
'full_name=David_wildcard( [!MTW]. )James', # lookup has left and right expr
'^full_name$', # expected left pattern
'^David [^MTW]\\. James$', # expected right pattern
),
]
)
def test_lookup_wildcard(self, lookup, expected_left, expected_right):
lkup_obj = LookupCls(lookup)
assert lkup_obj.left == expected_left
assert lkup_obj.right == expected_right
@pytest.mark.parametrize(
"lookup,left_data,right_data",
[
(
'_wildcard(full?name)', # lookup
[ # left data
[ # matched
'full?name',
'full_name',
'full name',
'full-name',
'full.name',
'fullaname'
],
[ # unmatched
'the full_name',
'full_names'
]
],
[ # right data
[None], # matched
[None] # unmatched
]
),
(
'_iwildcard(*name)=_wildcard(David *James)', # lookup
[ # left data
[ # matched
'first name',
'last NAME',
'anything BLABLABLA name'
],
[ # unmatched
'the full name is',
'full_names'
]
],
[ # right data
[ # matched
'<NAME>',
'<NAME>',
'<NAME>laBla James'
],
[ # unmatched
'<NAME>',
'my friend name is <NAME>'
]
]
),
(
'full_name=David _wildcard([WTM].) James', # lookup
[ # left data
[ # matched
'full_name',
],
[ # unmatched
'the full_name',
'full_names'
]
],
[ # right data
[ # matched
'<NAME>',
'<NAME>',
'<NAME>'
],
[ # unmatched
'<NAME>',
'is <NAME>'
]
]
),
(
'full_name=David _wildcard([!WTM].) James', # lookup
[ # left data
[ # matched
'full_name',
],
[ # unmatched
'the full_name',
'full_names'
]
],
[ # right data
[ # matched
'<NAME>',
'<NAME>',
'<NAME>'
],
[ # unmatched
'<NAME>',
'<NAME>',
'<NAME>'
]
]
),
]
)
def test_lookup_wildcard_and_verify(self, lookup, left_data, right_data):
lkup_obj = LookupCls(lookup)
left_matched_data, left_unmatched_data = left_data
for data in left_matched_data:
is_match = lkup_obj.is_left_matched(data)
assert is_match is True
for data in left_unmatched_data:
is_match = lkup_obj.is_left_matched(data)
assert is_match is False
right_matched_data, right_unmatched_data = right_data
for data in right_matched_data:
if lkup_obj.is_right:
is_match = lkup_obj.is_right_matched(data)
assert is_match is True
for data in right_unmatched_data:
if lkup_obj.is_right:
is_match = lkup_obj.is_right_matched(data)
assert is_match is False
@pytest.mark.parametrize(
"lookup,expected_left,expected_right",
[
(
'_regex([Ff]ull[- _]?[Nn]ame)', # lookup only has left expression
'^[Ff]ull[- _]?[Nn]ame$', # expected left pattern
None, # expected right pattern
),
(
'_iregex([Ff]ull[- _]?[Nn]ame)', # lookup only has left expression
'(?i)^[Ff]ull[- _]?[Nn]ame$', # expected left pattern
None, # expected right pattern
),
(
'Full_iregex([- _]?)Name', # lookup
'(?i)^Full[- _]?Name$', # expected left pattern
None, # expected right pattern
),
(
'_iregex(full[- _]?name)=_iregex(David ([MTW][.] )?James)', # lookup
'(?i)^full[- _]?name$', # expected left pattern
'(?i)^David ([MTW][.] )?James$', # expected right pattern
),
]
)
def test_lookup_regex(self, lookup, expected_left, expected_right):
lkup_obj = LookupCls(lookup)
assert lkup_obj.left == expected_left
assert lkup_obj.right == expected_right
@pytest.mark.parametrize(
"lookup,left_data,right_data",
[
(
'_regex(full[ ._-]name)', # lookup
[ # left data
[ # matched
'full name',
'full.name',
'full_name',
'full-name',
],
[ # unmatched
'full?name',
'Full name'
]
],
[ # right data
[None], # matched
[None] # unmatched
]
),
(
'_iregex(\\w+ name)=_regex(David ([MTW][.] )?James)', # lookup
[ # left data
[ # matched
'first name',
'last NAME',
'any_letter name'
],
[ # unmatched
'the full name is',
'full_names'
]
],
| |
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# Glidein creation module Classes and functions needed to
# handle dictionary files
#
import os,os.path,string,copy
import cgWConsts,cWConsts
import cWDictFile
import pwd
import shutil
from glideinwms.lib import condorPrivsep
MY_USERNAME=pwd.getpwuid(os.getuid())[0]
# values are (group_name)
class MonitorGroupDictFile(cWDictFile.DictFile):
def file_header(self,want_comments):
if want_comments:
return ("<!-- This entry is part of following monitoring groups -->\n") + ("<monitorgroups>")
else:
return ("<monitorgroups>")
def file_footer(self,want_comments):
return ("</monitorgroups>")
# key can be None
# in that case it will be composed out of value
def add(self,key,val,allow_overwrite=0):
if not (type(val) in (type(()),type([]))):
raise RuntimeError, "Values '%s' not a list or tuple"%val
if len(val)!=1:
raise RuntimeError, "Values '%s' not (group_name)"%str(val)
if key is None:
key="%s"%val
return cWDictFile.DictFile.add(self,key,val,allow_overwrite)
def add_extended(self,
group_name,
allow_overwrite=0):
self.add(None,(group_name,))
def format_val(self,key,want_comments):
return " <monitorgroup group_name=\"%s\">"%(self.vals[key][0],)
def parse_val(self,line):
if len(line)==0:
return  # ignore empty lines
if line[0]=='#':
return # ignore comments
arr=line.split(None,3)
if len(arr)==0:
return # empty line
if len(arr)!=4:
raise RuntimeError,"Not a valid var line (expected 4, found %i elements): '%s'"%(len(arr),line)
key=arr[-1]
return self.add(key,arr[:-1])
# values are (Type,System,Ref)
class InfoSysDictFile(cWDictFile.DictFile):
def file_header(self,want_comments):
if want_comments:
return (cWDictFile.DictFile.file_header(self,want_comments)+"\n"+
("# %s \t%30s \t%s \t\t%s\n"%('Type','Server','Ref','ID'))+
("#"*78))
else:
return None
# key can be None
# in that case it will be composed out of value
def add(self,key,val,allow_overwrite=0):
if not (type(val) in (type(()),type([]))):
raise RuntimeError, "Values '%s' not a list or tuple"%val
if len(val)!=3:
raise RuntimeError, "Values '%s' not (Type,System,Ref)"%str(val)
if key is None:
key="%s://%s/%s"%val
return cWDictFile.DictFile.add(self,key,val,allow_overwrite)
def add_extended(self,
infosys_type,
server_name,
ref_str,
allow_overwrite=0):
self.add(None,(infosys_type,server_name,ref_str))
def format_val(self,key,want_comments):
return "%s \t%30s \t%s \t\t%s"%(self.vals[key][0],self.vals[key][1],self.vals[key][2],key)
def parse_val(self,line):
if len(line)==0:
            return # ignore empty lines
if line[0]=='#':
return # ignore comments
arr=line.split(None,3)
if len(arr)==0:
return # empty line
if len(arr)!=4:
raise RuntimeError,"Not a valid var line (expected 4, found %i elements): '%s'"%(len(arr),line)
key=arr[-1]
return self.add(key,arr[:-1])
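# Illustrative sketch (not part of the original module): when no key is passed,
# add() composes it as "Type://System/Ref" from the value tuple. The directory,
# file name and values below are hypothetical, and the base-class constructor
# defaults are assumed.
def _example_infosys_usage():
    d = InfoSysDictFile('/tmp', 'infosys.example')
    d.add_extended('BDII', 'bdii.example.org:2170', 'GlueCEUniqueID=ce.example.org')
    d.add(None, ('BDII', 'bdii.example.org:2170', 'another-ref'))  # same effect, explicit tuple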
class CondorJDLDictFile(cWDictFile.DictFile):
def __init__(self,dir,fname,sort_keys=False,order_matters=False,jobs_in_cluster=None,
fname_idx=None): # if none, use fname
cWDictFile.DictFile.__init__(self,dir,fname,sort_keys,order_matters,fname_idx)
self.jobs_in_cluster=jobs_in_cluster
def file_footer(self,want_comments):
if self.jobs_in_cluster is None:
return "Queue"
else:
return "Queue %s"%self.jobs_in_cluster
def format_val(self,key,want_comments):
if self.vals[key] == "##PRINT_KEY_ONLY##":
return "%s" % key
else:
return "%s = %s"%(key,self.vals[key])
def parse_val(self,line):
if line[0]=='#':
return # ignore comments
arr=line.split(None,2)
if len(arr)==0:
return # empty line
if arr[0]=='Queue':
# this is the final line
if len(arr)==1:
# default
self.jobs_in_cluster=None
else:
self.jobs_in_cluster=arr[1]
return
if len(arr) <= 2:
return self.add(arr[0],"") # key = <empty> or placeholder for env variable
else:
return self.add(arr[0],arr[2])
def is_equal(self,other, # other must be of the same class
compare_dir=False,compare_fname=False,
compare_keys=None): # if None, use order_matters
if self.jobs_in_cluster==other.jobs_in_cluster:
            return cWDictFile.DictFile.is_equal(self,other,compare_dir,compare_fname,compare_keys)
else:
return False
################################################
#
# Functions that create default dictionaries
#
################################################
# internal, do not use from outside the module
def get_common_dicts(submit_dir,stage_dir):
common_dicts={'attrs':cWDictFile.ReprDictFile(submit_dir,cgWConsts.ATTRS_FILE),
'description':cWDictFile.DescriptionDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.DESCRIPTION_FILE),fname_idx=cWConsts.DESCRIPTION_FILE),
'consts':cWDictFile.StrDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.CONSTS_FILE),fname_idx=cWConsts.CONSTS_FILE),
'params':cWDictFile.ReprDictFile(submit_dir,cgWConsts.PARAMS_FILE),
'vars':cWDictFile.VarsDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.VARS_FILE),fname_idx=cWConsts.VARS_FILE),
'untar_cfg':cWDictFile.StrDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.UNTAR_CFG_FILE),fname_idx=cWConsts.UNTAR_CFG_FILE),
'file_list':cWDictFile.FileDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.FILE_LISTFILE),fname_idx=cWConsts.FILE_LISTFILE),
'signature':cWDictFile.SHA1DictFile(stage_dir,cWConsts.insert_timestr(cWConsts.SIGNATURE_FILE),fname_idx=cWConsts.SIGNATURE_FILE)}
refresh_description(common_dicts)
return common_dicts
def get_main_dicts(submit_dir,stage_dir):
main_dicts=get_common_dicts(submit_dir,stage_dir)
main_dicts['summary_signature']=cWDictFile.SummarySHA1DictFile(submit_dir,cWConsts.SUMMARY_SIGNATURE_FILE)
main_dicts['glidein']=cWDictFile.StrDictFile(submit_dir,cgWConsts.GLIDEIN_FILE)
main_dicts['frontend_descript']=cWDictFile.ReprDictFile(submit_dir,cgWConsts.FRONTEND_DESCRIPT_FILE)
main_dicts['gridmap']=cWDictFile.GridMapDict(stage_dir,cWConsts.insert_timestr(cWConsts.GRIDMAP_FILE))
main_dicts['after_file_list']=cWDictFile.FileDictFile(stage_dir,cWConsts.insert_timestr(cgWConsts.AFTER_FILE_LISTFILE),fname_idx=cgWConsts.AFTER_FILE_LISTFILE)
return main_dicts
def get_entry_dicts(entry_submit_dir,entry_stage_dir,entry_name):
entry_dicts=get_common_dicts(entry_submit_dir,entry_stage_dir)
entry_dicts['job_descript']=cWDictFile.StrDictFile(entry_submit_dir,cgWConsts.JOB_DESCRIPT_FILE)
entry_dicts['infosys']=InfoSysDictFile(entry_submit_dir,cgWConsts.INFOSYS_FILE)
entry_dicts['mongroup']=MonitorGroupDictFile(entry_submit_dir,cgWConsts.MONITOR_CONFIG_FILE)
return entry_dicts
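# Illustrative sketch (not part of the original module): a typical create-then-save
# flow for the main dictionaries. The directory paths are hypothetical.
def _example_create_and_save_main():
    dicts = get_main_dicts('/var/glidein/submit', '/var/glidein/stage')
    # ... populate dicts['attrs'], dicts['params'], the file lists, etc. ...
    save_main_dicts(dicts, set_readonly=True)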
################################################
#
# Functions that load dictionaries
#
################################################
# internal, do not use from outside the module
def load_common_dicts(dicts, # update in place
description_el):
# first submit dir ones (mutable)
dicts['params'].load()
dicts['attrs'].load()
# now the ones keyed in the description
dicts['signature'].load(fname=description_el.vals2['signature'])
dicts['file_list'].load(fname=description_el.vals2['file_list'])
file_el=dicts['file_list']
# all others are keyed in the file_list
dicts['consts'].load(fname=file_el[cWConsts.CONSTS_FILE][0])
dicts['vars'].load(fname=file_el[cWConsts.VARS_FILE][0])
dicts['untar_cfg'].load(fname=file_el[cWConsts.UNTAR_CFG_FILE][0])
if dicts.has_key('gridmap') and file_el.has_key(cWConsts.GRIDMAP_FILE):
dicts['gridmap'].load(fname=file_el[cWConsts.GRIDMAP_FILE][0])
def load_main_dicts(main_dicts): # update in place
main_dicts['glidein'].load()
main_dicts['frontend_descript'].load()
# summary_signature has keys for description
main_dicts['summary_signature'].load()
# load the description
main_dicts['description'].load(fname=main_dicts['summary_signature']['main'][1])
# all others are keyed in the description
main_dicts['after_file_list'].load(fname=main_dicts['description'].vals2['after_file_list'])
load_common_dicts(main_dicts,main_dicts['description'])
def load_entry_dicts(entry_dicts, # update in place
entry_name,summary_signature):
try:
entry_dicts['infosys'].load()
except RuntimeError:
pass # ignore errors, this is optional
entry_dicts['job_descript'].load()
# load the description (name from summary_signature)
entry_dicts['description'].load(fname=summary_signature[cgWConsts.get_entry_stage_dir("",entry_name)][1])
# all others are keyed in the description
load_common_dicts(entry_dicts,entry_dicts['description'])
############################################################
#
# Functions that create data out of the existing dictionary
#
############################################################
def refresh_description(dicts): # update in place
description_dict=dicts['description']
description_dict.add(dicts['signature'].get_fname(),"signature",allow_overwrite=True)
for k in ('file_list','after_file_list'):
if dicts.has_key(k):
description_dict.add(dicts[k].get_fname(),k,allow_overwrite=True)
def refresh_file_list(dicts,is_main, # update in place
files_set_readonly=True,files_reset_changed=True):
file_dict=dicts['file_list']
file_dict.add(cWConsts.CONSTS_FILE,
(dicts['consts'].get_fname(), 'regular', 0, 'TRUE', 'CONSTS_FILE',
dicts['consts'].save_into_str(set_readonly=files_set_readonly, reset_changed=files_reset_changed)),
allow_overwrite=True)
file_dict.add(cWConsts.VARS_FILE,
(dicts['vars'].get_fname(), 'regular', 0, 'TRUE', 'CONDOR_VARS_FILE',
dicts['vars'].save_into_str(set_readonly=files_set_readonly, reset_changed=files_reset_changed)),
allow_overwrite=True)
file_dict.add(cWConsts.UNTAR_CFG_FILE,
(dicts['untar_cfg'].get_fname(), 'regular', 0, 'TRUE', 'UNTAR_CFG_FILE',
dicts['untar_cfg'].save_into_str(set_readonly=files_set_readonly, reset_changed=files_reset_changed)),
allow_overwrite=True)
if is_main and dicts.has_key('gridmap'):
file_dict.add(cWConsts.GRIDMAP_FILE,
(dicts['gridmap'].get_fname(), 'regular', 0, 'TRUE', 'GRIDMAP',
dicts['gridmap'].save_into_str(set_readonly=files_set_readonly, reset_changed=files_reset_changed)),
allow_overwrite=True)
# dictionaries must have been written to disk before using this
def refresh_signature(dicts): # update in place
signature_dict=dicts['signature']
for k in ('consts','vars','untar_cfg','gridmap','file_list','after_file_list','description'):
if dicts.has_key(k):
signature_dict.add_from_file(dicts[k].get_filepath(),allow_overwrite=True)
# add signatures of all the files linked in the lists
for k in ('file_list','after_file_list'):
if dicts.has_key(k):
filedict=dicts[k]
for fname in filedict.get_immutable_files():
signature_dict.add_from_file(os.path.join(filedict.dir,fname),allow_overwrite=True)
################################################
#
# Functions that save dictionaries
#
################################################
# internal, do not use from outside the module
def save_common_dicts(dicts, # will update in place, too
is_main,
set_readonly=True):
    # make sure description is up to date
refresh_description(dicts)
# save the immutable ones
for k in ('description',):
dicts[k].save(set_readonly=set_readonly)
# Load files into the file list
# 'consts','untar_cfg','vars' will be loaded
refresh_file_list(dicts,is_main)
# save files in the file lists
for k in ('file_list','after_file_list'):
if dicts.has_key(k):
dicts[k].save_files(allow_overwrite=True)
# then save the lists
for k in ('file_list','after_file_list'):
if dicts.has_key(k):
dicts[k].save(set_readonly=set_readonly)
    # calc and save the signatures
refresh_signature(dicts)
dicts['signature'].save(set_readonly=set_readonly)
#finally save the mutable one(s)
dicts['params'].save(set_readonly=set_readonly)
dicts['attrs'].save(set_readonly=set_readonly)
# must be invoked after all the entries have been saved
def save_main_dicts(main_dicts, # will update in place, too
set_readonly=True):
main_dicts['glidein'].save(set_readonly=set_readonly)
main_dicts['frontend_descript'].save(set_readonly=set_readonly)
save_common_dicts(main_dicts,True,set_readonly=set_readonly)
summary_signature=main_dicts['summary_signature']
summary_signature.add_from_file(key="main",filepath=main_dicts['signature'].get_filepath(),fname2=main_dicts['description'].get_fname(),allow_overwrite=True)
summary_signature.save(set_readonly=set_readonly)
def save_entry_dicts(entry_dicts, # will update in place, too
entry_name,summary_signature, # update in place
set_readonly=True):
entry_dicts['mongroup'].save(set_readonly=set_readonly)
entry_dicts['infosys'].save(set_readonly=set_readonly)
entry_dicts['job_descript'].save(set_readonly=set_readonly)
save_common_dicts(entry_dicts,False,set_readonly=set_readonly)
summary_signature.add_from_file(key=cgWConsts.get_entry_stage_dir("",entry_name),filepath=entry_dicts['signature'].get_filepath(),fname2=entry_dicts['description'].get_fname(),allow_overwrite=True)
################################################
#
# Functions that reuse dictionaries
#
################################################
def reuse_simple_dict(dicts,other_dicts,key,compare_keys=None):
if dicts[key].is_equal(other_dicts[key],compare_dir=True,compare_fname=False,compare_keys=compare_keys):
# if equal, just use the old one, and mark it as unchanged and readonly
dicts[key]=copy.deepcopy(other_dicts[key])
dicts[key].changed=False
dicts[key].set_readonly(True)
return True
else:
return False
def reuse_file_dict(dicts,other_dicts,key):
dicts[key].reuse(other_dicts[key])
return reuse_simple_dict(dicts,other_dicts,key)
def reuse_common_dicts(dicts, other_dicts,is_main,all_reused):
# save the immutable ones
# check simple dictionaries
for k in ('consts','untar_cfg','vars'):
all_reused=reuse_simple_dict(dicts,other_dicts,k) and all_reused
# since the file names may have changed, refresh the file_list
refresh_file_list(dicts,is_main)
# check file-based dictionaries
for k in ('file_list','after_file_list'):
if dicts.has_key(k):
all_reused=reuse_file_dict(dicts,other_dicts,k) and all_reused
if all_reused:
# description and signature track other files
# so they change iff the others change
for k in ('description','signature'):
dicts[k]=copy.deepcopy(other_dicts[k])
dicts[k].changed=False
dicts[k].set_readonly(True)
# check the mutable ones
for k in ('attrs','params'):
reuse_simple_dict(dicts,other_dicts,k)
return all_reused
def reuse_main_dicts(main_dicts, other_main_dicts):
reuse_simple_dict(main_dicts, other_main_dicts,'glidein')
reuse_simple_dict(main_dicts, other_main_dicts,'frontend_descript')
reuse_simple_dict(main_dicts, other_main_dicts,'gridmap')
all_reused=reuse_common_dicts(main_dicts, other_main_dicts,True,True)
# will not try to reuse the summary_signature... being in submit_dir
# can be rewritten and it is not worth the pain to try to prevent it
return all_reused
def reuse_entry_dicts(entry_dicts, other_entry_dicts,entry_name):
reuse_simple_dict(entry_dicts, other_entry_dicts,'job_descript')
reuse_simple_dict(entry_dicts, other_entry_dicts,'infosys')
all_reused=reuse_common_dicts(entry_dicts, other_entry_dicts,False,True)
return all_reused
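# Illustrative sketch (not part of the original module): comparing freshly built
# dictionaries against a previously saved set and only writing out what changed.
def _example_reuse_main(new_main_dicts, old_main_dicts):
    if reuse_main_dicts(new_main_dicts, old_main_dicts):
        pass  # everything was reused; nothing new needs to be staged
    else:
        save_main_dicts(new_main_dicts)  # at least one dictionary changed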
################################################
#
# Handle dicts as Classes
#
################################################
################################################
#
# Support classes
#
################################################
###########################################
# Privsep support classes
class clientDirSupport(cWDictFile.simpleDirSupport):
def __init__(self,user,dir,dir_name,privsep_mkdir=False):
cWDictFile.simpleDirSupport.__init__(self,dir,dir_name)
self.user=user
self.privsep_mkdir=privsep_mkdir
def create_dir(self,fail_if_exists=True):
base_dir=os.path.dirname(self.dir)
if not os.path.isdir(base_dir):
raise RuntimeError,"Missing base %s directory %s."%(self.dir_name,base_dir)
if os.path.isdir(self.dir):
if fail_if_exists:
raise RuntimeError,"Cannot create %s dir %s, already exists."%(self.dir_name,self.dir)
else:
return False # already exists, nothing to do
if self.user==MY_USERNAME:
# keep it simple, if possible
try:
os.mkdir(self.dir)
except OSError,e:
raise RuntimeError,"Failed to create %s dir: %s"%(self.dir_name,e)
elif self.privsep_mkdir:
try:
# use privsep mkdir, as requested
condorPrivsep.mkdir(base_dir,os.path.basename(self.dir),self.user)
# with condor 7.9.4 a permissions change is required
condorPrivsep.execute(self.user,base_dir,'/bin/chmod',['chmod','0755',self.dir],stdout_fname=None)
except condorPrivsep.ExeError, e:
raise RuntimeError,"Failed to create %s dir (user %s): %s"%(self.dir_name,self.user,e)
except:
raise RuntimeError,"Failed to create %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
else:
try:
# use the execute command
# do not use the mkdir one, as we do not need root privileges
condorPrivsep.execute(self.user,base_dir,'/bin/mkdir',['mkdir',self.dir],stdout_fname=None)
# with condor 7.9.4 a permissions change is required
condorPrivsep.execute(self.user,base_dir,'/bin/chmod',['chmod','0755',self.dir],stdout_fname=None)
except condorPrivsep.ExeError, e:
raise RuntimeError,"Failed to create %s dir (user %s): %s"%(self.dir_name,self.user,e)
except:
raise RuntimeError,"Failed to create %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
return True
def delete_dir(self):
base_dir=os.path.dirname(self.dir)
if not os.path.isdir(base_dir):
raise RuntimeError,"Missing base %s directory %s!"%(self.dir_name,base_dir)
if self.user==MY_USERNAME:
# keep it simple, if possible
shutil.rmtree(self.dir)
elif self.privsep_mkdir:
try:
# use privsep rmtree, as requested
condorPrivsep.rmtree(base_dir,os.path.basename(self.dir))
except condorPrivsep.ExeError, e:
raise RuntimeError,"Failed to remove %s dir (user %s): %s"%(self.dir_name,self.user,e)
except:
raise RuntimeError,"Failed to remove %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
else:
try:
# use the execute command
# do not use the rmtree one, as we do not need root privileges
condorPrivsep.execute(self.user,base_dir,'/bin/rm',['rm','-fr',self.dir],stdout_fname=None)
except condorPrivsep.ExeError, e:
raise RuntimeError,"Failed to remove %s dir (user %s): %s"%(self.dir_name,self.user,e)
except:
raise RuntimeError,"Failed to remove %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
class chmodClientDirSupport(clientDirSupport):
def __init__(self,user,dir,chmod,dir_name):
clientDirSupport.__init__(self,user,dir,dir_name)
self.chmod=chmod
def create_dir(self,fail_if_exists=True):
base_dir=os.path.dirname(self.dir)
if not os.path.isdir(base_dir):
raise RuntimeError,"Missing base %s directory %s."%(self.dir_name,base_dir)
if os.path.isdir(self.dir):
if fail_if_exists:
raise RuntimeError,"Cannot create %s dir %s, already exists."%(self.dir_name,self.dir)
else:
return False # already exists, nothing to do
if self.user==MY_USERNAME:
# keep it simple, if possible
try:
os.mkdir(self.dir,self.chmod)
except OSError,e:
raise RuntimeError,"Failed to create %s dir: %s"%(self.dir_name,e)
else:
try:
#
# does not return an error
# role = Item_QModelIndex.data(QtCore.Qt.BackgroundRole) # does not return an error
# self.printSTD(role) # does not return an error
modelScript.setData(
modelScript.index(n, colIndex),
QtGui.QColor(QtCore.Qt.green),
QtCore.Qt.BackgroundColorRole
)
#====================================================================== Fin Scripts
# now = datetime.datetime.now()
now = datetime.now()
date = now.strftime("%Y%m%d-%H-%M-%S")
array_scriptToSync = []
myPrefs = json.load(open(self.MYPREFSFILE))
for v in myPrefs["scripts"]:
array_scriptToSync.append(v)
for ap in self.ALL_PROJECTS:
DIR_DISTANT_BACKUP = '/u/'+ap+self.PATH_EXEMPLES+'/'+self.DIR_BACKUP
if not os.path.exists(DIR_DISTANT_BACKUP):
os.makedirs(DIR_DISTANT_BACKUP)
APU = str(ap).upper()
ap = str(ap).lower()
if str(self.CURRENT_PROJECT_lower) != str(ap):
msg = '---------------------------------- SYNC SCRIPTS ' + self.CURRENT_PROJECT + ' -> ' + APU
self.printSTD(' ')
self.printSTD('-----------------------------------------------------------------------')
self.printSTD(str(msg))
self.printSTD('-----------------------------------------------------------------------\n\n')
for s in array_scriptToSync:
checkCopy = False
filename = s.split('.')[0]
ext = s.split('.')[1]
sbackup = filename+'_'+date+'.'+ext
path_local = '/u/'+self.CURRENT_PROJECT_lower+self.PATH_EXEMPLES+'/'+s
path_distant = '/u/'+ap+self.PATH_EXEMPLES+'/'+s
path_distant_backup = DIR_DISTANT_BACKUP+'/'+sbackup
self.printSTD(path_local)
self.printSTD('->')
self.printSTD(path_distant)
try:
if os.path.isfile(path_local):
#========= 1 - FIRST , IMPORTANT backup distant file before copy
if os.path.isfile(path_distant):
shutil.copyfile(path_distant, path_distant_backup)
if not os.path.isfile(path_distant):
self.printSTD('---[ NEW FILE ]---')
checkCopy = True
#========= 2 -copy sync
shutil.copyfile(path_local, path_distant)
if os.path.isfile(path_local) and os.path.isfile(path_distant) and os.path.isfile(path_distant_backup) and checkCopy == False:
applyUI_OK()
self.printSTD('[ SYNC OK ]')
if os.path.isfile(path_local) and os.path.isfile(path_distant) and checkCopy == True:
applyUI_OK()
self.printSTD('[ COPY OK ]')
if not os.path.isfile(path_distant) and not os.path.isfile(path_distant_backup) and checkCopy == False:
self.printSTD('[ SYNC ERROR ]')
except:
self.printSTD('[ SYNC ERROR ]')
self.printSTD('-----------------------------------------------------------------------\n')
msg = '\nEND ' + msg
self.printSTD(str(msg))
#========= write list, for safety; to do better
self.write_Prefs(myPrefs,False)
def eventFilter(self, object, event):
name = object.objectName()
#========= Mouse Click
if event.type() == QtCore.QEvent.MouseButtonPress:
if str(name)=='BT_HOME_SCRIPTS':
self.on_BT_MAIN_clicked(name)
if str(name)=='BT_SYNC_SCRIPTS':
# self.on_BT_SYNC_SCRIPTS_clicked()
self.confirmBox('Confirm Sync')
return True
#========= Mouse Over
if event.type() == QtCore.QEvent.HoverEnter:
r = self.HOME_COLOR[0]
g = self.HOME_COLOR[1]
b = self.HOME_COLOR[2]
hexColorHome = self.rvbToHex(r, g, b)
if str(name)=='BT_SYNC_SCRIPTS':
hexColor = self.rvbToHex(25, 44, 50)
object.setStyleSheet(
"color: white;"
"background-color: "+hexColorHome+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 14px;"
"border-style: outset;"
"border-radius: 16px;"
"height: 40px;"
"max-width: 600px;"
)
else:
object.setStyleSheet(
"color: white;"
"background-color: "+hexColorHome+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 14px;"
"border-style: outset;"
"height: 40px;"
"border-radius: 16px;"
)
return True
#========= Mouse Out
if event.type() == QtCore.QEvent.HoverLeave:
hexColor = self.rvbToHex(25, 44, 50)
if str(name)=='BT_SYNC_SCRIPTS':
object.setStyleSheet(
"color: white;"
"background-color: "+hexColor+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 14px;"
"border-style: outset;"
"border-radius: 16px;"
"height: 40px;"
"max-width: 600px;"
)
else:
object.setStyleSheet(
"color: white;"
"background-color: "+hexColor+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 14px;"
"border-style: outset;"
"height: 40px;"
"border-radius: 16px;"
)
return True
#========= Mouse ClicK
# if event.type() == QtCore.QEvent.HoverLeave:
# hexColor = self.rvbToHex(25, 44, 50)
# object.setStyleSheet(
# "color: white;"
# "background-color: "+hexColor+";"
# "selection-color: yellow;"
# "selection-background-color: blue;"
# "font: bold 14px;"
# "border-style: outset;"
# "height: 40px;"
# )
# return True
#=========
return False
#======================================================================================================
#========= UI Construct Functions
#======================================================================================================
def on_TAB_clicked(self):
self.printSTD("on_TAB_clicked")
def closeWindows(self):
self.close()
def clear_LayoutOrWidget(self, LW):
try:
LW.deleteLater()
except:
pass
try:
self.clearLayout(LW)
except:
pass
def delete_TopAndMiddle(self):
try:
self.clear_LayoutOrWidget(self.MiddleAreaContainer)
except:
pass
try:
self.clear_LayoutOrWidget(self.BottomAreaContainer)
except:
pass
try:
self.clear_LayoutOrWidget(self.ScriptsAreaContainer)
except:
pass
def Construct_TopAndMiddle(self):
self.MiddleAreaContainer = QtGui.QWidget()
self.MiddleAreaContainer.setObjectName("MiddleAreaContainer")
self.construct_MiddleTabsArea()
self.BottomAreaContainer = QtGui.QWidget()
self.BottomAreaContainer.setObjectName("BottomAreaContainer")
self.construct_BottomAreaContent()
self.mainLayout.addWidget(self.MiddleAreaContainer)
self.mainLayout.addWidget(self.BottomAreaContainer)
def Construct_MiddleScript(self):
#========= Scripts Area container
self.ScriptsAreaContainer = QtGui.QListView()
self.ScriptsAreaContainer.setObjectName("ScriptsAreaContainer")
self.construct_ScriptsListArea()
self.mainLayout.addWidget(self.ScriptsAreaContainer)
self.mainLayout.addWidget(self.BT_SYNC_SCRIPTS, QtCore.Qt.AlignRight)
def on_CHKCP_CLIPBRD(self):
''' '''
cb = QtGui.QApplication.clipboard() # todo copy bt
cb.clear(mode=cb.Clipboard )
txtClipBoard = cb.text()
txtClipBoard = ''
# BottomContent = self.lockedOutputBottom.toPlainText()
if self.CHKCP_CLIPBRD.isChecked():
try:
lines = [line.rstrip('\n') for line in open(self.TMP_PATH_FILE_LOCKED)]
except:
lines = ['.']
if len(lines[0])>0: # '.' placeholder trick, to do better
for line in lines:
if len(line)>10: # 10 is path length, arbitrary
self.lockedOutputBottomCursor.movePosition(QtGui.QTextCursor.End)
txtClipBoard = str(line) +'\n'+ str(txtClipBoard)
txtClipBoard = txtClipBoard[:-2]
cb.setText(txtClipBoard, mode=cb.Clipboard)
self.lockedOutputBottom.selectAll()
else:
cb.setText(txtClipBoard, mode=cb.Clipboard)
# unSelectAll
my_text_cursor = self.lockedOutputBottom.textCursor()
my_text_cursor.clearSelection()
self.lockedOutputBottom.setTextCursor(my_text_cursor)
def on_BT_LOCKEDFILE_Local_clicked(self,name):
''' '''
if str(name)=='BT_CLEAR_LOCKEDFILE_Local':
f = open(self.TMP_PATH_FILE_LOCKED, 'w')
f.write('.\n') # python will convert \n to os.linesep
f.close()
self.lockedOutputBottom.setVisible(True)
self.lockedOutputBottom.setText('')
if str(name)=='BT_SEE_LOCKEDFILE_Local':
self.lockedOutputBottom.setFixedHeight(200)
self.lockedOutputBottom.setSizePolicy(QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Fixed)
self.lockedOutputBottomCursor.movePosition(QtGui.QTextCursor.End)
# to mutu
self.update_TMP_PATH_FILE_LOCKED()
#======================================================================================================
#========= Others Functions
#======================================================================================================
def list_Scripts(self):
scripts = os.listdir(self.CURRENT_SCRIPTS_PATH)
return scripts
def populate_prefs_scripts(self,item):
checkIfExist = False
#========= check if checked or already exist in list
#========= get json method 1
# with open(self.MYPREFSFILE) as json_file:
# myPrefs = json.load(json_file)
#========= get json method 2
# s = open(self.MYPREFSFILE, 'r').read()
# myPrefs = eval(s)
#========= get json method 3
myPrefs = json.load(open(self.MYPREFSFILE))
for key, value in myPrefs.items():
if str(key) == "scripts" and str(item.text()) in str(value) : # value is a list
checkIfExist = True
if checkIfExist == False:
self.MYPREFSJSON = myPrefs
self.MYPREFSJSON["scripts"].append(item.text())
# self.write_Prefs(self.MYPREFSJSON,False)
if checkIfExist == True:
new_values = []
for v in myPrefs["scripts"]:
if str(v) != str(item.text()):
new_values.append(v)
self.MYPREFSJSON.pop('scripts', 0) # to do better
self.MYPREFSJSON["scripts"] = []
for v in new_values:
self.MYPREFSJSON["scripts"].append(v)
# self.printSTD(self.MYPREFSJSON["scripts"])
self.write_Prefs(self.MYPREFSJSON,False)
def write_Prefs(self,myPrefs,isIndent=False):
if isIndent == False:
# with open(self.MYPREFSFILE, 'w') as outfile:
# json.dump(myPrefs, outfile)
json.dump(myPrefs, open(self.MYPREFSFILE,'w'))
if isIndent == True:
# with open(self.MYPREFSFILE, 'w') as outfile:
# json.dump(myPrefs, outfile, indent=4)
json.dump(myPrefs, open(self.MYPREFSFILE,'w'), indent=4)
def printSTD(self,msg):
print >> sys.__stderr__, msg
#======================================================================================================
#========= StyleSheets
#======================================================================================================
def rvbToHex(self,r,g,b):
# r = array_rgb[1], g=array_rgb[2], b=array_rgb[3]
hexColor = '#%02x%02x%02x' % (r, g, b)
return hexColor
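# Example (hedged sketch, values chosen for illustration): rvbToHex(25, 44, 50)
# returns '#192c32', which is the dark button background used in eventFilter above.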
def apply_Stylesheets(self):
#======================================================================================================
#========= Globals Colors
#======================================================================================================
#========= general Colors
self.rvb_White = [255, 255, 255]
self.palette_White = QtGui.QPalette()
self.palette_White.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(255, 255, 255))
self.rvb_Black = [0, 0, 0]
self.palette_Black = QtGui.QPalette()
self.palette_Black.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(0, 0, 0))
self.rvb_Grey = [128, 128, 128]
self.palette_Grey = QtGui.QPalette()
self.palette_Grey.setColor(QtGui.QPalette.Background,QtCore.Qt.gray)
self.rvb_hellGrey = [230, 230, 230]
self.palette_hellGrey = QtGui.QPalette()
self.palette_hellGrey.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(128, 128, 128))
self.rvb_darkGrey = [255, 255, 255]
self.palette_darkGrey = QtGui.QPalette()
self.palette_darkGrey.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(66, 66, 66))
#============================================ Complementary blues Colors # http://paletton.com
#========= blues Colors
self.rvb_Blue1 = [97, 114, 141]
self.palette_Blue1 = QtGui.QPalette()
self.palette_Blue1.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(97, 114, 141))
self.rvb_Blue2 = [64, 84, 115]
self.palette_Blue2 = QtGui.QPalette()
self.palette_Blue2.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(64, 84, 115))
self.rvb_Blue3 = [43, 61, 91]
self.palette_Blue3 = QtGui.QPalette()
self.palette_Blue3.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(43, 61, 91))
self.rvb_Blue4 = [25, 44, 75]
self.palette_Blue4 = QtGui.QPalette()
self.palette_Blue4.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(25, 44, 75))
self.rvb_Blue5 = [10, 25, 50]
self.palette_Blue5 = QtGui.QPalette()
self.palette_Blue5.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(10, 25, 50))
#========= blues Colors
self.rvb_Orange1 = [255, 196, 77]
self.palette_Orange1 = QtGui.QPalette()
self.palette_Orange1.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(255, 196, 77))
self.rvb_Orange2 = [255, 179, 27]
self.palette_Orange2 = QtGui.QPalette()
self.palette_Orange2.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(255, 179, 27))
self.rvb_Orange3 = [255, 170, 0]
self.palette_Orange3 = QtGui.QPalette()
self.palette_Orange3.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(255, 170, 0))
self.rvb_Orange4 = [206, 137, 0]
self.palette_Orange4 = QtGui.QPalette()
self.palette_Orange4.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(206, 137, 0))
self.rvb_Orange5 = [160, 107, 0]
self.palette_Orange5 = QtGui.QPalette()
self.palette_Orange5.setColor(QtGui.QPalette.Background, QtGui.QColor.fromHsv(160, 107, 0))
# other samples
# pal.setColor(QtGui.QPalette.ColorRole(9),QtGui.QColor("#4B4B4B"))
# pal.setColor(QtGui.QPalette.ColorRole(6),QtGui.QColor("#CCCCCC"))
#========= Style Buttons
#========= Main Home Buttons
hexColor = self.rvbToHex(25, 44, 50)
# self.BT_HOME_SCRIPTS.setStyleSheet('QPushButton {background-color: '+hexColor+'; color: white; height: 40px;}')
self.BT_HOME_SCRIPTS.setStyleSheet(
"color: white;"
"background-color: "+hexColor+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 14px;"
"border-style: outset;"
"height: 40px;"
"border-radius: 16px;"
)
self.BT_MAIN_2.setStyleSheet(
"color: white;"
"background-color: "+hexColor+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 14px;"
"border-style: outset;"
"height: 40px;"
"border-radius: 16px;"
)
self.BT_MAIN_3.setStyleSheet(
"color: white;"
"background-color: "+hexColor+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 14px;"
"border-style: outset;"
"height: 40px;"
"border-radius: 16px;"
)
# #========= Back to Home Button
r = self.HOME_COLOR[0]
g = self.HOME_COLOR[1]
b = self.HOME_COLOR[2]
hexColor = self.rvbToHex(r, g, b)
self.BT_BACK_HOME.setStyleSheet(
"color: white;"
"background-color: "+hexColor+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 14px;"
"border-style: outset;"
"height: 40px;"
)
#========= LOCAL TAB
#========= Locked Buttons local tab
try:
self.BT_SEE_LOCKEDFILE_Local.setStyleSheet(
"color: white;"
"background-color: "+hexColor+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 10px;"
"border-style: outset;"
"height: 15px;"
)
self.BT_CLEAR_LOCKEDFILE_Local.setStyleSheet(
"color: white;"
"background-color: "+hexColor+";"
"selection-color: yellow;"
"selection-background-color: blue;"
"font: bold 10px;"
"border-style: outset;"
"height: 15px;"
)
#========= checkBox local tab
self.CHK_SEARCH_ALL.setStyleSheet("color: white;") # to do
self.CHKCP_CLIPBRD.setStyleSheet("color: white;") # to do
except:
pass
#================================================================================================================ end Class QT__QT_KBZ__
#======================================================================================================================== Thread Classes
#================================ Thread Instance ( container )
class Thread_Instance():
def __init__(self, *args):
self.threads = []
arguments = []
for arg in args:
arguments.append(arg)
t = Thread_Worker(arguments, self) # self very Important
t.start()
self.threads.append(t)
# Wait for all threads to complete to test
# for t in self.threads:
# t.join()
# print "Exiting Main Thread"
def __del__(self):
for t in self.threads:
running = t.running()
# t.join() # Python QT QNetworkRequest exits with (process:3265): GLib-ERROR : Creating pipes for GWakeup: Too many open files
t.stop()
# print "Exiting Main Thread"
if not t.finished():
t.wait()
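# Illustrative sketch (not part of the original classes): each Thread_Instance
# spawns one Thread_Worker with the positional arguments forwarded as a list in
# the order expected by run_myFunction. All argument values below are hypothetical.
# instance = Thread_Instance('/u/proj/scenes', 'jdoe', 'PROJ', ['backup'], ['MA', 'MB'],
#                            '/tmp/locked.txt', False, 1, 1, 1)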
#================================ Thread Worker
class Thread_Worker(QtCore.QThread):
def __init__(self, args, receiver):
QtCore.QThread.__init__(self)
self.args = args
self.receiver = receiver # receiver ( self ) very Important
self.Terminated = 0
def run(self):
time.sleep(0.1) # to do in thread
try:
result = self.run_myFunction(self.args)
except:
pass
def stop(self):
self.Terminated = 1
#================================ functions
def run_myFunction(self,args):
''' get fileList '''
self.source = args[0]
self.USER_TO_SEARCH = args[1]
self.CURRENT_PROJECT = args[2]
self.EXCLUDE_DIR_LOCKED = args[3]
self.INCLUDE_EXT_LOCKED = args[4]
self.TMP_PATH_FILE_LOCKED = args[5]
self.CHK_SEARCH_ALL = args[6]
self.n_user = args[7]
self.n_users_real = args[8]
self.n_users_tot = args[9]
startTimeAll = datetime.now()
msg = '\n----- Search [ ' + self.USER_TO_SEARCH + ' ] Locked-UnPublished Files, Work in Progress! Please wait ...\n'
print >> sys.__stderr__, msg
randwait = ['.','..','...'] # for decoration
matches = []
for root, dirnames, filenames in os.walk(self.source, topdown=False, onerror=None, followlinks=False):
if not dirnames:
for filename in filenames:
ext = None
try:
ext = os.path.splitext(filename)[1][1:]
except:
pass
try:
if ext.upper() in self.INCLUDE_EXT_LOCKED:
filePath = os.path.join(root, filename)
result = self.get_fileInfo(filePath)
infoWrite = result[0]
infoOwner = result[1]
# matches.append(os.path.join(root, filename))
if infoWrite == True and infoOwner == self.USER_TO_SEARCH:
matches.append(os.path.join(root, filename))
msg = '\n' + filePath + ' [ LOCKED ]\n'
print >> sys.__stderr__, msg
except:
pass
matches = list(set(matches))
if len(matches) > 0 :
try:
with open(self.TMP_PATH_FILE_LOCKED) as f:
content = f.readlines()
except:
content=['.']
pass
f = open(self.TMP_PATH_FILE_LOCKED,'a')
for line in matches:
a7 = str(line)+'\n'
if a7 not in content:
# f = open(self.TMP_PATH_FILE_LOCKED,'a')
f.write('\n' +line+'\n') # python will convert \n to os.linesep
# f.close()
f.close()
# f = open(self.TMP_PATH_FILE_LOCKED, 'a')
# # mylist = [1, 2 ,6 ,56, 78]
# f.write("\n".join(map(lambda x: str(x), matches)))
# f.close()
#====== verbose mode
msg = ' '
print >> sys.__stderr__, msg
msg = '\n--------------------------------- ' + self.source + ' [ DONE ] \n'
print >> sys.__stderr__, msg
msg = str(self.n_user) + ' | ' + str(self.n_users_tot) + '\n'
print >> sys.__stderr__, msg
totTime = datetime.now() - startTimeAll
print >> sys.__stderr__, totTime
if str(self.n_user) == str(self.n_users_tot):
msg = '\n--------------------------------- [ CHECK PUBLISHED AND LOCKED FILE DONE in : ' + str(totTime) + ' ] \n'
print >> sys.__stderr__, msg
def get_fileInfo(self,source):
fileInfo = QtCore.QFileInfo(source)
infoWrite = fileInfo.isWritable()
infoOwner = fileInfo.owner()
return infoWrite, infoOwner
#================================ end functions Thread classes
#================================================================================================================ End Thread Classes
#===================================================================================================================================
#========= Start QT
#===================================================================================================================================
def start(parent, data):
print >> sys.__stderr__, "__QT_KBZ__"
main = __QT_KBZ__(parent)
array_welcome = ['Welcome Dear', 'Welcome', 'Willkommen', 'Welkom']
array_welcome = array_welcome + ['Bienvenue', 'Bienvenue Fulgence']
array_welcome = array_welcome + ['Xos', 'Ongietorri', 'I mirepritur']
welcome = random.choice(array_welcome)
main.setWindowTitle( ink.io.ConnectUserInfo()[2].upper() + ' | KARLOVA DASHBOARDZATOR | '+ welcome +' ' + os.getenv('USER') )
main.showMaximized()
# sG = QtGui.QApplication.desktop().screenGeometry()
# w = sG.width
# h = sG.height
main.resize(800, 800)
# main.move(300, 300)
# oX = 300
# oY = 300
# x = 150
# y = 200
# main.setGeometry(oX, oY, x, y)
main.setModal(True)
main.activateWindow()
main.raise_()
main.show()
############################################
# def checkLocal_locked():
# ''' '''
# for i in range(self.modelTab1.count()):
# yield self.item(i)
# # item1 = self.item(i).text(0) # text at first (0) column
# # self.printSTD(item1)
# local_TreeView = self.Tab1
# colIndex = 0
# root = self.modelTab1.invisibleRootItem()
# child_count = root.childCount()
# for i in range(child_count):
# item = root.child(i)
# item1 = item.text(0) # text at first (0) column
# # item.setText(1, 'result from %s' % url) # update result column (1)
# self.printSTD(item1)
# for n in range(nRows):
# Item_QModelIndex = modelScript.index(n, colIndex)
# # self.printSTD(Item_QModelIndex)
# # self.printSTD('------------------------')
# # item = Item_QModelIndex.data # return <built-in method data of QModelIndex object at 0x6102140>
# # self.printSTD(item)
# # item = Item_QModelIndex.data() # return default Qt.DisplayRole = .data(QtCore.Qt.DisplayRole) = text
# # self.printSTD(item)
# itemChecked
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from typing import List, Tuple, Mapping, Text, Union, Optional, Callable
import random
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as tfkl
from .utils import is_tensor_type
from .backend.ops import geometric_weighted_mean, get_local_lateral, get_global_lateral
# anything that can directly initialize tf.zeros(.) is acceptable
Shape3D = Union[Tuple[int, int, int], tf.TensorShape, tf.Tensor]
class GLOM(keras.Model):
HPARAM_DEFAULTS = dict(
a_td=1., # top down weight multiply
a_lat=1., # lateral weight multiply
a_bu=1., # bottom up weight multiply
b_td=0., # top down weight shift
b_lat=0., # lateral weight shift
b_bu=0., # bottom up weight shift
sparsity=0.2, # activation sparsity
lr_awake=0.005, # learning rate when awake
lr_asleep=0.02, # learning rate when asleep
epsilon_control=1e-3, # prevents ln(0) for td attn weight
window_size=(5, 5), # x_loc window
roll_over=True, # x_loc connect edges
global_sparsity=0.1, # x_global sparsity
connection_activation='relu', # for interlayer influence
# run_steps=4, # number of steps to run before returning output
clip_value_min=1e-2,
clip_value_max=10.,
sparsity_L1_penalty=0.1,
)
CONN_KS = dict(
fn='fn', # if None (default) a neural network is made and stored by GLOMRNNCell
inputs='inputs', # list, tuple, or single string
outputs='outputs', # list, tuple, or single string
type='type', # 'bu' | 'td' | 'lat'
)
def __init__(self,
input_layers: List[Text],
output_layers: List[Text],
layer_sizes: Mapping[Text, Shape3D],
connections: List[Mapping],
asleep_optimizer: Optional[tf.optimizers.Optimizer] = None,
awake_optimizer: Optional[tf.optimizers.Optimizer] = None,
hparams: Optional[Mapping[Text, object]] = None,
name: Optional[Text] = None):
super(GLOM, self).__init__(name=name)
self._mode = 'awake' # 'awake' | 'asleep'
self.input_layers = input_layers
self.output_layers = output_layers
self.layer_sizes = {k: tf.TensorShape(v) for k, v in layer_sizes.items()}
self.connections = connections
if hparams is None:
hparams = dict()
self.hparams: dict = GLOM.HPARAM_DEFAULTS.copy()
self.hparams.update(hparams)
if awake_optimizer is None:
awake_optimizer = tf.optimizers.SGD(self.hparams['lr_awake'])
if asleep_optimizer is None:
asleep_optimizer = tf.optimizers.SGD(self.hparams['lr_asleep'])
self.optimizers = dict(awake=awake_optimizer,
asleep=asleep_optimizer)
# clean self.connections
for i in range(len(self.connections)):
# allow unit (non list) connections
if not isinstance(connections[i]['inputs'], (list, tuple)):
self.connections[i]['inputs'] = [connections[i]['inputs']]
if not isinstance(connections[i]['outputs'], (list, tuple)):
self.connections[i]['outputs'] = [connections[i]['outputs']]
if 'fn' not in connections[i]:
connections[i]['fn'] = None
self.call_fns = {
('awake', True): self._call_awake_training,
('awake', False): self._call_awake_not_training,
('asleep', True): self._call_asleep_training,
('asleep', False): self._call_asleep_not_training,
}
def build(self, input_shape):
for i, connection in enumerate(self.connections):
if connection['fn'] is None:
self.connections[i]['fn'] = ManyToManyDense(
input_layer_shapes=[(layer, self.layer_sizes[layer])
for layer in connection['inputs']],
output_layer_shapes=[(layer, self.layer_sizes[layer])
for layer in connection['outputs']],
activation=self.hparams['connection_activation'],
sparsity=self.hparams['sparsity'],
concat_axis=-2, split_axis=-2,
name=f'{".".join(connection["inputs"])}-{".".join(connection["outputs"])}'
)
def call(self, layer_states: Mapping[Text, Mapping], training=None, mask=None):
training = True # QT workaround "call() got multiple values for argument 'training'"
if training is None:
training = False
grads_and_vars = list()
call_fn = self.call_fns[(self._mode, training)]
# run appropriate function
layer_states, new_grads_and_vars = call_fn(layer_states)
grads_and_vars.extend(new_grads_and_vars)
# maybe apply gradients
if training:
# THIS IS NOT NEEDED SINCE TF.OPTIMIZER CONSOLIDATES GRADIENTS AUTOMATICALLY
## consolidate grads and vars
# vars = set(v for g, v in grads_and_vars)
# grads_and_vars_dict = {v: [] for v in vars}
# for g, v in grads_and_vars:
# grads_and_vars_dict[g].append(v)
# for g, _ in grads_and_vars:
# grads_and_vars_dict[g] = tf.concat(grads_and_vars_dict[g], axis=0)
optimizer = self.optimizers[self._mode]
optimizer.apply_gradients(grads_and_vars)
layer_states = {
layer: dict(
x=tf.clip_by_value(state['x'],
clip_value_min=self.hparams['clip_value_min'],
clip_value_max=self.hparams['clip_value_max']),
e=tf.clip_by_value(state['e'],
clip_value_min=self.hparams['clip_value_min'],
clip_value_max=self.hparams['clip_value_max']),
e_norm=tf.clip_by_value(state['e_norm'],
clip_value_min=self.hparams['clip_value_min'],
clip_value_max=self.hparams['clip_value_max'])
) for layer, state in layer_states.items()
}
# return next layer states # output values
return layer_states # {k: layer_states[k]['x'] for k in self.output_layers}
def _call_awake_training(self, layer_states: Mapping[Text, Mapping[Text, tf.Tensor]]) \
-> Tuple[Mapping[Text, Mapping[Text, tf.Tensor]], List[Tuple[tf.Tensor, tf.Variable]]]:
new_errs = {layer: list() for layer in self.layer_sizes.keys()}
layer_targets = {layer: list() for layer in self.layer_sizes.keys()}
new_layer_states = {layer: dict(x=None, e=None, e_norm=None)
for layer in self.layer_sizes.keys()}
grads_and_vars = []
# compute targets for all layers and backpropagate errors
for connection in self.connections:
# get inputs
input_vals = [layer_states[layer]['x'] for layer in connection['inputs']]
# forward propagation
with tf.GradientTape() as tape:
tape.watch(input_vals)
output_vals = connection['fn'](input_vals)
ssl_loss = sum(connection['fn'].losses)
output_vals = tf.nest.flatten(output_vals) # ensure is list. x -> [x]
# TODO If I want contrastive spatial representation, add this to the gradients targets
# get x_local, x_global from output_val for each output_val in output_vals
# sim_local = tf.einsum('...d,...id', x, x_local)
# sim_global = tf.einsum('...d,...id', x, x_global)
# sim_cat = tf.concat([sim_local, sim_global], axis=-2)
# self.add_loss(tf.reduce_mean(sim_global) - tf.reduce_mean(sim_local))
# difference-based saliency
ws = [tf.norm(val - layer_states[layer]['x'], ord=2, axis=-1)
for layer, val in zip(connection['outputs'], output_vals)]
# apply hyper-parameters
conn_type = connection['type'] # 'bu' or 'td'. Must match dictionary key exactly
ws = [self.hparams[f'a_{conn_type}'] * w + self.hparams[f'b_{conn_type}'] for w in ws]
# assign output vals
for layer, w, output in zip(connection['outputs'], ws, output_vals):
layer_targets[layer].append((w[..., None], output[..., None]))
# backpropagate errors
input_grads, weight_grads = tape.gradient(
target=(output_vals, ssl_loss),
sources=(input_vals, connection['fn'].trainable_weights),
output_gradients=[layer_states[layer]['e'] for layer in connection['outputs']])
# backpropagate errors top down
for layer, input_grad in zip(connection['inputs'], input_grads):
new_errs[layer].append(input_grad)
# store parameter gradients
grads_and_vars.extend([(grads, var) for grads, var
in zip(weight_grads, connection['fn'].trainable_weights)])
# compute lateral self-attention
for layer in layer_states.keys():
x = layer_states[layer]['x']
tf.assert_rank(x, 4, 'inputs should be four dimensional [B, X, Y, D]')
x_local = get_local_lateral(x=x, window_size=self.hparams['window_size'],
roll_over=self.hparams['roll_over'])
x_global = get_global_lateral(x=x, global_sparsity=self.hparams['global_sparsity'])
x_neighbor = tf.concat([x_local, x_global], axis=-2)
x_neighbor = tf.einsum('...id->...di', x_neighbor)
# compute similarity scores
similarity = tf.einsum('...d,...di->...i', x, x_neighbor)
# divide by x**2 to make self similarity = 1
self_similarity = tf.einsum('...d,...d->...', x, x)
similarity = similarity / (self_similarity[..., None] + 1e-2)
similarity = self.hparams['a_lat'] * similarity + self.hparams['b_lat']
layer_targets[layer].append((similarity, x_neighbor))
# apply targets
for layer, targets in layer_targets.items():
new_layer_states[layer]['x'] = geometric_weighted_mean(
xs=[x for w, x in targets], ws=[w for w, x in targets])
# update errors
for layer in new_layer_states.keys():
with tf.name_scope(f'{layer}_update'):
new_errs[layer].append(new_layer_states[layer]['x'] - layer_states[layer]['x'])
new_layer_states[layer]['e'] = sum(new_errs[layer]) / len(new_errs[layer])
new_layer_states[layer]['e_norm'] = tf.norm(new_layer_states[layer]['e'], ord=2, axis=-1) / \
new_layer_states[layer]['e'].shape[-1] # assuming binary RV's
return new_layer_states, grads_and_vars
def _call_awake_not_training(self, layer_states: Mapping[Text, Mapping[Text, tf.Tensor]]) \
-> Tuple[Mapping[Text, Mapping[Text, tf.Tensor]], List[Tuple[tf.Tensor, tf.Variable]]]:
pass
def _call_asleep_training(self, layer_states: Mapping[Text, Mapping[Text, tf.Tensor]]) \
-> Tuple[Mapping[Text, Mapping[Text, tf.Tensor]], List[Tuple[tf.Tensor, tf.Variable]]]:
pass
def _call_asleep_not_training(self, layer_states: Mapping[Text, Mapping[Text, tf.Tensor]]) \
-> Tuple[Mapping[Text, Mapping[Text, tf.Tensor]], List[Tuple[tf.Tensor, tf.Variable]]]:
pass
@property
def state_size(self):
initial_state_sizes = {layer: {
'x': size,
'e': size,
'e_norm': size[:-1],
} for layer, size in self.layer_sizes.items()}
return initial_state_sizes # tf.nest.flatten(initial_state_sizes)
def get_initial_state(self, batch_size=1):
return tf.nest.map_structure(lambda shape: tf.random.uniform(shape=[batch_size] + shape, minval=0, maxval=0.1),
self.state_size)
# @property
# def output_size(self):
# return [self.layer_sizes[layer] for layer in self.output_layers]
@property
def get_mode(self):
return self._mode
def set_mode(self, mode):
self._mode = mode
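# Illustrative sketch (not part of the original module): a minimal two-level GLOM
# wiring. Layer names, shapes and the connection list are hypothetical; only the
# constructor arguments defined above are used.
def _example_build_glom():
    layer_sizes = {'l1': (8, 8, 16), 'l2': (8, 8, 16)}
    connections = [
        dict(inputs='l1', outputs='l2', type='bu'),  # bottom-up connection
        dict(inputs='l2', outputs='l1', type='td'),  # top-down connection
    ]
    glom = GLOM(input_layers=['l1'], output_layers=['l2'],
                layer_sizes=layer_sizes, connections=connections)
    state = glom.get_initial_state(batch_size=2)
    return glom, state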
class ManyToManyDense(tfkl.Layer):
def __init__(self,
input_layer_shapes: List[Tuple[Text, Tuple]],
output_layer_shapes: List[Tuple[Text, Tuple]],
activation: Union[Text, Callable] = 'relu',
sparsity: float = 0.1,
concat_axis: int = -2,
split_axis: int = -2,
name: Text = None):
super(ManyToManyDense, self).__init__(name=name)
self.input_layer_shapes = input_layer_shapes
self.output_layer_shapes = output_layer_shapes
self.activation = activation
self.sparsity = sparsity
self.concat_axis = concat_axis
self.split_axis = split_axis
def build(self, input_shape):
del input_shape
combined_input_len = sum(input_layer_shape[1][self.concat_axis]
for input_layer_shape in self.input_layer_shapes)
input_shape = list(self.input_layer_shapes[0][1])
input_shape[self.concat_axis] = combined_input_len
combined_output_len = sum(output_layer_shape[1][self.split_axis]
for output_layer_shape in self.output_layer_shapes)
output_shape = list(self.output_layer_shapes[0][1])
output_shape[self.split_axis] = combined_output_len
self.denseND_layer = _DenseND(
input_shape=input_shape,
output_shape=output_shape,
sparsity=self.sparsity,
activation=self.activation,
name=f'{self.name}_denseND'
)
self.concat_transform_split = _ConcatTransformSplit(
transform_fn=self.denseND_layer,
concat_axis=self.concat_axis,
num_or_size_splits=[output_layer_shape[1][self.split_axis]
for output_layer_shape in self.output_layer_shapes],
split_axis=self.split_axis,
name=f'{self.name}_concatsplit'
)
def call(self, inputs, **kwargs):
return self.concat_transform_split(inputs)
class _ConcatTransformSplit(tfkl.Layer):
def __init__(self,
transform_fn: Optional[Callable[[tf.Tensor], tf.Tensor]] = None,
concat_axis: Optional[int] = -2,
num_or_size_splits: Optional[List[int]] = None,
split_axis: Optional[int] = -2,
name: Optional[Text] = None):
super(_ConcatTransformSplit, self).__init__(name=name)
if transform_fn is None:
transform_fn = (lambda x: x)
self.transform_fn = transform_fn
self.concat_axis = concat_axis
        self.num_or_size_splits = num_or_size_splits
import os
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import sys
import tkinter as Tk
import tkinter.filedialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from scipy.optimize import curve_fit
from scipy.signal import butter, lfilter
import pandas as pd
import xlrd
from astropy.table import Table
import xlsxwriter
from tkinter import ttk
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*", category=UserWarning)
root = Tk.Tk()
#Initialize global variables for analysis
Delete=1; # Number of initial points to delete
Clipid=15e-3 # Concentration of the Lipid in the syringe [M]
Cdrug=100e-6 # Concentration of the Drug in the cell [M]
Vw=200e-6 # Volume of the Cell [L]
vL=(1398e-27)*(6.0221409e23) #Vesicles Volume
aL=(7e-19)*(6.0221409e23) #Vesicles Area
aD=(3e-19)*(6.0221409e23) #Drug Area
R=8.314459848; #J/(mol*Kelvin)
T=298.15; #Kelvin
F=96485.336521; #C/mol
eps0=8.85*1e-12; #Farads/meter
eps=78.4; #Farads/meter
SaltC=0.15; #moles/L Salt concentration
zdrug= 1; #Charge of drug
#Initializing GUI global variables
sheet = None
a = None
dh_drug = None
injv_drug = None
sheet2 = None
a2 = None
dh_control = None
injv_control = None
savename=''
CD = None
label5 = None
drug_charge_prev = None
canvas=None
canvas2=None
lipidselect=None
lipid_col=3
label4 = None
label3=None
#Reset GUI
def Reset1():
python = sys.executable
os.execl(python, python, * sys.argv)
#Initial GUI screen, select data directory/files
def Command1():
global CD
global label3
global label4
global label5
if label3 != None:
label3.destroy()
if savename == '': #Choosing directory for the first time
root.directory = Tk.filedialog.askdirectory()
CD=root.directory
label3=Tk.Label(root,text=CD)
label3.grid(row=0,column=1,sticky=Tk.W,columnspan=6)
label4=Tk.Label(root,text='Select ITC Experiment File - Drug')
label4.grid(row=1,column=1,sticky=Tk.W)
label5=Tk.Label(root,text='Select ITC Background File - Control (Optional)')
label5.grid(row=2,column=1,sticky=Tk.W)
else: #Reselecting directory
label4.destroy()
label5.destroy()
root.directory = Tk.filedialog.askdirectory()
CD=root.directory
label3=Tk.Label(root,text=CD)
label3.grid(row=0,column=1,sticky=Tk.W,columnspan=6)
label4=Tk.Label(root,text='Select ITC Experiment File - Drug')
label4.grid(row=1,column=1,sticky=Tk.W)
label5=Tk.Label(root,text='Select ITC Background File - Control (Optional)')
label5.grid(row=3,column=1,sticky=Tk.W)
#Choose drug file
def Drug1():
global a
global sheet
global savename
global dh_drug
global injv_drug
global label1001
global entry1001
global label1002
global entry1002
global label1003
global entry1003
global label1004
global entry1004
global label1005
global entry1005
global label1006
global entry1006
global label1007
global entry1007
global label1008
global entry1008
global label1009
global entry1009
global button3
global label5
global label4
global button99
global lipidselect
global label_lip_area
global label_lip_area_e
global label_lip_thickness
global label_lip_thickness_e
global button10
global lipid_col
#User can choose experiment drug excel file, which is then read
root.filename = Tk.filedialog.askopenfilename(initialdir = root.directory,title = "Select file",filetypes = (("XLS","*.xls"),("XLSX","*.xlsx"),("all files","*.*")))
df = pd.read_excel(root.filename,)
a=df.shape
wb = xlrd.open_workbook(root.filename)
sheet = wb.sheet_by_index(0)
label4.destroy()
label4=Tk.Label(root,text=root.filename)
label4.grid(row=1,column=1,sticky=Tk.W)
savename=root.filename
#User can select columns for the heat and injection volume from excel file
button3.destroy()
label5.destroy()
button99.destroy()
labeldh1=Tk.Label(root,text='Column for Heat (DH):')
labelinjv1=Tk.Label(root,text='Column for Injection Volume (INJV):')
entrydh1=Tk.Entry(root,width=5)
entryinjv1=Tk.Entry(root,width=5)
labeldh1.grid(row=1,column=3,sticky=Tk.W,pady=(2,2), padx=(5,0))
entrydh1.grid(row=1,column=4,sticky=Tk.W)
entrydh1.insert(Tk.END, '0')
dh_drug = int(entrydh1.get())
labelinjv1.grid(row=2,column=3,sticky=Tk.W,pady=(2,2), padx=(5,0))
entryinjv1.grid(row=2,column=4,sticky=Tk.W)
entryinjv1.insert(Tk.END, '1')
injv_drug = int(entryinjv1.get())
#Moving buttons and labels in GUI to make it look nice
button3=Tk.Button(text='Select Control Background File',fg='blue',command=Background1,width=25)
button3.grid(row=3,column=0,sticky=Tk.W)
label5=Tk.Label(root,text='Select ITC Background File - Control (Optional)')
label5.grid(row=3,column=1,sticky=Tk.W)
button99=Tk.Button(text='Run ITC Analysis',fg='black',command=testing,height=5,width=25)
button99.grid(row=4,column=0,sticky=Tk.W,columnspan=2,rowspan=5)
lipid_col = 5
label1001.grid(row=1,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1001.grid(row=1,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1002.grid(row=2,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1002.grid(row=2,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1003.grid(row=3,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1003.grid(row=3,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1004.grid(row=4,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1004.grid(row=4,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1007.grid(row=8,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1007.grid(row=8,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1008.grid(row=9,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1008.grid(row=9,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label1009.grid(row=10,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
entry1009.grid(row=10,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
button8.grid(row=0,column=8,sticky=Tk.E)
label10100.grid(row=5,column=5,sticky=Tk.W,pady=(2,2), padx=(15,0))
labelarrow.grid(row=6,column=6,sticky=Tk.W,pady=(2,2), padx=(70,0), columnspan=2, rowspan=2)
label1005.grid(row=6,column=6,sticky=Tk.W,pady=(2,2), padx=(95,0), columnspan=2)
entry1005.grid(row=6,column=8,sticky=Tk.W,pady=(2,2), padx=(0,5))
label1006.grid(row=7,column=6,sticky=Tk.W,pady=(2,2), padx=(95,0), columnspan=2)
entry1006.grid(row=7,column=8,sticky=Tk.W,pady=(2,2), padx=(0,5))
label_lip_area.grid(row=6,column=5,sticky=Tk.W,pady=(2,2), padx=(90,0))
label_lip_area_e.grid(row=6,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
label_lip_thickness.grid(row=7,column=5,sticky=Tk.W,pady=(2,2), padx=(90,0))
label_lip_thickness_e.grid(row=7,column=6,sticky=Tk.W,pady=(2,2), padx=(0,0))
lipidselect.grid(column = 6, row = 5, sticky= Tk.W)
button10.grid(row=5,column=7,sticky=Tk.W,pady=(2,2), padx=(5,0)) #15
#Choose background file
def Background1():
global sheet2
global a2
global dh_control
global injv_control
global button99
global label5
label5.destroy()
#User can choose experiment drug excel file, which is then read
root.filename = Tk.filedialog.askopenfilename(initialdir = root.directory,title = "Select file",filetypes = (("XLS","*.xls"),("XLSX","*.xlsx"),("all files","*.*")))
df2 = pd.read_excel(root.filename,)
a2=df2.shape
wb2 = xlrd.open_workbook(root.filename)
sheet2 = wb2.sheet_by_index(0)
label5=Tk.Label(root,text=root.filename)
label5.grid(row=3,column=1,sticky=Tk.W)
#User can select columns for the heat and injection volume from excel file
labeldh2=Tk.Label(root,text='Column for Heat (DH):')
labelinjv2=Tk.Label(root,text='Column for Injection Volume (INJV):')
entrydh2=Tk.Entry(root,width=5)
entryinjv2=Tk.Entry(root,width=5)
labeldh2.grid(row=3,column=3,sticky=Tk.W,pady=(2,2), padx=(5,0))
entrydh2.grid(row=3,column=4,sticky=Tk.W)
entrydh2.insert(Tk.END, '0')
dh_control = int(entrydh2.get())
labelinjv2.grid(row=4,column=3,sticky=Tk.W,pady=(2,2), padx=(5,0))
entryinjv2.grid(row=4,column=4,sticky=Tk.W)
entryinjv2.insert(Tk.END, '1')
injv_control = int(entryinjv2.get())
button99.destroy()
button99=Tk.Button(text='Run ITC Analysis',fg='black',command=testing,height=5,width=25)
button99.grid(row=5,column=0,sticky=Tk.W,columnspan=2,rowspan=5)
#Run ITC analysis
def testing():
global sheet
global a
global sheet2
global a2
global savename
global CD
global injv_drug
global injv_control
global dh_drug
global dh_control
global drug_charge_prev
global canvas
global canvas2
savename = savename.split('.')[0]
#Get parameters from GUI
Delete=int(entry1001.get())
Clipid=float(entry1002.get())
Cdrug=float(entry1003.get())
Vw=float(entry1004.get())
vL_str = entry1005.get()
if '*' in vL_str:
vL_arr = vL_str.split('*')
if '(' in vL_str:
vL =float(vL_arr[0].strip("()"))*float(vL_arr[1].strip("()"))
else:
vL =float(vL_arr[0])*float(vL_arr[1])
else:
if '(' in vL_str:
vL = float(vL_str.strip("()"))
else:
vL = float(vL_str)
aL_str = entry1006.get()
if '*' in aL_str:
aL_arr = aL_str.split('*')
if '(' in aL_str:
aL =float(aL_arr[0].strip("()"))*float(aL_arr[1].strip("()"))
else:
aL =float(aL_arr[0])*float(aL_arr[1])
else:
if '(' in aL_str:
aL = float(aL_str.strip("()"))
else:
aL = float(aL_str)
aD_str = entry1007.get()
if '*' in aD_str:
aD_arr = aD_str.split('*')
if '(' in aD_str:
aD =float(aD_arr[0].strip("()"))*float(aD_arr[1].strip("()"))
else:
aD =float(aD_arr[0])*float(aD_arr[1])
else:
if '(' in aD_str:
aD = float(aD_str.strip("()"))
else:
aD = float(aD_str)
R=8.314459848; #J/(mol*Kelvin)
T=298.15; #Kelvin
F=96485.336521; #C/mol
eps0=8.85*1e-12; #Farads/meter
eps=78.4; #relative permittivity of water (dimensionless)
SaltC=float(entry1008.get())
zdrug=int(entry1009.get())
#Define fit functions used for Kp and Phi
def func1(x, dH, Kp):
return (Vw*Cdrug)*Vw*Kp*Vinj_add*Clipid*aL*dH*(1e6)/np.power((Vw+(x-0.5)*Vinj_add*(1+Clipid*aL*Kp)),2)
def func2(X, dH, Kp):
x,Phi = X
return 1e6*((dH*(Vw*Cdrug)*x*const1*Kp*np.exp(-beta*Phi)/(Vw+x*Vinj+x*const1*Kp*np.exp(-beta*Phi)))-(dH*(Vw*Cdrug)*(x-1)*const1*Kp*np.exp(-beta*Phi)/(Vw+(x-1)*Vinj+(x-1)*const1*Kp*np.exp(-beta*Phi))))
#Getting values for heats and injection volumes from excel file
DH=[]
Vinj=[]
Inj=[]
for i in range(a[0]-1-Delete):
DH_add=sheet.cell_value(i+1+Delete, dh_drug)
DH.append(DH_add)
Vinj_add=sheet.cell_value(i+1+Delete, injv_drug)
Vinj_add=Vinj_add/1e6
Vinj.append(Vinj_add)
Inj.append(i+1)
DH=np.array(DH)
Vinj=np.array(Vinj)
Inj=np.array(Inj)
if sheet2 != None:
DH2=[]
for i in range(a2[0]-1-Delete):
DH_add2=sheet2.cell_value(i+1+Delete, dh_control)
DH2.append(DH_add2)
DH3=np.array(DH2)
if DH.shape[0]>DH3.shape[0]:
for i in range(DH.shape[0]-DH3.shape[0]):
DH2.append(0)
DH2=np.array(DH2)
if DH.shape[0]<DH3.shape[0]:
DH2=np.array(DH2)
DH2=DH2[range(DH.shape[0])]
DH=DH-DH2
xdata = Inj
ydata = DH
#Clears previous graphs and output text if present
if drug_charge_prev != None:
for ix in range(4):
labempty = Tk.Label(root,text=' ' * 280)
labempty.grid(row=14+ix,column=0,columnspan=4)
canvas.get_tk_widget().destroy()
if drug_charge_prev != 0 and canvas2 != None:
canvas2.get_tk_widget().destroy()
#Display Kp graph
f = Figure(figsize=(5, 4), dpi=100)
aa = f.add_subplot(111)
aa.plot(xdata,ydata,'.')
aa.set_xlabel('Injection Number')
aa.set_xticks(np.arange(0, np.max(xdata)+1, step=2))
aa.set_ylabel('\u03BC cal')
canvas = FigureCanvasTkAgg(f, master=root)
canvas.draw()
if zdrug == 0:
canvas.get_tk_widget().grid(row=12,column=1, columnspan=3, pady=10, padx=10)
else:
canvas.get_tk_widget().grid(row=12,column=0, columnspan=2, pady=10, padx=10)
#Fit for Kp
Kp=1
dH=ydata[0]*T
dHeat=(0,ydata[0]*np.inf)
for i in range(1000):
popt, pcov = curve_fit(func1, xdata, ydata, p0=[dH,Kp], bounds=([np.min(dHeat), 1e-10], [np.max(dHeat), 10e10]))
residuals2 = ydata- func1(xdata, *popt)
ss_res2 = np.sum(residuals2**2)
ss_tot2 = np.sum((ydata-np.mean(ydata))**2)
r_squared2 = 1 - (ss_res2 / ss_tot2)
dH2=popt[0]
Kp2=popt[1]
dG2=-1.9858775*298.15*np.log(Kp2*(aL/vL))
TdS2=dH2-dG2
if np.abs(Kp-Kp2)+np.abs(dH-dH2)<=0:
break
else:
Kp=Kp2
dH=dH2
Sample1=[]
Sample1.append([Kp2*(aL/vL),dH2,dG2,TdS2,r_squared2,ss_res2])
aa.plot(xdata, func1(xdata, *popt))
f.savefig(CD+'/figure1_Kp.png')
xdata_np = np.array(xdata)
fit_yvals = np.array(func1(xdata, *popt))
fit_table2 = []
for fiti in range(0,len(xdata_np)):
xx = xdata[fiti]
yy = fit_yvals[fiti]
minilst = [xx,yy]
fit_table2.append(minilst)
fit_table2 = np.array(fit_table2)
fit_df = pd.DataFrame(data=fit_table2, index=None, columns=["Injection Number", "Heat (ucal)"])
writer = pd.ExcelWriter((savename+'fit.xlsx'), engine='xlsxwriter')
fit_df.to_excel(writer, sheet_name='fit',index=False)
writer.save()
#Phi calculations and fit
if zdrug != 0:
k=np.power(8000*R*T*eps*eps0*SaltC,.5)
beta=zdrug*F/(R*T)
const1=Clipid*aL*Vinj_add
const2=const1*k/(zdrug*F)
for i in range(1000):
Phi=[]
DH=DH/1e6
for j in range(xdata.shape[0]):
A1=zdrug*F/(np.power(8000*R*T*eps*eps0,.5)*(np.power(SaltC,.5)))
B1=np.sum(DH[range(j+1)])/((xdata[j]*Vinj_add*Clipid*aL*dH)+(np.sum(DH[range(j+1)])*aD))
A=np.sum(DH[range(j+1)])/(xdata[j]*const2*dH)
B=dH*xdata[j]*const1/(np.sum(DH[range(j+1)])*aD+dH*xdata[j]*const1)
Phi_=(2/beta)*np.arcsinh(A1*B1)*(zdrug/abs(zdrug))
Phi.append(Phi_)
Phi=np.array(Phi)
DH=DH*1e6
popt, pcov = curve_fit(func2, (xdata,Phi), ydata, p0=[dH, Kp], bounds=([-10e10, 1e-10], [10e10, 10e10]))
residuals2 = ydata- func2((xdata,Phi), *popt)
ss_res2 = np.sum(residuals2**2)
ss_tot2 = np.sum((ydata-np.mean(ydata))**2)
r_squared2 = 1 - (ss_res2 / ss_tot2)
dH2=popt[0]
Kp2=popt[1]
dG2=-1.9858775*298.15*np.log(Kp2*(aL/vL))
TdS2=dH2-dG2
if np.abs(Kp-Kp2)+np.abs(dH-dH2)<=0:
break
else:
Kp=Kp2
dH=dH2
#Display Phi graph
f2 = Figure(figsize=(5, 4), dpi=100)
aa2 = f2.add_subplot(111)
aa2.plot(xdata, Phi*1000,'+')
aa2.set_xlabel('Injection Number')
aa2.set_xticks(np.arange(0, np.max(xdata)+1, step=2))
aa2.set_ylabel('\u03A6 mV')
f2.savefig(CD+'/figure2_Phi.png')
canvas2 = FigureCanvasTkAgg(f2, master=root)
canvas2.draw()
canvas2.get_tk_widget().grid(row=12,column=3, columnspan=5, pady=10, padx=10)
Sample1.append([Kp2*(aL/vL),dH2,dG2,TdS2,r_squared2,ss_res2])
t = Table(np.round(np.array(Sample1),decimals=4), names=('Kp','\u0394H','\u0394G','T\u0394S','r\u00b2','SSE'))
#Display results of analysis
if zdrug == 0:
charge_print = '0'
elif zdrug > 0:
charge_print = '+' + str(zdrug) + ' '
elif zdrug < 0:
charge_print = str(zdrug) + ' '
if zdrug == 0:
Sample1_copy = np.round(np.array(Sample1[0][:]),decimals=4)
Sample1_copy = np.transpose(np.reshape(Sample1_copy, (-1,1)))
all_charges=['0 ']
else:
Sample1_copy = np.round(np.array(Sample1),decimals=4)
all_charges=['0 ',charge_print]
outrow,outcol = np.shape(Sample1_copy)
table_rows = ['Kp','\u0394H','\u0394G','T\u0394S','r\u00b2','SSE']
for ix in range(outrow+2):
row_txt = " \t"
for iy in range(outcol):
if ix == 0:
val_t = str(Sample1_copy[0,iy])
char = len(val_t)
w_space = " " * 6
name_t = table_rows[iy]
extra1 = " " * (int(np.floor(char/2))-int(np.ceil(len(name_t)/2)))
extra2 = " " * (int(np.ceil(char/2))-int(np.floor(len(name_t)/2)))
row_txt = row_txt + w_space + extra1 + name_t + extra2 + w_space + "\t"
if ix == 1:
val_t = str(Sample1_copy[0,iy])
char = len(val_t) + 1
w_space = " " * 3
barr = "-" * char
extra = " " * 3
row_txt = row_txt + w_space + barr + extra + "\t"
if ix > 1:
val_t = str(Sample1_copy[ix-2,iy])
w_space = " " * 3
row_txt = row_txt + w_space + val_t + w_space + "\t"
if ix == 0 and iy == outcol-1:
lab = Tk.Label(root,text=row_txt)
lab.grid(row=14+ix,column=1,columnspan=3)
elif ix == 1 and iy == outcol-1:
lab = Tk.Label(root,text=row_txt)
lab.grid(row=14+ix,column=1,columnspan=3)
elif ix > 1 and iy == outcol-1:
lab = Tk.Label(root,text=row_txt)
lab.grid(row=14+ix,column=1,columnspan=3)
lab_charge = Tk.Label(root,text='\t Calculations for Z(Drug) = '+all_charges[ix-2]+' --->', justify=Tk.LEFT)
#!/usr/bin/env python
"""
emr_simulator.py
Part of Dooplicity framework
Runs JSON-encoded Hadoop Streaming job flow. FUNCTIONALITY IS IDIOSYNCRATIC;
it is currently confined to those features used by Rail. Format of input JSON
mirrors that of StepConfig list from JSON sent to EMR via RunJobsFlow. Any
files input to a mapper can be gzip'd, but inputs to a reducer currently cannot
be.
In --ipy mode, the script uses IPython Parallel to run tasks on different
engines mediated by a controller. IPython Parallel controller and engines must
be started before this script is invoked.
All paths in input JSON should be absolute.
Licensed under the MIT License:
Copyright (c) 2014 <NAME> and <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import argparse
import sys
from collections import defaultdict, OrderedDict, deque
import time
import json
import interface as dp_iface
import gc
import signal
import socket
import subprocess
import glob
import hashlib
import tempfile
import shutil
import os
import contextlib
from tools import make_temp_dir, make_temp_dir_and_register_cleanup
from ansibles import Url
import site
import string
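# Illustrative invocation (hypothetical paths; only flags defined in add_args()
# below are shown -- the job-flow JSON input itself is supplied via arguments
# added elsewhere in the framework):
#   python emr_simulator.py --num-processes 4 --keep-intermediates \
#       --scratch /tmp/dp_scratch --sort "sort -T /tmp/sort_tmp"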
def add_args(parser):
""" Adds args relevant to EMR simulator.
parser: object of type parser.ArgumentParser
No return value.
"""
parser.add_argument(
'-m', '--memcap', type=int, required=False, default=(1024*300),
help=('Maximum amount of memory (in bytes) to use per UNIX sort '
'instance.')
)
parser.add_argument(
'-p', '--num-processes', type=int, required=False, default=1,
help='Number of subprocesses to open at once.'
)
parser.add_argument(
'-t', '--max-attempts', type=int, required=False, default=4,
help=('Maximum number of times to attempt a task.')
)
parser.add_argument(
'-s', '--separator', type=str, required=False, default='\t',
help='Separator between successive fields in inputs and '
'intermediates.'
)
parser.add_argument(
'-k', '--keep-intermediates', action='store_const', const=True,
default=False,
help='Keeps all intermediate output.'
)
parser.add_argument(
'--keep-last-output', action='store_const', const=True,
default=False,
help='If --keep-intermediates is False, keeps outputs that are ' \
'unused as inputs by steps.'
)
parser.add_argument('--gzip-outputs', action='store_const',
const=True, default=False,
help='Compress step output files with gzip.'
)
parser.add_argument('--gzip-level', type=int, required=False,
default=3,
help='Level of gzip compression to use, if applicable.'
)
parser.add_argument('--ipy', action='store_const', const=True,
default=False,
help=('Uses IPython Parallel controller and engines to execute '
'tasks; this permits running a MapReduce job flow on a wide '
'array of cluster setups. Ignores --num-processes in favor '
'of the number of available engines.')
)
parser.add_argument('--ipcontroller-json', type=str, required=False,
default=None,
help=('Path to ipcontroller-client.json file; relevant only if '
'--ipy is invoked. See IPython Parallel documentation for '
'more information. If left unspecified, IPython\'s '
'default path is used.')
)
parser.add_argument('--ipy-profile', type=str, required=False,
default=None,
help=('Connects to this IPython profile; relevant only '
'if --ipy is invoked and takes precedence over '
'--ipcontroller-json.')
)
parser.add_argument('--scratch', type=str, required=False,
default=None,
help=('Where to write any intermediate output before copying to '
'consolidated intermediate directory. This is typically '
'a directory local to a given node. None means write '
'directly to consolidated intermediate directory. The '
'string \"-\" means write to a temporary directory securely '
'created by Python.')
)
parser.add_argument('--direct-write', action='store_const',
const=True, default=False,
help=('Always write intermediate files directly to consolidated '
'intermediate directory, even if --scratch is specified.')
)
parser.add_argument('--common', type=str, required=False,
default=None,
help=('Location of a writable directory accessible across all '
'nodes; this is where some temporary files may be stored '
'and is not important unless running in --ipy mode; if '
'left unspecified, defaults to Python temporary directory'))
parser.add_argument('--sort', type=str, required=False,
default='sort',
help=('Path to sort executable. Add arguments as necessary, '
'e.g. for specifying a directory for storing sort\'s '
'temporary files.'))
def init_worker():
""" Prevents KeyboardInterrupt from reaching a pool's workers.
Exiting gracefully after KeyboardInterrupt or SystemExit is a
challenge. The solution implemented here is by <NAME> and is from
http://noswap.com/blog/python-multiprocessing-keyboardinterrupt .
No return value.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def yopen(gzipped, *args):
""" Passes args on to the appropriate opener, gzip or regular.
A dooplicity.tools.xopen that uses the gzip module, which is
unsafe for writing. See xopen's docstring in dooplicity.tools for
more information.
gzipped: True iff gzip.open() should be used to open rather than
open(); False iff open() should be used; None if input should be
read and guessed
*args: unnamed arguments to pass
Return value: file object
"""
import gzip
if gzipped is None:
with open(args[0], 'rb') as binary_input_stream:
# Check for magic number
if binary_input_stream.read(2) == '\x1f\x8b':
gzipped = True
else:
gzipped = False
if gzipped:
return gzip.open(*args)
return open(*args)
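# Example (illustrative filenames): yopen(None, 'reads.fq.gz') sniffs the two-byte
# gzip magic number and returns a gzip file object, while yopen(False, 'reads.fq')
# falls back to the built-in open().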
def parsed_keys(partition_options, key_fields):
""" Parses UNIX sort options to figure out what to partition on.
Returned is a function that takes a line as input and returns a tuple
of elements from the line to partition on OR False if the args are
invalid.
partition_options: UNIX sort options like -k1,1 -k3 -k3,4r -k 4 -k 5,3
key_fields: number of fields from line to consider key
Return value: see above
"""
try:
# Make list of tuples of start, end indexes
parsed_args = [
tuple([int(el) - 1
for el in arg.strip().strip('nr').split(',')])
for arg in partition_options.split('-k')
if arg.strip() and len(arg.split(',')) <= 2
]
except Exception:
# args are invalid
return False
else:
exec (
"""def partitioned_key(line, separator):
key = line.strip().split(separator)[:{key_fields}]
return {return_value}
""".format(key_fields=key_fields,
return_value='+'.join(['key[{}:{}]'.format(
arg[0], arg[1] + 1 if len(arg) == 2 else ''
) for arg in parsed_args]))
)
return partitioned_key
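# Example (illustrative): with partition_options='-k1,1 -k3' and key_fields=3,
# the generated partitioned_key('a\tb\tc\td', '\t') evaluates
# key[0:1] + key[2:] -> ['a', 'c'], i.e. the first and third key fields.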
def gzip_into(gzip_level, outfn):
return subprocess.Popen('gzip -%d >%s' % (gzip_level, outfn),
shell=True, bufsize=-1,
executable='/bin/bash',
stdin=subprocess.PIPE)
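# Note: gzip_into() returns the Popen object itself; callers write raw bytes to
# its .stdin and are responsible for closing that pipe and waiting on the process.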
def presorted_tasks(input_files, process_id, sort_options, output_dir,
key_fields, separator, partition_options, task_count,
memcap, gzip=False, gzip_level=3, scratch=None,
direct_write=False, sort='sort', mod_partition=False,
max_attempts=4):
""" Partitions input data into tasks and presorts them.
Files in output directory are in the format x.y, where x is a task
number on the interval [0, number of tasks - 1], and y is a process
ID that identifies which process created the file. y is unimportant;
the glob x.* should be catted to the reducer.
Formula for computing task assignment:
int(hashlib.md5(key).hexdigest(), 16) % (task_count)
input_files: list of files on which to operate.
process_id: unique identifier for current process.
sort_options: options to use when presorting.
output_dir: directory in which to write output files.
key_fields: number of fields from a line to consider the key.
separator: separator between successive fields from line.
partition_options: sort-like options to use when partitioning.
task_count: number of tasks in which to partition input.
memcap: maximum amount of memory (in bytes) to use per UNIX sort instance.
gzip: True iff all files written should be gzipped; else False.
gzip_level: Level of gzip compression to use, if applicable.
scratch: where to write output before copying to output_dir. If "-"
string, writes to temporary directory; if None, writes directly
to output directory.
direct_write: write intermediate files directly to final destination,
no matter what scratch is.
sort: path to sort executable
mod_partition: if True, task is assigned according to formula
(product of fields) % task_count
max_attempts: maximum number of times to attempt partitioning input.
MUST BE FINAL ARG to be compatible with
execute_balanced_job_with_retries().
Return value: None if no errors encountered; otherwise error string.
"""
try:
from operator import mul
task_streams = {}
if scratch is not None:
scratch = os.path.expanduser(os.path.expandvars(scratch))
if gzip:
task_stream_processes = {}
if direct_write:
final_output_dir = output_dir
elif scratch == '-':
# Write to temporary directory
final_output_dir = output_dir
try:
output_dir = tempfile.mkdtemp()
except OSError as e:
return ('Problem encountered creating temporary '
'scratch subdirectory: %s' % e)
elif scratch:
# Write to temporary directory in special location
final_output_dir = output_dir
try:
os.makedirs(scratch)
except OSError as e:
if os.path.isfile(scratch):
return ('Scratch directory | |
test = {
'name': 'Problem 10',
'points': 2,
'suites': [
{
'cases': [
{
'code': r"""
>>> p0 = [2, 2, 3]
>>> p1 = [6, 1, 2]
>>> fastest_words(game(['What', 'great', 'luck'], [p0, p1]))
[['What'], ['great', 'luck']]
>>> p0 = [2, 2, 3]
>>> p1 = [6, 1, 3]
>>> fastest_words(game(['What', 'great', 'luck'], [p0, p1])) # with a tie, choose the first player
[['What', 'luck'], ['great']]
>>> p2 = [4, 3, 1]
>>> fastest_words(game(['What', 'great', 'luck'], [p0, p1, p2]))
[['What'], ['great'], ['luck']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[2, 4, 3, 5, 1]]
>>> fastest_words(game(['neurine', 'statutably', 'quantivalent', 'intrarachidian', 'itinerantly'], p))
[['neurine', 'statutably', 'quantivalent', 'intrarachidian', 'itinerantly']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[4, 1, 1], [2, 5, 5]]
>>> fastest_words(game(['unsimilar', 'conditioning', 'crystallogenical'], p))
[['conditioning', 'crystallogenical'], ['unsimilar']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[1, 3, 2, 4, 3]]
>>> fastest_words(game(['intraepiphyseal', 'sporangiform', 'saccharate', 'hermeneutic', 'butanal'], p))
[['intraepiphyseal', 'sporangiform', 'saccharate', 'hermeneutic', 'butanal']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[], [], []]
>>> fastest_words(game([], p))
[[], [], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[2, 3, 5, 2, 1, 5], [3, 5, 3, 5, 4, 1], [2, 1, 3, 1, 2, 3]]
>>> fastest_words(game(['multivoltine', 'nonpacifist', 'oviferous', 'postelection', 'multidigitate', 'reallege'], p))
[['multivoltine', 'multidigitate'], ['oviferous', 'reallege'], ['nonpacifist', 'postelection']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[4, 1, 1, 5, 2], [1, 4, 5, 4, 2], [5, 3, 2, 2, 3]]
>>> fastest_words(game(['notchel', 'phengitical', 'dollier', 'bushlet', 'sciographic'], p))
[['phengitical', 'dollier', 'sciographic'], ['notchel'], ['bushlet']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[5], [3], [3]]
>>> fastest_words(game(['cisplatine'], p))
[[], ['cisplatine'], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[4]]
>>> fastest_words(game(['accompaniment'], p))
[['accompaniment']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[1]]
>>> fastest_words(game(['elasticness'], p))
[['elasticness']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[4, 2, 5, 4], [1, 3, 2, 1], [4, 2, 5, 1]]
>>> fastest_words(game(['temporomandibular', 'unannexed', 'umbellar', 'rambutan'], p))
[['unannexed'], ['temporomandibular', 'umbellar', 'rambutan'], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[2, 1, 2, 3, 1], [2, 1, 3, 1, 5]]
>>> fastest_words(game(['intercreate', 'sulpholipin', 'inkhornizer', 'lycanthropic', 'optimize'], p))
[['intercreate', 'sulpholipin', 'inkhornizer', 'optimize'], ['lycanthropic']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[], []]
>>> fastest_words(game([], p))
[[], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[1, 2, 5, 2, 3], [4, 3, 1, 1, 5], [3, 2, 4, 5, 4]]
>>> fastest_words(game(['choultry', 'caryopilite', 'unowed', 'overslaugh', 'unshriveled'], p))
[['choultry', 'caryopilite', 'unshriveled'], ['unowed', 'overslaugh'], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[], [], []]
>>> fastest_words(game([], p))
[[], [], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[3, 5, 3, 1]]
>>> fastest_words(game(['nearby', 'atriopore', 'conchiferous', 'zygostyle'], p))
[['nearby', 'atriopore', 'conchiferous', 'zygostyle']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[4, 4, 2, 1, 3]]
>>> fastest_words(game(['infinite', 'uncorked', 'subjacency', 'looplike', 'nasoethmoidal'], p))
[['infinite', 'uncorked', 'subjacency', 'looplike', 'nasoethmoidal']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[5, 2, 1, 1, 1, 3], [3, 5, 1, 2, 3, 3]]
>>> fastest_words(game(['pauldron', 'kairine', 'sulpholysis', 'kalo', 'cecidiology', 'progne'], p))
[['kairine', 'sulpholysis', 'kalo', 'cecidiology', 'progne'], ['pauldron']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[5, 2, 2, 2, 1, 3], [3, 4, 4, 4, 2, 2]]
>>> fastest_words(game(['cnidophore', 'orrery', 'bargham', 'iridentropium', 'nickelous', 'cedarbird'], p))
[['orrery', 'bargham', 'iridentropium', 'nickelous'], ['cnidophore', 'cedarbird']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[2, 3, 3], [1, 1, 3], [2, 3, 3]]
>>> fastest_words(game(['inadequateness', 'capsulate', 'careers'], p))
[['careers'], ['inadequateness', 'capsulate'], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[3, 1, 3, 2, 3, 3], [5, 1, 2, 4, 2, 5]]
>>> fastest_words(game(['havent', 'kilneye', 'wistful', 'scorbutic', 'chichipe', 'antemeridian'], p))
[['havent', 'kilneye', 'scorbutic', 'antemeridian'], ['wistful', 'chichipe']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[3, 1, 1, 3], [3, 4, 4, 1], [1, 2, 3, 3]]
>>> fastest_words(game(['bran', 'stratum', 'onager', 'drinking'], p))
[['stratum', 'onager'], ['drinking'], ['bran']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[], []]
>>> fastest_words(game([], p))
[[], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[4, 5, 1, 5], [3, 5, 1, 3]]
>>> fastest_words(game(['saltless', 'bailage', 'nonformation', 'yeven'], p))
[['bailage', 'nonformation'], ['saltless', 'yeven']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[], [], []]
>>> fastest_words(game([], p))
[[], [], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[2, 5, 4], [5, 4, 3], [4, 4, 4]]
>>> fastest_words(game(['upbid', 'weave', 'titterer'], p))
[['upbid'], ['weave', 'titterer'], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[3, 1, 5, 5, 2, 5]]
>>> fastest_words(game(['powell', 'indifferently', 'palatograph', 'capucine', 'scowlful', 'noration'], p))
[['powell', 'indifferently', 'palatograph', 'capucine', 'scowlful', 'noration']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[1, 5, 3, 2, 4, 2], [5, 1, 3, 4, 1, 3]]
>>> fastest_words(game(['tautomeric', 'unprejudicedly', 'disregardance', 'reconveyance', 'rebellow', 'gaiety'], p))
[['tautomeric', 'disregardance', 'reconveyance', 'gaiety'], ['unprejudicedly', 'rebellow']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[]]
>>> fastest_words(game([], p))
[[]]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[5], [1]]
>>> fastest_words(game(['incoherentific'], p))
[[], ['incoherentific']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[2, 1, 4], [2, 1, 2]]
>>> fastest_words(game(['accompliceship', 'dumpish', 'unqueried'], p))
[['accompliceship', 'dumpish'], ['unqueried']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[4, 2, 4, 2, 2], [2, 4, 3, 3, 5]]
>>> fastest_words(game(['counterflange', 'justly', 'contralto', 'erythematous', 'intromissive'], p))
[['justly', 'erythematous', 'intromissive'], ['counterflange', 'contralto']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[], [], []]
>>> fastest_words(game([], p))
[[], [], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[2, 4, 3, 2, 5, 4], [2, 4, 2, 3, 4, 1]]
>>> fastest_words(game(['draughtmanship', 'arboriform', 'oppugner', 'nucleonics', 'reducer', 'watered'], p))
[['draughtmanship', 'arboriform', 'nucleonics'], ['oppugner', 'reducer', 'watered']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[]]
>>> fastest_words(game([], p))
[[]]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[5, 4], [4, 3]]
>>> fastest_words(game(['collectorship', 'radome'], p))
[[], ['collectorship', 'radome']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[1, 2, 1, 4], [4, 1, 1, 2]]
>>> fastest_words(game(['clinometrical', 'stuporose', 'didst', 'hexactinellidan'], p))
[['clinometrical', 'didst'], ['stuporose', 'hexactinellidan']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[3, 3], [5, 2]]
>>> fastest_words(game(['surdation', 'piddler'], p))
[['surdation'], ['piddler']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[3, 4, 4]]
>>> fastest_words(game(['unbattered', 'ridicule', 'undersweep'], p))
[['unbattered', 'ridicule', 'undersweep']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[5, 3, 2], [2, 5, 1]]
>>> fastest_words(game(['noggen', 'goofy', 'undimerous'], p))
[['goofy'], ['noggen', 'undimerous']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[1], [5]]
>>> fastest_words(game(['unidigitate'], p))
[['unidigitate'], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[1, 3, 2], [5, 3, 4], [3, 4, 4]]
>>> fastest_words(game(['boga', 'unzephyrlike', 'infragenual'], p))
[['boga', 'unzephyrlike', 'infragenual'], [], []]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[4, 2, 3]]
>>> fastest_words(game(['dysanalyte', 'whiffletree', 'mamelonation'], p))
[['dysanalyte', 'whiffletree', 'mamelonation']]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> p = [[5, 5, 2], [3, 3, 3], [5, 4, 3]]
>>> fastest_words(game(['parapet', 'linenman', 'noneffervescent'], p))
[['noneffervescent'], ['parapet', 'linenman'], []]
""",
'hidden': False,
'locked': | |
import random as r
from time import sleep
import time as t
loading = ['Loading -', 'Loading \\', 'Loading |', 'Loading /']
for i in range(10):
for load in loading:
sleep(0.25)
print(f'\r{load}', end='')
print()
print("Welcome to...")
sleep(1)
print("""
▄ ▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄ ▄
▐░▌ ▐░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░▌ ▐░▌
▐░▌ ▐░▌▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀█░▌▐░▌ ▐░▌
▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌
▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌
▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌
▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀▀▀ ▐░█▀▀▀▀▀▀▀▀▀ ▀▀▀▀█░█▀▀▀▀
▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌
▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌
▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌
▀ ▀ ▀ ▀ ▀ ▀ ▀
▄ ▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄ ▄ ▄▄▄▄▄▄▄▄▄▄▄
▐░▌ ▐░▌▐░░░░░░░░░░░▌▐░▌ ▐░▌▐░░░░░░░░░░░▌
▐░▌ ▐░▌▐░█▀▀▀▀▀▀▀█░▌▐░▌ ▐░▌▐░█▀▀▀▀▀▀▀█░▌
▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌
▐░█▄▄▄▄▄▄▄█░▌▐░▌ ▐░▌▐░▌ ▐░▌▐░█▄▄▄▄▄▄▄█░▌
▐░░░░░░░░░░░▌▐░▌ ▐░▌▐░▌ ▐░▌▐░░░░░░░░░░░▌
▐░█▀▀▀▀▀▀▀█░▌▐░▌ ▐░▌▐░▌ ▐░▌▐░█▀▀▀▀█░█▀▀
▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌▐░▌ ▐░▌
▐░▌ ▐░▌▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌▐░▌ ▐░▌
▐░▌ ▐░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░▌ ▐░▌
▀ ▀ ▀▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀▀ ▀ ▀
""")
sleep(1)
name = input("Please enter your name: ")
health = 100
money = 0
start_time = t.time()
def loadingbar():
print("Loading...")
for i in range(51):
sleep(r.uniform(0.05, 0.2))
bar = ('□' * i) + '-' * (50 - i)
print(f'\r[{bar}]', end='')
print()
print('-=!LOADED!=-')
def timer(time):
current_time = t.time()
result = current_time - time
seconds = int(result)
minutes = seconds / 60
minutes_string = str(minutes)
minutes_list = minutes_string.split('.')
seconds_decimal = round(float("0." + minutes_list[1]) * 60)
minutes_time = int(minutes)
return "%02d minutes and %02d seconds" % (minutes_time, seconds_decimal)
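# Note (equivalent form): the arithmetic above amounts to
# divmod(int(t.time() - time), 60); e.g. an elapsed time of 125 seconds is
# formatted as "02 minutes and 05 seconds".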
def checkpoint(point):
if point.lower() == 'mysteriousworld':
sleep(1)
loadingbar()
sleep(2)
act_two()
elif point.lower() == 'fieldofwonders':
sleep(1)
loadingbar()
sleep(2)
act_three()
elif point.lower() == 'thetruthoftheworld':
sleep(1)
loadingbar()
sleep(2)
act_four()
elif point.lower() == 'thebigbadbar':
sleep(1)
loadingbar()
sleep(2)
act_five()
else:
sleep(1)
print(f"{name} forgets his place...")
sleep(2)
menu()
def waiting():
useless = input('')
def death(cause):
global health
global money
if cause == '1':
health = 100
money = 0
backpack.inventory = ['note']
print("""
You pretty much died of laziness in bed...
""")
waiting()
menu()
elif cause == '2':
health = 100
money = 0
backpack.inventory = ['note']
print("""
Died of cowardice.. Maybe cross the river next time.
""")
waiting()
menu()
elif cause == '3':
health = 100
money = 0
backpack.inventory = ['note']
print("""
Wow, you died to a mushroom...
""")
waiting()
menu()
elif cause == '4':
health = 100
money = 0
backpack.inventory = ['note']
print("""
Died from an explosion huh... Not too shabby!
""")
waiting()
menu()
elif cause == '5':
health = 100
money = 0
backpack.inventory = ['note']
print("""
A real shame that was...
""")
waiting()
menu()
elif cause == '6':
health = 100
money = 0
backpack.inventory = ['note']
print("""
There was still much for you to enjoy in life...
""")
menu()
def update_health_money(h, m):
global health
global money
health += h
money += m
def menu():
menu_choice = input("What would you like to do?\n[a]Play!\n[b]Check point\n[c]Credits\n[d]Quit\n>")
choice = menu_choice.lower()
if choice == 'a':
sleep(1)
loadingbar()
act_one()
elif choice == 'b':
sleep(1)
checkpoint(input("What is your point passphrase?\n>"))
elif choice == 'c':
credits()
elif choice == 'd':
sleep(1)
response = False
while not response:
confirm = input("Are you sure you want to quit? y/n\n>")
if confirm.lower() == 'y':
quit()
elif confirm.lower() == 'n':
response = True
return menu()
else:
print("Invalid reply...")
def credits(): # Make a cool credit sequence
sleep(1)
print("__Credits__")
sleep(0.5)
print("Lead Developer: Alan")
sleep(0.5)
print("Lead Director: Alan")
sleep(0.5)
print("Original Idea: Alan")
sleep(0.5)
print("Sponsors: Alan")
return menu()
class Backpack:
inventory = ['note']
def __init__(self, user):
self.name = user
def __repr__(self):
return f"The current user is {self.name}"
def add_item(self, item):
self.inventory.append(item)
def boss_override(self):
if 'mushroom' in self.inventory:
new_inventory = list(filter(lambda item: item == 'mushroom', self.inventory))
self.inventory = new_inventory
else:
self.inventory = []
def trash_item(self, item):
response = False
while not response:
choice = input(f"{self.name}, are you sure you wish to throw away your {item}? y/n\n>" )
if choice.lower() == 'y':
response = True
sleep(1)
try:
index = self.inventory.index(item)
self.inventory.pop(index) # could also use .remove(item)
print(f"{self.name} has thrown away his {item}")
except ValueError:
print(f"{self.name} does not have this item...")
elif choice.lower() == 'n':
response = True
sleep(1)
print("Good choice...")
else:
print("Invalid response!")
def view_backpack(self):
print((", ".join(self.inventory)))
def use_item(self, item):
global health
global money
if item.lower() == 'health potion' and item in self.inventory:
sleep(1)
print(f"{self.name} feels power surge through his veins...")
update_health_money(25, 0)
self.inventory.remove(item)
elif item.lower() == 'banknote' and item in self.inventory:
sleep(1)
print(f"{self.name} jumps out of pure joy!\n'TEN DOLLARS!!!' he exclaims...")
update_health_money(0, 10)
self.inventory.remove(item)
elif item.lower() == 'mushroom' and item in self.inventory:
sleep(1)
print(f"{name} decides that he wants to eat the strange mushroom.")
update_health_money(-25, 0)
sleep(1)
print("*VOMITS*")
sleep(1)
self.inventory.remove(item)
print(f"{name} wakes up after being unconscious...\nWhat happened..?")
if health <= 0:
death('3')
elif item.lower() == 'note' and item in self.inventory:
sleep(1)
print(f"I'm trapped in an endless cycle... \n From: ~~~~~~")
sleep(1)
print("The note suddenly disappears...")
self.inventory.remove(item)
else:
sleep(1)
print(f"{self.name} searches his backpack, only to realize that he is a fool.")
backpack = Backpack(name)
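# Illustrative use of the Backpack API defined above (item names are the ones
# the game itself uses):
#   backpack.add_item('banknote')   # put a banknote in the inventory
#   backpack.view_backpack()        # prints e.g. "note, banknote"
#   backpack.use_item('banknote')   # cash it in: +10 money via update_health_money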
def show_stats():
print(f"Health: {health}")
print(f"Money: {money}")
print("Time Played: {}".format(timer(start_time)))
print(backpack.inventory)
def use_backpack():
choice = input("What would you like to do?\n[a]Throw an item away\n[b]Use an item\n[c]View inventory\n[d]Exit\n>")
c = choice.lower()
if c == 'a':
sleep(2)
show_stats()
sleep(2)
trash = input("Which item would you like to throw away?\n>")
trash = trash.lower()  # lower-case the item name; avoid shadowing the time alias `t`
sleep(2)
show_stats()
backpack.trash_item(trash)
show_stats()
elif c == 'b':
sleep(2)
show_stats()
sleep(2)
use = input("Which item would you like to use?\n>")
u = use.lower()
sleep(2)
show_stats()
backpack.use_item(u)
show_stats()
elif c == 'c':
sleep(2)
print(f"{name} opens his backpack to see what things he has collected so far...")
show_stats()
elif c == 'd':
sleep(2.5)
print(f"{name} stops rummaging through his backpack and gets on his feet.")
else:
print(f"Confused, {name} closes up his bag and continues his journey.")
def games(game):
if game == 'rps':
tries = 3
wins = 0
while tries > 0:
tries -= 1
bot = r.randint(1, 3)
bot_c = ''
if bot == 1:
bot_c = 'rock'
elif bot == 2:
bot_c = 'paper'
elif bot == 3:
bot_c = 'scissors'
choice = input("Choose:\n[1]Rock\n[2]Paper\n[3]Scissors\n>")
if choice.isdigit():
user = int(choice)
else:
user = 10
if user == 1:
user_c = 'ROCK!'
elif user == 2:
user_c = 'PAPER!'
elif user == 3:
user_c = 'SCISSORS!'
else:
user_c = 'NOTHING!'
print("ROCK, PAPER, SCISSORS, SHOOT!")
if user == bot:
waiting()
print(f"{name} chose {user_c}!")
waiting()
print(f"Opponent: {bot_c}")
sleep(0.5)
print("IT'S A TIE!")
elif user == 1 and bot == 3:
waiting()
print(f"{name} chose ROCK!")
waiting()
print(f"Opponent: {bot_c}")
sleep(0.5)
print(f"{name} WINS!")
backpack.add_item('banknote')
print("Banknote added to inventory.")
wins += 1
elif user == 2 and bot == 1:
waiting()
print(f"{name} chose PAPER!")
waiting()
print(f"Opponent: {bot_c}")
sleep(0.5)
print(f"{name} WINS!")
backpack.add_item('banknote')
print("Banknote added to inventory.")
wins += 1
elif user == 3 and bot == 2:
waiting()
print(f"{name} chose SCISSORS!")
waiting()
print(f"Opponent: {bot_c}")
sleep(0.5)
print(f"{name} WINS!")
backpack.add_item('banknote')
print("Banknote added to inventory.")
wins += 1
else:
waiting()
print(f"{name} chose {user_c}")
waiting()
print(f"Opponent: {bot_c}")
sleep(0.5)
print("OPPONENT WINS!")
sleep(1)
print("GAME ENDED!")
waiting()
if wins >= 2:
print(f"{name} earned a bonus banknote!")
backpack.add_item('banknote')
else:
print(f"{name} is saddened by his loss, but is content with the results.")
elif game == 'minefield':
print("MINEFIELD! You have twenty-four rocks to throw into the field... If a rock happens to hit a mine, it'll hurt!")
board = [['□'] * 5 for i in range(5)]
rocks = 24
while rocks > 0:
for row in board:
print(" ".join(row))
user_row = input("Enter the row: ")
user_col = input("Enter the column: ")
if user_row.isdigit() and user_col.isdigit():
if int(user_row) > 0 and int(user_col) > 0:
if int(user_row) < 6 and int(user_col) < 6:
user1_row = int(user_row) - 1
user1_col = int(user_col) - 1
rand_row = r.randint(0, 4)
rand_col = r.randint(0, 4)
while board[rand_row][rand_col] == 'X':
rand_row = r.randint(0, 4)
rand_col = r.randint(0, 4)
if board[user1_row][user1_col] == 'X':
print("You already chose that plot!!!")
else:
rocks -= 1
if user1_row == rand_row and user1_col == rand_col:
board[user1_row][user1_col] = 'X'
sleep(1)
print("💣💣💣💣💣")
sleep(0.25)
print("BOOOM!")
update_health_money(-25, 0)
waiting()
else:
board[user1_row][user1_col] = 'X'
else:
print("That's too high!")
else:
print("That's too low!")
else:
print("Invalid response.")
sleep(1)
if health <= 0:
death('4')
else:
print("That seems like enough rocks to safely assume the mines are duds.")
elif game == 'letter':
with open("letter1529.txt", "w") as f:
f.write("""
- | |
"""
Functions to combine nucleotide pairs into a reduced (3-nt) alphabet, to combine lines of multiline formats, and to revert combined formats to regular form
Usage:
genome_3nt.py FASTA_FILENAME FASTA_OUTPUT FASTQ_FILENAME FASTQ_OUTPUT
Options:
-FASTA_FILENAME fasta filename
"""
from Utility.generators_utilities import class_generator
from Utility.parallel_generator import parallel_generator
import subprocess
from Utility.Fastq_class import Fastq
from Utility.Samfile_class import SamLine
import re
from Processing import sam_sorting
from docopt import docopt
import itertools
from Utility.multiline_sort import multiline_sort
import logging
import os
import shutil
import sys
def pre(fasta_file,fastq_file,out_fasta_file,out_fastq_file, nt_replacement):
if type(nt_replacement) is list:
nt_replacement = "".join(nt_replacement)
if fasta_file == out_fasta_file:
fasta_file_temp = fasta_file + "_temp_copy.fasta"
shutil.copyfile(fasta_file, fasta_file_temp)
fasta_nt = fasta_to_3nt(nt_replacement, fasta_file_temp,output_file=out_fasta_file)
os.remove(fasta_file_temp)
else:
fasta_nt = fasta_to_3nt(nt_replacement, fasta_file,output_file=out_fasta_file)
#logging.info(f"==exec==\nmultiline_to_3nt({2},{'~'}, {nt_replacement}, {fasta_file},output_file={out_fasta_file})")
#logging.info(f"==exec==\nmultiline_to_3nt({2},chr(127), {nt_replacement}, {fastq_file},output_file={out_fastq_file})")
if fastq_file is not None:
fastq_nt = multiline_to_3nt(4, chr(127), 2, nt_replacement, fastq_file , output_file=out_fastq_file)
else:
fastq_nt = None
return fastq_nt, fasta_nt
def post(fastq_file, sam_file, out_fastq_file, out_sam_file, **kwargs):
#logging.info(f"==exec==\nrevert_fastq_to_4nt({fastq_file}, kwargs['fastq_lib'],output_filename=out_fastq_file)multiline_to_3nt({2},chr(127), {fastq_file},output_file={out_fastq_file})")
if fastq_file is not None:
fastq_ntr = revert_fastq_to_4nt(fastq_file, kwargs['fastq_lib'],output_filename=out_fastq_file)
else:
fastq_ntr = None
#logging.info(f"==exec==\nmultiline_to_3nt({2},chr(127), {fastq_file},output_file={out_fastq_file})")
sam_ntr = revert_sam_to_4nt(sam_file, output_filename=out_sam_file,fastq_lib=kwargs['fastq_lib'])
return fastq_ntr, sam_ntr
# nt_pairings = {'AG': 'A', 'GA': 'A', 'CTU': 'U', 'AC': 'C', 'CA': 'C', 'CG': 'G', 'GC': 'G'}
nt_pairings = {"{x}{y}".format(x=x, y=y): min(x, y) for x, y in itertools.product("AGCT", repeat=2)}
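# The comprehension above maps every ordered pair of nucleotides to the
# alphabetically smaller one, e.g. nt_pairings['AG'] == nt_pairings['GA'] == 'A'
# and nt_pairings['CT'] == 'C'.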
def fasta_to_3nt(nt_replacement, filename, output_file=None):
'''
alternative function to multiline_to_3nt for fasta files with multiple lines for the sequence, instead of a single
line with the entire sequence
:param nt_replacement: nucleotides to combine (ex: 'AG', 'CT')
:param filename: filename for the input
:param output_file: filename for the output, if desired
:return: output filename
'''
if output_file is None:
splited = filename.split(".")
output_file = ".".join(splited[:-1])+"_nt."+splited[-1]
with open(filename, 'r'), open(output_file, 'w'):
# TODO: does case matter?
#if type(nt_replacement) is list:
#nt_replacement = "".join(nt_replacement)
#lower_case_nt = nt_replacement.lower()
lower_case_replacement = nt_pairings[nt_replacement].lower()
#print(lower_case_replacement)
command = f"cat {filename} | sed -r '/^[ \t]*$/d' | sed '/^[NAGCTnagct]/s/[{nt_replacement}]/{nt_pairings[nt_replacement]}/g' "\
f" | sed '/^[NAGCTnagct]/s/[{nt_replacement.lower()}]/{lower_case_replacement}/g' > {output_file} "
#command = f"cat {filename} | sed -r '/^[ \t]*$/d' | sed '/[AGCTagct]*/s/[{nt_replacement}]/{nt_pairings[nt_replacement]}/ig' > {output_file} "
#^ also works
# print(command)
try:
subprocess.run(command, shell=True)
except Exception as e:
raise e
return output_file
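# Example (illustrative filenames): fasta_to_3nt('AG', 'genome.fasta') writes
# 'genome_nt.fasta', in which every A/G (and a/g) on sequence lines is collapsed
# to A (a) while header lines are left untouched.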
def multiline_to_3nt(number_of_lines, new_delim, sequence_line, nt_replacement, multi_filename=None, output_file=None):
'''
:param number_of_lines: number of lines in each entry
:param new_delim: any character that is not within the alphabet of possible characters that are used in the format
:param sequence_line: the number of the line containing the sequence to be modified
:param nt_replacement: the nucleotides to be combined
:param multi_filename: name of the file containing the data
:param output_file: optional filename to write output to. if not specified output goes to input filename with .nt extension
:return:
'''
if output_file is None:
splited = multi_filename.split(".")
output_file = ".".join(splited[:-1])+"_nt."+splited[-1]
with open(multi_filename, 'r'), open(output_file, 'w'):
# TODO: does case matter?
if type(nt_replacement) is list:
nt_replacement = "".join(nt_replacement)
#command = f"cat {multi_filename} | sed -r '/^[ \t]*$/d' |paste -d \"{new_delim}\" {' -'*int(number_of_lines)} \
#| awk -F{new_delim} 'BEGIN {{OFS = \"{new_delim}\"}}; {{gsub(/[{nt_replacement}]/,\"{nt_pairings[nt_replacement]}\",${sequence_line})}};{{print}};'\
#| sed 's/{1}/\\n/g' > {output_file} "
#| awk -F{new_delim} 'BEGIN {{OFS = \"{new_delim}\"}}; {{gsub(/[tolower({nt_replacement})]/,tolower(\"{nt_pairings[nt_replacement]}\"),${sequence_line})}};{{print}}' \
#OLD command with number variable replacement rather than format string replacement.
command = "cat {0} | sed -r '/^[ \t]*$/d' |paste -d \"{1}\" {2}\
| awk -F{1} 'BEGIN {{OFS = \"{1}\"}}; {{${5} = toupper(${5}); gsub(/[{3}]/,\"{4}\",${5})}};{{print}}' |" \
"sed 's/{1}/\\n/g' > {6} ".format(multi_filename, new_delim, ' -'*int(number_of_lines), nt_replacement, nt_pairings[nt_replacement], sequence_line, output_file)
# print(command)
#bash command for fastqs, AT/at to A/a
#"cat smallseq.fastq | sed -r '/^[ \t]*$/d' |paste -d "~" - - - -| awk -F~ 'BEGIN {OFS="~"};{gsub(/[AT]/,"A",$2);gsub(/[at]/,"a",$2)};{print}' | sed 's/~/\n/g' >smallseq.nt.fastq"
try:
subprocess.run(command, shell=True)
except Exception as e:
raise e
return output_file
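# Example (illustrative; mirrors the call in pre() above): for FASTQ input,
# multiline_to_3nt(4, chr(127), 2, 'AG', 'reads.fastq') pastes each 4-line record
# onto one chr(127)-delimited line, upper-cases the sequence field (line 2) and
# collapses A/G to A, then splits records back onto four lines as 'reads_nt.fastq'.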
def revert_fastq_to_4nt(fastq_3nt_filename, fastq_4nt_filename, output_filename=None):
'''
:param fastq_3nt_filename: negative reads from alignment of 3nt fastq library
:param fastq_4nt_filename: original 4nt fastq library
:param output_filename:
:return:
'''
sorted_fastq_3nt = multiline_sort(4,'~',1, fastq_3nt_filename)
sorted_fastq_4nt = multiline_sort(4,'~',1, fastq_4nt_filename)
if output_filename is None:
splited = fastq_3nt_filename.split(".")
output_filename = ".".join(splited[:-1]) + "_ntR." + splited[-1]
with open(sorted_fastq_3nt, 'r') as fastq_3nt, open(sorted_fastq_4nt, 'r') as fastq_4nt:
gen_3nt = class_generator(Fastq, file = fastq_3nt, number_of_lines=4)
gen_4nt = class_generator(Fastq, file = fastq_4nt, number_of_lines=4)
get_fastq_id = lambda fq: fq.strings[0]
with open(output_filename, 'w') as out:
p_gen = parallel_generator([gen_3nt, gen_4nt], [get_fastq_id, get_fastq_id])
for [fq_3, fq_4] in p_gen:
if (fq_3 is not None) and (fq_4 is not None):
fq_3[0].strings[1] = fq_4[0].strings[1]
out.write(re.sub('\n\n', '\n',str(fq_3[0])))
return output_filename
def reverse_multiline(number_of_lines, new_delim, sequence_line, multi_filename, output_file=None):
'''
:param number_of_lines: number of lines in each entry
:param new_delim: any character that is not within the alphabet of possible characters that are used in the format
:param sequence_line: the number of the line containing the sequence to be modified
:param multi_filename: name of the file containing the data
:param output_file: optional filename to write output to. if not specified output goes to input filename with .nt extension
:return:
'''
if output_file is None:
splited = multi_filename.split(".")
output_file = ".".join(splited[:-1])+"_nt."+splited[-1]
with open(multi_filename, 'r'), open(output_file, 'w'):
#OLD command with number variable replacement rather than format string replacement.
command = "cat {0} | sed -r '/^[ \t]*$/d' |paste -d \"{1}\" {2}\
| awk -F{1} 'BEGIN {{OFS = \"{1}\"}};{{cmd=\"echo \"${3}\" | rev\"; cmd | getline reversed_line ; close(cmd) ;gsub(${3},reversed_line,${3})}};{{print}}' |" \
"sed 's/{1}/\\n/g' > {4} ".format(multi_filename, new_delim, ' -'*int(number_of_lines), sequence_line, output_file)
# print(command)
#bash command for fastqs, AT/at to A/a
#"cat smallseq.fastq | sed -r '/^[ \t]*$/d' |paste -d "~" - - - -| awk -F~ 'BEGIN {OFS="~"};{gsub(/[AT]/,"A",$2);gsub(/[at]/,"a",$2)};{print}' | sed 's/~/\n/g' >smallseq.nt.fastq"
try:
subprocess.run(command, shell=True)
except Exception as e:
raise e
return output_file
def complement_multiline(number_of_lines, new_delim, sequence_line, multi_filename, output_filename=None):
'''
Complement the sequence line of a multilined format file. Nucleotides are switched to their appropriate base pair (A->T, C->G, etc)
:param number_of_lines: number of lines in each entry
:param new_delim: any character that is not within the alphabet of possible characters that are used in the format
:param sequence_line: the number of the line containing the sequence to be modified
:param multi_filename: name of the file containing the data
:param output_filename: optional filename to write output to. if not specified, "_complement" is appended to the input filename
:return:
'''
#TODO: only works on fasta now
if output_filename is None:
splited = multi_filename.split(".")
output_filename = ".".join(splited[:-1])+"_complement."+splited[-1]
with open(multi_filename, 'r') as fasta_file, open(output_filename, 'w') as out:
translation = str.maketrans('ACGTNacgtn', 'TGCANtgcan')
#TODO: use "tr AGCTagct TCGAtcga" command to flip the nucleotides
fasta_gen = class_generator(str, file = fasta_file)
for line in fasta_gen:
if line[0] != '>':
line = line.translate(translation)
out.write(line.strip("\n") + "\n")
return output_filename
def reverse_complement_sequence(sequence):
'''
reverse complement of sequence of nucleotides
:return: reversed sequence
'''
reverse_dict = {'a':'t', 't':'a', 'g':'c', 'c':'g', 'A':'T', 'T':'A','G':'C' ,'C':'G', 'N':'N', 'n':'n'}
reversed_sequence = ''
for nt in reversed(sequence):
reversed_sequence += reverse_dict[nt]
return reversed_sequence
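# Example: reverse_complement_sequence('ATGC') == 'GCAT'
# (reverse 'ATGC' -> 'CGTA', then complement each base: C->G, G->C, T->A, A->T).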
def revert_samline_to_4nt(samline, fastq_entry):
'''
replace the sequence field of a SAM file line with the sequence from a fastq entry
:param samline: samline to revert. Of type SamLine, from Utility.Samfile_class
:param fastq_entry: fastq entry to use as reference
:return: the updated SamLine object
'''
fastq_sequence = fastq_entry.get_seq().strip("\n")
if samline.flags == 16:
fastq_sequence = reverse_complement_sequence(fastq_sequence)
samline.sequence = fastq_sequence
return samline
def flip_sam_flags(samfile, flag_to_replace, new_flag, output_filename=None):
if output_filename is None:
splited = samfile.split(".")
output_filename = ".".join(splited[:-1]) + "_flipped." + splited[-1]
#create temp file if input and output filenames are the same
if samfile == output_filename:
temp_name=samfile+"temp"
shutil.copyfile(samfile, temp_name)
samfile = temp_name
with open(samfile, 'r') as sam_file, open(output_filename, 'w') as out:
header_gen = class_generator(str, skip_condition = (lambda line: line[0]!='@'), file=sam_file)
for line in header_gen:
out.write(str(line))
with open(samfile, 'r') as sam_file, open(output_filename, 'a') as out:
sam_gen = class_generator(SamLine, skip_condition = (lambda line: line[0]=='@'), file = sam_file)
for line in sam_gen:
if line.flags == flag_to_replace:
line.flags = new_flag#samclass turns flag to string using str(new_flag), turning(for example) 16->20
out.write(str(line) + '\n')
if samfile == output_filename:
os.remove(temp_name)
return output_filename
def sort_sam_alphabetic(samfile):
splited = samfile.split(".")
sorted_name = ".".join(splited[:-1]) + "_sorted." + splited[-1]
command = "cat {0} | sed -r '/^[ \t]*$/d' | LC_ALL=C sort -k1,1n > {1}".format(samfile,sorted_name)
try:
subprocess.run(command, shell = True)
except Exception as e:
raise(e)
return sorted_name
def revert_sam_to_4nt(aligned_3nt_sam_filename, output_filename=None, sorted=False, **kwargs):
'''
:param aligned_3nt_sam_filename: filename | |
import numpy as np
from astropy.visualization.lupton_rgb import LinearMapping, AsinhMapping
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Polygon
from matplotlib.ticker import MaxNLocator
from .bbox import Box
from .component import Component
def channels_to_rgb(channels):
"""Get the linear mapping of multiple channels to RGB channels
The mapping created here assumes the the channels are ordered in wavelength
direction, starting with the shortest wavelength. The mapping seeks to produce
a relatively even weights for across all channels. It does not consider e.g.
signal-to-noise variations across channels or human perception.
Parameters
----------
channels: int in range(0,7)
Number of channels
Returns
-------
array (3, channels) to map onto RGB
"""
assert channels in range(
0, 8
), "No mapping has been implemented for {} channels".format(channels)
channel_map = np.zeros((3, channels))
if channels == 1:
channel_map[0, 0] = channel_map[1, 0] = channel_map[2, 0] = 1
elif channels == 2:
channel_map[0, 1] = 0.667
channel_map[1, 1] = 0.333
channel_map[1, 0] = 0.333
channel_map[2, 0] = 0.667
channel_map /= 0.667
elif channels == 3:
channel_map[0, 2] = 1
channel_map[1, 1] = 1
channel_map[2, 0] = 1
elif channels == 4:
channel_map[0, 3] = 1
channel_map[0, 2] = 0.333
channel_map[1, 2] = 0.667
channel_map[1, 1] = 0.667
channel_map[2, 1] = 0.333
channel_map[2, 0] = 1
channel_map /= 1.333
elif channels == 5:
channel_map[0, 4] = 1
channel_map[0, 3] = 0.667
channel_map[1, 3] = 0.333
channel_map[1, 2] = 1
channel_map[1, 1] = 0.333
channel_map[2, 1] = 0.667
channel_map[2, 0] = 1
channel_map /= 1.667
elif channels == 6:
channel_map[0, 5] = 1
channel_map[0, 4] = 0.667
channel_map[0, 3] = 0.333
channel_map[1, 4] = 0.333
channel_map[1, 3] = 0.667
channel_map[1, 2] = 0.667
channel_map[1, 1] = 0.333
channel_map[2, 2] = 0.333
channel_map[2, 1] = 0.667
channel_map[2, 0] = 1
channel_map /= 2
elif channels == 7:
channel_map[:, 6] = 2/3.
channel_map[0, 5] = 1
channel_map[0, 4] = 0.667
channel_map[0, 3] = 0.333
channel_map[1, 4] = 0.333
channel_map[1, 3] = 0.667
channel_map[1, 2] = 0.667
channel_map[1, 1] = 0.333
channel_map[2, 2] = 0.333
channel_map[2, 1] = 0.667
channel_map[2, 0] = 1
channel_map /= 2
return channel_map
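# Illustrative sanity check: channels_to_rgb(3) is a flipped identity mapping --
# R <- channel 2, G <- channel 1, B <- channel 0 -- since channels are assumed
# to be ordered from shortest to longest wavelength.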
class LinearPercentileNorm(LinearMapping):
def __init__(self, img, percentiles=[1, 99]):
"""Create norm that is linear between lower and upper percentile of img
Parameters
----------
img: array_like
Image to normalize
percentile: array_like, default=[1,99]
Lower and upper percentile to consider. Pixel values below will be
set to zero, above to saturated.
"""
assert len(percentiles) == 2
vmin, vmax = np.percentile(img, percentiles)
super().__init__(minimum=vmin, maximum=vmax)
class AsinhPercentileNorm(AsinhMapping):
def __init__(self, img, percentiles=[1, 99]):
"""Create norm with an asinh stretch between lower and upper percentile of img
Parameters
----------
img: array_like
Image to normalize
percentile: array_like, default=[1,99]
Lower and upper percentile to consider. Pixel values below will be
set to zero, above to saturated.
"""
assert len(percentiles) == 2
vmin, vmax = np.percentile(img, percentiles)
# solution for beta assumes flat spectrum at vmax
stretch = vmax - vmin
beta = stretch / np.sinh(1)
super().__init__(minimum=vmin, stretch=stretch, Q=beta)
def img_to_3channel(img, channel_map=None, fill_value=0):
"""Convert multi-band image cube into 3 RGB channels
Parameters
----------
img: array_like
This should be an array with dimensions (channels, height, width).
channel_map: array_like
Linear mapping with dimensions (3, channels)
fill_value: float, default=`0`
Value to use for any masked pixels.
Returns
-------
RGB: numpy array with dtype float
"""
# expand single img into cube
assert len(img.shape) in [2, 3]
if len(img.shape) == 2:
ny, nx = img.shape
img_ = img.reshape(1, ny, nx)
elif len(img.shape) == 3:
img_ = img
C = len(img_)
# filterWeights: channel x band
if channel_map is None:
channel_map = channels_to_rgb(C)
else:
assert channel_map.shape == (3, len(img))
# map channels onto RGB channels
_, ny, nx = img_.shape
rgb = np.dot(channel_map, img_.reshape(C, -1)).reshape(3, ny, nx)
if hasattr(rgb, "mask"):
rgb = rgb.filled(fill_value)
return rgb
def img_to_rgb(img, channel_map=None, fill_value=0, norm=None, mask=None):
"""Convert images to normalized RGB.
If normalized values are outside of the range [0..255], they will be
truncated such as to preserve the corresponding color.
Parameters
----------
img: array_like
This should be an array with dimensions (channels, height, width).
channel_map: array_like
Linear mapping with dimensions (3, channels)
fill_value: float, default=`0`
Value to use for any masked pixels.
norm: `scarlet.display.Norm`, default `None`
Norm to use for mapping in the allowed range [0..255]. If `norm=None`,
`scarlet.display.LinearPercentileNorm` will be used.
mask: array_like
A [0,1] binary mask to apply over the top of the image,
where pixels with mask==1 are masked out.
Returns
-------
rgb: numpy array with dimensions (3, height, width) and dtype uint8
"""
RGB = img_to_3channel(img, channel_map=channel_map)
if norm is None:
norm = LinearMapping(image=RGB)
rgb = norm.make_rgb_image(*RGB)
if mask is not None:
rgb = np.dstack([rgb, ~mask * 255])
return rgb
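# Illustrative use (assumes `cube` is a (channels, height, width) array):
#   norm = AsinhPercentileNorm(cube, percentiles=[1, 99])
#   rgb = img_to_rgb(cube, norm=norm)   # uint8 array of shape (height, width, 3)
#   plt.imshow(rgb, origin="lower")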
panel_size = 4.0
def show_likelihood(blend, figsize=None, **kwargs):
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.plot(blend.log_likelihood, **kwargs)
ax.set_xlabel("Iteration")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_ylabel("log-Likelihood")
return fig
def show_observation(
observation,
norm=None,
channel_map=None,
sky_coords=None,
show_psf=False,
add_labels=True,
figsize=None,
):
"""Plot observation in standardized form.
"""
panels = 1 if show_psf is False else 2
if figsize is None:
figsize = (panel_size * panels, panel_size)
fig, ax = plt.subplots(1, panels, figsize=figsize)
if not hasattr(ax, "__iter__"):
ax = (ax,)
# Mask any pixels with zero weight in all bands
mask = np.sum(observation.weights, axis=0) == 0
# if there are no masked pixels, do not use a mask
if np.all(mask == 0):
mask = None
panel = 0
extent = get_extent(observation.bbox)
ax[panel].imshow(
img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),
extent=extent,
origin="lower",
)
ax[panel].set_title("Observation")
if add_labels:
assert sky_coords is not None, "Provide sky_coords for labeled objects"
for k, center in enumerate(sky_coords):
center_ = observation.get_pixel(center)
color = "w" if observation.C > 1 else "r"
ax[panel].text(*center_[::-1], k, color=color, ha="center", va="center")
panel += 1
if show_psf:
psf_image = np.zeros(observation.data.shape)
if observation.psf is not None:
psf_model = observation.psf.get_model()
# make PSF as bright as the brightest pixel of the observation
psf_model *= (
observation.data.mean(axis=0).max() / psf_model.mean(axis=0).max()
)
# insert into middle of "blank" observation
full_box = Box(psf_image.shape)
shift = tuple(
psf_image.shape[c] // 2 - psf_model.shape[c] // 2
for c in range(full_box.D)
)
model_box = Box(psf_model.shape) + shift
model_box.insert_into(psf_image, psf_model)
# slices = scarlet.box.overlapped_slices
ax[panel].imshow(img_to_rgb(psf_image, norm=norm), origin="lower")
ax[panel].set_title("PSF")
fig.tight_layout()
return fig
def show_scene(
sources,
observation=None,
norm=None,
channel_map=None,
show_model=True,
show_observed=False,
show_rendered=False,
show_residual=False,
add_labels=True,
add_boxes=False,
figsize=None,
linear=True,
):
"""Plot all sources to recreate the scence.
The functions provides a fast way of evaluating the quality of the entire model,
i.e. the combination of all scences that seek to fit the observation.
Parameters
----------
sources: list of source models
observation: `~scarlet.Observation`
norm: norm to compress image intensity to the range [0,255]
channel_map: array_like
Linear mapping with dimensions (3, channels)
show_model: bool
Whether the model is shown in the model frame
show_observed: bool
Whether the observation is shown
show_rendered: bool
Whether the model, rendered to match the observation, is shown
show_residual: bool
        Whether the residuals between rendered model and observation are shown
    add_labels: bool
Whether each source is labeled with its numerical index in the source list
add_boxes: bool
Whether each source box is shown
figsize: matplotlib figsize argument
linear: bool
        Whether to display the panels in a single row (`True`) or
        spread over two rows (`False`).
Returns
-------
matplotlib figure
"""
if show_observed or show_rendered or show_residual:
assert (
observation is not None
), "Provide matched observation to show observed frame"
panels = sum((show_model, show_observed, show_rendered, show_residual))
if linear:
if figsize is None:
figsize = (panel_size * panels, panel_size)
fig, ax = plt.subplots(1, panels, figsize=figsize)
else:
columns = int(np.ceil(panels / 2))
if figsize is None:
figsize = (panel_size * columns, panel_size * 2)
fig = plt.figure(figsize=figsize)
ax = [fig.add_subplot(2, columns, n + 1) for n in range(panels)]
if not hasattr(ax, "__iter__"):
ax = (ax,)
# Mask any pixels with zero weight in all bands
if observation is not None:
mask = np.sum(observation.weights, axis=0) == 0
# if there are no masked pixels, do not use a mask
if np.all(mask == 0):
mask = None
model_frame = sources[0].frame
model = np.zeros(model_frame.shape)
for src in sources:
model += src.get_model(frame=model_frame)
panel = 0
if show_model:
        extent = get_extent(model_frame.bbox)
# tests/keras/layers/wrappers_test.py
import pytest
import numpy as np
import copy
from numpy.testing import assert_allclose
from keras.utils import CustomObjectScope
from keras.layers import wrappers, Input, Layer
from keras.layers import RNN
from keras import layers
from keras.models import Sequential, Model, model_from_json
from keras import backend as K
from keras.utils.generic_utils import object_list_uid, to_list
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support TimeDistributed and RNN yet')
def test_TimeDistributed():
# first, test with Dense layer
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 2)),
epochs=1,
batch_size=10)
# test config
model.get_config()
# test when specifying a batch_input_shape
test_input = np.random.random((1, 3, 4))
test_output = model.predict(test_input)
weights = model.layers[0].get_weights()
reference = Sequential()
reference.add(wrappers.TimeDistributed(layers.Dense(2),
batch_input_shape=(1, 3, 4)))
reference.add(layers.Activation('relu'))
reference.compile(optimizer='rmsprop', loss='mse')
reference.layers[0].set_weights(weights)
reference_output = reference.predict(test_input)
assert_allclose(test_output, reference_output, atol=1e-05)
# test with Embedding
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Embedding(5, 6),
batch_input_shape=(10, 3, 4),
dtype='int32'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'),
np.random.random((10, 3, 4, 6)), epochs=1, batch_size=10)
# compare to not using batch_input_shape
test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32')
test_output = model.predict(test_input)
weights = model.layers[0].get_weights()
reference = Sequential()
reference.add(wrappers.TimeDistributed(layers.Embedding(5, 6),
input_shape=(3, 4), dtype='int32'))
reference.compile(optimizer='rmsprop', loss='mse')
reference.layers[0].set_weights(weights)
reference_output = reference.predict(test_input)
assert_allclose(test_output, reference_output, atol=1e-05)
# test with Conv2D
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Conv2D(5, (2, 2),
padding='same'),
input_shape=(2, 4, 4, 3)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
np.random.random((1, 2, 4, 4, 5)))
model = model_from_json(model.to_json())
model.summary()
# test stacked layers
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
model.add(wrappers.TimeDistributed(layers.Dense(3)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 3)),
epochs=1, batch_size=10)
# test wrapping Sequential model
model = Sequential()
model.add(layers.Dense(3, input_dim=2))
outer_model = Sequential()
outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
outer_model.compile(optimizer='rmsprop', loss='mse')
outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)),
epochs=1, batch_size=10)
# test with functional API
x = Input(shape=(3, 2))
y = wrappers.TimeDistributed(model)(x)
outer_model = Model(x, y)
outer_model.compile(optimizer='rmsprop', loss='mse')
outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)),
epochs=1, batch_size=10)
# test with BatchNormalization
model = Sequential()
model.add(wrappers.TimeDistributed(
layers.BatchNormalization(center=True, scale=True),
name='bn', input_shape=(10, 2)))
model.compile(optimizer='rmsprop', loss='mse')
# Assert that mean and variance are 0 and 1.
td = model.layers[0]
assert np.array_equal(td.get_weights()[2], np.array([0, 0]))
assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Train
model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
# Assert that mean and variance changed.
assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Verify input_map has one mapping from inputs to reshaped inputs.
uid = object_list_uid(model.inputs)
assert len(td._input_map.keys()) == 1
assert uid in td._input_map
assert K.int_shape(td._input_map[uid]) == (None, 2)
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support TimeDistributed and RNN yet')
@pytest.mark.skipif((K.backend() == 'cntk'),
reason='Flaky with CNTK backend')
def test_TimeDistributed_learning_phase():
# test layers that need learning_phase to be set
np.random.seed(1234)
x = Input(shape=(3, 2))
y = wrappers.TimeDistributed(layers.Dropout(.999))(x, training=True)
model = Model(x, y)
y = model.predict(np.random.random((10, 3, 2)))
assert_allclose(np.mean(y), 0., atol=1e-1, rtol=1e-1)
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support TimeDistributed and RNN yet')
def test_TimeDistributed_trainable():
# test layers that need learning_phase to be set
x = Input(shape=(3, 2))
layer = wrappers.TimeDistributed(layers.BatchNormalization())
_ = layer(x)
assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2
layer.trainable = False
assert len(layer.updates) == 0
assert len(layer.trainable_weights) == 0
layer.trainable = True
assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2
@pytest.mark.skipif((K.backend() == 'cntk' or K.backend() == 'mxnet'),
                    reason='Unknown timesteps for RNN not supported in CNTK and MXNet.')
def test_TimeDistributed_with_masked_embedding_and_unspecified_shape():
# test with unspecified shape and Embeddings with mask_zero
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Embedding(5, 6, mask_zero=True),
input_shape=(None, None)))
# the shape so far: (N, t_1, t_2, 6)
model.add(wrappers.TimeDistributed(layers.SimpleRNN(7, return_sequences=True)))
model.add(wrappers.TimeDistributed(layers.SimpleRNN(8, return_sequences=False)))
model.add(layers.SimpleRNN(1, return_sequences=False))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4), dtype='int32')
for i in range(4):
model_input[i, i:, i:] = 0
model.fit(model_input,
np.random.random((10, 1)), epochs=1, batch_size=10)
mask_outputs = [model.layers[0].compute_mask(model.input)]
for layer in model.layers[1:]:
mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1]))
func = K.function([model.input], mask_outputs[:-1])
mask_outputs_val = func([model_input])
ref_mask_val_0 = model_input > 0 # embedding layer
ref_mask_val_1 = ref_mask_val_0 # first RNN layer
ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1) # second RNN layer
ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
for i in range(3):
assert np.array_equal(mask_outputs_val[i], ref_mask_val[i])
assert mask_outputs[-1] is None # final layer
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support TimeDistributed and RNN yet')
def test_TimeDistributed_with_masking_layer():
# test with Masking layer
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Masking(mask_value=0.,),
input_shape=(None, 4)))
model.add(wrappers.TimeDistributed(layers.Dense(5)))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4))
for i in range(4):
model_input[i, i:, :] = 0.
model.compile(optimizer='rmsprop', loss='mse')
model.fit(model_input,
np.random.random((10, 3, 5)), epochs=1, batch_size=6)
mask_outputs = [model.layers[0].compute_mask(model.input)]
mask_outputs += [model.layers[1].compute_mask(model.layers[1].input,
mask_outputs[-1])]
func = K.function([model.input], mask_outputs)
mask_outputs_val = func([model_input])
assert np.array_equal(mask_outputs_val[0], np.any(model_input, axis=-1))
assert np.array_equal(mask_outputs_val[1], np.any(model_input, axis=-1))
def test_regularizers():
model = Sequential()
model.add(wrappers.TimeDistributed(
layers.Dense(2, kernel_regularizer='l1'), input_shape=(3, 4)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
assert len(model.layers[0].layer.losses) == 1
assert len(model.layers[0].losses) == 1
assert len(model.layers[0].get_losses_for(None)) == 1
assert len(model.losses) == 1
model = Sequential()
model.add(wrappers.TimeDistributed(
layers.Dense(2, activity_regularizer='l1'), input_shape=(3, 4)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
assert len(model.losses) == 1
def test_Bidirectional():
rnn = layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
dropout_rate = 0.2
for mode in ['sum', 'concat']:
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = Sequential()
model.add(wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
recurrent_dropout=dropout_rate),
merge_mode=mode,
input_shape=(timesteps, dim)))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# test config
model.get_config()
model = model_from_json(model.to_json())
model.summary()
# test stacked bidirectional layers
model = Sequential()
model.add(wrappers.Bidirectional(rnn(output_dim,
return_sequences=True),
merge_mode=mode,
input_shape=(timesteps, dim)))
model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = Input((timesteps, dim))
outputs = wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
recurrent_dropout=dropout_rate),
merge_mode=mode)(inputs)
model = Model(inputs, outputs)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# Bidirectional and stateful
inputs = Input(batch_shape=(1, timesteps, dim))
outputs = wrappers.Bidirectional(rnn(output_dim, stateful=True),
merge_mode=mode)(inputs)
model = Model(inputs, outputs)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
@pytest.mark.skipif((K.backend() == 'cntk'),
                    reason='Unknown timesteps not supported in CNTK.')
def test_Bidirectional_dynamic_timesteps():
# test with functional API with dynamic length
rnn = layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
dropout_rate = 0.2
for mode in ['sum', 'concat']:
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
inputs = Input((None, dim))
outputs = wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
recurrent_dropout=dropout_rate),
merge_mode=mode)(inputs)
model = Model(inputs, outputs)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
@pytest.mark.parametrize('merge_mode', ['sum', 'mul', 'ave', 'concat', None])
def test_Bidirectional_merged_value(merge_mode):
rnn = layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
X = [np.random.rand(samples, timesteps, dim)]
if merge_mode == 'sum':
merge_func = lambda y, y_rev: y + y_rev
elif merge_mode == 'mul':
merge_func = lambda y, y_rev: y * y_rev
elif merge_mode == 'ave':
merge_func = lambda y, y_rev: (y + y_rev) / 2
elif merge_mode == 'concat':
merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
else:
merge_func = lambda y, y_rev: [y, y_rev]
# basic case
inputs = Input((timesteps, dim))
layer = wrappers.Bidirectional(rnn(units, return_sequences=True),
merge_mode=merge_mode)
f_merged = K.function([inputs], to_list(layer(inputs)))
f_forward = K.function([inputs], [layer.forward_layer.call(inputs)])
f_backward = K.function([inputs],
[K.reverse(layer.backward_layer.call(inputs), 1)])
y_merged = f_merged(X)
y_expected = to_list(merge_func(f_forward(X)[0], f_backward(X)[0]))
assert len(y_merged) == len(y_expected)
for x1, x2 in zip(y_merged, y_expected):
assert_allclose(x1, x2, atol=1e-5)
# test return_state
inputs = Input((timesteps, dim))
layer = wrappers.Bidirectional(rnn(units, return_state=True),
merge_mode=merge_mode)
f_merged = K.function([inputs], layer(inputs))
f_forward = K.function([inputs], layer.forward_layer.call(inputs))
f_backward = K.function([inputs], layer.backward_layer.call(inputs))
n_states = len(layer.layer.states)
y_merged = f_merged(X)
y_forward = f_forward(X)
y_backward = f_backward(X)
y_expected = to_list(merge_func(y_forward[0], y_backward[0]))
assert len(y_merged) == len(y_expected) + n_states * 2
for x1, x2 in zip(y_merged, y_expected):
assert_allclose(x1, x2, atol=1e-5)
# test if the state of a BiRNN is the concatenation of the underlying RNNs
y_merged = y_merged[-n_states * 2:]
y_forward = y_forward[-n_states:]
y_backward = y_backward[-n_states:]
for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
assert_allclose(state_birnn, state_inner, atol=1e-5)
@pytest.mark.skipif(K.backend() == 'theano' or K.backend() == 'mxnet', reason='Not supported.')
@pytest.mark.parametrize('merge_mode', ['sum', 'concat', None])
def test_Bidirectional_dropout(merge_mode):
rnn = layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
X = [np.random.rand(samples, timesteps, dim)]
inputs = Input((timesteps, dim))
wrapped = wrappers.Bidirectional(rnn(units, dropout=0.2, recurrent_dropout=0.2),
merge_mode=merge_mode)
outputs = to_list(wrapped(inputs, training=True))
assert all(not getattr(x, '_uses_learning_phase') for x in outputs)
inputs = Input((timesteps, dim))
wrapped = wrappers.Bidirectional(rnn(units, dropout=0.2, return_state=True),
merge_mode=merge_mode)
outputs = to_list(wrapped(inputs))
assert all(x._uses_learning_phase for x in outputs)
model = Model(inputs, outputs)
assert model.uses_learning_phase
y1 = to_list(model.predict(X))
y2 = to_list(model.predict(X))
for x1, x2 in zip(y1, y2):
assert_allclose(x1, x2, atol=1e-5)
def test_Bidirectional_state_reuse():
rnn = layers.LSTM
samples = 2
dim = 5
timesteps = 3
    units = 3
Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.pow(2%(decimalpoint)s, self.datax2a, self.dataout, matherrors=True)
# This is the actual test. There should be no exception on math errors.
arrayfunc.pow(2%(decimalpoint)s, self.datayovfl, self.dataout, matherrors=True)
########################################################
def test_pow_array_array_none_e1(self):
"""Test pow as *array-array-none* for x ** %(pow_y_err)s%(decimalpoint)s - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.pow(self.datax2a, self.datayok)
# This is the actual test.
with self.assertRaises(%(exceptioncode)s):
arrayfunc.pow(self.datax2b, self.datayovfl)
########################################################
def test_pow_array_array_none_e2(self):
"""Test pow as *array-array-none* for x ** %(pow_y_err)s%(decimalpoint)s with matherrors=True - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.pow(self.datax2a, self.datayok, matherrors=True)
# This is the actual test. There should be no exception on math errors.
arrayfunc.pow(self.datax2b, self.datayovfl, matherrors=True)
########################################################
def test_pow_array_array_array_f1(self):
"""Test pow as *array-array-array* for x ** %(pow_y_err)s%(decimalpoint)s - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.pow(self.datax2a, self.datayok, self.dataout)
# This is the actual test.
with self.assertRaises(%(exceptioncode)s):
arrayfunc.pow(self.datax2b, self.datayovfl, self.dataout)
########################################################
def test_pow_array_array_array_f2(self):
"""Test pow as *array-array-array* for x ** %(pow_y_err)s%(decimalpoint)s with matherrors=True - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.pow(self.datax2a, self.datayok, self.dataout, matherrors=True)
# This is the actual test. There should be no exception on math error.
arrayfunc.pow(self.datax2b, self.datayovfl, self.dataout, matherrors=True)
##############################################################################
'''
# ==============================================================================
# ==============================================================================
# The template used to generate the tests for nan, inf, -inf in data arrays
# for pow.
nan_data_pow_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_pow_%(typelabel)s(unittest.TestCase):
"""Test %(funclabel)s for basic general function operation using parameter %(errordata)s.
nan_data_pow_template
"""
##############################################################################
def FloatassertEqual(self, dataoutitem, expecteditem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
# This is active for float numbers only.
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataok1 = array.array('%(typecode)s', [-5.0, -4.0, -3.0, -2.0, 2.0, 3.0, 4.0, 5.0])
self.dataok2 = array.array('%(typecode)s', [-2.0, 3.0, -4.0, 5.0, 5.0, 4.0, -3.0, 2.0])
arraysize = len(self.dataok1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.dataok2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval)
expected = [math.%(funcname)s(x, testval) for x in errordata]
# This is the actual test.
# Some values will produce non-finite (nan, inf, -inf) results
# while some will not. We therefore provide means of checking both.
if not all([math.isfinite(x) for x in expected]):
# At least one value will produce a non-finite result.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval)
else:
arrayfunc.%(funcname)s(errordata, testval)
for dataoutitem, expecteditem in zip(errordata, expected):
					# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.dataok2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expected = [math.%(funcname)s(x, testval) for x in errordata]
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expected):
					# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.dataok2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval, self.dataout)
expected = [math.%(funcname)s(x, testval) for x in errordata]
# This is the actual test.
# Some values will produce non-finite (nan, inf, -inf) results
# while some will not. We therefore provide means of checking both.
if not all([math.isfinite(x) for x in expected]):
# At least one value will produce a non-finite result.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
else:
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
					# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.dataok2:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [math.%(funcname)s(x, testval) for x in self.errordata]
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
					# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.dataok1:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok2 = copy.copy(self.dataok2)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, dataok2)
expected = [math.%(funcname)s(testval, x) for x in errordata]
# This is the actual test.
# Some values will produce non-finite (nan, inf, -inf) results
# while some will not. We therefore provide means of checking both.
if not all([math.isfinite(x) for x in expected]):
# At least one value will produce a non-finite result.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
else:
arrayfunc.%(funcname)s(testval, errordata)
for dataoutitem, expecteditem in zip(self.dataout, expected):
					# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.dataok1:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expected = [math.%(funcname)s(testval, x) for x in self.errordata]
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expected):
					# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.dataok1:
with self.subTest(msg='Failed with parameter', testval = testval):
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, self.dataok2, self.dataout)
expected = [math.%(funcname)s(testval, x) for x in self.errordata]
# This is the actual test.
# Some values will produce non-finite (nan, inf, -inf) results
# while some will not. We therefore provide means of checking both.
if not all([math.isfinite(x) for x in expected]):
# At least one value will produce a non-finite result.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
else:
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
					# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.dataok1:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [math.%(funcname)s(testval, x) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
					# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
dataok2 = copy.copy(self.dataok2)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, dataok2)
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
expected = [math.%(funcname)s(x, y) for x,y in zip(dataok1, self.errordata)]
# This is the actual test.
# Some values will produce non-finite (nan, inf, -inf) results
# while some will not. We therefore provide means of checking both.
if not all([math.isfinite(x) for x in expected]):
# At least one value will produce a non-finite result.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(dataok1, self.errordata)
else:
arrayfunc.%(funcname)s(dataok1, self.errordata)
for dataoutitem, expecteditem in zip(self.dataout, expected):
			# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(y, x) for x,y in zip(self.errordata, self.dataok2)]
arrayfunc.%(funcname)s(self.dataok2, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataok2, expected):
			# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.dataok1, self.dataok2, self.dataout)
expected = [math.%(funcname)s(x, y) for x,y in zip(self.dataok1, self.errordata)]
# This is the actual test.
# Some values will produce non-finite (nan, inf, -inf) results
# while some will not. We therefore provide means of checking both.
if not all([math.isfinite(x) for x in expected]):
# At least one value will produce a non-finite result.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout)
else:
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
			# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(y, x) for x,y in zip(self.errordata, self.dataok2)]
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
			# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================
# ==============================================================================
# Which opstemplate is valid for which operation. Each math operation requires
# different templates for signed int, unsigned int, and float.
opstemplates = {
'+' : {'int' : [param_overflow_add_max1_template,
param_overflow_add_min1_template],
'uint' : [param_overflow_add_max1_template],
'float' : [param_overflow_add_max1_template,
param_overflow_add_min1_template]
},
'-' : {'int' : [param_overflow_sub_max1_template,
param_overflow_sub_min1_template,
param_overflow_sub_1max_template,
param_overflow_sub_1min_template],
'uint' : [param_overflow_sub_min1_template],
'float' : [param_overflow_sub_max1_template,
param_overflow_sub_min1_template,
param_overflow_sub_1max_template]
},
'*' : {'int' : [param_overflow_mul_max2_template,
param_overflow_mul_min2_template,
param_overflow_mul_max2neg_template,
param_overflow_mul_min2neg_template,
param_overflow_mul_min1neg_template],
'uint' : [param_overflow_mul_max2_template],
'float' : [param_overflow_mul_max2_template,
param_overflow_mul_min2_template,
param_overflow_mul_max2neg_template,
param_overflow_mul_min2neg_template]
},
'/' : {'int' : [param_overflow_truediv_divzero_template,
param_overflow_truediv_divzero_errors_template,
param_overflow_truediv_mindivminus1_template],
'uint' : [param_overflow_truediv_divzero_template,
param_overflow_truediv_divzero_errors_template],
'float' : [param_overflow_truediv_divzero_template]
},
'//' : {'int' : [param_overflow_floordiv_divzero_template,
param_overflow_floordiv_divzero_errors_template,
param_overflow_floordiv_mindivminus1_template],
'uint' : [param_overflow_floordiv_divzero_template,
param_overflow_floordiv_divzero_errors_template],
'float' : [param_overflow_floordiv_divzero_template]
},
'%' : {'int' : [param_overflow_mod_divzero_template,
param_overflow_mod_divzero_errors_template],
'uint' : [param_overflow_mod_divzero_template,
param_overflow_mod_divzero_errors_template],
'float' : [param_overflow_mod_divzero_template]
},
'**' : {'int' : [param_overflow_pow_negy_template,
param_overflow_pow_error_template],
'uint' : [param_overflow_pow_error_template],
'float' : [param_overflow_pow_error_template]
},
}
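# Editor's note (illustrative only, not part of the generator): each template
# above is an old-style %-format string filled from a per-type parameter dict;
# the keys used below are assumed from the %(...)s placeholders that appear in
# the templates themselves.
def _example_render_template():
	testparams = {'funclabel': 'pow', 'funcname': 'pow',
			'typecode': 'f', 'typelabel': 'f',
			'errorlabel': 'NaN', 'errordata': 'nan'}
	return nan_data_pow_template % testparams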
# ==============================================================================
# These are all the test code templates.
test_templates = {'test_template_op' : test_op_templ,
'nan_data_error_template' : nan_data_error_template,
'nan_div_data_error_template' : nan_div_data_error_template,
'inf_floordiv_data_error_template' : inf_floordiv_data_error_template,
'inf_mod_data_error_template' : inf_mod_data_error_template,
'nan_data_pow_template' : nan_data_pow_template,
}
# ==============================================================================
# Used for
""" MODULE images2swf
Provides a function (writeSwf) to store a series of PIL images or numpy
arrays in an SWF movie, that can be played on a wide range of OS's.
This module came into being because I wanted to store a series of images
in a movie that can be viewed by other people, and which I can embed in
flash presentations. For writing AVI or MPEG you really need a c/c++
    library, and although the filesize is then very small, the quality is
    sometimes not adequate. Besides I'd like to be independent of yet another
package. I tried writing animated gif using PIL (which is widely available),
but the quality is so poor because it only allows for 256 different colours.
    I also looked into MNG and APNG, two standards similar to the PNG standard.
Both standards promise exactly what I need. However, hardly any application
can read those formats, and I cannot import them in flash.
Therefore I decided to check out the swf file format, which is very well
documented. This is the result: a pure python module to create an SWF file
that shows a series of images. The images are stored using the DEFLATE
algorithm (same as PNG and ZIP and which is included in the standard Python
distribution). As this compression algorithm is much more effective than
that used in GIF images, we obtain better quality (24 bit colours + alpha
    channel) while still producing smaller files (a test showed ~75%).
Although SWF also allows for JPEG compression, doing so would probably
require a third party library (because encoding JPEG is much harder).
This module requires Python 2.x and numpy.
This code is provided as is, and is free to use for all.
sources and tools:
- SWF on wikipedia
- Adobes "SWF File Format Specification" version 10
(http://www.adobe.com/devnet/swf/pdf/swf_file_format_spec_v10.pdf)
- swftools (swfdump in specific) for debugging
- iwisoft swf2avi can be used to convert swf to avi/mpg/flv with really
good quality, while file size is reduced with factors 20-100.
A good program in my opinion. The free version has the limitation
of a watermark in the upper left corner.
<NAME> (August 2009)
"""
try:
import PIL.Image
except ImportError:
PIL = None
import numpy as np
import zlib
import sys, time
## Base functions and classes
if int(sys.version[0])<3:
bytes = str
class BitArray:
""" Dynamic array of bits that automatically resizes
with factors of two.
Append bits using .Append() or +=
You can reverse bits using .Reverse()
"""
def __init__(self, initvalue=None):
self.data = np.zeros((16,), dtype=np.uint8)
self._len = 0
if initvalue is not None:
self.Append(initvalue)
def __len__(self):
return self._len #self.data.shape[0]
def __repr__(self):
return self.data[:self._len].tostring()
def _checkSize(self):
# check length... grow if necessary
arraylen = self.data.shape[0]
if self._len >= arraylen:
tmp = np.zeros((arraylen*2,), dtype=np.uint8)
tmp[:self._len] = self.data[:self._len]
self.data = tmp
def __add__(self, value):
self.Append(value)
return self
def Append(self, bits):
# check input
if isinstance(bits, BitArray):
bits = str(bits)
if isinstance(bits, int):
bits = str(bits)
if not isinstance(bits, basestring):
raise ValueError("Append bits as strings or integers!")
# add bits
for bit in bits:
self.data[self._len] = ord(bit)
self._len += 1
self._checkSize()
def Reverse(self):
""" In-place reverse. """
tmp = self.data[:self._len].copy()
self.data[:self._len] = np.flipud(tmp)
def ToBytes(self):
""" Convert to bytes. If necessary,
zeros are padded to the end (right side).
"""
bits = str(self)
# determine number of bytes
nbytes = 0
while nbytes*8 < len(bits):
nbytes +=1
# pad
bits = bits.ljust(nbytes*8, '0')
# go from bits to bytes
bb = bytes()
for i in range(nbytes):
tmp = int( bits[i*8:(i+1)*8], 2)
bb += intToUint8(tmp)
# done
return bb
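# Editor's illustration (values assumed, not part of the original module):
# BitArray collects '0'/'1' characters and pads to whole bytes on conversion.
def _example_bitarray_usage():
    bb = BitArray('101')
    bb += intToBits(5, 5)        # appends '00101', giving 8 bits in total
    return bb.ToBytes()          # one byte: chr(int('10100101', 2)) == '\xa5'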
def intToUint32(i):
number = int(i)
n1, n2, n3, n4 = 1, 256, 256*256, 256*256*256
b4, number = number // n4, number % n4
b3, number = number // n3, number % n3
b2, number = number // n2, number % n2
b1 = number
return chr(b1) + chr(b2) + chr(b3) + chr(b4)
def intToUint16(i):
i = int(i)
    # divide in two parts (bytes)
i1 = i % 256
i2 = int( i//256)
# make string (little endian)
return chr(i1) + chr(i2)
def intToUint8(i):
return chr(int(i))
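# Editor's check (illustrative): the three helpers above emit little-endian
# byte strings, so the least significant byte comes first.
def _example_uint_encoding():
    assert intToUint16(1) == '\x01\x00'
    assert intToUint32(258) == '\x02\x01\x00\x00'      # 258 = 0x0102
    return intToUint8(255)                             # '\xff'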
def intToBits(i,n=None):
""" convert int to a string of bits (0's and 1's in a string),
pad to n elements. Convert back using int(ss,2). """
ii = i
# make bits
bb = BitArray()
while ii > 0:
bb += str(ii % 2)
ii = ii >> 1
bb.Reverse()
# justify
if n is not None:
if len(bb) > n:
raise ValueError("intToBits fail: len larger than padlength.")
bb = str(bb).rjust(n,'0')
# done
return BitArray(bb)
def signedIntToBits(i,n=None):
""" convert signed int to a string of bits (0's and 1's in a string),
pad to n elements. Negative numbers are stored in 2's complement bit
patterns, thus positive numbers always start with a 0.
"""
# negative number?
ii = i
if i<0:
# A negative number, -n, is represented as the bitwise opposite of
ii = abs(ii) -1 # the positive-zero number n-1.
# make bits
bb = BitArray()
while ii > 0:
bb += str(ii % 2)
ii = ii >> 1
bb.Reverse()
# justify
bb = '0' + str(bb) # always need the sign bit in front
if n is not None:
if len(bb) > n:
raise ValueError("signedIntToBits fail: len larger than padlength.")
bb = bb.rjust(n,'0')
# was it negative? (then opposite bits)
if i<0:
bb = bb.replace('0','x').replace('1','0').replace('x','1')
# done
return BitArray(bb)
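# Editor's illustration (assumed values): signedIntToBits produces two's
# complement patterns with the sign bit always prepended before padding.
def _example_signed_bits():
    assert str(intToBits(5, 8)) == '00000101'
    assert str(signedIntToBits(5, 8)) == '00000101'
    assert str(signedIntToBits(-5, 8)) == '11111011'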
def twitsToBits(arr):
""" Given a few (signed) numbers, store them
    as compactly as possible in the way specified by the swf format.
    The numbers are multiplied by 20, assuming they
    are given in twips (twentieths of a pixel).
Can be used to make the RECT record.
"""
# first determine length using non justified bit strings
maxlen = 1
for i in arr:
tmp = len(signedIntToBits(i*20))
if tmp > maxlen:
maxlen = tmp
# build array
bits = intToBits(maxlen,5)
for i in arr:
bits += signedIntToBits(i*20, maxlen)
return bits
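# Editor's illustration (stage size assumed): a RECT record for a 320x240 pixel
# movie; twitsToBits multiplies by 20 to convert pixels to twips and prefixes
# the 5-bit field size.
def _example_rect_record():
    return twitsToBits([0, 320, 0, 240])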
def floatsToBits(arr):
""" Given a few (signed) numbers, convert them to bits,
stored as FB (float bit values). We always use 16.16.
Negative numbers are not (yet) possible, because I don't
    know how they're implemented (ambiguity).
"""
bits = intToBits(31, 5) # 32 does not fit in 5 bits!
for i in arr:
if i<0:
raise ValueError("Dit not implement negative floats!")
i1 = int(i)
i2 = i - i1
bits += intToBits(i1, 15)
        bits += intToBits(int(i2 * 2 ** 16), 16)
return bits
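# Editor's worked example (values assumed): scale factors are written as 16.16
# fixed point, so 1.5 is stored as integer part 1 and fraction 0.5 * 2**16 = 32768.
def _example_fixed_point():
    return floatsToBits([1.5, 1.0])     # e.g. the two entries of a scale record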
## Base Tag
class Tag:
def __init__(self):
self.bytes = ''
self.tagtype = -1
def ProcessTag(self):
""" Implement this to create the tag. """
        raise NotImplementedError()
def GetTag(self):
""" Calls processTag and attaches the header. """
self.ProcessTag()
# tag to binary
bits = intToBits(self.tagtype,10)
# complete header uint16 thing
bits += '1'*6 # = 63 = 0x3f
# make uint16
bb = intToUint16( int(str(bits),2) )
bb = bytes(bb)
# now add 32bit length descriptor
bb += intToUint32(len(self.bytes))
# done, attach and return
bb += self.bytes
return str(bb)
def MakeRectRecord(self, xmin, xmax, ymin, ymax):
""" Simply uses makeCompactArray to produce
a RECT Record. """
return twitsToBits([xmin, xmax, ymin, ymax])
def MakeMatrixRecord(self, scale_xy=None, rot_xy=None, trans_xy=None):
# empty matrix?
if scale_xy is None and rot_xy is None and trans_xy is None:
return "0"*8
# init
bits = BitArray()
# scale
if scale_xy:
bits += '1'
bits += floatsToBits([scale_xy[0], scale_xy[1]])
else:
bits += '0'
# rotation
if rot_xy:
bits += '1'
bits += floatsToBits([rot_xy[0], rot_xy[1]])
else:
bits += '0'
# translation (no flag here)
if trans_xy:
bits += twitsToBits([trans_xy[0], trans_xy[1]])
else:
bits += twitsToBits([0,0])
# done
return bits
## Control tags
class ControlTag(Tag):
def __init__(self):
Tag.__init__(self)
class FileAttributesTag(ControlTag):
def __init__(self):
ControlTag.__init__(self)
self.tagtype = 69
def ProcessTag(self):
self.bytes = bytes( '\x00' * (1+3) | |
import torch
import torch.nn as nn
import numpy as np
from functools import partial
import torch.nn.init as init
import torch.nn.functional as F
import math
from timm.models.layers import DropPath, to_2tuple
from timm.models.layers import trunc_normal_
from utils import batch_index_select
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
'classifier': 'head',
**kwargs
}
default_cfgs = {
'LV_ViT_Tiny': _cfg(),
'LV_ViT': _cfg(),
'LV_ViT_Medium': _cfg(crop_pct=1.0),
'LV_ViT_Large': _cfg(crop_pct=1.0),
}
DROPOUT_FLOPS = 4
LAYER_NORM_FLOPS = 5
ACTIVATION_FLOPS = 8
SOFTMAX_FLOPS = 5
class GroupLinear(nn.Module):
'''
Group Linear operator
'''
def __init__(self, in_planes, out_channels,groups=1, bias=True):
super(GroupLinear, self).__init__()
assert in_planes%groups==0
assert out_channels%groups==0
self.in_dim = in_planes
self.out_dim = out_channels
self.groups=groups
self.bias = bias
self.group_in_dim = int(self.in_dim/self.groups)
self.group_out_dim = int(self.out_dim/self.groups)
self.group_weight = nn.Parameter(torch.zeros(self.groups, self.group_in_dim, self.group_out_dim))
self.group_bias=nn.Parameter(torch.zeros(self.out_dim))
def forward(self, x):
t,b,d=x.size()
x = x.view(t,b,self.groups,int(d/self.groups))
out = torch.einsum('tbgd,gdf->tbgf', (x, self.group_weight)).reshape(t,b,self.out_dim)+self.group_bias
return out
def extra_repr(self):
s = ('{in_dim}, {out_dim}')
if self.groups != 1:
s += ', groups={groups}'
        if not self.bias:
s += ', bias=False'
return s.format(**self.__dict__)
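# Editor's sketch (shapes are assumptions): GroupLinear splits the feature axis
# into `groups` independent chunks, each with its own weight matrix applied via
# einsum, cutting the parameter count by a factor of `groups`.
def _example_group_linear():
    layer = GroupLinear(384, 1152, groups=4)
    x = torch.randn(196, 2, 384)       # (tokens, batch, features), as in forward()
    return layer(x).shape              # torch.Size([196, 2, 1152])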
class Mlp(nn.Module):
'''
MLP with support to use group linear operator
'''
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., group=1):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
if group==1:
self.fc1 = nn.Linear(in_features, hidden_features)
self.fc2 = nn.Linear(hidden_features, out_features)
else:
self.fc1 = GroupLinear(in_features, hidden_features,group)
self.fc2 = GroupLinear(hidden_features, out_features,group)
self.act = act_layer()
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GroupNorm(nn.Module):
def __init__(self, num_groups, embed_dim, eps=1e-5, affine=True):
super().__init__()
self.gn = nn.GroupNorm(num_groups, embed_dim,eps,affine)
def forward(self, x):
B,T,C = x.shape
x = x.view(B*T,C)
x = self.gn(x)
x = x.view(B,T,C)
return x
class Attention(nn.Module):
'''
Multi-head self-attention
from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
with some modification to support different num_heads and head_dim.
'''
def __init__(self, dim, num_heads=8, head_dim=None, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
if head_dim is not None:
self.head_dim=head_dim
else:
head_dim = dim // num_heads
self.head_dim = head_dim
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, self.head_dim* self.num_heads * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(self.head_dim* self.num_heads, dim)
self.proj_drop = nn.Dropout(proj_drop)
def softmax_with_policy(self, attn, policy, eps=1e-6):
B, N, _ = policy.size()
B, H, N, N = attn.size()
attn_policy = policy.reshape(B, 1, 1, N) # * policy.reshape(B, 1, N, 1)
eye = torch.eye(N, dtype=attn_policy.dtype, device=attn_policy.device).view(1, 1, N, N)
attn_policy = attn_policy + (1.0 - attn_policy) * eye
max_att = torch.max(attn, dim=-1, keepdim=True)[0]
attn = attn - max_att
# attn = attn.exp_() * attn_policy
# return attn / attn.sum(dim=-1, keepdim=True)
# for stable training
attn = attn.to(torch.float32).exp_() * attn_policy.to(torch.float32)
attn = (attn + eps/N) / (attn.sum(dim=-1, keepdim=True) + eps)
return attn.type_as(max_att)
def forward(self, x, policy, padding_mask=None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
# B,heads,N,C/heads
q, k, v = qkv[0], qkv[1], qkv[2]
        # trick here to make the q @ k.T product more numerically stable
attn = ((q * self.scale) @ k.transpose(-2, -1))
if padding_mask is not None:
# attn = attn.view(B, self.num_heads, N, N)
# attn = attn.masked_fill(
# padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
# float("-inf"),
# )
# attn_float = attn.softmax(dim=-1, dtype=torch.float32)
# attn = attn_float.type_as(attn)
raise NotImplementedError
else:
if policy is None:
attn = attn.softmax(dim=-1)
else:
attn = self.softmax_with_policy(attn, policy)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, self.head_dim* self.num_heads)
x = self.proj(x)
x = self.proj_drop(x)
return x
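# Editor's sketch (shapes and values assumed): `policy` is a per-token keep mask
# of shape (batch, tokens, 1). Pruned tokens (0.0) are removed from every other
# token's softmax, while the identity term added in softmax_with_policy lets each
# token still attend to itself, which keeps the normalisation well defined.
def _example_attention_with_policy():
    attn = Attention(dim=64, num_heads=4)
    x = torch.randn(2, 16, 64)                 # (batch, tokens, dim)
    policy = torch.ones(2, 16, 1)
    policy[:, 8:, :] = 0.0                     # prune the last 8 tokens
    return attn(x, policy).shape               # torch.Size([2, 16, 64])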
class Block(nn.Module):
'''
Pre-layernorm transformer block
'''
def __init__(self, dim, num_heads, head_dim=None, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, group=1, skip_lam=1.):
super().__init__()
self.dim = dim
self.mlp_hidden_dim = int(dim * mlp_ratio)
self.skip_lam = skip_lam
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=self.mlp_hidden_dim, act_layer=act_layer, drop=drop, group=group)
def forward(self, x, policy=None, padding_mask=None):
x = x + self.drop_path(self.attn(self.norm1(x), policy, padding_mask))/self.skip_lam
x = x + self.drop_path(self.mlp(self.norm2(x)))/self.skip_lam
return x
def flops(self, s):
heads = self.attn.num_heads
h = self.dim
i = self.mlp_hidden_dim
mha_block_flops = dict(
kqv=3 * h * h ,
attention_scores=h * s,
attn_softmax=SOFTMAX_FLOPS * s * heads,
attention_dropout=DROPOUT_FLOPS * s * heads,
attention_scale=s * heads,
attention_weighted_avg_values=h * s,
attn_output=h * h,
attn_output_bias=h,
attn_output_dropout=DROPOUT_FLOPS * h,
attn_output_residual=h,
attn_output_layer_norm=LAYER_NORM_FLOPS * h,)
ffn_block_flops = dict(
intermediate=h * i,
intermediate_act=ACTIVATION_FLOPS * i,
intermediate_bias=i,
output=h * i,
output_bias=h,
output_dropout=DROPOUT_FLOPS * h,
output_residual=h,
output_layer_norm=LAYER_NORM_FLOPS * h,)
return sum(mha_block_flops.values())*s + sum(ffn_block_flops.values())*s
class MHABlock(nn.Module):
"""
Multihead Attention block with residual branch
"""
def __init__(self, dim, num_heads, head_dim=None, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, group=1, skip_lam=1.):
super().__init__()
self.dim = dim
self.norm1 = norm_layer(dim)
self.skip_lam = skip_lam
self.attn = Attention(
dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, padding_mask=None):
        x = x + self.drop_path(self.attn(self.norm1(x * self.skip_lam), None, padding_mask)) / self.skip_lam
return x
def flops(self, s):
heads = self.attn.num_heads
h = self.dim
block_flops = dict(
kqv=3 * h * h ,
attention_scores=h * s,
attn_softmax=SOFTMAX_FLOPS * s * heads,
attention_dropout=DROPOUT_FLOPS * s * heads,
attention_scale=s * heads,
attention_weighted_avg_values=h * s,
attn_output=h * h,
attn_output_bias=h,
attn_output_dropout=DROPOUT_FLOPS * h,
attn_output_residual=h,
attn_output_layer_norm=LAYER_NORM_FLOPS * h,)
return sum(block_flops.values())*s
class FFNBlock(nn.Module):
"""
Feed forward network with residual branch
"""
def __init__(self, dim, num_heads, head_dim=None, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, group=1, skip_lam=1.):
super().__init__()
self.skip_lam = skip_lam
self.dim = dim
self.mlp_hidden_dim = int(dim * mlp_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=self.mlp_hidden_dim, act_layer=act_layer, drop=drop, group=group)
def forward(self, x):
x = x + self.drop_path(self.mlp(self.norm2(x*self.skip_lam)))/self.skip_lam
return x
def flops(self, s):
h = self.dim
i = self.mlp_hidden_dim
block_flops = dict(
intermediate=h * i,
intermediate_act=ACTIVATION_FLOPS * i,
intermediate_bias=i,
output=h * i,
output_bias=h,
output_dropout=DROPOUT_FLOPS * h,
output_residual=h,
output_layer_norm=LAYER_NORM_FLOPS * h,)
return sum(block_flops.values())*s
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Conv2d(feature_dim, embed_dim,kernel_size=1)
def forward(self, x):
x = self.backbone(x)[-1]
x = self.proj(x)
return x
class PatchEmbedNaive(nn.Module):
"""
Image to Patch Embedding
from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
return x
def flops(self):
img_size = self.img_size[0]
block_flops = dict(
proj=img_size*img_size*3*self.embed_dim,
)
return sum(block_flops.values())
class PatchEmbed4_2(nn.Module):
"""
Image to Patch Embedding with 4 layer convolution
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
new_patch_size = to_2tuple(patch_size // 2)
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.embed_dim = embed_dim
self.conv1 = nn.Conv2d(in_chans, 64, kernel_size=7, stride=2, padding=3, bias=False) # 112x112
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False) # 112x112
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.proj = nn.Conv2d(64, embed_dim, kernel_size=new_patch_size, stride=new_patch_size)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.proj(x) # [B, C, W, H]
return x
def flops(self):
img_size = self.img_size[0]
block_flops = dict(
conv1=img_size/2*img_size/2*3*64*7*7,
conv2=img_size/2*img_size/2*64*64*3*3,
conv3=img_size/2*img_size/2*64*64*3*3,
proj=img_size/2*img_size/2*64*self.embed_dim,
)
return sum(block_flops.values())
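# Editor's sketch (sizes assumed): the 4-layer convolutional stem halves the
# resolution with its strided 7x7 conv and then projects with an 8x8-stride conv
# (patch_size // 2), so a 224x224 input yields a 14x14 token grid.
def _example_patch_embed():
    stem = PatchEmbed4_2(img_size=224, patch_size=16, in_chans=3, embed_dim=384)
    x = torch.randn(1, 3, 224, 224)
    return stem(x).shape                       # torch.Size([1, 384, 14, 14])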
class PatchEmbed4_2_128(nn.Module):
"""
Image to Patch Embedding with 4 layer convolution and 128 filters
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
new_patch_size = to_2tuple(patch_size // 2)
        img_size = to_2tuple(img_size)
import math
import numpy as np
class NcsSections:
"""
Contains information regarding the contiguous sections of records in an Ncs file.
    Methods of NcsSectionsFactory parse this information from an Ncs file and
    produce these sections, in temporal order, for files whose records are
    discontiguous in time.
TODO: This class will likely need __ne__ to be useful in
more sophisticated segment construction algorithms.
"""
def __init__(self):
self.sects = []
self.sampFreqUsed = 0 # actual sampling frequency of samples
self.microsPerSampUsed = 0 # microseconds per sample
def __eq__(self, other):
samp_eq = self.sampFreqUsed == other.sampFreqUsed
micros_eq = self.microsPerSampUsed == other.microsPerSampUsed
sects_eq = self.sects == other.sects
return (samp_eq and micros_eq and sects_eq)
def __hash__(self):
return (f'{self.sampFreqUsed};{self.microsPerSampUsed};'
f'{[s.__hash__() for s in self.sects]}').__hash__()
class NcsSection:
"""
Information regarding a single contiguous section or group of records in an Ncs file.
Model is that times are closed on the left and open on the right. Record
numbers are closed on both left and right, that is, inclusive of the last record.
endTime should never be set less than startTime for comparison functions to work
properly, though this is not enforced.
"""
_RECORD_SIZE = 512 # nb sample per signal record
    def __init__(self, sb=-1, st=-1, eb=-1, et=-1, ns=-1):
        self.startRec = sb      # index of starting record
        self.startTime = st     # start time of first record
        self.endRec = eb        # index of last record (inclusive)
        self.endTime = et       # end time of last record, i.e. the end of the last
                                # sampling period contained in the last record
        self.n_samples = ns     # number of samples across the section's records
def __eq__(self, other):
return (self.startRec == other.startRec
and self.startTime == other.startTime
and self.endRec == other.endRec
and self.endTime == other.endTime
and self.n_samples == other.n_samples)
def __hash__(self):
s = f'{self.startRec};{self.startTime};{self.endRec};{self.endTime};{self.n_samples}'
return s.__hash__()
def before_time(self, rhb):
"""
Determine if this section is completely before another section in time.
"""
return self.endTime < rhb.startTime
def overlaps_time(self, rhb):
"""
Determine if this section overlaps another in time.
"""
return self.startTime <= rhb.endTime and self.endTime >= rhb.startTime
def after_time(self, rhb):
"""
Determine if this section is completely after another section in time.
"""
return self.startTime >= rhb.endTime
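# A minimal usage sketch (not part of the original module) of the interval
# semantics documented above: section times are closed on the left and open on
# the right, and record indices are inclusive. The numbers are made up.
def _example_ncs_section_relations():
    a = NcsSection(0, 0, 9, 1000, 5120)       # records 0-9, times [0, 1000)
    b = NcsSection(10, 1500, 19, 2500, 5120)  # records 10-19, times [1500, 2500)
    assert a.before_time(b) and b.after_time(a)
    assert not a.overlaps_time(b)
    return a, b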
class NcsSectionsFactory:
"""
Class for factory methods which perform parsing of contiguous sections of records
in Ncs files.
Model for times is that times are rounded to nearest microsecond. Times
from start of a sample until just before the next sample are included,
that is, closed lower bound and open upper bound on intervals. A
channel with no samples is empty and contains no time intervals.
Moved here since algorithm covering all 3 header styles and types used is
more complicated.
"""
_maxGapSampFrac = 0.2 # maximum fraction of a sampling interval between predicted
# and actual record timestamps still considered within one section
@staticmethod
def get_freq_for_micros_per_samp(micros):
"""
Compute fractional sampling frequency, given microseconds per sample.
"""
return 1e6 / micros
@staticmethod
def get_micros_per_samp_for_freq(sampFr):
"""
Calculate fractional microseconds per sample, given the sampling frequency (Hz).
"""
return 1e6 / sampFr
@staticmethod
def calc_sample_time(sampFr, startTime, posn):
"""
Calculate time rounded to microseconds for sample given frequency,
start time, and sample position.
"""
return round(startTime + NcsSectionsFactory.get_micros_per_samp_for_freq(sampFr) * posn)
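    # Worked example (illustrative numbers, not from the original source): at a
    # nominal 32000 Hz, get_micros_per_samp_for_freq(32000) == 31.25 microseconds
    # per sample, so calc_sample_time(32000, 0, 512) == round(31.25 * 512) == 16000,
    # i.e. the expected timestamp (in microseconds) of the record that starts
    # 512 samples after time 0.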
@staticmethod
def _parseGivenActualFrequency(ncsMemMap, ncsSects, chanNum, reqFreq, blkOnePredTime):
"""
Parse sections in memory mapped file when microsPerSampUsed and sampFreqUsed are known,
filling in an NcsSections object.
PARAMETERS
ncsMemMap:
memmap of Ncs file
ncsSections:
NcsSections with actual sampFreqUsed correct, first NcsSection with proper startSect
and startTime already added.
chanNum:
channel number that should be present in all records
reqFreq:
rounded frequency that all records should contain
blkOnePredTime:
predicted starting time of second record in block
RETURN
NcsSections object with block locations marked
"""
startBlockPredTime = blkOnePredTime
blk_len = 0
curBlock = ncsSects.sects[0]
for recn in range(1, ncsMemMap.shape[0]):
timestamp = ncsMemMap['timestamp'][recn]
channel_id = ncsMemMap['channel_id'][recn]
sample_rate = ncsMemMap['sample_rate'][recn]
nb_valid = ncsMemMap['nb_valid'][recn]
if channel_id != chanNum or sample_rate != reqFreq:
raise IOError('Channel number or sampling frequency changed in ' +
'records within file')
predTime = NcsSectionsFactory.calc_sample_time(ncsSects.sampFreqUsed,
startBlockPredTime, blk_len)
nValidSamps = nb_valid
if timestamp != predTime:
curBlock.endRec = recn - 1
curBlock.endTime = predTime
curBlock.n_samples = blk_len
curBlock = NcsSection(recn, timestamp, -1, -1, -1)
ncsSects.sects.append(curBlock)
startBlockPredTime = NcsSectionsFactory.calc_sample_time(
ncsSects.sampFreqUsed,
timestamp,
nValidSamps)
blk_len = 0
else:
blk_len += nValidSamps
curBlock.endRec = ncsMemMap.shape[0] - 1
endTime = NcsSectionsFactory.calc_sample_time(ncsSects.sampFreqUsed,
startBlockPredTime,
blk_len)
curBlock.endTime = endTime
return ncsSects
@staticmethod
def _buildGivenActualFrequency(ncsMemMap, actualSampFreq, reqFreq):
"""
Build NcsSections object for file given actual sampling frequency.
Requires that frequency in each record agrees with requested frequency. This is
normally obtained by rounding the header frequency; however, this value may be different
from the rounded actual frequency used in the recording, since the underlying
requirement in older Ncs files was that the number of microseconds per sample in the
records is the inverse of the sampling frequency stated in the header truncated to
whole microseconds.
PARAMETERS
ncsMemMap:
memmap of Ncs file
actualSampFreq:
actual sampling frequency used
reqFreq:
frequency to require in records
RETURN:
NcsSections object
"""
# check frequency in first record
if ncsMemMap['sample_rate'][0] != reqFreq:
raise IOError("Sampling frequency in first record doesn't agree with header.")
chanNum = ncsMemMap['channel_id'][0]
nb = NcsSections()
nb.sampFreqUsed = actualSampFreq
nb.microsPerSampUsed = NcsSectionsFactory.get_micros_per_samp_for_freq(actualSampFreq)
# check if file is one block of records, which is often the case, and avoid full parse
lastBlkI = ncsMemMap.shape[0] - 1
ts0 = ncsMemMap['timestamp'][0]
nb0 = ncsMemMap['nb_valid'][0]
predLastBlockStartTime = NcsSectionsFactory.calc_sample_time(actualSampFreq, ts0,
NcsSection._RECORD_SIZE *
lastBlkI)
lts = ncsMemMap['timestamp'][lastBlkI]
lnb = ncsMemMap['nb_valid'][lastBlkI]
if ncsMemMap['channel_id'][lastBlkI] == chanNum and \
ncsMemMap['sample_rate'][lastBlkI] == reqFreq and \
lts == predLastBlockStartTime:
lastBlkEndTime = NcsSectionsFactory.calc_sample_time(actualSampFreq, lts, lnb)
n_samples = NcsSection._RECORD_SIZE * lastBlkI
curBlock = NcsSection(0, ts0, lastBlkI, lastBlkEndTime, n_samples)
nb.sects.append(curBlock)
return nb
# otherwise need to scan looking for breaks
else:
blkOnePredTime = NcsSectionsFactory.calc_sample_time(actualSampFreq, ts0, nb0)
curBlock = NcsSection(0, ts0, -1, -1, -1)
nb.sects.append(curBlock)
return NcsSectionsFactory._parseGivenActualFrequency(ncsMemMap, nb, chanNum, reqFreq,
blkOnePredTime)
@staticmethod
def _parseForMaxGap(ncsMemMap, ncsSects, maxGapLen):
"""
Parse blocks of records from file, allowing a maximum gap in timestamps between records
in sections. Estimates frequency being used based on timestamps.
PARAMETERS
ncsMemMap:
memmap of Ncs file
ncsSects:
NcsSections object with sampFreqUsed set to nominal frequency to use in computing time
for samples (Hz)
maxGapLen:
maximum difference within a block between predicted time of start of record and
recorded time
RETURN:
NcsSections object with sampFreqUsed and microsPerSamp set based on estimate from
largest block
"""
chanNum = ncsMemMap['channel_id'][0]
recFreq = ncsMemMap['sample_rate'][0]
        # check for consistent channel_ids and sampling rates
        if not (ncsMemMap['channel_id'] == chanNum).all():
raise IOError('Channel number changed in records within file')
if not all(ncsMemMap['sample_rate'] == recFreq):
raise IOError('Sampling frequency changed in records within file')
# find most frequent number of samples
exp_nb_valid = np.argmax(np.bincount(ncsMemMap['nb_valid']))
# detect records with incomplete number of samples
gap_rec_ids = list(np.where(ncsMemMap['nb_valid'] != exp_nb_valid)[0])
rec_duration = 1e6 / ncsSects.sampFreqUsed * ncsMemMap['nb_valid']
pred_times = np.rint(ncsMemMap['timestamp'] + rec_duration).astype(np.int64)
max_pred_times = pred_times + maxGapLen
# data records that start later than the predicted time (including the
# maximal accepted gap length) are considered delayed and a gap is
# registered.
delayed_recs = list(np.where(max_pred_times[:-1] < ncsMemMap['timestamp'][1:])[0])
gap_rec_ids.extend(delayed_recs)
# cleaning extracted gap ids
# last record can not be the beginning of a gap
last_rec_id = len(ncsMemMap['timestamp']) - 1
if last_rec_id in gap_rec_ids:
gap_rec_ids.remove(last_rec_id)
# gap ids can only be listed once
gap_rec_ids = sorted(set(gap_rec_ids))
# create recording segments from identified gaps
ncsSects.sects.append(NcsSection(0, ncsMemMap['timestamp'][0], -1, -1, -1))
for gap_rec_id in gap_rec_ids:
curr_sec = ncsSects.sects[-1]
curr_sec.endRec = gap_rec_id
curr_sec.endTime = pred_times[gap_rec_id]
n_samples = np.sum(ncsMemMap['nb_valid'][curr_sec.startRec:gap_rec_id + 1])
curr_sec.n_samples = n_samples
next_sec = NcsSection(gap_rec_id + 1,
ncsMemMap['timestamp'][gap_rec_id + 1], -1, -1, -1)
ncsSects.sects.append(next_sec)
curr_sec = ncsSects.sects[-1]
curr_sec.endRec = len(ncsMemMap['timestamp']) - 1
curr_sec.endTime = pred_times[-1]
n_samples = np.sum(ncsMemMap['nb_valid'][curr_sec.startRec:])
curr_sec.n_samples = n_samples
# calculate the estimated frequency of the block with the most samples
max_blk_idx = np.argmax([bl.endRec - bl.startRec for bl in ncsSects.sects])
max_blk = ncsSects.sects[max_blk_idx]
maxBlkFreqEstimate = (max_blk.n_samples - ncsMemMap['nb_valid'][max_blk.endRec]) * 1e6 / \
                             (ncsMemMap['timestamp'][max_blk.endRec] - max_blk.startTime)
# Repository: safdark/advanced-lane-lines
'''
Created on Dec 23, 2016
@author: safdar
'''
from operations.baseoperation import Operation
import numpy as np
from _collections import deque
import cv2
from operations.perspectivetransformer import PerspectiveTransformer
import math
from utils.plotter import Image
from utils.plotter import Graph
# Constants:
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
class Lane(object):
def __init__(self, maxsize, yvals):
self.maxsize = maxsize
self.yvals = yvals
self.xs = deque(maxlen=maxsize)
self.fit = deque(maxlen=maxsize)
self.fitxs = deque(maxlen=maxsize)
self.curverad_ps = deque(maxlen=maxsize)
self.curverad_rs = deque(maxlen=maxsize)
self.confidences = deque(maxlen=maxsize)
def add(self, xs, fit, fitx, curverad_ps, curverad_rs, confidence):
        assert confidence is not None and (0 <= confidence <= 1.0), "Confidence value must always be between 0 and 1"
self.xs.append(xs)
self.fit.append(fit)
self.fitxs.append(fitx)
self.curverad_ps.append(curverad_ps)
self.curverad_rs.append(curverad_rs)
self.confidences.append(confidence)
def get_relativeconfidence(self):
if len(self.confidences) > 0:
return self.confidences[-1]
return None
def get_totalconfidence(self):
# This is relative to the scenario where all 'maxlen' frames' lanes are known
p = sum(self.confidences)
pnorm = p / self.maxsize
return pnorm
def get_curverad(self):
if len(self.curverad_ps) > 0:
return self.curverad_ps[-1], self.curverad_rs[-1]
return None, None
def getyvals(self):
return self.yvals
def getlatestx(self):
if len(self.xs) > 0:
return self.xs[-1][0]
return None
def getlatestfitxs(self):
if len(self.fitxs) > 0:
if not self.fitxs[-1] is None:
return list(self.fitxs[-1])
return None
def getlatestfitx(self, y):
if len(self.fitxs) > 0:
if not self.fitxs[-1] is None and len(self.fitxs[-1]) > 0:
return int(self.fitxs[-1][self.yvals.index(y)])
return None
def getxhistory(self):
return list(self.xs) # [item for sublist in self.xs for item in sublist]
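# A minimal usage sketch (hypothetical numbers, not part of the original
# pipeline) of how a Lane accumulates per-frame measurements: xs are the raw
# per-slice detections, fit the polynomial coefficients, fitx the fitted x
# positions evaluated at yvals, and confidence a value in [0, 1].
def _example_lane_usage():
    yvals = [720, 360, 0]
    lane = Lane(maxsize=5, yvals=yvals)
    lane.add(xs=[200, 210, 220], fit=[1e-4, -0.1, 250],
             fitx=[200.0, 212.0, 221.0], curverad_ps=1500.0,
             curverad_rs=480.0, confidence=0.9)
    return lane.getlatestfitx(720), lane.get_relativeconfidence()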
class Car(object):
def __init__(self, cameracenter_ps, cameracenter_rs):
self.__position__ = None #Distance from center. Left of center = -, right of center = +
self.__lanecenter_ps__ = None
self.__lanecenter_rs__ = None
self.__cameracenter_ps__ = cameracenter_ps
self.__cameracenter_rs__ = cameracenter_rs
def set_lanecenter(self, lanecenter_ps, lanecenter_rs):
self.__lanecenter_ps__ = lanecenter_ps
self.__lanecenter_rs__ = lanecenter_rs
def get_lanecenter(self):
return self.__lanecenter_ps__, self.__lanecenter_rs__
def get_cameracenter(self):
return self.__cameracenter_ps__, self.__cameracenter_rs__
# def get_cameracenter(self):
# return self.__cameracenter__
def get_drift(self):
if not self.__lanecenter_ps__ is None:
return self.__cameracenter_ps__ - self.__lanecenter_ps__, self.__cameracenter_rs__ - self.__lanecenter_rs__
return None, None
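# Illustrative sketch (hypothetical numbers): the car's drift is the signed
# difference between the camera center and the detected lane center, reported
# in pixel space (ps) and real space (rs, meters).
def _example_car_drift():
    car = Car(cameracenter_ps=640, cameracenter_rs=640 * xm_per_pix)
    car.set_lanecenter(lanecenter_ps=620, lanecenter_rs=620 * xm_per_pix)
    return car.get_drift()  # (20, ~0.106): camera sits right of the lane center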
class PeakInfo(object):
def __init__(self, peakrange, windowsize, timewindowctr, trackwindowctr, peak, value):
self.windowsize = windowsize
self.timewindowctr = timewindowctr
self.trackwindowctr = trackwindowctr
self.peakrange = peakrange
self.peak = peak
self.value = value
def found(self):
return self.peak is not None
# Detects the points on each lane and fits each with a polynomial function
# Also detects the position of the car relative to the center of the lane,
# based on the lane positions.
class LaneFinder(Operation):
# Config:
SliceRatio = 'SliceRatio'
PeakWindowRatio = 'PeakWindowRatio'
PeakRangeRatios = 'PeakRangeRatios'
LookBackFrames = 'LookBackFrames'
CameraPositionRatio = 'CameraPositionRatio'
DynamicVerticalSplit = 'DynamicVerticalSplit'
# Constants:
CurrentPointRadius = 30
CurrentPointColor = [255,0,0] #[50,205,50]
PreviousPointRadius = 2
PreviousPointColor = [0,0,0] #[152,251,152]
HitWindowColor = [152, 251, 152]
MissWindowColor = [255,160,122]
SliceMarkerColor = [139,139,131]
SliceMarkerThickness = 5
VerticalCenterColor = [255,255,224]
VerticalCenterThickness = 5
# Outputs
LeftLane = "LeftLane"
RightLane = "RightLane"
Vehicle = 'Vehicle'
def __init__(self, params):
Operation.__init__(self, params)
self.__slice_ratio__ = params[self.SliceRatio]
self.__peak_window_ratio__ = params[self.PeakWindowRatio]
self.__peak_range_ratios__ = params[self.PeakRangeRatios]
self.__look_back_frames__ = params[self.LookBackFrames]
self.__camera_position_ratio__ = params[self.CameraPositionRatio]
self.__use_dynamic_vertical_split__ = params[self.DynamicVerticalSplit]
self.__left_lane__ = None
self.__right_lane__ = None
def __processupstream__(self, original, latest, data, frame):
x_dim, y_dim = latest.shape[1], latest.shape[0]
if self.__left_lane__ is None:
self.__slice_len__ = int(y_dim * self.__slice_ratio__)
self.__peak_range__ = (int(self.__peak_range_ratios__[0] * self.__slice_len__), int(self.__peak_range_ratios__[1] * self.__slice_len__))
self.__peak_window_size__ = int(x_dim * self.__peak_window_ratio__)
self.__slices__ = []
self.__yvals__ = []
for idx in range (y_dim, 0, -self.__slice_len__):
self.__slices__.append((idx, max(idx-self.__slice_len__, 0)))
self.__yvals__.append(idx)
if not 0 in self.__yvals__:
self.__yvals__.append(0)
self.__left_lane__ = Lane(self.__look_back_frames__, self.__yvals__)
self.__right_lane__ = Lane(self.__look_back_frames__, self.__yvals__)
self.__camera_position_ps__ = int(x_dim * self.__camera_position_ratio__)
self.__camera_position_rs__ = int(x_dim * self.__camera_position_ratio__ * xm_per_pix)
self.__vehicle__ = Car(self.__camera_position_ps__, self.__camera_position_rs__)
self.setdata(data, self.LeftLane, self.__left_lane__)
self.setdata(data, self.RightLane, self.__right_lane__)
self.setdata(data, self.Vehicle, self.__vehicle__)
if self.__use_dynamic_vertical_split__:
midx = self.find_midx(x_dim, self.__left_lane__, self.__right_lane__)
else:
midx = int(x_dim/2)
leftxs, rightxs, leftconfidence, rightconfidence, leftinfos, rightinfos = self.locate_peaks(latest, midx, self.__slices__, self.__left_lane__, self.__right_lane__, self.__peak_window_size__, self.__peak_range__)
# Prepare an illustration of the points:
# if self.isplotting():
# zeros1d = np.zeros_like(latest).astype(np.uint8)
# foreground = np.dstack((zeros1d, zeros1d, zeros1d))
# Things to plot:
# - History of X values with size proportional to confidence level
# - Last frame's fitted x
# - This frame's X values
# - Windows used to limit peak search
# - Lines for the slices
# Get previous points
all_leftx = self.__left_lane__.getxhistory()
all_rightx = self.__right_lane__.getxhistory()
# Plot illustration of peak search:
if self.isplotting():
black1d = np.zeros_like(latest, dtype=np.uint8)
foreground = np.dstack((black1d, black1d, black1d))
background = np.copy(self.getdata(data, PerspectiveTransformer.WarpedColor, PerspectiveTransformer))
# self.__plot__(frame, background, None, "Background", None)
# Draw the vertical center:
verticalcenter = np.copy(foreground)
cv2.line(verticalcenter, (midx, y_dim), (midx, 0), self.VerticalCenterColor, self.VerticalCenterThickness)
# self.__plot__(frame, verticalcenter, None, "VerticalCenter", None)
# Draw the slices:
slices = np.copy(foreground)
for cut in self.__slices__:
cv2.line(slices, (0, cut[0]), (x_dim, cut[0]), self.SliceMarkerColor, self.SliceMarkerThickness)
# self.__plot__(frame, slices, None, "Slices", None)
# Draw all the previous points found:
previouspoints = np.copy(foreground)
for j in range (len (all_leftx)):
for i in range (len (self.__slices__)):
y = self.__slices__[i][0]
leftx = all_leftx[j][i]
rightx = all_rightx[j][i]
if leftx is not None:
cv2.circle(previouspoints, (leftx, y), self.PreviousPointRadius, self.PreviousPointColor, -1)
if rightx is not None:
cv2.circle(previouspoints, (rightx, y), self.PreviousPointRadius, self.PreviousPointColor, -1)
# self.__plot__(frame, previouspoints, None, "PreviousPoints", None)
# Draw the windows:
timewindows = np.copy(foreground)
for i in range (len (self.__slices__)):
leftinfo, rightinfo = leftinfos[i], rightinfos[i]
cut = self.__slices__[i]
if leftinfo.timewindowctr is not None:
start = (max(leftinfo.timewindowctr-leftinfo.windowsize, 0), cut[0])
end = (min(leftinfo.timewindowctr+leftinfo.windowsize, midx), cut[1])
if leftinfo.found():
cv2.rectangle(timewindows, start, end, self.HitWindowColor, cv2.FILLED)
else:
cv2.rectangle(timewindows, start, end, self.MissWindowColor, cv2.FILLED)
if rightinfo.timewindowctr is not None:
start = (max(rightinfo.timewindowctr-rightinfo.windowsize, midx), cut[0])
end = (min(rightinfo.timewindowctr+rightinfo.windowsize, x_dim), cut[1])
if rightinfo.found():
cv2.rectangle(timewindows, start, end, self.HitWindowColor, cv2.FILLED)
else:
cv2.rectangle(timewindows, start, end, self.MissWindowColor, cv2.FILLED)
# self.__plot__(frame, timewindows, None, "Time Windows", None)
trackwindows = np.copy(foreground)
for i in range (len (self.__slices__)):
leftinfo, rightinfo = leftinfos[i], rightinfos[i]
cut = self.__slices__[i]
if leftinfo.trackwindowctr is not None:
start = (max(leftinfo.trackwindowctr-leftinfo.windowsize, 0), cut[0])
end = (min(leftinfo.trackwindowctr+leftinfo.windowsize, midx), cut[1])
if leftinfo.found():
cv2.rectangle(trackwindows, start, end, self.HitWindowColor, cv2.FILLED)
else:
cv2.rectangle(trackwindows, start, end, self.MissWindowColor, cv2.FILLED)
if rightinfo.trackwindowctr is not None:
start = (max(rightinfo.trackwindowctr-rightinfo.windowsize, midx), cut[0])
end = (min(rightinfo.trackwindowctr+rightinfo.windowsize, x_dim), cut[1])
if rightinfo.found():
cv2.rectangle(trackwindows, start, end, self.HitWindowColor, cv2.FILLED)
else:
cv2.rectangle(trackwindows, start, end, self.MissWindowColor, cv2.FILLED)
# self.__plot__(frame, trackwindows, None, "Track Windows", None)
# Draw the found points
foundpoints = np.copy(foreground)
for i in range (len (self.__slices__)):
leftinfo, rightinfo = leftinfos[i], rightinfos[i]
cut = self.__slices__[i]
if leftinfo.found():
cv2.circle(foundpoints, (leftinfo.peak, cut[0]-leftinfo.value), self.CurrentPointRadius, self.CurrentPointColor, -1)
if rightinfo.found():
cv2.circle(foundpoints, (rightinfo.peak, cut[0]-rightinfo.value), self.CurrentPointRadius, self.CurrentPointColor, -1)
# self.__plot__(frame, foundpoints, None, "FoundPoints", None)
# Add all layers to the bw lane image:
background = cv2.addWeighted(background, 1, verticalcenter, 0.3, 0)
background = cv2.addWeighted(background, 1, slices, 0.3, 0)
background = cv2.addWeighted(background, 0.5, previouspoints, 1, 0)
background = cv2.addWeighted(background, 1, trackwindows, 0.3, 0)
background = cv2.addWeighted(background, 1, timewindows, 0.3, 0)
background = cv2.addWeighted(background, 1, foundpoints, 0.7, 0)
self.__plot__(frame, Image("Peak Search Algorithm", background, None))
# Combine historical points with present before doing a fit:
all_leftx.append(leftxs)
all_rightx.append(rightxs)
all_leftys, all_leftxs = self.merge_prune(self.__yvals__, all_leftx)
all_rightys, all_rightxs = self.merge_prune(self.__yvals__, all_rightx)
# Find radius of curvature:
left_fit, left_fitx, right_fit, right_fitx, left_curverad_ps, right_curverad_ps, left_curverad_rs, right_curverad_rs = None, None, None, None, None, None, None, None
if len(all_leftys) > 0:
# Fit a second order polynomial to each lane line
left_fit, left_fitx = self.fit_polynomial(all_leftys, all_leftxs, np.array(self.__yvals__))
y_left_eval = np.max(all_leftys)
# Determine the curvature in pixel-space
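            # For a second-order fit x = A*y^2 + B*y + C, the radius of
            # curvature at y is R = (1 + (2*A*y + B)**2)**1.5 / |2*A|; it is
            # evaluated at the largest y (bottom of the image, nearest the
            # car), first in pixels and then in meters via the rescaled fit.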
left_curverad_ps = ((1 + (2*left_fit[0]*y_left_eval + left_fit[1])**2)**1.5) \
/np.absolute(2*left_fit[0])
# Determine curvature in real space
left_fit_cr = np.polyfit(all_leftys*ym_per_pix, all_leftxs*xm_per_pix, 2)
left_curverad_rs = ((1 + (2*left_fit_cr[0]*y_left_eval + left_fit_cr[1])**2)**1.5) \
/np.absolute(2*left_fit_cr[0])
if len(all_rightys)>0:
# Fit a second order polynomial to each lane line
right_fit, right_fitx = self.fit_polynomial(all_rightys, all_rightxs, np.array(self.__yvals__))
y_right_eval = np.max(all_rightys)
right_curverad_ps = ((1 + (2*right_fit[0]*y_right_eval + right_fit[1])**2)**1.5) \
/np.absolute(2*right_fit[0])
right_fit_cr = np.polyfit(all_rightys*ym_per_pix, all_rightxs*xm_per_pix, 2)
right_curverad_rs = ((1 + (2*right_fit_cr[0]*y_right_eval + right_fit_cr[1])**2)**1.5) \
/np.absolute(2*right_fit_cr[0])
# Save values:
self.__left_lane__.add(leftxs, left_fit, left_fitx, left_curverad_ps, left_curverad_rs, leftconfidence)
self.__right_lane__.add(rightxs, right_fit, right_fitx, right_curverad_ps, right_curverad_rs, rightconfidence)
lpos = self.__left_lane__.getlatestfitx(y_dim)
rpos = self.__right_lane__.getlatestfitx(y_dim)
if not lpos is None and not rpos is None:
lanecenter_ps = (lpos + rpos) / 2
lanecenter_rs = lanecenter_ps * xm_per_pix
self.__vehicle__.set_lanecenter(lanecenter_ps, lanecenter_rs)
return latest
def find_midx(self, xdim, leftlane, rightlane):
if leftlane is not None and rightlane is not None:
            leftfitxs, rightfitxs = leftlane.getlatestfitxs(), rightlane.getlatestfitxs()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
from marvin.codes import PASS, FAILED
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import (stopVirtualMachine,
stopRouter,
startRouter)
from marvin.lib.utils import (cleanup_resources,
get_process_status,
get_host_credentials)
from marvin.lib.base import (ServiceOffering,
VirtualMachine,
Account,
Template,
ServiceOffering,
NATRule,
NetworkACL,
FireWallRule,
PublicIPAddress,
NetworkOffering,
Network,
Router,
EgressFireWallRule)
from marvin.lib.common import (get_zone,
get_test_template,
get_domain,
list_virtual_machines,
list_networks,
list_configurations,
list_routers,
list_nat_rules,
list_publicIP,
list_firewall_rules,
list_hosts)
# Import System modules
import time
import logging
def check_router_command(virtual_machine, public_ip, ssh_command, check_string, test_case, retries=5):
result = 'failed'
try:
ssh = virtual_machine.get_ssh_client(ipaddress=public_ip, retries=retries)
result = str(ssh.execute(ssh_command))
except Exception as e:
test_case.fail("Failed to SSH into the Virtual Machine: %s" % e)
logging.debug("Result from SSH into the Virtual Machine: %s" % result)
return result.count(check_string)
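# A minimal sketch (hypothetical address, not part of the original suite) of
# how the helper above is used inside a test: it SSHes into the VM reachable
# at the given public IP, runs the command, and returns the number of times
# check_string appears in the output.
def _example_check_router_command(test_case, virtual_machine):
    hits = check_router_command(virtual_machine, "203.0.113.10",
                                "ping -c 3 8.8.8.8", " 0% packet loss",
                                test_case, retries=2)
    return hits > 0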
class TestRedundantIsolateNetworks(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger('TestRedundantIsolateNetworks')
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
cls.testClient = super(TestRedundantIsolateNetworks, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.template = get_test_template(cls.api_client, cls.zone.id, cls.hypervisor)
if cls.template == FAILED:
assert False, "get_test_template() failed to return template"
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
# Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.services["nw_off_persistent_RVR_egress_true"] = cls.services["nw_off_persistent_RVR"].copy()
cls.services["nw_off_persistent_RVR_egress_true"]["egress_policy"] = "true"
cls.services["nw_off_persistent_RVR_egress_false"] = cls.services["nw_off_persistent_RVR"].copy()
cls.services["nw_off_persistent_RVR_egress_false"]["egress_policy"] = "false"
cls.services["egress_80"] = {
"startport": 80,
"endport": 80,
"protocol": "TCP",
"cidrlist": ["0.0.0.0/0"]
}
cls.services["egress_53"] = {
"startport": 53,
"endport": 53,
"protocol": "UDP",
"cidrlist": ["0.0.0.0/0"]
}
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.api_client, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"], required_hardware="true")
def test_01_RVR_Network_FW_PF_SSH_default_routes_egress_true(self):
""" Test redundant router internals """
self.logger.debug("Starting test_01_RVR_Network_FW_PF_SSH_default_routes_egress_true...")
self.logger.debug("Creating Network Offering with default egress TRUE")
network_offering_egress_true = NetworkOffering.create(
self.apiclient,
self.services["nw_off_persistent_RVR_egress_true"],
conservemode=True
)
network_offering_egress_true.update(self.api_client, state='Enabled')
self.logger.debug("Creating network with network offering: %s" % network_offering_egress_true.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=network_offering_egress_true.id,
zoneid=self.zone.id
)
self.logger.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.logger.debug("Deploying VM in account: %s" % self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.logger.debug("Deployed VM in network: %s" % network.id)
self.cleanup.insert(0, network_offering_egress_true)
self.cleanup.insert(0, network)
self.cleanup.insert(0, virtual_machine)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"VM should be in running state after deployment"
)
self.logger.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
public_ips = list_publicIP(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
zoneid=self.zone.id
)
public_ip = public_ips[0]
self.assertEqual(
isinstance(public_ips, list),
True,
"Check for list public IPs response return valid data"
)
self.logger.debug("Creating Firewall rule for VM ID: %s" % virtual_machine.id)
FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.id,
protocol=self.services["natrule"]["protocol"],
cidrlist=['0.0.0.0/0'],
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
self.logger.debug("Creating NAT rule for VM ID: %s" % virtual_machine.id)
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule"],
public_ip.id
)
        # Default egress policy is ALLOW, so traffic from the VM to the outside world should succeed
expected = 1
ssh_command = "ping -c 3 8.8.8.8"
check_string = " 0% packet loss"
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
self.assertEqual(
result,
expected,
"Ping to outside world from VM should be successful!"
)
expected = 1
ssh_command = "wget -t 1 -T 5 www.google.com"
check_string = "HTTP request sent, awaiting response... 200 OK"
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
self.assertEqual(
result,
expected,
"Attempt to retrieve google.com index page should be successful!"
)
EgressFireWallRule.create(
self.apiclient,
networkid=network.id,
protocol=self.services["egress_80"]["protocol"],
startport=self.services["egress_80"]["startport"],
endport=self.services["egress_80"]["endport"],
cidrlist=self.services["egress_80"]["cidrlist"]
)
expected = 0
ssh_command = "wget -t 1 -T 1 www.google.com"
check_string = "HTTP request sent, awaiting response... 200 OK"
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
self.assertEqual(
result,
expected,
"Attempt to retrieve google.com index page should NOT be successful once rule is added!"
)
return
@attr(tags=["advanced", "advancedns", "ssh"], required_hardware="true")
def test_02_RVR_Network_FW_PF_SSH_default_routes_egress_false(self):
""" Test redundant router internals """
self.logger.debug("Starting test_02_RVR_Network_FW_PF_SSH_default_routes_egress_false...")
self.logger.debug("Creating Network Offering with default egress FALSE")
network_offering_egress_false = NetworkOffering.create(
self.apiclient,
self.services["nw_off_persistent_RVR_egress_false"],
conservemode=True
)
network_offering_egress_false.update(self.api_client, state='Enabled')
self.logger.debug("Creating network with network offering: %s" % network_offering_egress_false.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=network_offering_egress_false.id,
zoneid=self.zone.id
)
self.logger.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.logger.debug("Deploying VM in account: %s" % self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.logger.debug("Deployed VM in network: %s" % network.id)
self.cleanup.insert(0, network_offering_egress_false)
self.cleanup.insert(0, network)
self.cleanup.insert(0, virtual_machine)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"VM should be in running state after deployment"
)
self.logger.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
public_ips = list_publicIP(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
zoneid=self.zone.id
)
self.assertEqual(
isinstance(public_ips, list),
True,
"Check for list public IPs response return valid data"
)
public_ip = public_ips[0]
self.logger.debug("Creating Firewall rule for VM ID: %s" % virtual_machine.id)
FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.id,
protocol=self.services["natrule"]["protocol"],
cidrlist=['0.0.0.0/0'],
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
self.logger.debug("Creating NAT rule for VM ID: %s" % virtual_machine.id)
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule"],
public_ip.id
)
expected = 0
ssh_command = "ping -c 3 8.8.8.8"
check_string = " 0% packet loss"
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
self.assertEqual(
result,
expected,
"Ping to outside world from VM should NOT be successful"
)
expected = 0
ssh_command = "wget -t 1 -T 1 www.google.com"
check_string = "HTTP request sent, awaiting response... 200 OK"
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
self.assertEqual(
result,
expected,
"Attempt to retrieve google.com index page should NOT be successful"
)
EgressFireWallRule.create(
self.apiclient,
networkid=network.id,
protocol=self.services["egress_80"]["protocol"],
startport=self.services["egress_80"]["startport"],
endport=self.services["egress_80"]["endport"],
cidrlist=self.services["egress_80"]["cidrlist"]
)
EgressFireWallRule.create(
self.apiclient,
networkid=network.id,
protocol=self.services["egress_53"]["protocol"],
startport=self.services["egress_53"]["startport"],
endport=self.services["egress_53"]["endport"],
cidrlist=self.services["egress_53"]["cidrlist"]
)
expected = 1
ssh_command = "wget -t 1 -T 5 www.google.com"
check_string = "HTTP request sent, awaiting response... 200 OK"
result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self)
self.assertEqual(
result,
expected,
"Attempt to retrieve google.com index page should be successful once rule is added!"
)
return
@attr(tags=["advanced", "advancedns", "ssh"], required_hardware="true")
def test_03_RVR_Network_check_router_state(self):
""" Test redundant router internals """
self.logger.debug("Starting test_03_RVR_Network_check_router_state...")
hypervisor = self.testClient.getHypervisorInfo()
self.logger.debug("Creating Network Offering with default egress FALSE")
network_offering_egress_false = NetworkOffering.create(
self.apiclient,
self.services["nw_off_persistent_RVR_egress_false"],
conservemode=True
)
network_offering_egress_false.update(self.apiclient, state='Enabled')
self.logger.debug("Creating network with network offering: %s" % network_offering_egress_false.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=network_offering_egress_false.id,
zoneid=self.zone.id
)
self.logger.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.logger.debug("Deploying VM in account: %s" % self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.logger.debug("Deployed VM in network: %s" % network.id)
self.cleanup.insert(0, network_offering_egress_false)
self.cleanup.insert(0, network)
self.cleanup.insert(0, virtual_machine)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
            "List Vms should return a valid list"
        )
# File: htmlapi_client.py
#
# Copyright (C) 2013 Comcast Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
import urllib2
import urlparse
import base64
from lxml import etree
def _normalize_whitespace(s):
return ' '.join(s.split())
def _extract_text_help(root, acc):
if root.text is not None and root.text.strip():
acc.append(_normalize_whitespace(root.text.strip()))
for child in root.getchildren():
acc = _extract_text_help(child, acc)
if child.tail is not None and child.tail.strip():
acc.append(_normalize_whitespace(child.tail.strip()))
return acc
def _extract_text(root):
return ' '.join(_extract_text_help(root,[]))
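# A small sketch (not part of the original client) of what the helpers above
# do: nested element text is flattened into a single whitespace-normalized
# string.
def _example_extract_text():
    root = etree.fromstring("<p>  Hello <b>micro\n data </b> world </p>")
    return _extract_text(root)  # -> "Hello micro data world"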
def _extract(elt, doc):
"""This function takes a given DOM node 'elt' and attempts to interpret
it as a Python value of some sort (possibly an object)."""
if 'itemtype' in elt.attrib or 'itemscope' in elt.attrib:
return MicrodataObject(elt, doc)
return _value(elt, doc)
def _value(elt, doc):
tag = elt.tag
if tag == 'a' and 'href' in elt.attrib:
href = elt.attrib['href']
if href.startswith('#'):
target = doc._doc.getroot().find(".//*[@id='%s']" % href[1:])
if target is not None: return _extract(target, doc)
else:
up = urlparse.urlparse(href)
# Get schema and host:port from the document url
doc_up = urlparse.urlparse(doc._url)
scheme = up.scheme if up.scheme else doc_up.scheme
netloc = up.netloc if up.netloc else doc_up.netloc
remote_doc = enter(urlparse.urlunparse((scheme, netloc, up.path, up.params, up.query, '')))
#print remote_doc.__dict__
#print remote_doc.objects
if up.fragment:
target = remote_doc._doc.getroot().find(".//*[@id='%s']" % up.fragment)
if target is not None: return _extract(target, remote_doc)
if len(remote_doc.objects) == 1: return remote_doc.objects[0]
return _extract(remote_doc._doc.getroot(), remote_doc)
if tag == 'img': return elt.attrib['src']
if tag == 'meta': return elt.attrib['content']
return _extract_text(elt)
def _value_of(doc, fragment=''):
if fragment:
target = doc._doc.getroot().find(".//*[@id='%s']" % fragment)
if target is not None: return _extract(target, doc)
if len(doc.objects) == 1: return doc.objects[0]
if len(doc.objects) > 0: return doc.objects
return _extract(doc._doc.getroot(), doc)
class Link(object):
"""Links are basically a representation of HTML <a> tags. The main
thing you can do with a Link is to follow it."""
def __init__(self, elt, doc):
self._elt = elt
self._doc = doc
def __repr__(self):
return "<Link %s at 0x%x>" % (self._elt.attrib['href'], id(self))
def follow(self):
href = self._elt.attrib['href']
resolved = urlparse.urljoin(self._doc._url, href)
up = urlparse.urlparse(resolved)
resolved_base = urlparse.urlunparse((up.scheme, up.netloc, up.path,
up.params, up.query, ''))
if resolved_base == self._doc._url:
# local
return _value_of(self._doc, up.fragment)
else:
# remote
remote_doc = enter(resolved_base)
return _value_of(remote_doc, up.fragment)
class Form(object):
"""Forms are a representation of an HTML <form> tag. Then main thing
you can do with a form is to 'submit' one by providing a dictionary
of key-value pairs corresponding to the values to supply to the form's
<input> elements. N.B. This is not fully implemented per the HTML spec,
as we only support <input> and not, for example, <textarea> or <select>
at this point. The other useful thing you can do with a Form is to ask
it for its .params field, which returns a list of the input names
provided."""
def __init__(self, elt, doc):
self._elt = elt
self._doc = doc
def __repr__(self):
if 'data-rel' not in self._elt.attrib:
return "<Form at 0x%x>" % id(self)
return "<Form %s at 0x%x>" % (self._elt.attrib['data-rel'], id(self))
def _set_value_for(self, elt, args, params):
if 'name' not in elt.attrib: return
name = elt.attrib['name']
if name in args:
params[name] = args[name]
else:
if 'value' in elt.attrib:
params[name] = elt.attrib['value']
else:
params[name] = ""
def _get_params(self):
out = []
for elt in self._elt.findall(".//input"):
if 'type' in elt.attrib and elt.attrib['type'] == 'hidden':
continue
if 'name' in elt.attrib: out.append(elt.attrib['name'])
for elt in self._elt.findall(".//textarea"):
if 'type' in elt.attrib and elt.attrib['type'] == 'hidden':
continue
if 'name' in elt.attrib: out.append(elt.attrib['name'])
return out
params = property(_get_params)
def _build_params(self, args):
params = {}
for elt in self._elt.findall(".//textarea"):
self._set_value_for(elt, args, params)
for elt in self._elt.findall(".//input"):
self._set_value_for(elt, args, params)
return urllib.urlencode(params)
def submit(self, args={}):
action = urlparse.urljoin(self._doc._url, self._elt.attrib['action'])
params = self._build_params(args)
if 'method' not in self._elt.attrib or self._elt.attrib['method'] == 'GET':
up = urlparse.urlparse(action)
if up.params: allparams = "%s&%s" % (up.params, params)
else: allparams = params
where = urlparse.urlunparse((up.scheme, up.netloc, up.path,
up.params, allparams, ''))
return enter(where)
else:
print "POST", action, "...",
f = urllib2.urlopen(action, params)
print "OK"
return MicrodataDocument(f, action)
class MicrodataObject(object):
"""This represents a particular semantic object, i.e. something identified
by an @itemscope attribute. MicrodataObjects have several useful properties
besides their actual semantic @itemprop properties:
.props = return names of (local) microdata @itemprop properties
.itemtype = return the @itemtype of this object
.links = return a list of Link objects contained by this object
.forms = return a list of Form objects contained by this object
There is also a shortcut method .submit() that will submit the first
contained form with the given link relation (as notated by the @data-rel
attribute)."""
def __init__(self, root, doc):
self._root = root
self._doc = doc
self._propmap = None
self._linkmap = None
self._formmap = None
self._orphan_forms = None
def __repr__(self):
t = self.itemtype
if t is None: return "<untyped at 0x%x>" % id(self)
return "<%s at 0x%x>" % (self.itemtype, id(self))
def _dfs_build_help(self, elt):
if 'itemprop' in elt.attrib:
prop = elt.attrib['itemprop']
if prop not in self._propmap: self._propmap[prop] = []
self._propmap[prop].append(elt)
if 'itemscope' in elt.attrib: return
for child in elt.getchildren():
self._dfs_build_help(child)
def _dfs_form_help(self, elt):
if elt.tag == 'form':
if 'data-rel' in elt.attrib:
rel = elt.attrib['data-rel']
if rel not in self._formmap: self._formmap[rel] = []
self._formmap[rel].append(Form(elt, self._doc))
else:
self._orphan_forms.append(Form(elt, self._doc))
if 'itemscope' in elt.attrib: return
for child in elt.getchildren():
self._dfs_form_help(child)
def _build_formmap(self):
self._formmap = {}
self._orphan_forms = []
for child in self._root.getchildren():
self._dfs_form_help(child)
def _dfs_link_help(self, elt):
if elt.tag == 'a' and 'rel' in elt.attrib:
rel = elt.attrib['rel']
if rel not in self._linkmap: self._linkmap[rel] = []
self._linkmap[rel].append(Link(elt, self._doc))
if 'itemscope' in elt.attrib: return
for child in elt.getchildren():
self._dfs_link_help(child)
def _build_linkmap(self):
self._linkmap = {}
for child in self._root.getchildren():
self._dfs_link_help(child)
def _build_propmap(self):
self._propmap = {}
for child in self._root.getchildren():
self._dfs_build_help(child)
def _get_propmap(self):
if self._propmap is None: self._build_propmap()
return self._propmap
def __len__(self): return self._get_propmap().__len__()
def __contains__(self,x): return self._get_propmap().__contains__(x)
def __iter__(self): return self._get_propmap().__iter__()
def get_property(self, prop, raw=False, allow_multi=True):
propmap = self._get_propmap()
if prop not in propmap:
self_link = self.get_links("self", raw=False, allow_multi=False)
if self_link is not None:
alt = self_link.follow()
if alt is not None and type(alt) == MicrodataObject:
return alt.get_property(prop, raw, allow_multi)
return None
vals = propmap[prop]
if not raw:
vals = map(lambda v : _extract(v, self._doc), vals)
if len(vals) == 0: return None
if len(vals) == 1 or not allow_multi: return vals[0]
return vals
@property
def value(self):
return _value(self._root, self._doc)
def get_props(self):
return self._get_propmap().keys()
props = property(get_props)
def get_itemtype(self):
if 'itemtype' not in self._root.attrib: return None
return self._root.attrib['itemtype']
itemtype = property(get_itemtype)
def _get_linkmap(self):
if self._linkmap is None: self._build_linkmap()
return self._linkmap
links = property(_get_linkmap)
def _get_formmap(self):
if self._formmap is None: self._build_formmap()
return self._formmap
forms = property(_get_formmap)
def submit(self, rel, args):
return self.forms[rel][0].submit(args)
def get_links(self, rel, raw=False, allow_multi=True):
linkmap = self._get_linkmap()
if rel not in linkmap: return None
links = linkmap[rel]
if raw:
return map(lambda l : l._elt, links)
if len(links) == 0: return None
if len(links) == 1 or not allow_multi: return links[0]
        return links
def __getitem__(self, name):
return self.get_property(name, raw=False, allow_multi=True)
def __getattr__(self, name):
return self.get_property(name, raw=False, allow_multi=True)
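# A hedged usage sketch (the URL, property name and link relation are
# illustrative assumptions): top-level microdata objects expose @itemprop
# values as keys/attributes, typed links via get_links()/.links, and
# @data-rel forms via .forms.
def _example_microdata_object(url):
    doc = enter(url)                 # enter() is this module's entry point
    obj = doc.objects[0]             # first top-level MicrodataObject
    title = obj['name']              # same as obj.name
    nxt = obj.get_links('next', allow_multi=False)
    return title, (nxt.follow() if nxt is not None else None)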
class MicrodataDocument:
"""MicrodataDocuments represent a client application state, usually the
result of evaluating an entry point via enter(), following a Link, or
submitting a Form. Useful properties include:
.forms = return all @data-rel annotated forms
.allforms = return all <form> elements regardless of annotation
.links = return all top-level Links (<a> tags, not <link> tags at the
moment)
.objects = returns all top-level MicrodataObjects (ones that are not
enclosed by another MicrodataObject)
Plus the following convenience methods:
.follow(rel) = follow the first Link with the given link relation
.submit(rel, args) = submit the first Form with the given link relation,
using the 'args' dictionary to supply values for the input elements"""
def __init__(self, f, url):
parser = etree.HTMLParser()
self._doc = etree.parse(f, parser)
self._url = url
def _dfs_help(self, root, acc):
if 'itemtype' | |
minimum score for mapping to genome or past-end
minIdentity: (float) minimum %identity for mapping to genome or past-end
tileSize: (int) size of an alignment tile
stepSize: (int) distance between the starting bases of alignment tiles
(will overlap if stepSize<tileSize)
output_fp: (str) path to output_file
The below are used for Handle Genome BLAT
nPastEndTrumps: (int) hit to past-end (close to) as good as hit to genome
nPastEndIgnored: (int) weak hit to past-end ignored
nMapUnique: (int)
nMapped: (int)
nameToBarcode: (dict)
delta: (int) minimum difference in score for considering a hit unique.
Returns:
OUTPUT file handle for output file
"""
b8_d = inp_dict
b8_d['queriesFile'] = inp_dict['tmpFNA_fp']
b8_d['dbFile'] = inp_dict['genome_fp']
blat8_fp = os.path.join(inp_dict['tmp_dir'], 'blat8_genome')
b8_d['blat8_fp'] = blat8_fp
RunBLAT8(b8_d)
logging.info("Parsing " + blat8_fp)
b = open(blat8_fp, "r")
# OUTPUT is file handle for output
OUTPUT = open(inp_dict['output_fp'], "w")
# Handle Genome BLAT Dict
HG_d = {
"nPastEndTrumps": inp_dict[ "nPastEndTrumps"],
"nPastEndIgnored": inp_dict[ "nPastEndIgnored"],
"nMapUnique": inp_dict[ "nMapUnique"],
"nMapped": inp_dict[ "nMapped"],
"nameToBarcode": inp_dict[ "nameToBarcode"],
"delta": inp_dict[ "delta"],
"OUTPUT": OUTPUT
}
lines = []
c_line = b.readline()
while c_line != "":
# Removing newline \n
c_line = c_line.rstrip()
F = c_line.split('\t')
# query name, subject name, identity, length, mismatches, number gaps,
# query Beginning, query End, subject Beginning loc, subject End loc, e-value, bit score
query, subject, identity, l, mm, gs, qB, qE, sB, sE, evl, score = F
# Sometimes there is the same query twice in a row
if len(lines) == 0 or query == lines[0][0]:
lines.append(F)
else:
# We have a new query, so existing query will be "Handled", and new
# query will be added to "lines"
HandleGenomeBLAT(lines, inp_dict['hitsPastEnd'], HG_d,
inp_dict['debug'])
# We make lines contain the new query
lines = [F]
c_line = b.readline()
# We run it a last time on the remaining lines
HandleGenomeBLAT(lines, inp_dict['hitsPastEnd'], HG_d, inp_dict['debug'])
b.close()
if not inp_dict['keepblat8']:
os.unlink(blat8_fp)
# We return file handle, and HG_d (Handle Genome Dict)
return [OUTPUT, HG_d]
# This function finds barcodes and end of transposon and writes remaining
# sequence to TMP FNA
# FASTQ must already be unzipped
def find_barcodes_and_end_of_transposon(fastq_fp, model_str, flanking,
wobbleAllowed, minQuality,
minScore, maxReads,
debug=False
):
"""
This function does the following:
Reads either the entire FASTQ input file or up to "MaxReads"*4 lines (*4 accounts for the
4 lines per read).
First it makes sure the FASTQ sequence is properly formatted - every sequence
has at least a single nucleotide. Additionally
the sequence and quality must be the same length, and
the sequence must be longer than the model and the 'minScore'
variable.
Then it takes the sequence and the model and looks for the "flanking"
parts of the model in the sequence, so for example: 5 nucleotides
to the left and right of the barcode in the model, in the sequence.
It then checks the quality of the barcode reading and compares each nt
to the variable "minQuality". If the barcode length and quality
are good, then it keeps the barcode and its start location.
Then it maps the "short" name (name of seq before space) to the barcode
in the dict "nameToBarcode".
Then it looks for the end of the model within the sequence in the function
FindModelEnd. If we find the end of the model in the sequence, then we
write the shortname and the part of the sequence after the model to the
file "TMPFNA". So TMPFNA contains shortname of sequence and part of sequence
after the model in a FASTA file format:
>Name\nSequenceAfterModel\n>Name\nSeq...
unmapped keeps track of unmapped sequences?
TRUNC is a truncated version of the fastq file with only good sequences
from the end of the Model and the barcode is maintained in the name
Args:
fastq_fp (str) FASTQ Filepath Read from
model_str (str): Model string, not pastEnd part
        maxReads (int or None) maximum number of reads to process (None = no limit)
        minScore (int) The minimum number of nucleotides of sequence beyond the model
            needed.
wobbleAllowed (int) uncertainty in location of barcode or end of transposon,
on either side of expectation
minQuality (int) every nucleotide in a barcode must be at least this quality
flanking (int) number of nucleotides on each side that must match
debug (bool)
# (deprecated) trunc_fp (str) Write to
# (deprecated) tmpFNA_fp (str) Write to
(deprecated) unmapped_fp (str or None) Write to (fp)
Returns:
ret_d (d):
nReads (int): Number of reads gone through
nModelStartFound (int): Number of reads with start of model found
nModelEndFound (int): Number of reads with end of model found
"""
# We un-dict this variable to allow for lighter programming
nameToBarcode = {}
# FILE HANDLES (Note: read, write, write)
FASTQ = open(fastq_fp, "r")
#TMPFNA = open(inp_dict['tmpFNA_fp'], "w")
#TRUNC = open(inp_dict['trunc_fp'], "w")
# Current number of reads read
nReads = 0
# How many reads are candidates for mapping
nTryToMap = 0
# Current number of Long hits -
# this is defined by: the length of the sequence vs length of model and
# minScore
nLong = 0
nModelStartFound = 0
nModelEndFound = 0
# Start and End of barcode within model: (important that start of model is 'n', not 'N')
barcodeStart = model_str.find("N")
barcodeEnd = model_str.rfind("N")
barcodeLen = barcodeEnd - barcodeStart + 1
if barcodeStart == 0:
raise Exception("Barcode starts at beginning of model, should be in the middle.")
for i in range(barcodeLen):
if model_str[barcodeStart + i] != "N":
raise Exception("Barcode should be consecutive Ns, found a non-N in between edge 'N's."
" Supposed barcode range: " + model_str[barcodeStart:barcodeEnd + 1])
# The following dict is used for the "Find" functions:
# FindBarcode & FindModelEnd. We use fastq_fp to print
# out debug statements
Find_cfg_d = {
"flanking": flanking,
"wobbleAllowed": wobbleAllowed,
"minQuality": minQuality,
"fastq_fp": fastq_fp,
"debug": debug
}
# This is to keep track of location in FASTQ files for errors
line_num = 0
minReadLength = len(model_str) + minScore
# We break out of while loop if file ends (name == '')
while (maxReads is None or (nReads < maxReads)):
name = FASTQ.readline()
line_num += 1
if name == '':
# If the file ends we break out of the loop
break
name = name.rstrip()
if not name[0] == '@':
raise Exception("Sequence name line does not start with @. File: "
"{}, Line no. {}".format( fastq_fp, line_num))
seq = FASTQ.readline()
line_num += 1
seq = seq.rstrip()
if not len(seq) > 0:
raise Exception("Sequence line is empty. File {}, Line {}".format(
fastq_fp, line_num))
if not re.match(r'^[A-Z]+$', seq):
raise Exception("Sequence line contains invalid chars: " + seq \
+ " \n File {}, Line {}".format(
fastq_fp, line_num) )
break_line = FASTQ.readline()
line_num += 1
if not break_line[0] == '+':
raise Exception("Third line does not start with +")
quality = FASTQ.readline()
quality = quality.rstrip()
line_num += 1
if not (len(quality) == len(seq)):
raise Exception("Quality line is wrong length. "
" File {}, Line {}".format(
fastq_fp, line_num) )
# Ignore second side of paired-end reads
if re.match(r'^\S+ 2:', name):
continue
nReads += 1
# Short sequences are unmappable
if not len(seq) > minReadLength:
continue
nLong += 1
# We keep track of location within FASTQ file for Debugging purposes
Find_cfg_d['line_num'] = line_num
# obsStart is start of barcode within sequence
# str, int. This function returns [None, None] if the
# quality or length fails.
barcode, obsStart = FindBarcode(seq, quality, model_str,
barcodeStart, barcodeEnd,
Find_cfg_d)
if barcode is None:
continue
else:
nModelStartFound += 1
# We create a shortname which removes " " to end of "name" of sequence
# e.g. "@M00361:58:000000000-C9BPW:1:1102:11648:1000 1:N:0:GCCAAT"
# becomes "@M00361:58:000000000-C9BPW:1:1102:11648:1000"
shortname = re.sub(r' .*$', '', name)
if shortname in nameToBarcode:
raise Exception("Duplicate read name: {}\nFile {} line no. {}".format(
                shortname, fastq_fp, line_num - 3))
nameToBarcode[shortname] = barcode
        # We take start point of barcode in sequence and start point of barcode in
        # the model to locate where the model ends within the read (see FindModelEnd)
# File: cdci_osa_plugin/osa_lightcurve_query.py
"""
Overview
--------
general info about this module
Classes and Inheritance Structure
----------------------------------------------
.. inheritance-diagram::
Summary
---------
.. autosummary::
list of the module you want
Module API
----------
"""
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object, map, zip)
__author__ = "<NAME>"
# Standard library
# eg copy
# absolute import rg:from copy import deepcopy
import os
# Dependencies
# eg numpy
# absolute import eg: import numpy as np
# Project
# relative import eg: from .mod import f
import ddaclient as dc
# Project
# relative import eg: from .mod import f
import numpy as np
from numpy.lib.recfunctions import append_fields
from pathlib import Path
from astropy.io import fits as pf
from cdci_data_analysis.analysis.io_helper import FitsFile
from cdci_data_analysis.analysis.queries import LightCurveQuery
from cdci_data_analysis.analysis.products import LightCurveProduct, QueryProductList, QueryOutput
from cdci_data_analysis.analysis.io_helper import FilePath
from cdci_data_analysis.analysis.parameters import TimeDelta
from cdci_data_analysis.analysis.plot_tools import ScatterPlot
from cdci_data_analysis.analysis.exceptions import RequestNotUnderstood
from oda_api.data_products import NumpyDataProduct
from .osa_dataserve_dispatcher import OsaDispatcher, OsaQuery
from .osa_common_pars import DummyOsaRes, split_osa_version
class OsaLightCurve(LightCurveProduct):
def __init__(self,
name='osa_lc',
file_name=None,
data=None,
file_dir=None,
prod_prefix=None,
src_name='',
meta_data={}):
if meta_data == {} or meta_data is None:
self.meta_data = {'product': 'osa_lc',
'instrument': 'integral', 'src_name': src_name}
else:
self.meta_data = meta_data
self.meta_data['time'] = 'TIME'
self.meta_data['rate'] = 'RATE'
self.meta_data['rate_err'] = 'ERROR'
data.name = name
super(OsaLightCurve, self).__init__(name=name,
data=data,
name_prefix=prod_prefix,
file_dir=file_dir,
file_name=file_name,
meta_data=meta_data)
@staticmethod
def ensure_timedel(du):
# TODO: move to https://github.com/integral-observatory/ogip/
if 'TIMEDEL' in du.data:
print(
f"\033[31m TIMEDEL column already available in du: {du} \033[0m")
else:
timedel = du.header['TIMEDEL']
timepix = du.header['TIMEPIXR']
t_lc = du.data['TIME'] + (0.5 - timepix) * timedel
dt_lc = (timedel / 2) * np.ones(t_lc.shape)
for i in range(len(t_lc) - 1):
dt_lc[i + 1] = min(timedel / 2, t_lc[i + 1] - t_lc[i] - dt_lc[i])
_d = np.array(du.data)
_o = append_fields(_d, 'TIMEDEL', dt_lc*2)
du.data = _o.data
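# Illustrative sketch, not part of the original module: how ensure_timedel
# derives the per-bin TIMEDEL column when only the TIMEDEL/TIMEPIXR header
# keywords are present. All numbers below are hypothetical.
#
#   TIMEDEL = 100 s, TIMEPIXR = 0.0  (time stamps mark the bin start)
#   TIME    = [0, 100, 180]
#   t_lc    = TIME + (0.5 - 0.0) * 100 = [50, 150, 230]   (bin centres)
#   dt_lc   = [50, 50, min(50, 230 - 150 - 50) = 30]      (half widths; the
#             min() keeps consecutive bins from overlapping)
#
# The appended TIMEDEL column stores the full bin width, i.e. 2 * dt_lc
# = [100, 100, 60].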
@classmethod
def build_isgri_lc_from_ddosa_res(cls,
res,
src_name='',
prod_prefix='',
file_dir=None,
api=False):
lc_list = []
if file_dir is None:
file_dir = './'
if prod_prefix is None:
prod_prefix = ''
print(
f"\033[31m build_isgri_lc_from_ddosa_res: {res.extracted_sources} \033[0m")
for source_name, lightcurve_attr in res.extracted_sources:
meta_data = {}
input_lc_path = getattr(res, lightcurve_attr)
npd = NumpyDataProduct.from_fits_file(
input_lc_path, meta_data=meta_data)
print(
f"\033[31m build_isgri_lc_from_ddosa_res: {res.extracted_sources} \033[0m")
du = npd.get_data_unit_by_name('ISGR-SRC.-LCR')
if du is not None:
src_name = du.header['NAME']
meta_data['src_name'] = src_name
meta_data['time_bin'] = du.header['TIMEDEL']
out_file_name = Path(input_lc_path).resolve().stem
OsaLightCurve.ensure_timedel(du)
lc = cls(name='isgri_lc', data=npd, file_name=out_file_name, file_dir=file_dir, prod_prefix=prod_prefix,
src_name=src_name, meta_data=meta_data)
lc_list.append(lc)
return lc_list
@classmethod
def build_jemx_lc_from_ddosa_res(cls,
res,
src_name='',
prod_prefix='',
file_dir=None,
api=False):
lc_list = []
lc_path_list = [getattr(res, attr)
for attr in dir(res) if attr.startswith("lc_")]
src_name_list = [attr for attr in dir(res) if attr.startswith("lc_")]
src_name_list = [n.replace('lc_', '') for n in src_name_list]
src_name_list = [n.replace('_', ' ') for n in src_name_list]
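# Illustrative example with a hypothetical attribute name: a ddosa result
# exposing "lc_Crab_Nebula" contributes getattr(res, "lc_Crab_Nebula") to
# lc_path_list and the source name "Crab Nebula" to src_name_list.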
print('->', lc_path_list, src_name_list)
if file_dir is None:
file_dir = './'
if prod_prefix is None:
prod_prefix = ''
for source_name, input_lc_path in zip(src_name_list, lc_path_list):
meta_data = {}
npd = NumpyDataProduct.from_fits_file(
input_lc_path, meta_data=meta_data)
du = npd.get_data_unit_by_name('JMX2-SRC.-LCR')
if du is None:
du = npd.get_data_unit_by_name('JMX1-SRC.-LCR')
if du is None:
# warning, this one is empty (add to warning list)
continue
# raise RuntimeError('Missing data unit with light curve in the fits file')
if du is not None:
src_name = du.header['NAME']
meta_data['src_name'] = src_name
meta_data['time_bin'] = du.header['TIMEDEL']
out_file_name = Path(input_lc_path).resolve().stem
OsaLightCurve.ensure_timedel(du)
lc = cls(name='jemx_lc', data=npd, file_name=out_file_name, file_dir=file_dir, prod_prefix=prod_prefix,
src_name=src_name, meta_data=meta_data)
lc_list.append(lc)
return lc_list
def get_html_draw(self, plot=False):
#
npd = NumpyDataProduct.from_fits_file(self.file_path.path)
du = npd.get_data_unit_by_name('ISGR-SRC.-LCR')
if du is None:
du = npd.get_data_unit_by_name('JMX2-SRC.-LCR')
if du is None:
du = npd.get_data_unit_by_name('JMX1-SRC.-LCR')
if du is None:
raise RuntimeError('du with lc not found in fits file')
data = du.data
header = du.header
# filtering zero flux values
msk_non_zero = np.count_nonzero(
[data['RATE'], data['ERROR']], axis=0) > 0
data = data[msk_non_zero]
x = data['TIME']
dx = data['TIMEDEL']*0.5
y = data['RATE']
dy = data['ERROR']
try:
mjdref = header['mjdref'] + int(x.min())
except KeyError:
mjdref = header['MJDREF'] + int(x.min())
x = x - int(x.min())
sp = ScatterPlot(w=600, h=600, x_label='MJD-%d (days)' %
mjdref, y_label='Rate (cts/s)')
sp.add_errorbar(x, y, yerr=dy, xerr=dx)
footer_str = None
if self.name == 'jemx_lc':
exposure = np.sum(data['FRACEXP']) * du.header['TIMEDEL']
exposure *= 86400.
elif self.name == 'isgri_lc':
# TODO: clarify
# XAX_E is a XRONOS-specific non-OGIP keyword https://heasarc.gsfc.nasa.gov/lheasoft/xanadu/xronos/manual/node8.html
# exposure = np.sum(data['FRACEXP'] * du.header['XAX_E']) * 2
exposure = np.sum(data['FRACEXP'] * du.header['TIMEDEL'])
exposure *= 86400.
else:
# TODO update this option
footer_str = 'Exposure not evaluated for product %s' % self.name
if footer_str is None:
footer_str = 'Exposure %5.5f (s) \n' % exposure
try:
slope = None
normalized_slope = None
chisq_red = None
poly_deg = 0
p, chisq, chisq_red, dof, xf, yf = self.do_linear_fit(
x, y, dy, poly_deg, 'constant fit')
sp.add_line(xf, yf, 'constant fit', color='green')
if p is not None:
footer_str += '\n'
footer_str += 'Constant fit\n'
footer_str += 'flux level %5.5f (cts/s)\n' % p[0]
footer_str += 'dof ' + '%d' % dof + '\n'
footer_str += 'Chi-squared red. %5.5f\n' % chisq_red
except:
pass
try:
poly_deg = 1
p, chisq, chisq_red, dof, xf, yf = self.do_linear_fit(
x, y, dy, poly_deg, 'linear fit')
if p is not None:
footer_str += '\n'
footer_str += 'Linear fit\n'
footer_str += 'slope %5.5f\n' % p[0]
footer_str += 'dof ' + '%d' % dof + '\n'
footer_str += 'Chi-squared red. %5.5f\n' % chisq_red
sp.add_line(xf, yf, 'linear fit', color='orange')
except:
pass
html_dict = sp.get_html_draw()
res_dict = {}
res_dict['image'] = html_dict
res_dict['header_text'] = ''
res_dict['table_text'] = ''
res_dict['footer_text'] = footer_str
return res_dict
def do_linear_fit(self, x, y, dy, poly_deg, label):
p = None
chisq = None
chisq_red = None
dof = None
x_grid = None
y_grid = None
if y.size > poly_deg + 1:
p = np.polyfit(x, y, poly_deg)
x_grid = np.linspace(x.min(), x.max(), 100)
lin_fit = np.poly1d(p)
chisq = (lin_fit(x) - y) ** 2 / dy ** 2
dof = y.size - (poly_deg + 1)
chisq_red = chisq.sum() / float(dof)
y_grid = lin_fit(x_grid)
return p, chisq, chisq_red, dof, x_grid, y_grid
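# Illustrative sketch, not part of the original module: the statistics returned
# by do_linear_fit for a hypothetical constant fit (poly_deg=0).
#
#   y  = [10.0, 11.0, 9.5], dy = [1.0, 1.0, 1.0]
#   p  = np.polyfit(x, y, 0)             -> best-fit level ~10.17 (the mean)
#   chisq_red = sum((fit(x) - y)**2 / dy**2) / (y.size - (poly_deg + 1))
#             = (0.17**2 + 0.83**2 + 0.67**2) / 2  ~ 0.58
#
# get_html_draw() then reports the fitted level, dof and reduced chi-squared in
# the plot footer.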
class OSATimebin(TimeDelta):
def __init__(self,
value=None,
delta_T_format_name=None,
name=None):
self.t_bin_max_seconds = 4000.
super(OSATimebin, self).__init__(value=value,
delta_T_format_name=delta_T_format_name,
name=name)
@property
def value(self):
return self._astropy_time_delta.value
@value.setter
def value(self, v):
units = self.units
self._set_time(v, format=units)
print('setting time bin to', v)
if self._astropy_time_delta.sec > self.t_bin_max_seconds:
raise RuntimeError('Time bin max value exceeded =%f' %
self.t_bin_max_seconds)
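# Illustrative usage sketch (values hypothetical, assuming the time-bin format
# resolves to seconds): the setter above rejects bins longer than
# t_bin_max_seconds.
#
#   tb = OSATimebin(value=1000., name='time_bin',
#                   delta_T_format_name='time_bin_format')
#   tb.value = 500.    # accepted
#   tb.value = 5000.   # raises RuntimeError ('Time bin max value exceeded ...')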
class OsaLightCurveQuery(OsaQuery):
def __init__(self, name):
#super(OsaLightCurveQuery, self).__init__(name)
# TODO define TimeDelta parameter with max value = 3ks
# TODO done, verify
osa_time_bin = OSATimebin(
value=1000., name='time_bin', delta_T_format_name='time_bin_format')
parameters_list = [osa_time_bin]
super(OsaLightCurveQuery, self).__init__(name, parameters_list)
def get_data_server_query(self, instrument,
config=None):
scwlist_assumption, cat, extramodules, inject = OsaDispatcher.get_osa_query_base(
instrument)
E1 = instrument.get_par_by_name('E1_keV').value
E2 = instrument.get_par_by_name('E2_keV').value
src_name = instrument.get_par_by_name('src_name').value
delta_t = instrument.get_par_by_name(
'time_bin')._astropy_time_delta.sec
osa_version = instrument.get_par_by_name('osa_version').value
if (isinstance(self, JemxLightCurveQuery)):
jemx_num = instrument.get_par_by_name('jemx_num').value
target, modules, assume = self.set_instr_dictionaries(extramodules, scwlist_assumption, E1, E2, src_name,
delta_t, osa_version=osa_version, jemx_num=jemx_num)
else:
target, modules, assume = self.set_instr_dictionaries(extramodules, scwlist_assumption, E1, E2, src_name,
delta_t, osa_version=osa_version)
q = OsaDispatcher(config=config, instrument=instrument,
target=target, modules=modules, assume=assume, inject=inject)
return q
def set_instr_dictionaries(self, extramodules, scwlist_assumption, E1, E2, src_name, delta_t):
raise RuntimeError('Must be specified for each instrument')
def process_product_method(self, instrument, prod_list, api=False, **kw):
_names = []
_lc_path = []
_html_fig = []
_data_list = []
print("\033[31mprocess_product_method: prod_list",
prod_list.prod_list, "\033[0m")
for query_lc in prod_list.prod_list:
print('--> lc name', query_lc.name)
print('-->file name', query_lc.file_path.path)
query_lc.add_url_to_fits_file(
instrument._current_par_dic, url=instrument.disp_conf.products_url)
query_lc.write()
if api == False:
_names.append(query_lc.meta_data['src_name'])
_lc_path.append(str(query_lc.file_path.name))
_html_fig.append(query_lc.get_html_draw())
if api == True:
_data_list.append(query_lc.data)
query_out = QueryOutput()
if api == True:
query_out.prod_dictionary['numpy_data_product_list'] = _data_list
else:
query_out.prod_dictionary['name'] = _names
query_out.prod_dictionary['file_name'] = _lc_path
query_out.prod_dictionary['image'] = _html_fig
query_out.prod_dictionary['download_file_name'] = 'light_curve.fits.gz'
query_out.prod_dictionary['prod_process_message'] = ''
return query_out
class IsgriLightCurveQuery(OsaLightCurveQuery):
def __init__(self, name):
super(IsgriLightCurveQuery, self).__init__(name)
def build_product_list(self, instrument, res, out_dir, prod_prefix=None, api=False):
meta_data = {'product': 'light_curve',
'instrument': 'isgri', 'src_name': ''}
meta_data['query_parameters'] = self.get_parameters_list_as_json()
prod_list = OsaLightCurve.build_isgri_lc_from_ddosa_res(res,
prod_prefix=prod_prefix,
file_dir=out_dir,
api=api)
return prod_list
def get_dummy_products(self, instrument, config, out_dir='./', prod_prefix=None, api=False):
meta_data = {'product': 'light_curve',
'instrument': 'isgri', 'src_name': ''}
meta_data['query_parameters'] = self.get_parameters_list_as_json()
dummy_cache = config.dummy_cache
res = DummyOsaRes()
res.__setattr__('dummy_src', 'dummy_src')
res.__setattr__('dummy_lc', '%s/isgri_query_lc.fits' % dummy_cache)
res.__setattr__('extracted_sources', [('dummy_src', 'dummy_lc')])
prod_list = OsaLightCurve.build_isgri_lc_from_ddosa_res(res,
prod_prefix=prod_prefix,
file_dir=out_dir,
api=api)
prod_list = QueryProductList(prod_list=prod_list)
return prod_list
def set_instr_dictionaries(self, extramodules, scwlist_assumption, E1, E2, src_name, delta_t, osa_version="OSA10.2"):
#print('-->lc standard mode from scw_list', scwlist_assumption)
#print('-->src_name', src_name)
target = "ISGRILCSum"
osa_version_base, osa_subversion, osa_version_modifiers = split_osa_version(osa_version)
if extramodules is None:
extramodules = []
if osa_version_base == "OSA10.2":
modules = ["git://ddosa/staging-1-3", 'git://process_isgri_lc/staging'] + \
extramodules + ['git://ddosa_delegate/staging-1-3']
elif osa_version_base == "OSA11.0":
modules = ["git://ddosa/staging-1-3", "git://findic/staging-1-3-icversion", "git://ddosa11/icversion",
'git://process_isgri_lc/staging'] + extramodules + ['git://ddosa_delegate/staging-1-3']
elif osa_version_base == "OSA11.1":
modules = ["git://ddosa/staging-1-3", "git://findic/staging-1-3-icversion", "git://ddosa11/icversion",
'git://process_isgri_lc/staging'] + extramodules + ['git://ddosa_delegate/staging-1-3', "git://osa11p1/master"]
elif osa_version_base == "OSA11.2":
modules
"""Make BIDS compatible directory structures and infer meta data from MNE."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import errno
import shutil as sh
import pandas as pd
from collections import defaultdict, OrderedDict
import numpy as np
from mne import Epochs
from mne.io.constants import FIFF
from mne.io.pick import channel_type
from mne.io import BaseRaw
from mne.channels.channels import _unit2human
from mne.externals.six import string_types
from mne.utils import check_version
from datetime import datetime
from warnings import warn
from .pick import coil_type
from .utils import (make_bids_filename, make_bids_folders,
make_dataset_description, _write_json, _write_tsv,
_read_events, _mkdir_p, age_on_date,
copyfile_brainvision, copyfile_eeglab,
_infer_eeg_placement_scheme)
from .io import (_parse_ext, _read_raw, ALLOWED_EXTENSIONS)
ALLOWED_KINDS = ['meg', 'eeg', 'ieeg']
# Orientation of the coordinate system dependent on manufacturer
ORIENTATION = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.pdf': 'ALS',
'.ds': 'ALS'}
UNITS = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.pdf': 'm', '.ds': 'cm'}
meg_manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa',
'.fif': 'Elekta', '.pdf': '4D Magnes', '.ds': 'CTF',
'.meg4': 'CTF'}
eeg_manufacturers = {'.vhdr': 'BrainProducts', '.eeg': 'BrainProducts',
'.edf': 'Mixed', '.bdf': 'Biosemi', '.set': 'Mixed',
'.fdt': 'Mixed', '.cnt': 'Neuroscan'}
# Merge the manufacturer dictionaries in a python2 / python3 compatible way
MANUFACTURERS = dict()
MANUFACTURERS.update(meg_manufacturers)
MANUFACTURERS.update(eeg_manufacturers)
# List of synthetic channels by manufacturer that are to be excluded from the
# channel list. Currently this is only for stimulus channels.
IGNORED_CHANNELS = {'KIT/Yokogawa': ['STI 014'],
'BrainProducts': ['STI 014'],
'Mixed': ['STI 014'],
'Biosemi': ['STI 014'],
'Neuroscan': ['STI 014']}
def _channels_tsv(raw, fname, overwrite=False, verbose=True):
"""Create a channels.tsv file and save it.
Parameters
----------
raw : instance of Raw
The data as MNE-Python Raw object.
fname : str
Filename to save the channels.tsv to.
overwrite : bool
Whether to overwrite the existing file.
Defaults to False.
verbose : bool
Set verbose output to true or false.
"""
map_chs = defaultdict(lambda: 'OTHER')
map_chs.update(meggradaxial='MEGGRADAXIAL',
megrefgradaxial='MEGREFGRADAXIAL',
meggradplanar='MEGGRADPLANAR',
megmag='MEGMAG', megrefmag='MEGREFMAG',
eeg='EEG', misc='MISC', stim='TRIG', emg='EMG',
ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG')
map_desc = defaultdict(lambda: 'Other type of channel')
map_desc.update(meggradaxial='Axial Gradiometer',
megrefgradaxial='Axial Gradiometer Reference',
meggradplanar='Planar Gradiometer',
megmag='Magnetometer',
megrefmag='Magnetometer Reference',
stim='Trigger', eeg='ElectroEncephaloGram',
ecog='Electrocorticography',
seeg='StereoEEG',
ecg='ElectroCardioGram',
eog='ElectroOculoGram',
emg='ElectroMyoGram',
misc='Miscellaneous')
get_specific = ('mag', 'ref_meg', 'grad')
# get the manufacturer from the file in the Raw object
manufacturer = None
if hasattr(raw, 'filenames'):
# XXX: Hack for EEGLAB bug in MNE-Python 0.16; fixed in MNE-Python
# 0.17, ... remove the hack after upgrading dependencies in MNE-BIDS
if raw.filenames[0] is None: # hack
ext = '.set' # hack
else:
_, ext = _parse_ext(raw.filenames[0], verbose=verbose)
manufacturer = MANUFACTURERS[ext]
ignored_indexes = [raw.ch_names.index(ch_name) for ch_name in raw.ch_names
if ch_name in
IGNORED_CHANNELS.get(manufacturer, list())]
status, ch_type, description = list(), list(), list()
for idx, ch in enumerate(raw.info['ch_names']):
status.append('bad' if ch in raw.info['bads'] else 'good')
_channel_type = channel_type(raw.info, idx)
if _channel_type in get_specific:
_channel_type = coil_type(raw.info, idx)
ch_type.append(map_chs[_channel_type])
description.append(map_desc[_channel_type])
low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])
units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']]
units = [u if u not in ['NA'] else 'n/a' for u in units]
n_channels = raw.info['nchan']
sfreq = raw.info['sfreq']
df = pd.DataFrame(OrderedDict([
('name', raw.info['ch_names']),
('type', ch_type),
('units', units),
('description', description),
('sampling_frequency', np.full((n_channels), sfreq)),
('low_cutoff', np.full((n_channels), low_cutoff)),
('high_cutoff', np.full((n_channels), high_cutoff)),
('status', status)]))
df.drop(ignored_indexes, inplace=True)
_write_tsv(fname, df, overwrite, verbose)
return fname
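# Illustrative sketch of the channels.tsv produced above (rows hypothetical):
#
#   name     type    units  description           sampling_frequency  low_cutoff  high_cutoff  status
#   MEG 011  MEGMAG  T      Magnetometer          1000.0              0.1         330.0        good
#   EEG 001  EEG     V      ElectroEncephaloGram  1000.0              0.1         330.0        bad
#
# Channels listed in IGNORED_CHANNELS for the detected manufacturer (e.g. the
# 'STI 014' stimulus channel) are dropped from the table before writing.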
def _events_tsv(events, raw, fname, trial_type, overwrite=False,
verbose=True):
"""Create an events.tsv file and save it.
This function will write the mandatory 'onset', and 'duration' columns as
well as the optional 'event_value' and 'event_sample'. The 'event_value'
corresponds to the marker value as found in the TRIG channel of the
recording. In addition, the 'trial_type' field can be written.
Parameters
----------
events : array, shape = (n_events, 3)
The first column contains the event time in samples and the third
column contains the event id. The second column is ignored for now but
typically contains the value of the trigger channel either immediately
before the event or immediately after.
raw : instance of Raw
The data as MNE-Python Raw object.
fname : str
Filename to save the events.tsv to.
trial_type : dict | None
Dictionary mapping a brief description key to an event id (value). For
example {'Go': 1, 'No Go': 2}.
overwrite : bool
Whether to overwrite the existing file.
Defaults to False.
verbose : bool
Set verbose output to true or false.
Notes
-----
The function writes durations of zero for each event.
"""
# Start by filling all data that we know into a df
first_samp = raw.first_samp
sfreq = raw.info['sfreq']
events[:, 0] -= first_samp
data = OrderedDict([('onset', events[:, 0]),
('duration', np.zeros(events.shape[0])),
('trial_type', events[:, 2]),
('event_value', events[:, 2]),
('event_sample', events[:, 0])])
df = pd.DataFrame.from_dict(data)
# Now check if trial_type is specified or should be removed
if trial_type:
trial_type_map = {v: k for k, v in trial_type.items()}
df.trial_type = df.trial_type.map(trial_type_map)
else:
df.drop(labels=['trial_type'], axis=1, inplace=True)
# Onset column needs to be specified in seconds
df.onset /= sfreq
_write_tsv(fname, df, overwrite, verbose)
return fname
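# Illustrative sketch (hypothetical values): with raw.first_samp=100,
# sfreq=1000.0, events=[[1100, 0, 1], [2100, 0, 2]] and
# trial_type={'Go': 1, 'No Go': 2}, the resulting events.tsv rows are
#
#   onset  duration  trial_type  event_value  event_sample
#   1.0    0.0       Go          1            1000
#   2.0    0.0       No Go       2            2000
#
# Without a trial_type mapping, the trial_type column is dropped entirely.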
def _participants_tsv(raw, subject_id, group, fname, overwrite=False,
verbose=True):
"""Create a participants.tsv file and save it.
This will append any new participant data to the current list if it
exists. Otherwise a new file will be created with the provided information.
Parameters
----------
raw : instance of Raw
The data as MNE-Python Raw object.
subject_id : str
The subject name in BIDS compatible format ('01', '02', etc.)
group : str
Name of group participant belongs to.
fname : str
Filename to save the participants.tsv to.
overwrite : bool
Whether to overwrite the existing file.
Defaults to False.
If there is already data for the given `subject_id` and overwrite is
False, an error will be raised.
verbose : bool
Set verbose output to true or false.
"""
subject_id = 'sub-' + subject_id
data = {'participant_id': [subject_id]}
subject_info = raw.info['subject_info']
if subject_info is not None:
genders = {0: 'U', 1: 'M', 2: 'F'}
sex = genders[subject_info.get('sex', 0)]
# determine the age of the participant
age = subject_info.get('birthday', None)
meas_date = raw.info.get('meas_date', None)
if isinstance(meas_date, (tuple, list, np.ndarray)):
meas_date = meas_date[0]
if meas_date is not None and age is not None:
bday = datetime(age[0], age[1], age[2])
meas_datetime = datetime.fromtimestamp(meas_date)
subject_age = age_on_date(bday, meas_datetime)
else:
subject_age = "n/a"
data.update({'age': [subject_age], 'sex': [sex], 'group': [group]})
df = pd.DataFrame(data=data,
columns=['participant_id', 'age', 'sex', 'group'])
if os.path.exists(fname):
orig_df = pd.read_csv(fname, sep='\t')
# whether the data exists identically in the current DataFrame
exact_included = df.values.tolist()[0] in orig_df.values.tolist()
# whether the subject id is in the existing DataFrame
sid_included = subject_id in orig_df['participant_id'].values
# if the subject data provided is different to the currently existing
# data and overwrite is not True raise an error
if (sid_included and not exact_included) and not overwrite:
raise OSError(errno.EEXIST, '"%s" already exists in the '
'participant list. Please set overwrite to '
'True.' % subject_id)
# otherwise add the new data
df = orig_df.append(df)
# and drop any duplicates as we want overwrite = True to force the old
# data to be overwritten
df.drop_duplicates(subset='participant_id', keep='last',
inplace=True)
df = df.sort_values(by='participant_id')
# overwrite is forced to True as all issues with overwrite == False have
# been handled by this point
_write_tsv(fname, df, True, verbose)
return fname
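# Illustrative note (hypothetical data): if participants.tsv already contains a
# row for 'sub-01' with different values, overwrite=False raises
# OSError(EEXIST), whereas overwrite=True keeps only the new row because
# append() followed by drop_duplicates(keep='last') on 'participant_id'
# discards the older entry.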
def _scans_tsv(raw, raw_fname, fname, overwrite=False, verbose=True):
"""Create a scans.tsv file and save it.
Parameters
----------
raw : instance of Raw
The data as MNE-Python Raw object.
raw_fname : str
Relative path to the raw data file.
fname : str
Filename to save the scans.tsv to.
overwrite : bool
Defaults to False.
Whether to overwrite the existing data in the file.
If there is already data for the given `fname` and overwrite is False,
an error will be raised.
verbose : bool
Set verbose output to true or false.
"""
# get measurement date from the data info
meas_date = raw.info['meas_date']
if isinstance(meas_date, (tuple, list, np.ndarray)):
meas_date = meas_date[0]
acq_time = datetime.fromtimestamp(
meas_date).strftime('%Y-%m-%dT%H:%M:%S')
else:
acq_time = 'n/a'
df = pd.DataFrame(data={'filename': ['%s' % raw_fname],
'acq_time': [acq_time]},
columns=['filename', 'acq_time'])
if os.path.exists(fname):
orig_df = pd.read_csv(fname, sep='\t')
# if the file name is already in the file raise an error
if raw_fname in orig_df['filename'].values and not overwrite:
raise OSError(errno.EEXIST, '"%s" already exists in the '
'scans list. Please set overwrite to '
'True.' % raw_fname)
# otherwise add the new data
df
For other parameters, this is always `FALSE`.
:param str is_modified: Indicates how the parameter was modified. If an `ALTER SYSTEM` was performed, the value will be `MODIFIED`.
:param bool is_pdb_modifiable: Indicates whether the parameter can be modified on a per-PDB basis (`TRUE`) or not (`FALSE`). In a non-CDB, the value of this property is `null`.
:param bool is_session_modifiable: Indicates whether the parameter can be changed with `ALTER SESSION` (`TRUE`) or not (`FALSE`)
:param bool is_specified: Indicates whether the parameter was specified in the server parameter file (`TRUE`) or not (`FALSE`). Applicable only when the parameter source is `SPFILE`.
:param str is_system_modifiable: Indicates whether the parameter can be changed with `ALTER SYSTEM` and when the change takes effect:
* IMMEDIATE: Parameter can be changed with `ALTER SYSTEM` regardless of the type of parameter file used to start the instance. The change takes effect immediately.
* DEFERRED: Parameter can be changed with `ALTER SYSTEM` regardless of the type of parameter file used to start the instance. The change takes effect in subsequent sessions.
* FALSE: Parameter cannot be changed with `ALTER SYSTEM` unless a server parameter file was used to start the instance. The change takes effect in subsequent instances.
:param str name: A filter to return all parameters that have the text given in their names.
:param float number: The parameter number.
:param float ordinal: The position (ordinal number) of the parameter value. Useful only for parameters whose values are lists of strings.
:param str sid: The database instance SID for which the parameter is defined.
:param str type: The parameter type.
:param str update_comment: The comments associated with the most recent update.
:param str value: The parameter value.
"""
pulumi.set(__self__, "allowed_values", allowed_values)
pulumi.set(__self__, "category", category)
pulumi.set(__self__, "constraint", constraint)
pulumi.set(__self__, "container_id", container_id)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "display_value", display_value)
pulumi.set(__self__, "is_adjusted", is_adjusted)
pulumi.set(__self__, "is_basic", is_basic)
pulumi.set(__self__, "is_default", is_default)
pulumi.set(__self__, "is_deprecated", is_deprecated)
pulumi.set(__self__, "is_instance_modifiable", is_instance_modifiable)
pulumi.set(__self__, "is_modified", is_modified)
pulumi.set(__self__, "is_pdb_modifiable", is_pdb_modifiable)
pulumi.set(__self__, "is_session_modifiable", is_session_modifiable)
pulumi.set(__self__, "is_specified", is_specified)
pulumi.set(__self__, "is_system_modifiable", is_system_modifiable)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "number", number)
pulumi.set(__self__, "ordinal", ordinal)
pulumi.set(__self__, "sid", sid)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "update_comment", update_comment)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="allowedValues")
def allowed_values(self) -> Sequence['outputs.GetManagedDatabasesDatabaseParameterItemAllowedValueResult']:
"""
A list of allowed values for this parameter.
"""
return pulumi.get(self, "allowed_values")
@property
@pulumi.getter
def category(self) -> str:
"""
The parameter category.
"""
return pulumi.get(self, "category")
@property
@pulumi.getter
def constraint(self) -> str:
"""
Applicable in case of Oracle Real Application Clusters (Oracle RAC) databases. A `UNIQUE` parameter is one which is unique to each Oracle Real Application Clusters (Oracle RAC) instance. For example, the parameter `INSTANCE_NUMBER` must have different values in each instance. An `IDENTICAL` parameter must have the same value for every instance. For example, the parameter `DB_BLOCK_SIZE` must have the same value in all instances.
"""
return pulumi.get(self, "constraint")
@property
@pulumi.getter(name="containerId")
def container_id(self) -> float:
"""
The ID of the database container to which the data pertains. Possible values include:
* `0`: This value is used for data that pertain to the entire CDB. This value is also used for data in non-CDBs.
* `1`: This value is used for data that pertain to only the root container.
* `n`: Where n is the applicable container ID for the data.
"""
return pulumi.get(self, "container_id")
@property
@pulumi.getter
def description(self) -> str:
"""
The description of the parameter.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayValue")
def display_value(self) -> str:
"""
The parameter value in a user-friendly format. For example, if the `value` property shows the value 262144 for a big integer parameter, then the `displayValue` property will show the value 256K.
"""
return pulumi.get(self, "display_value")
@property
@pulumi.getter(name="isAdjusted")
def is_adjusted(self) -> bool:
"""
Indicates whether Oracle adjusted the input value to a more suitable value.
"""
return pulumi.get(self, "is_adjusted")
@property
@pulumi.getter(name="isBasic")
def is_basic(self) -> bool:
"""
Indicates whether the parameter is a basic parameter (`TRUE`) or not (`FALSE`).
"""
return pulumi.get(self, "is_basic")
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> bool:
"""
Indicates whether the parameter is set to the default value (`TRUE`) or the parameter value was specified in the parameter file (`FALSE`).
"""
return pulumi.get(self, "is_default")
@property
@pulumi.getter(name="isDeprecated")
def is_deprecated(self) -> bool:
"""
Indicates whether the parameter has been deprecated (`TRUE`) or not (`FALSE`).
"""
return pulumi.get(self, "is_deprecated")
@property
@pulumi.getter(name="isInstanceModifiable")
def is_instance_modifiable(self) -> bool:
"""
For parameters that can be changed with `ALTER SYSTEM`, indicates whether the value of the parameter can be different for every instance (`TRUE`) or whether the parameter must have the same value for all Real Application Clusters instances (`FALSE`). For other parameters, this is always `FALSE`.
"""
return pulumi.get(self, "is_instance_modifiable")
@property
@pulumi.getter(name="isModified")
def is_modified(self) -> str:
"""
Indicates how the parameter was modified. If an `ALTER SYSTEM` was performed, the value will be `MODIFIED`.
"""
return pulumi.get(self, "is_modified")
@property
@pulumi.getter(name="isPdbModifiable")
def is_pdb_modifiable(self) -> bool:
"""
Indicates whether the parameter can be modified on a per-PDB basis (`TRUE`) or not (`FALSE`). In a non-CDB, the value of this property is `null`.
"""
return pulumi.get(self, "is_pdb_modifiable")
@property
@pulumi.getter(name="isSessionModifiable")
def is_session_modifiable(self) -> bool:
"""
Indicates whether the parameter can be changed with `ALTER SESSION` (`TRUE`) or not (`FALSE`)
"""
return pulumi.get(self, "is_session_modifiable")
@property
@pulumi.getter(name="isSpecified")
def is_specified(self) -> bool:
"""
Indicates whether the parameter was specified in the server parameter file (`TRUE`) or not (`FALSE`). Applicable only when the parameter source is `SPFILE`.
"""
return pulumi.get(self, "is_specified")
@property
@pulumi.getter(name="isSystemModifiable")
def is_system_modifiable(self) -> str:
"""
Indicates whether the parameter can be changed with `ALTER SYSTEM` and when the change takes effect:
* IMMEDIATE: Parameter can be changed with `ALTER SYSTEM` regardless of the type of parameter file used to start the instance. The change takes effect immediately.
* DEFERRED: Parameter can be changed with `ALTER SYSTEM` regardless of the type of parameter file used to start the instance. The change takes effect in subsequent sessions.
* FALSE: Parameter cannot be changed with `ALTER SYSTEM` unless a server parameter file was used to start the instance. The change takes effect in subsequent instances.
"""
return pulumi.get(self, "is_system_modifiable")
@property
@pulumi.getter
def name(self) -> str:
"""
A filter to return all parameters that have the text given in their names.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def number(self) -> float:
"""
The parameter number.
"""
return pulumi.get(self, "number")
@property
@pulumi.getter
def ordinal(self) -> float:
"""
The position (ordinal number) of the parameter value. Useful only for parameters whose values are lists of strings.
"""
return pulumi.get(self, "ordinal")
@property
@pulumi.getter
def sid(self) -> str:
"""
The database instance SID for which the parameter is defined.
"""
return pulumi.get(self, "sid")
@property
@pulumi.getter
def type(self) -> str:
"""
The parameter type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updateComment")
def update_comment(self) -> str:
"""
The comments associated with the most recent update.
"""
return pulumi.get(self, "update_comment")
@property
@pulumi.getter
def value(self) -> str:
"""
The parameter value.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class GetManagedDatabasesDatabaseParameterItemAllowedValueResult(dict):
def __init__(__self__, *,
is_default: bool,
ordinal: float,
value: str):
"""
:param bool is_default: Indicates whether the parameter is set to the default value (`TRUE`) or the parameter value was specified in the parameter file (`FALSE`).
:param float ordinal: The position (ordinal number) of the parameter value. Useful only for parameters whose values are lists of strings.
:param str value: The parameter value.
"""
pulumi.set(__self__, "is_default", is_default)
pulumi.set(__self__, "ordinal", ordinal)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> bool:
"""
Indicates whether the parameter is set to the default value (`TRUE`) or the parameter value was specified in the parameter file (`FALSE`).
"""
return pulumi.get(self, "is_default")
@property
@pulumi.getter
def ordinal(self) -> float:
"""
The position (ordinal number) of the parameter value. Useful only for parameters whose values are lists of strings.
"""
return pulumi.get(self, "ordinal")
@property
@pulumi.getter
def value(self) -> str:
"""
The parameter value.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class GetManagedDatabasesDatabaseParametersDatabaseParametersCollectionResult(dict):
def __init__(__self__, *,
database_name: str,
database_sub_type: str,
database_type: str,
database_version: str,
items: Sequence['outputs.GetManagedDatabasesDatabaseParametersDatabaseParametersCollectionItemResult']):
"""
:param str
detail only within WPS output", \
# "WPS I/O title should be converted to CWL label of corresponding I/O from additional details"
assert "format" not in pkg["outputs"][1], "Omitted formats in CWL and WPS I/O definitions during deployment" \
"should not add them to the generated CWL package definition"
def test_deploy_literal_and_complex_io_from_wps_xml_reference(self):
body = {
"processDescription": {"process": {"id": self._testMethodName}},
"executionUnit": [{"href": "mock://{}".format(resources.WPS_LITERAL_COMPLEX_IO_XML)}],
"deploymentProfileName": "http://www.opengis.net/profiles/eoc/wpsApplication"
}
desc, pkg = self.deploy_process(body, describe_schema=PROCESS_SCHEMA_OLD)
# basic contents validation
assert "cwlVersion" in pkg
assert "process" in desc
proc = desc["process"]
assert proc["id"] == self._testMethodName
# package I/O validation
assert "inputs" in pkg
assert len(pkg["inputs"]) == 2
assert isinstance(pkg["inputs"], list)
assert pkg["inputs"][0]["id"] == "tasmax"
assert "default" not in pkg["inputs"][0]
assert pkg["inputs"][0]["format"] == EDAM_NETCDF
assert isinstance(pkg["inputs"][0]["type"], list), "since minOccurs=1, single value non-array must be allowed"
assert len(pkg["inputs"][0]["type"]) == 2, "single type and array type of same base"
assert pkg["inputs"][0]["type"][0] == "File", "since minOccurs=1, should be type directly"
assert pkg["inputs"][0]["type"][1]["type"] == "array"
assert pkg["inputs"][0]["type"][1]["items"] == "File", "since maxOccurs>1, same base type must array"
assert pkg["inputs"][1]["id"] == "freq"
assert pkg["inputs"][1]["default"] == "YS"
assert isinstance(pkg["inputs"][1]["type"], list), "since minOccurs=0, should be a list with 'null' type"
assert len(pkg["inputs"][1]["type"]) == 2
assert pkg["inputs"][1]["type"][0] == "null"
assert pkg["inputs"][1]["type"][1]["type"] == "enum"
assert pkg["inputs"][1]["type"][1]["symbols"] == ["YS", "MS", "QS-DEC", "AS-JUL"]
assert "outputs" in pkg
assert len(pkg["outputs"]) == 2
assert isinstance(pkg["outputs"], list)
assert pkg["outputs"][0]["id"] == "output_netcdf"
assert "default" not in pkg["outputs"][0]
assert pkg["outputs"][0]["format"] == EDAM_NETCDF
assert pkg["outputs"][0]["type"] == "File"
assert pkg["outputs"][0]["outputBinding"]["glob"] == "output_netcdf.nc"
assert pkg["outputs"][1]["id"] == "output_log"
assert "default" not in pkg["outputs"][1]
assert pkg["outputs"][1]["format"] == EDAM_PLAIN
assert pkg["outputs"][1]["type"] == "File"
assert pkg["outputs"][1]["outputBinding"]["glob"] == "output_log.*"
# process description I/O validation
assert len(proc["inputs"]) == 2
assert proc["inputs"][0]["id"] == "tasmax"
assert proc["inputs"][0]["title"] == "Resource"
assert "abstract" not in proc["inputs"][0], "Field 'abstract' should be replaced by 'description'."
assert proc["inputs"][0]["description"] == "NetCDF Files or archive (tar/zip) containing netCDF files."
assert proc["inputs"][0]["minOccurs"] == 1
assert proc["inputs"][0]["maxOccurs"] == 1000
assert len(proc["inputs"][0]["formats"]) == 1
assert proc["inputs"][0]["formats"][0]["default"] is True
assert proc["inputs"][0]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_NETCDF
assert proc["inputs"][0]["formats"][0]["encoding"] == "base64"
assert proc["inputs"][1]["id"] == "freq"
assert proc["inputs"][1]["title"] == "Frequency"
assert "abstract" not in proc["inputs"][1], "Field 'abstract' should be replaced by 'description'."
assert proc["inputs"][1]["description"] == "Resampling frequency"
assert proc["inputs"][1]["minOccurs"] == 0
assert proc["inputs"][1]["maxOccurs"] == 1
assert "formats" not in proc["inputs"][1]
assert len(proc["outputs"]) == 2
assert proc["outputs"][0]["id"] == "output_netcdf"
assert proc["outputs"][0]["title"] == "Function output in netCDF"
assert "abstract" not in proc["outputs"][0], "Field 'abstract' should be replaced by 'description'."
assert proc["outputs"][0]["description"] == "The indicator values computed on the original input grid."
assert "minOccurs" not in proc["outputs"][0]
assert "maxOccurs" not in proc["outputs"][0]
assert len(proc["outputs"][0]["formats"]) == 1
assert proc["outputs"][0]["formats"][0]["default"] is True
assert proc["outputs"][0]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_NETCDF
assert proc["outputs"][0]["formats"][0]["encoding"] == "base64"
assert proc["outputs"][1]["id"] == "output_log"
assert proc["outputs"][1]["title"] == "Logging information"
assert "abstract" not in proc["inputs"][1], "Field 'abstract' should be replaced by 'description'."
assert proc["outputs"][1]["description"] == "Collected logs during process run."
assert "minOccurs" not in proc["outputs"][1]
assert "maxOccurs" not in proc["outputs"][1]
assert len(proc["outputs"][1]["formats"]) == 1
assert proc["outputs"][1]["formats"][0]["default"] is True
assert proc["outputs"][1]["formats"][0]["mediaType"] == CONTENT_TYPE_TEXT_PLAIN
def test_deploy_enum_array_and_multi_format_inputs_from_wps_xml_reference(self):
body = {
"processDescription": {"process": {"id": self._testMethodName}},
"executionUnit": [{"href": "mock://{}".format(resources.WPS_ENUM_ARRAY_IO_XML)}],
"deploymentProfileName": "http://www.opengis.net/profiles/eoc/wpsApplication"
}
desc, pkg = self.deploy_process(body, describe_schema=PROCESS_SCHEMA_OLD)
# basic contents validation
assert "cwlVersion" in pkg
assert "process" in desc
proc = desc["process"]
assert proc["id"] == self._testMethodName
# package I/O validation
assert "inputs" in pkg
assert len(pkg["inputs"]) == 3
assert isinstance(pkg["inputs"], list)
assert pkg["inputs"][0]["id"] == "region"
assert pkg["inputs"][0]["default"] == "DEU"
# first input
assert "format" not in pkg["inputs"][0]
assert isinstance(pkg["inputs"][0]["type"], list)
# single entry of enum allowed values
assert len(pkg["inputs"][0]["type"]) == 3, "default value (null) + single type + array type of same base"
assert pkg["inputs"][0]["type"][0] == "null", "XML defaultValue should result in 'null' as valid unspecified"
assert "default" in pkg["inputs"][0]
assert pkg["inputs"][0]["default"] == "DEU", "CWL default value should match extracted defaultValue from XML"
assert isinstance(pkg["inputs"][0]["type"][1], dict), "enum base type expected since allowed values"
assert pkg["inputs"][0]["type"][1]["type"] == "enum"
assert isinstance(pkg["inputs"][0]["type"][1]["symbols"], list)
assert len(pkg["inputs"][0]["type"][1]["symbols"]) == 220
assert all(isinstance(s, str) for s in pkg["inputs"][0]["type"][1]["symbols"])
# array type of same enum allowed values
assert pkg["inputs"][0]["type"][2]["type"] == "array"
assert pkg["inputs"][0]["type"][2]["items"]["type"] == "enum"
assert isinstance(pkg["inputs"][0]["type"][2]["items"]["symbols"], list)
assert len(pkg["inputs"][0]["type"][2]["items"]["symbols"]) == 220
assert all(isinstance(s, str) for s in pkg["inputs"][0]["type"][2]["items"]["symbols"])
# second input
assert pkg["inputs"][1]["id"] == "mosaic"
# note: modified by https://github.com/crim-ca/weaver/pull/344
# explicit 'null' should not be reported as 'default', causing CWL error seeing as string with "null" value
# must be in 'type' instead to define it as optional, as tested below
# assert pkg["inputs"][1]["default"] == "null"
assert "null" not in pkg["inputs"][1]
assert "format" not in pkg["inputs"][1]
assert isinstance(pkg["inputs"][1]["type"], list), "default 'null' result type formed with it"
assert len(pkg["inputs"][1]["type"]) == 2
assert pkg["inputs"][1]["type"][0] == "null", "CWL omitted input expect from minOccurs=0 from WPS input"
assert pkg["inputs"][1]["type"][1] == "boolean"
assert pkg["inputs"][2]["id"] == "resource"
assert "default" not in pkg["inputs"][2], \
"WPS 'default format media-type' with minOccurs=1 must not result in CWL input with 'default' value"
assert isinstance(pkg["inputs"][2]["type"], list), "single and array File"
assert len(pkg["inputs"][2]["type"]) == 2
assert pkg["inputs"][2]["type"][0] == "File", "single File type"
assert pkg["inputs"][2]["type"][1]["type"] == "array"
assert pkg["inputs"][2]["type"][1]["items"] == "File", "corresponding base type for array type"
# FIXME: TAR cannot be resolved in the CWL context (not official, disable mapping to GZIP)
# this makes all formats to not be resolved (see code: wps_package.any2cwl_io)
# (see issue: https://github.com/crim-ca/weaver/issues/50)
assert "format" not in pkg["inputs"][2], \
"CWL formats should all be dropped because (x-tar) cannot be resolved to an existing schema reference"
# assert isinstance(pkg["inputs"][2]["format"], list)
# assert len(pkg["inputs"][2]["format"]) == 3
# assert pkg["inputs"][2]["format"][0] == EDAM_NETCDF
# assert pkg["inputs"][2]["format"][1] == IANA_TAR
# assert pkg["inputs"][2]["format"][2] == IANA_ZIP
# process description I/O validation
assert len(proc["inputs"]) == 3
assert proc["inputs"][0]["id"] == "region"
assert proc["inputs"][0]["title"] == "Region"
assert "abstract" not in proc["inputs"][0], "Field 'abstract' should be replaced by 'description'."
assert proc["inputs"][0]["description"] == "Country code, see ISO-3166-3"
assert proc["inputs"][0]["minOccurs"] == 0, \
"Real XML indicates 'minOccurs=1' but also has 'defaultValue', Weaver should correct it."
assert proc["inputs"][0]["maxOccurs"] == 220
assert "literalDataDomains" in proc["inputs"][0]
assert "defaultValue" in proc["inputs"][0]["literalDataDomains"][0]
assert len(proc["inputs"][0]["literalDataDomains"][0]["valueDefinition"]) == 220, \
"List of all 220 region abbreviation explicitly provided is expected."
assert proc["inputs"][0]["literalDataDomains"][0]["defaultValue"] == "DEU"
assert "formats" not in proc["inputs"][0]
assert proc["inputs"][1]["id"] == "mosaic"
assert proc["inputs"][1]["title"] == "Union of multiple regions"
assert "abstract" not in proc["inputs"][1], "Field 'abstract' should be replaced by 'description'."
assert proc["inputs"][1]["description"] == \
"If True, selected regions will be merged into a single geometry." # noqa
assert proc["inputs"][1]["minOccurs"] == 0
assert proc["inputs"][1]["maxOccurs"] == 1
assert "formats" not in proc["inputs"][1]
assert proc["inputs"][2]["id"] == "resource"
assert proc["inputs"][2]["title"] == "Resource"
assert "abstract" not in proc["inputs"][2], "Field 'abstract' should be replaced by 'description'."
assert proc["inputs"][2]["description"] == "NetCDF Files or archive (tar/zip) containing NetCDF files."
assert proc["inputs"][2]["minOccurs"] == 1
assert proc["inputs"][2]["maxOccurs"] == 1000
# note: TAR should remain as literal format in the WPS context (not mapped/added as GZIP when resolved for CWL)
assert len(proc["inputs"][2]["formats"]) == 3
assert proc["inputs"][2]["formats"][0]["default"] is True
assert proc["inputs"][2]["formats"][0]["mediaType"] == CONTENT_TYPE_APP_NETCDF
assert "encoding" not in proc["inputs"][2]["formats"][0] # none specified, so omitted in response
assert proc["inputs"][2]["formats"][1]["default"] is False
assert proc["inputs"][2]["formats"][1]["mediaType"] == CONTENT_TYPE_APP_TAR
assert "encoding" not in proc["inputs"][2]["formats"][1] # none specified, so omitted in response
assert proc["inputs"][2]["formats"][2]["default"] is False
assert proc["inputs"][2]["formats"][2]["mediaType"] == CONTENT_TYPE_APP_ZIP
assert "encoding" not in proc["inputs"][2]["formats"][2] # none specified, so omitted in response
# FIXME: implement,
# need to find an existing WPS with some, or manually write XML
# multi-output (with same ID) would be an indirect 1-output with ref to multi (Metalink file)
# (https://github.com/crim-ca/weaver/issues/25)
@pytest.mark.skip(reason="not implemented")
def test_deploy_multi_outputs_file_from_wps_xml_reference(self):
raise NotImplementedError
@pytest.mark.functional
class WpsPackageAppWithS3BucketTest(WpsConfigBase):
@classmethod
def setUpClass(cls):
cls.settings = {
"weaver.wps": True,
"weaver.wps_output": True,
"weaver.wps_output_path": "/wpsoutputs",
"weaver.wps_output_dir": "/tmp", # nosec: B108 # don't care hardcoded for test
"weaver.wps_output_s3_bucket": "wps-output-test-bucket",
"weaver.wps_output_s3_region": MOCK_AWS_REGION, # must match exactly, or mock will not work
"weaver.wps_path": "/ows/wps",
"weaver.wps_restapi_path": "/",
}
super(WpsPackageAppWithS3BucketTest, cls).setUpClass()
@mocked_aws_credentials
@mocked_aws_s3
def test_execute_application_package_process_with_bucket(self):
"""
Test validates:
- Both S3 bucket and HTTP file references can be used simultaneously as inputs.
- Process results are uploaded to the configured S3 bucket.
- Process results are not accessible locally (not referenced as WPS-outputs URL, but as S3 reference).
.. note::
| |
"Project does not exist"
)
plan['project_id'] = project_id
def _create_machine__get_key_object(self, key):
from libcloud.utils.publickey import get_pubkey_openssh_fingerprint
key_obj = super()._create_machine__get_key_object(key)
fingerprint = get_pubkey_openssh_fingerprint(key_obj.public)
keys = self.connection.list_key_pairs()
for k in keys:
if fingerprint == k.fingerprint:
ssh_keys = [{
'label': k.extra['label'],
'key': k.public_key
}]
break
else:
ssh_keys = [{
'label': f'mistio-{key_obj.name}',
'key': key_obj.public
}]
return ssh_keys
def _create_machine__compute_kwargs(self, plan):
kwargs = super()._create_machine__compute_kwargs(plan)
kwargs['ex_project_id'] = plan['project_id']
kwargs['cloud_init'] = plan.get('cloudinit')
kwargs['ssh_keys'] = kwargs.pop('auth')
try:
kwargs['ip_addresses'] = plan['networks']['ip_addresses']
except (KeyError, TypeError):
pass
return kwargs
class VultrComputeController(BaseComputeController):
def _connect(self, **kwargs):
return get_driver(Provider.VULTR)(self.cloud.apikey)
def _list_machines__postparse_machine(self, machine, node_dict):
updated = False
# do not include ipv6 on public ips
public_ips = []
for ip in machine.public_ips:
if ip and ':' not in ip:
public_ips.append(ip)
if machine.public_ips != public_ips:
machine.public_ips = public_ips
updated = True
return updated
def _list_machines__machine_creation_date(self, machine, node_dict):
try:
created_at = node_dict['created_at']
except KeyError:
return None
try:
created_at = iso8601.parse_date(created_at)
except iso8601.ParseError as exc:
log.error(repr(exc))
return None
created_at = pytz.UTC.normalize(created_at)
return created_at
def _list_machines__cost_machine(self, machine, node_dict):
from mist.api.clouds.models import CloudSize
external_id = node_dict.get('size')
try:
size_ = CloudSize.objects.get(external_id=external_id,
cloud=self.cloud)
except CloudSize.DoesNotExist:
log.error("Machine's size with external_id: %s not found",
external_id)
return 0, 0
monthly_cost = size_.extra.get('price') or 0
features = node_dict['extra'].get('features', [])
if 'auto_backups' in features:
try:
monthly_cost += config.VULTR_BACKUP_PRICE_PER_SIZE[
size_.external_id]
except KeyError:
pass
if 'ddos_protection' in features:
# DDOS protection is free on Dedicated Cloud sizes
# and not supported on Bare Metal sizes
if size_.extra.get('type') in ('vc2', 'vhf'):
monthly_cost += config.VULTR_DDOS_PROTECTION_PRICE
return 0, monthly_cost
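# Illustrative sketch (hypothetical figures): for a 'vc2' size priced at
# $10/month with both 'auto_backups' and 'ddos_protection' enabled, the method
# above would report roughly
#   monthly_cost = 10 + config.VULTR_BACKUP_PRICE_PER_SIZE[size_.external_id] \
#                     + config.VULTR_DDOS_PROTECTION_PRICE
# while the first (hourly) element of the returned tuple stays 0.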
def _list_machines__get_location(self, node_dict):
return node_dict['extra'].get('location')
def _list_machines__machine_actions(self, machine, node_dict):
super()._list_machines__machine_actions(machine, node_dict)
size = node_dict.get('size', '')
# Bare metal nodes don't support resize & snapshot
if size.startswith('vbm'):
machine.actions.resize = False
else:
machine.actions.resize = True
def _list_sizes__get_name(self, size):
# Vultr doesn't have names on sizes.
# We name them after their 4 different size types & their specs.
# - High Frequency
# - Cloud Compute
# - Bare Metal
# - Dedicated Cloud
if size.name.startswith('vc2'):
type_ = 'Cloud Compute'
elif size.name.startswith('vdc'):
type_ = 'Dedicated Cloud'
elif size.name.startswith('vhf'):
type_ = 'High Frequency'
elif size.name.startswith('vbm'):
type_ = 'Bare Metal'
else:
log.warning('Unknown Vultr size id: %s', size.id)
type_ = 'Unknown'
cpus = self._list_sizes__get_cpu(size)
return (f'{type_}: {cpus} CPUs {size.ram} MBs RAM'
f' {size.disk} GBs disk {size.price}$')
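# e.g. (hypothetical values) a 'vhf' plan with 2 vCPUs, 4096 MB RAM, 128 GB of
# disk and a $24 monthly price would be named
#   "High Frequency: 2 CPUs 4096 MBs RAM 128 GBs disk 24$"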
def _list_sizes__get_cpu(self, size):
try:
return size.extra['vcpu_count']
except KeyError:
# bare metal size
return size.extra['cpu_count']
def _list_sizes__get_available_locations(self, mist_size):
avail_locations = [str(loc)
for loc in mist_size.extra.get('locations', [])]
from mist.api.clouds.models import CloudLocation
CloudLocation.objects(
cloud=self.cloud,
external_id__in=avail_locations
).update(add_to_set__available_sizes=mist_size)
def _list_images__fetch_images(self, search=None):
# Vultr has some legacy "dummy" images that were provided when
# a node was booted from snapshot, iso, application or backup,
# that are no longer necessary on their API v2.
images = self.connection.list_images()
return [image for image in images
if image.name not in {'Custom',
'Snapshot',
'Backup',
'Application'}]
def _list_images__get_os_distro(self, image):
try:
os_distro = image.extra.get('family').lower()
except AttributeError:
return super()._list_images__get_os_distro(image)
return os_distro
def _generate_plan__parse_networks(self, auth_context, networks_dict,
location):
from mist.api.methods import list_resources
ret_dict = {
'ipv6': networks_dict.get('ipv6') is True,
'hostname': networks_dict.get('hostname')
}
networks = networks_dict.get('networks', [])
if not isinstance(networks, list):
raise BadRequestError('Invalid "networks" type, expected an array')
ret_networks = []
for net in networks:
networks, _ = list_resources(auth_context,
'network',
search=net,
cloud=self.cloud.id,
)
networks = networks.filter(location=location)
try:
network = networks[0]
except IndexError:
raise NotFoundError(f'Network {net} does not exist in'
f' location: {location.name}')
ret_networks.append({
'id': network.id,
'name': network.name,
'external_id': network.network_id,
})
if ret_networks:
ret_dict['networks'] = ret_networks
return ret_dict
def _generate_plan__parse_volume_attrs(self, volume_dict, vol_obj):
return {
'id': vol_obj.id,
'name': vol_obj.name,
'external_id': vol_obj.external_id
}
def _generate_plan__parse_custom_volume(self, volume_dict):
try:
size = int(volume_dict['size'])
name = volume_dict['name']
except KeyError:
raise BadRequestError('name and size are required')
except (TypeError, ValueError):
raise BadRequestError('Invalid size type')
if size < 10:
raise BadRequestError('Size should be at least 10 GBs')
return {
'name': name,
'size': size,
}
def _generate_plan__parse_extra(self, extra, plan):
plan['backups'] = extra.get('backups') is True
plan['ddos_protection'] = extra.get('ddos_protection') is True
def _generate_plan__post_parse_plan(self, plan):
from mist.api.clouds.models import CloudSize, CloudLocation
size = CloudSize.objects.get(id=plan['size']['id'])
bare_metal = size.extra['is_bare_metal']
location = CloudLocation.objects.get(id=plan['location']['id'])
if plan.get('volumes'):
if 'block_storage' not in location.extra.get('option', []):
raise BadRequestError(
f'Volumes are not supported in "{location.name}"')
if bare_metal:
raise BadRequestError(
'Bare Metal sizes do not support volume attachment')
if plan['networks'].get('networks') and bare_metal:
raise BadRequestError(
'Bare Metal sizes do not support network attachment')
if plan['ddos_protection']:
if 'ddos_protection' not in location.extra.get('option', []):
raise BadRequestError(
f'DDoS protection is not supported in "{location.name}"')
if bare_metal:
raise BadRequestError(
'Bare Metal sizes do not support DDoS protection')
if plan['backups'] and (bare_metal or
size.name.startswith('Dedicated Cloud')):
raise BadRequestError(
'Backups are not supported on the given size type')
hostname = plan['networks']['hostname']
if hostname is None:
plan['networks']['hostname'] = plan['machine_name']
def _create_machine__compute_kwargs(self, plan):
kwargs = super()._create_machine__compute_kwargs(plan)
mist_key = kwargs.pop('auth', None)
if mist_key:
vultr_keys = self.connection.list_key_pairs()
key = next((vultr_key for vultr_key in vultr_keys
if vultr_key.public_key.replace('\n', '') == mist_key.public), # noqa
None)
if key is None:
try:
key = self.connection.import_key_pair_from_string(
mist_key.name,
mist_key.public)
except Exception as exc:
raise MachineCreationError(
f'Failed to import key: {repr(exc)}') from None
kwargs['ex_ssh_key_ids'] = [key.extra['id']]
if plan.get('cloudinit'):
kwargs['ex_userdata'] = plan['cloudinit']
kwargs['ex_hostname'] = plan['networks']['hostname']
kwargs['ex_enable_ipv6'] = plan['networks']['ipv6']
if plan['networks'].get('networks'):
kwargs['ex_private_network_ids'] = [network['external_id']
for network in plan['networks']['networks']] # noqa
kwargs['ex_ddos_protection'] = plan['ddos_protection']
kwargs['ex_backups'] = plan['backups']
return kwargs
def _create_machine__post_machine_creation_steps(self, node, kwargs, plan):
from mist.api.clouds.models import CloudLocation
from mist.api.volumes.models import Volume
from libcloud.compute.base import StorageVolume
volumes = plan.get('volumes', [])
location = CloudLocation.objects.get(id=plan['location']['id'])
# wait till machine is in active state to attach volumes
if volumes:
for _ in range(10):
time.sleep(5)
try:
node = self.connection.ex_get_node(node.id)
except Exception:
continue
if node.state == 'running':
break
for volume in volumes:
if volume.get('id'):
vol = Volume.objects.get(id=volume['id'])
libcloud_vol = StorageVolume(id=vol.external_id,
name=vol.name,
size=vol.size,
driver=self.connection,
extra=vol.extra)
try:
self.connection.attach_volume(node, libcloud_vol)
except Exception:
log.exception('Failed to attach volume')
else:
try:
libcloud_vol = self.connection.create_volume(
size=volume['size'],
name=volume['name'],
location=location.external_id
)
except Exception:
log.exception('Failed to create volume')
continue
try:
self.connection.attach_volume(node, libcloud_vol)
except Exception:
log.exception('Failed to attach volume')
class VSphereComputeController(BaseComputeController):
def _connect(self, **kwargs):
from libcloud.compute.drivers.vsphere import VSphereNodeDriver
from libcloud.compute.drivers.vsphere import VSphere_6_7_NodeDriver
ca_cert = None
if self.cloud.ca_cert_file:
ca_cert_temp_file = tempfile.NamedTemporaryFile(delete=False)
ca_cert_temp_file.write(self.cloud.ca_cert_file.encode())
ca_cert_temp_file.close()
ca_cert = ca_cert_temp_file.name
host, port = dnat(self.cloud.owner, self.cloud.host, 443)
driver_6_5 = VSphereNodeDriver(host=host,
username=self.cloud.username,
password=<PASSWORD>,
port=port,
ca_cert=ca_cert)
self.version = driver_6_5._get_version()
if '6.7' in self.version and config.ENABLE_VSPHERE_REST:
self.version = '6.7'
return VSphere_6_7_NodeDriver(self.cloud.username,
secret=self.cloud.password,
host=host,
port=port,
ca_cert=ca_cert)
else:
self.version = "6.5-"
return driver_6_5
def check_connection(self):
"""Check connection without performing `list_machines`
In vSphere, a successful `self.connect` already confirms the connection to
the provider, so there is no need to run `list_machines` to find out.
"""
self.connect()
def _list_machines__get_location(self, node_dict):
cluster = node_dict['extra'].get('cluster', '')
host = node_dict['extra'].get('host', '')
return cluster or host
def list_vm_folders(self):
all_folders = self.connection.ex_list_folders()
vm_folders = [folder for folder in all_folders
if "VirtualMachine" in folder['type']
or "VIRTUAL_MACHINE" in folder['type']]
return vm_folders
def list_datastores(self):
datastores_raw = self.connection.ex_list_datastores()
return datastores_raw
def _list_locations__fetch_locations(self):
"""List locations for vSphere
Return all locations, clusters and hosts
"""
return self.connection.list_locations()
def _list_machines__fetch_machines(self):
"""Perform the actual libcloud call to get list of nodes"""
machine_list = []
for node in self.connection.list_nodes(
max_properties=self.cloud.max_properties_per_request,
extra=config.VSPHERE_FETCH_ALL_EXTRA):
# Check for VMs without uuid
if node.id is None:
log.error("Skipping machine {} on cloud {} - {}): uuid is "
"null".format(node.name,
self.cloud.title,
self.cloud.id))
continue
machine_list.append(node_to_dict(node))
return machine_list
def _list_machines__get_size(self, node_dict):
"""Return key of size_map dict for a specific node
Subclasses MAY override this method.
"""
return None
def _list_machines__get_custom_size(self, node_dict):
# FIXME: resolve circular import issues
from mist.api.clouds.models import CloudSize
updated = False
try:
_size = CloudSize.objects.get(
cloud=self.cloud,
external_id=node_dict['size'].get('id'))
except me.DoesNotExist:
_size = CloudSize(cloud=self.cloud,
external_id=str(node_dict['size'].get('id')))
updated = True
if _size.ram != node_dict['size'].get('ram'):
_size.ram = node_dict['size'].get('ram')
updated = True
if _size.cpus != node_dict['size'].get('extra', {}).get('cpus'):
_size.cpus = node_dict['size'].get('extra', {}).get('cpus')
updated = True
if _size.disk != node_dict['size'].get('disk'):
_size.disk = node_dict['size'].get('disk')
updated = True
name = ""
if _size.cpus:
name += f'{_size.cpus}vCPUs, '
if _size.ram:
name += f'{_size.ram}MB RAM, '
if _size.disk:
name += f'{_size.disk}GB disk.'
if _size.name != name:
_size.name = name
updated = True
if updated:
_size.save()
return _size
def _list_machines__machine_actions(self, machine, node_dict):
super(VSphereComputeController, self)._list_machines__machine_actions(
machine, node_dict)
machine.actions.clone = True
machine.actions.rename = True
machine.actions.create_snapshot = True
machine.actions.remove_snapshot = True
machine.actions.revert_to_snapshot = True
def _stop_machine(self, machine, node):
return self.connection.stop_node(node)
def _start_machine(self, machine, node):
return self.connection.start_node(node)
def _create_machine_snapshot(self, machine, node,
snapshot_name, description='',
path if the 'output_folder' is not off of
'root_folder'. Defaults to 'output'.
formats (MutableMapping[str, FileFormat]): a dictionary of file_formats
and keys with the denovo str names of those formats. Defaults to the
global 'formats' variable.
parameters (MutableMapping[str, str]): keys are the denovo names of
parameters and values are the values which should be passed to the
Distributor instances when loading or savings files. Defaults to the
global 'default_parameters' variable.
"""
settings: denovo.settings = None
root_folder: Union[str, pathlib.Path] = pathlib.Path('..')
input_folder: Union[str, pathlib.Path] = 'input'
output_folder: Union[str, pathlib.Path] = 'output'
formats: MutableMapping[str, FileFormat] = dataclasses.field(
default_factory = lambda: formats)
parameters: MutableMapping[str, str] = dataclasses.field(
default_factory = lambda: default_parameters)
""" Initialization Methods """
def __post_init__(self) -> None:
"""Initializes class instance attributes."""
# Validates core folder paths and writes them to disk.
self.root_folder = self.validate(path = self.root_folder)
self.input_folder = self._validate_io_folder(path = self.input_folder)
self.output_folder = self._validate_io_folder(path = self.output_folder)
# Gets default parameters for file transfers from 'settings'.
base = copy.deepcopy(default_parameters)
base.update(self.parameters)
self.parameters = base
        # Adds and/or overrides 'parameters' from 'settings'.
self.settings = self.settings or {}
self._add_settings()
# Creates FileLoader and FileSaver instances for loading and saving
# files.
self.loader = FileLoader(clerk = self)
self.saver = FileSaver(clerk = self)
return
""" Public Methods """
def load(self,
file_path: Optional[Union[str, pathlib.Path]] = None,
folder: Optional[Union[str, pathlib.Path]] = None,
file_name: Optional[str] = None,
file_format: Optional[Union[str, FileFormat]] = None,
**kwargs: Any) -> Any:
"""Imports file by calling appropriate method based on file_format.
If needed arguments are not passed, default values are used. If
'file_path' is passed, 'folder' and 'file_name' are ignored.
Args:
file_path (Union[str, Path]]): a complete file path.
Defaults to None.
folder (Union[str, Path]]): a complete folder path or the
name of a folder stored in 'clerk'. Defaults to None.
file_name (str): file name without extension. Defaults to
None.
file_format (Union[str, FileFormat]]): object with
information about how the file should be loaded or the key to
such an object stored in 'clerk'. Defaults to None
**kwargs: can be passed if additional options are desired specific
to the pandas or python method used internally.
Returns:
Any: depending upon method used for appropriate file format, a new
variable of a supported type is returned.
"""
return self.loader.transfer(file_path = file_path,
folder = folder,
file_name = file_name,
file_format = file_format,
**kwargs)
def save(self,
item: Any,
file_path: Optional[Union[str, pathlib.Path]] = None,
folder: Optional[Union[str, pathlib.Path]] = None,
file_name: Optional[str] = None,
file_format: Optional[Union[str, FileFormat]] = None,
**kwargs: Any) -> None:
"""Exports file by calling appropriate method based on file_format.
If needed arguments are not passed, default values are used. If
file_path is passed, folder and file_name are ignored.
Args:
            item (Any): object to be saved to disk.
file_path (Union[str, pathlib.Path]]): a complete file path.
Defaults to None.
folder (Union[str, pathlib.Path]]): a complete folder path or the
name of a folder stored in 'clerk'. Defaults to None.
file_name (str): file name without extension. Defaults to
None.
file_format (Union[str, FileFormat]]): object with
information about how the file should be loaded or the key to
such an object stored in 'clerk'. Defaults to None
**kwargs: can be passed if additional options are desired specific
to the pandas or python method used internally.
"""
self.saver.transfer(item = item,
file_path = file_path,
folder = folder,
file_name = file_name,
file_format = file_format,
**kwargs)
return
def validate(self, path: Union[str, pathlib.Path],
test: bool = True,
create: bool = True) -> pathlib.Path:
"""Turns 'file_path' into a pathlib.Path.
Args:
path (Union[str, pathlib.Path]): str or Path to be validated. If
a str is passed, the method will see if an attribute matching
'path' exists and if that attribute contains a Path.
test (bool): whether to test if the path exists. Defaults to True.
create (bool): whether to create the folder path if 'test' is True,
but the folder does not exist. Defaults to True.
Raises:
TypeError: if 'path' is neither a str nor Path.
FileNotFoundError: if the validated path does not exist and 'create'
is False.
Returns:
pathlib.Path: derived from 'path'.
"""
if isinstance(path, str):
if (hasattr(self, path)
and isinstance(getattr(self, path), pathlib.Path)):
validated = getattr(self, path)
else:
validated = pathlib.Path(path)
elif isinstance(path, pathlib.Path):
validated = path
else:
            raise TypeError('path must be a str or Path type')
if test and not validated.exists():
if create:
self._write_folder(folder = validated)
else:
raise FileNotFoundError(f'{validated} does not exist')
return validated
""" Private Methods """
def _validate_io_folder(self,
path: Union[str, pathlib.Path]) -> pathlib.Path:
"""Validates an import or export path.
Args:
path (Union[str, pathlib.Path]): path to be validated.
Returns:
pathlib.Path: validated path.
"""
try:
return self.validate(path = path, create = False)
except FileNotFoundError:
return self.validate(path = self.root_folder / path)
def _add_settings(self) -> None:
"""Returns default parameters for file transfers from 'settings'."""
for section in ['files', 'filer', 'clerk']:
if section in self.settings:
self.parameters.update(self.settings[section])
return
def _write_folder(self, folder: Union[str, pathlib.Path]) -> None:
"""Writes folder to disk.
Parent folders are created as needed.
Args:
folder (Union[str, Path]): intended folder to write to disk.
"""
pathlib.Path.mkdir(folder, parents = True, exist_ok = True)
return
def _make_unique_path(self,
folder: Union[pathlib.Path, str],
name: str) -> pathlib.Path:
"""Creates a unique path to avoid overwriting a file or folder.
Thanks to RealPython for this bit of code:
https://realpython.com/python-pathlib/.
Args:
folder (Path): the folder where the file or folder will be located.
name (str): the basic name that should be used.
Returns:
Path: with a unique name. If the original name conflicts with an
existing file/folder, a counter is used to find a unique name
with the counter appended as a suffix to the original name.
"""
counter = 0
while True:
counter += 1
path = pathlib.Path(folder) / name.format(counter)
if not path.exists():
return path
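# Illustrative usage sketch (not part of the original module; the 'csv' format key,
# file names and default-folder behaviour are assumptions):
#   clerk = Clerk(settings = {}, root_folder = '..')
#   data = clerk.load(file_name = 'raw_data', file_format = 'csv')
#   clerk.save(item = data, file_name = 'clean_data', file_format = 'csv')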
@dataclasses.dataclass
class FileLoader(Distributor):
"""Manages file importing for denovo.
Args:
clerk (Clerk): related Clerk instance.
"""
clerk: Clerk
""" Public Methods """
def load(self, **kwargs: Any) -> Any:
"""Calls 'transfer' method with **kwargs."""
return self.transfer(**kwargs)
def transfer(self,
file_path: Union[str, pathlib.Path] = None,
folder: Union[str, pathlib.Path] = None,
file_name: Optional[str] = None,
file_format: Union[str, FileFormat] = None,
**kwargs: Any) -> Any:
"""Imports file by calling appropriate method based on file_format.
If needed arguments are not passed, default values are used. If
file_path is passed, folder and file_name are ignored.
Args:
file_path (Union[str, Path]]): a complete file path.
Defaults to None.
folder (Union[str, Path]]): a complete folder path or the
name of a folder stored in 'clerk'. Defaults to None.
file_name (str): file name without extension. Defaults to
None.
file_format (Union[str, FileFormat]]): object with
information about how the file should be loaded or the key to
such an object stored in 'clerk'. Defaults to None
**kwargs: can be passed if additional options are desired specific
to the pandas or python method used internally.
Returns:
Any: depending upon method used for appropriate file format, a new
variable of a supported type is returned.
"""
file_path, file_format = self._prepare_transfer(
file_path = file_path,
folder = folder,
file_name = file_name,
file_format = file_format)
parameters = self._get_parameters(file_format = file_format, **kwargs)
if file_format.module:
tool = file_format.load('import_method')
else:
tool = getattr(self, file_format.import_method)
return tool(file_path, **parameters)
@dataclasses.dataclass
class FileSaver(Distributor):
"""Manages file exporting for denovo.
Args:
clerk (Clerk): related Clerk instance.
"""
clerk: Clerk
""" Public Methods """
def save(self, **kwargs: Any):
"""Calls 'transfer' method with **kwargs."""
return self.transfer(**kwargs)
def transfer(self,
item: Any,
file_path: Optional[Union[str, pathlib.Path]] = None,
folder: Optional[Union[str, pathlib.Path]] = None,
file_name: Optional[str] = None,
file_format: Optional[Union[str, FileFormat]] = None,
**kwargs: Any) -> None:
"""Exports file by calling appropriate method based on file_format.
If needed arguments are not passed, default values are used. If
file_path is passed, folder and file_name are ignored.
Args:
            item (Any): object to be saved to disk.
file_path (Union[str, Path]]): a complete file path.
Defaults to None.
folder (Union[str, Path]]): a complete folder path or the
name of a folder stored in 'clerk'. Defaults to None.
file_name (str): file name without extension. Defaults to
None.
file_format (Union[str, FileFormat]]): object with
                information about
for i in range(len(I_d_r_tr))]
if initdate < datetime(2020,6,15):
outliers_init = (datetime(2020,6,15)-initdate).days
outliers_end = (datetime(2020,6,19)-initdate).days
I_d_r_smooth=pd.DataFrame(I_d_r)
I_d_r_smooth = I_d_r_smooth.rolling(7, win_type='gaussian', min_periods=1, center=True).mean(std=2).round()
if self:
# Update database
print('updating database')
self.data = dfappend(self.data,np.array(I_d_r_smooth[0]),I_d_r_tr,name)
self.I_d_r = np.array(I_d_r_smooth[0])
self.I_d_r_tr = I_d_r_tr
self.I_d_r_dates = I_d_r_dates
self.I_d_r_raw = I_d_r
return
else:
return np.array(I_d_r_smooth[0]), I_d_r_tr, I_d_r_dates
# ----------------------------------------------------- #
# Daily infected "informe diario" #
# ----------------------------------------------------- #
def imp_infected_mcyt_infdiario(self=None,initdate = None,endpoint = 'https://github.com/MinCiencia/Datos-COVID19/raw/master/output/producto5/TotalesNacionales.csv',rav=7):
"""Import national infected from the "informe diario"
Args:
self (object, optional): Object to add the data. Defaults to None.
initdate (datetime.datetime, optional): initial date. Defaults to None.
endpoint (str, optional): endpoint. Defaults to 'https://github.com/MinCiencia/Datos-COVID19/raw/master/output/producto5/TotalesNacionales.csv'.
Raises:
        Exception: if no initial date is given and 'self' is None.
    Returns:
        tuple: (I_d_infd, I_ac_infd, I_d_infd_rmean, t_infd, dates_infd) when 'self'
            is None; otherwise the imported data is stored on 'self'.
"""
print('Importing Daily infected with backpropagated correction')
if self:
initdate = self.initdate
else:
if not initdate:
raise Exception("Initial date missing")
aux = pd.read_csv(endpoint)
I_ac_infd = aux.iloc[1,1:]
I_d_infd = aux.iloc[6,1:]
dates_infd = [datetime.strptime(aux.columns[1:][i],'%Y-%m-%d') for i in range(len(aux.columns[1:]))]
I_d_infd_rmean = I_d_infd.rolling(window=rav,win_type='gaussian', min_periods=1, center=True).mean(std=2).round()
index = np.where(np.array(dates_infd) >= initdate)[0][0]
I_d_infd = I_d_infd[index:]
I_ac_infd = I_ac_infd[index:]
dates_infd = dates_infd[index:]
I_d_infd_rmean = I_d_infd_rmean[index:]
t_infd = [(dates_infd[i]-initdate).days for i in range(len(dates_infd))]
if self:
# Update database
print('updating database')
#self.data = dfappend(self.data,I_d_r_smooth,I_d_r_tr,name)
self.I_d_infd = I_d_infd
self.I_ac_infd = I_ac_infd
self.I_d_infd_rmean = I_d_infd_rmean
self.t_infd = t_infd
self.dates_infd = dates_infd
return
else:
return I_d_infd, I_ac_infd, I_d_infd_rmean, t_infd, dates_infd
# --------------------------------------------------------- #
    #              Hospital Occupancy                            #
# --------------------------------------------------------- #
# ----------------------------- #
    #    Bed Occupancy Data    #
# ----------------------------- #
def imp_hosp_icu_mcyt(self=None,tstate = '', initdate = None, endpoint = "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto58/Camas_UCI_diarias.csv",user=None,password=None, name = ['UCI_capacity','UCI_use_covid','UCI_use_noncovid']):
"""
Import ICU Bed Occupation data per region.
Currently it just supports states, but soon I'll add Health Services as the minimum territorial data.
input:
        - tstate: region
        - initdate: initial date
- endpoint: Data endpoint
output:
- sochimi, sochimi_dates, sochimi_tr, Hr, Hr_tot, Vr, Vr_tot,
        (data frame, dates, days since simulation start, hospitalised patients, hospital bed capacity, ventilated patients, ventilator capacity)
Normal Usage:
sochimi, sochimi_dates, sochimi_tr, Hr, Hr_tot, Vr, Vr_tot = importSOCHIMI(tstate = '13', initdate = datetime(2020,5,15))
"""
if self:
tstate = self.tstate
initdate = self.initdate
else:
if not tstate:
raise Exception("State code missing")
if not initdate:
raise Exception("Initial date missing")
cuttoname = {'15': 'Arica y Parinacota', '01': 'Tarapacá', '02': 'Antofagasta', '03': 'Atacama', '04': 'Coquimbo', '05': 'Valparaíso', '13': 'Metropolitana', '06': 'O’Higgins', '07': 'Maule', '16': 'Ñuble', '08': 'Biobío', '09': 'Araucanía', '14': 'Los Ríos', '10': 'Los Lagos', '11': 'Aysén', '12': 'Magallanes'}
data = pd.read_csv(endpoint)
dates = data.columns[2:]
#['Camas UCI habilitadas','Camas UCI ocupadas COVID-19','Camas UCI ocupadas no COVID-19']
if not type(tstate) == list:
tstate = [tstate]
counties = [i for i in tstate if len(i)>2 ]
if counties:
print('This method doesn\'t support comunas for Chile')
states = [i for i in tstate if len(i)==2]
statenames = [cuttoname[i] for i in states]
capacity = []
occupied_covid = []
occupied_non_covid = []
for i in statenames:
capacity.append(data.loc[((data['Region']==i) & (data['Serie']=='Camas UCI habilitadas'))].iloc[:,2:])
occupied_covid.append(data.loc[((data['Region']==i) & (data['Serie']=='Camas UCI ocupadas COVID-19'))].iloc[:,2:])
occupied_non_covid.append(data.loc[((data['Region']==i) & (data['Serie']=='Camas UCI ocupadas no COVID-19'))].iloc[:,2:])
capacity = np.array(capacity).sum(axis=0)[0]
occupied_covid = np.array(occupied_covid).sum(axis=0)[0]
occupied_non_covid = np.array(occupied_non_covid).sum(axis=0)[0]
dates = [datetime.strptime(dates[i][:10],'%Y-%m-%d') for i in range(len(dates))]
index = np.where(np.array(dates) >= initdate)[0][0]
UCI_capacity =list(capacity[index:])
UCI_use_covid =list(occupied_covid[index:])
UCI_use_noncovid =list(occupied_non_covid[index:])
UCI_dates = dates[index:]
UCI_tr = [(UCI_dates[i]-initdate).days for i in range(len(UCI_dates))]
# New daily hospitalized
endpoint2 = 'https://github.com/MinCiencia/Datos-COVID19/raw/master/output/producto91/Ingresos_UCI.csv'
UCI_d = pd.read_csv(endpoint2)
UCI_d_dates = UCI_d.columns[1:]
UCI_d_dates = [datetime.strptime(UCI_d_dates[i][:10],'%Y-%m-%d') for i in range(len(UCI_d_dates))]
UCI_d = UCI_d.iloc[0][1:]
index = np.where(np.array(UCI_d_dates) >= initdate)[0][0]
UCI_d = UCI_d[index:]
UCI_d_dates = UCI_d_dates[index:]
UCI_d_tr = [(UCI_d_dates[i]-initdate).days for i in range(len(UCI_d_dates))]
if self:
# Update database
#print('updating database')
#self.data = dfappend(self.data,UCI_capacity,UCI_tr,name[0])
#self.data = dfappend(self.data,UCI_use_covid,UCI_tr,name[1])
#self.data = dfappend(self.data,UCI_use_noncovid,UCI_tr,name[2])
self.UCI_capacity = UCI_capacity
self.UCI_use_covid = UCI_use_covid
self.UCI_use_noncovid = UCI_use_noncovid
self.UCI_dates = UCI_dates
self.UCI_tr = UCI_tr
self.UCI_d = UCI_d
self.UCI_d_dates = UCI_d_dates
self.UCI_d_tr = UCI_d_tr
return
else:
return UCI_capacity,UCI_use_covid,UCI_use_noncovid,UCI_dates,UCI_tr,UCI_d,UCI_d_dates,UCI_d_tr
# -------------------------------------- #
# Deaths (DEIS) MinCiencia #
# -------------------------------------- #
def imp_deaths_deis_mcyt(self=None,tstate = '',initdate = None,endpoint = 'https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto50/DefuncionesDEIS_confirmadosPorComuna.csv',user=None,password=None,name = ['D_confirmed','D_suspected','D_ac_confirmed','D_ac_suspected']):
"""
    For now this only supports national data
Import Accumulated Deaths
input:
        - tstate: [string or string list] CUT code per comuna or region
- initdate: datetime object with the initial date
- endpoint (optional):
output:
- Br: Real accumulated deaths
- Br_tr: days from simulation first day
- Br_dates: data dates
Usage:
Br,Br_tr,Br_dates = importAccumulatedDeaths(self=None,tstate = '13',initdate = datetime(2020,5,15))
"""
print('Importing Deaths by DEIS')
if self:
tstate = self.tstate
initdate = self.initdate
else:
if not tstate:
raise Exception("State code missing")
if not initdate:
raise Exception("Initial date missing")
D_r_confirmed = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto50/DefuncionesDEIS_confirmadosPorComuna.csv').dropna().iloc[:,5:].sum(axis=0)
D_r_suspected = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto50/DefuncionesDEIS_sospechososPorComuna.csv').dropna().iloc[:,5:].sum(axis=0)
D_r_dates = [datetime.strptime(D_r_confirmed.index[i][:10],'%Y-%m-%d') for i in range(len(D_r_confirmed.index))]
index = np.where(np.array(D_r_dates) >= initdate)[0][0]
D_r_confirmed = D_r_confirmed[index:]
D_r_suspected = D_r_suspected[index:]
D_r_dates = D_r_dates[index:]
D_r_tr = [(D_r_dates[i]-initdate).days for i in range(len(D_r_dates))]
B_r_confirmed = D_r_confirmed.cumsum()
B_r_suspected = D_r_suspected.cumsum()
if self:
# Update database
print('updating database')
self.data = dfappend(self.data,D_r_confirmed,D_r_tr,name[0])
self.data = dfappend(self.data,D_r_suspected,D_r_tr,name[1])
self.data = dfappend(self.data,B_r_confirmed,D_r_tr,name[2])
self.data = dfappend(self.data,B_r_suspected,D_r_tr,name[3])
self.Dr = D_r_confirmed
self.Dr_suspected = D_r_suspected
self.Br = B_r_confirmed
self.Br_suspected = B_r_suspected
self.Br_dates = D_r_dates
self.Br_tr = D_r_tr
return
else:
return D_r_confirmed,D_r_suspected,B_r_confirmed,B_r_suspected,D_r_dates,D_r_tr
# ------------------------------------------- #
# National Vaccinated Infected Data #
# ------------------------------------------- #
def imp_vacc_infected_mcyt(self=None,initdate = None,endpoint = 'https://github.com/MinCiencia/Datos-COVID19/raw/master/output/producto90/incidencia_en_vacunados.csv',user=None,password=None):
"""
Product 90 has data of:
*
Import Vaccines
input:
        - tstate: [string or string list] CUT code per comuna or region
- initdate: datetime object with the initial date
- endpoint (optional):
output:
        - Iv_ac: accumulated infected among vaccinated people
        - Iv_d: daily infected among vaccinated people
        - Iv0: active vaccinated infected at the initial date
        - Inv_d: daily infected among non-vaccinated people
        - Inv_ac: accumulated infected among non-vaccinated people
        - Iv_dates: dates
Usage:
-
"""
print('Importing Vaccines')
if self:
initdate = self.initdate
else:
if not initdate:
raise Exception("Initial date missing")
data = pd.read_csv(endpoint)
#data['date'] = [datetime(2021,1,3) + timedelta(days=7*(i-1)) for i in data['semana_epidemiologica']]
data['date'] = [datetime(2021,1,4) + timedelta(days=7*(i)) for i in data['semana_epidemiologica'].index]
initweek = int((initdate - datetime(2021,1,3)).days/7)
Iv_df = data[['dos_dosis_comp_casos','dosis_unica_comp_casos','dosis_ref_comp_casos']]
Inv_df = data['sin_vac_casos']
Iv_s = data['dos_dosis_comp_casos'] + data['dosis_unica_comp_casos'] + data['dosis_ref_comp_casos']
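    # The source reports weekly totals; spread each weekly count evenly across 7 days
    # to get a daily approximation, then apply the Gaussian rolling mean below.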
Iv_d = []
Inv_d = []
for i in Iv_s:
for j in range(7):
Iv_d.append(round(i/7))
for i in Inv_df:
for j in range(7):
Inv_d.append(round(i/7))
Iv_dates = [datetime(2021,1,4)+timedelta(days=i) for i in range(len(Iv_d))]
Iv_d = pd.DataFrame({'Iv_d':Iv_d},index=Iv_dates).rolling(window=7,win_type='gaussian', min_periods=1, center=True).mean(std=2).round()
Inv_d = pd.DataFrame({'Inv_d':Inv_d},index=Iv_dates).rolling(window=7,win_type='gaussian', min_periods=1, center=True).mean(std=2).round()
Iv_ac = Iv_d.cumsum()
Inv_ac = Inv_d.cumsum()
Iv_d = Iv_d.loc[np.array(Iv_dates)>initdate]
Iv_ac = np.array(Iv_ac.loc[np.array(Iv_dates)>initdate]['Iv_d'])
Inv_d = Inv_d.loc[np.array(Iv_dates)>initdate]
Inv_ac = np.array(Inv_ac.loc[np.array(Iv_dates)>initdate]['Inv_d'])
Iv_dates = Iv_dates[np.where(np.array(Iv_dates) > initdate)[0][0]:]
# Active vaccinated infected
Iv0 = data['dos_dosis_comp_casos'][initweek-1:initweek+1].sum() + data['dosis_unica_comp_casos'][initweek-1:initweek+1].sum() + data['dosis_ref_comp_casos'][initweek-1:initweek+1].sum()
Iv_d = np.array(Iv_d['Iv_d'])
Inv_d = np.array(Inv_d['Inv_d'])
if self:
self.Iv_ac = Iv_ac
self.Iv_d = Iv_d
self.Iv0 = Iv0
self.Inv_ac = Inv_ac
self.Inv_d = Inv_d
self.Iv_dates = Iv_dates
self.Iv_df = Iv_df
return
else:
return Iv_ac,Iv_d,Iv0, Inv_d,Inv_ac,Iv_dates
# ------------------------------------------- #
# National Vaccinated Data #
# ------------------------------------------- #
def imp_vaccinated_mcyt(self=None,tstate = '',initdate = None,endpoint = 'https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto76/vacunacion.csv',user=None,password=None):
"""
Product 76 has data of:
*
Import Vaccines
input:
        - tstate: [string or string list] CUT code per comuna or region
- initdate: datetime object with the initial date
- endpoint (optional):
output:
- V_1st: First Dose (2 doses vaccine)
- V_complete: complete (1 or 2 doses vaccine)
- V_boost: Boost Dose
Usage:
-
"""
tstate = ['15','01','02','03','04','05','13','06','07','16','08','09','14','10','11','12']
print('Importing Vaccines')
if self:
tstate = self.tstate
initdate = self.initdate
population = self.population
else:
if not tstate:
raise Exception("State code missing")
if not initdate:
raise Exception("Initial date missing")
population = 19120000
data = pd.read_csv(endpoint)
    # Accumulated vaccine doses
v1_ac = data.loc[(data['Region']=='Total')&(data['Dosis']=='Primera')].iloc[:,2:].iloc[0]
v2_ac = data.loc[(data['Region']=='Total')&(data['Dosis']=='Segunda')].iloc[:,2:].iloc[0]
v3_ac = data.loc[(data['Region']=='Total')&(data['Dosis']=='Refuerzo')].iloc[:,2:].iloc[0]
    # Daily vaccine doses:
v1_d = v1_ac.diff()
v2_d = v2_ac.diff()
v3_d = v3_ac.diff()
v1_d[0] = v1_ac[0]
    v2_d[0] = v2_ac[0]
    v3_d[0] = v3_ac[0]
    # People currently at each dose level
v3 = v3_ac
v2 = v2_ac-v3_ac
v1 = v1_ac-v2_ac
v0 = population - v1_ac
v_dates = [datetime.strptime(data.columns[2:][i],'%Y-%m-%d') for i in range(len(data.columns[2:]))]
"""
The MIT License (MIT)
Copyright (c) 2016-2017 Elastic Email, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import requests
from enum import Enum
# API version 2.42.0
class ApiClient:
apiUri = 'https://api.elasticemail.com/v2'
apiKey = '00000000-0000-0000-0000-0000000000000'
@staticmethod
def Request(method, url, data=None, attachs=None):
if data is None:
data = dict()
data['apikey'] = ApiClient.apiKey
if method == 'POST':
result = requests.post(ApiClient.apiUri + url, data=data, files=attachs)
elif method == 'PUT':
result = requests.put(ApiClient.apiUri + url, data=data)
elif method == 'GET':
attach = ''
            params = {k: v for k, v in data.items() if v is not None}
result = requests.get(ApiClient.apiUri + url, params=params)
print(result.url)
jsonMy = result.json()
if jsonMy['success'] is False:
return jsonMy['error']
if 'data' in jsonMy.keys():
return jsonMy['data']
else:
return 'success'
@staticmethod
def AddDictionaryParameter(dictionary, paramName, parameters):
for key in dictionary:
parameters[paramName + "_" + key] = dictionary[key]
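# Illustrative usage sketch (the endpoint path below is an assumption, not taken from
# this file): set the key once, then call Request with the HTTP verb and API path.
#   ApiClient.apiKey = '<your-api-key>'
#   account = ApiClient.Request('GET', '/account/load')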
class ApiTypes:
"""
"""
class AccessLevel(Enum):
"""
"""
EENone = 0
"""
"""
ViewAccount = 1
"""
"""
ViewContacts = 2
"""
"""
ViewForms = 4
"""
"""
ViewTemplates = 8
"""
"""
ViewCampaigns = 16
"""
"""
ViewChannels = 32
"""
"""
ViewAutomations = 64
"""
"""
ViewSurveys = 128
"""
"""
ViewSettings = 256
"""
"""
ViewBilling = 512
"""
"""
ViewSubAccounts = 1024
"""
"""
ViewUsers = 2048
"""
"""
ViewFiles = 4096
"""
"""
ViewReports = 8192
"""
"""
ModifyAccount = 16384
"""
"""
ModifyContacts = 32768
"""
"""
ModifyForms = 65536
"""
"""
ModifyTemplates = 131072
"""
"""
ModifyCampaigns = 262144
"""
"""
ModifyChannels = 524288
"""
"""
ModifyAutomations = 1048576
"""
"""
ModifySurveys = 2097152
"""
"""
ModifyFiles = 4194304
"""
"""
Export = 8388608
"""
"""
SendSmtp = 16777216
"""
"""
SendSMS = 33554432
"""
"""
ModifySettings = 67108864
"""
"""
ModifyBilling = 134217728
"""
"""
ModifyProfile = 268435456
"""
"""
ModifySubAccounts = 536870912
"""
"""
ModifyUsers = 1073741824
"""
"""
Security = 2147483648
"""
"""
ModifyLanguage = 4294967296
"""
"""
ViewSupport = 8589934592
"""
"""
SendHttp = 17179869184
"""
"""
Modify2FA = 34359738368
"""
"""
ModifySupport = 68719476736
"""
"""
ViewCustomFields = 137438953472
"""
"""
ModifyCustomFields = 274877906944
"""
"""
ModifyWebNotifications = 549755813888
"""
"""
ExtendedLogs = 1099511627776
"""
"""
VerifyEmails = 2199023255552
"""
"""
ViewEmailVerifications = 4398046511104
"""
"""
class AccessToken:
"""
Access level or permission to be assigned to this Access Token.
"""
AccessLevel = None # ApiTypes.AccessLevel
"""
Name or email address of the token.
"""
Name = None # string
"""
"""
MaskedToken = None # string
"""
Date this AccessToken was created.
"""
DateCreated = None # DateTime
"""
Date this AccessToken was last used.
"""
LastUse = None # DateTime?
"""
Date this AccessToken expires.
"""
Expires = None # DateTime?
"""
Comma separated list of CIDR notated IP ranges that this token can connect from.
"""
RestrictAccessToIPRange = None # string
"""
"""
AllowUpdate = None # bool
"""
"""
Type = None # ApiTypes.AccessTokenType
"""
"""
class AccessTokenType(Enum):
"""
ApiKey that gives you access to our SMTP and HTTP API's.
"""
APIKey = 1
"""
"""
SMTPCredential = 2
"""
Detailed information about your account
"""
class Account:
"""
Code used for tax purposes.
"""
TaxCode = None # string
"""
Public key for limited access to your Account such as contact/add so you can use it safely on public websites.
"""
PublicAccountID = None # string
"""
True, if Account is a Sub-Account. Otherwise, false
"""
IsSub = None # bool
"""
"""
IsUser = None # bool
"""
The number of Sub-Accounts this Account has.
"""
SubAccountsCount = None # long
"""
Number of status: 1 - Active
"""
StatusNumber = None # int
"""
Account status: Active
"""
StatusFormatted = None # string
"""
URL form for payments.
"""
PaymentFormUrl = None # string
"""
URL to your logo image.
"""
LogoUrl = None # string
"""
HTTP address of your website.
"""
Website = None # string
"""
True: Turn on or off ability to send mails under your brand. Otherwise, false
"""
EnablePrivateBranding = None # bool
"""
"""
EnablePrivateBrandingCss = None # bool
"""
Address to your support.
"""
SupportLink = None # string
"""
Subdomain for your rebranded service
"""
PrivateBrandingUrl = None # string
"""
"""
PrivateBrandingCssUrl = None # string
"""
First name.
"""
FirstName = None # string
"""
Last name.
"""
LastName = None # string
"""
Company name.
"""
Company = None # string
"""
First line of address.
"""
Address1 = None # string
"""
Second line of address.
"""
Address2 = None # string
"""
City.
"""
City = None # string
"""
State or province.
"""
State = None # string
"""
Zip/postal code.
"""
Zip = None # string
"""
Numeric ID of country. A file with the list of countries is available <a href="http://api.elasticemail.com/public/countries"><b>here</b></a>
"""
CountryID = None # int?
"""
Phone number
"""
Phone = None # string
"""
Proper email address.
"""
Email = None # string
"""
URL for affiliating.
"""
AffiliateLink = None # string
"""
Numeric reputation
"""
Reputation = None # double
"""
Amount of emails sent from this Account
"""
TotalEmailsSent = None # long
"""
Amount of emails sent from this Account
"""
MonthlyEmailsSent = None # long?
"""
Current credit in Account for Pay as you go plans.
"""
Credit = None # decimal
"""
Amount of email credits
"""
EmailCredits = None # int
"""
Amount of emails sent from this Account
"""
PricePerEmail = None # decimal
"""
Why your clients are receiving your emails.
"""
DeliveryReason = None # string
"""
URL for making payments.
"""
AccountPaymentUrl = None # string
"""
Address of SMTP server.
"""
Smtp = None # string
"""
Address of alternative SMTP server.
"""
SmtpAlternative = None # string
"""
Status of automatic payments configuration.
"""
AutoCreditStatus = None # string
"""
When AutoCreditStatus is Enabled, the credit level that triggers the credit to be recharged.
"""
AutoCreditLevel = None # decimal
"""
When AutoCreditStatus is Enabled, the amount of credit to be recharged.
"""
AutoCreditAmount = None # decimal
"""
Amount of emails Account can send daily
"""
DailySendLimit = None # int
"""
Creation date.
"""
DateCreated = None # DateTime
"""
True, if you have enabled link tracking. Otherwise, false
"""
LinkTracking = None # bool
"""
Type of content encoding
"""
ContentTransferEncoding = None # string
"""
Enable contact delivery and optimization tools on your Account.
"""
EnableContactFeatures = None # bool
"""
"""
NeedsSMSVerification = None # bool
"""
"""
IsGoogleAccount = None # bool
"""
Indicates if EE logo in the footer is required (ex. for trial account on older plan)
"""
IsEELogoRequired = None # bool
"""
"""
DisableGlobalContacts = None # bool
"""
"""
UntrustedDeviceAlertDisabled = None # bool
"""
Basic overview of your account
"""
class AccountOverview:
"""
Amount of emails sent from this Account
"""
TotalEmailsSent = None # long
"""
Current credit in Account for Pay as you go plans.
"""
Credit = None # decimal
"""
Cost of 1000 emails
"""
CostPerThousand = None # decimal
"""
Number of messages in progress
"""
InProgressCount = None # long
"""
Number of contacts currently with blocked status of Unsubscribed, Complaint, Bounced or InActive
"""
BlockedContactsCount = None # long
"""
Numeric reputation
"""
Reputation = None # double
"""
Number of contacts
"""
ContactCount = None # long
"""
Number of created campaigns
"""
CampaignCount = None # long
"""
Number of available templates
"""
TemplateCount = None # long
"""
Number of created Sub-Accounts
"""
SubAccountCount = None # long
"""
Number of active referrals
"""
ReferralCount = None # long
"""
Maximum allowed Contacts limit if it's a Sub-Account.
"""
MaxContacts = None # int
"""
Lists advanced sending options of your account.
"""
class AdvancedOptions:
"""
True, if you want to track clicks. Otherwise, false
"""
EnableClickTracking = None # bool
"""
True, if you want to track by link tracking. Otherwise, false
"""
EnableLinkClickTracking = None # bool
"""
True, if you want to use template scripting in your emails {{}}. Otherwise, false
"""
EnableTemplateScripting = None # bool
"""
True, if text BODY of message should be created automatically. Otherwise, false
"""
AutoTextFormat = None # bool
"""
True, if you want bounce notifications returned. Otherwise, false
"""
EmailNotificationForError = None # bool
"""
True, if you want to receive low credit email notifications. Otherwise, false
"""
LowCreditNotification = None # bool
"""
True, if this Account is a Sub-Account. Otherwise, false
"""
IsSubAccount = None # bool
"""
True, if this Account resells Elastic Email. Otherwise, false.
"""
IsOwnedByReseller = None # bool
"""
True, if you want to enable list-unsubscribe header. Otherwise, false
"""
EnableUnsubscribeHeader = None # bool
"""
True, if you want to display your labels on your unsubscribe form. Otherwise, false
"""
ManageSubscriptions = None # bool
"""
True, if you want to only display labels that the contact is subscribed to on your unsubscribe form. Otherwise, false
"""
ManageSubscribedOnly = None # bool
"""
True, if you want to display an option for the contact to opt into transactional email only on your unsubscribe form. Otherwise, false
"""
TransactionalOnUnsubscribe = None # bool
"""
"""
ConsentTrackingOnUnsubscribe = None # bool
"""
"""
PreviewMessageID = None # string
"""
True, if you want to apply custom headers to your emails. Otherwise, false
"""
AllowCustomHeaders = None # bool
"""
Email address to send a copy of all email to.
"""
BccEmail = None # string
"""
Type of content encoding
"""
ContentTransferEncoding = None # string
"""
True, if you want to receive bounce email notifications. Otherwise, false
"""
EmailNotification = None # string
"""
Email addresses to send a copy of all notifications from our system. Separated by semicolon
"""
NotificationsEmails = None # string
"""
Emails, separated by semicolon, to which the notification about contact unsubscribing should be sent to
"""
UnsubscribeNotificationEmails = None # string
"""
True, if Account has tooltips active. Otherwise, false
"""
EnableUITooltips = None # bool
"""
True, if you want to use Contact Delivery Tools. Otherwise, false
"""
EnableContactFeatures = None # bool
"""
URL to your logo image.
"""
    LogoUrl = None # string
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Implement Table."""
from abc import ABCMeta, abstractmethod
import copy
from numbers import Integral
from math import log10
from sys import stdout
from hoomd.write.custom_writer import _InternalCustomWriter
from hoomd.custom.custom_action import _InternalAction
from hoomd.logging import LoggerCategories, Logger
from hoomd.data.parameterdicts import ParameterDict
from hoomd.data.typeconverter import OnlyTypes
from hoomd.util import dict_flatten
from hoomd.custom import Action
class _OutputWriter(metaclass=ABCMeta):
"""Represents the necessary functions for writing out data.
We use this to ensure the output object passed to Table will support the
necessary functions.
"""
@abstractmethod
def flush(self):
pass
@abstractmethod
def write(self):
pass
@abstractmethod
def writable(self):
pass
@classmethod
def __subclasshook__(cls, C):
if cls is _OutputWriter:
return all(hasattr(C, method) for method in cls.__abstractmethods__)
else:
return NotImplemented
def _ensure_writable(fh):
if not fh.writable():
raise ValueError("file-like object must be writable.")
return fh
class _Formatter:
"""Internal class for number and string formatting for Table object.
Main method is ``__call__``. It takes a value with the corresponding column
width and outputs the string to use for that column. Some of these
parameters are not currently used in the _InternalTable class, but are
available in the _Formatter class, meaning that adding these features later
would be fairly simple. I (<NAME>) did not think they were worth
complicating the Table Logger any more than it currently is though, so they
are not used now.
Args:
pretty (bool): whether to attempt to make output pretty (more readable).
max_precision (int): The max length for formatting a number or string.
max_decimals_pretty (int): The maximum number of decimals. This is
required to ensure that the decimals don't eat up all space in a
pretty print.
pad (str, optional): What to pad extra column space with, defaults to
space.
align (str, optional): What type of alignment to use, defaults to
centered ('^').
"""
def __init__(self,
pretty=True,
max_precision=15,
max_decimals_pretty=5,
pad=" ",
align="^"):
self.generate_fmt_strings(pad, align)
self.pretty = pretty
self.precision = max_precision - 1
self.max_decimals_pretty = max_decimals_pretty
def generate_fmt_strings(self, pad, align):
base = "{:" + pad + align
self._num_format = base + "{width}{type}}"
self._str_format = base + "{width}}"
def __call__(self, value, column_width):
if isinstance(value, str):
return self.format_str(value, column_width)
else:
return self.format_num(value, column_width)
def format_num(self, value, column_width):
# Always output full integer values
if isinstance(value, Integral):
return self._num_format.format(value, width=column_width, type="d")
# For floating point numbers
else:
            # For values greater than one, the minimum-length representation is
            # the number written without any information past the decimal
            # point. For values less than 1 the smallest is 0.xxx. The plus one
            # is for the decimal point. We already attempt to print out as many
            # decimal points as possible, so we only need to determine the
            # minimum size to the left of the decimal point, including the
            # decimal point itself.
min_len_repr = int(log10(max(abs(value), 1))) + 1
if value < 0:
min_len_repr += 1 # add 1 for the negative sign
# Use scientific formatting
if not min_len_repr < 6 or min_len_repr > column_width:
# Determine the number of decimals to use
if self.pretty:
decimals = min(max(column_width - 6, 1),
self.max_decimals_pretty)
else:
decimals = max(self.precision, 0)
type_fmt = "." + str(decimals) + "e"
# Use regular formatting
else:
# Determine the number of decimals to use
if self.pretty:
decimals = min(max(column_width - min_len_repr - 2, 1),
self.max_decimals_pretty)
else:
decimals = max(self.precision - min_len_repr + 1, 0)
type_fmt = "." + str(decimals) + "f"
return self._num_format.format(value,
width=column_width,
type=type_fmt)
def format_str(self, value, column_width):
if self.pretty and len(value) > column_width:
truncate_to = max(1, column_width - 2)
return self._str_format.format(value[-truncate_to:],
width=column_width)
else:
return self._str_format.format(value, width=column_width)
def __eq__(self, other):
if not isinstance(other, _Formatter):
return NotImplemented
return (self.pretty == other.pretty
and self.precision == other.precision
and self.max_decimals_pretty == other.max_decimals_pretty
and self._num_format == other._num_format
and self._str_format == other._str_format)
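# Illustrative usage sketch (not part of the original module): a _Formatter instance
# is called with a value and a column width and returns the padded cell string.
#   fmt = _Formatter(pretty=True, max_precision=10)
#   cell = fmt(42, 12)          # integers are always written in full
#   cell = fmt(3.14159265, 12)  # floats use fixed or scientific notation as needed
#   cell = fmt("pressure", 12)  # strings are padded (or truncated when too long)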
class _TableInternal(_InternalAction):
"""Implements the logic for a simple text based logger backend.
    This currently has to check the logged quantities every time to ensure they
    have not changed since the last run of `~.act`. Performance could be
improved by allowing for writing of data without checking for a change in
logged quantities, but would be more fragile.
"""
_invalid_logger_categories = LoggerCategories.any([
'sequence', 'object', 'particle', 'bond', 'angle', 'dihedral',
'improper', 'pair', 'constraint', 'strings'
])
flags = [
Action.Flags.ROTATIONAL_KINETIC_ENERGY, Action.Flags.PRESSURE_TENSOR,
Action.Flags.EXTERNAL_FIELD_VIRIAL
]
_skip_for_equality = {"_comm"}
def __init__(self,
logger,
output=stdout,
header_sep='.',
delimiter=' ',
pretty=True,
max_precision=10,
max_header_len=None):
param_dict = ParameterDict(header_sep=str,
delimiter=str,
min_column_width=int,
max_header_len=OnlyTypes(int,
allow_none=True),
pretty=bool,
max_precision=int,
output=OnlyTypes(
_OutputWriter,
postprocess=_ensure_writable),
logger=Logger)
param_dict.update(
dict(header_sep=header_sep,
delimiter=delimiter,
min_column_width=max(10, max_precision + 6),
max_header_len=max_header_len,
max_precision=max_precision,
pretty=pretty,
output=output,
logger=logger))
self._param_dict = param_dict
# internal variables that are not part of the state.
# Ensure that only scalar and potentially string are set for the logger
if (LoggerCategories.scalar not in logger.categories
or logger.categories & self._invalid_logger_categories
!= # noqa: W504 (yapf formats this incorrectly
LoggerCategories.NONE):
raise ValueError(
"Given Logger must have the scalar categories set.")
self._cur_headers_with_width = dict()
self._fmt = _Formatter(pretty, max_precision)
self._comm = None
def _setattr_param(self, attr, value):
"""Makes self._param_dict attributes read only."""
raise ValueError("Attribute {} is read-only.".format(attr))
def attach(self, simulation):
self._comm = simulation.device._comm
def detach(self):
self._comm = None
def _get_log_dict(self):
"""Get a flattened dict for writing to output."""
return {
key: value[0]
for key, value in dict_flatten(self.logger.log()).items()
}
def _update_headers(self, new_keys):
"""Update headers and write the current headers to output.
This function could be made simpler and faster by moving some of the
        transformation to act. However, since we don't expect the headers to change
        often, this would likely slow the writer down. The design is to
        off-load any potentially unnecessary calculations to this function even
if that means more overall computation when headers change.
"""
header_output_list = []
header_dict = {}
for namespace in new_keys:
header = self._determine_header(namespace, self.header_sep,
self.max_header_len)
column_size = max(len(header), self.min_column_width)
header_dict[namespace] = column_size
header_output_list.append((header, column_size))
self._cur_headers_with_width = header_dict
self.output.write(
self.delimiter.join((self._fmt.format_str(hdr, width)
for hdr, width in header_output_list)))
self.output.write('\n')
@staticmethod
def _determine_header(namespace, sep, max_len):
if max_len is None:
return sep.join(namespace)
else:
index = -1
char_count = len(namespace[-1])
for name in reversed(namespace[:-1]):
char_count += len(name)
if char_count > max_len:
break
index -= 1
return sep.join(namespace[index:])
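    # Illustrative sketch of the header truncation above (not part of the original
    # file): with sep='.' and max_len=8, the namespace ('Simulation', 'timestep')
    # shortens to 'timestep', since prepending 'Simulation' would exceed the budget.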
def _write_row(self, data):
"""Write a row of data to output."""
headers = self._cur_headers_with_width
self.output.write(
self.delimiter.join(
(self._fmt(data[k], headers[k]) for k in headers)))
self.output.write('\n')
def act(self, timestep=None):
"""Write row to designated output.
Will also write header when logged quantities are determined to have
changed.
"""
output_dict = self._get_log_dict()
if self._comm is not None and self._comm.rank == 0:
# determine if a header needs to be written. This is always the case
# for the first call of act, and if the logged quantities change
# within a run.
new_keys = output_dict.keys()
if new_keys != self._cur_headers_with_width.keys():
self._update_headers(new_keys)
# Write the data and flush. We must flush to ensure that the data
# isn't merely stored in Python ready to be written later.
self._write_row(output_dict)
self.output.flush()
def __getstate__(self):
state = copy.copy(self.__dict__)
state.pop('_comm', None)
# This is to handle when the output specified is just stdout. By default
# file objects like this are not picklable, so we need to handle it
# differently. We let `None` represent stdout in the state dictionary.
# Most other file like objects will simply fail to be pickled here.
if self.output == stdout:
param_dict = ParameterDict()
param_dict.update(state['_param_dict'])
state['_param_dict'] = param_dict
del state['_param_dict']['output']
state['_param_dict']['output'] = None
return state
else:
return super().__getstate__()
def __setstate__(self, state):
if state['_param_dict']['output'] is None:
del state['_param_dict']['output']
state['_param_dict']['output'] = stdout
state['_param_dict']._type_converter['output'] = OnlyTypes(
                _OutputWriter, postprocess=_ensure_writable)
self.__dict__ = state
class Table(_InternalCustomWriter):
"""A delimiter separated value file backend for a Logger.
This can serve as a way to output scalar simulation data to standard out.
However, this is useable to store simulation scalar data to a file as well.
Note:
This only works with scalar and string quantities. If using string
        quantities, keep in mind that with the default space delimiter, strings
        containing spaces will cause read errors if you attempt to read the
        outputted data with a space delimited file reader.
Note:
        All attributes for
h_obj = cf.create_checkcombo(hP, None, chk_list, has_all=len(chk_list)>1, first_line=first_line)
#
cb_func = functools.partial(self.checkComboUpdate, h_obj, chk_list, i_grp)
h_obj.view().pressed.connect(cb_func)
# sets the initial states
if any_sel:
for i_sel in [self.fields[i_grp][3].index(x) for x in self.f_data[self.fields[i_grp][2]]]:
h_obj.handleItemPressed(i_sel+1)
elif grp_type == 'NumberGroup':
# case is a number group group
# initialisations
n_num, n_txt = len(self.fields[i_grp][3]), self.fields[i_grp][3]
n_val = self.f_data[self.fields[i_grp][2]]
w_num = (self.grp_width - 2*dX) / n_num
h_obj = [[] for _ in range(2 * n_num)]
#
for i_num in range(n_num):
# creates the label text
ind_txt, ind_num = i_num * 2, i_num * 2 + 1
dim_txt = QRect(dX + (2 * i_num) * w_num, dY/2, w_num, 17)
h_obj[ind_txt] = cf.create_label(hP, txt_font_bold, n_txt[i_num], dim=dim_txt, align='right')
# creates the number edit boxes
dim_num = QRect(dX + (2 * i_num + 1) * w_num, dY, w_num, 21)
h_obj[ind_num] = cf.create_edit(hP, txt_font, str(n_val[i_num]), dim=dim_num)
# sets the widgets into the box layout
layout = QHBoxLayout()
if isinstance(h_obj, list):
for hh in h_obj:
layout.addWidget(hh)
else:
layout.addWidget(h_obj)
# sets the groupbox layout and enabled properties
self.h_grpbx[i_grp][0].setLayout(layout)
cf.set_group_enabled_props(self.h_grpbx[i_grp][0], self.fields[i_grp][4])
def get_info(self):
'''
:return:
'''
if not self.is_ok:
# user cancelled
return None
        elif all([(len(x) > 0) for x in list(self.f_data.values())]):
# all fields were filled out correctly
return self.f_data
else:
# not all the fields were filled out correctly
return None
def create_control_buttons(self):
'''
:return:
'''
# initialisations
layout = QHBoxLayout()
#
if self.is_exc:
b_txt = ['Update Exclusion Filter', 'Cancel']
cb_fcn = [self.update_exc_filter_only, self.user_cancel]
b_name = ['update_exc_filter', 'user_cancel']
else:
b_txt = ['Update Axes Plot', 'Update Filter Only', 'Cancel']
cb_fcn = [self.update_filter_plot, self.update_filter_only, self.user_cancel]
b_name = ['update_filter_plot', 'update_filter_only', 'user_cancel']
# group box object
b_wid = (self.grp_width - (1 + len(b_txt)) * dX) / len(b_txt)
self.h_grpbx[self.n_grp] = QGroupBox("")
# creates the load config file object
for i in range(len(b_txt)):
# creates the button object
b_dim = QRect((i + 1)*dX + i * b_wid, dY, b_wid, 21)
h_but = cf.create_button(self.h_grpbx[self.n_grp][0], b_dim, button_font, b_txt[i],
cb_fcn=cb_fcn[i], name=b_name[i])
h_but.setAutoDefault(False)
# adds the objects to the layout
layout.addWidget(h_but)
# sets the box layout
self.h_grpbx[self.n_grp][0].setLayout(layout)
def set_button_enabled_props(self):
'''
:return:
'''
if not self.is_init:
return
# initialisations
if self.is_exc:
# determines if not all values have been selected
f_keys, f_values = [x[2] for x in self.fields], [x[3] for x in self.fields]
is_ok = not np.any([len(self.f_data[fk]) == len(fv) for fk, fv in zip(f_keys, f_values)])
# retrieves the save button object and determines if all paths are correct
hUpdateOnly = self.h_grpbx[self.n_grp][0].findChild(QPushButton, 'update_exc_filter')
if hUpdateOnly is not None:
hUpdateOnly.setEnabled(is_ok)
else:
# determines if at least one has been selected
is_ok = all([(len(fv) > 0) for fv in list(self.f_data.values())])
# retrieves the save button object and determines if all paths are correct
hUpdateOnly = self.h_grpbx[self.n_grp][0].findChild(QPushButton, 'update_filter_only')
if hUpdateOnly is not None:
hUpdateOnly.setEnabled(is_ok)
# retrieves the save button object and determines if all paths are correct
hUpdatePlot = self.h_grpbx[self.n_grp][0].findChild(QPushButton, 'update_filter_plot')
if hUpdatePlot is not None:
hUpdatePlot.setEnabled(is_ok)
####################################
#### CALLBACK FUNCTIONS ####
####################################
def checkComboUpdate(self, h_obj, chk_list, i_grp, index):
'''
:return:
'''
#
item, i_sel = h_obj.model().itemFromIndex(index), index.row()
is_Checked = item.checkState() == Qt.Checked
if is_Checked:
if chk_list[i_sel - 1] == 'All':
self.f_data[self.fields[i_grp][2]] = ['All']
else:
self.f_data[self.fields[i_grp][2]].append(chk_list[i_sel - 1])
else:
i_rmv = self.f_data[self.fields[i_grp][2]].index(chk_list[i_sel - 1])
self.f_data[self.fields[i_grp][2]].pop(i_rmv)
#
if len(self.f_data[self.fields[i_grp][2]]):
first_line = '--- Selection: {0} ---'.format(', '.join(self.f_data[self.fields[i_grp][2]]))
else:
first_line = '--- Selection: None ---'
#
h_obj.model().item(0).setText(first_line)
self.set_button_enabled_props()
def update_exc_filter_only(self):
'''
:return:
'''
# resets the close flag and closes the GUI
self.can_close = True
self.close()
def update_filter_only(self):
'''
:return:
'''
# resets the close flag and closes the GUI
self.f_data['is_ud'] = [self.is_ud]
self.can_close = True
self.close()
def update_filter_plot(self):
'''
:return:
'''
# resets the close flag and closes the GUI
self.f_data['is_ud'] = [self.is_ud]
self.can_close = True
self.update_plot = True
self.close()
def user_cancel(self):
'''
:return:
'''
# resets the close flag and closes the GUI
self.is_ok = False
self.can_close = True
self.close()
def closeEvent(self, evnt):
'''
:param evnt:
:return:
'''
if self.can_close:
super(RotationFilter, self).closeEvent(evnt)
else:
evnt.ignore()
########################################################################################################################
########################################################################################################################
class RotationFilteredData(object):
def __init__(self, data, rot_filt, cell_id, plot_exp_name, plot_all_expt, plot_scope, is_ud,
t_ofs=None, t_phase=None, use_raw=False, rmv_empty=True):
# initialisations
self.e_str = None
self.is_ok = True
self.is_ud = is_ud
self._t_ofs = t_ofs
self._t_phase = t_phase
self.plot_exp_name = plot_exp_name
self.plot_all_expt = plot_all_expt
self.plot_scope = plot_scope
self.use_raw = use_raw
self.rmv_empty = rmv_empty
# sets the phase labels based on the experiment stimuli type
if self.is_ud:
self.phase_lbl = ['Baseline', 'Stimuli']
else:
self.phase_lbl = ['Baseline', 'Clockwise', 'Counter-Clockwise']
# sets the rotation filter (depending if one has been previously set)
if rot_filt is None:
self.rot_filt = cf.init_rotation_filter_data(self.is_ud)
else:
self.rot_filt = rot_filt
# sets the other fields
self.i_expt0 = None
self.n_phase = len(self.phase_lbl)
self.is_single_cell = plot_scope == 'Individual Cell'
#
if cf.use_raw_clust(data) or self.use_raw:
self.n_expt = 1 + plot_all_expt * (len(data._cluster) - 1)
else:
self.n_expt = 1 + plot_all_expt * (len(data.cluster) - 1)
# applies the filter and sets up the other plotting field values
self.apply_rotation_filter(data)
self.set_spike_arrays(data, cell_id)
if self.is_ok:
self.set_legend_str()
self.set_final_data_arrays()
#####################################
#### MAIN FILTER FUNCTIONS ####
#####################################
def apply_rotation_filter(self, data):
'''
:return:
'''
# applies the rotational filter to the spike time dataset
if self.plot_scope == 'Individual Cell':
# case is filtering on a single cell level
expt_filt_lvl = 0
self.is_single_cell = True
else:
# case is filtering on a whole cell/multiple experiment level
expt_filt_lvl = 1 + self.plot_all_expt
self.is_single_cell = False
# sets the experiment name (set to none if filtering all experiments)
exp_name = self.plot_exp_name if expt_filt_lvl < 2 else None
# applies all unique filters to the loaded experiments
self.t_spike0, self.wvm_para, self.trial_ind, self.clust_ind, self.i_expt0, self.f_perm, self.f_key, \
self.rot_filt_tot = rot.apply_rot_filter(data, self.rot_filt, expt_filt_lvl, exp_name,
self.use_raw, self.rmv_empty)
# determines the number of plots to be displayed
self.n_filt = len(self.rot_filt_tot)
def set_spike_arrays(self, data, cell_id):
'''
:param data:
:param cell_id:
:return:
'''
# sets the experiment indices
is_rot = cf.det_valid_rotation_expt(data)
clust_ind, trial_ind, e_str = self.clust_ind, self.trial_ind, None
if len(clust_ind) == 0:
# if the cluster index is not valid, then output an error to screen
e_str = 'The input cluster index does not have a feasible match. Please try again with a ' \
'different index or rotation analysis filter.'
elif self.is_single_cell:
#
if cell_id == 'No Valid Cells':
# if there are no valid cells, then output an error to screen
e_str = 'There are no valid cells for this experiment. Retry again with another experiment.'
else:
                # otherwise, determine the index of the current experiment
i_expt0 = cf.get_expt_index(self.plot_exp_name, data._cluster, np.ones(len(data._cluster)))
self.i_expt0 = [np.array([i_expt0]) for _ in range(self.n_filt)]
# sets the index values for the given experiment
if self.use_raw:
i_cluster = data._cluster[i_expt0]['clustID'].index(int(cell_id[cell_id.index('#') + 1:]))
else:
i_cluster = data.cluster[i_expt0]['clustID'].index(int(cell_id[cell_id.index('#') + 1:]))
clust_ind = [[np.array([i_cluster], dtype=int)] for _ in range(self.n_filt)]
# if there was an error then output a message to screen and exit the function
if e_str is not None:
cf.show_error(e_str, 'Infeasible Cluster Indices')
self.is_ok = False
return
if cf.use_raw_clust(data) or self.use_raw:
s_freq = [[data._cluster[i]['sFreq'] for i in x] for x in self.i_expt0]
else:
s_freq = [[data.cluster[i]['sFreq'] for i in x] for x in self.i_expt0]
# retrieves the sampling frequencies and trial/cell count
n_trial = [[len(x) if x is not None else 0 for x in ss] for ss in trial_ind]
n_cell = [[len(x) if x is not None else 0 for x in ss] for ss in clust_ind]
# sets the stimuli phase duration (depending on the trial type)
if len(s_freq[0]):
if 'tPeriod' in self.wvm_para[0][0].dtype.names:
# case is a sinusoidal pattern
self.t_phase = [
[np.floor(wv['tPeriod'][0] / 2) / jj for wv, jj in zip(wvp, sf)]
for wvp, sf in zip(self.wvm_para, s_freq)
]
else:
# case is a flat pattern
self.t_phase = [
[np.floor(wv['nPts'][0] / 2) / jj for wv, jj in zip(wvp, sf)]
for wvp, sf in zip(self.wvm_para, s_freq)
]
else:
# otherwise, set an empty array
self.t_phase = [[]]
# sets the cluster/channel ID flags
if cf.use_raw_clust(data) or self.use_raw:
self.cl_id = [sum([list(np.array(data._cluster[x]['clustID'])[y])
for x, y in zip(i_ex, cl_ind)], []) for i_ex, cl_ind in zip(self.i_expt0, clust_ind)]
self.ch_id = [sum([list(np.array(data._cluster[x]['chDepth'])[y])
for x, y in zip(i_ex, cl_ind)], []) for i_ex, cl_ind in zip(self.i_expt0, clust_ind)]
else:
            self.cl_id = [sum([list(np.array(data.cluster[x]['clustID'])[y])
                           for x, y in zip(i_ex, cl_ind)], []) for i_ex, cl_ind in zip(self.i_expt0, clust_ind)]
            self.ch_id = [sum([list(np.array(data.cluster[x]['chDepth'])[y])
                           for x, y in zip(i_ex, cl_ind)], []) for i_ex, cl_ind in zip(self.i_expt0, clust_ind)]
# repository: charelF/ComplexSystems
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from numba import njit, prange
import scipy
from scipy import special, spatial, sparse
import scipy.sparse.linalg
import matplotlib.pyplot as plt
from matplotlib import animation, rc, ticker
from mpl_toolkits.mplot3d import Axes3D
rc('animation', html='jshtml')
from IPython.display import clear_output, HTML
import math
import random
import time
from numba import jit, njit, prange
import sys
sys.path.append('../shared')
from wednesdaySPEED import *
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#----------------------CRASH ANALYSIS-----------------------------|
@jit(nopython=True)
def contiguous_regions(condition):
"""Finds contiguous True regions of the boolean array "condition". Returns
a 2D array where the first column is the start index of the region and the
second column is the end index."""
    # Find the indices of changes in "condition"
d = np.diff(condition)
idx, = d.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
idx_copy = np.zeros((idx.shape[0] + 1), dtype = np.int64)
idx_copy[1:] = idx
idx = idx_copy
# If the start of condition is True prepend a 0
## idx = np.r_[0, idx]
if condition[-1]:
idx_copy = np.zeros((idx.shape[0] + 1), dtype = np.int64)
idx_copy[:-1] = idx
idx_copy[-1] = condition.size
idx = idx_copy
# Reshape the result into two columns
idx= np.reshape(idx,(-1,2))
return idx
@jit(nopython=True)
def count_crashes(X, treshold, window=5):
"""
    Counts rolling windows whose per-day geometric-mean gross return falls below `treshold`.
    - X: log returns array, in range -1, 1
    - treshold: geometric-mean gross return (per day) below which a window counts as a crash;
      a 20% drop over a 5-day window corresponds to treshold ~= 0.8**(1/5) ~= 0.956
    - window: length of the rolling window in days (default: 5)
"""
crashes = 0
for i in range(len(X)-window):
period = X[i:i+window]+1
prod = np.prod(period)
geo_mean = prod ** (1/window)
if geo_mean < treshold:
crashes += 1
return crashes
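# Quick illustrative check of count_crashes (added; the numbers are synthetic).
# Five consecutive ~5% daily drops give a per-day geometric-mean gross return of
# roughly 0.95-0.96, so a treshold of 0.96 flags at least one crash window while
# a treshold of 0.8 does not.
X_demo = np.array([0.0, -0.05, -0.05, -0.05, -0.05, -0.05, 0.0, 0.0])
assert count_crashes(X_demo, 0.96, window=5) >= 1
assert count_crashes(X_demo, 0.8, window=5) == 0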
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
def visualiseNICE(G, P, N, S, X, D, T, U, C):
fig, (ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8) = plt.subplots(
ncols=1, nrows=8, figsize=(12,12), sharex=True, gridspec_kw =
{'wspace':0, 'hspace':0.05, 'height_ratios':[1,2,1,1,1,1,1,1]}
)
im1 = ax1.imshow(G.T, cmap="bwr", interpolation="None", aspect="auto")
im4 = ax4.imshow(P.T, cmap="bwr", interpolation="None", aspect="auto")
amnwc = np.max(np.abs(N-initial_account_balance)) # absolute max net worth change
vmin, vmax = initial_account_balance-amnwc, initial_account_balance+amnwc
im5 = ax5.imshow(N.T, cmap="bwr", interpolation="None", aspect="auto", vmin=vmin, vmax=vmax)
size = "15%"
cax1 = make_axes_locatable(ax1).append_axes('right', size=size, pad=0.05)
fig.colorbar(im1, cax=cax1, orientation='vertical')
cax4 = make_axes_locatable(ax4).append_axes('right', size=size, pad=0.05)
fig.colorbar(im4, cax=cax4, orientation='vertical')
cax5 = make_axes_locatable(ax5).append_axes('right', size=size, pad=0.05)
fig.colorbar(im5, cax=cax5, orientation='vertical')
cax2 = make_axes_locatable(ax2).append_axes('right', size=size, pad=0.05)
cax2.hist(S, orientation="horizontal", bins=np.linspace(np.min(S), np.max(S), len(S)//2))
# cax2.hist(np.log10(S), orientation="horizontal", bins=np.logspace(np.log10(np.min(S)), np.log10(np.max(S)), len(S)//2))
# cax2.set_xscale("log")
# cax2.set_yscale("log")
cax2.get_xaxis().set_visible(False)
cax2.get_yaxis().set_visible(False)
cax3 = make_axes_locatable(ax3).append_axes('right', size=size, pad=0.05)
cax3.hist(X, orientation="horizontal", bins=np.linspace(np.min(X), np.max(X), len(X)//5))
cax3.get_xaxis().set_visible(False)
cax3.get_yaxis().set_visible(False)
cax6 = make_axes_locatable(ax6).append_axes('right', size=size, pad=0.05)
cax6.get_xaxis().set_visible(False)
cax6.get_yaxis().set_visible(False)
cax7 = make_axes_locatable(ax7).append_axes('right', size=size, pad=0.05)
cax7.get_xaxis().set_visible(False)
cax7.get_yaxis().set_visible(False)
cax8 = make_axes_locatable(ax8).append_axes('right', size=size, pad=0.05)
cax8.get_xaxis().set_visible(False)
cax8.get_yaxis().set_visible(False)
# ax2.set_yscale("log")
ax2.plot(S, label="S")
Ws = [25]
for W in Ws:
ax2.plot(np.arange(W-1, len(S)), moving_average(S, W), label=f"MA{W}")
ax2.grid(alpha=0.4)
# ax2.legend(ncol=len(Ws)+1)
ax3.bar(np.arange(len(X)), X)
ax3.grid(alpha=0.4)
# if D.shape[1] < 25:
ax6.plot(np.mean(D[0],axis=1), color="C0", alpha=1, label="CA")
ax6.plot(np.mean(D[1],axis=1), color="C1", alpha=1, label="momentum")
ax6.plot(np.mean(D[2],axis=1), color="C2", alpha=1, label="invert")
ax6.plot(np.max(D[0],axis=1), ":", color="C0", alpha=1, label="CA")
ax6.plot(np.max(D[1],axis=1), ":", color="C1", alpha=1, label="momentum")
ax6.plot(np.max(D[2],axis=1), ":", color="C2", alpha=1, label="invert")
ax6.plot(np.min(D[0],axis=1), "--", color="C0", alpha=1, label="CA")
ax6.plot(np.min(D[1],axis=1), "--", color="C1", alpha=1, label="momentum")
ax6.plot(np.min(D[2],axis=1), "--", color="C2", alpha=1, label="invert")
# ax6.plot(np.mean(D,axis=1), color="black", alpha=1)
ax6.grid(alpha=0.4)
# ax6.legend()
ax7.set_yscale("symlog")
ax7.plot(T, label="stack")
ax7.plot(U, label="called shares")
ax7.grid(alpha=0.4)
ax7.legend()
# if D.shape[1] < 25:
# ax6.plot(D, color="black", alpha=0.3)
# ax6.plot(np.mean(D,axis=1), color="black", alpha=1)
ax8.imshow(C.T, cmap="binary", interpolation="None", aspect="auto")
# ax6.grid(alpha=0.4)
ax8.set_xlabel("time")
# ax2.set_ylabel("standardised log returns")
ax2.set_ylabel("close price")
ax1.set_ylabel("agents")
ax3.set_ylabel("log return")
ax4.set_ylabel("portfolio")
ax5.set_ylabel("net worth")
ax6.set_ylabel("influence (I)")
ax7.set_ylabel("stack")
ax8.set_ylabel("margin calls")
# fig.colorbar(im, cax=ax4)
plt.tight_layout()
# plt.savefig("tmp.png", dpi=300)
plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
G,P,N,S,X,D,T,U,C, initial_account_balance = simulation(trigger = False, bound = False, pd = 0.05, pe = 0.01,
ph = 0.0485, pa = 0.7, N0=1000, N1 = 1000, A =0, a=1, h=1,
pi1 = 0.5, pi2 = 0.3, pi3 = 0.2)
visualiseNICE(G,P,N,S,X,D,T,U,C)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#---------------PHASE TRANSITION-------------------#
series = np.load("../../data/ENTROPYPLOT/E1_S_timeseries.npy")
tau = 9
N = 100
# series = S
splt = np.array_split(series, N)
q_vals = np.linspace(-4, 4, 2000)
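# The block below follows the standard multifractal "thermodynamic" formalism
# (the "eq 10"/"eq 11" tags refer to the paper the authors were working from;
# the exact reference is assumed here). Each of the N segments i gets a
# normalised measure
#     mu_i = |S_i(tau) - S_i(0)| / sum_j |S_j(tau) - S_j(0)|        (eq 10)
# and the exponent X_q (the analogue of tau(q)) is estimated for each moment
# order q as
#     X_q = log( sum_i mu_i**q ) / log(N)                           (eq 11)
# X_q is plotted below as the free energy; the entropy-like S_q and the
# specific-heat-like C_q are its first and second finite differences over q.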
## structs
C_q = np.zeros(q_vals.shape[0] - 2)
X_q = np.zeros(q_vals.shape[0])
S_q = np.zeros(q_vals.shape[0] - 1)
mu_i = np.zeros(len(splt))
denom_sum = 0
## eq 10
for i in range(len(splt)):
denom_sum += np.abs(splt[i][tau] - splt[i][0])
for j in range(len(splt)):
mu_i[j] = np.abs(splt[j][tau] - splt[j][0]) / denom_sum
lhs = np.zeros((q_vals.shape[0]))
rhs = np.zeros((q_vals.shape[0]))
for k, val in enumerate(q_vals):
## eq 11
lhs[k] = np.log(np.sum(mu_i**val))
rhs[k] = np.log(N)
## solve for slope of log-log
    ## X_q is equivalent to tau(q) in Canessa
X_q[k] = lhs[k] / rhs[k]
# ## cannot obtain C_q for first and last q vals
for l in range(1, q_vals.shape[0] - 1):
C_q[l - 1] = X_q[l + 1] - 2 * X_q[l] + X_q[l - 1]
S_q[l - 1] = X_q[l + 1] - X_q[l - 1]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
plt.figure(figsize=(10,5))
plt.plot(q_vals/40, X_q/np.max(X_q), c="r", label="Free Energy - H")
plt.plot(q_vals[2:]/40, S_q[:-1]/np.max(-S_q), c="b", label="Entropy - dH/dT")
plt.plot(q_vals[2:]/40, C_q/np.max(C_q), c="g", label="Specific heat - d^2H/dT^2")
plt.ylabel("")
plt.xlabel("Temperature")
plt.legend()
plt.show()
plt.figure(figsize=(10,5))
plt.plot(q_vals, X_q)
plt.ylabel("H - Free Energy")
plt.xlabel("Temperature")
plt.show()
plt.figure(figsize=(10,5))
plt.plot(q_vals[2:], S_q[:-1])
plt.ylabel("S - Entropy")
plt.xlabel("Temperature")
plt.show()
plt.figure(figsize=(10,5))
plt.plot(q_vals[2:],C_q)
plt.ylabel("C_p - Specific heat")
plt.xlabel("Temperature")
plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@njit(parallel=True)
def parallel_simulation_phase_transition(PAR1_range,PAR2_range, PAR3_range,SIM, treshold, N0):
crashes = np.zeros((len(PAR1_range), SIM), dtype=np.float64)
S_arrays = np.zeros((len(PAR1_range), SIM, N0), dtype=np.float64)
for i in prange(len(PAR1_range)):
PAR1_VAL = PAR1_range[i]
PAR2_VAL = PAR2_range[i]
PAR3_VAL = PAR3_range[i]
for j in prange(SIM):
G,P,N,S,X,D,T,U,C, initial_account_balance = simulation(trigger = False, bound = False, pd = 0.05, pe = 0.01,
ph = 0.0485, pa = 0.7, N0=N0, N1 = 100, A =4, a=1, h=1,
pi1 = PAR1_VAL, pi2 = PAR2_VAL, pi3 = PAR3_VAL)
# CRASH DATA
crashes[i,j] = count_crashes(X, treshold, window=5)
S_arrays[i,j] = X
return (crashes, S_arrays)
@njit(parallel=True)
def parallel_simulation_phase_transition2(A_range, SIM, treshold, N0):
crashes = np.zeros((len(A_range), SIM), dtype=np.float64)
S_arrays = np.zeros((len(A_range), SIM, N0), dtype=np.float64)
for i in prange(len(A_range)):
A_val = A_range[i]
for j in prange(SIM):
G,P,N,S,X,D,T,U,C, initial_account_balance = simulation(trigger = False, bound = True, pd = 0.05, pe = 0.01,
ph = 0.0485, pa = 0.7, N0=N0, N1 = 100, A =A_val, a=1, h=1,
pi1 = 0.5, pi2 = 0.3, pi3 = 0.2)
# CRASH DATA
crashes[i,j] = count_crashes(X, treshold, window=5)
S_arrays[i,j] = X
return (crashes, S_arrays)
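# Note (added): both parallel drivers above return `crashes` with shape
# (n_params, SIM) -- one crash count per parameter value and repetition -- and
# `S_arrays` with shape (n_params, SIM, N0) holding the log-return series X of
# every run, which the multifractal analysis below consumes.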
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# A_range = np.linspace(0,10, 50)
PAR1_range = np.linspace(0, 1 ,50)
PAR2_range = (1-PAR1_range)*0.3
PAR3_range = (1-PAR1_range)*0.2
SIM = 10
treshold = 0.8
N0 = 1000
crashes, S_ARRAY = parallel_simulation_phase_transition(PAR1_range,PAR2_range, PAR3_range, SIM, treshold, N0)
# crashes, S_ARRAY = parallel_simulation_phase_transition2(A_range, SIM, treshold, N0)
crashes_mean = np.mean(crashes, axis=1)
crashes_std = np.std(crashes, axis=1)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# PAR1_range = A_range
tau = 2
N = 10
q_vals = np.linspace(-5, 5, 5000)
# q_vals = np.linspace(-1, 1, 10)
C_q = np.zeros(q_vals.shape[0] - 2)
X_q = np.zeros(q_vals.shape[0])
S_q = np.zeros(q_vals.shape[0] - 1)
lhs = np.zeros((q_vals.shape[0]))
rhs = np.zeros((q_vals.shape[0]))
C_q_collector = np.empty((len(PAR1_range), SIM, *C_q.shape))
X_q_collector = np.empty((len(PAR1_range), SIM, *X_q.shape))
S_q_collector = np.empty((len(PAR1_range), SIM, *S_q.shape))
for i_par,par in enumerate(PAR1_range):
print(i_par)
for sim in range(SIM):
series = S_ARRAY[i_par, sim]
splt = np.array_split(series, N)
mu_i = np.zeros(len(splt))
## structs
denom_sum = 0
## eq 10
for i in range(len(splt)):
denom_sum += np.abs(splt[i][tau] - splt[i][0])
for j in range(len(splt)):
mu_i[j] = np.abs(splt[j][tau] - splt[j][0]) / denom_sum
for k, val in enumerate(q_vals):
## eq 11
lhs[k] = np.log(np.sum(mu_i**val))
rhs[k] = np.log(N)
## solve for slope of log-log
            ## X_q is equivalent to tau(q) in Canessa
X_q[k] = lhs[k] / rhs[k]
# print(lhs)
# print(X_q)
# ## cannot obtain C_q for first and last q vals
for l in range(1, q_vals.shape[0] - 1):
C_q[l - 1] = X_q[l + 1] - 2 * X_q[l] + X_q[l - 1]
S_q[l - 1] = X_q[l + 1] - X_q[l - 1]
C_q_collector[i_par, sim] = C_q
X_q_collector[i_par, sim] = X_q
S_q_collector[i_par, sim] = S_q
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
C_q_mean = np.nanmean(C_q_collector, axis=1)
X_q_mean = np.nanmean(X_q_collector, axis=1)
S_q_mean = np.nanmean(S_q_collector, axis=1)
# C_q_collector_no_nan = np.nan_to_num(C_q_collector, nan=0)
# C_q_mean = np.mean(C_q_collector_no_nan, axis=1)
# print(C_q_mean.shape)
# # X_q_collector_no_nan = np.nan_to_num(X_q_collector, nan=0)
# X_q_mean = np.mean(X_q_collector_no_nan, axis=1)
# print(X_q_mean.shape)
# # S_q_collector_no_nan = np.nan_to_num(S_q_collector, nan=0)
# S_q_mean = np.mean(S_q_collector_no_nan, axis=1)
# print(S_q_mean.shape)
# np.save("../../data/PHASE_2/C_q_mean", C_q_mean)
# np.save("../../data/PHASE_2/X_q_mean", X_q_mean)
# np.save("../../data/PHASE_2/S_q_mean", S_q_mean)
# np.save("../../data/PHASE_2/q_vals", q_vals)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
plt.figure(figsize=(6, 4))
plt.imshow(C_q_mean, aspect="auto", interpolation="None", vmin=0, vmax=0.01)
plt.colorbar()
plt.show()
plt.figure(figsize=(6, 4))
plt.imshow(S_q_mean, aspect="auto", interpolation="None")
plt.colorbar()
plt.show()
plt.figure(figsize=(6, 4))
plt.imshow(X_q_mean, aspect="auto", interpolation="None")
plt.colorbar()
plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# par_range = A_range
par_range = np.linspace(0, 1, 50)
q_vals = np.linspace(-5, 5, 5000)[1:-1]
print(q_vals.shape, par_range.shape)
print(C_q_mean.shape)
xx, yy = np.meshgrid(q_vals, par_range)
fig = plt.figure(figsize=(20,20))
ax = plt.axes(projection='3d')
idk = ax.plot_surface(
xx, yy, C_q_mean, cmap="turbo", rstride=2, cstride=2,
shade=False, linewidth=0.05, antialiased=True, edgecolor="black",
label="String", vmin=0, vmax=8*10**(-6))
# idk._edgecolors2d=idk._edgecolors3d # fixes some weird bug when using ax.legend()
# idk._facecolors2d=idk._facecolors3d
# ax.plot_wireframe(xx, yy, m, cmap = 'coolwarm', lw=1, rstride=1, cstride=1)
# ax.set_title('')
ax.set_xlabel('q_vals')
ax.set_ylabel('par_range')
ax.set_zlabel('Cq')
ax.view_init(90,80)
# ax.legend()
# idk.set_clim(-1,1)
# fig.colorbar(idk, shrink=0.3, aspect=10, pad=0)
# ax.set_xlim(-1,1)
plt.tight_layout()
# fig.legend()
# plt.savefig("CP123", dpi=300)
plt.show()
# plt.savefig("img/E", dpi=300)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# par_range = np.linspace(0, 1, 50)
q_vals = np.linspace(-5, 5, 5000)[1:]
print(q_vals.shape, par_range.shape)
print(S_q_mean.shape)
# S_q_mean_nan = np.where(S_q_mean==0, np.nan, S_q_mean)
xx, yy = np.meshgrid(q_vals, par_range)
fig = plt.figure(figsize=(12,10))
ax | |
except BillingContactInfo.DoesNotExist:
pass
super(EditBillingAccountInfoForm, self).__init__(data, *args, **kwargs)
self.helper = hqcrispy.HQFormHelper()
fields = [
'company_name',
'first_name',
'last_name',
crispy.Field('email_list', css_class='input-xxlarge accounting-email-select2',
data_initial=json.dumps(self.initial.get('email_list'))),
'phone_number'
]
if is_ops_user and self.initial.get('email_list'):
fields.insert(4, crispy.Div(
crispy.Div(
css_class='col-sm-3 col-md-2'
),
crispy.Div(
crispy.HTML(", ".join(self.initial.get('email_list'))),
css_class='col-sm-9 col-md-8 col-lg-6'
),
css_id='emails-text',
css_class='collapse form-group'
))
fields.insert(5, crispy.Div(
crispy.Div(
css_class='col-sm-3 col-md-2'
),
crispy.Div(
StrictButton(
_("Show contact emails as text"),
type="button",
css_class='btn btn-default',
css_id='show_emails'
),
crispy.HTML('<p class="help-block">%s</p>' %
_('Useful when you want to copy contact emails')),
css_class='col-sm-9 col-md-8 col-lg-6'
),
css_class='form-group'
))
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_("Basic Information"),
*fields
),
crispy.Fieldset(
_("Mailing Address"),
'first_line',
'second_line',
'city',
'state_province_region',
'postal_code',
crispy.Field('country', css_class="input-large accounting-country-select2",
data_country_code=self.current_country or '',
data_country_name=COUNTRIES.get(self.current_country, '')),
),
hqcrispy.FormActions(
StrictButton(
_("Update Billing Information"),
type="submit",
css_class='btn btn-primary',
),
),
)
def clean_phone_number(self):
data = self.cleaned_data['phone_number']
parsed_number = None
if data:
for country in ["US", "GB", None]:
parsed_number = parse_phone_number(data, country, failhard=False)
if parsed_number is not None:
break
if parsed_number is None:
raise forms.ValidationError(_("It looks like this phone number is invalid. "
"Did you forget the country code?"))
return "+%s%s" % (parsed_number.country_code, parsed_number.national_number)
def clean_email_list(self):
return self.data.getlist('email_list')
# Does not use the commit kwarg.
# TODO - Should support it or otherwise change the function name
@transaction.atomic
def save(self, commit=True):
billing_contact_info = super(EditBillingAccountInfoForm, self).save(commit=False)
billing_contact_info.email_list = self.cleaned_data['email_list']
billing_contact_info.account = self.account
billing_contact_info.save()
self.account.save()
return True
class ConfirmNewSubscriptionForm(EditBillingAccountInfoForm):
plan_edition = forms.CharField(
widget=forms.HiddenInput,
)
def __init__(self, account, domain, creating_user, plan_version, current_subscription, data=None,
*args, **kwargs):
self.plan_version = plan_version
self.current_subscription = current_subscription
super(ConfirmNewSubscriptionForm, self).__init__(account, domain, creating_user, data=data,
*args, **kwargs)
self.fields['plan_edition'].initial = self.plan_version.plan.edition
from corehq.apps.domain.views.accounting import DomainSubscriptionView
self.helper.label_class = 'col-sm-3 col-md-2'
self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
self.helper.layout = crispy.Layout(
'plan_edition',
crispy.Fieldset(
_("Basic Information"),
'company_name',
'first_name',
'last_name',
crispy.Field('email_list', css_class='input-xxlarge accounting-email-select2',
data_initial=json.dumps(self.initial.get('email_list'))),
'phone_number',
),
crispy.Fieldset(
_("Mailing Address"),
'first_line',
'second_line',
'city',
'state_province_region',
'postal_code',
crispy.Field('country', css_class="input-large accounting-country-select2",
data_country_code=self.current_country or '',
data_country_name=COUNTRIES.get(self.current_country, ''))
),
hqcrispy.FormActions(
hqcrispy.LinkButton(_("Cancel"),
reverse(DomainSubscriptionView.urlname,
args=[self.domain]),
css_class="btn btn-default"),
StrictButton(_("Subscribe to Plan"),
type="submit",
id='btn-subscribe-to-plan',
css_class='btn btn-primary disable-on-submit-no-spinner '
'add-spinner-on-click'),
),
crispy.Hidden(name="downgrade_email_note", value="", id="downgrade-email-note"),
crispy.Hidden(name="old_plan", value=current_subscription.plan_version.plan.edition),
crispy.Hidden(name="new_plan", value=plan_version.plan.edition)
)
def save(self, commit=True):
try:
with transaction.atomic():
account_save_success = super(ConfirmNewSubscriptionForm, self).save()
if not account_save_success:
return False
cancel_future_subscriptions(self.domain, datetime.date.today(), self.creating_user)
if self.current_subscription is not None:
if self.is_same_edition():
self.current_subscription.update_subscription(
date_start=self.current_subscription.date_start,
date_end=None
)
elif self.is_downgrade_from_paid_plan() and \
self.current_subscription.is_below_minimum_subscription:
self.current_subscription.update_subscription(
date_start=self.current_subscription.date_start,
date_end=self.current_subscription.date_start + datetime.timedelta(days=30)
)
Subscription.new_domain_subscription(
account=self.account,
domain=self.domain,
plan_version=self.plan_version,
date_start=self.current_subscription.date_start + datetime.timedelta(days=30),
web_user=self.creating_user,
adjustment_method=SubscriptionAdjustmentMethod.USER,
service_type=SubscriptionType.PRODUCT,
pro_bono_status=ProBonoStatus.NO,
funding_source=FundingSource.CLIENT,
)
else:
self.current_subscription.change_plan(
self.plan_version,
web_user=self.creating_user,
adjustment_method=SubscriptionAdjustmentMethod.USER,
service_type=SubscriptionType.PRODUCT,
pro_bono_status=ProBonoStatus.NO,
do_not_invoice=False,
no_invoice_reason='',
)
else:
Subscription.new_domain_subscription(
self.account, self.domain, self.plan_version,
web_user=self.creating_user,
adjustment_method=SubscriptionAdjustmentMethod.USER,
service_type=SubscriptionType.PRODUCT,
pro_bono_status=ProBonoStatus.NO,
funding_source=FundingSource.CLIENT,
)
return True
except Exception as e:
log_accounting_error(
"There was an error subscribing the domain '%s' to plan '%s'. Message: %s "
% (self.domain, self.plan_version.plan.name, str(e)),
show_stack_trace=True,
)
return False
def is_same_edition(self):
return self.current_subscription.plan_version.plan.edition == self.plan_version.plan.edition
def is_downgrade_from_paid_plan(self):
if self.current_subscription is None:
return False
elif self.current_subscription.is_trial:
return False
else:
return is_downgrade(
current_edition=self.current_subscription.plan_version.plan.edition,
next_edition=self.plan_version.plan.edition
)
class ConfirmSubscriptionRenewalForm(EditBillingAccountInfoForm):
plan_edition = forms.CharField(
widget=forms.HiddenInput,
)
def __init__(self, account, domain, creating_user, current_subscription,
renewed_version, data=None, *args, **kwargs):
self.current_subscription = current_subscription
super(ConfirmSubscriptionRenewalForm, self).__init__(
account, domain, creating_user, data=data, *args, **kwargs
)
self.renewed_version = renewed_version
self.helper.label_class = 'col-sm-3 col-md-2'
self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
self.fields['plan_edition'].initial = renewed_version.plan.edition
from corehq.apps.domain.views.accounting import DomainSubscriptionView
self.helper.layout = crispy.Layout(
'plan_edition',
crispy.Fieldset(
_("Basic Information"),
'company_name',
'first_name',
'last_name',
crispy.Field('email_list', css_class='input-xxlarge accounting-email-select2',
data_initial=json.dumps(self.initial.get('email_list'))),
'phone_number',
),
crispy.Fieldset(
_("Mailing Address"),
'first_line',
'second_line',
'city',
'state_province_region',
'postal_code',
crispy.Field('country', css_class="input-large accounting-country-select2",
data_country_code=self.current_country or '',
data_country_name=COUNTRIES.get(self.current_country, ''))
),
hqcrispy.FormActions(
hqcrispy.LinkButton(
_("Cancel"),
reverse(DomainSubscriptionView.urlname, args=[self.domain]),
css_class="btn btn-default"
),
StrictButton(
_("Renew Plan"),
type="submit",
css_class='btn btn-primary',
),
),
)
def save(self, commit=True):
try:
with transaction.atomic():
account_save_success = super(ConfirmSubscriptionRenewalForm, self).save()
if not account_save_success:
return False
cancel_future_subscriptions(self.domain, self.current_subscription.date_start, self.creating_user)
self.current_subscription.renew_subscription(
web_user=self.creating_user,
adjustment_method=SubscriptionAdjustmentMethod.USER,
service_type=SubscriptionType.PRODUCT,
pro_bono_status=ProBonoStatus.NO,
funding_source=FundingSource.CLIENT,
new_version=self.renewed_version,
)
return True
except SubscriptionRenewalError as e:
log_accounting_error(
"Subscription for %(domain)s failed to renew due to: %(error)s." % {
'domain': self.domain,
'error': e,
}
)
return False
class ProBonoForm(forms.Form):
contact_email = MultiCharField(label=ugettext_lazy("Email To"), widget=forms.Select(choices=[]))
organization = forms.CharField(label=ugettext_lazy("Organization"))
project_overview = forms.CharField(widget=forms.Textarea, label="Project overview")
airtime_expense = forms.CharField(label=ugettext_lazy("Estimated annual expenditures on airtime:"))
device_expense = forms.CharField(label=ugettext_lazy("Estimated annual expenditures on devices:"))
pay_only_features_needed = forms.CharField(widget=forms.Textarea, label="Pay only features needed")
duration_of_project = forms.CharField(help_text=ugettext_lazy(
"We grant pro-bono subscriptions to match the duration of your "
"project, up to a maximum of 12 months at a time (at which point "
"you need to reapply)."
))
domain = forms.CharField(label=ugettext_lazy("Project Space"))
dimagi_contact = forms.CharField(
help_text=ugettext_lazy("If you have already been in touch with someone from "
"Dimagi, please list their name."),
required=False)
num_expected_users = forms.CharField(label=ugettext_lazy("Number of expected users"))
def __init__(self, use_domain_field, *args, **kwargs):
super(ProBonoForm, self).__init__(*args, **kwargs)
if not use_domain_field:
self.fields['domain'].required = False
self.helper = hqcrispy.HQFormHelper()
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_('Pro-Bono Application'),
'contact_email',
'organization',
crispy.Div(
'domain',
style=('' if use_domain_field else 'display:none'),
),
'project_overview',
'airtime_expense',
'device_expense',
'pay_only_features_needed',
'duration_of_project',
'num_expected_users',
'dimagi_contact',
),
hqcrispy.FormActions(
crispy.ButtonHolder(
crispy.Submit('submit_pro_bono', _('Submit Pro-Bono Application'))
)
),
)
def clean_contact_email(self):
if 'contact_email' in self.cleaned_data:
copy = self.data.copy()
self.data = copy
copy.update({'contact_email': ", ".join(self.data.getlist('contact_email'))})
return self.data.get('contact_email')
def process_submission(self, domain=None):
try:
params = {
'pro_bono_form': self,
'domain': domain,
}
html_content = render_to_string("domain/email/pro_bono_application.html", params)
text_content = render_to_string("domain/email/pro_bono_application.txt", params)
recipient = settings.PROBONO_SUPPORT_EMAIL
subject = "[Pro-Bono Application]"
if domain is not None:
subject = "%s %s" % (subject, domain)
send_html_email_async.delay(subject, recipient, html_content, text_content=text_content,
email_from=settings.DEFAULT_FROM_EMAIL)
except Exception:
logging.error("Couldn't send pro-bono application email. "
"Contact: %s" % self.cleaned_data['contact_email']
)
class InternalSubscriptionManagementForm(forms.Form):
autocomplete_account_types = [
BillingAccountType.CONTRACT,
BillingAccountType.GLOBAL_SERVICES,
BillingAccountType.USER_CREATED,
]
@property
def slug(self):
raise NotImplementedError
@property
def subscription_type(self):
raise NotImplementedError
@property
def account_name(self):
raise NotImplementedError
@property
def account_emails(self):
return []
def process_subscription_management(self):
raise NotImplementedError
@property
@memoized
def next_account(self):
matching_accounts = BillingAccount.objects.filter(
name=self.account_name,
account_type=BillingAccountType.GLOBAL_SERVICES,
).order_by('date_created')
if matching_accounts:
account = matching_accounts[0]
else:
account = BillingAccount(
name=get_account_name_from_default_name(self.account_name),
created_by=self.web_user,
created_by_domain=self.domain,
currency=Currency.get_default(),
dimagi_contact=self.web_user,
account_type=BillingAccountType.GLOBAL_SERVICES,
entry_point=EntryPoint.CONTRACTED,
pre_or_post_pay=PreOrPostPay.POSTPAY
)
account.save()
contact_info, _ = BillingContactInfo.objects.get_or_create(account=account)
for email in self.account_emails:
if email not in contact_info.email_list:
contact_info.email_list.append(email)
contact_info.save()
return account
@property
@memoized
def current_subscription(self):
return Subscription.get_active_subscription_by_domain(self.domain)
@property
@memoized
def should_autocomplete_account(self):
return (
self.current_subscription
and self.current_subscription.account.account_type in self.autocomplete_account_types
)
@property
@memoized
def autocomplete_account_name(self):
if self.should_autocomplete_account:
return self.current_subscription.account.name
return None
@property
@memoized
def current_contact_emails(self):
if self.should_autocomplete_account:
try:
return ','.join(self.current_subscription.account.billingcontactinfo.email_list)
except BillingContactInfo.DoesNotExist:
pass
return None
@property
def subscription_default_fields(self):
return {
'internal_change': True,
'web_user': self.web_user,
}
def __init__(self, domain, web_user, *args, **kwargs):
super(InternalSubscriptionManagementForm, self).__init__(*args, **kwargs)
self.domain = domain
self.web_user = web_user
@property
def form_actions(self):
return (
crispy.Hidden('slug', self.slug),
hqcrispy.FormActions(
crispy.Submit(
self.slug,
ugettext_noop('Update'),
css_class='disable-on-submit',
),
),
)
class DimagiOnlyEnterpriseForm(InternalSubscriptionManagementForm):
slug = 'dimagi_only_enterprise'
subscription_type = ugettext_noop('Test or Demo Project')
def __init__(self, domain, web_user, *args, **kwargs):
super(DimagiOnlyEnterpriseForm, self).__init__(domain, web_user, *args, **kwargs)
self.helper = hqcrispy.HQFormHelper()
self.helper.layout = crispy.Layout(
crispy.HTML('<div class="alert alert-info">' + ugettext_noop(
'<i class="fa fa-info-circle"></i> You will have access to all '
'features for free as soon as you hit "Update". Please make '
'sure this is an internal Dimagi test space, not in use by a '
'partner.<br>Test projects belong to Dimagi and are not subject to '
'Dimagi\'s external terms of service.'
) + '</div>'),
*self.form_actions
)
@transaction.atomic
def process_subscription_management(self):
enterprise_plan_version = DefaultProductPlan.get_default_plan_version(SoftwarePlanEdition.ENTERPRISE)
if self.current_subscription:
self.current_subscription.change_plan(
enterprise_plan_version,
account=self.next_account,
transfer_credits=self.current_subscription.account == self.next_account,
**self.subscription_default_fields
)
else:
Subscription.new_domain_subscription(
self.next_account,
self.domain,
enterprise_plan_version,
**self.subscription_default_fields
)
@property
def subscription_default_fields(self):
fields = super(DimagiOnlyEnterpriseForm, self).subscription_default_fields
fields.update({
'do_not_invoice': True,
'no_invoice_reason': '',
'service_type': SubscriptionType.INTERNAL,
})
return fields
@property
def account_name(self):
return "Dimagi Internal Test Account for Project %s" % self.domain
class AdvancedExtendedTrialForm(InternalSubscriptionManagementForm):
slug = 'advanced_extended_trial'
subscription_type = ugettext_noop('Extended Trial')
organization_name = forms.CharField(
label=ugettext_noop('Organization Name'),
max_length=BillingAccount._meta.get_field('name').max_length,
)
emails = forms.CharField(
label=ugettext_noop('Partner Contact Emails'),
)
trial_length = forms.ChoiceField(
choices=[(days, "%d days" % days) for days in [15, 30, 60, 90]],
label="Trial Length",
)
def __init__(self, domain, web_user, *args, **kwargs):
super(AdvancedExtendedTrialForm, self).__init__(domain, web_user, *args, **kwargs)
self.fields['organization_name'].initial = self.autocomplete_account_name
self.fields['emails'].initial = self.current_contact_emails
self.helper = hqcrispy.HQFormHelper()
self.helper.layout = crispy.Layout(
crispy.Field('organization_name'),
crispy.Field('emails', css_class='input-xxlarge'),
crispy.Field('trial_length', data_bind='value: trialLength'),
crispy.Div(
crispy.Div(
crispy.HTML(_(
'<p><i class="fa fa-info-circle"></i> The trial will begin as soon '
'as you hit "Update" and end on <span data-bind="text: end_date"></span>. '
'On <span data-bind="text: end_date"></span> '
'the project space will be automatically paused.</p>'
)),
css_class='col-sm-offset-3 col-md-offset-2'
),
css_class='form-group'
),
*self.form_actions
)
@transaction.atomic
def process_subscription_management(self):
advanced_trial_plan_version = DefaultProductPlan.get_default_plan_version(
edition=SoftwarePlanEdition.ADVANCED, is_trial=True,
)
if self.current_subscription:
self.current_subscription.change_plan(
advanced_trial_plan_version,
account=self.next_account,
transfer_credits=self.current_subscription.account == self.next_account,
**self.subscription_default_fields
)
else:
Subscription.new_domain_subscription(
self.next_account,
self.domain,
advanced_trial_plan_version,
**self.subscription_default_fields
)
@property
def subscription_default_fields(self):
fields = super(AdvancedExtendedTrialForm, self).subscription_default_fields
fields.update({
'auto_generate_credits': False,
'date_end': datetime.date.today() + relativedelta(days=int(self.cleaned_data['trial_length'])),
'do_not_invoice': False,
'is_trial': True,
'no_invoice_reason': '',
'service_type': SubscriptionType.EXTENDED_TRIAL
})
return fields
@property
def account_name(self):
return self.cleaned_data['organization_name']
| |
# coding=utf-8
from org.meteothink.chart.plot import Plot3D, GraphicFactory
from org.meteothink.chart import ChartText3D
from org.meteothink.legend import LegendManage, BreakTypes, PolylineBreak
from org.meteothink.shape import ShapeTypes, Graphic
from axes import Axes
import numjy as np
import plotutil
import datetime
from java.awt import Font, Color
#########################################################
class Axes3D(Axes):
'''
Axes with 3 dimensional.
'''
def __init__(self, axes=None, figure=None, **kwargs):
self.figure = figure
if axes is None:
self.axes = Plot3D()
else:
self.axes = axes
self.axestype = '3d'
self.projector = self.axes.getProjector()
#distance = kwargs.pop('distance', 10000)
#self.projector.setDistance(distance)
rotation_angle = kwargs.pop('rotation', 225)
self.projector.setRotationAngle(rotation_angle)
elevation_angle = kwargs.pop('elevation', 30)
self.projector.setElevationAngle(elevation_angle)
xyaxis = kwargs.pop('xyaxis', True)
self.axes.setDisplayXY(xyaxis)
zaxis = kwargs.pop('zaxis', True)
self.axes.setDisplayZ(zaxis)
grid = kwargs.pop('grid', True)
self.axes.setDisplayGrids(grid)
boxed = kwargs.pop('boxed', True)
self.axes.setBoxed(boxed)
bbox = kwargs.pop('bbox', False)
self.axes.setDrawBoundingBox(bbox)
def get_distance(self):
'''
Get distance to object.
:returns: Distance to object.
'''
return self.projector.getDistance()
def set_distance(self, dis):
'''
Set distance to object.
:param dis: (*float*) Distance to object.
'''
self.projector.setDistance(dis)
def get_rotation(self):
'''
Get rotation angle.
:returns: Rotation angle.
'''
return self.projector.getRotationAngle()
def set_rotation(self, rotation):
'''
Set rotation angle.
:param rotation: (*float*) Rotation angle.
'''
self.projector.setRotationAngle(rotation)
def get_elevation(self):
'''
Get elevation angle.
:returns: Elevation angle.
'''
return self.projector.getElevationAngle()
def set_elevation(self, elevation):
'''
Set elevation angle.
:param elevation: (*float*) Elevation angle.
'''
self.projector.setElevationAngle(elevation)
def set_draw_xy(self, dxy):
'''
Set draw xy axis or not.
:param dxy: (*boolean*) Draw xy axis or not.
'''
self.axes.setDisplayXY(dxy)
def set_draw_z(self, dz):
'''
Set draw z axis or not.
:param dz: (*boolean*) Draw z axis or not.
'''
self.axes.setDisplayZ(dz)
def set_draw_box(self, db):
'''
Set draw 3D box or not.
:param db: (*boolean*) Draw 3D box or not.
'''
self.axes.setBoxed(db)
def set_draw_bbox(self, bbox):
'''
Set draw bounding box or not.
:param db: (*boolean*) Draw bounding box or not.
'''
self.axes.setDrawBoundingBox(bbox)
def get_xlim(self):
"""
Get the *x* limits of the current axes.
:returns: (*tuple*) x limits.
"""
return self.axes.getXMin(), self.axes.getXMax()
def set_xlim(self, xmin, xmax):
"""
Set the *x* limits of the current axes.
:param xmin: (*float*) Minimum limit of the x axis.
:param xmax: (*float*) Maximum limit of the x axis.
"""
if isinstance(xmin, datetime.datetime):
xmin = np.miutil.date2num(xmin)
if isinstance(xmax, datetime.datetime):
xmax = np.miutil.date2num(xmax)
self.axes.setXMinMax(xmin, xmax)
def get_ylim(self):
"""
Get the *y* limits of the current axes.
:returns: (*tuple*) y limits.
"""
return self.axes.getYMin(), self.axes.getYMax()
def set_ylim(self, ymin, ymax):
"""
Set the *y* limits of the current axes.
:param ymin: (*float*) Minimum limit of the y axis.
:param ymax: (*float*) Maximum limit of the y axis.
"""
if isinstance(ymin, datetime.datetime):
ymin = np.miutil.date2num(ymin)
if isinstance(ymax, datetime.datetime):
ymax = np.miutil.date2num(ymax)
self.axes.setYMinMax(ymin, ymax)
def get_zlim(self):
"""
Get the *z* limits of the current axes.
:returns: (*tuple*) z limits.
"""
return self.axes.getZMin(), self.axes.getZMax()
def set_zlim(self, zmin, zmax):
"""
Set the *z* limits of the current axes.
:param zmin: (*float*) Minimum limit of the z axis.
:param zmax: (*float*) Maximum limit of the z axis.
"""
if isinstance(zmin, datetime.datetime):
zmin = np.miutil.date2num(zmin)
if isinstance(zmax, datetime.datetime):
zmax = np.miutil.date2num(zmax)
self.axes.setZMinMax(zmin, zmax)
def get_zticks(self):
'''
Get z axis tick locations.
'''
axis = self.axes.getZAxis()
axis.updateTickLabels()
return axis.getTickLocations()
def set_zticks(self, locs):
'''
Set z axis tick locations.
'''
axis = self.axes.getZAxis()
if isinstance(locs, (np.NDArray, DimArray)):
            locs = locs.aslist()
axis.setTickLocations(locs)
def get_zticklabels(self):
'''
Get z axis tick labels.
'''
axis = self.axes.getZAxis()
axis.updateTickLabels()
return axis.getTickLabelText()
def set_zticklabels(self, labels, **kwargs):
'''
Set z axis tick labels.
'''
axis = self.axes.getZAxis()
if not labels is None:
if isinstance(labels, (np.NDArray, DimArray)):
labels = labels.aslist()
if isinstance(labels[0], (int, long, float)):
axis.setTickLabels_Number(labels)
else:
axis.setTickLabelText(labels)
fontname = kwargs.pop('fontname', axis.getTickLabelFont().getName())
fontsize = kwargs.pop('fontsize', axis.getTickLabelFont().getSize())
bold =kwargs.pop('bold', axis.getTickLabelFont().isBold())
if bold:
font = Font(fontname, Font.BOLD, fontsize)
else:
font = Font(fontname, Font.PLAIN, fontsize)
color = kwargs.pop('color', axis.getTickLabelColor())
c = plotutil.getcolor(color)
angle = kwargs.pop('rotation', 0)
if angle == 'vertical':
angle = 90
axis.setTickLabelFont(font)
axis.setTickLabelColor(c)
axis.setTickLabelAngle(angle)
def zaxis(self, **kwargs):
"""
Set z axis of the axes.
:param color: (*Color*) Color of the z axis. Default is 'black'.
        :param shift: (*int*) z axis shift along the horizontal direction. Units are pixels. Default is 0.
"""
visible = kwargs.pop('visible', None)
shift = kwargs.pop('shift', None)
color = kwargs.pop('color', None)
if not color is None:
color = plotutil.getcolor(color)
linewidth = kwargs.pop('linewidth', None)
linestyle = kwargs.pop('linestyle', None)
tickline = kwargs.pop('tickline', None)
tickline = kwargs.pop('tickvisible', tickline)
ticklabel = kwargs.pop('ticklabel', None)
minortick = kwargs.pop('minortick', False)
minorticknum = kwargs.pop('minorticknum', 5)
tickin = kwargs.pop('tickin', True)
axistype = kwargs.pop('axistype', None)
tickfontname = kwargs.pop('tickfontname', 'Arial')
tickfontsize = kwargs.pop('tickfontsize', 14)
tickbold = kwargs.pop('tickbold', False)
if tickbold:
font = Font(tickfontname, Font.BOLD, tickfontsize)
else:
font = Font(tickfontname, Font.PLAIN, tickfontsize)
axislist = []
axislist.append(self.axes.getZAxis())
for axis in axislist:
if not visible is None:
axis.setVisible(visible)
if not shift is None:
axis.setShift(shift)
if not color is None:
axis.setColor_All(color)
if not linewidth is None:
axis.setLineWidth(linewidth)
if not linestyle is None:
axis.setLineStyle(linestyle)
if not tickline is None:
axis.setDrawTickLine(tickline)
if not ticklabel is None:
axis.setDrawTickLabel(ticklabel)
axis.setMinorTickVisible(minortick)
axis.setMinorTickNum(minorticknum)
axis.setInsideTick(tickin)
axis.setTickLabelFont(font)
def plot(self, x, y, z, *args, **kwargs):
"""
Plot 3D lines and/or markers to the axes. *args* is a variable length argument, allowing
for multiple *x, y* pairs with an optional format string.
:param x: (*array_like*) Input x data.
:param y: (*array_like*) Input y data.
:param z: (*array_like*) Input z data.
:param style: (*string*) Line style for plot.
:returns: Legend breaks of the lines.
The following format string characters are accepted to control the line style or marker:
========= ===========
Character Description
========= ===========
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
's' square marker
'p' pentagon marker
'*' star marker
'x' x marker
'D' diamond marker
========= ===========
The following color abbreviations are supported:
========= =====
Character Color
========= =====
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
========= =====
"""
xdata = plotutil.getplotdata(x)
ydata = plotutil.getplotdata(y)
zdata = plotutil.getplotdata(z)
style = None
if len(args) > 0:
style = args[0]
#Set plot data styles
label = kwargs.pop('label', 'S_1')
mvalues = kwargs.pop('mvalues', None)
if mvalues is None:
if style is None:
line = plotutil.getlegendbreak('line', **kwargs)[0]
line.setCaption(label)
else:
line = plotutil.getplotstyle(style, label, **kwargs)
colors = kwargs.pop('colors', None)
if not colors is None:
colors = plotutil.getcolors(colors)
cbs = []
for color in colors:
cb = line.clone()
cb.setColor(color)
cbs.append(cb)
else:
ls = kwargs.pop('symbolspec', None)
if ls is None:
if isinstance(mvalues, (list, tuple)):
mvalues = np.array(mvalues)
levels = kwargs.pop('levs', None)
if levels is None:
levels = kwargs.pop('levels', None)
if levels is None:
cnum = kwargs.pop('cnum', None)
if cnum is None:
ls = plotutil.getlegendscheme([], mvalues.min(), mvalues.max(), **kwargs)
else:
ls = plotutil.getlegendscheme([cnum], mvalues.min(), mvalues.max(), **kwargs)
else:
ls = plotutil.getlegendscheme([levels], mvalues.min(), mvalues.max(), **kwargs)
ls = plotutil.setlegendscheme_line(ls, **kwargs)
#Add graphics
if mvalues is None:
if colors is None:
graphics = GraphicFactory.createLineString3D(xdata, ydata, zdata, line)
else:
graphics = GraphicFactory.createLineString3D(xdata, ydata, zdata, cbs)
else:
mdata = plotutil.getplotdata(mvalues)
graphics = GraphicFactory.createLineString3D(xdata, ydata, zdata, mdata, ls)
visible = kwargs.pop('visible', True)
if visible:
self.add_graphic(graphics)
return graphics
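    # Illustrative usage (added; assumes a MeteoInfo/numjy session in which `ax`
    # is an Axes3D instance):
    #   t = np.linspace(0., 6.28, 100)
    #   ax.plot(np.sin(t), np.cos(t), t, 'r-')   # red solid 3D helix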
def scatter(self, x, y, z, s=8, c='b', marker='o', alpha=None, linewidth=None,
verts=None, **kwargs):
"""
Make a 3D scatter plot of x, y and z, where x, y and z are sequence like objects of the same lengths.
:param x: (*array_like*) Input x data.
:param y: (*array_like*) Input y data.
:param z: (*array_like*) Input z data.
:param s: (*int*) Size of points.
        :param c: (*Color*) Color of the points, or z values.
:param alpha: (*int*) The alpha blending value, between 0 (transparent) and 1 (opaque).
:param marker: (*string*) Marker of the points.
:param label: (*string*) Label of the points series.
:param levs: (*array_like*) Optional. A list of floating point numbers indicating the level
| |
# File: st7735/test.py
import machine
from machine import Pin
import st7735
import time
import random
import vga1_8x8 as font
from struct import unpack
from struct import pack
import binascii
import sys
import uos
import network
import framebuf
# Subclassing FrameBuffer provides support for graphics primitives
# http://docs.micropython.org/en/latest/pyboard/library/framebuf.html
fonts= {
0xe585b3:
[0x10, 0x08, 0x08, 0x00, 0x3F, 0x01, 0x01, 0x01, 0xFF, 0x01, 0x02, 0x02, 0x04, 0x08, 0x30, 0xC0,
0x10, 0x10, 0x20, 0x00, 0xF8, 0x00, 0x00, 0x00, 0xFE, 0x00, 0x80, 0x80, 0x40, 0x20, 0x18, 0x06], # 关",0
0xe788b1:
[0x00, 0x01, 0x7E, 0x22, 0x11, 0x7F, 0x42, 0x82, 0x7F, 0x04, 0x07, 0x0A, 0x11, 0x20, 0x43, 0x1C,
0x08, 0xFC, 0x10, 0x10, 0x20, 0xFE, 0x02, 0x04, 0xF8, 0x00, 0xF0, 0x10, 0x20, 0xC0, 0x30, 0x0E], # 爱",1
0xe58d95:
[0x10, 0x08, 0x04, 0x3F, 0x21, 0x21, 0x3F, 0x21, 0x21, 0x3F, 0x01, 0x01, 0xFF, 0x01, 0x01, 0x01,
0x10, 0x20, 0x40, 0xF8, 0x08, 0x08, 0xF8, 0x08, 0x08, 0xF8, 0x00, 0x00, 0xFE, 0x00, 0x00, 0x00], # 单",2
0xe8baab:
[0x02, 0x04, 0x1F, 0x10, 0x1F, 0x10, 0x1F, 0x10, 0x10, 0x7F, 0x00, 0x00, 0x03, 0x1C, 0xE0, 0x00,
0x00, 0x00, 0xF0, 0x10, 0xF0, 0x10, 0xF2, 0x14, 0x18, 0xF0, 0x50, 0x90, 0x10, 0x10, 0x50, 0x20], # 身",3
0xe78b97:
[0x00, 0x44, 0x29, 0x11, 0x2A, 0x4C, 0x89, 0x09, 0x19, 0x29, 0x49, 0x89, 0x08, 0x08, 0x50, 0x20,
0x80, 0x80, 0x00, 0xFC, 0x04, 0x04, 0xE4, 0x24, 0x24, 0x24, 0xE4, 0x24, 0x04, 0x04, 0x28, 0x10], # 狗",4
0xe68890:
[0x00, 0x00, 0x00, 0x3F, 0x20, 0x20, 0x20, 0x3E, 0x22, 0x22, 0x22, 0x22, 0x2A, 0x44, 0x40, 0x81,
0x50, 0x48, 0x40, 0xFE, 0x40, 0x40, 0x44, 0x44, 0x44, 0x28, 0x28, 0x12, 0x32, 0x4A, 0x86, 0x02], # 成",5
0xe995bf:
[0x08, 0x08, 0x08, 0x08, 0x08, 0x09, 0x08, 0xFF, 0x0A, 0x09, 0x08, 0x08, 0x09, 0x0A, 0x0C, 0x08,
0x00, 0x10, 0x20, 0x40, 0x80, 0x00, 0x00, 0xFE, 0x00, 0x00, 0x80, 0x40, 0x20, 0x18, 0x06, 0x00], # 长",6
0xe58d8f:
[0x20, 0x20, 0x20, 0x20, 0xFB, 0x20, 0x20, 0x22, 0x22, 0x24, 0x28, 0x20, 0x21, 0x21, 0x22, 0x24,
0x80, 0x80, 0x80, 0x80, 0xF0, 0x90, 0x90, 0x98, 0x94, 0x92, 0x92, 0x90, 0x10, 0x10, 0x50, 0x20], # 协",7
0xe4bc9a:
[0x01, 0x01, 0x02, 0x04, 0x08, 0x30, 0xCF, 0x00, 0x00, 0x7F, 0x02, 0x04, 0x08, 0x10, 0x3F, 0x10,
0x00, 0x00, 0x80, 0x40, 0x20, 0x18, 0xE6, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x20, 0x10, 0xF8, 0x08], # 会",8
###
0xe4b8ad:
[0x01,0x01,0x01,0x01,0x3F,0x21,0x21,0x21,0x21,0x21,0x3F,0x21,0x01,0x01,0x01,0x01,0x00,0x00,0x00,0x00,0xF8,0x08,0x08,0x08,0x08,0x08,0xF8,0x08,0x00,0x00,0x00,0x00],#中0
0xe59bbd:
[0x00,0x7F,0x40,0x40,0x5F,0x41,0x41,0x4F,0x41,0x41,0x41,0x5F,0x40,0x40,0x7F,0x40,0x00,0xFC,0x04,0x04,0xF4,0x04,0x04,0xE4,0x04,0x44,0x24,0xF4,0x04,0x04,0xFC,0x04],#国1
0xe4baba:
[0x01,0x01,0x01,0x01,0x01,0x01,0x02,0x02,0x04,0x04,0x08,0x08,0x10,0x20,0x40,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x80,0x40,0x40,0x20,0x20,0x10,0x08,0x04,0x02],#人2
###
}
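# Note on the `fonts` table above (added for clarity): each entry is keyed by
# the three UTF-8 bytes of a character packed into a 24-bit integer (see
# st7735_pic_fb.chinese below) and holds 32 bytes of 16x16 bitmap data -- the
# first 16 bytes are the left 8-pixel halves of rows 0..15, the last 16 bytes
# the right halves.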
class st7735_pic_fb(framebuf.FrameBuffer):
def __init__(self, display, width = 160, height = 80):
self.width = width
self.height = height
self.buffer = bytearray(self.width * self.height * 2)
super().__init__(self.buffer, self.width, self.height, framebuf.RGB565)
self.init_display()
def init_display(self):
self.fill(0)
self.show(0, 0)
def load(self, buf):
self.buffer[:] = buf #copy the buf
def show(self, x, y):
display.blit_buffer(self.buffer, x, y, self.width, self.height)
def chinese(self, ch_str, x_axis, y_axis):
offset_ = 0
        y_axis = y_axis*8  # each text row unit is 8 pixels tall
        x_axis = x_axis*16  # each Chinese character is 16 pixels wide
for k in ch_str:
code = 0x00 # 将中文转成16进制编码
data_code = k.encode("utf-8")
code |= data_code[0] << 16
code |= data_code[1] << 8
code |= data_code[2]
byte_data = fonts[code]
for y in range(0, 16):
a_ = bin(byte_data[y]).replace('0b', '')
while len(a_) < 8:
a_ = '0'+a_
b_ = bin(byte_data[y+16]).replace('0b', '')
while len(b_) < 8:
b_ = '0'+b_
for x in range(0, 8):
self.pixel(x_axis+offset_+x, y+y_axis, int(a_[x]))
self.pixel(x_axis+offset_+x+8, y+y_axis, int(b_[x]))
offset_ += 16
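def chinese_demo():
    # Hedged example (not part of the original test script): render a short
    # string from the local `fonts` table into an off-screen framebuffer and
    # push it to the panel. Assumes display_init() has been called so that the
    # global `display` exists.
    page = st7735_pic_fb(display)
    page.chinese(u'中国人', 0, 0)
    page.show(0, 0)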
def sd_init():
global sd
sd = machine.SDCard(slot=1, width=1, freq=40000000)
uos.mount(sd, '/sd')
print(uos.listdir('/sd'))
def wifi_init():
sta_if = network.WLAN(network.STA_IF)
sta_if.active(True)
sta_if.connect('TP-LINK_AB2B02', '58930933')
while True:
if sta_if.isconnected():
print(sta_if.ifconfig())
break
def ftp_init():
import uftpd
def display_init():
global display
spi = machine.SPI(1, baudrate=30000000, polarity=0, phase=0, sck=Pin(32), mosi=Pin(5))# 26.6MHz
display = st7735.ST7735(spi, 80, 160, dc=machine.Pin(4, machine.Pin.OUT), rotation=3)
display.init()
def random_color_fill():
global display
while True:
display.fill(
st7735.color565(
random.getrandbits(8),
random.getrandbits(8),
random.getrandbits(8),
),
)
        # Pause 200 ms between fills.
time.sleep_ms(200)
def random_text():
global display
while True:
for rotation in range(4):
display.rotation(rotation)
display.fill(0)
col_max = display.width() - font.WIDTH*6
row_max = display.height() - font.HEIGHT
for _ in range(250):
color1 = st7735.color565(random.getrandbits(8), random.getrandbits(8), random.getrandbits(8))
color2 = st7735.color565(random.getrandbits(8), random.getrandbits(8), random.getrandbits(8))
display.text(font, "Hello!", random.randint(0, col_max), random.randint(0, row_max), color1, color2)
def random_circle():
global display
while True:
for rotation in range(4):
display.rotation(rotation)
display.fill(0)
col_max = display.width()
row_max = display.height()
for _ in range(250):
color1 = st7735.color565(random.getrandbits(8), random.getrandbits(8), random.getrandbits(8))
color2 = st7735.color565(random.getrandbits(8), random.getrandbits(8), random.getrandbits(8))
display.circle(random.randint(0, col_max), random.randint(0, row_max), 10, color1, color2)
# display.circle(random.randint(0, col_max), random.randint(0, row_max), 15, st7735.WHITE, st7735.BLUE)
def circle_test():
global display
for x in reversed(range(0, 40)):
color1 = st7735.color565(random.getrandbits(8), random.getrandbits(8), random.getrandbits(8))
color2 = st7735.color565(random.getrandbits(8), random.getrandbits(8), random.getrandbits(8))
display.circle(40, 80, x, color1, color2)
def chinese_font_test():
global display
s = u'中国人'
st = s.encode('unicode_escape')
print(st)
for x in range(10):
display.show_chinese(x*16, 0, 0, 16, st7735.WHITE, st7735.BLACK)
for x in range(10):
display.show_chinese(x*16, 16, 1, 16, st7735.WHITE, st7735.BLACK)
for x in range(10):
display.show_chinese(x*16, 32, 2, 16, st7735.WHITE, st7735.BLACK)
for x in range(10):
display.show_chinese(x*16, 48, 3, 16, st7735.WHITE, st7735.BLACK)
for x in range(10):
display.show_chinese(x*16, 64, 4, 16, st7735.WHITE, st7735.BLACK)
def qq_pic():
global display
buf = bytearray(0)
with open('/sd/test.bin', 'rb') as ff:
try:
while True:
rr = unpack('BB', ff.read(2))
buf.append(rr[1])
buf.append(rr[0])
except Exception as e:
print(str(e))
print('finished!')
else:
pass
#print(buf, len(buf))
display.blit_buffer(buf, 0, 0, 40, 40)
def test_scroll():
global display
x = 0
for x in range(163):
display.vscsad(x)
time.sleep_ms(10)
def test_pic():
start = time.ticks_us()
global display
buf = bytearray(160*80*2)
during0 = time.ticks_diff(time.ticks_us(), start)
print('init var: {0:0.3f} ms'.format(during0/1000))
with open('/sd/BEEB_TEST.bin', 'rb') as ff:
buf = ff.read()
during1 = time.ticks_diff(time.ticks_us(), start)
print('read from sdnand: {0:0.3f} ms'.format((during1 - during0)/1000))
display.blit_buffer(buf, 0, 0, 160, 80)
during2 = time.ticks_diff(time.ticks_us(), start)
print('send to lcd: {0:0.3f} ms'.format((during2 - during1)/1000))
def test_framebuf():
global display
buf = bytearray(160*80*2)
with open('/sd/BEEB_TEST.bin', 'rb') as ff:
buf = ff.read()
page0 = st7735_pic_fb(display)
page0.init_display()
page0.load(buf)
for x in range(80):
# page0.text('World!!!', random.randint(0, 160), random.randint(0, 80))
time.sleep_ms(10)
page0.scroll(1, 1)
page0.show()
page0.init_display()
page0.chinese('关爱单身狗成长协会中', 0, 0)
page0.show()
def load_bmp_file(file, x, y):
global display
with open(file, 'rb') as f:
if f.read(2) == b'BM': #header
dummy = f.read(8) #file size(4), creator bytes(4)
offset = int.from_bytes(f.read(4), 'little')
hdrsize = int.from_bytes(f.read(4), 'little')
width = int.from_bytes(f.read(4), 'little')
height = int.from_bytes(f.read(4), 'little')
if int.from_bytes(f.read(2), 'little') == 1: #planes must be 1
depth = int.from_bytes(f.read(2), 'little')
if depth == 24 and int.from_bytes(f.read(4), 'little') == 0:#compress method == uncompressed
print("Image size: w{} x h{}".format(width, height))
rowsize = (width * 3 + 3) & ~3
if height < 0:
height = -height
flip = False
else:
flip = True
w, h = width, height
display.set_window(x, x + w - 1, y, y + h - 1)
for row in range(h):
if flip:
pos = offset + (height - 1 - row) * rowsize
else:
pos = offset + row * rowsize
if f.tell() != pos:
dummy = f.seek(pos)
for col in range(w):
bgr = f.read(3)
display.push_rgb_color(bgr[2], bgr[1], bgr[0])
else:
print('not 24bit bmp.')
def test_load_bmp_file():
load_bmp_file('/sd/qq_logo_24bit.bmp', 0, 0)
def test_framebuf_pic0():
global display
with open('/sd/test0.bmp', 'rb') as f:
if f.read(2) == b'BM': #header
dummy = f.read(8) #file size(4), creator bytes(4)
offset = int.from_bytes(f.read(4), 'little')
hdrsize = int.from_bytes(f.read(4), 'little')
width = int.from_bytes(f.read(4), 'little')
height = int.from_bytes(f.read(4), 'little')
buf = bytearray(width*height*2) #init buf
page0 = st7735_pic_fb(display, width, height)
page0.init_display()
if int.from_bytes(f.read(2), 'little') == 1: #planes must be 1
depth = int.from_bytes(f.read(2), 'little')
if depth == 24 and int.from_bytes(f.read(4), 'little') == 0:#compress method == uncompressed
print("Image size: w{} x h{}".format(width, height))
rowsize = (width * 3 + 3) & ~3
print('rowsize is {}'.format(rowsize))
if height < 0:
height = -height
flip = False
else:
flip = True
w, h = width, height
for row in reversed(range(h)):
if flip:
pos = offset + (height - 1 - row) * rowsize
else:
pos = offset + row * rowsize
if f.tell() != pos:
dummy = f.seek(pos)
for col in range(w):
bgr = f.read(3)
rgb_color = st7735.color565(bgr[2], bgr[1], bgr[0])
buf[row*w*2 + col*2] = rgb_color >> 8
buf[row*w*2 + col*2 + 1] = rgb_color
else:
print('not 24bit bmp.')
page0.load(buf)
# page0.show(0, 0)
# while 1:
# page0.show(random.randint(0, 160), random.randint(0, 80))
with open('/sd/skull-and-crossbones.bmp', 'rb') as f:
if f.read(2) == b'BM': #header
dummy = f.read(8) #file size(4), creator bytes(4)
offset = int.from_bytes(f.read(4), 'little')
hdrsize = int.from_bytes(f.read(4), 'little')
width = int.from_bytes(f.read(4), 'little')
| |
# Repository: thejasvibr/itsfm
# -*- coding: utf-8 -*-
"""
Even though the spectrogram is one of the most dominant time-frequency
representation, there are whole class of alternate representations. This
module has the code which tracks the dominant frequency in a sound using
non-spectrogram methods.
The Pseudo Wigner Ville Distribution
....................................
The Pseudo Wigner Ville Distribution is an accurate but not so well known
method to represent a signal on the time-frequency axis[1]. This time-frequency
representation is implemented in the `get_pwvd_frequency_profile`.
References
----------
[1] Cohen, L. (1995). Time-frequency analysis (Vol. 778). Prentice Hall.
"""
import numpy as np
import scipy.ndimage as ndimage
import scipy.signal as signal
import skimage.filters as filters
from tftb.processing import PseudoWignerVilleDistribution
import itsfm.signal_cleaning
from itsfm.signal_cleaning import suppress_background_noise, remove_bursts, smooth_over_potholes
from itsfm.signal_cleaning import exterpolate_over_anomalies
from itsfm.signal_cleaning import clip_tfr, smooth_over_potholes
from itsfm.signal_processing import moving_rms_edge_robust, dB
def get_pwvd_frequency_profile(input_signal, fs, **kwargs):
'''Generates a clean frequency profile through the PWVD.
The order of frequency profile processing is as follows:
#. Split input signal into regions that are
greater or equal to the `signal_level`. This
speeds up the whole process of pwvd tracking
multiple sounds, and ignores the fainter samples.
#. Generate PWVD for each above-noise region.
#. Set regions below background noise to 0Hz
#. Remove sudden spikes and set these regions to values
decided by interpolation between adjacent non-spike regions.
Parameters
----------
input_signal : np.array
fs : float
Notes
-----
The fact that each signal part is split into independent
above-background segments and then frequency tracked can
have implications for frequency resolution. Short sounds
may end up with frequency profiles that have a lower
resolution than longer sounds. Each sound is handled separately
primarily for memory and speed considerations.
Example
-------
Create two chirps in the middle of a somewhat silent recording
>>> import matplotlib.pyplot as plt
>>> from itsfm.simulate_calls import make_fm_chirp
>>> from itsfm.view_horseshoebat_call import plot_movingdbrms
>>> from itsfm.view_horseshoebat_call import visualise_call, make_x_time
>>> fs = 44100
>>> start_f, end_f = 1000, 10000
>>> chirp = make_fm_chirp(start_f, end_f, 0.01, fs)
>>> rec = np.random.normal(0,10**(-50/20), 22100)
>>> chirp1_start, chirp1_end = 10000, 10000 + chirp.size
>>> chirp2_start, chirp2_end = np.array([chirp1_start, chirp1_end])+int(fs*0.05)
    >>> rec[chirp1_start:chirp1_end] += chirp
>>> rec[chirp2_start:chirp2_end] += chirp
>>> rec /= np.max(abs(rec))
>>> actual_fp = np.zeros(rec.size)
>>> actual_fp[chirp1_start:chirp1_end] = np.linspace(start_f, end_f, chirp.size)
>>> actual_fp[chirp2_start:chirp2_end] = np.linspace(start_f, end_f, chirp.size)
Check out the dB rms profile of the recording to figure out where the
noise floor is
>>> plot_movingdbrms(rec, fs)
>>> clean_fp, info = get_pwvd_frequency_profile(rec, fs,
signal_level=-9,
extrap_window=10**-3,
max_acc = 0.6)
>>> plt.plot(clean_fp, label='obtained')
>>> plt.plot(actual_fp, label='actual')
>>> plt.legend()
Now, let's overlay the obtained frequency profile onto a spectrogram to
check once more how well the dominant frequency has been tracked.
>>> w,s = visualise_call(rec, fs, fft_size=128)
>>> s.plot(make_x_time(clean_fp, fs), clean_fp)
See Also
--------
itsfm.signal_cleaning.smooth_over_potholes
find_above_noise_regions
'''
info = {}
above_noise_regions, moving_dbrms = find_geq_signallevel(input_signal, fs, **kwargs)
full_fp = np.zeros(input_signal.size)
full_raw_fp = np.zeros(input_signal.size)
acc_profile = np.zeros(input_signal.size)
spikey_regions = np.zeros(input_signal.size)
#print('generating PWVD frequency profile....')
for region in above_noise_regions:
raw_fp, frequency_index = generate_pwvd_frequency_profile(input_signal[region],
fs, **kwargs)
weird_parts, accelaration_profile = frequency_spike_detection(raw_fp, fs, **kwargs)
        full_raw_fp[region] = raw_fp
        cleaned_fp = exterpolate_over_anomalies(raw_fp, fs, weird_parts,
                                                **kwargs)
acc_profile[region] = accelaration_profile
full_fp[region] = cleaned_fp
spikey_regions[region[0]][weird_parts] = 1
info['moving_dbrms'] = moving_dbrms
info['geq_signal_level'] = above_noise_regions
info['raw_fp'] = full_raw_fp
info['acc_profile'] = acc_profile
info['spikey_regions'] = spikey_regions
return full_fp, info
def find_geq_signallevel(X, fs, **kwargs):
'''
Find regions greater or equal to signal level
'''
signal_level = kwargs.get('signal_level', -20)
winsize = kwargs.get('window_size', int(fs*0.002))
rec_level = dB(moving_rms_edge_robust(X, window_size=winsize))
ids_above_noise, num_regions = ndimage.label(rec_level>signal_level)
if num_regions <1:
raise ValueError('No regions above signal level found!')
return ndimage.find_objects(ids_above_noise), rec_level
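# Hedged illustration, not part of the original itsfm module: a minimal,
# self-contained sketch of the label/find_objects segmentation idea used by
# find_geq_signallevel above. The threshold and toy dB-rms values are
# assumptions chosen only for demonstration.
def _segmentation_sketch():
    import numpy as np
    from scipy import ndimage
    level = np.array([-60, -60, -10, -5, -8, -60, -60, -12, -60], dtype=float)
    labelled, num_regions = ndimage.label(level > -20)
    regions = ndimage.find_objects(labelled)
    # For a 1D input each entry is a one-slice tuple,
    # e.g. [(slice(2, 5, None),), (slice(7, 8, None),)]
    return regions, num_regions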
def clean_up_spikes(whole_freqeuncy_profile, fs, **kwargs):
'''Applies smooth_over_potholes on each non-zero frequency segment
in the profile.
Parameters
----------
Returns
-------
See Also
--------
smooth_over_potholes
Example
-------
Let's create a case with an FM and CF tone
>>> import numpy as np
>>> from itsfm.simulate_calls import make_tone, make_fm_chirp, silence
>>> fs = 22100
>>> tone = make_tone(5000, 0.01, fs)
>>> sweep = make_fm_chirp(1000, 6000, 0.005, fs)
>>> gap = silence(0.005, fs)
>>> full_call = np.concatenate((tone, gap, sweep))
The raw frequency profile, with very noisy frequency estimates needs
to be further cleaned
>>> raw_fp, frequency_index = generate_pwvd_frequency_profile(full_call,
fs)
>>> noise_supp_fp = suppress_background_noise(raw_fp,
full_call,
window_size=25,
background_noise=-30)
Even after the noisy parts have been suppressed, there are still some
spikes left in the frequency profile, which are then smoothed segment by segment:
>>> de_spiked = clean_up_spikes(noise_supp_fp, fs)
'''
nonzero_freqs, num_regions = ndimage.label(whole_freqeuncy_profile>0)
segment_locations = ndimage.find_objects(nonzero_freqs)
if len(segment_locations) < 1:
raise ValueError('No non-zero frequency sounds found..!')
de_spiked = np.zeros(whole_freqeuncy_profile.size)
for segment in segment_locations:
smoothed, _ = smooth_over_potholes(whole_freqeuncy_profile[segment],
fs, **kwargs)
de_spiked[segment] = smoothed
return de_spiked
def generate_pwvd_frequency_profile(input_signal, fs, **kwargs):
'''Generates the raw instantaneous frequency estimate at each sample.
using the Pseudo Wigner Ville Distribution
Parameters
----------
input_signal : np.array
fs : float
pwvd_filter : Boolean, optional
Whether to perform median filtering with a 2D kernel.
Defaults to False
pwvd_filter_size : int, optional
The size of the square 2D kernel used to median filter the
initial PWVD time-frequency representation.
pwvd_window : float>0, optional
The duration of the window used in the PWVD. See pwvd_transform
for the default value.
tfr_cliprange: float >0, optional
The clip range in dB.
Clips all values in the abs(pwvd) time-frequency
representation to between max and max*10*(-tfr_cliprange/20.0).
Defaults to None, which does not alter the pwvd transform in anyway.
Returns
-------
raw_frequency_profile, frequency_indx : np.array
Both outputs are the same size as input_signal.
raw_frequency_profile is the inst. frequency in Hz.
frequency_indx is the row index of the PWVD array.
See Also
--------
pwvd_transform
track_peak_frequency_over_time
itsfm.signal_cleaning.clip_tfr
'''
pwvd_filter = kwargs.get('pwvd_filter', False)
pwvd_filter_size = kwargs.get('pwvd_filter_size', 10)
filter_dims = (pwvd_filter_size, pwvd_filter_size)
time_freq_rep = np.abs(pwvd_transform(input_signal, fs,
**kwargs))
clipped_tfr = clip_tfr(time_freq_rep, **kwargs)
if pwvd_filter:
print('....A 2D median filter kernel is being applied to the PWVD...')
median_filtered_tf = filters.median_filter(clipped_tfr, size=filter_dims)
print('..done with PWVD filtering..')
raw_frequency_profile, frequency_indx = track_peak_frequency_over_time(input_signal, fs,
median_filtered_tf,
**kwargs)
else:
raw_frequency_profile, frequency_indx = track_peak_frequency_over_time(input_signal, fs,
clipped_tfr,
**kwargs)
return raw_frequency_profile, frequency_indx
def pwvd_transform(input_signal, fs, **kwargs):
'''Converts the input signal into an analytical signal and then generates
the PWVD of the analytical signal.
Uses the PseudoWignerVilleDistribution class from the tftb package [1].
Parameters
----------
input_signal : np.array
fs : float
pwvd_window_type : np.array, optional
The window to be used for the pseudo wigner-ville distribution.
If not given, then a Hann window of the default length is used.
The window given here supersedes the 'pwvd_window' argument below.
pwvd_window : float>0, optional
The duration of the window used in the PWVD. Defaults to 0.001s
Returns
-------
time_frequency_output : np.array
Two dimensional array with dimensions of NsamplesxNsamples, where
Nsamples is the number of samples in input_signal.
References
----------
[1] <NAME>, tftb 0.1.1, Python module for time-frequency analysis,
https://pypi.org/project/tftb/
'''
window_length = kwargs.get('pwvd_window', 0.001)
window = kwargs.get('pwvd_window_type', signal.hanning(int(fs*window_length)))
analytical = signal.hilbert(input_signal)
p = PseudoWignerVilleDistribution(analytical, fwindow=window)
pwvd_output = p.run()
time_frequency_output = pwvd_output[0]
return time_frequency_output
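# Hedged sketch, an assumption rather than original module code: how the window
# duration in seconds becomes the Hann window and analytic signal that
# pwvd_transform above hands to tftb. Only numpy/scipy calls are used here;
# the PWVD call itself stays as written in pwvd_transform.
def _pwvd_input_prep_sketch(fs=22100, pwvd_window=0.001):
    import numpy as np
    from scipy import signal
    x = np.random.normal(0, 1, int(fs * 0.01))
    window = signal.windows.hann(int(fs * pwvd_window))  # 22 samples at 22.1 kHz
    analytical = signal.hilbert(x)  # complex analytic signal, same length as x
    return window, analytical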
def track_peak_frequency_over_time(input_signal, fs, time_freq_rep, **kwargs):
'''Tracks the lowest possible peak frequency. This ensures that the
lowest harmonic is being tracked in a multiharmonic signal with similar
levels across the harmonics.
Each 'column' of the 2D PWVD is inspected for the lowest peak that crosses
a percentile threshold, and this is then taken as the peak frequency.
Parameters
----------
input_signal : np.array
fs : float>0
time_freq_rep : np.array
2D array with the PWVD representation.
percentile : 0<float<100, optional
Returns
-------
peak_freqs, peak_inds : np.array
Arrays with same size as the input_signal. peak_freqs is the
frequencies in Hz, peak_inds is the row index.
See Also
--------
find_lowest_intense_harmonic_across_TFR
get_most_intense_harmonic
'''
peak_inds = find_lowest_intense_harmonic_across_TFR(abs(time_freq_rep), **kwargs)
freqs = np.linspace(0, fs*0.5, input_signal.size)
peak_freqs = freqs[peak_inds]
return peak_freqs, peak_inds
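# Hedged sketch (illustrative values only): how a row index of the Nsamples x
# Nsamples PWVD maps onto a frequency in Hz in track_peak_frequency_over_time
# above, i.e. row 0 is 0 Hz and the last row is the Nyquist frequency.
def _row_index_to_hz_sketch(fs=44100, n_samples=441):
    import numpy as np
    freqs = np.linspace(0, fs * 0.5, n_samples)
    peak_inds = np.array([0, 110, 220, 440])  # hypothetical tracked row indices
    return freqs[peak_inds]  # the corresponding peak frequencies in Hz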
def find_lowest_intense_harmonic_across_TFR(tf_representation, **kwargs):
'''Applies get_most_intense_harmonic along each column (time slice)
of the time-frequency representation.
'''
return np.apply_along_axis(get_most_intense_harmonic, 0, tf_representation, **kwargs)
def get_most_intense_harmonic(time_slice, **kwargs):
'''Searches a single column in a 2D array for the
<reponame>Joshuaalbert/bayes_filter
from .datapack import DataPack
import tensorflow as tf
import numpy as np
import os
from . import logging
from .plotting import DatapackPlotter
import pylab as plt
def callback_sequence(callbacks, args, is_async=False):
if is_async:
ops = []
for arg, callback in zip(args, callbacks):
if not isinstance(arg, (tuple, list)):
arg = [arg]
ops.append(callback(*arg))
return tf.group(ops)
lock = [tf.no_op()]
store_ops = []
for arg, callback in zip(args, callbacks):
if not isinstance(arg, (tuple, list)):
arg = [arg]
with tf.control_dependencies(lock):
store_ops.append(callback(*arg))
lock = store_ops[-1]
with tf.control_dependencies(lock):
return tf.no_op()
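# Hedged usage sketch, assuming TF1-style graph mode to match the
# tf.control_dependencies / FileWriter APIs used elsewhere in this module.
# With is_async=True the callback ops are simply grouped; with is_async=False
# they are chained so each op runs only after the previous one finishes.
def _callback_sequence_sketch():
    counter = tf.Variable(0, dtype=tf.int64)
    add_one = lambda: tf.assign_add(counter, 1)
    add_ten = lambda: tf.assign_add(counter, 10)
    # Each callback takes no tensor arguments here, so pass empty tuples.
    done = callback_sequence([add_one, add_ten], [(), ()], is_async=True)
    return done  # evaluate `done` in a tf.Session to run both updates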
class Callback(object):
def __init__(self, *args, controls=None, **kwargs):
self._output_dtypes = None
self._name = 'Callback'
self._squeeze = False
self.controls = controls
self.callback_func = self.generate(*args, **kwargs)
@property
def controls(self):
return self._controls
@controls.setter
def controls(self, value):
if value is None:
self._controls = None
return
if not isinstance(value, (list, tuple)):
value = [value]
self._controls = list(value)
@property
def squeeze(self):
return self._squeeze
@squeeze.setter
def squeeze(self, value):
self._squeeze = value
def generate(self, *args, **kwargs):
raise NotImplementedError("Must subclass")
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def output_dtypes(self):
if self._output_dtypes is None:
raise ValueError("Output dtype should be a list of output dtypes.")
return self._output_dtypes
@output_dtypes.setter
def output_dtypes(self, value):
if not isinstance(value, (list, tuple)):
raise ValueError("output dtypes must be a list or tuple")
self._output_dtypes = value
def __call__(self, *Tin):
squeeze = len(self.output_dtypes) == 1
def py_func(*Tin):
result = self.callback_func(*[t.numpy() for t in Tin])
if not isinstance(result, (list,tuple)):
result = [result]
if len(result) != len(self.output_dtypes):
raise ValueError("Len of py_function result {} not equal to number of output dtypes {}".format(len(result), len(self.output_dtypes)))
if squeeze and self.squeeze:
return result[0]
return result
if self.controls is not None:
with tf.control_dependencies(self.controls):
if squeeze and self.squeeze:
return tf.py_function(py_func, Tin, self.output_dtypes[0], name=self.name)
return tf.py_function(py_func, Tin, self.output_dtypes, name=self.name)
else:
if squeeze and self.squeeze:
return tf.py_function(py_func, Tin, self.output_dtypes[0], name=self.name)
return tf.py_function(py_func, Tin, self.output_dtypes, name=self.name)
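# Hedged illustration of the intended subclassing pattern; this class is an
# assumption for demonstration only, not part of the original module. generate()
# must set output_dtypes and return a plain Python function, which __call__
# then wraps in tf.py_function.
class _PrintSizeCallback(Callback):
    def generate(self):
        self.output_dtypes = [tf.int64]
        self.name = 'PrintSizeCallback'
        def store(array):
            # `array` arrives as a numpy array because __call__ converts tensors
            logging.info("received array of size {}".format(array.size))
            return [np.array(array.size, dtype=np.int64)]
        return store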
class Chain(Callback):
def __init__(self, *callbacks, is_async=False):
for cb in callbacks:
if not isinstance(cb, Callback):
raise ValueError("All inputs should be Callbacks, got {}".format(type(cb)))
self._callbacks = callbacks
self._async = is_async
def __call__(self, *args):
if self._async:
ops = []
for arg, callback in zip(args, self._callbacks):
if not isinstance(arg, (tuple, list)):
arg = [arg]
ops.append(callback(*arg))
return tf.group(ops)
lock = [tf.no_op()]
store_ops = []
for arg, callback in zip(args, self._callbacks):
if not isinstance(arg, (tuple, list)):
arg = [arg]
with tf.control_dependencies(lock):
store_ops.append(callback(*arg))
lock = store_ops[-1]
with tf.control_dependencies(lock):
return tf.no_op()
class SummarySendCallback(Callback):
"""
Callback to submit summaries in-graph mode.
"""
def __init__(self, logdir):
super(SummarySendCallback, self).__init__(logdir=logdir)
def generate(self, logdir):
self.output_dtypes = [tf.int64]
self.name = 'SummarySendCallback'
filewriter = tf.summary.FileWriter(logdir,
graph=tf.get_default_graph(),
flush_secs=30)
def store(i, *summaries):
for summary in summaries:
filewriter.add_summary(summary, i)
return [np.array(len(summaries),dtype=np.int64)]
return store
class DatapackStoreCallback(Callback):
def __init__(self, datapack, solset, soltab, perm=(0,2,3,1),lock=None,index_map=None,**selection):
super(DatapackStoreCallback, self).__init__(datapack=datapack,
solset=solset,
soltab=soltab,
perm=perm,
lock=lock,
index_map=index_map,
**selection)
def generate(self, datapack, solset, soltab, perm, lock, index_map, **selection):
if not isinstance(datapack, str):
datapack = datapack.filename
selection.pop('time',None)
self.output_dtypes = [tf.int64]
self.name = 'DatapackStoreCallback'
def store(time_start, time_stop, array):
time_start = index_map[time_start]
time_stop = index_map[time_stop - 1] + 1
if lock is not None:
lock.acquire()
with DataPack(datapack,readonly=False) as dp:
dp.current_solset = solset
dp.select(time=slice(time_start, time_stop, 1), **selection)
dp.__setattr__(soltab, np.transpose(array, perm))#, dir=dir_sel, ant=ant_sel, freq=freq_sel, pol=pol_sel
if lock is not None:
lock.release()
return [np.array(array.__sizeof__(),dtype=np.int64)]
return store
class GetLearnIndices(Callback):
def __init__(self, dist_cutoff=0.3):
super(GetLearnIndices, self).__init__(dist_cutoff=dist_cutoff)
def generate(self, dist_cutoff):
self.output_dtypes = [tf.int64]
self.name = 'GetLearnIndices'
def get_learn_indices(X):
"""Get the indices of non-redundant antennas
:param X: np.array, float64, [N, 3]
Antenna locations
:param cutoff: float
Mark redundant if antennas within this in km
:return: np.array, int64
indices such that all antennas are at least cutoff apart
"""
N = X.shape[0]
Xa, inverse = np.unique(X, return_inverse=True, axis=0)
Na = len(Xa)
keep = []
for i in range(Na):
if np.all(np.linalg.norm(Xa[i:i + 1, :] - Xa[keep, :], axis=1) > dist_cutoff):
keep.append(i)
logging.info("Training on antennas: {}".format(keep))
return [(np.where(np.isin(inverse, keep, assume_unique=True))[0]).astype(np.int64)]
return get_learn_indices
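# Hedged numpy-only sketch of the greedy "keep antennas at least dist_cutoff
# apart" filter used in GetLearnIndices; the antenna coordinates are made up
# for illustration.
def _learn_indices_sketch(dist_cutoff=0.3):
    X = np.array([[0.0, 0.0, 0.0],
                  [0.1, 0.0, 0.0],   # within 0.3 km of the first, so redundant
                  [1.0, 0.0, 0.0]])
    keep = []
    for i in range(X.shape[0]):
        if np.all(np.linalg.norm(X[i:i + 1, :] - X[keep, :], axis=1) > dist_cutoff):
            keep.append(i)
    return keep  # [0, 2]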
class StoreHyperparameters(Callback):
def __init__(self, store_file):
super(StoreHyperparameters, self).__init__(store_file=store_file)
def generate(self, store_file):
if not isinstance(store_file, str):
raise ValueError("store_file should be str {}".format(type(store_file)))
store_file=os.path.abspath(store_file)
np.savez(store_file, times=np.array([]), amp=np.array([]), y_sigma=np.array([]), variance=np.array([]), lengthscales=np.array([]), a=np.array([]), b=np.array([]), timescale=np.array([]),
pert_amp=np.array([]), pert_dir_lengthscale=np.array([]), pert_ant_lengthscale=np.array([]))
self.output_dtypes = [tf.int64]
self.name = 'StoreHyperparameters'
def store(time, hyperparams, y_sigma, amp,pert_amp, pert_dir_lengthscale, pert_ant_lengthscale):
data = np.load(store_file)
#must match the order in the Target
variance, lengthscales, a, b, timescale,pert_amp, pert_dir_lengthscale, pert_ant_lengthscale = np.reshape(hyperparams, (-1,))
times = np.array([time] + list(data['times']))
y_sigma = np.array([np.reshape(y_sigma,(-1,))] + list(data['y_sigma']))
amp = np.array([np.reshape(amp, (-1,))] + list(data['amp']))
variance = np.array([variance] + list(data['variance']))
lengthscales = np.array([lengthscales] + list(data['lengthscales']))
a = np.array([a] + list(data['a']))
b = np.array([b] + list(data['b']))
timescale = np.array([timescale] + list(data['timescale']))
np.savez(store_file,
times=times,
y_sigma=y_sigma,
amp=amp,
variance=variance,
lengthscales=lengthscales,
a=a,
b=b,
timescale=timescale
)
return [np.array(len(times),dtype=np.int64)]
return store
class StoreHyperparametersV2(Callback):
def __init__(self, store_file):
super(StoreHyperparametersV2, self).__init__(store_file=store_file)
def generate(self, store_file):
if not isinstance(store_file, str):
raise ValueError("store_file should be str {}".format(type(store_file)))
store_file=os.path.abspath(store_file)
if not os.path.exists(store_file):
np.savez(store_file, times=np.array([]), amp=np.array([]), y_sigma=np.array([]), variance=np.array([]), lengthscales=np.array([]), a=np.array([]), b=np.array([]), timescale=np.array([]),
pert_amp=np.array([]), pert_dir_lengthscale=np.array([]), pert_ant_lengthscale=np.array([]))
self.output_dtypes = [tf.int64]
self.name = 'StoreHyperparametersV2'
def store(time, amp, lengthscales, a, b, timescale, y_sigma):
data = np.load(store_file)
times = np.array([time] + list(data['times']))
y_sigma = np.array([np.reshape(y_sigma,(-1,))] + list(data['y_sigma']))
amp = np.array([np.reshape(amp, (-1,))] + list(data['amp']))
lengthscales = np.array([lengthscales.reshape((-1,))] + list(data['lengthscales']))
a = np.array([a.reshape((-1,))] + list(data['a']))
b = np.array([b.reshape((-1,))] + list(data['b']))
timescale = np.array([timescale.reshape((-1,))] + list(data['timescale']))
np.savez(store_file,
times=times,
y_sigma=y_sigma,
amp=amp,
lengthscales=lengthscales,
a=a,
b=b,
timescale=timescale
)
return [np.array(len(times),dtype=np.int64)]
return store
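# Hedged sketch (the file name is an assumption) of the prepend-and-resave .npz
# pattern used by the StoreHyperparameters callbacks above: every call loads the
# archive, prepends the newest value and rewrites the whole file.
def _npz_prepend_sketch(store_file='/tmp/hyperparam_demo.npz'):
    np.savez(store_file, times=np.array([]))
    for t in [10.0, 20.0, 30.0]:
        data = np.load(store_file)
        times = np.array([t] + list(data['times']))
        np.savez(store_file, times=times)
    return np.load(store_file)['times']  # array([30., 20., 10.])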
class PlotResults(Callback):
def __init__(self, hyperparam_store, datapack, solset, lock=None, posterior_name='posterior', plot_directory='./plots', **selection):
super(PlotResults, self).__init__(hyperparam_store=hyperparam_store,
lock=lock,
datapack=datapack,
solset=solset,
posterior_name=posterior_name,
plot_directory=plot_directory,
**selection)
def generate(self, hyperparam_store, datapack, solset, lock, posterior_name, plot_directory, **selection):
self.output_dtypes = [tf.int64]
self.name = 'PlotResults'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.abspath(plot_directory)
fig_directory = os.path.join(plot_directory,'phase_screens')
# bayes_directory = os.path.join(plot_directory, 'bayes_hyperparmeters')
os.makedirs(fig_directory,exist_ok=True)
# os.makedirs(bayes_directory, exist_ok=True)
dp = DatapackPlotter(datapack)
def plot_results(index_start, index_end):
"""Get the indices of non-redundant antennas
:param X: np.array, float64, [N, 3]
Antenna locations
:param cutoff: float
Mark redundant if antennas within this in km
:return: np.array, int64
indices such that all antennas are at least cutoff apart
"""
data = np.load(hyperparam_store)
keys = ['amp','y_sigma','variance', 'lengthscales', 'a', 'b', 'timescale']
if lock is not None:
lock.acquire()
fig, axs = plt.subplots(len(keys),1,sharex='all', figsize=(6,len(keys)*2))
for i,key in enumerate(keys):
ax = axs[i]
if key in ['amp','y_sigma']:
# for t,d in zip(data['times'],data['y_sigma']):
ax.boxplot(data[key].T,positions=data['times'])
ax.set_title(key)
else:
ax.scatter(data['times'], data[key], label=key)
ax.legend()
plt.savefig(os.path.join(plot_directory,'hyperparameters.png'))
plt.close('all')
if lock is not None:
lock.release()
# keys = ['amp', 'y_sigma']
# if lock is not None:
# lock.acquire()
# fig, axs = plt.subplots(len(keys), 1, figsize=(6, len(keys) * 2))
# for i, key in enumerate(keys):
# ax = axs[i]
# ax.hist(data[key][-1], bins=max(10, int(np.sqrt(np.size(data[key][-1])))), label=key)
# ax.legend()
# plt.savefig(os.path.join(bayes_directory, 'bayesian_hyperparameters_{:04d}_{:04d}.png'.format(index_start, index_end)))
# plt.close('all')
# if lock is not None:
# lock.release()
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_phase.png".format(i,solset)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant',None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq',None),
dir_sel=selection.get('dir',None),
pol_sel=selection.get('pol', slice(0,1,1)),
fignames=fignames,
observable='phase',
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=solset)
plt.close('all')
if lock is not None:
lock.release()
data_posterior = "data_{}".format(posterior_name)
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_phase.png".format(i, data_posterior)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant', None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq', None),
dir_sel=selection.get('dir', None),
pol_sel=selection.get('pol', slice(0, 1, 1)),
fignames=fignames,
observable='tec',
tec_eval_freq=160e6,
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=data_posterior)
plt.close('all')
if lock is not None:
lock.release()
screen_posterior = "screen_{}".format(posterior_name)
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_phase.png".format(i, screen_posterior)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant', None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq', None),
dir_sel=selection.get('dir', None),
pol_sel=selection.get('pol', slice(0, 1, 1)),
fignames=fignames,
observable='tec',
tec_eval_freq=160e6,
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=screen_posterior)
plt.close('all')
if lock is not None:
lock.release()
return [np.array(3).astype(np.int64)]
return plot_results
class PlotResultsV2(Callback):
def __init__(self, hyperparam_store, datapack, solset, index_map, lock=None, posterior_name='posterior', plot_directory='./plots', **selection):
super(PlotResultsV2, self).__init__(hyperparam_store=hyperparam_store,
lock=lock,
datapack=datapack,
solset=solset,
index_map=index_map,
posterior_name=posterior_name,
plot_directory=plot_directory,
**selection)
def generate(self, hyperparam_store, datapack, solset, index_map, lock, posterior_name, plot_directory, **selection):
self.output_dtypes = [tf.int64]
self.name = 'PlotResultsV2'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.abspath(plot_directory)
fig_directory = os.path.join(plot_directory,'phase_screens')
# bayes_directory = os.path.join(plot_directory, 'bayes_hyperparmeters')
os.makedirs(fig_directory,exist_ok=True)
# os.makedirs(bayes_directory, exist_ok=True)
dp = DatapackPlotter(datapack)
def plot_results(index_start, index_end):
"""
Plot results.
:param index_start: int
Start index of results to plot relative to 0
:param index_end: int
End index of results to plot relative to 0
:return:
"""
index_start = index_map[index_start]
index_end = index_map[index_end-1] + 1
data = np.load(hyperparam_store)
keys = ['amp','y_sigma', 'lengthscales', 'a', 'b', 'timescale']
if lock is not None:
lock.acquire()
fig, axs = plt.subplots(len(keys),1,sharex='all', figsize=(6,len(keys)*2))
"password": <PASSWORD>,
}
tls = {"ca_certs": cert_path, "tls_version": ssl.PROTOCOL_SSLv23}
topic = "devices/{}/messages/events/{}".format(
device_id, url_encode_dict(properties) if properties else ""
)
for _ in range(msg_count):
msgs.append({"topic": topic, "payload": data, "qos": int(qos)})
try:
publish.multiple(
msgs,
client_id=device_id,
hostname=target["entity"],
auth=auth,
port=8883,
protocol=mqtt.MQTTv311,
tls=tls,
)
return
except Exception as x:
raise CLIError(x)
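# Hedged illustration with a made-up device id and property; url_encode_dict in
# the code above is assumed to behave like urllib's standard query encoding.
def _mqtt_topic_sketch():
    from urllib.parse import urlencode
    device_id = "mydevice"                      # hypothetical device id
    properties = {"temperatureAlert": "true"}   # hypothetical app properties
    topic = "devices/{}/messages/events/{}".format(device_id, urlencode(properties))
    return topic  # 'devices/mydevice/messages/events/temperatureAlert=true'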
def iot_device_send_message_http(
cmd,
device_id,
data,
hub_name=None,
headers=None,
resource_group_name=None,
login=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_device_send_message_http(target, device_id, data, headers)
def _iot_device_send_message_http(target, device_id, data, headers=None):
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
try:
return device_sdk.device.send_device_event(
id=device_id, message=data, custom_headers=headers
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_complete(
cmd, device_id, etag, hub_name=None, resource_group_name=None, login=None
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_c2d_message_complete(target, device_id, etag)
def _iot_c2d_message_complete(target, device_id, etag):
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
try:
return device_sdk.device.complete_device_bound_notification(
id=device_id, etag=etag
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_reject(
cmd, device_id, etag, hub_name=None, resource_group_name=None, login=None
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_c2d_message_reject(target, device_id, etag)
def _iot_c2d_message_reject(target, device_id, etag):
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
try:
return device_sdk.device.complete_device_bound_notification(
id=device_id, etag=etag, reject=""
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_abandon(
cmd, device_id, etag, hub_name=None, resource_group_name=None, login=None
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_c2d_message_abandon(target, device_id, etag)
def _iot_c2d_message_abandon(target, device_id, etag):
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
try:
return device_sdk.device.abandon_device_bound_notification(
id=device_id, etag=etag
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_receive(
cmd,
device_id,
hub_name=None,
lock_timeout=60,
resource_group_name=None,
login=None,
abandon=None,
complete=None,
reject=None,
):
ack = None
ack_vals = [abandon, complete, reject]
if any(ack_vals):
if len(list(filter(lambda val: val, ack_vals))) > 1:
raise CLIError(
"Only one c2d-message ack argument can be used [--complete, --abandon, --reject]"
)
if abandon:
ack = SettleType.abandon.value
elif complete:
ack = SettleType.complete.value
elif reject:
ack = SettleType.reject.value
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_c2d_message_receive(target, device_id, lock_timeout, ack)
def _iot_c2d_message_receive(target, device_id, lock_timeout=60, ack=None):
from azext_iot.constants import MESSAGING_HTTP_C2D_SYSTEM_PROPERTIES
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
request_headers = {}
if lock_timeout:
request_headers["IotHub-MessageLockTimeout"] = str(lock_timeout)
try:
result = device_sdk.device.receive_device_bound_notification(
id=device_id, custom_headers=request_headers, raw=True
).response
if result and result.status_code == 200:
payload = {"properties": {}}
if "etag" in result.headers:
eTag = result.headers["etag"].strip('"')
payload["etag"] = eTag
if ack:
ack_response = {}
if ack == SettleType.abandon.value:
logger.debug("__Abandoning message__")
ack_response = device_sdk.device.abandon_device_bound_notification(
id=device_id, etag=eTag, raw=True
)
elif ack == SettleType.reject.value:
logger.debug("__Rejecting message__")
ack_response = device_sdk.device.complete_device_bound_notification(
id=device_id, etag=eTag, reject="", raw=True
)
else:
logger.debug("__Completing message__")
ack_response = device_sdk.device.complete_device_bound_notification(
id=device_id, etag=eTag, raw=True
)
payload["ack"] = (
ack
if (ack_response and ack_response.response.status_code == 204)
else None
)
app_prop_prefix = "iothub-app-"
app_prop_keys = [
header
for header in result.headers
if header.lower().startswith(app_prop_prefix)
]
app_props = {}
for key in app_prop_keys:
app_props[key[len(app_prop_prefix) :]] = result.headers[key]
if app_props:
payload["properties"]["app"] = app_props
sys_props = {}
for key in MESSAGING_HTTP_C2D_SYSTEM_PROPERTIES:
if key in result.headers:
sys_props[key] = result.headers[key]
if sys_props:
payload["properties"]["system"] = sys_props
if result.text:
payload["data"] = (
result.text
if not isinstance(result.text, six.binary_type)
else result.text.decode("utf-8")
)
return payload
return
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
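# Hedged sketch with toy headers showing how application properties are pulled
# out of 'iothub-app-' prefixed HTTP headers in _iot_c2d_message_receive above.
def _app_properties_sketch():
    headers = {"iothub-app-alert": "high", "ETag": '"abc"', "iothub-app-zone": "3"}
    prefix = "iothub-app-"
    return {k[len(prefix):]: v for k, v in headers.items()
            if k.lower().startswith(prefix)}  # {'alert': 'high', 'zone': '3'}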
def iot_c2d_message_send(
cmd,
device_id,
hub_name=None,
data="Ping from Az CLI IoT Extension",
message_id=None,
correlation_id=None,
user_id=None,
content_encoding="utf-8",
content_type=None,
expiry_time_utc=None,
properties=None,
ack=None,
wait_on_feedback=False,
yes=False,
repair=False,
resource_group_name=None,
login=None,
):
from azext_iot.common.deps import ensure_uamqp
from azext_iot.common.utility import validate_min_python_version
validate_min_python_version(3, 4)
if wait_on_feedback and not ack:
raise CLIError(
'To wait on device feedback, ack must be "full", "negative" or "positive"'
)
config = cmd.cli_ctx.config
ensure_uamqp(config, yes, repair)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
if properties:
properties = validate_key_value_pairs(properties)
if expiry_time_utc:
now_in_milli = int(time() * 1000)
user_msg_expiry = int(expiry_time_utc)
if user_msg_expiry < now_in_milli:
raise CLIError("Message expiry time utc is in the past!")
from azext_iot.monitor import event
msg_id, errors = event.send_c2d_message(
target=target,
device_id=device_id,
data=data,
message_id=message_id,
correlation_id=correlation_id,
user_id=user_id,
content_encoding=content_encoding,
content_type=content_type,
expiry_time_utc=expiry_time_utc,
properties=properties,
ack=ack,
)
if errors:
raise CLIError(
"C2D message error: {}, use --debug for more details.".format(errors)
)
if wait_on_feedback:
_iot_hub_monitor_feedback(target=target, device_id=device_id, wait_on_id=msg_id)
def iot_simulate_device(
cmd,
device_id,
hub_name=None,
receive_settle="complete",
data="Ping from Az CLI IoT Extension",
msg_count=100,
msg_interval=3,
protocol_type="mqtt",
properties=None,
resource_group_name=None,
login=None,
):
import sys
import uuid
import datetime
import json
from azext_iot.operations._mqtt import mqtt_client_wrap
from azext_iot.common.utility import execute_onthread
from azext_iot.constants import (
MIN_SIM_MSG_INTERVAL,
MIN_SIM_MSG_COUNT,
SIM_RECEIVE_SLEEP_SEC,
)
protocol_type = protocol_type.lower()
if protocol_type == ProtocolType.mqtt.name:
if receive_settle != "complete":
raise CLIError('mqtt protocol only supports settle type of "complete"')
if msg_interval < MIN_SIM_MSG_INTERVAL:
raise CLIError("msg interval must be at least {}".format(MIN_SIM_MSG_INTERVAL))
if msg_count < MIN_SIM_MSG_COUNT:
raise CLIError("msg count must be at least {}".format(MIN_SIM_MSG_COUNT))
properties_to_send = _iot_simulate_get_default_properties(protocol_type)
user_properties = validate_key_value_pairs(properties) or {}
properties_to_send.update(user_properties)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
token = None
class generator(object):
def __init__(self):
self.calls = 0
def generate(self, jsonify=True):
self.calls += 1
payload = {
"id": str(uuid.uuid4()),
"timestamp": str(datetime.datetime.utcnow()),
"data": str(data + " #{}".format(self.calls)),
}
return json.dumps(payload) if jsonify else payload
def http_wrap(target, device_id, generator):
d = generator.generate(False)
_iot_device_send_message_http(target, device_id, d, headers=properties_to_send)
six.print_(".", end="", flush=True)
try:
if protocol_type == ProtocolType.mqtt.name:
wrap = mqtt_client_wrap(
target=target,
device_id=device_id,
properties=properties_to_send,
sas_duration=(msg_count * msg_interval)
+ 60, # int type is enforced for msg_count and msg_interval
)
wrap.execute(generator(), publish_delay=msg_interval, msg_count=msg_count)
else:
six.print_("Sending and receiving events via https")
token, op = execute_onthread(
method=http_wrap,
args=[target, device_id, generator()],
interval=msg_interval,
max_runs=msg_count,
return_handle=True,
)
while op.is_alive():
_handle_c2d_msg(target, device_id, receive_settle)
sleep(SIM_RECEIVE_SLEEP_SEC)
except KeyboardInterrupt:
sys.exit()
except Exception as x:
raise CLIError(x)
finally:
if token:
token.set()
def iot_c2d_message_purge(
cmd, device_id, hub_name=None, resource_group_name=None, login=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
return service_sdk.cloud_to_device_messages.purge_cloud_to_device_message_queue(device_id)
def _iot_simulate_get_default_properties(protocol):
default_properties = {}
is_mqtt = protocol == ProtocolType.mqtt.name
default_properties["$.ct" if is_mqtt else "content-type"] = "application/json"
default_properties["$.ce" if is_mqtt else "content-encoding"] = "utf-8"
return default_properties
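# Hedged illustration of the dictionaries returned above; any non-mqtt string
# falls through to the HTTP-style header names.
def _default_properties_sketch():
    mqtt_props = _iot_simulate_get_default_properties(ProtocolType.mqtt.name)
    http_props = _iot_simulate_get_default_properties("http")
    # mqtt -> {"$.ct": "application/json", "$.ce": "utf-8"}
    # http -> {"content-type": "application/json", "content-encoding": "utf-8"}
    return mqtt_props, http_props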
def _handle_c2d_msg(target, device_id, receive_settle, lock_timeout=60):
result = _iot_c2d_message_receive(target, device_id, lock_timeout)
if result:
six.print_()
six.print_("__Received C2D Message__")
six.print_(result)
if receive_settle == "reject":
six.print_("__Rejecting message__")
_iot_c2d_message_reject(target, device_id, result["etag"])
elif receive_settle == "abandon":
six.print_("__Abandoning message__")
_iot_c2d_message_abandon(target, device_id, result["etag"])
else:
six.print_("__Completing message__")
_iot_c2d_message_complete(target, device_id, result["etag"])
return True
return False
def iot_device_export(
cmd,
hub_name,
blob_container_uri,
include_keys=False,
storage_authentication_type=None,
resource_group_name=None,
):
from azext_iot._factory import iot_hub_service_factory
from azure.mgmt.iothub import __version__ as iot_sdk_version
client = iot_hub_service_factory(cmd.cli_ctx)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name
)
if exists(blob_container_uri):
blob_container_uri = read_file_content(blob_container_uri)
if ensure_min_version(iot_sdk_version, "0.12.0"):
from azure.mgmt.iothub.models import ExportDevicesRequest
from azext_iot.common.shared import AuthenticationType
storage_authentication_type = (
AuthenticationType(storage_authentication_type).name
if storage_authentication_type
else None
)
export_request = ExportDevicesRequest(
export_blob_container_uri=blob_container_uri,
exclude_keys=not include_keys,
authentication_type=storage_authentication_type,
)
return client.export_devices(
target["resourcegroup"], hub_name, export_devices_parameters=export_request,
)
if storage_authentication_type:
raise CLIError(
"Device export authentication-type properties require a dependency of azure-mgmt-iothub>=0.12.0"
)
return client.export_devices(
target["resourcegroup"],
hub_name,
export_blob_container_uri=blob_container_uri,
exclude_keys=not include_keys,
)
def iot_device_import(
cmd,
hub_name,
input_blob_container_uri,
output_blob_container_uri,
storage_authentication_type=None,
resource_group_name=None,
):
from azext_iot._factory import iot_hub_service_factory
from azure.mgmt.iothub import __version__ as iot_sdk_version
client = iot_hub_service_factory(cmd.cli_ctx)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name
)
if exists(input_blob_container_uri):
input_blob_container_uri = read_file_content(input_blob_container_uri)
if exists(output_blob_container_uri):
output_blob_container_uri = read_file_content(output_blob_container_uri)
if ensure_min_version(iot_sdk_version, "0.12.0"):
from azure.mgmt.iothub.models import ImportDevicesRequest
from azext_iot.common.shared import AuthenticationType
storage_authentication_type = (
AuthenticationType(storage_authentication_type).name
if storage_authentication_type
else None
)
import_request = ImportDevicesRequest(
input_blob_container_uri=input_blob_container_uri,
output_blob_container_uri=output_blob_container_uri,
input_blob_name=None,
output_blob_name=None,
authentication_type=storage_authentication_type,
)
return client.import_devices(
target["resourcegroup"], hub_name, import_devices_parameters=import_request,
)
if storage_authentication_type:
raise CLIError(
"Device import authentication-type properties require a dependency of azure-mgmt-iothub>=0.12.0"
)
return client.import_devices(
target["resourcegroup"],
hub_name,
input_blob_container_uri=input_blob_container_uri,
output_blob_container_uri=output_blob_container_uri,
)
def iot_device_upload_file(
cmd,
device_id,
file_path,
content_type,
hub_name=None,
resource_group_name=None,
login=None,
):
from azext_iot.sdk.iothub.device.models import FileUploadCompletionStatus
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
if not exists(file_path):
raise CLIError('File path "{}" does not exist!'.format(file_path))
content = read_file_content(file_path)
file_name = basename(file_path)
try:
upload_meta = device_sdk.device.create_file_upload_sas_uri(
device_id=device_id, blob_name=file_name, raw=True
).response.json()
storage_endpoint = "{}/{}/{}{}".format(
upload_meta["hostName"],
upload_meta["containerName"],
upload_meta["blobName"],
upload_meta["sasToken"],
)
completion_status = FileUploadCompletionStatus(
correlation_id=upload_meta["correlationId"], is_success=True
)
upload_response = device_sdk.device.upload_file_to_container(
storage_endpoint=storage_endpoint,
content=content,
content_type=content_type,
)
completion_status.status_code = upload_response.status_code
completion_status.status_reason = upload_response.reason
return device_sdk.device.update_file_upload_status(
device_id=device_id, file_upload_completion_status=completion_status
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_hub_monitor_events(
cmd,
hub_name=None,
device_id=None,
interface=None,
module_id=None,
consumer_group="$Default",
timeout=300,
enqueued_time=None,
resource_group_name=None,
yes=False,
properties=None,
repair=False,
login=None,
content_type=None,
device_query=None,
):
try:
_iot_hub_monitor_events(
cmd,
hub_name=hub_name,
device_id=device_id,
interface_name=interface,
module_id=module_id,
consumer_group=consumer_group,
timeout=timeout,
enqueued_time=enqueued_time,
resource_group_name=resource_group_name,
yes=yes,
properties=properties,
repair=repair,
login=login,
content_type=content_type,
device_query=device_query,
)
except RuntimeError as e:
raise CLIError(e)
def iot_hub_monitor_feedback(
cmd,
hub_name=None,
device_id=None,
yes=False,
wait_on_id=None,
repair=False,
resource_group_name=None,
login=None,
):
from azext_iot.common.deps import ensure_uamqp
from azext_iot.common.utility import validate_min_python_version
validate_min_python_version(3, 4)
config = cmd.cli_ctx.config
ensure_uamqp(config, yes, repair)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
hub_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_hub_monitor_feedback(
target=target, device_id=device_id, wait_on_id=wait_on_id
)
def iot_hub_distributed_tracing_show(
cmd, hub_name,
= 0
tot_mult_count = 0
all_mh_wgaps = {}
all_mh_wogaps = {}
with open ('%s/%s_out.txt'%(path, name), 'w') as mh_table:
# Wgaps includes mismatches
# Wogaps excludes mismatches
with open(align, 'r') as f:
logging.info('Opening ' + align)
reader = csv.reader(f, dialect='excel', delimiter='\t')
headers = next(reader, None) #stores header for later use
headers = '\t'.join(headers)
head = (
f'{headers}\tDel_len\tMH_check\tMH\tMH_wmm\t'
f'MH_mm_len\tMH_nomm\tMH_nomm_len\tMismatch\t'
f'ReadID\tTanDup\tInsDict\n'
)
mh_table.write(head)
for row in reader:
if tool == 'CE':
count = count + 1
alseq = row[0]
refseq = row[1]
unmodified = row[3]
n_deleted = float(row[5])
read_count = row[8]
logging.debug('ID is ' + str(count) + ', TargetSeq is ' +
'\n' + alseq + '\n' + ', RefSeq is ' +
'\n' + refseq + '\n' + ', Edit type is: ' +
unmodified + ', Read count is: ' +
str(read_count) + '\n')
elif tool == 'CA':
count = count + 1
alseq = row[2]
refseq = row[1]
realigned = realign(refseq,alseq)
corrected = correct_realign(realigned[0],realigned[1])
alseq = corrected[1]
refseq = corrected[0]
edit_type = row[5] # 'Ins|del|WT or Sub'
read_count = row[4]
logging.debug('ID is ' + str(count) + ', TargetSeq is ' +
alseq + ', RefSeq is ' + refseq +
', Edit type is: ' + edit_type +
', Read count is: ' + str(read_count) +'\n')
else:
logging.error('Tool must be CA or CE')
if len(alseq) != len(refseq):
logging.warn('''Aligned sequence and reference have
different lengths! Microhomology
calculations may be incorrect''')
printrow = "\t".join(row)
deletion = False
insert = False
if tool == 'CE':
if unmodified == 'False' and n_deleted > 0:
deletion = True
elif tool == 'CA':
if edit_type == 'del':
deletion = True
elif edit_type == 'Ins':
insert = True
if not deletion and not insert:
logging.debug('No deletion identified in allele ' +
str(count) + '\n')
outrow = (
f'{printrow}\tNA\tN\tN\t.\t.\t.\t.\tNA\t'
f'{count}\tNA\tNA\n'
)
mh_table.write(outrow)
else:
if insert:
# Get number of tandem duplications
td = tandup(alseq,refseq,minin,ref_fasta)
td_count = td['tdcount']
outrow = (
f'{printrow}\tNA\tN\tN\t.\t.\t.\t.\t'
f'NA\t{count}\t{td_count}\t{td}\n'
)
mh_table.write(outrow)
else:
# Rare cases when indels do not have CasAn category
td = 'NA'
td_count = 'NA'
# Gets nested list of gaps start/stop positions in sequence
del_ss = getgaps(alseq)
# If not empty
if check_if_mult_deletions(alseq):
# Starting Multi Gap MH analysis
logging.debug('Identified multiple deletion in allele '
+ str(count) + '\n')
tot_mult_count = tot_mult_count + 1
res = find_mult_mh(alseq,refseq)
mh_wgaps = []
mh_wgaps_l = []
mh_wogaps = []
mh_wogaps_l = []
mult_count = mult_count + 1
# Using id_count we can get the len of each del
# from del_len even for multiple deletions
id_count = 0
for mh in res:
mh_first = mh[0]
# Get start/end pos for del in list
my_del_ss = del_ss[id_count]
# Subtract start pos from end to get len
del_len = my_del_ss[1] - my_del_ss[0] + 1
id_count = id_count + 1
# Stringify
res_s = ','.join(str(v) for v in res)
printcount = str(count) + '_' + str(id_count)
# If no microhomology only mismatch bool is in list
if mh_first == 'Y' or mh_first == 'N':
outrow = (
f'{printrow}\t{del_len}\tY\tN\t.\t.\t.\t.\t'
f'{mh_first}\t{printcount}\t{td_count}\t{td}\n'
)
mh_table.write(outrow)
else:
mh_wgaps.append(mh[0])
logging.debug('Microhomology found in allele ' +
str(count) + '. Microhomology is: ' +
res_s + '\n')
mykey = str(count) + '_' + str(read_count)
if mh[0] != '':
all_mh_wgaps[mykey] = mh[0]
logging.debug('''Appending the following non-empty
microhomology (with mismatches): ''' +
str(mh[0]) + '\n')
mh_wgaps_l.append(str(len(mh[0])).replace('0','.'))
mh_wogaps.append(mh[1])
if mh[1] != '':
all_mh_wogaps[mykey] = mh[1]
logging.debug('''Appending the following non-empty
microhomology (without mismatches): ''' +
str(mh[1]) + '\n')
mh_wogaps_l.append(str(len(mh[1])).replace('0','.'))
printlen_wmm = str(len(mh[0]))
printlen_womm = str(len(mh[1]))
if mh[0]:
printmh_wmm = mh[0]
else:
printmh_wmm = "."
if mh[1]:
printmh_womm = mh[1]
else:
printmh_womm = "."
mismatch_bool = mh[2]
outrow = (
f'{printrow}\t{del_len}\tY\tY\t{printmh_wmm}\t'
f'{printlen_wmm}\t{printmh_womm}\t{printlen_womm}\t'
f'{mismatch_bool}\t{printcount}\t{td_count}\t{td}\n'
)
mh_table.write(outrow)
else:
# Starting single gap MH analysis
logging.debug('Identified single deletion in allele ' +
str(count) + "\n")
# Subtract start from end to get len
del_len = del_ss[0][1] - del_ss[0][0] + 1
tot_single_count = tot_single_count + 1
res = find_single_mh(alseq,refseq)
res_first = res[0]
# If no microhomology only the mismatch pseudobool will be in list item
if res_first == 'Y' or res_first == 'N':
outrow = (
f'{printrow}\t{del_len}\tY\tN\t.\t.\t.\t.\t'
f'{res_first}\t{count}\t{td_count}\t{td}\n'
)
mh_table.write(outrow)
logging.debug('No microhomology in allele ' +
str(count) +
'. Printing empty columns to row.\n')
else:
res_s = ','.join(str(v) for v in res)
logging.debug('Microhomology found in allele ' +
str(count) + '. Microhomology is: ' +
res_s + '\n')
mykey = str(count) + '_' + str(read_count)
single_count = single_count + 1
if res[0] != '':
all_mh_wgaps[mykey] = res[0]
logging.debug('''Appending the following non-empty
microhomology (with mismatches): ''' +
str(res[0]) + '\n')
if res[1] != '':
all_mh_wogaps[mykey] = res[1]
logging.debug('''Appending the following non-empty
microhomology (without mismatches): ''' +
str(res[1]) + '\n')
printlen_wmm = str(len(res[0]))
printlen_womm = str(len(res[1]))
if res[0]:
printmh_wmm = res[0]
else:
printmh_wmm = '.'
if res[1]:
printmh_womm = res[1]
else:
printmh_womm = '.'
res_third = res[2]
outrow = (
f'{printrow}\t{del_len}\tY\tY\t{printmh_wmm}\t'
f'{printlen_wmm}\t{printmh_womm}\t{printlen_womm}\t'
f'{res_third}\t{count}\t{td_count}\t{td}\n'
)
mh_table.write(outrow)
logging.info('Done! ' + str(count) + ' alleles written to ' +
path + '/' + name + '_mh.txt')
#Summary statistics
if stats == 1:
with open ('%s/%s_mh_stats.txt'%(path, name), 'w') as mh_stats:
totalmh = mult_count + single_count
mh_stats.write('Total number of single deletion alleles: '
+ str(tot_single_count) + '\n')
mh_stats.write('Microhomologies identified in single deletion alleles: '
+ str(single_count) + '\n')
mh_stats.write('Total number of multiple deletion alleles: '
+ str(tot_mult_count) + '\n')
mh_stats.write('Microhomologies identified in multiple deletion alleles: '
+ str(mult_count) + '\n')
mh_stats.write('Total alleles with microhomologies found (with and without mismatches): '
+ str(totalmh) + '\n')
mylist1 = []
mylist2 = []
lenset1 = set()
lenset2 = set()
countdic1 = {}
countdic2 = {}
print('Starting analysis of homologies with mismatches')
for key, value in all_mh_wgaps.items():
nbases = len(value)
nreads = int(key.split('_')[1])
logging.debug('Key is ' + key + ', Value is: ' +
value + ', NReads is : ' +
str(nreads) + '\n')
mylist1.append(nbases)
if nbases in lenset1:
current_nreads = int(countdic1[nbases])
incremented_nreads = current_nreads + nreads
logging.debug('Current reads for ' + str(nbases) +
' are : ' + str(current_nreads))
logging.debug('Incremented reads for ' + str(nbases) +
' are : ' + str(incremented_nreads))
countdic1[nbases] = incremented_nreads
else:
lenset1.add(nbases)
countdic1[nbases] = nreads
logging.info('''Completed analysis of homologies with mismatches.
Starting analysis of homologies without mismatches.''')
for key, value in all_mh_wogaps.items():
nbases = len(value)
nreads = int(key.split('_')[1])
logging.debug('Key is ' + key + ', Value is: ' + value +
', NReads is : ' + str(nreads) + '\n')
mylist2.append(nbases)
if nbases in lenset2:
current_nreads = int(countdic2[nbases])
incremented_nreads = current_nreads + nreads
logging.debug('Current reads for ' + str(nbases) +
' are : ' + str(current_nreads))
logging.debug('Incremented reads for ' + str(nbases) +
' are : ' + str(incremented_nreads))
countdic2[nbases] = incremented_nreads
else:
lenset2.add(nbases)
countdic2[nbases] = nreads
counts1 = Counter(mylist1)
counts2 = Counter(mylist2)
mh_stats.write('Allele counts of microhomology len including mismatches' + '\n')
for key, count in sorted(counts1.most_common()):
mh_stats.write('%s: %s\n'%(key, count))
mh_stats.write('Read counts of microhomology len including mismatches' + '\n')
for key, count in countdic1.items():
mh_stats.write('%s: %s\n'%(key, count))
mh_stats.write('Allele counts of microhomology len excluding mismatches' + '\n')
for key, count in sorted(counts2.most_common()):
mh_stats.write('%s: %s\n'%(key, count))
mh_stats.write('Read counts of microhomology len excluding mismatches' + '\n')
for key, count in countdic2.items():
mh_stats.write('%s: %s\n'%(key, count))
print('All done!')
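# Hedged sketch of the idea behind getgaps (defined elsewhere in this script):
# deletion start/end positions are the runs of '-' in the aligned read. The
# example alignment string is made up for illustration.
def _gap_positions_sketch(alseq="ACGT---ACG--T"):
    import re
    return [(m.start(), m.end() - 1) for m in re.finditer('-+', alseq)]
    # -> [(4, 6), (10, 11)], so the first deletion length is 6 - 4 + 1 = 3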
parser = ArgumentParser(description='''Search Cas-Analyser or CRISPResso allele
outputs for microhomologies associated with deletions''')
parser.add_argument('-v', '--version', action='version', version='1.1')
parser.add_argument('-i',
dest='table',
help='the tab-separated allele output file')
parser.add_argument('-p',
dest='prefix',
help='prefix of outputs. Default: prefix of the input file')
parser.add_argument('-r',
dest='reference',
help='''FASTA format sequence to search
for source of inserted sequences''')
parser.add_argument('-o',
dest='output',
help='Output directory')
parser.add_argument('-t',
dest='tool',
help='''Tool used to generate input alleles;
can be Cas-Analyser or CRISPResso: CA|CE''')
parser.add_argument('-m',
dest='min_mh_len',
help='''Minimum length of microhomology
in basepairs [int]''')
parser.add_argument('-n',
dest='minin',
help='Minimum length of insert in basepairs [int]. Default is 4')
parser.add_argument('-s', dest='stats', help='Generate microhomology statistics file')
parser.add_argument('-log',
dest='loglevel',
help='Set logging level')
args = parser.parse_args()
if None not in [args.table, args.output]:
args.output = os.path.abspath(args.output)
if not os.path.isdir(args.output):
print('\nOops! Output directory does not exist. Please check!\n')
sys.exit(1)
if args.tool is None:
print('\nPlease set the tool used to generate the allele table!\n')
sys.exit(1)
if
<reponame>marcelosalles/pyidf
""" Data objects in group "Setpoint Managers"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class SetpointManagerScheduled(DataObject):
""" Corresponds to IDD object `SetpointManager:Scheduled`
The simplest Setpoint Manager simply uses a schedule to determine one
or more setpoints. Values of the nodes are not used as input.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Temperature',
u'MaximumTemperature',
u'MinimumTemperature',
u'HumidityRatio',
u'MaximumHumidityRatio',
u'MinimumHumidityRatio',
u'MassFlowRate',
u'MaximumMassFlowRate',
u'MinimumMassFlowRate'],
'autocalculatable': False,
'type': 'alpha'}),
(u'schedule name',
{'name': u'Schedule Name',
'pyname': u'schedule_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'setpoint node or nodelist name',
{'name': u'Setpoint Node or NodeList Name',
'pyname': u'setpoint_node_or_nodelist_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'})]),
'format': None,
'group': u'Setpoint Managers',
'min-fields': 0,
'name': u'SetpointManager:Scheduled',
'pyname': u'SetpointManagerScheduled',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def control_variable(self):
"""field `Control Variable`
Args:
value (str): value for IDD Field `Control Variable`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_variable` or None if not set
"""
return self["Control Variable"]
@control_variable.setter
def control_variable(self, value=None):
"""Corresponds to IDD field `Control Variable`"""
self["Control Variable"] = value
@property
def schedule_name(self):
"""field `Schedule Name`
Args:
value (str): value for IDD Field `Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `schedule_name` or None if not set
"""
return self["Schedule Name"]
@schedule_name.setter
def schedule_name(self, value=None):
"""Corresponds to IDD field `Schedule Name`"""
self["Schedule Name"] = value
@property
def setpoint_node_or_nodelist_name(self):
"""field `Setpoint Node or NodeList Name`
| Node(s) at which control variable will be set
Args:
value (str): value for IDD Field `Setpoint Node or NodeList Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `setpoint_node_or_nodelist_name` or None if not set
"""
return self["Setpoint Node or NodeList Name"]
@setpoint_node_or_nodelist_name.setter
def setpoint_node_or_nodelist_name(self, value=None):
"""Corresponds to IDD field `Setpoint Node or NodeList Name`"""
self["Setpoint Node or NodeList Name"] = value
class SetpointManagerScheduledDualSetpoint(DataObject):
""" Corresponds to IDD object `SetpointManager:Scheduled:DualSetpoint`
This setpoint manager places a high and low schedule value
on one or more nodes.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'default': u'Temperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Temperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'high setpoint schedule name',
{'name': u'High Setpoint Schedule Name',
'pyname': u'high_setpoint_schedule_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'low setpoint schedule name',
{'name': u'Low Setpoint Schedule Name',
'pyname': u'low_setpoint_schedule_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'setpoint node or nodelist name',
{'name': u'Setpoint Node or NodeList Name',
'pyname': u'setpoint_node_or_nodelist_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'})]),
'format': None,
'group': u'Setpoint Managers',
'min-fields': 0,
'name': u'SetpointManager:Scheduled:DualSetpoint',
'pyname': u'SetpointManagerScheduledDualSetpoint',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def control_variable(self):
"""field `Control Variable`
| Default value: Temperature
Args:
value (str): value for IDD Field `Control Variable`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_variable` or None if not set
"""
return self["Control Variable"]
@control_variable.setter
def control_variable(self, value="Temperature"):
"""Corresponds to IDD field `Control Variable`"""
self["Control Variable"] = value
@property
def high_setpoint_schedule_name(self):
"""field `High Setpoint Schedule Name`
Args:
value (str): value for IDD Field `High Setpoint Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `high_setpoint_schedule_name` or None if not set
"""
return self["High Setpoint Schedule Name"]
@high_setpoint_schedule_name.setter
def high_setpoint_schedule_name(self, value=None):
"""Corresponds to IDD field `High Setpoint Schedule Name`"""
self["High Setpoint Schedule Name"] = value
@property
def low_setpoint_schedule_name(self):
"""field `Low Setpoint Schedule Name`
Args:
value (str): value for IDD Field `Low Setpoint Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `low_setpoint_schedule_name` or None if not set
"""
return self["Low Setpoint Schedule Name"]
@low_setpoint_schedule_name.setter
def low_setpoint_schedule_name(self, value=None):
"""Corresponds to IDD field `Low Setpoint Schedule Name`"""
self["Low Setpoint Schedule Name"] = value
@property
def setpoint_node_or_nodelist_name(self):
"""field `Setpoint Node or NodeList Name`
| Node(s) at which temperature will be set
Args:
value (str): value for IDD Field `Setpoint Node or NodeList Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `setpoint_node_or_nodelist_name` or None if not set
"""
return self["Setpoint Node or NodeList Name"]
@setpoint_node_or_nodelist_name.setter
def setpoint_node_or_nodelist_name(self, value=None):
"""Corresponds to IDD field `Setpoint Node or NodeList Name`"""
self["Setpoint Node or NodeList Name"] = value
class SetpointManagerOutdoorAirReset(DataObject):
""" Corresponds to IDD object `SetpointManager:OutdoorAirReset`
The Outdoor Air Reset Setpoint Manager sets the supply air
temperature according to the outdoor air temperature using a reset rule.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'control variable',
{'name': u'Control Variable',
'pyname': u'control_variable',
'default': u'Temperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Temperature'],
'autocalculatable': False,
'type': 'alpha'}),
(u'setpoint at outdoor low temperature',
{'name': u'Setpoint at Outdoor Low Temperature',
'pyname': u'setpoint_at_outdoor_low_temperature',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'outdoor low temperature',
{'name': u'Outdoor Low Temperature',
'pyname': u'outdoor_low_temperature',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'setpoint at outdoor high temperature',
{'name': u'Setpoint at Outdoor High Temperature',
'pyname': u'setpoint_at_outdoor_high_temperature',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'outdoor high temperature',
{'name': u'Outdoor High Temperature',
'pyname': u'outdoor_high_temperature',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'setpoint node or nodelist name',
{'name': u'Setpoint Node or NodeList Name',
'pyname': u'setpoint_node_or_nodelist_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'schedule name',
{'name': u'Schedule Name',
'pyname': u'schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'setpoint at outdoor low temperature 2',
{'name': u'Setpoint at Outdoor Low Temperature 2',
'pyname': u'setpoint_at_outdoor_low_temperature_2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'outdoor low temperature 2',
{'name': u'Outdoor Low Temperature 2',
'pyname': u'outdoor_low_temperature_2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'setpoint at outdoor high temperature 2',
{'name': u'Setpoint at Outdoor High Temperature 2',
'pyname': u'setpoint_at_outdoor_high_temperature_2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'}),
(u'outdoor high temperature 2',
{'name': u'Outdoor High Temperature 2',
'pyname': u'outdoor_high_temperature_2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'C'})]),
'format': None,
'group': u'Setpoint Managers',
'min-fields': 0,
'name': u'SetpointManager:OutdoorAirReset',
'pyname': u'SetpointManagerOutdoorAirReset',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def control_variable(self):
"""field `Control Variable`
| Default value: Temperature
Args:
value (str): value for IDD Field `Control Variable`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_variable` or None if not set
"""
return self["Control Variable"]
@control_variable.setter
def control_variable(self, value="Temperature"):
"""Corresponds to IDD field `Control Variable`"""
self["Control Variable"] = value
@property
def setpoint_at_outdoor_low_temperature(self):
"""field `Setpoint at Outdoor Low Temperature`
| Units: C
Args:
value (float): value for IDD Field `Setpoint at Outdoor Low Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `setpoint_at_outdoor_low_temperature` or None if not set
"""
return self["Setpoint at Outdoor Low Temperature"]
@setpoint_at_outdoor_low_temperature.setter
def setpoint_at_outdoor_low_temperature(self, value=None):
"""Corresponds to IDD field `Setpoint at Outdoor Low Temperature`"""
self["Setpoint at Outdoor Low Temperature"] | |
ell_max=ell_max,
frameType=scri.Inertial,
dataType=data_type,
r_is_scaled_out=True,
m_is_scaled_out=True,
)
def single_mode_proportional_to_time_supertranslated(**kwargs):
"""Return WaveformModes as in single_mode_proportional_to_time, with analytical supertranslation
This function constructs the same basic object as the `single_mode_proportional_to_time`, but then applies an
analytical supertranslation. The arguments to this function are the same as to the other, with two additions:
Additional parameters
---------------------
supertranslation : complex array, optional
Spherical-harmonic modes of the supertranslation to apply to the waveform. This is overwritten by
`space_translation` if present. Default value is `None`.
space_translation : float array of length 3, optional
This is just the 3-vector representing the displacement to apply to the waveform. Note that if
`supertranslation` is also given, this parameter overwrites its ell=1 components. Default value is [1.0, 0.0, 0.0].
"""
s = kwargs.pop("s", -2)
ell = kwargs.pop("ell", abs(s))
m = kwargs.pop("m", -ell)
ell_min = kwargs.pop("ell_min", abs(s))
ell_max = kwargs.pop("ell_max", 8)
data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(s)])
t_0 = kwargs.pop("t_0", -20.0)
t_1 = kwargs.pop("t_1", 20.0)
dt = kwargs.pop("dt", 1.0 / 10.0)
t = np.arange(t_0, t_1 + dt, dt)
n_times = t.size
beta = kwargs.pop("beta", 1.0)
data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
data[:, sf.LM_index(ell, m, ell_min)] = beta * t
supertranslation = np.array(kwargs.pop("supertranslation", np.array([], dtype=complex)), dtype=complex)
if "space_translation" in kwargs:
if supertranslation.size < 4:
supertranslation.resize((4,))
supertranslation[1:4] = -sf.vector_as_ell_1_modes(kwargs.pop("space_translation"))
supertranslation_ell_max = int(math.sqrt(supertranslation.size) - 1)
if supertranslation_ell_max * (supertranslation_ell_max + 2) + 1 != supertranslation.size:
raise ValueError(f"Bad number of elements in supertranslation: {supertranslation.size}")
for i, (ellpp, mpp) in enumerate(sf.LM_range(0, supertranslation_ell_max)):
if supertranslation[i] != 0.0:
mp = m + mpp
for ellp in range(ell_min, min(ell_max, (ell + ellpp)) + 1):
if ellp >= abs(mp):
addition = (
beta
* supertranslation[i]
* math.sqrt(((2 * ellpp + 1) * (2 * ell + 1) * (2 * ellp + 1)) / (4 * math.pi))
* sf.Wigner3j(ellpp, ell, ellp, 0, -s, s)
* sf.Wigner3j(ellpp, ell, ellp, mpp, m, -mp)
)
if (s + mp) % 2 == 1:
addition *= -1
data[:, sf.LM_index(ellp, mp, ell_min)] += addition
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
return scri.WaveformModes(
t=t,
data=data,
ell_min=ell_min,
ell_max=ell_max,
frameType=scri.Inertial,
dataType=data_type,
r_is_scaled_out=True,
m_is_scaled_out=True,
)
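# A minimal usage sketch, assuming scri and spherical_functions are available as above;
# the helper name and the displacement value are illustrative, not defaults of this module.
def _example_supertranslated_waveform():
    """Build the linear-in-time (ell, m) = (2, 2) waveform with a unit displacement along x."""
    return single_mode_proportional_to_time_supertranslated(
        s=-2, ell=2, m=2, space_translation=[1.0, 0.0, 0.0]
    )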
def fake_precessing_waveform(
t_0=-20.0,
t_1=20_000.0,
dt=0.1,
ell_max=8,
mass_ratio=2.0,
precession_opening_angle=np.pi / 6.0,
precession_opening_angle_dot=None,
precession_relative_rate=0.1,
precession_nutation_angle=None,
inertial=True,
):
"""Construct a strain waveform with realistic precession effects.
This model is intended to be weird enough that it breaks any overly simplistic assumptions about
waveform symmetries while still being (mostly) realistic.
This waveform uses only the very lowest-order terms from PN theory to evolve the orbital
frequency up to a typical constant value (with a smooth transition), and to construct modes that
have very roughly the correct amplitudes as a function of the orbital frequency. Modes with
equal ell values but opposite m values are modulated antisymmetrically, though this modulation
decays quickly after merger -- roughly as it would behave in a precessing system. The modes are
then smoothly transitioned to an exponential decay after merger. The frame is a simulated
precession involving the basic orbital rotation precessing about a cone of increasing opening
angle and nutating about that cone on the orbital time scale, but settling down to a constant
direction shortly after merger. (Though there is little precise physical content, these
features are all found in real waveforms.) If the input argument `inertial` is `True` (the
default), the waveform is transformed back to the inertial frame before returning.
Parameters
==========
t_0: float [defaults to -20.0]
t_1: float [defaults to 20_000.0]
The initial and final times in the output waveform. Note that the merger is placed 100.0
time units before `t_1`, and several transitions are made before this, so `t_0` must be
sufficiently earlier than `t_1` to leave room for them.
dt: float [defaults to 0.1]
Spacing of output time series.
ell_max: int [defaults to 8]
Largest ell value in the output modes.
mass_ratio: float [defaults to 2.0]
Ratio of BH masses to use as input to rough approximations for orbital evolution and mode
amplitudes.
precession_opening_angle: float [defaults to pi/6]
Opening angle of the precession cone.
precession_opening_angle_dot: float [defaults to 2*precession_opening_angle/(t_merger-t_0)]
Rate at which precession cone opens.
precession_relative_rate: float [defaults to 0.1]
Fraction of the magnitude of the orbital angular velocity at which it precesses.
precession_nutation_angle: float [defaults to precession_opening_angle/10]
Angle (relative to precession_opening_angle) by which the orbital angular velocity nutates.
"""
import warnings
import numpy as np
import quaternion
from quaternion.calculus import indefinite_integral
from .utilities import transition_function, transition_to_constant
if mass_ratio < 1.0:
mass_ratio = 1.0 / mass_ratio
s = -2
ell_min = abs(s)
data_type = scri.h
nu = mass_ratio / (1 + mass_ratio) ** 2
t = np.arange(t_0, t_1 + 0.99 * dt, dt)
t_merger = t_1 - 100.0
i_merger = np.argmin(abs(t - t_merger))
if i_merger < 20:
raise ValueError(f"Insufficient space between initial time (t={t_merger}) and merger (t={t_0}).")
n_times = t.size
data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
# Get a rough approximation to the phasing through merger
tau = nu * (t_merger - t) / 5
with warnings.catch_warnings(): # phi and omega will have NaNs after t_merger for now
warnings.simplefilter("ignore")
phi = -4 * tau ** (5 / 8)
omega = (nu / 2) * tau ** (-3 / 8)
# Now, transition omega smoothly up to a constant value of 0.25
omega_transition_width = 5.0
i1 = np.argmin(np.abs(omega[~np.isnan(omega)] - 0.25))
i0 = np.argmin(np.abs(t - (t[i1] - omega_transition_width)))
transition = transition_function(t, t[i0], t[i1])
zero_point_two_five = 0.25 * np.ones_like(t)
omega[:i1] = omega[:i1] * (1 - transition[:i1]) + zero_point_two_five[:i1] * transition[:i1]
omega[i1:] = 0.25
# Integrate phi after i0 to agree with the new omega
phi[i0:] = phi[i0] + indefinite_integral(omega[i0:], t[i0:])
# Construct ringdown-transition function
ringdown_transition_width = 20
t0 = t_merger
i0 = np.argmin(np.abs(t - t_merger))
i1 = np.argmin(np.abs(t - (t[i0] + ringdown_transition_width)))
t0 = t[i0]
t1 = t[i1]
transition = transition_function(t, t0, t1)
ringdown = np.ones_like(t)
ringdown[i0:] = ringdown[i0:] * (1 - transition[i0:]) + 2.25 * np.exp(-(t[i0:] - t_merger) / 11.5) * transition[i0:]
# Construct frame
if precession_opening_angle_dot is None:
precession_opening_angle_dot = 2.0 * precession_opening_angle / (t[i1] - t[0])
if precession_nutation_angle is None:
precession_nutation_angle = precession_opening_angle / 10.0
R_orbital = np.exp(phi * quaternion.z / 2)
R_opening = np.exp(
transition_to_constant(precession_opening_angle + precession_opening_angle_dot * t, t, t0, t1)
* quaternion.x
/ 2
)
R_precession = np.exp(transition_to_constant(phi / precession_relative_rate, t, t0, t1) * quaternion.z / 2)
R_nutation = np.exp(precession_nutation_angle * transition * quaternion.x / 2)
frame = (
R_orbital * R_nutation * R_orbital.conjugate() * R_precession * R_opening * R_precession.conjugate() * R_orbital
)
frame = frame[0].sqrt().conjugate() * frame # Just give the initial angle a weird little tweak to screw things up
# Construct the modes
x = omega ** (2 / 3)
modulation = transition_function(t, t[i0], t[i1], 1, 0) * np.cos(phi) / 40.0
for ell in range(ell_min, ell_max + 1):
for m in range(-ell, ell + 1):
data[:, sf.LM_index(ell, m, ell_min)] = pn_leading_order_amplitude(ell, m, x, mass_ratio=mass_ratio) * (
1 + np.sign(m) * modulation
)
# Apply ringdown (mode amplitudes are constant after t_merger)
data *= ringdown[:, np.newaxis]
h_corot = scri.WaveformModes(
t=t,
frame=frame,
data=data,
ell_min=ell_min,
ell_max=ell_max,
frameType=scri.Corotating,
dataType=data_type,
r_is_scaled_out=True,
m_is_scaled_out=True,
)
if inertial:
return h_corot.to_inertial_frame()
else:
return h_corot
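# A minimal usage sketch, assuming the default precession parameters are acceptable; the
# helper name and the shortened time span are illustrative, not values from this module.
def _example_fake_precessing_pair():
    """Return the same fake precessing waveform in the inertial and corotating frames."""
    h_inertial = fake_precessing_waveform(t_0=-20.0, t_1=2_000.0, dt=0.5, ell_max=4)
    h_corotating = fake_precessing_waveform(t_0=-20.0, t_1=2_000.0, dt=0.5, ell_max=4, inertial=False)
    return h_inertial, h_corotating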
def pn_leading_order_amplitude(ell, m, x, mass_ratio=1.0):
"""Return the leading-order amplitude of r*h/M in PN theory
These expressions are from Eqs. (330) of Blanchet's Living Review (2014).
Note that `x` is just the orbital angular velocity to the (2/3) power.
"""
from scipy.special import factorial, factorial2
if m < 0:
return (-1) ** ell * np.conjugate(pn_leading_order_amplitude(ell, -m, x, mass_ratio=mass_ratio))
if mass_ratio < 1.0:
mass_ratio = 1.0 / mass_ratio
nu = mass_ratio / (1 + mass_ratio) ** 2
X1 = mass_ratio / (mass_ratio + 1)
X2 = 1 / (mass_ratio + 1)
def sigma(ell):
return X2 ** (ell - 1) + (-1) ** ell * X1 ** (ell - 1)
if (ell + m) % 2 == 0:
amplitude = (
(
(-1) ** ((ell - m + 2) / 2)
/ (2 ** (ell + 1) * factorial((ell + m) // 2) * factorial((ell - m) // 2) * factorial2(2
# display statistics every 1000 iterations
#print(count)
if count%1000 == 0:
print('Its %d Collected %d/%d Acc %.3f L %.3f k %d Prec %f \n' % (count, \
sample, mcmc_samples, np.sum(acc)/np.sum(prop), marg_lik, k, prec))
# at each iteration: first make a copy of the current model
beta_prop = np.array(beta)
X_prop = np.array(X_mars[:, :k])
Xt_prop = np.array(Xt_mars[:, :k])
k_prop = int(k)
if SAVE_SAMPLES:
basis_params_prop = list(basis_parameters)
#.....anything with a _prop extension is used to denote it as a proposal
# now choose a move
birth=0; death=0; move=0 # no move chosen yet
u = np.random.uniform(0, 1, 1) # uniform random variable on U(0,1)
if u < 0.33:
# add a basis function
birth=1; flag=1
# check for boundary, not allowed more than k_max
if k == k_max:
birth=0; move=1; flag=3 # make a "move" move instead
else:
if u < 0.66:
# delete a basis function
death=1; flag=2
# check for boundary, not allowed to delete the intercept
if k == 1:
death=0; move=0; flag=3 # note move is set to zero! as we will just re-draw beta if k==1
else:
# move a basis function
move=1; flag=3
if k == 1:
move=0 # just re-draw coefficient
# store which move we are attempting
prop[flag-1] = prop[flag-1] + 1
# now depending on move type update the model
if birth:
# we're adding a basis function
k_prop = k + 1
# choose a random depth for the new basis function
temp = np.random.uniform(0, 1, 1)
indx_d = np.ceil(temp*n_inter)
# choose a random order
temp = np.random.uniform(0, 1, 1)
indx_o = np.ceil(temp*n_order)
# update design matrix with a draw from a Mars basis function
X_prop_temp, Xt_prop_temp, basis_prop = gen_mars_basis(X, Xt, interaction[int(indx_d-1)], order[int(indx_o-1)])
X_prop = np.hstack((X_prop, X_prop_temp.reshape((n, 1))))
Xt_prop = np.hstack((Xt_prop, Xt_prop_temp.reshape((nt, 1))))
if SAVE_SAMPLES:
# update basis_parameters
basis_params_prop.append(basis_prop)
else:
if death:
# we've lost a basis function
k_prop = k - 1
# choose a basis from the model to delete, NOT THE INTERCEPT THOUGH
temp = np.random.uniform(0, 1, 1)
indx = np.ceil(temp*(k-1)) + 1
# update design matrix
X_prop = np.delete(X_prop, int(indx-1), 1)
Xt_prop = np.delete(Xt_prop, int(indx-1), 1)
if SAVE_SAMPLES:
del basis_params_prop[int(indx-1)]
if move:
# choose a basis from the model to swap with another in dictionary, not the intercept
temp = np.random.uniform(0, 1, 1)
indx = np.ceil(temp*(k-1)) + 1
# choose a depth for the new basis function
temp = np.random.uniform(0, 1, 1)
indx_d = np.ceil(temp*n_inter)
# choose an order for the new basis function
temp = np.random.uniform(0, 1, 1)
indx_o = np.ceil(temp*n_order)
# update design matrix
X_prop[:, int(indx-1)], Xt_prop[:, int(indx-1)], basis_prop = gen_mars_basis(X, Xt, \
interaction[int(indx_d-1)], order[int(indx_o-1)])
if SAVE_SAMPLES:
# update basis function parameters
basis_params_prop[int(indx-1)] = basis_prop
# get marginal log likelihood of proposed model and a draw of coefficients
marg_lik_prop, beta_prop, beta_mean_prop, alpha_star_prop = get_ml(X_prop[:, :k_prop], Y, YtY, sig2, alpha_1, alpha_2, prec)
# now see if we accept the proposed change to the model using ratio of probabilities.
# note that as we draw a new basis function from the prior we only need marginal likelihoods
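# in practice this means the proposal density for the new basis cancels against its prior,
# so the acceptance probability reduces to min(1, exp(marg_lik_prop - marg_lik)), which is
# exactly the test applied below with a single uniform draw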
rand = np.random.uniform(0, 1, 1)
if rand < np.exp(marg_lik_prop - marg_lik):
# we accept the proposed changes: hence update the state of the Markov chain
beta = np.array(beta_prop)
beta_mean = np.array(beta_mean_prop)
alpha_star = float(alpha_star_prop)
if SAVE_SAMPLES:
basis_parameters = list(basis_params_prop)
k = int(k_prop)
X_mars[:, :k] = np.array(X_prop)
Xt_mars[:, :k] = np.array(Xt_prop)
acc[flag-1] = acc[flag-1] + 1
marg_lik = float(marg_lik_prop)
# update prior precision on beta every 10 iterations after first 200 mcmc its
if count%10 == 0 and count > 200 and k > 1:
# get sum squared value of coefficients
sumsq = np.sum(beta[1:k, ]**2)
# prec = (1/(0.05+0.5*(1/sig2)*sumsq))*randgamma_mat(0.05+0.5*(k-1),1,1)
prec = np.random.gamma(shape = 0.05+0.5*(k-1), scale = (1/(0.05+0.5*(1/sig2)*sumsq)), size = 1)
# prior precision has changed and hence marginal likelihood of current model has changed, so recalculate
marg_lik, beta, beta_mean, alpha_star = get_ml(X_mars[:, :k], Y, YtY, sig2, alpha_1, alpha_2, prec)
# draw a value for the noise variance - this is needed to draw beta in function get_ml()
# inverse variance is Gamma
#sig2_inv = (1/(0.5*(alpha_star + alpha_1)))*randgamma_mat(0.5*(n+alpha_2),1,1)
sig2_inv = np.random.gamma(shape = 0.5*(n+alpha_2), scale = (1/(0.5*(alpha_star + alpha_1))), size = 1)
sig2 = 1/sig2_inv
if count > burn_in:
# start collecting samples
sample = sample + 1
# get mean predictions
if sample % thin == 0:
basis_parameters_seq.append(basis_parameters)
a = Xt_mars[:, :k].dot(beta_mean[:k, ]) # using the posterior mean of beta
# store statistics
pred_store = pred_store + a
chain_stats_k_store[sample_thin] = int(k)
chain_stats_LL_store[sample_thin] = float(marg_lik)
# store credibles
a = Xt_mars[:, :k].dot(beta[:k, ]) # using draw of beta not mean of beta
a_seq[:, sample_thin] = np.array(a)
sample_thin = sample_thin + 1
# end the mcmc loop
# get 95% interval
cred_upper = np.percentile(a_seq, 97.5, axis=1)
cred_lower = np.percentile(a_seq, 2.5, axis=1)
# get MCMC mean
#test_set_predictions_pred_store = pred_store/cred_n
test_set_predictions_pred_store = np.mean(a_seq, axis = 1)
# check the final test error and display
pred_t = pred_store/sample
test_er = np.sum((Yt-pred_t)**2)
print('Final Test er %.3f \n' % test_er)
# calculate credibles
#test_set_predictions_credibles = np.array((min_cred_upper, max_cred_lower))
test_set_predictions_credibles = np.hstack((cred_upper.reshape(nt, 1), cred_lower.reshape(nt, 1)))
return test_set_predictions_credibles, test_set_predictions_pred_store, chain_stats_k_store, \
chain_stats_LL_store, a_seq, basis_parameters_seq
#################################### other functions needed by the main function
# function that gets marginal likelihood and draws beta
def get_ml(X, Y, YtY, sig2, a, b, prec):
"""
function to calculate marginal likelihood of Bayes linear model, Y ~ N(X beta, sig2 I)
with normal-inverse-gamma prior on beta, sig2 ~ NIG(0,prec I, a, b)
Parameters
----------
X : array
the design matrix.
Y : array
the response.
YtY : float
the sum squared of response values, Y.T.dot(Y).
sig2 : float
a draw from the noise variance.
a : float
prior parameters for noise variance.
b : float
prior parameters for noise variance.
prec : float
precision of normal prior on beta: beta | sig2 ~ N(0, sig2 * (1/prec) * I).
Returns
-------
log_ML - log marginal likelihood (up to a constant)
beta - a draw from the posterior distribution of beta
beta_mean - the posterior mean vector for beta
a_star - the posterior sum_squares
"""
import numpy as np
n, p = np.shape(X)
# make prior precision (inverse-variance) matrix......
prior_prec = prec*np.identity(p)
prior_prec[0, 0] = 0 # improper prior on intercept (first col of X)
# calculate posterior variance covariance matrix and precision
post_P = X.T.dot(X) + prior_prec
post_V = np.linalg.pinv(post_P)
# get posterior mean of beta
beta_mean = post_V.dot(X.T.dot(Y))
# calculate log of the square root of determinant of post_V by using Cholesky decomposition
R = np.linalg.cholesky(post_V).T
# this is nice as the log of square root of determinant of post_V is just the
# sum of the log of the diagonal elements of R, where post_V = R'*R, R is upper triangular
half_log_det_post = np.sum(np.log(np.diag(R)))
# now calculate log of square root of determinant of prior (this is easy as prior on beta is diagonal)
half_log_det_prior = -0.5*(p-1)*np.log(prec)
#.......note that we use (p-1) as we use an improper prior on the intercept, beta(1) ~ N(0, infinity)
#.....this does not cause (Lindley-Bartlett paradox) problems as we always include an intercept in the model
# now calculate posterior sum_squares
a_star = YtY - beta_mean.T.dot(post_P).dot(beta_mean)
# finally log marginal likelihood is
log_ML = half_log_det_post - half_log_det_prior - (0.5*(n+b))*np.log(0.5*(a+a_star))
#log_ML = half_log_det_post - half_log_det_prior - 0.5*(a+a_star) - (0.5*(n+b))*np.log(sig2)
# Now draw a value of beta from conditional posterior distribution....
# making use of previous cholesky decomposition
Rsig2 = np.sqrt(sig2)*R
Rsig2 = Rsig2.T
randn = np.random.normal(0, 1, p)
beta = beta_mean + Rsig2.dot(randn)
return log_ML, beta, beta_mean, a_star
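# A minimal usage sketch for get_ml(), assuming a tiny synthetic regression problem; the
# helper name and all numbers below are illustrative, not part of the sampler above.
def _example_get_ml():
    import numpy as np
    rng = np.random.default_rng(0)
    # design matrix with an intercept column, as get_ml() expects in its first column
    X_demo = np.hstack((np.ones((50, 1)), rng.normal(size=(50, 1))))
    Y_demo = X_demo.dot(np.array([1.0, 2.0])) + 0.1 * rng.normal(size=50)
    log_ml, beta_draw, beta_mean, a_star = get_ml(
        X_demo, Y_demo, Y_demo.dot(Y_demo), sig2=0.01, a=1.0, b=1.0, prec=1.0)
    return log_ml, beta_mean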
# function that generates a MARS basis function
def gen_mars_basis(X, Xt, interaction, order):
"""
generates random mars | |
wikidata information relevant to deduplication into
bibliographical metadata, similar to CrossRef JSON format (but simplified) and feed the signature index
'''
metadata = {}
local_title = None
local_first_author = None
if "claims" in entity:
for the_property in entity["claims"]:
claims = entity["claims"][the_property]
if the_property == "P356":
# P356 DOI
if len(claims) > 0 and "value" in claims[0]:
local_doi = claims[0]["value"]
if local_doi != None and len(local_doi) > 0:
metadata["DOI"] = local_doi
# DOI index
entity["index_doi"] = local_doi.lower()
# we can simply get the crossref entry and stop the conversion at this stage
metadata = self.biblio_glutton_lookup(doi=local_doi)
if metadata != None:
if "title" in metadata and "author" in metadata:
local_key = self.title_author_key(metadata["title"], metadata["author"])
if local_key != None:
entity["title_author_key"] = local_key
entity["metadata"] = metadata
return entity
elif the_property == "P1476":
# P1476 gives the article title
if len(claims) > 0 and "value" in claims[0]:
if "text" in claims[0]["value"]:
local_title = claims[0]["value"]["text"]
else:
local_title = claims[0]["value"]
if local_title != None and len(local_title) > 0:
metadata["title"] = [ local_title ]
'''
elif "P50" in claim:
# P50 (entity value) and P2093 (string value) gives the list of authors
# P50 entity value is annoying because it requires an additional web access to get the author string form
# in addition the partition of authors into entity value and string value complicates the
# retrieval of the order of the authors and exploiting the first author as traditional look-up key
if len(claim["P50"]) > 0:
for author_claim in claim["P50"]:
if "value" in author_claim:
local_author_entity = author_claim["value"]
# get the author rank
# the actual author order is given by the property P1545 (series ordinal) in the qualifiers
elif "P2093" in claim:
# unfortunately, the person name string value is raw name string, without identification of
# first/middle/last names
if len(claim["P2093"]) > 0:
for author_claim in claim["P2093"]:
if "value" in author_claim:
local_author_string = author_claim["value"]
# get the author rank
'''
elif the_property == "P577":
# P577 publication date
if len(claims) > 0 and "value" in claims[0]:
local_date = claims[0]["value"]["time"]
# ISO format e.g. "+2002-01-00T00:00:00Z"
if local_date.startswith("+"):
local_date = local_date[1:]
ind = local_date.find("T")
if ind != -1:
local_date = local_date[:ind]
metadata['date'] = local_date
parts = []
date_parts = local_date.split("-")
if len(date_parts) > 0:
# year
parts.append(date_parts[0])
if len(date_parts) > 1:
# month
parts.append(date_parts[1])
if len(date_parts) > 2:
# day
parts.append(date_parts[2])
metadata["published-online"] = { "date-parts": [ parts ] }
elif the_property == "P818":
# P818 arXiv ID
if len(claims) > 0 and "value" in claims[0]:
local_arxiv = claims[0]["value"]
if local_arxiv != None and len(local_arxiv) > 0:
metadata["arXiv"] = local_arxiv
elif the_property == "P698":
# P698 PMID
if len(claims) > 0 and "value" in claims[0]:
local_pmid = claims[0]["value"]
if local_pmid != None and len(local_pmid) > 0:
metadata["PMID"] = local_pmid
elif the_property == "P932":
# P932 PMC ID
if len(claims) > 0 and "value" in claims[0]:
local_pmcid = claims[0]["value"]
if local_pmcid != None and len(local_pmcid) > 0:
metadata["PMID"] = local_pmcid
# no need to go further
# set title + first author last name index
if local_title != None and local_first_author != None:
entity["index_title_author"] = self.title_author_key(local_title, local_first_author)
entity["metadata"] = metadata
return entity
def title_author_key(self, title, author_block):
'''
Generate a key for a document hash index based on the title and first author last name.
If no key is possible, return None
'''
if title == None or len(title) == 0 or author_block == None or len(author_block) == 0:
return None
# normally title is a list, but for safety we also cover a string value
if isinstance(title, list):
simplified_title = title[0].replace(" ", "").lower()
else:
simplified_title = title.replace(" ", "").lower()
if "family" in author_block[0]:
simplified_name = author_block[0]['family'].replace(" ", "").lower()
return simplified_title + '_' + simplified_name
return None
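# for illustration (made-up values): title ["Deep Learning"] with first author
# {"family": "LeCun", "given": "Yann"} yields the key "deeplearning_lecun"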
def register_merging(self, entity1, entity2):
'''
Store a merging decision:
- create or extend the merging list related to the entities
- index the merging list for the two entities
'''
# check if merging_entities and merging_lists collections exist, if not create them
'''
if not self.staging_graph.has_vertex_collection('merging_entities'):
self.merging_entities = self.staging_graph.create_vertex_collection('merging_entities')
else:
self.merging_entities = self.staging_graph.vertex_collection('merging_entities')
if not self.staging_graph.has_vertex_collection('merging_lists'):
self.merging_lists = self.staging_graph.create_vertex_collection('merging_lists')
else:
self.merging_lists = self.staging_graph.vertex_collection('merging_lists')
'''
# do we have a merging list for one of these entities?
merging_list1_id = None
if self.staging_graph.has_vertex("merging_entities/" + entity1['_key']):
merging_list1_item = self.merging_entities.get("merging_entities/" + entity1['_key'])
merging_list1_id = merging_list1_item['list_id']
merging_list2_id = None
if self.staging_graph.has_vertex("merging_entities/" + entity2['_key']):
merging_list2_item = self.merging_entities.get("merging_entities/" + entity2['_key'])
merging_list2_id = merging_list2_item['list_id']
if merging_list1_id != None and merging_list2_id != None and merging_list1_id == merging_list2_id:
# entities already registered for merging, nothing to do...
return True
#print(merging_list1_id, merging_list2_id)
# get the corresponding lists
merging_list1 = None
if merging_list1_id != None and self.staging_graph.has_vertex(merging_list1_id):
merging_list1_item = self.merging_lists.get(merging_list1_id)
merging_list1 = merging_list1_item['data']
merging_list2 = None
if merging_list2_id != None and self.staging_graph.has_vertex(merging_list2_id):
merging_list2_item = self.merging_lists.get(merging_list2_id)
merging_list2 = merging_list2_item['data']
if merging_list1 != None and merging_list2 != None:
# merge the second list into the first one
for local_entity_id in merging_list2:
if not local_entity_id in merging_list1:
merging_list1.append(local_entity_id)
merging_list1_item['data'] = merging_list1
# update first list
self.staging_graph.update_vertex(merging_list1_item)
# update index for all the entities of the second list
for local_id in merging_list2:
entity_item = self.merging_entities.get(_project_entity_id_collection(local_id, "merging_entities"))
entity_item['list_id'] = merging_list1_item['_id']
entity_item['collection'] = _get_collection_name(local_id)
self.staging_graph.update_vertex(entity_item)
# remove second list
self.staging_graph.delete_vertex(merging_list2_item['_id'])
if merging_list1 != None and merging_list2 == None:
# add entity2 into the first list
if entity2['_id'] not in merging_list1:
merging_list1.append(entity2['_id'])
merging_list1_item['data'] = merging_list1
# update first list
self.staging_graph.update_vertex(merging_list1_item)
# update index for entity2
entity2_item = {}
entity2_item['_key'] = entity2['_key']
entity2_item['_id'] = "merging_entities/" + entity2['_key']
entity2_item['list_id'] = merging_list1_item["_id"]
entity2_item['collection'] = _get_collection_name(entity2['_id'])
self.staging_graph.insert_vertex('merging_entities', entity2_item)
elif merging_list1 == None and merging_list2 != None:
# add entity1 into the second list
if not entity1['_id'] in merging_list2:
merging_list2.append(entity1['_id'])
merging_list2_item['data'] = merging_list2
# update second list
self.staging_graph.update_vertex(merging_list2_item)
# update index for entity1
entity1_item = {}
entity1_item['_key'] = entity1['_key']
entity1_item['_id'] = "merging_entities/" + entity1['_key']
entity1_item['list_id'] = merging_list2_item["_id"]
entity1_item['collection'] = _get_collection_name(entity1['_id'])
self.staging_graph.insert_vertex('merging_entities', entity1_item)
elif merging_list1 == None and merging_list2 == None:
# create a new list
merging_list = []
merging_list.append(entity1['_id'])
merging_list.append(entity2['_id'])
local_id = self.get_uid()
merging_list_item = {}
merging_list_item["_key"] = local_id
merging_list_item["_id"] = "merging_lists/" + local_id
merging_list_item['data'] = merging_list
# insert the new list
self.staging_graph.insert_vertex('merging_lists', merging_list_item)
# update index for the 2 entities
entity1_item = {}
entity1_item['_key'] = entity1['_key']
entity1_item['_id'] = "merging_entities/" + entity1['_key']
entity1_item['list_id'] = merging_list_item["_id"]
entity1_item['collection'] = _get_collection_name(entity1['_id'])
self.staging_graph.insert_vertex('merging_entities', entity1_item)
entity2_item = {}
entity2_item['_key'] = entity2['_key']
entity2_item['_id'] = "merging_entities/" + entity2['_key']
entity2_item['list_id'] = merging_list_item["_id"]
entity2_item['collection'] = _get_collection_name(entity2['_id'])
self.staging_graph.insert_vertex('merging_entities', entity2_item)
return True
def _get_first_value_xpath(node, xpath_exp):
values = node.xpath(xpath_exp)
value = None
if values is not None and len(values)>0:
value = values[0].text
return value
def _get_first_attribute_value_xpath(node, xpath_exp):
values = node.xpath(xpath_exp)
value = None
if values is not None and len(values)>0:
value = values[0]
return value
def _get_date_xpath(node, xpath_exp):
dates = node.xpath(xpath_exp)
date = None
if dates is not None and len(dates)>0:
date = dates[0].get("when")
return date
def _get_all_values_authors_xpath(node, xpath_exp):
values = node.xpath(xpath_exp)
result = []
if values is not None and len(values)>0:
for val in values:
# each val is a person
person = {}
fornames = val.xpath('./forename')
surname = val.xpath('./surname')
if surname != None and len(surname)>0 and surname[0].text != None:
person['family'] = surname[0].text.strip()
if fornames != None:
for forname in fornames:
if forname.text != None:
if not 'given' in person:
person['given'] = forname.text.strip()
else:
person['given'] += " " + forname.text
result.append(person)
# family, given - there is no middle name in crossref, it is just concatenated to "given" without any normalization
return result
def _project_entity_id_collection(entity_id, collection_name):
'''
Take an entity id and replace the collection prefix with the provided one
'''
ind = entity_id.find("/")
if ind == -1:
return collection_name+"/"+entity_id
else:
return collection_name+entity_id[ind:]
def _get_collection_name(entity_id):
'''
return the name of the collection based on the given identifier
'''
ind = entity_id.find("/")
if ind != -1:
return entity_id[:ind]
else:
return None
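# for illustration (made-up identifier): _project_entity_id_collection("documents/123", "merging_entities")
# returns "merging_entities/123", and _get_collection_name("documents/123") returns "documents"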
def _biblio_glutton_url(biblio_glutton_protocol, biblio_glutton_host, biblio_glutton_port):
biblio_glutton_base = biblio_glutton_protocol + "://" + biblio_glutton_host
if biblio_glutton_base.endswith("/"):
res = biblio_glutton_base[:-1]
else:
res = biblio_glutton_base
# coding: utf-8
"""
Convenience methods wrapping openpyxl module for handling .xlsx file I/O.
(Despite the module name, this doesn't actually do any parsing of the file
format, but it attempts to intelligently interpret the content.)
"""
import logging
import math
import sys
from openpyxl import Workbook, load_workbook
from openpyxl.writer.excel import save_virtual_workbook
from six import string_types
logger = logging.getLogger(__name__)
def export_to_xlsx(table, headers=None, title="Exported table", file_name=None):
"""
Create a simple Excel workbook from the given table and optional headers.
"""
assert_is_two_dimensional_list(table)
# XXX https://bitbucket.org/openpyxl/openpyxl/issue/375/use-save_virtual_workbook-and-optimized
wb = Workbook(write_only=False)
ws = wb.active
ws.title = title
if headers is not None:
assert (len(table) == 0) or (len(headers) == len(table[0]))
ws.append(headers)
for row in table:
ws.append(row)
if file_name is None:
return save_virtual_workbook(wb)
else:
wb.save(file_name)
return file_name
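# A minimal usage sketch, assuming the caller wants a file on disk; the helper name,
# path and table contents are illustrative only.
def _example_export(path="demo.xlsx"):
    """Write a tiny two-column workbook; passing file_name=None would return bytes instead."""
    table = [[1, 2.5], [3, 4.0]]
    return export_to_xlsx(table, headers=["run", "value"], title="Demo", file_name=path)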
def _get_sheets_from_workbook(wb, name, worksheet_id):
ws = None
if name is not None:
ws = [sheet for sheet in wb.worksheets if sheet.title == name]
if len(ws) == 0:
raise KeyError(f"Cannot find worksheet named {name}")
elif worksheet_id is not None:
ws = [wb.worksheets[worksheet_id]]
else:
ws = wb.worksheets
return ws
def import_xlsx_tables(
file,
column_labels=None,
column_search_text=None,
worksheet_id=None,
worksheet_name=None,
enforce_non_blank_cells=False,
expect_numeric_data=False,
):
"""
Process an excel file and extract the coherent table(s). This will attempt
to use several heuristics to identify what parts of the worksheet(s) contain
a table. The most reliable method is to specify column labels to search for,
at least two of which must be found. A more approximate method is to search
for a specific keyword in the headers. If neither of these are given, any
table-like block of cells will be extracted. In all cases, a row where the
first expected cell is blank signals the end of a contiguous table, but
additional rules apply for the fully-automatic extraction.
:param file: Excel 2007+ (.xlsx) file name or handle
:param column_labels: specific columns names to extract (not case sensitive)
:param column_search_text: string to search for as column header (not case
sensitive)
:param worksheet_id: index of worksheet to extract from
:param worksheet_name: name of worksheet to extract from
:param enforce_non_blank_cells: treat rows containing blank internal cells
as the end of a possible table
:param expect_numeric_data: indicates that tables should contain numeric
values (as opposed to text)
:returns: dict of results with single 'worksheets' key (suitable for JSON
export)
"""
wb = load_workbook(file, read_only=True, data_only=True)
if len(wb.worksheets) == 0:
raise ValueError("No worksheets found in Excel file!")
ws = _get_sheets_from_workbook(wb, worksheet_name, worksheet_id)
ws_results = []
for ws_ in ws:
try:
ws_results.append(
_find_table_of_values(
worksheet=ws_,
column_labels=column_labels,
column_search_text=column_search_text,
enforce_non_blank_cells=enforce_non_blank_cells,
expect_numeric_data=expect_numeric_data,
)
)
except ValueError as e:
logger.exception(f"Error finding table: {e!r}")
# FIXME we can do better than this...
if len(ws_results) == 0:
raise ValueError(
"No valid table-like blocks of contiguous cells found in the file. "
"Please make sure your spreadsheet follows format restrictions."
)
return {"worksheets": ws_results}
def import_xlsx_table(*args, **kwds):
"""
Identical to import_xlsx_tables, but only returns a single table dict (or
raises an error if more than one was found).
"""
result = import_xlsx_tables(*args, **kwds)
tables = result["worksheets"][0]
if (len(result["worksheets"]) > 1) or (len(tables) > 1):
raise ValueError("Multiple tables found.")
return tables[0]
# XXX This might be generalizable to other applications (e.g. processing
# various text files), since it can accept a Python list-of-lists as input
def _find_table_of_values(
worksheet,
column_labels=None,
column_search_text=None,
expect_numeric_data=False,
minimum_number_of_data_rows=2,
maximum_number_of_tables=sys.maxsize,
enforce_non_blank_cells=False,
):
"""
Scan a worksheet for a block of cells resembling a regular table structure,
using optional clues about content. Returns a list of dicts with separate
keys/value pairs for headers and actual data. (The header list may be empty
if no key was defined and the first row contains numeric data.)
"""
rows = _worksheet_to_rows(worksheet)
# We assume these are case insensitive, so Joe Scientist doesn't need to
# learn about Caps Lock.
if column_search_text is not None:
column_search_text = column_search_text.lower()
if column_labels is not None:
column_labels = {cl.lower() for cl in column_labels}
possible_tables = []
row_index = _find_starting_row(rows)
if column_labels is not None:
possible_tables = [_find_table_known_labels(rows, column_labels)]
elif column_search_text is not None:
possible_tables = [_find_table_matched_labels(rows, column_search_text)]
else:
possible_tables = _find_possible_tables(
rows, row_index, enforce_non_blank_cells, minimum_number_of_data_rows
)
if len(possible_tables) > maximum_number_of_tables:
raise ValueError(
"Multiple table-like blocks of cells identified in "
"this worksheet - please specify a (partial) column label to search "
"for, or provide a simpler file containing only the table of interest."
)
elif len(possible_tables) == 0:
raise ValueError(
"No table-like blocks of cells could be automatically "
"identified in this worksheet."
)
return [{"headers": tmp[0], "values": tmp[1:]} for tmp in possible_tables]
def _worksheet_to_rows(worksheet):
rows = worksheet
if not isinstance(worksheet, list):
rows = worksheet_as_list_of_lists(worksheet)
else:
assert_is_two_dimensional_list(rows)
return rows
def _find_starting_row(rows):
for index, row in enumerate(rows):
values_count = number_of_non_blank_cells(row)
if values_count >= 2:
return index
raise ValueError("Could not find a starting row for table.")
def _find_table_known_labels(rows, column_labels):
for row_index, row in enumerate(rows):
column_indices = []
headers = []
for i_cell, value in enumerate(row):
if isinstance(value, string_types) and value.lower() in column_labels:
column_indices.append(i_cell)
headers.append(value)
if len(headers) >= 2:
row_generator = (
[row[k] for k in column_indices if k < len(row)]
for row in rows[row_index + 1 :]
)
table = _find_rows_until_end(row_generator)
if len(table) == 0:
header_text = ";".join(headers)
raise ValueError(
f"The column labels '{header_text}' were found in the worksheet, "
"but no recognizable table of values was associated with them."
)
return [headers, *table]
raise ValueError(
f"The specified labels '{column_labels}' could not be found in "
"this spreadsheet. Make sure the table you wish to extract obeys "
"the required formatting rules."
)
def _find_table_matched_labels(rows, column_search_text):
for row_index, row in enumerate(rows):
headers = []
table = []
start_column = 0
# looking for a specific column header in the row
for i_cell, value in enumerate(row):
if (
isinstance(value, string_types)
and column_search_text.lower() in value.lower()
):
headers = row[i_cell:]
start_column = i_cell
break
if len(headers) > 0:
row_generator = (row[start_column:] for row in rows[row_index + 1 :])
table = _find_rows_until_end(row_generator)
if len(table) == 0:
raise ValueError(
f"The search text '{column_search_text}' was found in the "
"worksheet, but no recognizable table of values was associated "
"with it."
)
return [headers, *table]
raise ValueError(
f"The specified search text '{column_search_text}' could not be associated "
"with a column label in this spreadsheet. Make sure the table you "
"wish to extract obeys the required formatting rules."
)
def _find_rows_until_end(row_generator):
table = []
for row in row_generator:
if row and row[0] is not None:
table.append(row)
else:
break
return table
def _find_possible_tables(
rows, start_row_index, enforce_non_blank_cells=False, minimum_number_of_data_rows=2
):
possible_tables = []
row_index = start_row_index
while row_index < len(rows):
contiguous_rows, row_index = _collect_contiguous_rows(rows, row_index)
# If we only found one row, it might be a headerless run of values.
# But if it doesn't contain anything that looks numeric, forget it.
if len(contiguous_rows) < 2:
if not has_numerical_cells(contiguous_rows):
# Continue outer while loop - go to next chunk
continue
(
first_non_empty_column,
last_non_empty_column,
irregular_row_sizes,
found_blank_cells,
) = _compute_contiguous_stats(contiguous_rows)
# It would be extremely odd if we got this.
if (first_non_empty_column == math.inf) or (last_non_empty_column == -1):
continue # Outer while loop
# Enforcing non-blank-ness means we want a rectangular table, with no holes.
if enforce_non_blank_cells and (irregular_row_sizes or found_blank_cells):
continue # Outer while loop
largest_row_size = (last_non_empty_column - first_non_empty_column) + 1
# We are not going to bother with a 'table' that is 1x1, 1x2, 2x1, 1x3, or 3x1.
if largest_row_size * len(contiguous_rows) < 4:
continue # Outer while loop
# We are going to push these rows in 'unfiltered', starting
# from the first non-empty column, under the assumption that
# empty leading cells are structurally relevant -
# e.g. they indicate null values, or act as spacing for headers.
tmp = []
for c_row in contiguous_rows:
c_row_part = c_row[first_non_empty_column:]
tmp.append(list(c_row_part))
# check that we have a reasonable number of rows in current table
if len(tmp) > minimum_number_of_data_rows:
possible_tables.append(tmp)
return possible_tables
def _collect_contiguous_rows(rows, start_row_index):
contiguous_rows = []
row_index = start_row_index
# scan ahead in the table for additional rows that are non-blank, and collect them
while row_index < len(rows):
row = rows[row_index]
row_index += 1
nb_values = number_of_non_blank_cells(row)
if nb_values > 0:
contiguous_rows.append(row)
elif len(contiguous_rows) ==
0) * ((epoch + 1) / self.epoch) # Eq. 21
r_idx = np.random.choice(list(set(range(0, self.pop_size)) - {idx}))
x_r = pop[r_idx][self.ID_POS]
# x_r = pop[np.random.randint(0, self.pop_size-1)][self.ID_POS]
if np.random.random() < 0.5:
x_new = beta * x_r + (1 - beta) * pop[idx][self.ID_POS]
else:
x_new = (1 - beta) * x_r + beta * pop[idx][self.ID_POS]
else:
x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * pop[idx][self.ID_POS])
# x_new = best[self.ID_POS] + np.random.normal() * best[self.ID_POS]
pos_new = self.amend_position_faster(x_new)
fit_new = self.get_fitness_position(pos_new)
if self.compare_agent([pos_new, fit_new], pop[idx]):
return [pos_new, fit_new]
return pop[idx].copy()
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
[position, fitness value]
"""
pop_copy = pop.copy()
pop_idx = np.array(range(0, self.pop_size - 1))
## Production - Update the worst agent
# Eq. 13
a = 2 * (1 - (epoch + 1) / self.epoch)
x1 = (1 - a) * pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub)
pos_new = self.amend_position_faster(x1)
fit_new = self.get_fitness_position(x1)
pop[-1] = [pos_new, fit_new]
## Consumption - Update the whole population left
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy), pop_idx)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy), pop_idx)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child(idx, pop_copy) for idx in pop_idx]
pop_new.append(pop[-1])
## find current best used in decomposition
_, best = self.get_global_best_solution(pop_new)
pop_idx = np.array(range(0, self.pop_size))
## Decomposition
### Eq. 10, 11, 12, 9
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child2, best=best, pop=pop_new, epoch=epoch), pop_idx)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child2, best=best, pop=pop_new, epoch=epoch), pop_idx)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child2(idx, best, pop_new, epoch) for idx in pop_idx]
return pop_new
class ModifiedAEO(Optimizer):
"""
Original version of: Modified Artificial Ecosystem-Based Optimization
(Effective Parameter Extraction of Different Polymer Electrolyte Membrane Fuel Cell Stack Models Using a
Modified Artificial Ecosystem Optimization Algorithm)
Link:
https://doi.org/10.1109/ACCESS.2020.2973351
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size (Harmony Memory Size), default = 100
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = 2 * pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
def create_child(self, idx, pop, H):
rand = np.random.random()
# Eq. 4, 5, 6
v1 = np.random.normal(0, 1)
v2 = np.random.normal(0, 1)
c = 0.5 * v1 / abs(v2) # Consumption factor
if idx == 0:
j = 1
else:
j = np.random.randint(0, idx)
### Herbivore
if rand <= 1.0 / 3: # Eq. 23
pos_new = pop[idx][self.ID_POS] + H * c * (pop[idx][self.ID_POS] - pop[0][self.ID_POS])
### Carnivore
elif 1.0 / 3 <= rand <= 2.0 / 3:  # Eq. 24
pos_new = pop[idx][self.ID_POS] + H * c * (pop[idx][self.ID_POS] - pop[j][self.ID_POS])
### Omnivore
else: # Eq. 25
r5 = np.random.random()
pos_new = pop[idx][self.ID_POS] + H * c * (r5 * (pop[idx][self.ID_POS] - pop[0][self.ID_POS]) +
(1 - r5) * (pop[idx][self.ID_POS] - pop[j][self.ID_POS]))
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if self.compare_agent([pos_new, fit_new], pop[idx]):
return [pos_new, fit_new]
return pop[idx].copy()
def create_child2(self, idx, best, pop, epoch):
r3 = np.random.uniform()
d = 3 * np.random.normal(0, 1)
e = r3 * np.random.randint(1, 3) - 1
h = 2 * r3 - 1
# x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * agent_i[self.ID_POS])
if np.random.random() < 0.5:
beta = 1 - (1 - 0) * ((epoch + 1) / self.epoch) # Eq. 21
r_idx = np.random.choice(list(set(range(0, self.pop_size)) - {idx}))
x_r = pop[r_idx][self.ID_POS]
# x_r = pop[np.random.randint(0, self.pop_size-1)][self.ID_POS]
if np.random.random() < 0.5:
x_new = beta * x_r + (1 - beta) * pop[idx][self.ID_POS]
else:
x_new = (1 - beta) * x_r + beta * pop[idx][self.ID_POS]
else:
x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * pop[idx][self.ID_POS])
# x_new = best[self.ID_POS] + np.random.normal() * best[self.ID_POS]
pos_new = self.amend_position_faster(x_new)
fit_new = self.get_fitness_position(pos_new)
if self.compare_agent([pos_new, fit_new], pop[idx]):
return [pos_new, fit_new]
return pop[idx].copy()
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
[position, fitness value]
"""
pop_copy = pop.copy()
pop_idx = np.array(range(0, self.pop_size - 1))
## Production
# Eq. 22
H = 2 * (1 - (epoch + 1) / self.epoch)
a = (1 - (epoch + 1) / self.epoch) * np.random.random()
x1 = (1 - a) * pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub)
pos_new = self.amend_position_faster(x1)
fit_new = self.get_fitness_position(pos_new)
pop[-1] = [pos_new, fit_new]
## Consumption - Update the whole population left
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy, H=H), pop_idx)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy, H=H), pop_idx)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child(idx, pop_copy, H) for idx in pop_idx]
pop_new.append(pop[-1])
## find current best used in decomposition
_, best = self.get_global_best_solution(pop_new)
pop_idx = np.array(range(0, self.pop_size))
## Decomposition
### Eq. 10, 11, 12, 9
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child2, best=best, pop=pop_new, epoch=epoch), pop_idx)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child2, best=best, pop=pop_new, epoch=epoch), pop_idx)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child2(idx, best, pop_new, epoch) for idx in pop_idx]
return pop_new
class AdaptiveAEO(Optimizer):
"""
This is Adaptive Artificial Ecosystem Optimization based on
+ Linear weight factor reduce from 2 to 0 through time
+ Levy_flight
+ Global best solution
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size (Harmony Memory Size), default = 100
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = 2 * pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
def create_child(self, idx, pop, g_best, epoch, wf):
if np.random.random() < 0.5:
rand = np.random.random()
# Eq. 4, 5, 6
c = 0.5 * np.random.normal(0, 1) / abs(np.random.normal(0, 1)) # Consumption factor
if idx == 0:
j = 1
else:
j = np.random.randint(0, idx)
### Herbivore
if rand < 1.0 / 3:
pos_new = pop[idx][self.ID_POS] + wf * c * (pop[idx][self.ID_POS] - pop[0][self.ID_POS]) # Eq. 6
### Omnivore
elif 1.0 / 3 <= rand <= 2.0 / 3:
pos_new = pop[idx][self.ID_POS] + wf * c * (pop[idx][self.ID_POS] - pop[j][self.ID_POS]) # Eq. 7
### Carnivore
else:
r2 = np.random.uniform()
pos_new = pop[idx][self.ID_POS] + wf * c * (r2 * (pop[idx][self.ID_POS] - pop[0][self.ID_POS]) +
(1 - r2) * (pop[idx][self.ID_POS] - pop[j][self.ID_POS]))
else:
pos_new = pop[idx][self.ID_POS] + self.get_levy_flight_step(0.001, 1., case=-1) * \
(1.0 / np.sqrt(epoch + 1)) * np.sign(np.random.random() - 0.5) * (pop[idx][self.ID_POS] - g_best[self.ID_POS])
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if self.compare_agent([pos_new, fit_new], pop[idx]):
return [pos_new, fit_new]
return pop[idx].copy()
def create_child2(self, idx, pop, g_best, local_best, epoch):
if np.random.random() < 0.5:
pos_new = local_best[self.ID_POS] + np.random.normal(0, 1, self.problem.n_dims) * (local_best[self.ID_POS] - pop[idx][self.ID_POS])
else:
pos_new = g_best[self.ID_POS] + self.get_levy_flight_step(0.001, 0.75, case=-1) * \
1.0 / np.sqrt(epoch + 1) * np.sign(np.random.random() - 0.5) * (g_best[self.ID_POS] - pop[idx][self.ID_POS])
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if self.compare_agent([pos_new, fit_new], pop[idx]):
return [pos_new, fit_new]
return pop[idx].copy()
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import copy, warnings
from os import path
from collections import OrderedDict
import itertools
import numpy as np
from scipy import stats, signal, interpolate
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from . import six, afni, io, utils, dicom, math
def convolve_HRF(starts, lens, TR=2, scan_time=None, HRF=None):
if np.isscalar(lens):
lens = lens * np.ones(len(starts))
numout_cmd = '' if scan_time is None else f"-numout {np.ceil(scan_time/TR)}"
HRF_cmd = '-GAM' if HRF is None else HRF
res = utils.run(f"waver -TR {TR} {numout_cmd} {HRF_cmd} -tstim {' '.join([f'{t:g}%{l:g}' for t, l in zip(starts, lens)])}", verbose=0)
return np.float_(afni.filter_output(res['output'], ex_tags=['++']))
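# A minimal usage sketch, assuming AFNI's `waver` is on the PATH (convolve_HRF shells out
# to it); the helper name and the event timings are illustrative only.
def _example_ideal_regressor():
    """Three 5 s events in a 100 s scan sampled at TR = 2 s -> one regressor value per TR."""
    return convolve_HRF(starts=[10, 40, 70], lens=5, TR=2, scan_time=100)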
def create_ideal(stimuli, lens, **kwargs):
'''
Parameters
----------
stimuli : list of fname
'''
starts = [io.read_stim(fname) for fname in stimuli]
n_stims = len(starts)
n_runs = len(starts[0])
assert(np.all(np.array([len(runs) for runs in starts]) == n_runs))
if lens == 'alterating':
lens = [[[] for run in range(n_runs)] for stim in range(n_stims)]
for run in range(n_runs):
curr_state, curr_time = -1, np.nan
n_events = np.array([len(starts[stim][run]) for stim in range(n_stims)])
ids = np.zeros(n_stims, dtype=int)
while np.any(ids < n_events):
wavefront = [(starts[stim][run][ids[stim]] if ids[stim] < n_events[stim] else np.inf) for stim in range(n_stims)]
next_state, next_time = np.argmin(wavefront), np.min(wavefront)
if next_state != curr_state:
if curr_state != -1:
lens[curr_state][run].append(next_time - curr_time)
curr_state, curr_time = next_state, next_time
ids[curr_state] += 1
elif np.isscalar(lens):
lens = [[[lens]*len(starts[stim][run]) for run in range(n_runs)] for stim in range(n_stims)]
ideal = [[convolve_HRF(starts[stim][run], lens[stim][run], **kwargs) for run in range(n_runs)] for stim in range(n_stims)]
return ideal
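# a minimal usage sketch (file names are illustrative): with two stimulus timing files and
# a fixed 5 s event duration,
# ideal = create_ideal(['stim_A.1D', 'stim_B.1D'], lens=5, TR=2, scan_time=300)
# returns ideal[stim][run], one convolved regressor per stimulus and per run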
def create_times(tmin, tmax, dt):
if tmin < 0 and tmax > 0:
times = np.r_[np.arange(-dt, tmin-dt/2, -dt)[::-1], np.arange(0, tmax+dt/2, dt)]
else:
times = np.arange(tmin, tmax+dt/2, dt)
return times
def create_ERP(t, x, events, tmin=-8, tmax=16, dt=0.1, baseline=[-2,0], interp='linear'):
'''
t : time for each data point (can be non-contiguous)
x : [event, feature, time]
events : event onset time (can be on non-integer time point)
'''
times = create_times(tmin, tmax, dt)
f = interpolate.interp1d(t, x, axis=-1, kind=interp, fill_value=np.nan, bounds_error=False)
base_corr = create_base_corr_func(times, baseline=baseline)
ERP = np.zeros(np.r_[len(events), x.shape[1:-1], len(times)].astype(int), dtype=x.dtype)
for k, t in enumerate(events):
ERP[k] = base_corr(f(np.arange(t+tmin, t+tmax+dt/2, dt)))
return ERP, times
class Attributes(object):
def __init__(self, shape):
super().__setattr__('attributes', {})
self.shape = shape
shape = property(lambda self: self._shape, lambda self, x: setattr(self, '_shape', np.array(x)))
def add(self, name, value, axis):
assert(len(value) == self.shape[axis])
self.attributes[name] = {'axis': axis, 'value': np.array(value)}
def drop(self, name):
self.attributes.pop(name)
def drop_all_with_axis(self, axis):
axes = axis if np.iterable(axis) else [axis]
for axis in axes:
for name in self.names_with_axis(axis):
self.drop(name)
def __getattr__(self, name):
return self.attributes[name]['value']
def __setattr__(self, name, value):
if name in self.attributes:
assert(len(value) == self.shape[self.attributes[name]['axis']])
self.attributes[name]['value'] = np.array(value)
else:
# If the attribute has not been added before, it will become an instance attribute
super().__setattr__(name, value)
def names_with_axis(self, axis):
return [name for name, attr in self.attributes.items() if attr['axis']==axis]
def __repr__(self):
names = '\n'.join([f" axis={axis} ({self.shape[axis]}) | {', '.join(self.names_with_axis(axis))}" for axis in range(len(self.shape))])
return f"<Attributes | shape = {self.shape}\n" + names
def __copy__(self):
inst = type(self)(self.shape)
inst.attributes = copy.deepcopy(self.attributes)
return inst
def pick(self, index, axis):
inst = copy.copy(self)
if np.iterable(axis):
indices, axes = index, axis
else:
indices, axes = [index], [axis]
for index, axis in zip(indices, axes):
# Update shape first via a virtual attribute (in case there is not attribute at all)
inst.shape[axis] = len(np.arange(inst.shape[axis])[index])
# Update attributes belonging to the axis
for name in inst.names_with_axis(axis):
attr = inst.attributes[name]
attr['value'] = attr['value'][index]
return inst
@classmethod
def concatinate(cls, attributes_list, axis):
# Concat shape along axis, and check shape compatibility along other axis
inst = attributes_list[0]
self = cls(inst.shape)
self.shape[axis] = np.sum([attributes.shape[axis] for attributes in attributes_list])
other_axes = np.r_[0:axis, axis+1:len(self.shape)]
for attributes in attributes_list[1:]:
assert(np.all(attributes.shape[other_axes] == self.shape[other_axes]))
# Concat attributes along axis, and check attributes compatibility (identity) along other axis
for name, attr in inst.attributes.items():
if attr['axis'] == axis:
value = np.concatenate([attributes.attributes[name]['value'] for attributes in attributes_list])
else:
value = attr['value']
for attributes in attributes_list[1:]:
assert(np.all(attributes.attributes[name]['value'] == value))
            self.add(name, value, attr['axis'])  # keep each attribute on its own axis
return self
def to_dict(self):
return dict(attributes=self.attributes, shape=self.shape)
@classmethod
def from_dict(cls, d):
self = cls(None)
for k, v in d.items():
setattr(self, k, v)
return self
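# Hedged usage sketch (added for clarity; not part of the original code). The shape and
# labels are illustrative assumptions.
def _example_attributes():
    attrs = Attributes(shape=(3, 5))
    attrs.add('condition', ['A', 'B', 'A'], axis=0)  # one label per entry along axis 0
    picked = attrs.pick([0, 2], axis=0)              # keep entries 0 and 2 along axis 0
    return picked.shape, picked.condition            # -> array([2, 5]), array(['A', 'A'])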
class Raw(utils.Savable, object):
def __init__(self, fname, mask=None, TR=None):
if fname is None:
return # Skip __init__(), create an empty Raw object, and manually initialize it later.
if mask is not None:
self.mask = mask if isinstance(mask, io.Mask) else io.Mask(mask)
self.data = self.mask.dump(fname)
else:
self.mask = None
self.data = io.read_vol(fname)
self.info = {}
self.info['sfreq'] = 1 / (afni.get_TR(fname) if TR is None else TR)
self.info['feature_name'] = 'voxel'
self.info['value_name'] = 'value'
self.times = np.arange(self.n_times) * self.TR
shape = property(lambda self: self.data.shape)
n_features = property(lambda self: self.data.shape[0])
n_times = property(lambda self: self.data.shape[1])
TR = property(lambda self: 1 / self.info['sfreq'])
@classmethod
def from_array(cls, data, TR):
'''
data : 2D array, [n_features, n_times]
TR : in sec
'''
self = cls(None)
self.mask = None
self.data = np.array(data, copy=False)
self.info = dict(sfreq=1/TR, feature_name='voxel', value_name='value')
self.times = np.arange(self.n_times) * self.TR
return self
def __repr__(self):
# return f"<Raw | {self.n_features} {self.info['feature_name']}s, {self.times[0]:.3f} - {self.times[-1]:.3f} sec, TR = {self.TR} sec, {self.n_times} TRs>"
return f"<Raw | {self.n_features} {self.info['feature_name']}s, {self.times[0]:.3f} - {self.times[-1]+self.TR:.3f} sec, TR = {self.TR} sec, {self.n_times} TRs>"
def copy(self):
return _copy(self)
def plot(self, events=None, event_id=None, color=None, palette=None, figsize=None, event_kws=None, **kwargs):
# Plot mean time course
data = np.mean(self.data, axis=0)
if events is not None: # If going to plot events, plot data in black by default
color = 'k' if color is None else color
if figsize is not None:
plt.gcf().set_figwidth(figsize[0])
plt.gcf().set_figheight(figsize[1])
plt.plot(self.times, data, color=color, **kwargs)
plt.xlabel('Time (s)')
plt.ylabel('Signal change (%)')
# Plot events
if events is not None:
if event_id is None:
event_id = _default_event_id(events)
if palette is None: # A palette is eventually a list of colors
palette = plt.rcParams['axes.prop_cycle'].by_key()['color']
id2ev = {id: [eid, ev, None] for eid, (ev, id) in enumerate(event_id.items())}
event_kws = dict(dict(), **(event_kws if event_kws is not None else {}))
for event in events:
t, id = event[0], event[-1]
id2ev[id][2] = plt.axvline(t, color=palette[id2ev[id][0]], **event_kws)
plt.legend(*zip(*[(h, ev) for eid, ev, h in id2ev.values()]))
def to_dict(self):
return dict(info=self.info, data=self.data, mask=self.mask.to_dict(), times=self.times)
@classmethod
def from_dict(cls, d):
self = cls(None)
for k, v in d.items():
setattr(self, k, v)
self.mask = io.Mask.from_dict(self.mask)
return self
def _copy(self):
'''Copy all object attributes other than `data`, which is simply referred to.'''
# TODO: .info and events etc. should be deep copied
data = self.data
del self.data
inst = copy.copy(self)
inst.data = self.data = data
return inst
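# Hedged usage sketch (added for clarity; not part of the original code). The array size and
# TR below are illustrative assumptions.
def _example_raw_from_array():
    data = np.random.randn(10, 200)              # 10 voxels x 200 TRs
    raw = Raw.from_array(data, TR=2.0)
    return raw.n_features, raw.n_times, raw.TR   # -> 10, 200, 2.0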
class RawCache(utils.Savable, object):
def __init__(self, fnames, mask, TR=None, cache_file=None, force_redo=False):
if fnames is None:
return # Skip __init__(), create an empty RawCache object, and manually initialize it later.
if cache_file is None or not utils.exists(cache_file, force_redo=force_redo):
self.mask = mask if isinstance(mask, io.Mask) else io.Mask(mask)
self.raws = [Raw(fname, mask=self.mask, TR=TR) for fname in fnames]
if cache_file is not None:
self.save(cache_file)
else:
inst = self.load(cache_file)
self.mask = inst.mask
self.raws = inst.raws
n_runs = property(lambda self: len(self.raws))
def subset(self, mask, cache_file=None):
inst = type(self)(None, None)
inst.mask = mask if isinstance(mask, io.Mask) else io.Mask(mask)
inst.raws = self.get_raws(mask) # TODO: copy???
if cache_file is not None:
inst.save(cache_file)
return inst
def get_raws(self, mask, ids=None):
return_scalar = False
if ids is None:
ids = range(self.n_runs)
elif not utils.iterable(ids):
return_scalar = True
ids = [ids]
if isinstance(mask, six.string_types):
mask = io.Mask(mask)
selector = self.mask.infer_selector(mask)
elif isinstance(mask, io.Mask):
selector = self.mask.infer_selector(mask)
else: # boolean index
selector = mask
mask = self.mask.pick(selector)
raws = []
for idx in ids:
raw = self.raws[idx].copy()
raw.data = raw.data[selector]
raw.mask = mask
raws.append(raw)
return raws[0] if return_scalar else raws
def get_epochs(self, mask, events, event_id, ids=None, cache_file=None, **kwargs):
assert(len(events) == self.n_runs or len(events) == len(ids))
if cache_file is None or not utils.exists(cache_file):
epochs = [Epochs(raw, events[idx], event_id=event_id, **kwargs) for idx, raw in enumerate(self.get_raws(mask, ids=ids))]
epochs = concatinate_epochs(epochs)
if cache_file is not None:
epochs.save(cache_file)
else:
epochs = Epochs.load(cache_file)
return epochs
def to_dict(self):
return dict(raws=[raw.to_dict() for raw in self.raws], mask=self.mask.to_dict())
@classmethod
def from_dict(cls, d):
self = cls(None, None)
for k, v in d.items():
setattr(self, k, v)
self.raws = [Raw.from_dict(raw) for raw in self.raws]
self.mask = io.Mask.from_dict(self.mask)
return self
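# Hedged usage sketch (commented out; not part of the original code). File names, mask, and
# event structures are illustrative assumptions.
#   cache = RawCache(fnames=['run1+orig.HEAD', 'run2+orig.HEAD'], mask='mask+orig.HEAD',
#                    TR=2.0, cache_file='raw_cache.pkl')
#   raws = cache.get_raws('mask+orig.HEAD')                        # one Raw per run
#   epochs = cache.get_epochs('mask+orig.HEAD', events, event_id={'stim': 1})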
def read_events(event_files):
'''
    Read events from event files.
    '''
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import contextlib
import os
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
from skimage.transform import resize
from fastmri.data.poisson_disc_variable_density import PoissonSampler
@contextlib.contextmanager
def temp_seed(rng: np.random.RandomState, seed: Optional[Union[int, Tuple[int, ...]]]):
"""A context manager for temporarily adjusting the random seed."""
if seed is None:
try:
yield
finally:
pass
else:
state = rng.get_state()
rng.seed(seed)
try:
yield
finally:
rng.set_state(state)
class MaskFunc:
"""
An object for GRAPPA-style sampling masks.
    This creates a sampling mask that densely samples the center while
subsampling outer k-space regions based on the undersampling factor.
    When called, ``MaskFunc`` uses internal functions to create the mask by 1)
    creating a mask for the k-space center, 2) creating a mask outside of the
k-space center, and 3) combining them into a total mask. The internals are
handled by ``sample_mask``, which calls ``calculate_center_mask`` for (1)
and ``calculate_acceleration_mask`` for (2). The combination is executed
in the ``MaskFunc`` ``__call__`` function.
If you would like to implement a new mask, simply subclass ``MaskFunc``
and overwrite the ``sample_mask`` logic. See examples in ``RandomMaskFunc``
and ``EquispacedMaskFunc``.
"""
def __init__(
self,
center_fractions: Sequence[float],
accelerations: Sequence[int],
allow_any_combination: bool = False,
seed: Optional[int] = None,
):
"""
Args:
center_fractions: Fraction of low-frequency columns to be retained.
If multiple values are provided, then one of these numbers is
chosen uniformly each time.
accelerations: Amount of under-sampling. This should have the same
length as center_fractions. If multiple values are provided,
then one of these is chosen uniformly each time.
allow_any_combination: Whether to allow cross combinations of
elements from ``center_fractions`` and ``accelerations``.
seed: Seed for starting the internal random number generator of the
``MaskFunc``.
"""
if len(center_fractions) != len(accelerations) and not allow_any_combination:
raise ValueError(
"Number of center fractions should match number of accelerations "
"if allow_any_combination is False."
)
self.center_fractions = center_fractions
self.accelerations = accelerations
self.allow_any_combination = allow_any_combination
self.rng = np.random.RandomState(seed)
def __call__(
self,
shape: Sequence[int],
offset: Optional[int] = None,
num_offsets=8,
seed: Optional[Union[int, Tuple[int, ...]]] = None,
) -> Tuple[torch.Tensor, int]:
"""
Sample and return a k-space mask.
Args:
shape: Shape of k-space.
offset: Offset from 0 to begin mask (for equispaced masks). If no
offset is given, then one is selected randomly.
seed: Seed for random number generator for reproducibility.
Returns:
A 2-tuple containing 1) the k-space mask and 2) the number of
center frequency lines.
"""
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")
with temp_seed(self.rng, seed):
center_mask, accel_mask, num_low_frequencies = self.sample_mask(
shape, offset, num_offsets, seed
)
# combine masks together
return torch.max(center_mask, accel_mask), num_low_frequencies
    def sample_mask(
        self,
        shape: Sequence[int],
        offset: Optional[int],
        num_offsets: int = 8,
        seed: Optional[Union[int, Tuple[int, ...]]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, int]:
"""
Sample a new k-space mask.
This function samples and returns two components of a k-space mask: 1)
the center mask (e.g., for sensitivity map calculation) and 2) the
acceleration mask (for the edge of k-space). Both of these masks, as
        well as the integer count of low-frequency samples, are returned.
Args:
shape: Shape of the k-space to subsample.
            offset: Offset from 0 to begin mask (for equispaced masks).
            num_offsets: Accepted for signature compatibility with ``__call__``;
                unused by this base implementation.
            seed: Accepted for signature compatibility with ``__call__``; unused
                by this base implementation.
Returns:
            A 3-tuple containing 1) the mask for the center of k-space, 2) the
mask for the high frequencies of k-space, and 3) the integer count
of low frequency samples.
"""
num_cols = shape[-2]
center_fraction, acceleration = self.choose_acceleration()
num_low_frequencies = round(num_cols * center_fraction)
center_mask = self.reshape_mask(
self.calculate_center_mask(shape, num_low_frequencies), shape
)
acceleration_mask = self.reshape_mask(
self.calculate_acceleration_mask(
num_cols, acceleration, offset, num_low_frequencies
),
shape,
)
return center_mask, acceleration_mask, num_low_frequencies
def reshape_mask(self, mask: np.ndarray, shape: Sequence[int]) -> torch.Tensor:
"""Reshape mask to desired output shape."""
num_cols = shape[-2]
mask_shape = [1 for _ in shape]
mask_shape[-2] = num_cols
return torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
def calculate_acceleration_mask(
self,
num_cols: int,
acceleration: int,
offset: Optional[int],
num_low_frequencies: int,
) -> np.ndarray:
"""
Produce mask for non-central acceleration lines.
Args:
num_cols: Number of columns of k-space (2D subsampling).
acceleration: Desired acceleration rate.
offset: Offset from 0 to begin masking (for equispaced masks).
num_low_frequencies: Integer count of low-frequency lines sampled.
Returns:
A mask for the high spatial frequencies of k-space.
"""
raise NotImplementedError
def calculate_center_mask(
self, shape: Sequence[int], num_low_freqs: int
) -> np.ndarray:
"""
Build center mask based on number of low frequencies.
Args:
shape: Shape of k-space to mask.
num_low_freqs: Number of low-frequency lines to sample.
Returns:
            A mask for the low spatial frequencies of k-space.
"""
num_cols = shape[-2]
mask = np.zeros(num_cols, dtype=np.float32)
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = 1
assert mask.sum() == num_low_freqs
return mask
def choose_acceleration(self):
"""Choose acceleration based on class parameters."""
if self.allow_any_combination:
return self.rng.choice(self.center_fractions), self.rng.choice(
self.accelerations
)
else:
choice = self.rng.randint(len(self.center_fractions))
return self.center_fractions[choice], self.accelerations[choice]
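# Hedged sketch of the subclassing pattern described in the ``MaskFunc`` docstring (added for
# illustration; not part of the original module). The class name and sampling rule are
# assumptions: it keeps every ``acceleration``-th column outside the dense center.
class _EveryNthColumnMaskFunc(MaskFunc):
    def calculate_acceleration_mask(
        self,
        num_cols: int,
        acceleration: int,
        offset: Optional[int],
        num_low_frequencies: int,
    ) -> np.ndarray:
        mask = np.zeros(num_cols, dtype=bool)
        mask[::acceleration] = True
        return mask
# Example call (commented out to avoid work at import time):
#   mask_func = _EveryNthColumnMaskFunc(center_fractions=[0.08], accelerations=[4])
#   mask, num_low = mask_func(shape=(1, 640, 368, 2), seed=42)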
class MaskFunc3D(MaskFunc):
def sample_mask(
self,
shape: Sequence[int],
offset: Optional[int],
num_offsets=16,
seed = None,
) -> Tuple[torch.Tensor, torch.Tensor, int]:
"""
Sample a new k-space mask.
This function samples and returns two components of a k-space mask: 1)
the center mask (e.g., for sensitivity map calculation) and 2) the
acceleration mask (for the edge of k-space). Both of these masks, as
        well as the integer count of low-frequency samples, are returned.
Args:
shape: Shape of the k-space to subsample.
offset: Offset from 0 to begin mask (for equispaced masks).
Returns:
            A 3-tuple containing 1) the mask for the center of k-space, 2) the
mask for the high frequencies of k-space, and 3) the integer count
of low frequency samples.
"""
num_cols = shape[-2]
center_fraction, acceleration = self.choose_acceleration()
num_low_frequencies = round(num_cols * center_fraction)
acceleration_mask = self.reshape_mask(
self.calculate_acceleration_mask_3D(
num_cols, acceleration, offset, num_low_frequencies, shape, seed # num_offsets, seed
),
shape
)
center_mask = torch.zeros_like(acceleration_mask)
return center_mask, acceleration_mask, num_low_frequencies
def reshape_mask(self, mask: np.ndarray, shape: Sequence[int]) -> torch.Tensor:
return torch.from_numpy(mask.astype(np.float32))
def calculate_acceleration_mask_3D(
self,
num_cols: int,
acceleration: int,
offset: Optional[int],
num_low_frequencies: int,
shape,
seed
) -> np.ndarray:
"""
Produce mask for non-central acceleration lines.
Args:
num_cols: Number of columns of k-space (2D subsampling).
acceleration: Desired acceleration rate.
offset: Offset from 0 to begin masking (for equispaced masks).
num_low_frequencies: Integer count of low-frequency lines sampled.
Returns:
A mask for the high spatial frequencies of k-space.
"""
raise NotImplementedError
def calculate_center_mask(
self, shape: Sequence[int], num_low_freqs: int
) -> np.ndarray:
"""
Build center mask based on number of low frequencies.
Args:
shape: Shape of k-space to mask.
num_low_freqs: Number of low-frequency lines to sample.
Returns:
            A mask for the low spatial frequencies of k-space.
"""
num_cols = shape[-2]
mask = np.zeros(num_cols, dtype=np.float32)
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = 1
assert mask.sum() == num_low_freqs
return mask
def choose_acceleration(self):
"""Choose acceleration based on class parameters."""
if self.allow_any_combination:
return self.rng.choice(self.center_fractions), self.rng.choice(
self.accelerations
)
else:
choice = self.rng.randint(len(self.center_fractions))
return self.center_fractions[choice], self.accelerations[choice]
class RandomMaskFunc(MaskFunc):
"""
Creates a random sub-sampling mask of a given shape.
The mask selects a subset of columns from the input k-space data. If the
k-space data has N columns, the mask picks out:
1. N_low_freqs = (N * center_fraction) columns in the center
corresponding to low-frequencies.
2. The other columns are selected uniformly at random with a
probability equal to: prob = (N / acceleration - N_low_freqs) /
(N - N_low_freqs). This ensures that the expected number of columns
selected is equal to (N / acceleration).
It is possible to use multiple center_fractions and accelerations, in which
case one possible (center_fraction, acceleration) is chosen uniformly at
random each time the ``RandomMaskFunc`` object is called.
For example, if accelerations = [4, 8] and center_fractions = [0.08, 0.04],
then there is a 50% probability that 4-fold acceleration with 8% center
fraction is selected and a 50% probability that 8-fold acceleration with 4%
center fraction is selected.
"""
def calculate_acceleration_mask(
self,
num_cols: int,
acceleration: int,
offset: Optional[int],
num_low_frequencies: int,
) -> np.ndarray:
prob = (num_cols / acceleration - num_low_frequencies) / (
num_cols - num_low_frequencies
)
return self.rng.uniform(size=num_cols) < prob
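# Worked example of the probability above (illustrative numbers, not from the original code):
# with num_cols = 368, acceleration = 4 and num_low_frequencies = 30,
# prob = (368/4 - 30) / (368 - 30) = 62/338 ~= 0.183, so on average 30 + 0.183*338 = 92
# columns are kept, i.e. num_cols / acceleration.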
class EquiSpacedMaskFunc(MaskFunc):
"""
Sample data with equally-spaced k-space lines.
    """
rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_348(self):
inp = '''.'''
fmt = '''(E3.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_349(self):
inp = '''.1'''
fmt = '''(E3.1E3)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_350(self):
inp = '''0.1D+200'''
fmt = '''(E3.1E3)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_351(self):
inp = '''3.'''
fmt = '''(E4.1E3)'''
result = [3.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_352(self):
inp = '''-3.'''
fmt = '''(E4.1E3)'''
result = [-3.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_353(self):
inp = '''10.'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_354(self):
inp = '''-10.'''
fmt = '''(E4.1E3)'''
result = [-1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_355(self):
inp = '''100.'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_356(self):
inp = '''-100.'''
fmt = '''(E4.1E3)'''
result = [-1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_357(self):
inp = '''1000.'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_358(self):
inp = '''-1000.'''
fmt = '''(E4.1E3)'''
result = [-1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_359(self):
inp = '''10000.'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_360(self):
inp = '''-10000.'''
fmt = '''(E4.1E3)'''
result = [-1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_361(self):
inp = '''100000.'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_362(self):
inp = '''-100000.'''
fmt = '''(E4.1E3)'''
result = [-1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_363(self):
inp = '''123456789.'''
fmt = '''(E4.1E3)'''
result = [1.2340000000000001e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_364(self):
inp = '''0.1'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_365(self):
inp = '''-0.1'''
fmt = '''(E4.1E3)'''
result = [-1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_366(self):
inp = '''0.01'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_367(self):
inp = '''-0.01'''
fmt = '''(E4.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_368(self):
inp = '''0.001'''
fmt = '''(E4.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_369(self):
inp = '''-0.001'''
fmt = '''(E4.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_370(self):
inp = '''0.0001'''
fmt = '''(E4.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_371(self):
inp = '''-0.0001'''
fmt = '''(E4.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_372(self):
inp = '''-1.96e-16'''
fmt = '''(E4.1E3)'''
result = [-1.8999999999999999e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_373(self):
inp = '''3.14159'''
fmt = '''(E4.1E3)'''
result = [3.1400000000000001e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_374(self):
inp = '''- 1.0'''
fmt = '''(E4.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_375(self):
inp = '''1e12'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000000e+11]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_376(self):
inp = '''1E12'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000000e+11]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_377(self):
inp = '''-1 e12'''
fmt = '''(E4.1E3)'''
result = [-1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_378(self):
inp = '''.'''
fmt = '''(E4.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_379(self):
inp = '''.1'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_380(self):
inp = '''0.1D+200'''
fmt = '''(E4.1E3)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_381(self):
inp = '''3.'''
fmt = '''(E5.1E3)'''
result = [3.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_382(self):
inp = '''-3.'''
fmt = '''(E5.1E3)'''
result = [-3.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_383(self):
inp = '''10.'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_384(self):
inp = '''-10.'''
fmt = '''(E5.1E3)'''
result = [-1.0000000000000000e+01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_385(self):
inp = '''100.'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_386(self):
inp = '''-100.'''
fmt = '''(E5.1E3)'''
result = [-1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_387(self):
inp = '''1000.'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000000e+03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_388(self):
inp = '''-1000.'''
fmt = '''(E5.1E3)'''
result = [-1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_389(self):
inp = '''10000.'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000000e+03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_390(self):
inp = '''-10000.'''
fmt = '''(E5.1E3)'''
result = [-1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_391(self):
inp = '''100000.'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000000e+03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_392(self):
inp = '''-100000.'''
fmt = '''(E5.1E3)'''
result = [-1.0000000000000000e+02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_393(self):
inp = '''123456789.'''
fmt = '''(E5.1E3)'''
result = [1.2345000000000000e+03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_394(self):
inp = '''0.1'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_395(self):
inp = '''-0.1'''
fmt = '''(E5.1E3)'''
result = [-1.0000000000000001e-01]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_396(self):
inp = '''0.01'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_397(self):
inp = '''-0.01'''
fmt = '''(E5.1E3)'''
result = [-1.0000000000000000e-02]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_398(self):
inp = '''0.001'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000000e-03]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_399(self):
inp = '''-0.001'''
fmt = '''(E5.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_400(self):
inp = '''0.0001'''
fmt = '''(E5.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_401(self):
inp = '''-0.0001'''
fmt = '''(E5.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_402(self):
inp = '''-1.96e-16'''
fmt = '''(E5.1E3)'''
result = [-1.9600000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_403(self):
inp = '''3.14159'''
fmt = '''(E5.1E3)'''
result = [3.1410000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_404(self):
inp = '''- 1.0'''
fmt = '''(E5.1E3)'''
result = [0.0000000000000000e+00]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_405(self):
inp = '''1e12'''
fmt = '''(E5.1E3)'''
result = [1.0000000000000000e+11]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
# Required for rest of hug scripts
from bitshares import BitShares
from bitshares.account import Account
from bitshares.amount import Amount
from bitshares.asset import Asset
from bitshares.blockchain import Blockchain
from bitshares.block import Block
from bitshares.dex import Dex
from bitshares.price import Price
from bitshares.market import Market
from bitshares.witness import Witness # Retrieving 1
from bitshares.witness import Witnesses # Listing many
from bitshares.proposal import Proposal # Retrieving 1
from bitshares.proposal import Proposals # Listing many
from bitshares.instance import shared_bitshares_instance # Used to reduce bitshares instance load
from bitshares.instance import set_shared_bitshares_instance # Used to reduce bitshares instance load
import bitshares
import hug
import requests
import pendulum
import math
import statistics
import uuid
import os
from bs4 import BeautifulSoup
import re
import json
from pycoingecko import CoinGeckoAPI
full_node_list = [
"wss://bitshares.openledger.info/ws",
"wss://na.openledger.info/ws",
"wss://api.bts.blckchnd.com",
"wss://eu.nodes.bitshares.ws",
"wss://us.nodes.bitshares.ws",
"wss://btsws.roelandp.nl/ws"
]
full_node_list_http = [
#"https://bitshares.crypto.fans/ws", #location: "Munich, Germany"
"https://bit.btsabc.org/ws", #location: "Hong Kong"
"https://api.bts.blckchnd.com", #location: "Falkenstein, Germany"
"https://openledger.hk/ws", #location: "Hong Kong"
"https://bitshares-api.wancloud.io/ws", #location: "China"
"https://dex.rnglab.org", #location: "Netherlands"
"https://dexnode.net/ws", #location: "Dallas, USA"
"https://kc-us-dex.xeldal.com/ws", #location: "Kansas City, USA"
"https://la.dexnode.net/ws", #location: "Los Angeles, USA"
]
bitshares_api_node = BitShares(full_node_list, nobroadcast=True) # True prevents TX being broadcast through the HUG REST API
set_shared_bitshares_instance(bitshares_api_node)
# End of node configuration
def google_analytics(request, function_name):
"""
    Tracking usage via Google Analytics (using the measurement protocol).
    Why? Because the only insight into the use of HUG currently is the access & error logs (insufficient).
"""
google_analytics_code = 'UA-117263042-1'
user_agent = str(request.user_agent)
user_source = str(request.referer)
user_request = str(request.uri)
headers = {'User-Agent': user_agent}
#function_dp = 'https://btsapi.grcnode.co.uk/' + function_name
payload = { 'v': 1,
'an': 'HUG',
'tid': google_analytics_code,
'cid': str(uuid.uuid4()),
't': 'pageview',
'ec': 'HUG',
'ds': 'HUG',
'el': 'HUG',
'ea': 'Action',
'dr': user_source,
'de': 'JSON',
'ua': user_agent,
'dt': function_name,
'dl': user_request,
'ev': 0}
try:
r = requests.post('https://www.google-analytics.com/collect', params=payload, headers=headers)
# r = requests.post('www.google-analytics.com/collect', data=payload) # Either data or params
except:
print("COULD NOT POST TO GOOGLE ANALYTICS!")
def check_api_token(api_key):
"""Check if the user's API key is valid. Change the API key if you want it to be private!"""
if (api_key == '123abc'):
return True
else:
return False
def request_json(input_data):
"""Request JSON data from full node, given request data input.
More info: http://docs.python-requests.org/en/master/"""
requested_data = None # Prevent no state if all servers fail!
for full_node_url in full_node_list_http:
try:
requested_data = requests.get(full_node_url, data=input_data)
except requests.exceptions.ConnectionError:
continue
        if requested_data.status_code != 200:
# Fail! Move onto the next URL!
continue
else:
# Stop iterating through the list of servers!
break
return requested_data
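# Hedged usage sketch (added for clarity; not part of the original script). The payload mirrors
# the JSON-RPC calls used further below.
def _example_request_json():
    payload = '{"jsonrpc": "2.0", "method": "get_committee_count", "params": [], "id": 1}'
    response = request_json(payload)
    return None if response is None else response.json()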
def extract_object(input_object):
"""Chunk of code to extract the inner JSON from objects.
    Required to reduce unnecessary lines in the HUG script & improve maintainability."""
temp_dict = {}
for item in input_object:
temp_dict[str(item)] = input_object[item]
return temp_dict
def get_hertz_feed(reference_timestamp, current_timestamp, period_days, phase_days, reference_asset_value, amplitude):
"""
Given the reference timestamp, the current timestamp, the period (in days), the phase (in days), the reference asset value (ie 1.00) and the amplitude (> 0 && < 1), output the current hertz value.
You can use this for an alternative HERTZ asset!
"""
hz_reference_timestamp = pendulum.parse(reference_timestamp).timestamp() # Retrieving the Bitshares2.0 genesis block timestamp
hz_period = pendulum.SECONDS_PER_DAY * period_days
hz_phase = pendulum.SECONDS_PER_DAY * phase_days
hz_waveform = math.sin(((((current_timestamp - (hz_reference_timestamp + hz_phase))/hz_period) % 1) * hz_period) * ((2*math.pi)/hz_period)) # Only change for an alternative HERTZ ABA.
hz_value = reference_asset_value + ((amplitude * reference_asset_value) * hz_waveform)
return hz_value
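# Hedged numeric check (added for clarity; not part of the original script). With the parameters
# used later in this file (28 day period, 0.908056 day phase, $1.00 reference, 0.14 amplitude),
# the sine term stays within [-1, 1], so the feed oscillates between roughly $0.86 and $1.14.
def _example_hertz_feed():
    value = get_hertz_feed("2015-10-13T14:12:24+00:00", pendulum.now().timestamp(),
                           28, 0.908056, 1.00, 0.14)
    assert 0.85 < value < 1.15
    return value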
@hug.get('/home', output=hug.output_format.html)
def root(request, hug_timer=60):
"""
Hertz price feed HTML page
"""
try:
google_analytics(request, 'hertz price feed page')
except:
return "<html><body><h4>Internal HUG Server error!</h4></body></html>"
hertz_json = get_hertz_value('123abc', request)
html_start = "<html><head><title>Hertz Price feed page!</title><meta name='viewport' content='width=device-width, initial-scale=1'><link rel='stylesheet' href='https://cdnjs.cloudflare.com/ajax/libs/pure/1.0.0/tables-min.css' integrity='sha256-V3z3FoW8AUbK98fJsgyLL7scF5dNrNStj6Rp8tPsJs0=' crossorigin='anonymous' /></head><body>"
table_start = "<h1>Hertz price feeds</h1><h2><a href='https://sites.google.com/view/hertz-aba/'>Hertz technical documentation</a></h2><h3>White Paper: <a href='https://steemit.com/bitshares/@cm-steem/hertz-is-now-live-on-the-bts-dex'>Steemit Post with PDF mirrors</a></h3><table class='pure-table pure-table-bordered'><thead><tr><th>Name</th><th>Timestamp</th><th>Settlement Price</th><th>CER</th><th>MCR</th><th>MSSR</th><th>URL</th></tr></thead><tbody>"
table_rows = ""
witness = hertz_json['witness_feeds']
# Unofficial reference row
unofficial_reference = hertz_json['unofficial_reference']
settlement_price_list = []
cer_list = []
for key, value in witness.items():
settlement_price = value['settlement_price']['api_calculated_rate']
if (settlement_price > 0):
settlement_price_list.append(settlement_price)
try:
witness_url = value['witness_url']
except:
witness_url = None
core_exchange_rate = value['core_exchange_rate']['api_calculated_rate']
cer_list.append(core_exchange_rate)
maintenance_collateral_ratio = value['maintenance_collateral_ratio']
maximum_short_squeeze_ratio = value['maximum_short_squeeze_ratio']
witness_name = value['witness_name']
parsed_timestamp = pendulum.parse(value['publish_timestamp'])
current_timestamp = pendulum.now()
time_difference = current_timestamp.diff(parsed_timestamp).in_minutes()
if (time_difference > 0):
time_difference_text = str(time_difference) + " Mins ago"
else:
time_difference_text = "< 1 Min ago"
usd_settlement_price = settlement_price * unofficial_reference['bts_price_in_usd']
if witness_url is None:
table_rows += "<tr><td><a href='http://open-explorer.io/#/accounts/" + str(witness_name) + "'>" + str(witness_name) + "</a></td><td>" + time_difference_text + "</td><td>" + "{0:.2f}".format(settlement_price) + " BTS ($" + "{0:.2f}".format(usd_settlement_price) + ")</td><td>" + "{0:.2f}".format(core_exchange_rate) + "</td><td>" + str(maintenance_collateral_ratio/10) + "%</td><td>" + str(maximum_short_squeeze_ratio/10) + "%</td><td>N/A</td></tr>"
else:
table_rows += "<tr><td><a href='http://open-explorer.io/#/accounts/" + str(witness_name) + "'>" + str(witness_name) + "</a></td><td>" + time_difference_text + "</td><td>" + "{0:.2f}".format(settlement_price) + " BTS ($" + "{0:.2f}".format(usd_settlement_price) + ")</td><td>" + "{0:.2f}".format(core_exchange_rate) + "</td><td>" + str(maintenance_collateral_ratio/10) + "%</td><td>" + str(maximum_short_squeeze_ratio/10) + "%</td><td><a href='" + str(witness_url) + "'>Link</a></td></tr>"
else:
continue
table_rows += "<tr><td>Unofficial reference</td><td>Now</td><td>" + "{0:.2f}".format(unofficial_reference['hertz_price_in_bts']) + "</td><td>" + "{0:.2f}".format(unofficial_reference['core_exchange_rate']) + "</td><td>200.0%</td><td>110.0%</td><td><a href='https://btsapi.grcnode.co.uk'>Link</a></td></tr>"
table_end = "</tbody></table></br>"
active_feeds = hertz_json['current_feeds']
if (active_feeds['settlement_price']['api_calculated_rate'] > 0):
hertz_status = "Active"
else:
hertz_status = "Not Active"
#active_details = "<h2>Price feed summary</h2><ul><li>Status: " + hertz_status + "</li><li>Settlement rate: " + "{0:.2f}".format(int(active_feeds['settlement_price']['api_calculated_rate'])/10) + "</li><li>CER: " + "{0:.2f}".format(int(active_feeds['core_exchange_rate']['api_calculated_rate'])/10) + "</li><li>MCR: " + "{0:.2f}".format(int(active_feeds['maintenance_collateral_ratio'])/10) + "</li><li>MSSR: " + "{0:.2f}".format((int(active_feeds['maximum_short_squeeze_ratio'])/10)) + "</li></ul>"
settlement_price_median = statistics.median(settlement_price_list)
cer_median = statistics.median(cer_list)
#extra_details = "<h2>Extra reference info</h2><ul><li>Median settle price: " + "{0:.2f}".format(settlement_price_median) + "</li><li>Median CER: " + "{0:.2f}".format(cer_median) + "</li><li>BTS price in USD: " + "{0:.2f}".format(unofficial_reference['bts_price_in_usd']) + "</li><li>USD price in BTS: " + "{0:.2f}".format(unofficial_reference['usd_price_in_bts']) + "</li><li> Hertz price in USD: " + "{0:.2f}".format(unofficial_reference['hertz_price_in_usd']) + "</li><li><a href='https://btsapi.grcnode.co.uk/get_hertz_value?api_key=123abc'>Hertz JSON price feed data</a></li></ul>"
extra_details = "<h2>Extra reference info</h2><ul><li>Median settle price: "
extra_details += "{0:.2f}".format(settlement_price_median)
extra_details += "</li><li>Median CER: "
extra_details += "{0:.2f}".format(cer_median)
extra_details += "</li><li>BTS price in USD: "
extra_details += "{0:.2f}".format(unofficial_reference['bts_price_in_usd'])
extra_details += "</li><li>USD price in BTS: "
extra_details += "{0:.2f}".format(unofficial_reference['usd_price_in_bts'])
extra_details += "</li><li> Hertz price in USD: "
extra_details += "{0:.2f}".format(unofficial_reference['hertz_price_in_usd'])
extra_details += "</li><li><a href='https://btsapi.grcnode.co.uk/get_hertz_value?api_key=123abc'>Hertz JSON price feed data</a></li></ul>"
html_end = "</body></html>"
output_html = html_start + table_start + table_rows + table_end + extra_details + html_end
return output_html
@hug.get(examples='api_key=API_KEY')
def get_hertz_value(api_key: hug.types.text, request, hug_timer=15):
"""Retrieve reference Hertz feed price value in JSON."""
if (check_api_token(api_key) == True): # Check the api key
google_analytics(request, 'get_hertz_value')
# Getting the value of USD in BTS
#market = Market("USD:BTS") # Set reference market to USD:BTS
#price = market.ticker()["quoteSettlement_price"] # Get Settlement price of USD
#price.invert() # Switching from quantity of BTS per USD to USD price of one BTS.
hertz_reference_timestamp = "2015-10-13T14:12:24+00:00" # Bitshares 2.0 genesis block timestamp
hertz_current_timestamp = pendulum.now().timestamp() # Current timestamp for reference within the hertz script
hertz_amplitude = 0.14 # 14% fluctuating the price feed $+-0.14 (1% per day)
hertz_period_days = 28 # Aka wavelength, time for one full SIN wave cycle.
hertz_phase_days = 0.908056 # Time offset from genesis till the first wednesday, to set wednesday as the primary Hz day.
hertz_reference_asset_value = 1.00 # $1.00 USD, not much point changing as the ratio will be the same.
hertz_value = get_hertz_feed(hertz_reference_timestamp, hertz_current_timestamp, hertz_period_days, hertz_phase_days, hertz_reference_asset_value, hertz_amplitude)
hertz = Price(hertz_value, "USD/HERTZ") # Limit the hertz_usd decimal places & convert from float.
cg = CoinGeckoAPI() # Initialise coingecko
bts_usd_coingecko = cg.get_price(ids='bitshares', vs_currencies='usd') # Price of BTS in USD from coingecko
bts_usd_coingecko_value = Price(bts_usd_coingecko["bitshares"]["usd"], "USD/BTS") # Price format
hertz_bts = bts_usd_coingecko_value.invert() * hertz.as_quote("HERTZ") # Feed price
unofficial_data = {
'hertz_price_in_usd': hertz['price'],
'hertz_price_in_bts': hertz_bts['price'],
'core_exchange_rate': hertz_bts['price']*0.80,
'usd_price_in_bts': 1/(bts_usd_coingecko["bitshares"]["usd"]),
'bts_price_in_usd': bts_usd_coingecko["bitshares"]["usd"]
}
########
try:
target_asset = Asset("HERTZ", full=True)
except:
return {'valid_asset': False,
'valid_key': True,
'took': float(hug_timer)}
try:
bitasset_data = target_asset['bitasset_data_id']
except:
bitasset_data = None
extracted_object = extract_object(target_asset)
bitasset_data = extracted_object['bitasset_data']
current_feeds = bitasset_data['current_feed']
current_feed_settlement_price = current_feeds['settlement_price']
current_feed_cer = current_feeds['core_exchange_rate']
if (int(current_feed_settlement_price['base']['amount']) > 0 and int(current_feed_settlement_price['quote']['amount']) > 0):
current_feed_settlement_price['api_calculated_rate'] = int(current_feed_settlement_price['quote']['amount'])/int(current_feed_settlement_price['base']['amount'])
else:
current_feed_settlement_price['api_calculated_rate'] = 0
if (int(current_feed_cer['base']['amount']) > 0 and int(current_feed_cer['quote']['amount']) > 0):
current_feed_cer['api_calculated_rate'] = int(current_feed_cer['quote']['amount'])/int(current_feed_cer['base']['amount'])
else:
current_feed_cer['api_calculated_rate'] = 0
witness_feeds = bitasset_data['feeds']
witness_feed_data = {}
witness_iterator = 0
for witness_feed in witness_feeds:
# Extract that data!
witness_id = witness_feed[0]
witness_iterator += 1
try:
target_account = Account(str(witness_id))
except:
print("Witness account doesn't work?!")
extracted_object = extract_object(target_account)
witness_name = extracted_object['name']
publish_timestamp = witness_feed[1][0]
feed_data = witness_feed[1][1]
settlement_price = feed_data['settlement_price']
if (int(settlement_price['quote']['amount']) > 0):
maintenance_collateral_ratio = feed_data['maintenance_collateral_ratio']
maximum_short_squeeze_ratio = feed_data['maximum_short_squeeze_ratio']
core_exchange_rate = feed_data['core_exchange_rate']
settlement_price_before = int(settlement_price['quote']['amount'])/int(settlement_price['base']['amount'])
core_exchange_rate_before = int(core_exchange_rate['quote']['amount'])/(int(core_exchange_rate['base']['amount']))
settlement_price['api_calculated_rate'] = settlement_price_before / 10
core_exchange_rate['api_calculated_rate'] = core_exchange_rate_before / 10
try:
target_witness = Witness(witness_name)
except:
target_witness = None
if (target_witness is not None):
witness_role_data = extract_object(target_witness)
witness_identity = witness_role_data['id']
witness_url = witness_role_data['url']
witness_feed_data[str(witness_iterator)] = {'witness_account_id': witness_id,
'witness_name': witness_name,
'witness_id': witness_identity,
'witness_url': witness_url,
'publish_timestamp': publish_timestamp,
'settlement_price': settlement_price,
'maintenance_collateral_ratio': maintenance_collateral_ratio,
'maximum_short_squeeze_ratio': maximum_short_squeeze_ratio,
'core_exchange_rate': core_exchange_rate}
else:
witness_feed_data[str(witness_iterator)] = {'witness_account_id': witness_id,
'witness_name': witness_name,
'witness_id': "N/A",
'witness_url': "#",
'publish_timestamp': publish_timestamp,
'settlement_price': settlement_price,
'maintenance_collateral_ratio': maintenance_collateral_ratio,
'maximum_short_squeeze_ratio': maximum_short_squeeze_ratio,
'core_exchange_rate': core_exchange_rate}
else:
continue
return {'unofficial_reference': unofficial_data,
'witness_feeds': witness_feed_data,
'current_feeds': current_feeds,
'valid_key': True,
'took': float(hug_timer)}
else:
# API KEY INVALID!
return {'valid_key': False,
'took': float(hug_timer)}
@hug.get(examples='object_id=1.2.0&api_key=API_KEY')
def get_bts_object(object_id: hug.types.text, api_key: hug.types.text, request, hug_timer=15):
"""Enable retrieving and displaying any BTS object in JSON."""
if (check_api_token(api_key) == True): # Check the api key
google_analytics(request, 'get_bts_object')
try:
retrieved_object = bitshares_api_node.rpc.get_objects([object_id])[0]
except:
return {'valid_object_id': False,
'valid_key': True,
'took': float(hug_timer)}
if retrieved_object is not None:
return {'retrieved_object': retrieved_object,
'valid_object_id': True,
'valid_key': True,
'took': float(hug_timer)}
else:
return {'valid_object_id': False,
'valid_key': True,
'took': float(hug_timer)}
else:
# API KEY INVALID!
return {'valid_key': False,
'took': float(hug_timer)}
@hug.get(examples='committee_id=1.5.10&api_key=API_KEY')
def get_committee_member(committee_id: hug.types.text, api_key: hug.types.text, request, hug_timer=15):
"""Retrieve information about a single committee member (inc full account details)!"""
if (check_api_token(api_key) == True): # Check the api key
google_analytics(request, 'get_committee_member')
if ("1.5." not in committee_id):
return {'valid_committee_id': False,
'valid_key': True,
'took': float(hug_timer)}
try:
target_committee_member = bitshares_api_node.rpc.get_objects([committee_id])[0]
except:
return {'valid_committee_id': False,
'valid_key': True,
'took': float(hug_timer)}
if target_committee_member is None:
return {'valid_committee_id': False,
'valid_key': True,
'took': float(hug_timer)}
target_account = Account(target_committee_member['committee_member_account'], full=True) # Full info!
target_account_data = extract_object(target_account)
active_committee_members = Blockchain().config()['active_committee_members']
if committee_id in active_committee_members:
target_account_data['status'] = True
else:
target_account_data['status'] = False
target_committee_member['committee_member_details'] = target_account_data
return {'get_committee_member': target_committee_member,
'valid_committee_id': True,
'valid_key': True,
'took': float(hug_timer)}
else:
# API KEY INVALID!
return {'valid_key': False,
'took': float(hug_timer)}
@hug.get(examples='api_key=API_KEY')
def get_committee_members(api_key: hug.types.text, request, hug_timer=15):
"""Get a list of all committee members!"""
if (check_api_token(api_key) == True): # Check the api key
google_analytics(request, 'get_committee_members')
# API KEY VALID
        num_committee_members_request = request_json('{"jsonrpc": "2.0", "method": "get_committee_count", "params": [], "id": 1}')
#######################################################################################
### This is the RESTful API I will use for the Mercantile application.
### It includes seven tables: CLIENT TRIPS, COMPANIES, DRIVERS, VEHICLES,
### DRIVER TRIPS, INDIVIDUAL TRIPS, and SELF TRIPS.
#######################################################################################
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import datetime
import os
import sys
import logging
app = Flask(__name__)
# Helps with the location of the directory
basedir = os.path.abspath(os.path.dirname(__file__))
# Which database will we fetch from?
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'crud.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
#######################################################################################
# Bind the SQLAlchemy and Marshmallow
class Trip(db.Model):
trip_id = db.Column(db.Integer, primary_key = True)
comp_id = db.Column(db.Integer, unique = False)
vehicle_id = db.Column(db.Unicode, unique = False)
driver_id = db.Column(db.Integer, unique = False)
start_date = db.Column(db.Unicode, unique = False)
departure = db.Column(db.Unicode, unique = False)
start_mileage = db.Column(db.Integer, unique = False)
end_date = db.Column(db.Unicode, unique = False)
destination = db.Column(db.Unicode, unique = False)
end_mileage = db.Column(db.Integer, unique = False)
client = db.Column(db.Unicode, unique = False)
status = db.Column(db.Unicode, unique = False)
trip_type = db.Column(db.Unicode, unique = False)
signature = db.Column(db.Unicode, unique = False)
departure_geo_lat = db.Column(db.Unicode, unique = False)
departure_geo_long = db.Column(db.Unicode, unique = False)
destination_geo_lat = db.Column(db.Unicode, unique = False)
destination_geo_long = db.Column(db.Unicode, unique = False)
def __init__(self, trip_type, comp_id, vehicle_id, driver_id, start_date,\
departure, start_mileage, client, departure_geo_lat, departure_geo_long, status = 'In Progress'):
self.comp_id = comp_id
self.vehicle_id = vehicle_id
self.driver_id = driver_id
self.start_date = start_date
self.departure = departure
self.start_mileage = start_mileage
self.client = client
self.status = status
self.trip_type = trip_type
self.departure_geo_lat = departure_geo_lat
self.departure_geo_long = departure_geo_long
class TripSchema(ma.Schema):
class Meta:
fields = ('trip_id','comp_id','vehicle_id','driver_id','start_date',\
'departure','start_mileage','end_date','destination',\
'end_mileage','client', 'status','trip_type','signature',\
'departure_geo_lat', 'departure_geo_long', 'destination_geo_lat', 'destination_geo_long')
trip_schema = TripSchema()
trips_schema = TripSchema(many = True)
#######################################################################################
class SelfDriverTrip(db.Model):
trip_id = db.Column(db.Integer, primary_key = True)
vehicle_id = db.Column(db.Unicode, unique = False)
start_date = db.Column(db.Unicode, unique = False)
start_mileage = db.Column(db.Integer, unique = False)
client = db.Column(db.Unicode, unique = False)
end_mileage = db.Column(db.Integer, unique = False)
end_date = db.Column(db.Unicode, unique = False)
email = db.Column(db.Unicode, unique = False)
license_number = db.Column(db.Unicode, unique = False)
phone = db.Column(db.Integer, unique = False)
address = db.Column(db.Unicode, unique = False)
purpose = db.Column(db.Unicode, unique = False)
signature = db.Column(db.Unicode, unique = False)
status = db.Column(db.Unicode, unique = False)
def __init__(self, vehicle_id, start_date, start_mileage, client,\
email, license_number, phone, address, purpose, status="In Progress"):
self.vehicle_id = vehicle_id
self.start_date = start_date
self.start_mileage = start_mileage
self.client = client
self.email = email
self.license_number = license_number
self.phone = phone
self.address = address
self.purpose = purpose
self.status = status
class SelfSchema(ma.Schema):
class Meta:
fields = ('trip_id','vehicle_id','start_date','start_mileage', 'client',\
'end_mileage','end_date','email','license_number','phone','address',\
'purpose','signature','status')
self_schema = SelfSchema()
selfs_schema = SelfSchema(many = True)
#######################################################################################
class DriverTrip(db.Model):
trip_id = db.Column(db.Integer, primary_key = True)
vehicle_id = db.Column(db.Unicode, unique = False)
driver_id = db.Column(db.Integer, unique = False)
start_date = db.Column(db.Unicode, unique = False)
departure = db.Column(db.Unicode, unique = False)
start_mileage = db.Column(db.Integer, unique = False)
end_date = db.Column(db.Unicode, unique = False)
destination = db.Column(db.Unicode, unique = False)
end_mileage = db.Column(db.Integer, unique = False)
status = db.Column(db.Unicode, unique = False)
departure_geo_lat = db.Column(db.Unicode, unique = False)
departure_geo_long = db.Column(db.Unicode, unique = False)
destination_geo_lat = db.Column(db.Unicode, unique = False)
destination_geo_long = db.Column(db.Unicode, unique = False)
def __init__(self, vehicle_id, driver_id, start_date, departure,\
start_mileage, departure_geo_lat, departure_geo_long, status = 'In Progress'):
self.vehicle_id = vehicle_id
self.driver_id = driver_id
self.start_date = start_date
self.departure = departure
self.start_mileage = start_mileage
self.status = status
self.departure_geo_lat = departure_geo_lat
self.departure_geo_long = departure_geo_long
class DriverTripSchema(ma.Schema):
class Meta:
fields = ('trip_id','vehicle_id','driver_id', 'start_date',\
'departure','start_mileage', 'end_date', 'destination', 'end_mileage',\
'status', 'departure_geo_lat', 'departure_geo_long', 'destination_geo_lat', 'destination_geo_long')
driver_trip_schema = DriverTripSchema()
driver_trip_schemas = DriverTripSchema(many = True)
#######################################################################################
class IndividualTrip(db.Model):
trip_id = db.Column(db.Integer, primary_key = True)
vehicle_id = db.Column(db.Unicode, unique = False)
driver_id = db.Column(db.Integer, unique = False)
start_date = db.Column(db.Unicode, unique = False)
departure = db.Column(db.Unicode, unique = False)
start_mileage = db.Column(db.Integer, unique = False)
end_date = db.Column(db.Unicode, unique = False)
destination = db.Column(db.Unicode, unique = False)
end_mileage = db.Column(db.Integer, unique = False)
client = db.Column(db.Unicode, unique = False)
status = db.Column(db.Unicode, unique = False)
trip_type = db.Column(db.Unicode, unique = False)
signature = db.Column(db.Unicode, unique = False)
departure_geo_lat = db.Column(db.Unicode, unique = False)
departure_geo_long = db.Column(db.Unicode, unique = False)
destination_geo_lat = db.Column(db.Unicode, unique = False)
destination_geo_long = db.Column(db.Unicode, unique = False)
def __init__(self, trip_type, vehicle_id, driver_id, start_date, departure,\
start_mileage, client, departure_geo_lat, departure_geo_long, status = 'In Progress'):
self.vehicle_id = vehicle_id
self.driver_id = driver_id
self.start_date = start_date
self.departure = departure
self.start_mileage = start_mileage
self.client = client
self.status = status
self.trip_type = trip_type
self.departure_geo_lat = departure_geo_lat
self.departure_geo_long = departure_geo_long
class IndividualTripSchema(ma.Schema):
class Meta:
fields = ('trip_id', 'vehicle_id', 'driver_id', 'start_date', 'departure',\
'start_mileage', 'end_date', 'destination', 'end_mileage',\
'client', 'status', 'trip_type','signature', 'departure_geo_lat',\
'departure_geo_long', 'destination_geo_lat', 'destination_geo_long')
individual_trip_schema = IndividualTripSchema()
individual_trip_schemas = IndividualTripSchema(many = True)
#######################################################################################
class Company(db.Model):
comp_id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.Unicode, unique = True)
def __init__(self, name):
self.name = name
class CompanySchema(ma.Schema):
class Meta:
fields = ('comp_id', 'name')
comp_schema = CompanySchema()
comps_schema = CompanySchema(many = True)
#######################################################################################
class Driver(db.Model):
driver_id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.Unicode, unique = True)
password = db.Column(db.Unicode)
company_id = db.Column(db.Unicode, unique = True)
license_number = db.Column(db.Unicode, unique = True)
email = db.Column(db.Unicode, unique = True)
status = db.Column(db.Unicode)
phone = db.Column(db.Integer)
current_trip = db.Column(db.Integer)
current_trip_type = db.Column(db.Unicode)
def __init__(self, name, password, company_id, license_number, email, phone, status="Available"):
self.name = name
self.password = password
self.company_id = company_id
self.license_number = license_number
self.email = email
self.status = status
self.phone = phone
class DriverSchema(ma.Schema):
class Meta:
fields = ('driver_id', 'name', 'password', 'company_id','license_number','email',\
'status','phone','current_trip','current_trip_type')
driver_schema = DriverSchema()
drivers_schema = DriverSchema(many = True)
#######################################################################################
class Vehicle(db.Model):
vehicle_id = db.Column(db.Unicode, primary_key = True)
make = db.Column(db.Unicode)
current_mileage = db.Column(db.Integer)
status = db.Column(db.Unicode)
def __init__(self, vehicle_id, make, current_mileage, status = "Available"):
self.vehicle_id = vehicle_id
self.make = make
self.current_mileage = current_mileage
self.status = status
class VehicleSchema(ma.Schema):
class Meta:
fields = ('vehicle_id', 'make', 'current_mileage', 'status')
vehicle_schema = VehicleSchema()
vehicles_schema = VehicleSchema(many = True)
#######################################################################################
########### TRIPS ################# TRIPS #################### TRIPS ##################
# Endpoint to create new trips
@app.route("/trip", methods = ["POST"])
def add_trip():
# Start info
comp_id = request.json['comp_id']
vehicle_id = request.json['vehicle_id']
driver_id = request.json['driver_id']
start_date = request.json['start_date']
departure = request.json['departure']
start_mileage = request.json['start_mileage']
client = request.json['client']
trip_type = request.json['trip_type']
departure_geo_lat = request.json['departure_geo_lat']
departure_geo_long = request.json['departure_geo_long']
new_trip = Trip(trip_type, comp_id, vehicle_id, driver_id, \
start_date, departure, start_mileage, client, departure_geo_lat, departure_geo_long)
db.session.add(new_trip)
db.session.commit()
return trip_schema.jsonify(new_trip)
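# Example request for the endpoint above (all values are purely illustrative;
# adjust the host, port and field values to your own deployment):
#   curl -X POST http://localhost:5000/trip \
#        -H "Content-Type: application/json" \
#        -d '{"comp_id": 1, "vehicle_id": "KAA123A", "driver_id": 7,
#             "start_date": "2020-01-01", "departure": "Main Depot",
#             "start_mileage": 10250, "client": "Acme Ltd",
#             "trip_type": "delivery", "departure_geo_lat": "-1.2921",
#             "departure_geo_long": "36.8219"}'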
#######################################################################################
# Endpoint to show all client trips
@app.route("/trip", methods = ["GET"])
def get_trips():
all_trips = Trip.query.all()
result = trips_schema.dump(all_trips)
return jsonify(result.data)
#######################################################################################
# Endpoint to get trip detail by id
@app.route("/trip/<trip_id>", methods = ["GET"])
def trip_detail(trip_id):
trip = Trip.query.get(trip_id)
return trip_schema.jsonify(trip)
#######################################################################################
# Endpoint to update trip
@app.route("/trip/<trip_id>", methods = ["PUT"])
def trip_update(trip_id):
try:
trip = Trip.query.get(trip_id)
comp_id = request.json['comp_id']
vehicle_id = request.json['vehicle_id']
driver_id = request.json['driver_id']
start_date = request.json['start_date']
departure = request.json['departure']
start_mileage = request.json['start_mileage']
client = request.json['client']
end_date = request.json['end_date']
destination = request.json['destination']
end_mileage = request.json['end_mileage']
status = request.json['status']
trip_type = request.json['trip_type']
destination_geo_lat = request.json['destination_geo_lat']
destination_geo_long = request.json['destination_geo_long']
trip.comp_id = comp_id
trip.vehicle_id = vehicle_id
trip.driver_id = driver_id
trip.start_date = start_date
trip.departure = departure
trip.start_mileage = start_mileage
trip.client = client
trip.end_date = end_date
trip.destination = destination
trip.end_mileage = end_mileage
trip.status = status
trip.trip_type = trip_type
trip.destination_geo_lat = destination_geo_lat
trip.destination_geo_long = destination_geo_long
except Exception as e:
return {
'error':'An error occurred, %s' % e
}
db.session.commit()
return trip_schema.jsonify(trip)
#######################################################################################
# Endpoint to update trip signature
@app.route("/trip/signature/<trip_id>", methods=["PUT"])
def trip_signature(trip_id):
trip = Trip.query.get(trip_id)
signature = request.json['signature']
trip.signature = signature
db.session.commit()
return trip_schema.jsonify(trip)
#######################################################################################
# Endpoint to delete trip
@app.route("/trip/<trip_id>", methods = ["DELETE"])
def trip_delete(trip_id):
trip = Trip.query.get(trip_id)
db.session.delete(trip)
db.session.commit()
return trip_schema.jsonify(trip)
#######################################################################################
############ SELF___TRIP ############ SELF___TRIP ############ SELF___TRIP ############
# Endpoint to create a new self trip
@app.route("/self", methods = ["POST"])
def self_new():
vehicle_id = request.json['vehicle_id']
start_date = request.json['start_date']
start_mileage = request.json['start_mileage']
client = request.json['client']
email = request.json['email']
license_number = request.json['license_number']
phone = request.json['phone']
address = request.json['address']
purpose = request.json['purpose']
new_trip = SelfDriverTrip(vehicle_id, start_date, start_mileage,\
client, email, license_number, phone, address, purpose)
db.session.add(new_trip)
db.session.commit()
return self_schema.jsonify(new_trip)
#######################################################################################
# Endpoint for all self trips
@app.route("/self", methods=["GET"])
def self_get_trips():
all_trips = SelfDriverTrip.query.all()
result = selfs_schema.dump(all_trips)
return jsonify(result.data)
#######################################################################################
# Endpoint for a particular self trip
@app.route("/self/<trip_id>", methods=["GET"])
def self_trip_detail(trip_id):
trip = SelfDriverTrip.query.get(trip_id)
return self_schema.jsonify(trip)
#######################################################################################
@app.route("/self/<trip_id>", methods=["PUT"])
def self_update(trip_id):
trip = SelfDriverTrip.query.get(trip_id)
vehicle_id = request.json['vehicle_id']
start_date = request.json['start_date']
start_mileage = request.json['start_mileage']
client = request.json['client']
end_mileage = request.json['end_mileage']
end_date = request.json['end_date']
email = request.json['email']
license_number = request.json['license_number']
phone = request.json['phone']
address = request.json['address']
purpose = request.json['purpose']
status = request.json['status']
trip.vehicle_id = vehicle_id
trip.start_date = | |
pass
return field_value
def _make_repr_table_from_sframe(X):
"""
    Serializes an SFrame to a list of strings that, when printed, create a well-formatted table.
"""
assert isinstance(X, _SFrame)
column_names = X.column_names()
out_data = [[None] * len(column_names) for i in range(X.num_rows())]
column_sizes = [len(s) for s in column_names]
for i, c in enumerate(column_names):
for j, e in enumerate(X[c]):
out_data[j][i] = str(e)
            column_sizes[i] = max(column_sizes[i], len(str(e)))
# now, go through and pad everything.
out_data = [
[cn.ljust(k, " ") for cn, k in zip(column_names, column_sizes)],
["-" * k for k in column_sizes],
] + [[e.ljust(k, " ") for e, k in zip(row, column_sizes)] for row in out_data]
return [" ".join(row) for row in out_data]
def _toolkit_repr_print(model, fields, section_titles, width=None, class_name="auto"):
"""
Display a toolkit repr according to some simple rules.
Parameters
----------
model : Turi Create model
fields: List of lists of tuples
Each tuple should be (display_name, field_name), where field_name can
be a string or a _precomputed_field object.
section_titles: List of section titles, one per list in the fields arg.
Example
-------
model_fields = [
("L1 penalty", 'l1_penalty'),
("L2 penalty", 'l2_penalty'),
("Examples", 'num_examples'),
("Features", 'num_features'),
("Coefficients", 'num_coefficients')]
solver_fields = [
("Solver", 'solver'),
("Solver iterations", 'training_iterations'),
("Solver status", 'training_solver_status'),
("Training time (sec)", 'training_time')]
training_fields = [
("Log-likelihood", 'training_loss')]
    fields = [model_fields, solver_fields, training_fields]
section_titles = ['Model description',
'Solver description',
'Training information']
_toolkit_repr_print(model, fields, section_titles)
"""
assert len(section_titles) == len(
fields
), "The number of section titles ({0}) ".format(
len(section_titles)
) + "doesn't match the number of groups of fields, {0}.".format(
len(fields)
)
if class_name == "auto":
out_fields = [("Class", model.__class__.__name__), ""]
else:
out_fields = [("Class", class_name), ""]
# Record the max_width so that if width is not provided, we calculate it.
max_width = len("Class")
for index, (section_title, field_list) in enumerate(zip(section_titles, fields)):
# Add in the section header.
out_fields += [section_title, "-" * len(section_title)]
# Add in all the key-value pairs
for f in field_list:
if isinstance(f, tuple):
f = (str(f[0]), f[1])
out_fields.append((f[0], __extract_model_summary_value(model, f[1])))
max_width = max(max_width, len(f[0]))
elif isinstance(f, _SFrame):
out_fields.append("")
out_fields += _make_repr_table_from_sframe(f)
out_fields.append("")
else:
raise TypeError("Type of field %s not recognized." % str(f))
# Add in the empty footer.
out_fields.append("")
if width is None:
width = max_width
# Now, go through and format the key_value pairs nicely.
def format_key_pair(key, value):
if type(key) is list:
key = ",".join(str(k) for k in key)
return key.ljust(width, " ") + " : " + str(value)
out_fields = [s if type(s) is str else format_key_pair(*s) for s in out_fields]
return "\n".join(out_fields)
def _map_unity_proxy_to_object(value):
"""
    Map a returned value: if it is a unity SFrame/SArray proxy, convert it to the
    corresponding Python object; recurse into lists and dicts.
"""
vtype = type(value)
if vtype in _proxy_map:
return _proxy_map[vtype](value)
elif vtype == list:
return [_map_unity_proxy_to_object(v) for v in value]
elif vtype == dict:
return {k: _map_unity_proxy_to_object(v) for k, v in value.items()}
else:
return value
def _toolkits_select_columns(dataset, columns):
"""
    Same as select_columns, but redirects RuntimeError to a ToolkitError.
"""
try:
return dataset.select_columns(columns)
except RuntimeError:
missing_features = list(set(columns).difference(set(dataset.column_names())))
raise ToolkitError(
"Input data does not contain the following columns: "
+ "{}".format(missing_features)
)
def _raise_error_if_column_exists(
dataset,
column_name="dataset",
dataset_variable_name="dataset",
column_name_error_message_name="column_name",
):
"""
Check if a column exists in an SFrame with error message.
"""
err_msg = "The SFrame {0} must contain the column {1}.".format(
dataset_variable_name, column_name_error_message_name
)
if column_name not in dataset.column_names():
raise ToolkitError(str(err_msg))
def _check_categorical_option_type(option_name, option_value, possible_values):
"""
Check whether or not the requested option is one of the allowed values.
"""
err_msg = "{0} is not a valid option for {1}. ".format(option_value, option_name)
err_msg += " Expected one of: ".format(possible_values)
err_msg += ", ".join(map(str, possible_values))
if option_value not in possible_values:
raise ToolkitError(err_msg)
def _raise_error_if_not_sarray(dataset, variable_name="SArray"):
"""
Check if the input is an SArray. Provide a proper error
message otherwise.
"""
err_msg = "Input %s is not an SArray."
if not isinstance(dataset, _SArray):
raise ToolkitError(err_msg % variable_name)
def _raise_error_if_sarray_not_expected_dtype(sa, name, types):
err_msg = (
"Column '%s' cannot be of type %s. Expecting a column of type in [%s]."
% (name, sa.dtype.__name__, ", ".join([x.__name__ for x in types]))
)
if sa.dtype not in types:
raise ToolkitError(err_msg)
def _raise_error_if_not_sframe(dataset, variable_name="SFrame"):
"""
Check if the input is an SFrame. Provide a proper error
message otherwise.
"""
err_msg = "Input %s is not an SFrame. If it is a Pandas DataFrame,"
err_msg += " you may use the to_sframe() function to convert it to an SFrame."
if not isinstance(dataset, _SFrame):
raise ToolkitError(err_msg % variable_name)
def _raise_error_if_sframe_empty(dataset, variable_name="SFrame"):
"""
Check if the input is empty.
"""
err_msg = "Input %s either has no rows or no columns. A non-empty SFrame "
err_msg += "is required."
if dataset.num_rows() == 0 or dataset.num_columns() == 0:
raise ToolkitError(err_msg % variable_name)
def _raise_error_if_not_iterable(dataset, variable_name="SFrame"):
"""
Check if the input is iterable.
"""
err_msg = "Input %s is not iterable: hasattr(%s, '__iter__') must be true."
if not hasattr(dataset, "__iter__"):
raise ToolkitError(err_msg % variable_name)
def _raise_error_evaluation_metric_is_valid(metric, allowed_metrics):
"""
    Check that the requested evaluation metric is supported. Provide a proper
    error message otherwise.
"""
err_msg = "Evaluation metric '%s' not recognized. The supported evaluation"
err_msg += " metrics are (%s)."
if metric not in allowed_metrics:
raise ToolkitError(
err_msg % (metric, ", ".join(map(lambda x: "'%s'" % x, allowed_metrics)))
)
def _numeric_param_check_range(variable_name, variable_value, range_bottom, range_top):
"""
Checks if numeric parameter is within given range
"""
err_msg = "%s must be between %i and %i"
if variable_value < range_bottom or variable_value > range_top:
raise ToolkitError(err_msg % (variable_name, range_bottom, range_top))
def _validate_data(dataset, target, features=None, validation_set="auto"):
"""
Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception.
"""
_raise_error_if_not_sframe(dataset, "training dataset")
# Determine columns to keep
if features is None:
features = [feat for feat in dataset.column_names() if feat != target]
if not hasattr(features, "__iter__"):
raise TypeError("Input 'features' must be a list.")
if not all([isinstance(x, str) for x in features]):
raise TypeError("Invalid 'features' value. Feature names must all be of type str.")
# Check validation_set argument
if isinstance(validation_set, str):
# Only string value allowed is 'auto'
if validation_set != "auto":
raise TypeError("Unrecognized value for validation_set.")
elif isinstance(validation_set, _SFrame):
# Attempt to append the two datasets together to check schema
validation_set.head().append(dataset.head())
# Reduce validation set to requested columns
validation_set = _toolkits_select_columns(validation_set, features + [target])
elif not validation_set is None:
raise TypeError(
"validation_set must be either 'auto', None, or an "
"SFrame matching the training data."
)
# Reduce training set to requested columns
dataset = _toolkits_select_columns(dataset, features + [target])
return dataset, validation_set
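# Hedged usage sketch for _validate_data (the column names below are invented
# for illustration and are not part of any real dataset):
#
#   sf = _SFrame({'price': [1.0, 2.0, 3.0], 'sqft': [800, 950, 1100]})
#   train, valid = _validate_data(sf, target='price', features=['sqft'],
#                                 validation_set='auto')
#   # `train` keeps only the 'sqft' and 'price' columns; `valid` is passed
#   # through unchanged as the string 'auto'.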
def _handle_missing_values(dataset, feature_column_name, variable_name="dataset"):
if any(feat is None for feat in dataset[feature_column_name]):
raise ToolkitError(
"Missing value (None) encountered in column "
+ str(feature_column_name)
+ " in the "
+ variable_name
+ ". Use the SFrame's dropna function to drop rows with 'None' values in them."
)
def _validate_row_label(dataset, label=None, default_label="__id"):
"""
Validate a row label column. If the row label is not specified, a column is
created with row numbers, named with the string in the `default_label`
parameter.
Parameters
----------
dataset : SFrame
Input dataset.
label : str, optional
Name of the column containing row labels.
default_label : str, optional
The default column name if `label` is not specified. A column with row
numbers is added to the output SFrame in this case.
Returns
-------
dataset : SFrame
The input dataset, but with an additional row label column, *if* there
was no input label.
label : str
The final label | |
# param1: ptr to matrix 1
# param2: ptr to matrix 2
# param3: dim_x of matrix
# param4: dim_y of matrix
# param5: output ptr
builder = _setup_builtin_func_builder(ctx, "mat_add", (double_ptr_ty, double_ptr_ty, ctx.int32_ty, ctx.int32_ty, double_ptr_ty))
m1, m2, dim_x, dim_y, o = builder.function.args
with helpers.for_loop_zero_inc(builder, dim_x, "zero") as (b1, x):
with helpers.for_loop_zero_inc(b1, dim_y, "zero_inner") as (b2, y):
matrix_index = b2.mul(x, dim_y)
matrix_index = b2.add(matrix_index, y)
m1_ptr = b2.gep(m1, [matrix_index])
m2_ptr = b2.gep(m2, [matrix_index])
o_ptr = b2.gep(o, [matrix_index])
m1_val = b2.load(m1_ptr)
m2_val = b2.load(m2_ptr)
o_val = b2.fadd(m1_val, m2_val)
b2.store(o_val, o_ptr)
builder.ret_void()
def setup_is_close(ctx):
builder = _setup_builtin_func_builder(ctx, "is_close", [ctx.float_ty,
ctx.float_ty,
ctx.float_ty,
ctx.float_ty],
return_type=ctx.bool_ty)
val1, val2, rtol, atol = builder.function.args
fabs_f = ctx.get_builtin("fabs", [val2.type])
diff = builder.fsub(val1, val2, "is_close_diff")
abs_diff = builder.call(fabs_f, [diff], "is_close_abs")
abs2 = builder.call(fabs_f, [val2], "abs_val2")
rtol = builder.fmul(rtol, abs2, "is_close_rtol")
tol = builder.fadd(rtol, atol, "is_close_atol")
res = builder.fcmp_ordered("<=", abs_diff, tol, "is_close_cmp")
builder.ret(res)
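# Reference semantics of the is_close builtin above, as a plain-Python sketch
# (not part of the generated IR). It mirrors numpy.isclose's default test:
#   abs(val1 - val2) <= atol + rtol * abs(val2)
def _is_close_reference(val1, val2, rtol, atol):
    return abs(val1 - val2) <= atol + rtol * abs(val2)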
def setup_csch(ctx):
builder = _setup_builtin_func_builder(ctx, "csch", (ctx.float_ty,),
return_type=ctx.float_ty)
x = builder.function.args[0]
exp_f = ctx.get_builtin("exp", [x.type])
# (2e**x)/(e**2x - 1)
ex = builder.call(exp_f, [x])
num = builder.fmul(ex.type(2), ex)
_2x = builder.fmul(x.type(2), x)
e2x = builder.call(exp_f, [_2x])
den = builder.fsub(e2x, e2x.type(1))
res = builder.fdiv(num, den)
builder.ret(res)
def setup_tanh(ctx):
builder = _setup_builtin_func_builder(ctx, "tanh", (ctx.float_ty,),
return_type=ctx.float_ty)
x = builder.function.args[0]
exp_f = ctx.get_builtin("exp", [x.type])
# (e**2x - 1)/(e**2x + 1)
_2x = builder.fmul(x.type(2), x)
e2x = builder.call(exp_f, [_2x])
num = builder.fsub(e2x, e2x.type(1))
den = builder.fadd(e2x, e2x.type(1))
res = builder.fdiv(num, den)
builder.ret(res)
def setup_coth(ctx):
builder = _setup_builtin_func_builder(ctx, "coth", (ctx.float_ty,),
return_type=ctx.float_ty)
x = builder.function.args[0]
exp_f = ctx.get_builtin("exp", [x.type])
# (e**2x + 1)/(e**2x - 1)
_2x = builder.fmul(x.type(2), x)
e2x = builder.call(exp_f, [_2x])
num = builder.fadd(e2x, e2x.type(1))
den = builder.fsub(e2x, e2x.type(1))
res = builder.fdiv(num, den)
builder.ret(res)
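# Quick numeric sanity check of the exp-based identities used by the csch,
# tanh and coth builtins above (a standalone sketch, not generated IR):
#
#   import math
#   x = 0.7
#   assert math.isclose((math.exp(2*x) - 1) / (math.exp(2*x) + 1), math.tanh(x))
#   assert math.isclose((math.exp(2*x) + 1) / (math.exp(2*x) - 1), 1 / math.tanh(x))  # coth
#   assert math.isclose(2 * math.exp(x) / (math.exp(2*x) - 1), 1 / math.sinh(x))      # csch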
def setup_pnl_intrinsics(ctx):
# Setup types
single_intr_ty = ir.FunctionType(ctx.float_ty, [ctx.float_ty])
double_intr_ty = ir.FunctionType(ctx.float_ty, (ctx.float_ty, ctx.float_ty))
# Create function declarations
ir.Function(ctx.module, single_intr_ty, name=_BUILTIN_PREFIX + "exp")
ir.Function(ctx.module, single_intr_ty, name=_BUILTIN_PREFIX + "log")
ir.Function(ctx.module, double_intr_ty, name=_BUILTIN_PREFIX + "pow")
def _generate_intrinsic_wrapper(module, name, ret, args):
intrinsic = module.declare_intrinsic("llvm." + name, list(set(args)))
func_ty = ir.FunctionType(ret, args)
function = ir.Function(module, func_ty, name=_BUILTIN_PREFIX + name)
function.attributes.add('alwaysinline')
block = function.append_basic_block(name="entry")
builder = ir.IRBuilder(block)
builder.debug_metadata = LLVMBuilderContext.get_debug_location(function, None)
builder.ret(builder.call(intrinsic, function.args))
def _generate_cpu_builtins_module(_float_ty):
"""Generate function wrappers for log, exp, and pow intrinsics."""
module = ir.Module(name="cpu_builtins")
for intrinsic in ('exp', 'log'):
_generate_intrinsic_wrapper(module, intrinsic, _float_ty, [_float_ty])
_generate_intrinsic_wrapper(module, "pow", _float_ty, [_float_ty, _float_ty])
return module
_MERSENNE_N = 624
_MERSENNE_M = 397
def _setup_mt_rand_init_scalar(ctx, state_ty):
seed_ty = state_ty.elements[0].element
builder = _setup_builtin_func_builder(ctx, "mt_rand_init_scalar", (state_ty.as_pointer(), seed_ty))
state, seed = builder.function.args
array = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0)])
# Store seed to the 0-th element
a_0 = builder.gep(array, [ctx.int32_ty(0), ctx.int32_ty(0)])
seed_lo = builder.and_(seed, seed.type(0xffffffff))
seed_lo = builder.trunc(seed_lo, a_0.type.pointee)
builder.store(seed_lo, a_0)
# clear gauss helpers
last_g_avail = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(2)])
builder.store(last_g_avail.type.pointee(0), last_g_avail)
last_g = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(3)])
builder.store(last_g.type.pointee(0), last_g)
with helpers.for_loop(builder,
ctx.int32_ty(1),
ctx.int32_ty(_MERSENNE_N),
ctx.int32_ty(1), "init_seed") as (b, i):
a_i = b.gep(array, [ctx.int32_ty(0), i])
i_m1 = b.sub(i, ctx.int32_ty(1))
a_i_m1 = b.gep(array, [ctx.int32_ty(0), i_m1])
val = b.load(a_i_m1)
val_shift = b.lshr(val, val.type(30))
val = b.xor(val, val_shift)
val = b.mul(val, val.type(1812433253))
i_ext = b.zext(i, val.type)
val = b.add(val, i_ext)
val = b.and_(val, val.type(0xffffffff))
b.store(val, a_i)
pidx = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)])
builder.store(pidx.type.pointee(_MERSENNE_N), pidx)
seed_p = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(4)])
builder.store(seed, seed_p)
builder.ret_void()
return builder.function
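# Pure-Python sketch of the seeding recurrence emitted above (the init_genrand
# step of mt19937ar.c); included for reference only, it is not used by the IR
# generation itself.
def _mt_seed_reference(seed, n=_MERSENNE_N):
    mt = [seed & 0xffffffff]
    for i in range(1, n):
        prev = mt[i - 1]
        mt.append((1812433253 * (prev ^ (prev >> 30)) + i) & 0xffffffff)
    return mt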
def _setup_mt_rand_init(ctx, state_ty, init_scalar):
seed_ty = state_ty.elements[0].element
builder = _setup_builtin_func_builder(ctx, "mt_rand_init", (state_ty.as_pointer(), seed_ty))
state, seed = builder.function.args
default_seed = seed.type(19650218)
builder.call(init_scalar, [state, default_seed])
# python considers everything to be an array
key_array = builder.alloca(ir.ArrayType(seed.type, 1))
key_p = builder.gep(key_array, [ctx.int32_ty(0), ctx.int32_ty(0)])
builder.store(seed, key_p)
pi = builder.alloca(ctx.int32_ty)
builder.store(ctx.int32_ty(1), pi)
pj = builder.alloca(ctx.int32_ty)
builder.store(ctx.int32_ty(0), pj)
array = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0)])
a_0 = builder.gep(array, [ctx.int32_ty(0), ctx.int32_ty(0)])
# This loop should go from max(N, len(key)) -> 0,
# but we know the key length so we can hardcode it
with helpers.for_loop_zero_inc(builder, ctx.int32_ty(_MERSENNE_N),
"add_key") as (b, _):
i = builder.load(pi)
i_m1 = b.sub(i, ctx.int32_ty(1))
pa_i = b.gep(array, [ctx.int32_ty(0), i])
pa_i_m1 = b.gep(array, [ctx.int32_ty(0), i_m1])
# Load key element
j = b.load(pj)
pkey = b.gep(key_array, [ctx.int32_ty(0), j])
# Update key index
j_new = b.add(j, ctx.int32_ty(1))
j_ovf = b.icmp_unsigned(">=", j_new, ctx.int32_ty(1))
j_new = b.select(j_ovf, ctx.int32_ty(0), j_new)
b.store(j_new, pj)
# Mix in the key
val = b.load(pa_i_m1)
val = b.xor(val, b.lshr(val, val.type(30)))
val = b.mul(val, val.type(1664525))
val = b.xor(b.load(pa_i), val)
val = b.add(val, b.load(pkey))
val = b.add(val, b.zext(j, val.type))
val = b.and_(val, val.type(0xffffffff))
b.store(val, pa_i)
# Update the index
i = b.add(i, ctx.int32_ty(1))
b.store(i, pi)
i_ovf = b.icmp_unsigned(">=", i, ctx.int32_ty(_MERSENNE_N))
with b.if_then(i_ovf, likely=False):
b.store(ctx.int32_ty(1), pi)
b.store(val, a_0)
with helpers.for_loop_zero_inc(builder,
ctx.int32_ty(_MERSENNE_N - 1),
"second_shuffle") as (b, _):
i = builder.load(pi)
i_m1 = b.sub(i, ctx.int32_ty(1))
pa_i = b.gep(array, [ctx.int32_ty(0), i])
pa_i_m1 = b.gep(array, [ctx.int32_ty(0), i_m1])
val = b.load(pa_i_m1)
val = b.xor(val, b.lshr(val, val.type(30)))
val = b.mul(val, val.type(1566083941))
val = b.xor(b.load(pa_i), val)
val = b.sub(val, b.zext(i, val.type))
val = b.and_(val, val.type(0xffffffff))
b.store(val, pa_i)
# Update the index
i = b.add(i, ctx.int32_ty(1))
b.store(i, pi)
i_ovf = b.icmp_unsigned(">=", i, ctx.int32_ty(_MERSENNE_N))
with b.if_then(i_ovf, likely=False):
b.store(ctx.int32_ty(1), pi)
b.store(val, a_0)
# set the 0th element to INT_MIN
builder.store(a_0.type.pointee(0x80000000), a_0)
# store used seed
used_seed_p = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(4)])
builder.store(seed, used_seed_p)
builder.ret_void()
return builder.function
def _setup_mt_rand_integer(ctx, state_ty):
int64_ty = ir.IntType(64)
# Generate random number generator function.
    # It produces a random 32-bit number in a 64-bit word
builder = _setup_builtin_func_builder(ctx, "mt_rand_int32", (state_ty.as_pointer(), int64_ty.as_pointer()))
state, out = builder.function.args
array = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0)])
pidx = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)])
idx = builder.load(pidx)
cond = builder.icmp_signed(">=", idx, ctx.int32_ty(_MERSENNE_N))
with builder.if_then(cond, likely=False):
mag01 = ir.ArrayType(array.type.pointee.element, 2)([0, 0x9908b0df])
pmag01 = builder.alloca(mag01.type)
builder.store(mag01, pmag01)
with helpers.for_loop_zero_inc(builder,
ctx.int32_ty(_MERSENNE_N - _MERSENNE_M),
"first_half") as (b, kk):
pkk = b.gep(array, [ctx.int32_ty(0), kk])
pkk_1 = b.gep(array, [ctx.int32_ty(0), b.add(kk, ctx.int32_ty(1))])
val_kk = b.and_(b.load(pkk), pkk.type.pointee(0x80000000))
val_kk_1 = b.and_(b.load(pkk_1), pkk_1.type.pointee(0x7fffffff))
val = b.or_(val_kk, val_kk_1)
val_1 = b.and_(val, val.type(1))
pval_mag = b.gep(pmag01, [ctx.int32_ty(0), val_1])
val_mag = b.load(pval_mag)
val_shift = b.lshr(val, val.type(1))
kk_m = b.add(kk, ctx.int32_ty(_MERSENNE_M))
pval_kk_m = b.gep(array, [ctx.int32_ty(0), kk_m])
val_kk_m = b.load(pval_kk_m)
val = b.xor(val_kk_m, val_shift)
val = b.xor(val, val_mag)
b.store(val, pkk)
with helpers.for_loop(builder,
ctx.int32_ty(_MERSENNE_N - _MERSENNE_M),
ctx.int32_ty(_MERSENNE_N),
ctx.int32_ty(1), "second_half") as (b, kk):
pkk = b.gep(array, [ctx.int32_ty(0), kk])
is_last = b.icmp_unsigned("==", kk, ctx.int32_ty(_MERSENNE_N - 1))
idx_1 = b.select(is_last, ctx.int32_ty(0), b.add(kk, ctx.int32_ty(1)))
pkk_1 = b.gep(array, [ctx.int32_ty(0), idx_1])
val_kk = b.and_(b.load(pkk), pkk.type.pointee(0x80000000))
val_kk_1 = b.and_(b.load(pkk_1), pkk.type.pointee(0x7fffffff))
val = b.or_(val_kk, val_kk_1)
val_1 = b.and_(val, val.type(1))
pval_mag = b.gep(pmag01, [ctx.int32_ty(0), val_1])
val_mag = b.load(pval_mag)
val_shift = b.lshr(val, val.type(1))
kk_m = b.add(kk, ctx.int32_ty(_MERSENNE_M - _MERSENNE_N))
pval_kk_m = b.gep(array, [ctx.int32_ty(0), kk_m])
val_kk_m = b.load(pval_kk_m)
val = b.xor(val_kk_m, val_shift)
val = b.xor(val, val_mag)
b.store(val, pkk)
builder.store(pidx.type.pointee(0), pidx)
# Get pointer and update index
idx = builder.load(pidx)
pval = builder.gep(array, [ctx.int32_ty(0), idx])
idx = builder.add(idx, idx.type(1))
builder.store(idx, pidx)
# Load and temper
val = builder.load(pval)
tmp = builder.lshr(val, val.type(11))
val = builder.xor(val, tmp)
tmp = builder.shl(val, val.type(7))
tmp = builder.and_(tmp, tmp.type(0x9d2c5680))
val = builder.xor(val, tmp)
tmp = builder.shl(val, val.type(15))
tmp = builder.and_(tmp, tmp.type(0xefc60000))
val = builder.xor(val, tmp)
tmp = builder.lshr(val, val.type(18))
val = builder.xor(val, tmp)
# val is now random 32bit integer
val = builder.zext(val, out.type.pointee)
builder.store(val, out)
builder.ret_void()
return builder.function
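# Pure-Python sketch of the MT19937 tempering applied to each raw word in
# _setup_mt_rand_integer above (reference only, not used by the IR):
def _mt_temper_reference(y):
    y ^= y >> 11
    y ^= (y << 7) & 0x9d2c5680
    y ^= (y << 15) & 0xefc60000
    y ^= y >> 18
    return y & 0xffffffff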
def _setup_mt_rand_float(ctx, state_ty, gen_int):
"""
    Mersenne Twister double precision random number generation.
LLVM IR implementation of the MT19937 algorithm from [0],
also used by CPython and numpy.
[0] http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c
"""
# Generate random float number generator function
builder = _setup_builtin_func_builder(ctx, "mt_rand_double", (state_ty.as_pointer(), ctx.float_ty.as_pointer()))
state, out = builder.function.args
al = builder.alloca(gen_int.args[1].type.pointee)
builder.call(gen_int, [state, al])
bl = builder.alloca(gen_int.args[1].type.pointee)
builder.call(gen_int, [state, bl])
a = builder.load(al)
b = builder.load(bl)
a = builder.lshr(a, a.type(5)) # 27bit random value
b = builder.lshr(b, b.type(6)) # 26bit random value
af = builder.uitofp(a, ctx.float_ty)
bf = builder.uitofp(b, ctx.float_ty)
# NOTE: The combination below could be implemented using bit ops,
# but due to floating point rounding it'd give slightly different
# random numbers
val = builder.fmul(af, ctx.float_ty(67108864.0)) # Shift left 26
val = builder.fadd(val, bf) # Combine
val = builder.fdiv(val, ctx.float_ty(9007199254740992.0)) # Scale
# The value is in interval [0, 1)
lower_bound = builder.fcmp_ordered(">=", val, val.type(0.0))
builder.assume(lower_bound)
upper_bound = builder.fcmp_ordered("<", val, val.type(1.0))
builder.assume(upper_bound)
builder.store(val, out)
builder.ret_void()
return builder.function
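# Pure-Python sketch of the 53-bit double construction above: two 32-bit draws
# are truncated to 27 and 26 bits and combined into a double in [0, 1)
# (reference only, not used by the IR):
def _mt_double_reference(a, b):
    a >>= 5   # keep the top 27 bits
    b >>= 6   # keep the top 26 bits
    return (a * 67108864.0 + b) / 9007199254740992.0  # (a * 2**26 + b) / 2**53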
def _setup_mt_rand_normal(ctx, state_ty, gen_float):
"""
    Generate a random float from the normal distribution.
    The implementation uses the polar method [0], the same as CPython and NumPy.
The range is -Inf to Inf.
[0] https://en.wikipedia.org/wiki/Marsaglia_polar_method
"""
| |
res[...] = value
else:
res = constructNumpyArray(cls, obj, cls.spatialDimensions, cls.channels, dtype, order, init)
# FIXME: this should work for arbitrary many dimensions
if axistags is None:
if order == 'A' and not isinstance(obj, numpy.ndarray):
order = 'V'
if hasattr(obj, 'axistags'):
axistags = obj.axistags
elif order == 'V' or order == 'F':
axistags = [AxisInfo.x, AxisInfo.y, AxisInfo.z][:cls.spatialDimensions]
elif order == 'C':
axistags = [AxisInfo.z, AxisInfo.y, AxisInfo.x][-cls.spatialDimensions:]
elif order == 'A':
strideOrder = [int(k) for k in numpy.array(res.strides).argsort()]
strideOrder.reverse()
strideOrder = strideOrder[:cls.spatialDimensions]
strideOrder = [int(k) for k in numpy.array(strideOrder).argsort()]
axistags = [AxisInfo.z, AxisInfo.y, AxisInfo.x][-cls.spatialDimensions:]
axistags = [axistags[k] for k in strideOrder]
if res.ndim > len(axistags):
axistags.append(AxisInfo.c)
elif hasattr(obj, 'axistags'):
pass # FIXME: need to check consistency here
res.axistags = AxisTags(axistags)
return res
def __array_finalize__(self, obj):
if hasattr(obj, 'axistags'):
self.axistags = obj.axistags
@property
def order(self):
if self.flags.c_contiguous:
return 'C'
elif self.flags.f_contiguous:
return 'F'
elif self.channels > 1 and self.itemsize == self.strides[-1] and \
reduce(lambda x, y: y if y >= x and x >= 0 else -1, self.strides[:-1], 0) >= 0:
return 'V'
return 'A'
def astype(self, dtype):
return self.__class__(self, dtype=dtype)
def copy(self, order = 'A'):
return self.__class__(self, dtype=self.dtype, order=order)
def __copy__(self, order = 'A'):
return self.copy(order)
def __deepcopy__(self, memo):
result = self.__class__(self, dtype=self.dtype, order="A")
memo[id(self)] = result
result.__dict__ = copy.deepcopy(self.__dict__, memo)
return result
# FIXME: this should depend on axistags
def __str__(self, separator = ' ', precision=2, suppress_small=True):
return numpy.array2string(self.T, separator = separator, precision=precision, suppress_small=suppress_small)
def __repr__(self):
return "%s(dtype=%s, shape=%s, data=\n%s)" % \
(self.__class__.__name__, str(self.dtype), str(self.shape), self.__str__(', '))
def bands(self):
if len(self.shape) == self.spatialDimensions:
return 1
else:
return self.shape[-1]
channels = classproperty(lambda cls: 0, bands)
@property
# FIXME: this should depend on axistags
def flat(self):
return self.view(numpy.ndarray).swapaxes(0, self.spatialDimensions-1).flat
__array_priority__ = 10.0
# we reimplement the numerical operators in order to make sure that array order is preserved
def __abs__(self):
return ufunc.absolute(self)
def __add__(self, other):
return ufunc.add(self, other)
def __and__(self, other):
return ufunc.bitwise_and(self, other)
def __div__(self, other):
return ufunc.divide(self, other)
def __divmod__(self, other):
return ufunc.floor_divide(self, other), ufunc.remainder(self, other)
def __eq__(self, other):
return ufunc.equal(self, other)
def __floordiv__(self, other):
return ufunc.floor_divide(self, other)
def __ge__(self, other):
return ufunc.greater_equal(self, other)
def __gt__(self, other):
return ufunc.greater(self, other)
def __invert__(self):
return ufunc.invert(self)
def __le__(self, other):
return ufunc.less_equal(self, other)
def __lshift__(self, other):
return ufunc.left_shift(self, other)
def __lt__(self, other):
return ufunc.less(self, other)
def __mod__(self, other):
return ufunc.remainder(self, other)
def __mul__(self, other):
return ufunc.multiply(self, other)
def __ne__(self, other):
return ufunc.not_equal(self, other)
def __neg__(self):
return ufunc.negative(self)
def __or__(self, other):
return ufunc.bitwise_or(self, other)
def __pos__(self):
return self
def __pow__(self, other):
return ufunc.power(self, other)
def __radd__(self, other):
return ufunc.add(other, self)
def __rand__(self, other):
return ufunc.bitwise_and(other, self)
def __rdiv__(self, other):
return ufunc.divide(other, self)
def __rdivmod__(self, other):
return ufunc.floor_divide(other, self), ufunc.remainder(other, self)
def __rfloordiv__(self, other):
return ufunc.floor_divide(other, self)
def __rlshift__(self, other):
        return ufunc.left_shift(other, self)
def __rmod__(self, other):
return ufunc.remainder(other, self)
def __rmul__(self, other):
return ufunc.multiply(other, self)
def __ror__(self, other):
return ufunc.bitwise_or(other, self)
def __rpow__(self, other):
return ufunc.power(other, self)
def __rrshift__(self, other):
return ufunc.right_shift(other, self)
def __rshift__(self, other):
return ufunc.right_shift(self, other)
def __rsub__(self, other):
return ufunc.subtract(other, self)
def __rtruediv__(self, other):
return ufunc.true_divide(other, self)
def __rxor__(self, other):
return ufunc.bitwise_xor(other, self)
def __sub__(self, other):
return ufunc.subtract(self, other)
def __truediv__(self, other):
return ufunc.true_divide(self, other)
def __xor__(self, other):
return ufunc.bitwise_xor(self, other)
def all(self, axis=None, out=None):
res = numpy.ndarray.all(self, axis, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def any(self, axis=None, out=None):
res = numpy.ndarray.any(self, axis, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def argmax(self, axis=None, out=None):
res = numpy.ndarray.argmax(self, axis, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def argmin(self, axis=None, out=None):
res = numpy.ndarray.argmin(self, axis, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def cumsum(self, axis=None, dtype=None, out=None):
res = numpy.ndarray.cumsum(self, axis, dtype, out)
if res.ndim != self.ndim:
res.axistags = AxisTags(res.ndim)
return res
def cumprod(self, axis=None, dtype=None, out=None):
res = numpy.ndarray.cumprod(self, axis, dtype, out)
if res.ndim != self.ndim:
res.axistags = AxisTags(res.ndim)
return res
# FIXME: this should depend on axistags
def flatten(self, order='C'):
res = numpy.ndarray.flatten(self.swapaxes(0, self.spatialDimensions-1), order)
res.axistags = AxisTags(1)
return res
def max(self, axis=None, out=None):
res = numpy.ndarray.max(self, axis, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def mean(self, axis=None, out=None):
res = numpy.ndarray.mean(self, axis, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def min(self, axis=None, out=None):
res = numpy.ndarray.min(self, axis, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def nonzero(self):
res = numpy.ndarray.nonzero(self)
for k in xrange(len(res)):
res[k].axistags = AxisTags(self.axistags[k])
return res
def prod(self, axis=None, dtype=None, out=None):
res = numpy.ndarray.prod(self, axis, dtype, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def ptp(self, axis=None, out=None):
res = numpy.ndarray.ptp(self, axis, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
# FIXME: this should depend on axistags
def ravel(self, order='C'):
res = numpy.ndarray.ravel(self, order)
res.axistags = AxisTags(1)
return res
def repeat(self, repeats, axis=None):
res = numpy.ndarray.repeat(self, repeats, axis)
if axis is None:
res.axistags = AxisTags(res.ndim)
return res
def reshape(self, shape, order='C'):
res = numpy.ndarray.reshape(self, shape, order)
res.axistags = AxisTags(res.ndim)
return res
    def resize(self, new_shape, refcheck=True):
        # numpy.ndarray.resize (unlike reshape) works in place and returns None,
        # so continue with self as the result
        numpy.ndarray.resize(self, new_shape, refcheck=refcheck)
        res = self
res.axistags = AxisTags(res.ndim)
return res
def squeeze(self):
res = numpy.ndarray.squeeze(self)
if self.ndim != res.ndim:
res.axistags = AxisTags(res.axistags)
for k in xrange(self.ndim-1, -1, -1):
if self.shape[k] == 1:
del res.axistags[k]
return res
def std(self, axis=None, dtype=None, out=None, ddof=0):
res = numpy.ndarray.std(self, axis, dtype, out, ddof)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
if len(res.shape) == 0:
res = res.item()
return res
def sum(self, axis=None, dtype=None, out=None):
res = numpy.ndarray.sum(self, axis, dtype, out)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
return res
def swapaxes(self, i, j):
res = numpy.ndarray.swapaxes(self, i, j)
if not hasattr(res, 'axistags'):
return res
res.axistags = AxisTags(res.axistags)
res.axistags[i], res.axistags[j] = res.axistags[j], res.axistags[i]
return res
def take(self, indices, axis=None, out=None, mode='raise'):
res = numpy.ndarray.take(self, indices, axis, out, mode)
if axis is None:
res.axistags = AxisTags(res.ndim)
return res
def transpose(self, *axes):
res = numpy.ndarray.transpose(self, *axes)
if not hasattr(res, 'axistags'):
return res
res.axistags = AxisTags(res.axistags)
if len(axes) == 1:
axes = axes[0]
if not axes:
res.axistags.transpose()
else:
res.axistags.transpose(axes)
return res
def var(self, axis=None, dtype=None, out=None, ddof=0):
res = numpy.ndarray.var(self, axis, dtype, out, ddof)
if axis is not None:
res.axistags = AxisTags(res.axistags)
del res.axistags[axis]
if len(res.shape) == 0:
res = res.item()
return res
def transposeToOrder(self, order = 'C'):
if order == 'A':
return self
permutation = [int(k) for k in numpy.array(self.strides).argsort()]
if order == 'C':
permutation.reverse()
elif order == 'V':
if hasattr(self, 'axistags'):
permutation = self.axistags.canonicalOrdering()
else:
permutation.reverse()
d = self.spatialDimensions - 1
permutation[0], permutation[d] = permutation[d], permutation[0]
return self.transpose(permutation)
def transposeToVigraOrder(self):
return self.transposeToOrder('V')
def transposeToNumpyOrder(self):
return self.transposeToOrder('C')
@property
def T(self):
return self.transpose()
def __getitem__(self, index):
'''x.__getitem__(y) <==> x[y]
In addition to the usual indexing functionality, this function
also updates the axistags of the result array. There are three cases:
* getitem creates a value => no axistags are required
* getitem creates an arrayview => axistags are transferred from the
corresponding axes of the base array,
axes resulting from 'newaxis' get tag 'None'
* getitem creates a copy of an array (fancy indexing) => all axistags are 'None'
'''
res = numpy.ndarray.__getitem__(self, index)
if res is not self and hasattr(res, 'axistags'):
if(res.base is self):
res.axistags = res.axistags.transform(index, res.ndim)
else:
res.axistags = AxisTags(res.ndim)
return res
for | |
'object'},
'ScaleApplicationResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'info': {'$ref': '#/definitions/ScaleApplicationInfo'}},
'type': 'object'},
'ScaleApplicationResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ScaleApplicationResult'},
'type': 'array'}},
'type': 'object'},
'ScaleApplicationsParams': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/ScaleApplicationParams'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'SetConstraints': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'constraints': {'$ref': '#/definitions/Value'}},
'required': ['application', 'constraints'],
'type': 'object'},
'StorageConstraints': {'additionalProperties': False,
'properties': {'count': {'type': 'integer'},
'pool': {'type': 'string'},
'size': {'type': 'integer'}},
'type': 'object'},
'StringResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'string'}},
'required': ['result'],
'type': 'object'},
'Subnet': {'additionalProperties': False,
'properties': {'cidr': {'type': 'string'},
'life': {'type': 'string'},
'provider-id': {'type': 'string'},
'provider-network-id': {'type': 'string'},
'provider-space-id': {'type': 'string'},
'space-tag': {'type': 'string'},
'status': {'type': 'string'},
'vlan-tag': {'type': 'integer'},
'zones': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['cidr',
'vlan-tag',
'life',
'space-tag',
'zones'],
'type': 'object'},
'UnitsResolved': {'additionalProperties': False,
'properties': {'all': {'type': 'boolean'},
'retry': {'type': 'boolean'},
'tags': {'$ref': '#/definitions/Entities'}},
'type': 'object'},
'UpdateSeriesArg': {'additionalProperties': False,
'properties': {'force': {'type': 'boolean'},
'series': {'type': 'string'},
'tag': {'$ref': '#/definitions/Entity'}},
'required': ['tag', 'force', 'series'],
'type': 'object'},
'UpdateSeriesArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/UpdateSeriesArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'Value': {'additionalProperties': False,
'properties': {'arch': {'type': 'string'},
'container': {'type': 'string'},
'cores': {'type': 'integer'},
'cpu-power': {'type': 'integer'},
'instance-type': {'type': 'string'},
'mem': {'type': 'integer'},
'root-disk': {'type': 'integer'},
'spaces': {'items': {'type': 'string'},
'type': 'array'},
'tags': {'items': {'type': 'string'},
'type': 'array'},
'virt-type': {'type': 'string'},
'zones': {'items': {'type': 'string'},
'type': 'array'}},
'type': 'object'}},
'properties': {'AddRelation': {'properties': {'Params': {'$ref': '#/definitions/AddRelation'},
'Result': {'$ref': '#/definitions/AddRelationResults'}},
'type': 'object'},
'AddUnits': {'properties': {'Params': {'$ref': '#/definitions/AddApplicationUnits'},
'Result': {'$ref': '#/definitions/AddApplicationUnitsResults'}},
'type': 'object'},
'CharmConfig': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationGetConfigResults'}},
'type': 'object'},
'CharmRelations': {'properties': {'Params': {'$ref': '#/definitions/ApplicationCharmRelations'},
'Result': {'$ref': '#/definitions/ApplicationCharmRelationsResults'}},
'type': 'object'},
'Consume': {'properties': {'Params': {'$ref': '#/definitions/ConsumeApplicationArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Deploy': {'properties': {'Params': {'$ref': '#/definitions/ApplicationsDeploy'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Destroy': {'properties': {'Params': {'$ref': '#/definitions/ApplicationDestroy'}},
'type': 'object'},
'DestroyApplication': {'properties': {'Params': {'$ref': '#/definitions/DestroyApplicationsParams'},
'Result': {'$ref': '#/definitions/DestroyApplicationResults'}},
'type': 'object'},
'DestroyConsumedApplications': {'properties': {'Params': {'$ref': '#/definitions/DestroyConsumedApplicationsParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'DestroyRelation': {'properties': {'Params': {'$ref': '#/definitions/DestroyRelation'}},
'type': 'object'},
'DestroyUnit': {'properties': {'Params': {'$ref': '#/definitions/DestroyUnitsParams'},
'Result': {'$ref': '#/definitions/DestroyUnitResults'}},
'type': 'object'},
'DestroyUnits': {'properties': {'Params': {'$ref': '#/definitions/DestroyApplicationUnits'}},
'type': 'object'},
'Expose': {'properties': {'Params': {'$ref': '#/definitions/ApplicationExpose'}},
'type': 'object'},
'Get': {'properties': {'Params': {'$ref': '#/definitions/ApplicationGet'},
'Result': {'$ref': '#/definitions/ApplicationGetResults'}},
'type': 'object'},
'GetCharmURL': {'properties': {'Params': {'$ref': '#/definitions/ApplicationGet'},
'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'GetConfig': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationGetConfigResults'}},
'type': 'object'},
'GetConstraints': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationGetConstraintsResults'}},
'type': 'object'},
'GetLXDProfileUpgradeMessages': {'properties': {'Params': {'$ref': '#/definitions/LXDProfileUpgradeMessages'},
'Result': {'$ref': '#/definitions/LXDProfileUpgradeMessagesResults'}},
'type': 'object'},
'ResolveUnitErrors': {'properties': {'Params': {'$ref': '#/definitions/UnitsResolved'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'ScaleApplications': {'properties': {'Params': {'$ref': '#/definitions/ScaleApplicationsParams'},
'Result': {'$ref': '#/definitions/ScaleApplicationResults'}},
'type': 'object'},
'Set': {'properties': {'Params': {'$ref': '#/definitions/ApplicationSet'}},
'type': 'object'},
'SetApplicationsConfig': {'properties': {'Params': {'$ref': '#/definitions/ApplicationConfigSetArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetCharm': {'properties': {'Params': {'$ref': '#/definitions/ApplicationSetCharm'}},
'type': 'object'},
'SetCharmProfile': {'properties': {'Params': {'$ref': '#/definitions/ApplicationSetCharmProfile'}},
'type': 'object'},
'SetConstraints': {'properties': {'Params': {'$ref': '#/definitions/SetConstraints'}},
'type': 'object'},
'SetMetricCredentials': {'properties': {'Params': {'$ref': '#/definitions/ApplicationMetricCredentials'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetRelationsSuspended': {'properties': {'Params': {'$ref': '#/definitions/RelationSuspendedArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Unexpose': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUnexpose'}},
'type': 'object'},
'Unset': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUnset'}},
'type': 'object'},
'UnsetApplicationsConfig': {'properties': {'Params': {'$ref': '#/definitions/ApplicationConfigUnsetArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Update': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUpdate'}},
'type': 'object'},
'UpdateApplicationSeries': {'properties': {'Params': {'$ref': '#/definitions/UpdateSeriesArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'WatchLXDProfileUpgradeNotifications': {'properties': {'Params': {'$ref': '#/definitions/Entity'},
'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
@ReturnMapping(AddRelationResults)
async def AddRelation(self, endpoints):
'''
endpoints : typing.Sequence[str]
Returns -> typing.Mapping[str, ~CharmRelation]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='AddRelation',
version=8,
params=_params)
_params['endpoints'] = endpoints
reply = await self.rpc(msg)
return reply
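    # Hypothetical usage sketch (the endpoint names are invented for
    # illustration), assuming `app_facade` is a connected instance of this
    # facade:
    #
    #   results = await app_facade.AddRelation(
    #       endpoints=['mysql:db', 'wordpress:db'])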
@ReturnMapping(AddApplicationUnitsResults)
async def AddUnits(self, application, num_units, placement):
'''
application : str
num_units : int
placement : typing.Sequence[~Placement]
Returns -> typing.Sequence[str]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='AddUnits',
version=8,
params=_params)
_params['application'] = application
_params['num-units'] = num_units
_params['placement'] = placement
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConfigResults)
async def CharmConfig(self, entities):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ConfigResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='CharmConfig',
version=8,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationCharmRelationsResults)
async def CharmRelations(self, application):
'''
application : str
Returns -> typing.Sequence[str]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='CharmRelations',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def Consume(self, args):
'''
args : typing.Sequence[~ConsumeApplicationArg]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Consume',
version=8,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def Deploy(self, applications):
'''
applications : typing.Sequence[~ApplicationDeploy]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Deploy',
version=8,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Destroy(self, application):
'''
application : str
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Destroy',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(DestroyApplicationResults)
async def DestroyApplication(self, applications):
'''
applications : typing.Sequence[~DestroyApplicationParams]
Returns -> typing.Sequence[~DestroyApplicationResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyApplication',
version=8,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def DestroyConsumedApplications(self, applications):
'''
applications : typing.Sequence[~DestroyConsumedApplicationParams]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyConsumedApplications',
version=8,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def DestroyRelation(self, endpoints):
'''
endpoints : typing.Sequence[str]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyRelation',
version=8,
params=_params)
_params['endpoints'] = endpoints
reply = await self.rpc(msg)
return reply
@ReturnMapping(DestroyUnitResults)
async def DestroyUnit(self, units):
'''
units : typing.Sequence[~DestroyUnitParams]
Returns -> typing.Sequence[~DestroyUnitResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyUnit',
version=8,
params=_params)
_params['units'] = units
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def DestroyUnits(self, unit_names):
'''
unit_names : typing.Sequence[str]
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyUnits',
version=8,
params=_params)
_params['unit-names'] = unit_names
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Expose(self, application):
'''
application : str
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Expose',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetResults)
async def Get(self, application):
'''
application : str
Returns -> typing.Union[str, typing.Mapping[str, typing.Any], _ForwardRef('Value')]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Get',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResult)
async def GetCharmURL(self, application):
'''
application : str
Returns -> typing.Union[_ForwardRef('Error'), str]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetCharmURL',
version=8,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConfigResults)
async def GetConfig(self, entities):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ConfigResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetConfig',
version=8,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConstraintsResults)
async def GetConstraints(self, entities):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ApplicationConstraint]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetConstraints',
version=8,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(LXDProfileUpgradeMessagesResults)
async def GetLXDProfileUpgradeMessages(self, application, watcher_id):
'''
application : Entity
watcher_id : str
Returns -> typing.Sequence[~LXDProfileUpgradeMessagesResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetLXDProfileUpgradeMessages',
version=8,
params=_params)
_params['application'] = application
_params['watcher-id'] = watcher_id
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def ResolveUnitErrors(self, all_, retry, tags):
'''
all_ : bool
retry : bool
tags : Entities
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='ResolveUnitErrors',
version=8,
params=_params)
_params['all'] = all_
_params['retry'] = retry
_params['tags'] = tags
reply = | |
0] = minmax(re_fs[i+1, j-1, 0]+(3/16.0)*quant_error_b)
re_fs[i+1, j-1, 1] = minmax(re_fs[i+1, j-1, 1]+(3/16.0)*quant_error_g)
re_fs[i+1, j-1, 2] = minmax(re_fs[i+1, j-1, 2]+(3/16.0)*quant_error_r)
re_fs[i+1, j, 0] = minmax(re_fs[i+1, j, 0]+(5/16.0)*quant_error_b)
re_fs[i+1, j, 1] = minmax(re_fs[i+1, j, 1]+(5/16.0)*quant_error_g)
re_fs[i+1, j, 2] = minmax(re_fs[i+1, j, 2]+(5/16.0)*quant_error_r)
re_fs[i+1, j+1, 0] = minmax(re_fs[i+1, j+1, 0]+(1/16.0)*quant_error_b)
re_fs[i+1, j+1, 1] = minmax(re_fs[i+1, j+1, 1]+(1/16.0)*quant_error_g)
re_fs[i+1, j+1, 2] = minmax(re_fs[i+1, j+1, 2]+(1/16.0)*quant_error_r)
noise_img = re_fs/255.0
elif noise_type == 'clean': # Pass clean image, without noise
noise_img = img_LR
#img_LR = np.clip(noise_img, 0, 1)
return noise_img, noise_type
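# For reference, the standard Floyd-Steinberg error-diffusion kernel used by
# the dithering branch above distributes the quantization error of pixel * as:
#
#           *      7/16
#   3/16   5/16    1/16
#
# The 7/16 share goes to the next pixel on the same row; the three shares on
# the row below are the ones visible in the loop above.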
def random_pix(size):
# Number of pixels to translate
# Higher probability for a 0 shift
# Caution: the number of shifted pixels strongly affects the result.
#"""
if size <= 64:
return random.choice([-1,0,0,1]) #pixels_translation
elif size > 64 and size <= 96:
return random.choice([-2,-1,0,0,1,2]) #pixels_translation
elif size > 96:
return random.choice([-3,-2,-1,0,0,1,2,3]) #pixels_translation
#"""
#return random.choice([-3,-2,-1,0,0,1,2,3]) #pixels_translation
# Note: translate_chan() has limited success in reproducing real chromatic
# aberrations, because those follow very specific patterns. It is still useful
# for teaching models to align color fringes, but the displacements here are
# random, whereas natural aberrations (purple fringing, axial/longitudinal and
# transverse/lateral CA) are specific cases of these displacements and could be
# modeled explicitly.
def translate_chan(img_or):
# Independently translate image channels to create color fringes
rows, cols, _ = img_or.shape
# Split the image into its BGR components
(blue, green, red) = cv2.split(img_or)
""" #V1: randomly displace each channel
new_channels = []
for values, channel in zip((blue, green, red), (0,1,2)):
M = np.float32([[1,0,random_pix(rows)],[0,1,random_pix(cols)]])
dst = cv2.warpAffine(values,M,(cols,rows))
new_channels.append(dst)
b_channel = new_channels[0]
g_channel = new_channels[1]
r_channel = new_channels[2]
#"""
#""" V2: only displace one channel at a time
M = np.float32([[1,0,random_pix(rows)],[0,1,random_pix(cols)]])
color = random.choice(["blue","green","red"])
if color == "blue":
b_channel = cv2.warpAffine(blue,M,(cols,rows))
g_channel = green
r_channel = red
elif color == "green":
b_channel = blue
g_channel = cv2.warpAffine(green,M,(cols,rows))
r_channel = red
else: # color == red:
b_channel = blue
g_channel = green
r_channel = cv2.warpAffine(red,M,(cols,rows))
#"""
""" V3: only displace a random crop of one channel at a time (INCOMPLETE)
# randomly crop
rnd_h = random.randint(0, max(0, rows - rows/2))
rnd_w = random.randint(0, max(0, cols - cols/2))
img_crop = img_or[rnd_h:rnd_h + rows/2, rnd_w:rnd_w + cols/2, :]
(blue_c, green_c, red_c) = cv2.split(img_crop)
rows_c, cols_c, _ = img_crop.shape
M = np.float32([[1,0,random_pix(rows_c)],[0,1,random_pix(cols_c)]])
color = random.choice(["blue","green","red"])
if color == "blue":
b_channel = cv2.warpAffine(blue_c,M,(cols_c,rows_c))
g_channel = green_c
r_channel = red_c
elif color == "green":
b_channel = blue_c
g_channel = cv2.warpAffine(green_c,M,(cols_c,rows_c))
r_channel = red_c
else: # color == red:
b_channel = blue_c
g_channel = green_c
r_channel = cv2.warpAffine(red_c,M,(cols_c,rows_c))
merged_crop = cv2.merge((b_channel, g_channel, r_channel))
image[rnd_h:rnd_h + rows/2, rnd_w:rnd_w + cols/2, :] = merged_crop
return image
#"""
# merge the channels back together and return the image
return cv2.merge((b_channel, g_channel, r_channel))
# https://www.pyimagesearch.com/2015/09/28/implementing-the-max-rgb-filter-in-opencv/
def max_rgb_filter(image):
# split the image into its BGR components
(B, G, R) = cv2.split(image)
# find the maximum pixel intensity values for each
# (x, y)-coordinate, then set all pixel values less
# than M to zero
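# e.g. a pixel with (B, G, R) = (10, 200, 50) becomes (0, 200, 0): only the
# dominant channel of each pixel is kept.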
M = np.maximum(np.maximum(R, G), B)
R[R < M] = 0
G[G < M] = 0
B[B < M] = 0
# merge the channels back together and return the image
return cv2.merge([B, G, R])
# Simple color balance algorithm (similar to Photoshop "auto levels")
# https://gist.github.com/DavidYKay/9dad6c4ab0d8d7dbf3dc#gistcomment-3025656
# http://www.morethantechnical.com/2015/01/14/simplest-color-balance-with-opencv-wcode/
# https://web.stanford.edu/~sujason/ColorBalancing/simplestcb.html
def simplest_cb(img, percent=1, znorm=False):
if znorm == True: # img is znorm'ed in the [-1,1] range, else img in the [0,1] range
img = (img + 1.0)/2.0
# back to the OpenCV [0,255] range
img = (img * 255.0).round().astype(np.uint8) # need to add astype(np.uint8) so cv2.LUT doesn't fail later
out_channels = []
cumstops = (
img.shape[0] * img.shape[1] * percent / 200.0,
img.shape[0] * img.shape[1] * (1 - percent / 200.0)
)
for channel in cv2.split(img):
cumhist = np.cumsum(cv2.calcHist([channel], [0], None, [256], (0,256)))
low_cut, high_cut = np.searchsorted(cumhist, cumstops)
lut = np.concatenate((
np.zeros(low_cut),
np.around(np.linspace(0, 255, high_cut - low_cut + 1)),
255 * np.ones(255 - high_cut)
))
out_channels.append(cv2.LUT(channel, lut.astype('uint8')))
img_out = cv2.merge(out_channels)
# Re-normalize
if znorm == False: # normalize img_out back to the [0,1] range
img_out = img_out/255.0
if znorm==True: # normalize images back to range [-1, 1]
img_out = (img_out/255.0 - 0.5) * 2 # xi' = (xi - mu)/sigma
return img_out
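# Worked example of the clipping arithmetic above (values are illustrative):
# for a 100x100 image with percent=1, cumstops = (10000*1/200, 10000*(1 - 1/200))
# = (50, 9950), i.e. roughly the darkest 0.5% and brightest 0.5% of pixels per
# channel are clipped before the LUT stretches the remaining range to [0, 255].
# Typical call (assuming a [0,1] float image): balanced = simplest_cb(img, percent=1)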
#https://www.idtools.com.au/unsharp-masking-python-opencv/
def unsharp_mask(img, blur_algo='median', kernel_size=None, strength=None, unsharp_algo='laplacian', znorm=False):
#h,w,c = img.shape
if znorm == True: # img is znorm'ed in the [-1,1] range, else img in the [0,1] range
img = (img + 1.0)/2.0
# back to the OpenCV [0,255] range
img = (img * 255.0).round().astype(np.uint8)
# randomize strength between 0.3 and 0.9
if strength is None:
strength = np.random.uniform(0.3, 0.9)
if unsharp_algo == 'DoG':
# If using Difference of Gaussians (DoG):
# run a 5x5 gaussian blur then a 3x3 gaussian blur
blur5 = cv2.GaussianBlur(img,(5,5),0)
blur3 = cv2.GaussianBlur(img,(3,3),0)
DoGim = blur5 - blur3
img_out = img - strength*DoGim
else: # 'laplacian': using LoG (actually, median blur instead of gaussian)
#randomize kernel_size between 1, 3 and 5
if kernel_size is None:
kernel_sizes = [1, 3, 5]
kernel_size = random.choice(kernel_sizes)
# Median filtering (could be Gaussian for proper LoG)
#gray_image_mf = median_filter(gray_image, 1)
if blur_algo == 'median':
smooth = cv2.medianBlur(img, kernel_size)
# Calculate the Laplacian (LoG, or in this case, Laplacian of Median)
lap = cv2.Laplacian(smooth,cv2.CV_64F)
# Calculate the sharpened image
img_out = img - strength*lap
# Saturate the pixels in either direction
img_out[img_out>255] = 255
img_out[img_out<0] = 0
# Re-normalize
if znorm == False: # normalize img_out back to the [0,1] range
img_out = img_out/255.0
if znorm==True: # normalize images back to range [-1, 1]
img_out = (img_out/255.0 - 0.5) * 2 # xi' = (xi - mu)/sigma
return img_out
def random_img(img_dir, save_path, crop_size=(128, 128), scale=1, blur_algos=['clean'], noise_types=['clean'], noise_types2=['clean']):
img_list = _get_paths_from_dir(img_dir)
random_img_path = random.choice(img_list)
env = None
img = util.read_img(env, random_img_path) #read image from path, opens with OpenCV, value ranges from 0 to 1
img_crop = random_crop(img, crop_size)
print(img_crop.shape)
#"""
cv2.imwrite(save_path+'/crop_.png',img_crop*255)
#"""
img_resize, _ = resize_img(img, crop_size)
print(img_resize.shape)
#"""
cv2.imwrite(save_path+'/resize_.png',img_resize*255)
#"""
img_random_resize, _ = random_resize_img(img, crop_size)
print(img_random_resize.shape)
#"""
cv2.imwrite(save_path+'/random_resize_.png',img_random_resize*255)
#"""
img_cutout = cutout(img, img.shape[0] // 2)
print(img_cutout.shape)
#"""
cv2.imwrite(save_path+'/cutout_.png',img_cutout*255)
#"""
img_erasing = random_erasing(img)
print(img_erasing.shape)
#"""
cv2.imwrite(save_path+'/erasing_.png',img_erasing*255)
#"""
#scale = 4
img_scale, interpol_algo = scale_img(img, scale)
print(img_scale.shape)
#"""
cv2.imwrite(save_path+'/scale_'+str(scale)+'_'+str(interpol_algo)+'_.png',img_scale*255)
#"""
img_blur, blur_algo, blur_kernel_size = blur_img(img, blur_algos)
print(img_blur.shape)
#"""
cv2.imwrite(save_path+'/blur_'+str(blur_kernel_size)+'_'+str(blur_algo)+'_.png',img_blur*255)
#"""
img_noise, noise_algo = noise_img(img, noise_types)
print(img_noise.shape)
#"""
cv2.imwrite(save_path+'/noise_'+str(noise_algo)+'_.png',img_noise*255)
#"""
#img_noise2, noise_algo2 = noise_img(img_noise, noise_types2)
#print(img_noise2.shape)
#"""
#cv2.imwrite(save_path+'/noise2_'+str(noise_algo2)+'_.png',img_noise2*255)
#"""
img_rrot, angle = random_rotate(img)
print(img_rrot.shape)
#"""
cv2.imwrite(save_path+'/rrot_'+str(angle)+'_.png',img_rrot*255)
#"""
print('Finished')
def single_image(img_path, save_path, crop_size=(128, 128), scale=1, blur_algos=['clean'], noise_types=['clean'], noise_types2=['clean']):
env = None
img = util.read_img(env, img_path) #read image from path, opens with OpenCV, value ranges from 0 to 1
print(img.shape)
img_crop = random_crop(img, crop_size)
print(img_crop.shape)
#"""
cv2.imwrite(save_path+'/crop_.png',img_crop*255)
#"""
img_resize, _ = resize_img(img, crop_size)
print(img_resize.shape)
#"""
cv2.imwrite(save_path+'/resize_.png',img_resize*255)
#"""
img_random_resize, _ = random_resize_img(img, crop_size)
print(img_random_resize.shape)
#"""
cv2.imwrite(save_path+'/random_resize_.png',img_random_resize*255)
#"""
img_cutout = cutout(img, img.shape[0] // 2)
print(img_cutout.shape)
#"""
cv2.imwrite(save_path+'/cutout_.png',img_cutout*255)
#"""
img_erasing = random_erasing(img)
print(img_erasing.shape)
#"""
cv2.imwrite(save_path+'/erasing_.png',img_erasing*255)
#"""
#scale = 4
img_scale, interpol_algo = scale_img(img, scale)
print(img_scale.shape)
#"""
cv2.imwrite(save_path+'/scale_'+str(scale)+'_'+str(interpol_algo)+'_.png',img_scale*255)
#"""
img_blur, blur_algo, blur_kernel_size = blur_img(img, blur_algos)
print(img_blur.shape)
#"""
cv2.imwrite(save_path+'/blur_'+str(blur_kernel_size)+'_'+str(blur_algo)+'_.png',img_blur*255)
#"""
img_noise, noise_algo = noise_img(img, noise_types)
#img_noise, noise_algo = noise_img(img_scale, noise_types)
print(img_noise.shape)
#"""
cv2.imwrite(save_path+'/noise_'+str(noise_algo)+'_.png',img_noise*255)
#"""
img_noise2, noise_algo2 = noise_img(img_noise, noise_types2)
print(img_noise2.shape)
#"""
cv2.imwrite(save_path+'/noise2_'+str(noise_algo2)+'_.png',img_noise2*255)
#"""
img_rrot, angle = random_rotate(img)
print(img_rrot.shape)
#"""
cv2.imwrite(save_path+'/rrot_'+str(angle)+'_.png',img_rrot*255)
#"""
print('Finished')
def apply_dir(img_dir, save_path, crop_size=(128, 128), scale=1, blur_algos=['clean'], noise_types=['clean'], noise_types2=['clean']):
img_list = _get_paths_from_dir(img_dir)
for path in img_list:
rann = ''
env = None
img = util.read_img(env, path)
import uuid
rann = uuid.uuid4().hex
img_crop = random_crop(img, crop_size)
print(img_crop.shape)
#"""
cv2.imwrite(save_path+'/'+rann+'crop_.png',img_crop*255)
#"""
img_resize, _ = resize_img(img, crop_size)
import sys
sys.path.append('/root/csdc3/lib/ablib')
sys.path.append('/root/csdc3/src/logs')
sys.path.append('/root/csdc3/src/logs/config_setup')
sys.path.append("/root/csdc3/src/utils")
from ablib import Pin
from ablib import DS18B20
from chomsky import *
from time import sleep
from sensor_entropy import *
from sensor_constants import *
import smbus
import time
import os
from SharedLock import Lock
import utility
class SensorManager:
bus = smbus.SMBus(0)
payloadbus = smbus.SMBus(1)
active_gpio_pins = {}
channel = None
old_mux = None
# sensorReadingLock = Lock("/root/csdc3/src/utils/sensorReadingLock.tmp")
""" -------------------- Initialization --------------------- """
@staticmethod
def init_gyroscope(sensorId):
# SensorManager.sensorReadingLock.acquire()
#insertDebugLog(NOTICE, "Initialized gyroscope: {}".format(sensorId), CDH, int(time.time()))
if not SensorManager.isCorrectSensor(sensorId, GYRO):
raise Exception('Incorrect sensor specified')
SensorManager.mux_select(sensorId)
try:
SensorManager.bus.write_byte(SensorEntropy.addr(sensorId), 0x00)
except(IOError, OSError):
print('[INIT] Error writing to gyroscope at address ' + \
str(SensorEntropy.addr(sensorId)))
insertDebugLog(NOTICE, "[INIT] Error writing to gyroscope: {}".format(sensorId),
CDH, int(time.time()))
return None
time.sleep(0.1)
@staticmethod
def init_magnetometer(sensorId):
# SensorManager.sensorReadingLock.acquire()
#insertDebugLog(NOTICE, "Initialized magnetometer: {}".format(sensorId), CDH, int(time.time()))
if not SensorManager.isCorrectSensor(sensorId, MAG):
raise Exception('Incorrect sensor specified')
mag_reg = SensorEntropy.reg(MAG)
SensorManager.mux_select(sensorId)
try:
SensorManager.bus.write_byte_data(SensorEntropy.addr(sensorId), \
mag_reg['INIT'], 0x01)
except(IOError, OSError):
print('[INIT] Error writing to magnetometer at address ' + \
str(SensorEntropy.addr(sensorId)))
insertDebugLog(NOTICE, "[INIT] Error writing to magnetometer: {}".format(sensorId),
CDH, int(time.time()))
return None
time.sleep(0.1)
@staticmethod
def init_temp_sensor(sensorId):
# SensorManager.sensorReadingLock.acquire()
#insertDebugLog(NOTICE, "Initialized temp sensor: {}".format(sensorId), CDH, int(time.time()))
if not SensorManager.isCorrectSensor(sensorId, TEMP):
print('Sensor Id: ' + str(sensorId))
raise Exception('Incorrect sensor specified')
addr = SensorEntropy.addr(sensorId)
reg = SensorEntropy.reg(TEMP)
if sensorId == TEMP_PAYLOAD_A or sensorId == TEMP_PAYLOAD_B:
bus = SensorManager.payloadbus
else:
SensorManager.mux_select(sensorId)
bus = SensorManager.bus
try:
# Start data conversion
startReg = reg[START]
configReg = reg[CONFIG]
bus.write_byte_data(addr, startReg, 0x01)
# Enable continuous mode
bus.write_byte_data(addr, configReg, 0x00)
except(IOError, OSError):
print("[INIT] Error writing to temperature sensor: {}".format(sensorId))
insertDebugLog(NOTICE, "[INIT] Error writing to temperature sensor: {}".format(sensorId),
CDH, int(time.time()))
return None
time.sleep(0.1)
@staticmethod
def init_adc(sensorId):
# SensorManager.sensorReadingLock.acquire()
#insertDebugLog(NOTICE, "Initialized ADC: {}".format(sensorId), CDH, int(time.time()))
bus = SensorManager.payloadbus
addr = SensorEntropy.addr(sensorId)
adc_reg = SensorEntropy.reg(ADC)
busy_reg = bus.read_byte_data(addr, \
adc_reg['REG_BUSY_STATUS'])
"""
while busy_reg:
time.sleep(1)
"""
try:
bus.write_byte_data(addr, adc_reg['REG_ADV_CONFIG'], \
adc_reg['CONFIG_EXTERNAL_VREF'])
bus.write_byte_data(addr, adc_reg['REG_CONV_RATE'], \
adc_reg['CONFIG_CONTINUOUS'])
bus.write_byte_data(addr, adc_reg['REG_CHANNEL_DISABLE'], \
adc_reg['CONFIG_ENABLE_ALL_CHANNELS'])
bus.write_byte_data(addr, adc_reg['REG_LIMIT_BASE'], \
adc_reg['CONFIG_LIMIT_BASE'])
bus.write_byte_data(addr, adc_reg['REG_LIMIT_BASE2'], \
adc_reg['CONFIG_LIMIT_BASE'])
bus.write_byte_data(addr, adc_reg['REG_CONFIG'], \
adc_reg['CONFIG_NO_INTERRUPTS'])
except(IOError, OSError):
print('[INIT] Error writing to ADC at address ' + str(addr))
insertDebugLog(NOTICE, "[INIT] Error writing to ADC: {}".format(sensorId),
CDH, int(time.time()))
return None
@staticmethod
def init_power_sensor(sensorId):
#insertDebugLog(NOTICE, "Initialized power sensor: {}".format(sensorId), CDH, int(time.time()))
SensorManager.mux_select(sensorId)
try:
if not os.path.isdir(INA219_PATH):
with open(I2C_DEVICE_PATH, "w") as f:
f.write("ina219 0x40")
with open(INA219_RESISTOR, "w") as f:
f.write("2000")
except(IOError, OSError):
print('[INIT] Error reading from Power sensor')
insertDebugLog(NOTICE, "[INIT] Error reading from Power sensor: {}".format(sensorId),
CDH, int(time.time()))
return None
@staticmethod
def init_adc_driver():
#insertDebugLog(NOTICE, "Initialized adc sensor", PAYLOAD, int(time.time()))
try:
if not os.path.isdir(ADC_PATH):
with open(I2C1_DEVICE_PATH, "w") as f:
f.write("adc128d818 0x1d")
except(IOError, OSError):
print('[INIT] Error initializing adc sensor')
insertDebugLog(NOTICE, "[INIT] Error initializing adc sensor", CDH, int(time.time()))
return None
""" -------------------- Stop --------------------- """
@staticmethod
def stop_gyroscope(sensorId):
#insertDebugLog(NOTICE, "Stop gyroscope: {}".format(sensorId), CDH, int(time.time()))
if not SensorManager.isCorrectSensor(sensorId, GYRO):
raise Exception('Incorrect sensor specified')
SensorManager.mux_select(sensorId)
addr = SensorEntropy.addr(sensorId)
try:
SensorManager.bus.write_byte(addr, 0x01)
except(IOError, OSError):
print('[STOP] Error writing to gyroscope at address ' + str(addr))
insertDebugLog(NOTICE, "[STOP] Error writing to gyroscope: {}".format(sensorId),
CDH, int(time.time()))
return None
# finally:
# SensorManager.sensorReadingLock.release()
time.sleep(0.1)
@staticmethod
def stop_magnetometer(sensorId):
#insertDebugLog(NOTICE, "Stop magnetometer: {}".format(sensorId), CDH, int(time.time()))
# SensorManager.sensorReadingLock.release()
pass
@staticmethod
def stop_temp_sensor(sensorId):
#insertDebugLog(NOTICE, "Stop temp sensor: {}".format(sensorId), CDH, int(time.time()))
if not SensorManager.isCorrectSensor(sensorId, TEMP):
raise Exception('Incorrect sensor specified')
addr = SensorEntropy.addr(sensorId)
stopReg = SensorEntropy.reg(TEMP)[STOP]
if sensorId == TEMP_PAYLOAD_A or sensorId == TEMP_PAYLOAD_B:
bus = SensorManager.payloadbus
else:
SensorManager.mux_select(sensorId)
bus = SensorManager.bus
try:
bus.write_byte_data(addr, stopReg, 0x01)
except(IOError, OSError):
print("[STOP] Error writing to temperature sensor: {}".format(sensorId))
insertDebugLog(NOTICE, "[STOP] Error writing to temperature sensor: {}".format(sensorId),
CDH, int(time.time()))
return None
# finally:
# SensorManager.sensorReadingLock.release()
@staticmethod
def stop_adc_sensor(sensorId):
#insertDebugLog(NOTICE, "Stop adc: {}".format(sensorId), CDH, int(time.time()))
addr = SensorEntropy.addr(sensorId)
configReg = SensorEntropy.reg(ADC)['REG_CONFIG']
try:
SensorManager.payloadbus.write_byte_data(addr, configReg, 0x00)
except (IOError, OSError):
print("[STOP] Error writing to ADC: {}".format(sensorId))
insertDebugLog(NOTICE, "[STOP] Error writing to ADC: {}".format(sensorId),
CDH, int(time.time()))
return None
# finally:
# SensorManager.sensorReadingLock.release()
@staticmethod
def stop_power_sensor(sensorId):
#insertDebugLog(NOTICE, "Stop power sensor: {}".format(sensorId), CDH, int(time.time()))
# SensorManager.sensorReadingLock.release()
pass
""" -------------------- Reading --------------------- """
@staticmethod
def read_gyroscope(sensorId):
insertDebugLog(NOTICE, "Read gyroscope: {}".format(sensorId),
CDH, int(time.time()))
if not SensorManager.isCorrectSensor(sensorId, GYRO):
raise Exception('Incorrect sensor specified')
SensorManager.mux_select(sensorId)
address = SensorEntropy.addr(sensorId)
gyro_reg = SensorEntropy.reg(GYRO)
# Get the values from the sensor
reg_x_h = gyro_reg['X-H']
reg_x_l = gyro_reg['X-L']
reg_y_h = gyro_reg['Y-H']
reg_y_l = gyro_reg['Y-L']
reg_z_h = gyro_reg['Z-H']
reg_z_l = gyro_reg['Z-L']
try:
valX = (SensorManager.bus.read_byte_data(address, reg_x_h) << 8) \
| SensorManager.bus.read_byte_data(address, reg_x_l)
sleep(0.1)
valY = (SensorManager.bus.read_byte_data(address, reg_y_h) << 8) \
| SensorManager.bus.read_byte_data(address, reg_y_l)
sleep(0.1)
valZ = (SensorManager.bus.read_byte_data(address, reg_z_h) << 8) \
| SensorManager.bus.read_byte_data(address, reg_z_l)
sleep(0.1)
except(IOError, OSError):
print('[READ] Error reading from gyroscope at address ' + \
str(address))
insertDebugLog(NOTICE, "[READ] Error reading from gyroscope: {}".format(sensorId),
CDH, int(time.time()))
return None
# Apply two's complement
valX = utility.twos_to_int(valX)
valY = utility.twos_to_int(valY)
valZ = utility.twos_to_int(valZ)
sleep(0.1)
# Log data
value = (valX, valY, valZ)
sub = SensorEntropy.subsystem(sensorId)
insertTelemetryLog(sensorId, value, sub, int(time.time()))
return value
@staticmethod
def read_magnetometer(sensorId):
#insertDebugLog(NOTICE, "Read magnetometer: {}".format(sensorId), CDH, int(time.time()))
if not SensorManager.isCorrectSensor(sensorId, MAG):
raise Exception('Incorrect sensor specified')
SensorManager.mux_select(sensorId)
address = SensorEntropy.addr(sensorId)
mag_reg = SensorEntropy.reg(MAG)
# Get the values from the sensor
reg_x_h = mag_reg['X-H']
reg_x_l = mag_reg['X-L']
reg_y_h = mag_reg['Y-H']
reg_y_l = mag_reg['Y-L']
reg_z_h = mag_reg['Z-H']
reg_z_l = mag_reg['Z-L']
try:
valX = (SensorManager.bus.read_byte_data(address, reg_x_h) << 8) \
| SensorManager.bus.read_byte_data(address, reg_x_l)
sleep(0.1)
valY = (SensorManager.bus.read_byte_data(address, reg_y_h) << 8) \
| SensorManager.bus.read_byte_data(address, reg_y_l)
sleep(0.1)
valZ = (SensorManager.bus.read_byte_data(address, reg_z_h) << 8) \
| SensorManager.bus.read_byte_data(address, reg_z_l)
except(IOError, OSError):
print('[READ] Error reading from magnetometer at address ' + \
str(address))
insertDebugLog(NOTICE, "[READ] Error reading from magnetometer: {}".format(sensorId),
CDH, int(time.time()))
return None
# Convert the values from two's complement
valX = utility.twos_to_int(valX, 16)
valY = utility.twos_to_int(valY, 16)
valZ = utility.twos_to_int(valZ, 16)
"""
# Change valX and valY to radians
import math
radians = math.atan2(valY, valX)
radians += -0.0197
# Compensate for errors
if radians < 0:
radians += 2*math.pi
if radians > 2*math.pi:
radians -= 2*math.pi
# Turn radians into degrees
degrees = math.floor(radians * 180 / math.pi)
# Log data
value = (radians, degrees)
"""
value = (valX, valY, valZ)
sub = SensorEntropy.subsystem(sensorId)
insertTelemetryLog(sensorId, value, sub, int(time.time()))
return value
@staticmethod
def read_temp_sensor(sensorId):
#insertDebugLog(NOTICE, "Read temp sensor: {}".format(sensorId), CDH, int(time.time()))
if not SensorManager.isCorrectSensor(sensorId, TEMP):
insertDebugLog(NOTICE, "Incorrect sensor specified: {}".format(sensorId),
CDH, int(time.time()))
raise Exception('Incorrect sensor specified')
addr = SensorEntropy.addr(sensorId)
if sensorId == TEMP_PAYLOAD_A or sensorId == TEMP_PAYLOAD_B:
bus = SensorManager.payloadbus
else:
SensorManager.mux_select(sensorId)
bus = SensorManager.bus
try:
bus.write_byte(addr, SensorEntropy.reg(TEMP)[VAL])
except(IOError, OSError):
print("[READ] Error writing to temperature sensor: {}".format(sensorId))
insertDebugLog(NOTICE, "[READ] Error writing to temperature sensor: {}".format(sensorId),
CDH, int(time.time()))
return None
try:
decValue = bus.read_byte(addr)
fractValue = bus.read_byte(addr)
sleep(0.02)
except(IOError, OSError):
print('[READ] Error reading from temperature sensor at address ' + \
str(addr))
insertDebugLog(NOTICE, "[READ] Error reading from temperature sensor: {}".format(sensorId),
CDH, int(time.time()))
return None
# Log data
value = utility.conv_bin_to_float(decValue, fractValue)
sub = SensorEntropy.subsystem(sensorId)
insertTelemetryLog(sensorId, value, sub, int(time.time()))
return value
@staticmethod
def read_adc(experiment, sensorId):
#insertDebugLog(NOTICE, "Read adc: {}".format(sensorId), CDH, int(time.time()))
addr = SensorEntropy.addr(sensorId)
adc_reg = SensorEntropy.reg(ADC)
bus = SensorManager.payloadbus
try:
bus.write_byte(addr, adc_reg['READ_REG_BASE'] + experiment)
strain = ((bus.read_byte(addr) << 8) | (bus.read_byte(addr))) & 0xFFF0
strain = strain >> 4
bus.write_byte(addr, adc_reg['READ_REG_BASE'] + experiment + 1)
force = ((bus.read_byte(addr) << 8) | (bus.read_byte(addr))) & 0xFFF0
force = force >> 4
bus.write_byte(addr, adc_reg['READ_REG_BASE'] + 7)
temp = ((bus.read_byte(addr) << 8) | (bus.read_byte(addr))) & 0xFF80
temp = temp >> 7
except(IOError, OSError):
print('[READ] Error reading from ADC at address ' + \
str(addr))
insertDebugLog(NOTICE, "[READ] Error reading from ADC: {}".format(sensorId),
CDH, int(time.time()))
return None, None, None
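# The masking above leaves a 9-bit temperature word; the handling below assumes
# the ADC's usual two's-complement format with a 0.5 degC LSB: bit 8 is the
# sign, positive readings are divided by 2, and negative readings are recovered
# as -(512 - raw)/2.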
if temp & 0x100 == 0:
temp /= 2.
else:
temp = -((512 - temp) / 2.)
sleep(0.01)
# Log data
value = (strain, force, temp)
sub = SensorEntropy.subsystem(sensorId)
insertTelemetryLog(ADC_0, value, sub, int(time.time()))
return value
@staticmethod
def read_payload(experiment, sensorId):
addr = SensorEntropy.addr(ADC)
adc_reg = SensorEntropy.reg(ADC)
bus = SensorManager.payloadbus
try:
bus.write_byte(addr, adc_reg['READ_REG_BASE'] + experiment)
strain = ((bus.read_byte(addr) << 8) | (bus.read_byte(addr))) & 0xFFF0
strain = strain >> 4
bus.write_byte(addr, adc_reg['READ_REG_BASE'] + experiment + 1)
force = ((bus.read_byte(addr) << 8) | (bus.read_byte(addr))) & 0xFFF0
force = force >> 4
bus.write_byte(addr, adc_reg['READ_REG_BASE'] + 7)
temp = ((bus.read_byte(addr) << 8) | (bus.read_byte(addr))) & 0xFF80
temp = temp >> 7
except(IOError, OSError):
print('[READ] Error reading payload')
insertDebugLog(NOTICE, "[READ] Error reading payload",
PAYLOAD, int(time.time()))
return None, None, None
0, 0, 0, 0, 0, 0, 0,
0, 0.000, 0.000 ],
[ 0.1244, -0.6134, 0.1823, 0.3012, -0.1968, 0.1616, 0.1025,
-0.1972, 0.1162, -0.2079, -0.3062, 0.0585, -0.3286,
0.3187, -0.0812, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0 ],
[ 0.1832, -0.1559, -0.4327, 0.2059, 0.4677, 0.0317, 0.2233,
-0.3589, 0.2393, 0.2468, 0.0148, 0.1193, -0.0279,
-0.3600, -0.2261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0 ],
[ 0.5027, 0.1935, 0.1571, 0.0503, -0.0503, -0.1443, -0.3080,
-0.4939, 0.1847, -0.2762, 0.0042, 0.0960, 0.2239,
-0.0579, 0.3840, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0 ],
[ 0.3948, -0.0002, 0.2172, -0.0293, -0.0835, 0.1614, 0.4559,
0.1626, -0.1155, -0.3087, 0.4331, -0.2223, -0.2213,
-0.3658, -0.0188, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ] ]
LAM = [ 0.113, 0.105, 0.117, 0.119, 0.108, 0.110, 0.101, 0.107, 0.111,
0.109, 0.120, 0.108, 0.101, 0.105, 0.116, 1.000, 1.000, 1.000,
1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000,
1.000, 1.000, 1.000 ]
def __init__(self):
super(UF12, self).__init__(30, 5)
self.types[:] = [Real(UF12.LB[i], UF12.UB[i]) for i in range(self.nvars)]
self.internal_problem = DTLZ3(self.nobjs, self.nvars)
def evaluate(self, solution):
zz, psum = _transform(solution.variables[:], UF12.M, UF12.LAM, self.nvars, self.nobjs)
transformed_solution = Solution(self.internal_problem)
transformed_solution.variables[:] = zz
transformed_solution.evaluate()
for i in range(self.nobjs):
solution.objectives[i] = 2.0 / (1.0 + math.exp(-psum[i])) * (transformed_solution.objectives[i] + 1.0)
class UF13(WFG):
def __init__(self):
super(UF13, self).__init__(8, 22, 5)
def evaluate(self, solution):
y = _normalize_z(solution.variables[:])
y = _WFG1_t1(y, self.k)
y = _WFG1_t2(y, self.k)
y = _WFG1_t3(y)
y = _WFG1_t4(y, self.k, self.m)
y = _WFG1_shape(y)
solution.objectives[:] = y
def random(self):
solution = Solution(self)
solution.variables[:self.k] = [math.pow(random.uniform(0.0, 1.0), 50.0) for _ in range(self.k)]
solution.variables[self.k:] = 0.35
solution.variables[:] = [solution.variables[i] * 2.0 * (i+1) for i in range(self.nvars)]
self.evaluate(solution)
return solution
class CF1(Problem):
def __init__(self, nvars = 10):
super(CF1, self).__init__(nvars, 2, 1)
self.types[:] = Real(0, 1)
self.constraints[:] = ">=0"
def evaluate(self, solution):
x = solution.variables[:]
count1 = 0
count2 = 0
sum1 = 0.0
sum2 = 0.0
N = 10.0
a = 1.0
for j in range(2, self.nvars+1):
yj = x[j-1] - math.pow(x[0], 0.5 * (1.0 + 3.0 * (j - 2.0) / (self.nvars - 2.0)))
if j % 2 == 1:
sum1 += yj * yj
count1 += 1
else:
sum2 += yj * yj
count2 += 1
f1 = x[0] + 2.0 * sum1 / count1
f2 = 1.0 - x[0] + 2.0 * sum2 / count2
solution.objectives[:] = [f1, f2]
solution.constraints[0] = f1 + f2 - a * abs(math.sin(N * math.pi * (f1 - f2 + 1.0))) - 1.0
class CF2(Problem):
def __init__(self, nvars = 10):
super(CF2, self).__init__(nvars, 2, 1)
self.types[0] = Real(0, 1)
self.types[1:] = Real(-1, 1)
self.constraints[:] = ">=0"
def evaluate(self, solution):
x = solution.variables[:]
count1 = 0
count2 = 0
sum1 = 0.0
sum2 = 0.0
N = 2.0
a = 1.0
for j in range(2, self.nvars+1):
if j % 2 == 1:
yj = x[j-1] - math.sin(6.0*math.pi*x[0] + j*math.pi/self.nvars)
sum1 += yj * yj
count1 += 1
else:
yj = x[j-1] - math.cos(6.0*math.pi*x[0] + j*math.pi/self.nvars)
sum2 += yj * yj
count2 += 1
f1 = x[0] + 2.0 * sum1 / count1
f2 = 1.0 - math.sqrt(x[0]) + 2.0 * sum2 / count2
t = f2 + math.sqrt(f1) - a * math.sin(N * math.pi * (math.sqrt(f1) - f2 + 1.0)) - 1.0
solution.objectives[:] = [f1, f2]
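# The raw violation t is passed through a sign-preserving squashing term below:
# the feasibility condition (constraint >= 0) is exactly t >= 0, but the
# reported magnitude stays bounded for large |t|.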
solution.constraints[0] = (1 if t >= 0 else -1) * abs(t) / (1.0 + math.exp(4.0 * abs(t)))
class CF3(Problem):
def __init__(self, nvars = 10):
super(CF3, self).__init__(nvars, 2, 1)
self.types[0] = Real(0, 1)
self.types[1:] = Real(-2, 2)
self.constraints[:] = ">=0"
def evaluate(self, solution):
x = solution.variables[:]
count1 = 0
count2 = 0
sum1 = 0.0
sum2 = 0.0
prod1 = 1.0
prod2 = 1.0
N = 2.0
a = 1.0
for j in range(2, self.nvars+1):
yj = x[j-1] - math.sin(6.0*math.pi*x[0] + j*math.pi/self.nvars)
pj = math.cos(20.0 * yj * math.pi / math.sqrt(j))
if j % 2 == 1:
sum1 += yj * yj
prod1 *= pj
count1 += 1
else:
sum2 += yj * yj
prod2 *= pj
count2 += 1
f1 = x[0] + 2.0 * (4.0 * sum1 - 2.0 * prod1 + 2.0) / count1
f2 = 1.0 - x[0]**2 + 2.0 * (4.0 * sum2 - 2.0 * prod2 + 2.0) / count2
solution.objectives[:] = [f1, f2]
solution.constraints[0] = f2 + f1**2 - a * math.sin(N * math.pi * (f1**2 - f2 + 1.0)) - 1.0
class CF4(Problem):
def __init__(self, nvars = 10):
super(CF4, self).__init__(nvars, 2, 1)
self.types[0] = Real(0, 1)
self.types[1:] = Real(-2, 2)
self.constraints[:] = ">=0"
def evaluate(self, solution):
x = solution.variables[:]
sum1 = 0.0
sum2 = 0.0
for j in range(2, self.nvars+1):
yj = x[j-1] - math.sin(6.0*math.pi*x[0] + j*math.pi/self.nvars)
import copy
from rpython.rlib import jit
from rpython.rlib.objectmodel import specialize
from topaz.celldict import CellDict, VersionTag
from topaz.coerce import Coerce
from topaz.module import ClassDef, check_frozen
from topaz.objects.functionobject import W_FunctionObject
from topaz.objects.objectobject import W_RootObject
from topaz.objects.procobject import W_ProcObject
from topaz.scope import StaticScope
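# AttributeReader / AttributeWriter below are the method objects installed for
# attr_reader / attr_writer. Both first look for a constraint variable bound to
# the instance variable (this file appears to come from a constraint-solving
# fork of Topaz) and only fall back to ordinary instance-variable access when no
# solver-managed value is available.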
class AttributeReader(W_FunctionObject):
_immutable_fields_ = ["varname"]
def __init__(self, varname):
W_FunctionObject.__init__(self, varname)
self.varname = varname
def __deepcopy__(self, memo):
obj = super(W_FunctionObject, self).__deepcopy__(memo)
obj.varname = self.varname
return obj
def call(self, space, w_obj, args_w, block):
c_var = space.newconstraintvariable(w_owner=w_obj, ivar=self.varname)
if c_var:
if space.is_constructing_constraint() and c_var.is_solveable(space):
return c_var.get_external_variable(space)
else:
return space.get_value(c_var)
return space.find_instance_var(w_obj, self.varname) or space.w_nil
class AttributeWriter(W_FunctionObject):
_immutable_fields_ = ["varname"]
def __init__(self, varname):
W_FunctionObject.__init__(self, varname)
self.varname = varname
def __deepcopy__(self, memo):
obj = super(W_FunctionObject, self).__deepcopy__(memo)
obj.varname = self.varname
return obj
def call(self, space, w_obj, args_w, block):
[w_value] = args_w
c_var = space.newconstraintvariable(w_owner=w_obj, ivar=self.varname)
if not c_var or not space.assign_value(c_var, w_value):
space.set_instance_var(w_obj, self.varname, w_value)
return w_value
def arity(self, space):
return space.newint(1)
class UndefMethod(W_FunctionObject):
_immutable_fields_ = ["name"]
def __init__(self, name):
W_FunctionObject.__init__(self, name)
self.name = name
def call(self, space, w_obj, args_w, block):
args_w.insert(0, space.newsymbol(self.name))
return space.send(w_obj, "method_missing", args_w, block)
class DefineMethodBlock(W_FunctionObject):
_immutable_fields_ = ["name", "block"]
def __init__(self, name, block):
W_FunctionObject.__init__(self, name)
self.name = name
self.block = block
def call(self, space, w_obj, args_w, block):
from topaz.interpreter import RaiseReturn
method_block = self.block.copy(space, w_self=w_obj, is_lambda=True)
try:
return space.invoke_block(method_block, args_w, block)
except RaiseReturn as e:
return e.w_value
def arity(self, space):
return space.newint(self.block.bytecode.arity(negative_defaults=True))
class DefineMethodMethod(W_FunctionObject):
_immutable_fields_ = ["name", "w_unbound_method"]
def __init__(self, name, w_unbound_method):
W_FunctionObject.__init__(self, name)
self.name = name
self.w_unbound_method = w_unbound_method
def call(self, space, w_obj, args_w, block):
w_bound_method = space.send(self.w_unbound_method, "bind", [w_obj])
return space.send(w_bound_method, "call", args_w, block)
class W_Autoload(W_RootObject):
def __init__(self, space, path):
self.space = space
self.path = path
def load(self):
self.space.send(
self.space.w_kernel,
"require",
[self.space.newstr_fromstr(self.path)]
)
class W_ModuleObject(W_RootObject):
_immutable_fields_ = ["version?", "included_modules?[*]", "klass?", "name?"]
classdef = ClassDef("Module", W_RootObject.classdef)
def __init__(self, space, name, klass=None):
self.name = name
self.klass = klass
self.version = VersionTag()
self.methods_w = {}
self.constants_w = {}
self.class_variables = CellDict()
self.class_variable_constraints = CellDict()
self.instance_variables = CellDict()
self.instance_variable_constraints = CellDict()
self.flags = CellDict()
self.included_modules = []
self.descendants = []
def __deepcopy__(self, memo):
obj = super(W_ModuleObject, self).__deepcopy__(memo)
obj.name = self.name
obj.klass = copy.deepcopy(self.klass, memo)
obj.version = copy.deepcopy(self.version, memo)
obj.methods_w = copy.deepcopy(self.methods_w, memo)
obj.constants_w = copy.deepcopy(self.constants_w, memo)
obj.class_variables = copy.deepcopy(self.class_variables, memo)
obj.class_variable_constraints = copy.deepcopy(self.class_variable_constraints, memo)
obj.instance_variables = copy.deepcopy(self.instance_variables, memo)
obj.instance_variable_constraints = copy.deepcopy(self.instance_variable_constraints, memo)
obj.flags = copy.deepcopy(self.flags, memo)
obj.included_modules = copy.deepcopy(self.included_modules, memo)
obj.descendants = copy.deepcopy(self.descendants, memo)
return obj
def getclass(self, space):
if self.klass is not None:
return jit.promote(self).klass
return W_RootObject.getclass(self, space)
def getsingletonclass(self, space):
if self.klass is None or not self.klass.is_singleton:
self.klass = space.newclass(
"#<Class:%s>" % self.name, self.klass or space.w_module, is_singleton=True, attached=self
)
return self.klass
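# Version-tag scheme: _find_method_pure / _find_const_pure are @jit.elidable on
# (name, version), so mutated() only has to swap in a fresh VersionTag object to
# invalidate any lookups the JIT has constant-folded against the old version.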
def mutated(self):
self.version = VersionTag()
def define_method(self, space, name, method):
if (name == "initialize" or name == "initialize_copy" or
method.visibility == W_FunctionObject.MODULE_FUNCTION):
method.update_visibility(W_FunctionObject.PRIVATE)
self.mutated()
self.methods_w[name] = method
if not space.bootstrap:
if isinstance(method, UndefMethod):
self.method_undefined(space, space.newsymbol(name))
else:
self.method_added(space, space.newsymbol(name))
@jit.unroll_safe
def find_method(self, space, name):
method = self._find_method_pure(space, name, self.version)
if method is None:
for module in self.included_modules:
method = module.find_method(space, name)
if method is not None:
return method
return method
@jit.unroll_safe
def find_method_super(self, space, name):
for module in self.included_modules:
method = module.find_method(space, name)
if method is not None:
return method
return None
@jit.elidable
def _find_method_pure(self, space, method, version):
return self.methods_w.get(method, None)
@specialize.argtype(2)
def methods(self, space, visibility=None, inherit=True):
methods = {}
for name, method in self.methods_w.iteritems():
if (not isinstance(method, UndefMethod) and
(visibility is None or method.visibility == visibility)):
methods[name] = None
if inherit:
for w_mod in self.included_modules:
for name in w_mod.methods(space, visibility=visibility):
method = self._find_method_pure(space, name, self.version)
if method is None or not isinstance(method, UndefMethod):
methods[name] = None
return methods.keys()
def set_const(self, space, name, w_obj):
self.mutated()
self.constants_w[name] = w_obj
if isinstance(w_obj, W_ModuleObject) and w_obj.name is None and self.name is not None:
w_obj.set_name_in_scope(space, name, self)
def find_const(self, space, name, autoload=True):
w_res = self.find_included_const(space, name, autoload=autoload)
if w_res is None:
return space.w_object.find_const(space, name, autoload=autoload)
else:
return w_res
@jit.unroll_safe
def find_included_const(self, space, name, autoload=True):
w_res = self.find_local_const(space, name, autoload=autoload)
if w_res is None:
for w_mod in self.included_modules:
w_res = w_mod.find_local_const(space, name, autoload=autoload)
if w_res is not None:
break
return w_res
def included_constants(self, space):
consts = {}
for const in self.constants_w:
consts[const] = None
for w_mod in self.included_modules:
for const in w_mod.included_constants(space):
consts[const] = None
return consts.keys()
def lexical_constants(self, space):
consts = {}
frame = space.getexecutioncontext().gettoprubyframe()
scope = frame.lexical_scope
while scope is not None:
assert isinstance(scope.w_mod, W_ModuleObject)
for const in scope.w_mod.constants_w:
consts[const] = None
scope = scope.backscope
return consts.keys()
def local_constants(self, space):
return self.constants_w.keys()
def inherited_constants(self, space):
return self.local_constants(space)
def find_local_const(self, space, name, autoload=True):
w_res = self._find_const_pure(name, self.version)
if autoload and isinstance(w_res, W_Autoload):
self.constants_w[name] = None
try:
w_res.load()
finally:
w_new_res = self.constants_w.get(name, None)
if not w_new_res:
self.constants_w[name] = w_res
else:
w_res = w_new_res
return w_res
else:
return w_res
@jit.elidable
def _find_const_pure(self, name, version):
return self.constants_w.get(name, None)
@jit.unroll_safe
def set_class_var(self, space, name, w_obj):
for module in reversed(self.ancestors()):
assert isinstance(module, W_ModuleObject)
w_res = module.class_variables.get(space, name)
if w_res is not None or module is self:
module.class_variables.set(space, name, w_obj)
if module is self:
for descendant in self.descendants:
descendant.remove_class_var(space, name)
@jit.unroll_safe
def find_class_var(self, space, name):
w_res = self.class_variables.get(space, name)
if w_res is None:
ancestors = self.ancestors()
for idx in xrange(1, len(ancestors)):
module = ancestors[idx]
assert isinstance(module, W_ModuleObject)
w_res = module.class_variables.get(space, name)
if w_res is not None:
break
return w_res
@jit.unroll_safe
def remove_class_var(self, space, name):
self.class_variables.delete(name)
for descendant in self.descendants:
descendant.remove_class_var(space, name)
def set_instance_var(self, space, name, w_value):
return self.instance_variables.set(space, name, w_value)
def find_instance_var(self, space, name):
return self.instance_variables.get(space, name)
def set_constraint_var(self, space, name, w_value):
return self.instance_variable_constraints.set(space, name, w_value)
def find_constraint_var(self, space, name):
return self.instance_variable_constraints.get(space, name)
def set_class_constraint_var(self, space, name, w_value):
return self.class_variable_constraints.set(space, name, w_value)
def find_class_constraint_var(self, space, name):
return self.class_variable_constraints.get(space, name)
def copy_instance_vars(self, space, w_other):
assert isinstance(w_other, W_ModuleObject)
for key in w_other.instance_variables:
w_value = w_other.instance_variables.get(space, key)
self.set_instance_var(space, key, w_value)
def set_flag(self, space, name):
self.flags.set(space, name, space.w_true)
def unset_flag(self, space, name):
self.flags.set(space, name, space.w_false)
def get_flag(self, space, name):
return self.flags.get(space, name) or space.w_false
def ancestors(self, include_singleton=True, include_self=True):
if include_self:
return [self] + self.included_modules
else:
return self.included_modules[:]
@jit.unroll_safe
def is_ancestor_of(self, w_cls):
if self is w_cls:
return True
for w_mod in w_cls.included_modules:
if self is w_mod:
return True
if w_cls.superclass is not None:
return self.is_ancestor_of(w_cls.superclass)
return False
def include_module(self, space, w_mod):
assert isinstance(w_mod, W_ModuleObject)
if w_mod not in self.ancestors():
self.included_modules = [w_mod] + self.included_modules
w_mod.included(space, self)
def included(self, space, w_mod):
self.descendants.append(w_mod)
if space.respond_to(self, "included"):
space.send(self, "included", [w_mod])
def extend_object(self, space, w_mod):
if self not in w_mod.ancestors():
self.descendants.append(w_mod)
w_mod.included_modules = [self] + w_mod.included_modules
def set_visibility(self, space, names_w, visibility):
names = [space.symbol_w(w_name) for w_name in names_w]
if names:
for name in names:
self.set_method_visibility(space, name, visibility)
else:
self.set_default_visibility(space, visibility)
def set_default_visibility(self, space, visibility):
frame = space.getexecutioncontext().gettoprubyframe()
frame.visibility = visibility
def set_method_visibility(self, space, name, visibility):
w_method = self.find_method(space, name)
if w_method is None or isinstance(w_method, UndefMethod):
w_method = space.w_object.find_method(space, name)
if w_method is None or isinstance(w_method, UndefMethod):
cls_name = space.obj_to_s(self)
raise space.error(space.w_NameError,
"undefined method `%s' for class `%s'" % (name, cls_name)
)
w_method.update_visibility(visibility)
def method_added(self, space, w_name):
space.send(self, "method_added", [w_name])
def method_undefined(self, space, w_name):
space.send(self, "method_undefined", [w_name])
def method_removed(self, space, w_name):
space.send(self, "method_removed", [w_name])
def set_name_in_scope(self, space, name, w_scope):
self.name = space.buildname(name, w_scope)
for name, w_const in self.constants_w.iteritems():
if isinstance(w_const, W_ModuleObject):
w_const.set_name_in_scope(space, name, self)
@classdef.singleton_method("nesting")
def singleton_method_nesting(self, space):
frame = space.getexecutioncontext().gettoprubyframe()
modules_w = []
scope = frame.lexical_scope
while scope is not None:
modules_w.append(scope.w_mod)
scope = scope.backscope
return space.newarray(modules_w)
@classdef.singleton_method("allocate")
def method_allocate(self, space):
return W_ModuleObject(space, None, self)
@classdef.method("initialize")
def method_initialize(self, space, block):
if block is not None:
space.send(self, "module_exec", [self], block)
@classdef.method("to_s")
def method_to_s(self, space):
name = self.name
if name is None:
return space.newstr_fromstr(space.any_to_s(self))
return space.newstr_fromstr(name)
@classdef.method("include")
def method_include(self, space, args_w):
for w_mod in args_w:
if type(w_mod) is not W_ModuleObject:
raise space.error(
space.w_TypeError,
"wrong argument type %s (expected Module)" % space.obj_to_s(space.getclass(w_mod))
)
for w_mod in reversed(args_w):
space.send(w_mod, "append_features", [self])
return self
@classdef.method("include?")
def method_includep(self, space, w_mod):
if type(w_mod) is not W_ModuleObject:
raise space.error(
space.w_TypeError,
"wrong argument type %s (expected Module)" % space.obj_to_s(space.getclass(w_mod))
)
if w_mod is self:
return space.w_false
return space.newbool(w_mod in self.ancestors())
@classdef.method("append_features")
def method_append_features(self, space, w_mod):
if w_mod in self.ancestors():
raise space.error(space.w_ArgumentError, "cyclic include detected")
-1.0, -1.0], [1.0, -1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank0(self):
arg=Data(-14.3673757927,self.functionspace)
arg.setTaggedValue(1,-91.0616949648)
res=abs(arg)
ref=Data(14.3673757927,self.functionspace)
ref.setTaggedValue(1,91.0616949648)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank1(self):
arg=Data(numpy.array([-81.821732775420642, -68.22226512766818]),self.functionspace)
arg.setTaggedValue(1,numpy.array([21.333617426834195, 10.209481057564346]))
res=abs(arg)
ref=Data(numpy.array([81.821732775420642, 68.22226512766818]),self.functionspace)
ref.setTaggedValue(1,numpy.array([21.333617426834195, 10.209481057564346]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank2(self):
arg=Data(numpy.array([[-35.703961827623615, 94.271132011685381, -77.391701661321079, -48.396751261576078,
-89.628632351273765], [49.30062196572834, -45.716685546575796, -91.97360399287524, -46.086717554689407, 94.50160817876062],
[23.260490557882292, -46.121623208221905, 64.433592032582311, 18.144341652350775, -44.21085548471779], [-61.083601852216219,
85.575046878129143, 52.75009956117529, 97.008285145570085, 56.751065315172809]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-51.972473313741155, -55.497035445328713, 62.267323877673789, 18.670956133573171,
13.711215778429931], [-48.087336536814519, -76.786375607585896, 37.410127192208563, 87.684260266087875, -26.905076717599144],
[16.189496788130981, 87.750429072332139, -36.767204229576599, -71.524650024934132, 81.291275301664541], [7.3770859265969762,
-24.93630589052367, 61.708601944027265, 89.294133020898926, -2.7788897536858315]]))
res=abs(arg)
ref=Data(numpy.array([[35.703961827623615, 94.271132011685381, 77.391701661321079, 48.396751261576078,
89.628632351273765], [49.30062196572834, 45.716685546575796, 91.97360399287524, 46.086717554689407, 94.50160817876062],
[23.260490557882292, 46.121623208221905, 64.433592032582311, 18.144341652350775, 44.21085548471779], [61.083601852216219,
85.575046878129143, 52.75009956117529, 97.008285145570085, 56.751065315172809]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[51.972473313741155, 55.497035445328713, 62.267323877673789, 18.670956133573171,
13.711215778429931], [48.087336536814519, 76.786375607585896, 37.410127192208563, 87.684260266087875, 26.905076717599144],
[16.189496788130981, 87.750429072332139, 36.767204229576599, 71.524650024934132, 81.291275301664541], [7.3770859265969762,
24.93630589052367, 61.708601944027265, 89.294133020898926, 2.7788897536858315]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank3(self):
arg=Data(numpy.array([[[-73.879162728531952, 53.891922757125315], [-30.709972491596574, -49.27453562582631]],
[[99.200427899109769, -0.10455889631015225], [24.929977391825204, -25.196431617614095]], [[99.69470286180362,
49.629118870818502], [-18.286571682827372, -99.882333404908422]], [[94.596602624460871, -48.944752738316531],
[-86.357256849018469, 94.554119229106021]], [[37.481086962966259, 84.979891468391372], [64.015940250013614,
-48.600306234165757]], [[-1.3540803820464049, 43.87503589064076], [24.242456069744136,
86.552268702416399]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[37.413937980923265, -0.28411691206147793], [33.044360612769196, -66.03355173059704]],
[[37.536911848971073, 0.023208526118992268], [-26.527789666862972, -33.2402194708271]], [[55.327425103705878,
-84.395066891225468], [45.013227563401301, -95.875031468356525]], [[64.760193848108571, -73.302359966808424],
[54.095816937340203, 37.527678340113113]], [[-76.71381733348575, -39.352383403035063], [80.080299993848996,
0.010359221408108965]], [[-96.050890564474372, -42.823985894886071], [3.4476034725966258, -36.523928707662435]]]))
res=abs(arg)
ref=Data(numpy.array([[[73.879162728531952, 53.891922757125315], [30.709972491596574, 49.27453562582631]],
[[99.200427899109769, 0.10455889631015225], [24.929977391825204, 25.196431617614095]], [[99.69470286180362,
49.629118870818502], [18.286571682827372, 99.882333404908422]], [[94.596602624460871, 48.944752738316531], [86.357256849018469,
94.554119229106021]], [[37.481086962966259, 84.979891468391372], [64.015940250013614, 48.600306234165757]],
[[1.3540803820464049, 43.87503589064076], [24.242456069744136, 86.552268702416399]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[37.413937980923265, 0.28411691206147793], [33.044360612769196, 66.03355173059704]],
[[37.536911848971073, 0.023208526118992268], [26.527789666862972, 33.2402194708271]], [[55.327425103705878,
84.395066891225468], [45.013227563401301, 95.875031468356525]], [[64.760193848108571, 73.302359966808424], [54.095816937340203,
37.527678340113113]], [[76.71381733348575, 39.352383403035063], [80.080299993848996, 0.010359221408108965]],
[[96.050890564474372, 42.823985894886071], [3.4476034725966258, 36.523928707662435]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank4(self):
arg=Data(numpy.array([[[[19.834948759065469, -88.423654358742937, -3.7896795469446403, 90.413210798680979],
[-25.385662633962866, 52.183056091414045, -99.49780362285307, -31.235081588621895], [-99.70036777743492, 29.485863942948555,
38.1126429021798, 42.984689715140888]], [[27.006030037219915, -32.962297315637798, 73.148237069388955, 14.390644834253024],
[10.073562289878168, 1.0118517682240196, 68.902436033381321, -49.896632248801367], [75.141298395785128, 83.15384865784992,
62.286701933145707, -70.233955546976915]]], [[[28.90835831201062, -93.402007113192667, 60.022984931751211, 42.105451316412314],
[-5.8776161547639418, -30.767571979578307, 22.107942044796999, 88.562401747987877], [-11.20004264511995, -76.166717134240727,
90.327718641335366, -63.619067436488663]], [[61.760636603356744, 63.532544897685085, 12.695030988835626, -72.470637851208224],
[35.616750250889851, -47.984761590856408, 46.377995043509088, 70.069148102663178], [71.939740735848517, 60.377059082125186,
53.39384866277004, 4.4345554627479515]]], [[[40.870145540859255, 96.113732253205882, -19.523812196208908, -94.457638344656488],
[-39.941605835336325, 29.189824293279798, 27.298137473725333, 95.978114614227195], [-98.911416748736187, 81.302220082165206,
-70.484408590592508, 82.172581716251415]], [[-24.045113821484222, 58.192111786183631, 39.743958607008949, 6.9836272098514627],
[12.807988012918514, -49.209827092167366, -77.845334523657925, -85.486568474094412], [4.895784651511434, 58.888254548139173,
22.796583205570116, 67.681339974157936]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-21.339241360814867, 53.771385770613193, -93.689799860345531, -59.85130407171102],
[-91.676322024879113, -86.725986612907889, 6.8756644612703468, 2.4970468120601055], [86.426318617205339, 30.622935649678567,
38.69730973485099, 52.92337459984978]], [[8.8277708157388872, 80.007316235717923, -40.336819994603054, -24.353422327790653],
[-13.747314026960296, 16.958567973724541, -57.50595682676709, -56.25177925251905], [-31.207714298736988, -22.275561102343119,
16.313479988183616, -76.010284971162221]]], [[[26.998597697380859, -15.622385252752323, -52.52173936476985,
82.819884716042083], [78.907717062539973, -79.272005467613681, -83.47340370656633, 73.867592414028053], [28.703033182348804,
-20.988872679092665, 37.290447061925107, 8.1014432535999816]], [[-45.048580940662731, 19.491468419339085, 64.568062512177647,
11.178618880088734], [34.691570775011911, -69.589063689142193, 61.364777508593363, 77.280249139969868], [-1.1830828331200678,
91.262256646734187, -5.021627081867905, 93.388437572311091]]], [[[21.298620038202813, -13.98893927515401, 49.182789882221499,
57.595487238415643], [-79.702455143171449, 70.925982455775426, 81.897869050808879, -60.930959954287275], [-57.754562218588148,
-29.858113075280372, -14.897533692783952, 6.0864257187503057]], [[-3.5671759547432771, 84.139996576651015, 39.806429474961305,
9.3646747259164727], [45.475947995072914, 10.14946725212269, -3.9530147571287699, 23.62077091218417], [-34.033830893546195,
-8.3157508831654496, -64.196930272577688, 73.499380413212378]]]]))
res=abs(arg)
ref=Data(numpy.array([[[[19.834948759065469, 88.423654358742937, 3.7896795469446403, 90.413210798680979],
[25.385662633962866, 52.183056091414045, 99.49780362285307, 31.235081588621895], [99.70036777743492, 29.485863942948555,
38.1126429021798, 42.984689715140888]], [[27.006030037219915, 32.962297315637798, 73.148237069388955, 14.390644834253024],
[10.073562289878168, 1.0118517682240196, 68.902436033381321, 49.896632248801367], [75.141298395785128, 83.15384865784992,
62.286701933145707, 70.233955546976915]]], [[[28.90835831201062, 93.402007113192667, 60.022984931751211, 42.105451316412314],
[5.8776161547639418, 30.767571979578307, 22.107942044796999, 88.562401747987877], [11.20004264511995, 76.166717134240727,
90.327718641335366, 63.619067436488663]], [[61.760636603356744, 63.532544897685085, 12.695030988835626, 72.470637851208224],
[35.616750250889851, 47.984761590856408, 46.377995043509088, 70.069148102663178], [71.939740735848517, 60.377059082125186,
53.39384866277004, 4.4345554627479515]]], [[[40.870145540859255, 96.113732253205882, 19.523812196208908, 94.457638344656488],
[39.941605835336325, 29.189824293279798, 27.298137473725333, 95.978114614227195], [98.911416748736187, 81.302220082165206,
70.484408590592508, 82.172581716251415]], [[24.045113821484222, 58.192111786183631, 39.743958607008949, 6.9836272098514627],
[12.807988012918514, 49.209827092167366, 77.845334523657925, 85.486568474094412], [4.895784651511434, 58.888254548139173,
22.796583205570116, 67.681339974157936]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[21.339241360814867, 53.771385770613193, 93.689799860345531, 59.85130407171102],
[91.676322024879113, 86.725986612907889, 6.8756644612703468, 2.4970468120601055], [86.426318617205339, 30.622935649678567,
38.69730973485099, 52.92337459984978]], [[8.8277708157388872, 80.007316235717923, 40.336819994603054, 24.353422327790653],
[13.747314026960296, 16.958567973724541, 57.50595682676709, 56.25177925251905], [31.207714298736988, 22.275561102343119,
16.313479988183616, 76.010284971162221]]], [[[26.998597697380859, 15.622385252752323, 52.52173936476985, 82.819884716042083],
[78.907717062539973, 79.272005467613681, 83.47340370656633, 73.867592414028053], [28.703033182348804, 20.988872679092665,
37.290447061925107, 8.1014432535999816]], [[45.048580940662731, 19.491468419339085, 64.568062512177647, 11.178618880088734],
[34.691570775011911, 69.589063689142193, 61.364777508593363, 77.280249139969868], [1.1830828331200678, 91.262256646734187,
5.021627081867905, 93.388437572311091]]], [[[21.298620038202813, 13.98893927515401, 49.182789882221499, 57.595487238415643],
[79.702455143171449, 70.925982455775426, 81.897869050808879, 60.930959954287275], [57.754562218588148, 29.858113075280372,
14.897533692783952, 6.0864257187503057]], [[3.5671759547432771, 84.139996576651015, 39.806429474961305, 9.3646747259164727],
[45.475947995072914, 10.14946725212269, 3.9530147571287699, 23.62077091218417], [34.033830893546195, 8.3157508831654496,
64.196930272577688, 73.499380413212378]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank0(self):
arg=Data(0.0304173948959,self.functionspace)
arg.setTaggedValue(1,0.218413236568)
res=length(arg)
ref=Data(0.0304173948959,self.functionspace)
ref.setTaggedValue(1,0.218413236568)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank1(self):
arg=Data(numpy.array([0.39703364688152853, -0.33246454817593807]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.53598331151915435, 0.50067334409291053]))
res=length(arg)
ref=Data(0.517849777976,self.functionspace)
ref.setTaggedValue(1,0.73345204868)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank2(self):
arg=Data(numpy.array([[0.6907462872229877, -0.90522553862549726, 0.087785407451554276, 0.30466419540456768,
0.79322552033540972], [0.88191058742529571, 0.99529532885936489, 0.41490962783197238, -0.0016893790093754912,
-0.95814885065677502], [-0.060249764286741447, 0.63991926602596116, -0.086836131633126534, 0.18124915949321885,
0.68271069967418541], [0.64740861624348423, -0.57455334179273243, -0.5571704702710476, 0.2573850096331336,
-0.34168400956685985]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.81018021133002383, 0.99939497604482352, -0.14079723796118393, 0.40272555558488365,
0.18472338544851841], [-0.07594389046701755, 0.63538212017493612, -0.60010668894251618, -0.33610184381106811,
-0.15191875538531718], [-0.24439106568273194, 0.66671313634788354, 0.14904931462513904, -0.58653890475427217,
-0.58062369844301442], [0.098248585440467551, 0.20530555521782179, -0.51610019710067645, 0.16323562948354797,
-0.71041456409833881]]))
res=length(arg)
ref=Data(2.6546513714,self.functionspace)
ref.setTaggedValue(1,2.19865063671)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank3(self):
arg=Data(numpy.array([[[-0.90704201804086337, 0.6329509253938721], [0.21915003710942882, -0.77286765743151387]],
[[-0.49454988231884833, -0.52787084998857448], [-0.50038642296401559, 0.25066877240869223]], [[-0.11435301241890539,
-0.43863272457515157], [-0.21789841788237019, 0.67485153176592272]], [[-0.55566679864765667, -0.57930055750016884],
[0.86011645143557036, -0.7526814967676656]], [[0.51094878077660111, 0.77929881123688749], [-0.42495639450230005,
-0.07585333420623952]], [[-0.89054330821722716, -0.35325589691741888], [-0.3768246899267691,
-0.41975230182765833]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.12217784564610956, -0.42220631009895904], [-0.61429599365799681,
0.61618111022446365]], [[-0.97675816669720295, 0.82574624011080133], [0.81295724921140167, 0.25317345312076855]],
[[-0.051786152179434497, 0.7305249935930429], [-0.93380271417452732, 0.50018267655097737]], [[-0.80264399896632499,
0.79509218774376844], [-0.21791667132633941, 0.66634447245200645]], [[-0.55794532541196795, -0.048202617623965605],
[-0.05960274244353414, 0.74611871917265127]], [[0.88304823875965166, 0.42665187568627805], [-0.43824304428388317,
-0.62742457744585889]]]))
res=length(arg)
ref=Data(2.76676324475,self.functionspace)
ref.setTaggedValue(1,3.02637754858)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank4(self):
arg=Data(numpy.array([[[[0.031685672552886901, -0.9267701908635706, -0.056148458903377607, -0.84202454095770185],
[-0.3616646666858836, -0.29520741814627049, 0.024595806152944988, -0.71158936975814591], [0.12440081378701895,
0.72960501658634347, -0.87170545922253151, 0.10882643738812559]], [[0.88800242901141169, 0.48693301422640589,
-0.13887736360495673, -0.743971681222801], [-0.74717247198853753, 0.35260473534815429, 0.43837149883392601,
-0.55027619071689737], [0.66850441314063103, 0.7728717623705943, 0.21470523696142552, -0.71541050236116877]]],
[[[-0.48211303782598347, 0.3644457740018654, 0.68984391713960602, 0.74515540572117134], [0.53899835618675929,
-0.70996632321229947, -0.51515930082178918, -0.36888505048093223], [0.78774470226335747, -0.39544353241612185,
0.32281697817612787, -0.16311128990188162]], [[-0.51374217556516255, -0.45792789001444856, 0.47007708506811818,
-0.43333371235667362], [-0.02632140668309213, 0.93007210792179462, 0.59736202366291802, 0.22152676969085516],
[0.39775547303207204, 0.53313877938239496, 0.77934427730455358, -0.21566366366398793]]], [[[0.91343257162829294,
-0.77320607588319645, -0.85087366672245945, -0.8231988743945351], [0.2844336912954244, -0.91728899258227847,
-0.46154275241222287, -0.93255280333208801], [-0.53369991345904522, 0.12949000049493731, 0.53421477536661266,
-0.63975708880504234]], [[0.058270730436794649, 0.0515918698875375, -0.24523619977036026, 0.29671975332241707],
[-0.95019879958514597, -0.94737283445325193, -0.41748226318386861, -0.048340741857560765], [0.59312485406738369,
-0.30988717510892605, 0.090027828305644153, -0.51722372921834436]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.20169792059747449, 0.13920330493621691, 0.034614492760971061,
-0.82851290218784412], [-0.01651072019815425, -0.78097847045185942, -0.13764015571975197, -0.35461745589441884],
[-0.49540307831103148, 0.2474487427834613, 0.22695961470352821, -0.37681697269105396]], [[0.68611428610099079,
-0.29255652866384385, -0.42345462193199213, 0.82794567130265717], [-0.092325345849896712, -0.15183768034385192,
0.13712248884188671, -0.72118044002659931], [-0.050604578031925973, -0.58555183018848322, -0.92016117326965108,
0.90294256985722066]]], [[[0.37734783987332321, 0.50440698564950592, -0.98813708121482202, 0.35026860039322605],
[0.69439644470699591, -0.065391298373910445, 0.22537555580617075, -0.56010684906819108], [0.8850708627713344,
0.33442383771972017, -0.88133340777125468, 0.79499967022722062]], [[0.84658289102126205, -0.45737265507509539,
0.22891245018035788, 0.66506738603993654], [0.30854215900653492, -0.15997939628404678, 0.60133183458548922,
0.41180859119482771], [-0.82182443995887455, 0.40193978476563985, -0.47097558780935489, -0.78813126661061927]]],
[[[-0.60025729863753186, -0.47916988408835803, -0.66879674780784004, -0.34290183723542933], [0.86889784066785403,
0.32127797136956282, 0.96139056560192393, 0.19777452842099286], [-0.52352911870216756, 0.70260881377974083,
-0.83733962168226328, -0.56735885586741075]], [[-0.94301726877443093, -0.25226331153593828, 0.52038556769907629,
0.53828722724477851], [-0.70767715580900048, -0.57712655180776129, -0.14200458485618395, -0.1111721398291996],
[0.64852743898007059, 0.99188751270956743, 0.55982434354197941, 0.038358717131004916]]]]))
res=length(arg)
ref=Data(4.84097039803,self.functionspace)
ref.setTaggedValue(1,4.824055271)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank0(self):
arg=Data(-0.219558082185,self.functionspace)
arg.setTaggedValue(1,0.373894454941)
res=maxval(arg)
ref=Data(-0.219558082185,self.functionspace)
ref.setTaggedValue(1,0.373894454941)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank1(self):
arg=Data(numpy.array([0.64744990357184862, -0.52792070755787024]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.10929307972444979, 0.83862721932489936]))
res=maxval(arg)
ref=Data(0.647449903572,self.functionspace)
ref.setTaggedValue(1,0.838627219325)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank2(self):
arg=Data(numpy.array([[-0.27229065227200344, 0.82294949506313886, -0.17177977432508462, -0.010882074546768816,
0.21768109521645918], [-0.29157181238782481, -0.25380425885757485, 0.027706303762511597, -0.94845012536927964,
0.87176092732644639], [-0.51643332578214518, 0.71998926614777581, 0.40354991809580687, 0.70904315000536799,
0.54655648312080007], [0.32165817766188853, -0.20424131255028888, 0.42895961651274672, -0.99791274480618064,
-0.85669519376242986]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.97262314128809613, 0.48547675148247826, 0.042278165643087728, -0.41624875992248667,
0.030567800083523444], [-0.049791194357233781, -0.79704488987202815, -0.96082903842770118, -0.83554878345036676,
0.60236115537073709], [0.28354667286636603, -0.29929954525932323, 0.022969958455315576, -0.24737146774844909,
0.19469978983867731], [-0.35513081769146426, -0.1046032314241474, 0.49567238233255839, -0.80993625419310633,
-0.9139531605288036]]))
res=maxval(arg)
ref=Data(0.871760927326,self.functionspace)
ref.setTaggedValue(1,0.972623141288)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank3(self):
arg=Data(numpy.array([[[0.61474553298852697, 0.55779486217197505], [-0.31091458562805419, 0.016419889635135521]],
[[0.21004151551334682, 0.027687106765762914], [0.6637113716450791, -0.95040841718825075]], [[-0.9300566761481408,
-0.68906964030797435], [-0.97014359375905679, -0.74418973910997255]], [[0.97835172429442774, -0.46756642182408092],
[-0.42578086461554476, 0.52069167480569556]], [[-0.38782064307268715, 0.49053364163876134], [0.068892813320603263,
-0.053107367737293076]], [[-0.48133213301475331, 0.25593099013174481], [0.44390577068431614,
-0.97257874780052989]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[0.257603663342449, -0.038214017051409144], [-0.66873613005791666,
-0.035099420794828529]], [[0.91705389480638777, -0.92302237979729274], [0.7106922910822433, -0.94404085701758933]],
[[-0.79556970254884352, -0.25132479916123152], [0.29858220297465121, 0.90707472048112803]], [[-0.29244201831636918,
-0.017346997146175047], [0.12733928111159498, -0.38855138005928658]], [[0.14291175066952921, -0.49761469275017678],
[-0.76189392983334514, 0.84493776228691786]], [[-0.22005917389939156, -0.61656374043989004], [0.99298796284139845,
-0.067815876101644967]]]))
res=maxval(arg)
ref=Data(0.978351724294,self.functionspace)
ref.setTaggedValue(1,0.992987962841)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank4(self):
arg=Data(numpy.array([[[[-0.38913213142622194, -0.54148345216537774, 0.41714738540363649, 0.71966537757449256],
[-0.88902480268446227, -0.3777307304947799, -0.13183640051157219, 0.81415347389128234], [-0.74384993993115311,
-0.89127969698262977, -0.55809388984463593, 0.83808549468591687]], [[-0.0072160550394104739, -0.32635219120000691,
0.62522095163012725, -0.84470730211227218], [-0.76620143726977852, -0.49704334323428423, 0.65091921570676603,
0.37557075348586233], [-0.88570985653924961, -0.14885693428091606, -0.1460372910003831, 0.46444747179886625]]],
[[[0.30454098886894498, 0.6867161497858465, 0.72424680264691355, 0.5095615427094411], [0.072474613257559994,
0.43806936539601549, -0.59905605757280056, -0.45990321243729815], [-0.72712992491035378, -0.55689232155025548,
0.36037470124764459, -0.57195607819276018]], [[0.0051060589653528776, -0.47599982553998998, -0.39156196066990367,
-0.71880248868370389], [0.41451955450758748, 0.0028147774045290674, -0.6972003711983854, 0.78507608882318736],
[0.25418862509575768, 0.2284337652701498, 0.61856440627353049, 0.98714160660309891]]], [[[-0.47720293386376555,
-0.65125648891362786, -0.30435692372835654, 0.31977497838442503], [0.72827978446594854, -0.63983256938337552,
0.78982468457827881, 0.22954824117307959], [0.32315333011323544, 0.53527371494472065, -0.4131594330366064,
0.99215992692482535]], [[-0.74789735956161274, -0.62925352602039042, 0.71361119864052269, -0.98014330258009075],
[-0.89800389430130223, -0.37060754911664562, 0.3856639538855593, 0.034422663486305183], [-0.34490780926818876,
0.47458909120499637, 0.94818559671902958, 0.1617906804998257]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[0.99933153424495091, -0.39534988719092179, -0.16778504692207585,
-0.5906967701363508], [0.43387683071959104, 0.99162615949373745, 0.10547807703791179, -0.8113777964293909],
[0.24291475766513315, -0.71669244345435779, -0.76485897580613305, 0.15564799608712043]], [[-0.75156489257223669,
-0.055450247835175936, 0.3346750287693776, -0.66254424416459123], [0.35374045325725345, -0.051590559912436884,
-0.587757300739292, -0.33917336326606917], [-0.57544619252547657, 0.20907053572412782, 0.68711149771337832,
-0.056393263581338671]]], [[[0.75211852960020509, -0.10030934714915718, 0.33951992771212902, 0.60018880521446327],
[0.78716758837909295, -0.059231168586686644, -0.35866282572045227, 0.85083431016927791], [0.15298677857710419,
0.89780425582787293, -0.20576313384645473, 0.062421360873735843]], [[-0.70974271086498986, -0.45339037418498562,
0.41140062690705359, -0.37665346319424886], [-0.044537762904711675, -0.39079696673697262, 0.089532841376569916,
0.2190192547531522], [0.36139300850043266, -0.44279309647849896, -0.86452061630608856, -0.1231662099055526]]],
[[[-0.58039192544896112, 0.53706765389132238, -0.72356516474408639, 0.6503741573846944], [-0.30912719510660591,
-0.83285543652320859, -0.37306494080273778, 0.6518672264629326], [0.98787250878747979, 0.54733052031198159,
-0.15622032199949798, 0.09467999908286262]], [[0.40533336391796038, 0.73239200515802327, 0.39369121056194256,
0.081340379201521706], [-0.88455610311843214, 0.51118489146623691, -0.19795740083901325, 0.46388740676326989],
[0.54780674501660931, 0.63586854173407947, 0.92134722611145814, -0.39904465723137394]]]]))
res=maxval(arg)
ref=Data(0.992159926925,self.functionspace)
ref.setTaggedValue(1,0.999331534245)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_minval_taggedData_rank0(self):
arg=Data(0.00722788253378,self.functionspace)
arg.setTaggedValue(1,0.691024712935)
res=minval(arg)
ref=Data(0.00722788253378,self.functionspace)
ref.setTaggedValue(1,0.691024712935)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_minval_taggedData_rank1(self):
arg=Data(numpy.array([-0.47859075115756422, -0.15003593348682531]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.067933816863879004, -0.74579305994260148]))
res=minval(arg)
ref=Data(-0.478590751158,self.functionspace)
ref.setTaggedValue(1,-0.745793059943)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_minval_taggedData_rank2(self):
arg=Data(numpy.array([[-0.36168873882657993, -0.67372921463813573, 0.95569685886688305, 0.15779096197431586,
-0.24898227425545327], [-0.27356968554638628, 0.084426955507445944, -0.87908626632112941, -0.46051995344239027,
-0.42541441304041916], [-0.14074836177854189, 0.75123070420356286, 0.86230982812739998, -0.54837108857321315,
-0.77749802778211086], [-0.022482114313683077, 0.54155540121340873, -0.96328224231771142, 0.14101127782001344,
0.44096380596153772]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-0.4800201278086158, 0.030265479756139024, 0.18506553588051, 0.034952750086585604,
0.31613749260546875], [0.21702894874281076, 0.9905115362133845, 0.12091812867766771, | |
keeps reading forever
def __len__(self):
return 0 # so that the main() send loop is never entered
def append(self, text_and_image):
self.send_q.append(text_and_image)
def send_messages_forever(self):
# this will run in a separate thread
# the "sleep()" calls allow main thread more time for image capture
while self.keep_sending:
if len(self.send_q) > 0: # send until send_q is empty
text, image = self.send_q.popleft()
sleep(0.0000001) # sleep before sending
hub_reply = self.send_frame(text, image)
self.process_hub_reply(hub_reply)
else:
sleep(0.0000001) # sleep before checking send_q again
def start(self):
# start the thread to read frames from the video stream
t = threading.Thread(target=self.send_messages_forever)
t.daemon = True
t.start()
def stop_sending(self):
self.keep_sending = False
sleep(0.0000001) # sleep to allow ZMQ to clear buffer
class Sensor:
""" Methods and attributes of a sensor, such as a temperature sensor
Each sensor is setup and started using the settings in the yaml file.
Includes methods for reading, and closing the sensor and GPIO pins.
Parameters:
sensor (text): dictionary key of current sensor being instantiated
sensors (dict): dictionary of all the sensors in the YAML file
settings (Settings object): settings object created from YAML file
"""
def __init__(self, sensor, sensors, settings, tiny_image, send_q):
""" Initializes a specific sensor using settings in the YAML file.
"""
self.tiny_image = tiny_image
self.send_q = send_q
if 'name' in sensors[sensor]:
self.name = sensors[sensor]['name']
else:
self.name = sensor
if 'gpio' in sensors[sensor]:
self.gpio = sensors[sensor]['gpio']
else:
self.gpio = 4 # GPIO pin 4 is default for testing
if 'type' in sensors[sensor]:
self.type = sensors[sensor]['type']
else:
self.type = 'Unknown'
if 'unit' in sensors[sensor]:
self.unit = sensors[sensor]['unit'].upper()
else:
self.unit = 'F'
if 'read_interval_minutes' in sensors[sensor]:
self.interval = sensors[sensor]['read_interval_minutes']
else:
self.interval = 10 # how often to read sensor in minutes
if 'min_difference' in sensors[sensor]:
self.min_difference = sensors[sensor]['min_difference']
else:
self.min_difference = 1 # minimum difference to count as reportable
self.interval *= 60.0 # convert read interval from minutes to seconds
# self.event_text is the text message for this sensor that is
# sent when the sensor value changes
# example: Barn|Temperature|85 F
# example: Barn|Humidity|42 %
# example: Garage|Temperature|71 F
# example: Compost|Moisture|95 %
# self.event_text will have self.current_reading appended when events are sent
# self.event_text = '|'.join([settings.nodename, self.name]).strip()
self.event_text = settings.nodename
# Initialize last_reading and temp_sensor variables
self.last_reading_temp = -999 # will ensure first temp reading is a change
self.last_reading_humidity = -999 # will ensure first humidity reading is a change
self.temp_sensor = None
# Sensor types
if self.type == 'DS18B20':
# note that DS18B20 requires GPIO pin 4 (unless kernel is modified)
global W1ThermSensor # for DS18B20 temperature sensor
from w1thermsensor import W1ThermSensor
self.temp_sensor = W1ThermSensor()
if (self.type == 'DHT11') or (self.type == 'DHT22'):
global adafruit_dht # for DHT11 & DHT22 temperature sensor
import adafruit_dht
if self.type == 'DHT11':
self.temp_sensor = adafruit_dht.DHT11(self.gpio)
if self.type == 'DHT22':
self.temp_sensor = adafruit_dht.DHT22(self.gpio)
if self.temp_sensor is not None:
self.check_temperature() # check one time, then start interval_timer
threading.Thread(daemon=True,
target=lambda: interval_timer(self.interval, self.check_temperature)).start()
def check_temperature(self):
""" adds temperature & humidity (if available) value from a sensor to senq_q message queue
"""
if self.type == 'DS18B20':
if self.unit == 'C':
temperature = int(self.temp_sensor.get_temperature(W1ThermSensor.DEGREES_C))
else:
temperature = int(self.temp_sensor.get_temperature(W1ThermSensor.DEGREES_F))
humidity = -999
if (self.type == 'DHT11') or (self.type == 'DHT22'):
# start from the last reported readings so that a completely failed read
# raises no NameError below and reports no change
temperature = self.last_reading_temp
humidity = self.last_reading_humidity
for i in range(5): # try for valid readings 5 times; break if valid
try:
if self.unit == 'C':
temperature = self.temp_sensor.temperature
else:
temperature = self.temp_sensor.temperature * (9 / 5) + 32
temperature = float(format(temperature, '.1f'))
humidity = self.temp_sensor.humidity
humidity = float(format(humidity, '.1f'))
break # break out of for loop if got valid readings
except RuntimeError:
sleep(3) # wait 3 seconds and try again
pass # this will retry up to 5 times before exiting the for loop
if abs(temperature - self.last_reading_temp) >= self.min_difference:
# temperature has changed from last reported temperature, therefore
# send an event message reporting temperature by appending to send_q
temp_text = str(temperature) + " " + self.unit
text = '|'.join([self.event_text, 'Temp', temp_text])
text_and_image = (text, self.tiny_image)
self.send_q.append(text_and_image)
self.last_reading_temp = temperature
if abs(humidity - self.last_reading_humidity) >= self.min_difference:
# humidity has changed from last reported humidity, therefore
# send an event message reporting humidity by appending to send_q
humidity_text = str(humidity) + " %"
# Spelling of humidity all lower case is intentional to avoid
# first letter test of "Heartbeat" in imagehub
text = '|'.join([self.event_text, 'humidity', humidity_text])
text_and_image = (text, self.tiny_image)
self.send_q.append(text_and_image)
self.last_reading_humidity = humidity
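# --- Usage sketch (illustrative, not part of the original module) ---------
# A Sensor is normally created from the YAML settings file; the minimal dict
# below only uses keys that __init__ above actually reads. The "settings",
# "tiny_image" and "send_q" objects are assumed to come from the main
# program (send_q is the deque of (text, image) tuples shared with the
# sender thread).
#
# sensors = {'temperature1': {'name': 'barn_temp', 'type': 'DHT22',
#                             'gpio': 4, 'unit': 'F',
#                             'read_interval_minutes': 10,
#                             'min_difference': 1}}
# barn_temp = Sensor('temperature1', sensors, settings, tiny_image, send_q)
# # readings are appended to send_q whenever they change by >= min_difference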
class Light:
""" Methods and attributes of a light controlled by an RPi GPIO pin
Each light is setup and started using the settings in the yaml file.
Includes methods for turning the light on and off using the GPIO pins.
Parameters:
light (text): dictionary key of the current light being instantiated
lights (dict): dictionary of all the lights in the YAML file
settings (Settings object): settings object created from YAML file
"""
def __init__(self, light, lights, settings):
""" Initializes a specific light using settings in the YAML file.
"""
if 'name' in lights[light]:
self.name = lights[light]['name']
else:
self.name = light
if 'gpio' in lights[light]:
self.gpio = lights[light]['gpio']
else:
self.gpio = 18 # GPIO pin 18 is the default for testing
if 'on' in lights[light]:
self.on = lights[light]['on']
else:
self.on = 'continuous'
GPIO.setup(self.gpio, GPIO.OUT)
if self.on == 'continuous':
self.turn_on()
else: # set up light on/off cycling other than continuous
pass # for example, during certain hours
def turn_on(self):
""" Turns on the light using the GPIO pins
"""
GPIO.output(self.gpio, True) # turn on light
def turn_off(self):
""" Turns off the light using the GPIO pins
"""
GPIO.output(self.gpio, False) # turn off light
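# --- Usage sketch (illustrative, not part of the original module) ---------
# A Light wraps one GPIO output pin; the dict below only uses the keys that
# __init__ above reads ('name', 'gpio', 'on'). The "settings" object is
# assumed to come from the main program.
#
# lights = {'floodlight': {'name': 'barn_floodlight', 'gpio': 18,
#                          'on': 'continuous'}}
# floodlight = Light('floodlight', lights, settings)  # turned on immediately
# floodlight.turn_off()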
class PiCameraUnthreadedStream():
""" Rreads the PiCamera without threading.
The PiVideoStream class within imutils.VideoStream provides a threaded way
to read the PiCamera images. This class provides a way to read the PiCamera
without threading, primarily intended for testing. For compatibility, the
method names are the same as imutils.VideoStream.
"""
def __init__(self, resolution=(320, 240), framerate=32, **kwargs):
from picamera.array import PiRGBArray
from picamera import PiCamera
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture,
format="bgr",
use_video_port=True)
self.frame = None
def read(self):
f = next(self.stream) # or f = self.stream.read()?
self.frame = f.array
self.rawCapture.truncate(0)
return self.frame
def stop(self):
self.close()
def close(self):
self.stream.close()
self.rawCapture.close()
self.camera.close()
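# --- Usage sketch (illustrative, not part of the original module) ---------
# Unthreaded reading: read() blocks until the next frame arrives, so the
# loop below is paced by the camera framerate (requires a Raspberry Pi with
# the picamera package installed).
#
# stream = PiCameraUnthreadedStream(resolution=(320, 240), framerate=32)
# for _ in range(10):
#     frame = stream.read()   # numpy BGR array, shape (240, 320, 3)
# stream.stop()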
class Camera:
""" Methods and attributes of a camera
Each camera is setup and started using the settings in the yaml file.
Includes setup of detectors, e.g., detector for motion
Parameters:
camera (text): dict key of current camera being instantiated
cameras (dict): dictionary of all cameras named in YAML file
settings (Settings object): settings object created from YAML file
"""
def __init__(self, camera, cameras, settings):
""" Initializes all the camera settings from settings in the YAML file.
"""
self.cam = None
self.jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default
# check picamera version
try:
picamversion = require('picamera')[0].version
except:
picamversion = '0'
if 'threaded_read' in cameras[camera]: # threaded or non-threaded camera reading
self.threaded_read = cameras[camera]['threaded_read']
else:
self.threaded_read = True
if 'resolution' in cameras[camera]:
self.resolution = literal_eval(cameras[camera]['resolution'])
else:
self.resolution = (320, 240)
if 'framerate' in cameras[camera]:
self.framerate = cameras[camera]['framerate']
else:
self.framerate = 32
if 'vflip' in cameras[camera]:
self.vflip = cameras[camera]['vflip']
else:
self.vflip = False
if 'resize_width' in cameras[camera]:
# resize_width is a percentage value
# width in pixels will be computed later after reading a test image
self.resize_width = cameras[camera]['resize_width']
else:
self.resize_width = None
if 'viewname' in cameras[camera]:
self.viewname = cameras[camera]['viewname']
else:
self.viewname = ' '
if 'src' in cameras[camera]:
self.src = cameras[camera]['src']
else:
self.src = 0
if 'exposure_mode' in cameras[camera]:
self.exposure_mode = cameras[camera]['exposure_mode']
else:
self.exposure_mode = None
if 'iso' in cameras[camera]:
self.iso = cameras[camera]['iso']
else:
self.iso = 0 # default value
if 'shutter_speed' in cameras[camera]:
self.shutter_speed = cameras[camera]['shutter_speed']
else:
self.shutter_speed = 0 # default value
if 'sharpness' in cameras[camera]:
self.sharpness = cameras[camera]['sharpness']
else:
self.sharpness = 0 # default value
if 'contrast' in cameras[camera]:
self.contrast = cameras[camera]['contrast']
else:
self.contrast = 0 # default value
| |
ORN1_ES = a_ORN1['ES'].values[0]
ORN1_parent_n = a_ORN1['parent_n'].values[0]
ORN1_offspring_n = a_ORN1['offspring_n'].values[0]
ORN1_N = (ORN1_parent_n + ORN1_offspring_n) / 2.
ORN1_parent_sd = a_ORN1['parent_std'].values[0]
ORN1_offspring_sd = a_ORN1['offspring_std'].values[0]
## Calculate pooled SD for ORN1
m, ORN1_sd = calculatePooledMOE(ORN1_parent_sd, ORN1_offspring_sd, n1=ORN1_parent_n, n2=ORN1_offspring_n)
## Get ES, SD, N info for the second ORN
ORN2_ES = a_ORN2['ES'].values[0]
ORN2_parent_n = a_ORN2['parent_n'].values[0]
ORN2_offspring_n = a_ORN2['offspring_n'].values[0]
ORN2_N = (ORN2_parent_n + ORN2_offspring_n) / 2.
ORN2_parent_sd = a_ORN2['parent_std'].values[0]
ORN2_offspring_sd = a_ORN2['offspring_std'].values[0]
## Calculate pooled SD for ORN2
m, ORN2_sd = calculatePooledMOE(ORN2_parent_sd, ORN2_offspring_sd, n1=ORN2_parent_n, n2=ORN2_offspring_n)
## Calculate pooled MOE for ORN1 and ORN2 COMBO
ORN1_2_pooled_moe, ORN1_2_pooled_SD = calculatePooledMOE(ORN1_sd, ORN2_sd, n1=ORN1_N, n2=ORN2_N)
## Summation model
ORN1_2_summation_ES = ORN1_ES + ORN2_ES
ORN1_2_summation_CIs = [ORN1_2_summation_ES-ORN1_2_pooled_moe, ORN1_2_summation_ES+ORN1_2_pooled_moe]
## Maxpooling model
ORN1_2_maxPooling_ES = max(ORN1_ES, ORN2_ES)
ORN1_2_maxPooling_CIs = [ORN1_2_maxPooling_ES-ORN1_2_pooled_moe, ORN1_2_maxPooling_ES+ORN1_2_pooled_moe]
# ## Minimization model
# ORN1_2_minimization_ES = min(ORN1_ES, ORN2_ES)
# ORN1_2_minimization_CIs = [ORN1_2_minimization_ES-ORN1_2_pooled_moe, ORN1_2_minimization_ES+ORN1_2_pooled_moe]
## DELTA-DELTA: Check similarities between observed and predictions
a_ORN_combo = a[(a['ORNs'] == ORNs[0] + '-' + ORNs[1]) & (a['LightIntensity'] == intensity)]
a_ORN_combo_ES = a_ORN_combo['ES'].values[0]
ORN_combo_parent_n = a_ORN_combo['parent_n'].values[0]
ORN_combo_offspring_n = a_ORN_combo['offspring_n'].values[0]
ORN_combo_N = (ORN_combo_parent_n + ORN_combo_offspring_n) / 2.
ORN_combo_parent_sd = a_ORN_combo['parent_std'].values[0]
ORN_combo_offspring_sd = a_ORN_combo['offspring_std'].values[0]
## Calculate pooled SD for ORN_combo
m, ORN_combo_sd = calculatePooledMOE(ORN_combo_parent_sd, ORN_combo_offspring_sd, n1=ORN_combo_parent_n, n2=ORN_combo_offspring_n)
dd_summation_v_observed_moe, dd_summation_v_observed_sd = calculatePooledMOE(ORN1_2_pooled_SD, ORN1_2_pooled_SD, n1=ORN_combo_N, n2=ORN_combo_N)
### delta-delta summation
dd_summation_v_observed_ES = ORN1_2_summation_ES - a_ORN_combo_ES
dd_summation_v_observed_CIs = [dd_summation_v_observed_ES-dd_summation_v_observed_moe, dd_summation_v_observed_ES+dd_summation_v_observed_moe]
### delta-delta max-pooling
dd_maxpool_v_observed_ES = ORN1_2_maxPooling_ES - a_ORN_combo_ES
dd_maxpool_v_observed_CIs = [dd_maxpool_v_observed_ES-dd_summation_v_observed_moe, dd_maxpool_v_observed_ES+dd_summation_v_observed_moe]
## Add the new values to the df
row = len(a)
a.loc[row, 'ORNs'] = ORNs[0] + '_' + ORNs[1] + '-summation'
a.loc[row, 'LightIntensity'] = intensity
a.loc[row, 'Sex-Satiety-Air'] = a_ORN1['Sex-Satiety-Air'].values[0]
a.loc[row, 'ES'] = ORN1_2_summation_ES
a.at[row, 'CIs'] = ORN1_2_summation_CIs
a.loc[row+1, 'ORNs'] = ORNs[0] + '_' + ORNs[1] + '-maxPooling'
a.loc[row+1, 'LightIntensity'] = intensity
a.loc[row+1, 'Sex-Satiety-Air'] = a_ORN1['Sex-Satiety-Air'].values[0]
a.loc[row+1, 'ES'] = ORN1_2_maxPooling_ES
a.at[row+1, 'CIs'] = ORN1_2_maxPooling_CIs
a.loc[row+2, 'ORNs'] = ORNs[0] + '_' + ORNs[1] + '-dd-summation'
a.loc[row+2, 'LightIntensity'] = intensity
a.loc[row+2, 'Sex-Satiety-Air'] = a_ORN1['Sex-Satiety-Air'].values[0]
a.loc[row+2, 'ES'] = dd_summation_v_observed_ES
a.at[row+2, 'CIs'] = dd_summation_v_observed_CIs
a.loc[row+3, 'ORNs'] = ORNs[0] + '_' + ORNs[1] + '-dd-maxPooling'
a.loc[row+3, 'LightIntensity'] = intensity
a.loc[row+3, 'Sex-Satiety-Air'] = a_ORN1['Sex-Satiety-Air'].values[0]
a.loc[row+3, 'ES'] = dd_maxpool_v_observed_ES
a.at[row+3, 'CIs'] = dd_maxpool_v_observed_CIs
# In[38]:
a
# ## Plot Combos: End
# In[169]:
df_order = pd.read_csv("C:/Users/tumkayat/Desktop/allData_Z_Gr66aREMOVED_CombosADDED_RESULTS.csv")
# In[174]:
df_order['Condition'] = df_order['ZI.Protein'].apply(lambda x: x.split('_')[1] + '_' + x.split('_')[2] +'_' + x.split('_')[3])
df_order['ORNs'] = df_order['ZI.Protein'].apply(lambda x: x.split('_')[0])
# In[202]:
df_order
# In[203]:
medianRatio_order = df_order[(df_order['Condition'] == 'male_fed_NoAir')].sort_values(by=['ZI.Probability'],ascending=True)['ORNs']
# In[204]:
exclude = ['Or33b-Or47a','Or33b-Or85a','Or42b-Or92a','Gr66a']
medianRatio_order = medianRatio_order[~medianRatio_order.isin(exclude)]
# In[254]:
a = All_ORN_EffectSizes_df[(All_ORN_EffectSizes_df['Sex-Satiety-Air'] == 'male_starved_NoAir')]
# In[274]:
select_ORNs = ['Orco','Or47b','Or67d','Or88a','Or65a']
a = All_ORN_EffectSizes_df[(All_ORN_EffectSizes_df['Sex-Satiety-Air'] == 'female_fed_NoAir') & (All_ORN_EffectSizes_df['ORNs'].apply(lambda x: x in select_ORNs))]
a
# In[62]:
# f, o = WALiForest(a, column='ES', sort_by=None ,sort_by_order=list(reversed(select_ORNs)),fsize=(5,5))
## 'red','steelblue','seagreen' used for combo plots
# f, o = WALiForest(a, column='ES', sort_by=None,sort_by_order = list(reversed(['Or42b','Or92a','Or42b-Or92a'])),fsize=(5,4),c='red')
# f, o = WALiForest(a, column='ES', sort_by=None,sort_by_order = list(reversed(['Or42b_Or92a-summation','Or42b_Or92a-maxPooling'])),fsize=(5,3),c='steelblue')
f, o = WALiForest(a, column='ES', sort_by=None,sort_by_order = list(reversed(['Or42b_Or92a-dd-summation','Or42b_Or92a-dd-maxPooling'])),fsize=(5,3),c='seagreen')
# In[63]:
fname = 'Or42b-Or92a_combo_air_dd'
f.savefig("C:/Users/tumkayat/Desktop/" + fname + ".pdf",dpi=1000,bbox_inches='tight')
a.to_csv("C:/Users/tumkayat/Desktop/" + fname + ".csv")
# In[169]:
a
# ## Calculate the diff between observed and predicted values
# In[171]:
a = pd.DataFrame.from_csv("C:/Users/tumkayat/Desktop/Or33b-Or47a_combo_air.csv")
combo = ['Or33b','Or47a']
intensities = ['14uW','42uW','70uW']
temp = {'ORNs':[], 'Int':[], 'summation_diff':[], 'maxpool_diff': [], 'minimization_diff': []} #,'Sum':[]}
for i in intensities:
observed_ES = a[(a['ORNs'] == combo[0] + '-' + combo[1]) & (a['LightIntensity'] == i)]['ES'].values[0]
summation_ES = a[(a['ORNs'] == combo[0] + '_' + combo[1] + '-summation') & (a['LightIntensity'] == i)]['ES'].values[0]
maxpool_ES = a[(a['ORNs'] == combo[0] + '_' + combo[1] + '-maxPooling') & (a['LightIntensity'] == i)]['ES'].values[0]
minimization_ES = a[(a['ORNs'] == combo[0] + '_' + combo[1] + '-minimization') & (a['LightIntensity'] == i)]['ES'].values[0]
temp['ORNs'].append(combo[0] + '-' + combo[1])
temp['Int'].append(i)
temp['summation_diff'].append(abs(observed_ES - summation_ES))
temp['maxpool_diff'].append(abs(observed_ES - maxpool_ES))
temp['minimization_diff'].append(abs(observed_ES - minimization_ES))
# temp['Sum'].append('NA')
temp['ORNs'].append('TotalDiff')
temp['Int'].append('NA')
temp['summation_diff'].append(np.sum(temp['summation_diff']))
temp['maxpool_diff'].append(np.sum(temp['maxpool_diff']))
temp['minimization_diff'].append(np.sum(temp['minimization_diff']))
combo_df = pd.DataFrame(temp)
combo_df.to_csv("C:/Users/tumkayat/Desktop/" + combo[0] + '-' + combo[1] + ".csv")
# In[161]:
np.sum(temp['maxpool_diff'])
# ### Replicated ORN data
# In[ ]:
## Select data if Sex-Satiety-Air column contains 'asghar' or '2' in it.
df_replication = All_ORN_EffectSizes_df[(All_ORN_EffectSizes_df['Metric']=='weighted_TSALE') & ((All_ORN_EffectSizes_df['Sex-Satiety-Air'].str.contains("-asghar"))
| (All_ORN_EffectSizes_df['Sex-Satiety-Air'].str.contains("2")))]
df_replication
# In[ ]:
df_replication_male_fed_NoAir = df_replication[((df_replication['Sex-Satiety-Air'] == ("male_fed-asghar_NoAir"))
| (df_replication['Sex-Satiety-Air'] == ("male_fed2_NoAir")))]
df_replication_male_fed_Air = df_replication[((df_replication['Sex-Satiety-Air'] == ("male_fed-asghar_Air"))
| (df_replication['Sex-Satiety-Air'] == ("male_fed2_Air")))]
df_replication_male_starved_NoAir = df_replication[((df_replication['Sex-Satiety-Air'] == ("male_starved-asghar_NoAir"))
| (df_replication['Sex-Satiety-Air'] == ("male_starved2_NoAir")))]
df_replication_male_starved_Air = df_replication[((df_replication['Sex-Satiety-Air'] == ("male_starved-asghar_Air"))
| (df_replication['Sex-Satiety-Air'] == ("male_starved2_Air")))]
# In[ ]:
## Replication female Or67d
df_replication_female = df_replication[df_replication['Sex-Satiety-Air'].str.contains('female')]
df_replication_female_fed_NoAir = df_replication_female[df_replication_female['Sex-Satiety-Air'] == ('female_fed-asghar_NoAir')]
df_replication_female_fed_Air = df_replication_female[df_replication_female['Sex-Satiety-Air'] == ('female_fed-asghar_Air')]
df_replication_female_starved_NoAir = df_replication_female[df_replication_female['Sex-Satiety-Air'] == ('female_starved-asghar_NoAir')]
df_replication_female_starved_Air = df_replication_female[df_replication_female['Sex-Satiety-Air'] == ('female_starved-asghar_Air')]
# In[ ]:
## Original female Or67d
df_female_Or67d_fed_NoAir = All_ORN_EffectSizes_df[(All_ORN_EffectSizes_df['Metric']=='weighted_TSALE') & (All_ORN_EffectSizes_df['ORNs'] == ("Or67d"))
& (All_ORN_EffectSizes_df['Sex-Satiety-Air'] == ("female_fed_NoAir"))]
df_female_Or67d_fed_Air = All_ORN_EffectSizes_df[(All_ORN_EffectSizes_df['Metric']=='weighted_TSALE') & (All_ORN_EffectSizes_df['ORNs'] == ("Or67d"))
& (All_ORN_EffectSizes_df['Sex-Satiety-Air'] == ("female_fed_Air"))]
df_female_Or67d_starved_NoAir = All_ORN_EffectSizes_df[(All_ORN_EffectSizes_df['Metric']=='weighted_TSALE') & (All_ORN_EffectSizes_df['ORNs'] == ("Or67d"))
& (All_ORN_EffectSizes_df['Sex-Satiety-Air'] == ("female_starved_NoAir"))]
df_female_Or67d_starved_Air = All_ORN_EffectSizes_df[(All_ORN_EffectSizes_df['Metric']=='weighted_TSALE') & (All_ORN_EffectSizes_df['ORNs'] == ("Or67d"))
& (All_ORN_EffectSizes_df['Sex-Satiety-Air'] == ("female_starved_Air"))]
# In[ ]:
len(df_replication_male_fed_NoAir) + len(df_replication_male_fed_Air) + len(df_replication_male_starved_NoAir) + len(df_replication_male_starved_Air)
# In[ ]:
column = 'ES'
f, ORNorder = WALiForest(df_replication_male_starved_Air,column,sort_by=False, sort_by_order=order)
order = ORNorder
plt.title('male_starved_Air')
plt.savefig("C:/Users/tumkayat/Desktop/WALi Figures/ForestPlots/deltaPlots/ORNsReplications/" + 'male_starved_Air' + '.pdf',dpi=1000,bbox_inches='tight')
# ### Sanity checks
# In[ ]:
x = data[(data['ORNs'] == 'Or88a') & (data['LightInt'] == '70uW')]['male_fed_noair']
y = data[(data['ORNs'] == 'Or88a') & (data['LightInt'] == '70uW')]['male_fed_air']
# In[ ]:
x - y
# In[ ]:
data[(data['ORNs'] == 'Or88a') & (data['LightInt'] == '70uW')]['delta_male_fed_Noair_V_male_fed_air']
# In[ ]:
All_ORN_EffectSizes_df
# In[ ]:
y_df = All_ORN_EffectSizes_df[(All_ORN_EffectSizes_df['Metric'] == 'weighted_TSALE') & (All_ORN_EffectSizes_df['ORNs'] == 'Or42b')
& (All_ORN_EffectSizes_df['Sex-Satiety-Air'] == 'male_fed_Air')
]
# In[ ]:
from scipy.interpolate import interp1d
# In[ ]:
x = [14,42,70]
y = y_df['ES'].values
# In[ ]:
f = interp1d(x, y)
# In[ ]:
xnew = np.linspace(14, 70, num=100, endpoint=True)
# In[ ]:
plt.plot(x, y, 'o', xnew, f(xnew), '-')
plt.legend(['data', 'linear'], loc='best')
plt.ylim(ymin=0)
plt.xlabel('Light Intensity (uW/mm2)')
plt.ylabel('weighted_TSALE')
sns.set(style="ticks")
sns.despine(trim=False)
# In[ ]:
f(xnew)
# In[ ]:
t = pd.read_pickle('K:/ACC/Tayfuntumkaya/DATA/CompleteWALiSARORNData_Analyzed/Gr63a/weighted_TSALE/weighted_TSALE_values.pkl')
# In[ ]:
t['Sex-Satiety-Wind'] = t['Sex'] + '-' + t['Satiety'] +'-' + t['Wind status']
# In[ ]:
k = t[(t['Sex-Satiety-Wind'] == 'male-fed-Air') & (t['Genotype'] == 'w1118-UAS-CsChrimson') & (t['Light Intensity(uW/mm2)'] == '14uW')]['weighted_TSALE_P10']
l = t[(t['Sex-Satiety-Wind'] == 'male-fed-Air') & (t['Genotype'] == 'w1118-Gr63a-Gal4') & (t['Light Intensity(uW/mm2)'] == '14uW')]['weighted_TSALE_P10']
# In[ ]:
a = t['Genotype'].unique()
# In[ ]:
for i in range(len(a)):
if (('w1118' in a[i]) or ('W1118' in a[i])) and ('UAS' in a[i]):
responder_ctrl = a[i]
if (('w1118' in a[i]) or ('W1118' in a[i])) and (('Gal4' in a[i]) or ('gal4' in a[i])) :
driver_ctrl = a[i]
# In[ ]:
driver_ctrl
# In[ ]:
dabest.bootstrap_tools.bootstrap(k.dropna().values)
# In[ ]:
t
# ### Analyses of the control groups
# In[2]:
def GetGenotypeMeans(temp, rootDirectory):
ornList = os.listdir(rootDirectory)
bar = progressbar.ProgressBar()
for i in bar(range(len(ornList))):
## read the data in a df
ORN = ornList[i]
print '%s is in progress...' %(ORN)
rootDir = os.path.join(rootDirectory,ORN)
df = pd.read_pickle(rootDir + '/weighted_TSALE_values.pkl')
## combined these variables so can choose groups of data
df['Sex-Satiety-Wind-Intensity'] = df['Sex'] + '-' + df['Satiety'] + '-' + df['Wind status'] + '-' + df['Light Intensity(uW/mm2)']
## detect responder and driver control genotypes
genotypes = df['Genotype'].unique()
for i in range(len(genotypes)):
if (('w1118' in genotypes[i]) or ('W1118' in genotypes[i])) and ('UAS' in genotypes[i]):
responder_ctrl = genotypes[i]
elif (('w1118' in genotypes[i]) or ('W1118' in genotypes[i])) and (('Gal4' in genotypes[i]) or ('gal4' in genotypes[i])):
driver_ctrl = genotypes[i]
elif (('UAS' in genotypes[i]) or ('uas' in genotypes[i])) and (('Gal4' in genotypes[i]) or ('gal4' in genotypes[i])):
experiment = genotypes[i]
conditions = df['Sex-Satiety-Wind-Intensity'].unique()
if len(conditions) == 12:
for c in conditions:
responder_ctrl_values_P10 = df[(df['Sex-Satiety-Wind-Intensity'] == c) & (df['Genotype'] == responder_ctrl)]['weighted_TSALE_P10']
responder_ctrl_values_P01 = df[(df['Sex-Satiety-Wind-Intensity'] == c) & (df['Genotype'] == responder_ctrl)]['weighted_TSALE_P01']
driver_ctrl_values_P10 = df[(df['Sex-Satiety-Wind-Intensity'] == c) & (df['Genotype'] == driver_ctrl)]['weighted_TSALE_P10']
driver_ctrl_values_P01 = df[(df['Sex-Satiety-Wind-Intensity'] == c) & (df['Genotype'] == driver_ctrl)]['weighted_TSALE_P01']
exp_values_P10 = df[(df['Sex-Satiety-Wind-Intensity'] == c) & (df['Genotype'] == experiment)]['weighted_TSALE_P10']
exp_values_P01 = df[(df['Sex-Satiety-Wind-Intensity'] == c) & (df['Genotype'] == experiment)]['weighted_TSALE_P01']
try:
ResponderCtrl_means_P01 = dabest.bootstrap_tools.bootstrap(responder_ctrl_values_P01.dropna().values).summary
temp['weighted-TSALE'].append(ResponderCtrl_means_P01)
temp['Status'].append('Responder')
temp['Epoch'].append('P01')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(responder_ctrl)
temp['ORN'].append(ORN)
ResponderCtrl_means_P10 = dabest.bootstrap_tools.bootstrap(responder_ctrl_values_P10.dropna().values).summary
temp['weighted-TSALE'].append(ResponderCtrl_means_P10)
temp['Status'].append('Responder')
temp['Epoch'].append('P10')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(responder_ctrl)
temp['ORN'].append(ORN)
except:
temp['weighted-TSALE'].append(np.nan)
temp['Status'].append('Responder')
temp['Epoch'].append('P01')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(responder_ctrl)
temp['ORN'].append(ORN)
temp['weighted-TSALE'].append(np.nan)
temp['Status'].append('Responder')
temp['Epoch'].append('P10')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(responder_ctrl)
temp['ORN'].append(ORN)
try:
DriverCtrl_means_P01 = dabest.bootstrap_tools.bootstrap(driver_ctrl_values_P01.dropna().values).summary
temp['weighted-TSALE'].append(DriverCtrl_means_P01)
temp['Status'].append('Driver')
temp['Epoch'].append('P01')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(driver_ctrl)
temp['ORN'].append(ORN)
DriverCtrl_means_P10 = dabest.bootstrap_tools.bootstrap(driver_ctrl_values_P10.dropna().values).summary
temp['weighted-TSALE'].append(DriverCtrl_means_P10)
temp['Status'].append('Driver')
temp['Epoch'].append('P10')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(driver_ctrl)
temp['ORN'].append(ORN)
except:
temp['weighted-TSALE'].append(np.nan)
temp['Status'].append('Driver')
temp['Epoch'].append('P01')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(driver_ctrl)
temp['ORN'].append(ORN)
temp['weighted-TSALE'].append(np.nan)
temp['Status'].append('Driver')
temp['Epoch'].append('P10')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(driver_ctrl)
temp['ORN'].append(ORN)
try:
exp_means_P01 = dabest.bootstrap_tools.bootstrap(exp_values_P01.dropna().values).summary
temp['weighted-TSALE'].append(exp_means_P01)
temp['Status'].append('Experiment')
temp['Epoch'].append('P01')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(experiment)
temp['ORN'].append(ORN)
exp_means_P10 = dabest.bootstrap_tools.bootstrap(exp_values_P10.dropna().values).summary
temp['weighted-TSALE'].append(exp_means_P10)
temp['Status'].append('Experiment')
temp['Epoch'].append('P10')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(experiment)
temp['ORN'].append(ORN)
except:
temp['weighted-TSALE'].append(np.nan)
temp['Status'].append('Experiment')
temp['Epoch'].append('P01')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(experiment)
temp['ORN'].append(ORN)
temp['weighted-TSALE'].append(np.nan)
temp['Status'].append('Experiment')
temp['Epoch'].append('P10')
temp['Sex-Satiety-Wind-Intensity'].append(c)
temp['Genotypes'].append(experiment)
temp['ORN'].append(ORN)
elif len(conditions) > 12:
shortened_conditions = []
for item in conditions:
if (not 'asghar' in item) & (not 'safwan' in item) & (not 'fed2' in item) & (not 'starved2' in item) & (not '28uW' in item) & (not '56uW' in item) & (not 'female' in item):
shortened_conditions.append(item)
if len(shortened_conditions) == 12:
print '%s shortened conditions...' %(ORN)
for c in shortened_conditions:
responder_ctrl_values_P10 = df[(df['Sex-Satiety-Wind-Intensity'] == c) & (df['Genotype'] == responder_ctrl)]['weighted_TSALE_P10']
responder_ctrl_values_P01 = df[(df['Sex-Satiety-Wind-Intensity'] == | |
# ------------ Helper Functions -------------------------------------------------------------------------
#
# This file provides all necessary helper functions.
#
# ----- (0) Imports --------------------------------------------------------------------------------------
import random
import numpy as np
import keras
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.regularizers import l1_l2, l2
from keras import backend as K
from keras.models import Sequential, load_model, Model
from keras.optimizers import Adam
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D, Flatten, Lambda, Cropping2D, Activation, Input, merge, Concatenate
# ---- (1) Functions -------------------------------------------------------------------------------------
def generateSurfaceCodeLattice(d):
""""
This function generates a distance d square surface code lattice. in particular, the function returns
an array which, for each physical qubit, details the code-stabilizers supported on that qubit. To be more
precise:
- qubits[i,j,:,:] is a 4x3 array describing all the stabilizers supported on physical qubit(i,j)
-> for the surface code geometry each qubit can support up to 4 stabilizers
- qubits[i,j,k,:] is a 3-vector describing the k'th stabilizer supported on physical qubit(i,j)
-> qubits[i,j,k,:] = [x_lattice_address, y_lattice_address, I or X or Y or Z]
:param: d: The lattice width and height (or, equivalently, for the surface code, the code distance)
:return: qubits: np.array listing and describing the code-stabilizers supported on each qubit
"""
if np.mod(d,2) != 1:
raise Exception("for the surface code d must be odd!")
qubits = [ [ [
[ x, y, ((x+y)%2)*2+1],
[ x, y+1, ((x+y+1)%2)*2+1],
[ x+1, y, ((x+1+y)%2)*2+1],
[ x+1, y+1, ((x+1+y+1)%2)*2+1]
] for y in range(d)] for x in range(d)]
qubits = np.array(qubits)
for x in range(d):
for y in range(d):
for k in range(4):
if (qubits[x,y,k,0] == 0 and qubits[x,y,k,1]%2 == 0):
qubits[x,y,k,2] = 0
if (qubits[x,y,k,0] == d and qubits[x,y,k,1]%2 == 1):
qubits[x,y,k,2] = 0
if (qubits[x,y,k,1] == 0 and qubits[x,y,k,0]%2 == 1):
qubits[x,y,k,2] = 0
if (qubits[x,y,k,1] == d and qubits[x,y,k,0]%2 == 0):
qubits[x,y,k,2] = 0
return qubits
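# Usage sketch (illustrative, not in the original helper file): run the module
# directly to inspect the lattice layout. Each entry qubits[i, j, k] is
# [stabilizer_row, stabilizer_col, pauli] with pauli in {0: none, 1: X, 3: Z}.
if __name__ == "__main__":
    example_qubits = generateSurfaceCodeLattice(3)
    print(example_qubits.shape)        # (3, 3, 4, 3)
    print(example_qubits[0, 0, :, :])  # stabilizers touching data qubit (0, 0)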
def multiplyPaulis(a,b):
""""
A simple helper function for multiplying Pauli Matrices. Returns ab.
:param: a: an int in [0,1,2,3] representing [I,X,Y,Z]
:param: b: an int in [0,1,2,3] representing [I,X,Y,Z]
"""
out = [[0,1,2,3],[1,0,3,2],[2,3,0,1],[3,2,1,0]]
return out[int(a)][int(b)]
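# Quick sanity check of the table above (illustrative sketch; phases are ignored):
if __name__ == "__main__":
    assert multiplyPaulis(1, 3) == 2   # X * Z -> Y (up to phase)
    assert multiplyPaulis(1, 1) == 0   # X * X -> I
    assert multiplyPaulis(0, 2) == 2   # I * Y -> Y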
# 2) Error generation
def generate_error(d,p_phys,error_model):
""""
This function generates an error configuration, via a single application of the specified error channel, on a square dxd lattice.
:param: d: The code distance/lattice width and height (for surface/toric codes)
:param: p_phys: The physical error rate.
:param: error_model: A string in ["X", "DP", "IIDXZ"] indicating the desired error model.
:return: error: The error configuration
"""
if error_model == "X":
return generate_X_error(d,p_phys)
elif error_model == "DP":
return generate_DP_error(d,p_phys)
elif error_model == "IIDXZ":
return generate_IIDXZ_error(d,p_phys)
raise ValueError("error_model must be one of 'X', 'DP', 'IIDXZ'")
def generate_DP_error(d,p_phys):
""""
This function generates an error configuration, via a single application of the depolarizing noise channel, on a square dxd lattice.
:param: d: The code distance/lattice width and height (for surface/toric codes)
:param: p_phys: The physical error rate.
:return: error: The error configuration
"""
error = np.zeros((d,d),int)
for i in range(d):
for j in range(d):
p = 0
if np.random.rand() < p_phys:
p = np.random.randint(1,4)
error[i,j] = p
return error
def generate_X_error(d,p_phys):
""""
This function generates an error configuration, via a single application of the bitflip noise channel, on a square dxd lattice.
:param: d: The code distance/lattice width and height (for surface/toric codes)
:param: p_phys: The physical error rate.
:return: error: The error configuration
"""
error = np.zeros((d,d),int)
for i in range(d):
for j in range(d):
p = 0
if np.random.rand() < p_phys:
error[i,j] = 1
return error
def generate_IIDXZ_error(d,p_phys):
""""
This function generates an error configuration, via a single application of the IIDXZ noise channel, on a square dxd lattice.
:param: d: The code distance/lattice width and height (for surface/toric codes)
:param: p_phys: The physical error rate.
:return: error: The error configuration
"""
error = np.zeros((d,d),int)
for i in range(d):
for j in range(d):
X_err = False
Z_err = False
p = 0
if np.random.rand() < p_phys:
X_err = True
p = 1
if np.random.rand() < p_phys:
Z_err = True
p = 3
if X_err and Z_err:
p = 2
error[i,j] = p
return error
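# Usage sketch (illustrative): draw one error configuration from each channel
# on a d = 5 lattice; entries are 0 (no error), 1 (X), 2 (Y) or 3 (Z).
if __name__ == "__main__":
    for model in ["X", "DP", "IIDXZ"]:
        sample_error = generate_error(5, 0.1, model)
        print(model, "non-trivial errors:", np.count_nonzero(sample_error))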
def generate_surface_code_syndrome_NoFT_efficient(error,qubits):
""""
This function generates the syndrome (violated stabilizers) corresponding to the input error configuration,
for the surface code.
:param: error: An error configuration on a square lattice
:param: qubits: The qubit configuration
:return: syndrome: The syndrome corresponding to input error
"""
d = np.shape(error)[0]
syndrome = np.zeros((d+1,d+1),int)
for i in range(d):
for j in range(d):
if error[i,j] != 0:
for k in range(qubits.shape[2]):
if qubits[i,j,k,2] != error[i,j] and qubits[i,j,k,2] != 0:
a = qubits[i,j,k,0]
b = qubits[i,j,k,1]
syndrome[a,b] = 1 - syndrome[a,b]
return syndrome
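# Usage sketch (illustrative): generate an error and its perfect syndrome.
# The syndrome lives on the (d+1) x (d+1) grid of stabilizer ancillas.
if __name__ == "__main__":
    d_example = 5
    lattice = generateSurfaceCodeLattice(d_example)
    err = generate_error(d_example, 0.1, "DP")
    syn = generate_surface_code_syndrome_NoFT_efficient(err, lattice)
    print(syn.shape)  # (6, 6)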
def generate_faulty_syndrome(true_syndrome, p_measurement_error):
""""
This function takes in a true syndrome, and generates a faulty syndrome according to some
given probability of measurement errors.
:param: true_syndrome: The original perfect measurement syndrome
:return: p_measurement_error: The probability of measurement error per stabilizer
:return: faulty_syndrome: The faulty syndrome
"""
faulty_syndrome = np.zeros(np.shape(true_syndrome),int)
# First we take care of the "bulk stabilizers"
for row in range(1, true_syndrome.shape[0]-1):
for col in range(1,true_syndrome.shape[1]-1):
if np.random.rand() < p_measurement_error:
faulty_syndrome[row,col] = 1 - true_syndrome[row,col]
else:
faulty_syndrome[row,col] = true_syndrome[row,col]
# Now we take care of the boundary stabilizers
row = 0
for col in [2*x +1 for x in range(int(true_syndrome.shape[0]/2 - 1))]:
if np.random.rand() < p_measurement_error:
faulty_syndrome[row,col] = 1 - true_syndrome[row,col]
else:
faulty_syndrome[row,col] = true_syndrome[row,col]
row = true_syndrome.shape[0] - 1
for col in [2*x + 2 for x in range(int(true_syndrome.shape[0]/2 - 1))]:
if np.random.rand() < p_measurement_error:
faulty_syndrome[row,col] = 1 - true_syndrome[row,col]
else:
faulty_syndrome[row,col] = true_syndrome[row,col]
col = 0
for row in [2*x + 2 for x in range(int(true_syndrome.shape[0]/2 - 1))]:
if np.random.rand() < p_measurement_error:
faulty_syndrome[row,col] = 1 - true_syndrome[row,col]
else:
faulty_syndrome[row,col] = true_syndrome[row,col]
col = true_syndrome.shape[0] - 1
for row in [2*x +1 for x in range(int(true_syndrome.shape[0]/2 - 1))]:
if np.random.rand() < p_measurement_error:
faulty_syndrome[row,col] = 1 - true_syndrome[row,col]
else:
faulty_syndrome[row,col] = true_syndrome[row,col]
return faulty_syndrome
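# Usage sketch (illustrative): flip each measured stabilizer independently
# with probability 0.05 to mimic measurement noise.
if __name__ == "__main__":
    d_example = 5
    lattice = generateSurfaceCodeLattice(d_example)
    err = generate_error(d_example, 0.1, "X")
    perfect_syn = generate_surface_code_syndrome_NoFT_efficient(err, lattice)
    noisy_syn = generate_faulty_syndrome(perfect_syn, 0.05)
    print("flipped stabilizers:", int(np.sum(perfect_syn != noisy_syn)))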
def obtain_new_error_configuration(old_configuration,new_gates):
""""
This function generates a new error configuration out of an old configuration and a new configuration,
which might arise either from errors or corrections.
:param: old_configuration: An error configuration on a square lattice
:param: new_gates: An error configuration on a square lattice
:return: new_configuration: The resulting error configuration
"""
new_configuration = np.zeros(np.shape(old_configuration))
for row in range(new_configuration.shape[0]):
for col in range(new_configuration.shape[1]):
new_configuration[row,col] = multiplyPaulis(new_gates[row,col], old_configuration[row,col])
return new_configuration
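# Usage sketch (illustrative): applying an error configuration to itself as a
# "correction" multiplies each Pauli with itself and leaves only identities.
if __name__ == "__main__":
    err = generate_error(5, 0.1, "DP")
    cleaned = obtain_new_error_configuration(err, err)
    assert np.count_nonzero(cleaned) == 0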
def index_to_move(d,move_index,error_model,use_Y=True):
""""
Given an integer index corresponding to a Pauli flip on a physical data qubit, this
function generates the lattice representation of the move.
:param: d: The code distance
:param: move_index: The integer representation of the Pauli Flip
:param: error_model: A string in ["X", "DP", "IIDXZ"] indicating the desired error model.
:param: use_Y: a boolean indicating whether or not Y flips are allowed
:return: new_move: A lattice representation of the desired move.
"""
new_move = np.zeros((d,d))
if error_model == "X":
if move_index < (d**2):
move_type = 1
new_index = move_index
row = new_index/d
col = new_index%d
new_move[int(row),int(col)] = move_type
elif error_model == "DP":
if use_Y:
if move_index < (d**2)*3:
move_type = int(move_index/d**2) + 1
new_index = move_index - (move_type - 1)*d**2
row = new_index/d
col = new_index%d
new_move[int(row),int(col)] = move_type
else:
if move_index < (d**2)*2:
move_type = int(move_index/d**2) + 1
new_index = move_index - (move_type - 1)*d**2
row = new_index/d
col = new_index%d
if move_type == 2:
move_type = 3
new_move[int(row),int(col)] = move_type
else:
print("Error model you have specified is not currently supported")
return new_move
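# Usage sketch (illustrative): for the bit-flip ("X") model the move index
# enumerates the d*d data qubits in row-major order.
if __name__ == "__main__":
    move = index_to_move(3, 5, "X")
    print(move)  # 3x3 array with a single 1 at row 1, column 2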
def generate_one_hot_labels_surface_code(error,err_model):
""""
This function generates the homology class label, in a one-hot encoding, for a given perfect syndrome, to use as the target label
for a feed forward neural network homology class predicting decoder.
!
:param: error: An error configuration on a square lattice
:param: err_model: A string in ["IIDXZ","DP","X"]
:return: training_label: The one-encoded training label
"""
d = error.shape[0]
X = 0
Z = 0
| |
filename and write it"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
#set self.vf.dpo['ligand_types']['value'] here
# from current values of self.vf.DPF_LIGAND_TYPES
# AND
# from current values of self.vf.DPF_FLEXRES_TYPES
autodock_types = self.vf.DPF_LIGAND_TYPES[:]
for t in self.vf.DPF_FLEXRES_TYPES:
if t not in autodock_types:
autodock_types.append(t)
if len(autodock_types)==0:
self.vf.warningMsg("You must choose a ligand before writing dpf")
return 'ERROR'
autodock_type_str = str(autodock_types[0])
if len(autodock_types)>1:
for t in autodock_types[1:]:
autodock_type_str = autodock_type_str + " " + str(t)
self.vf.dpo['ligand_types']['value'] = autodock_type_str
#if a rms reference file has been specified, write it to dpf
if self.vf.dpo['rmsref']['value']!=self.vf.dpo['move']['value']:
l = genetic_algorithm_list4
ind = l.index('rmstol') + 1
l.insert(ind, 'rmsref')
self.vf.dpo.write4(outfile, genetic_algorithm_list4)
#this is set when the dpo is written
#self.vf.dpo.dpf_filename = outfile
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
title = 'AutoDock4 GA Docking Parameter Output File:')
if outfile:
self.doitWrapper(outfile, log=1,redraw=0)
Dpf4GAWriterGUI=CommandGUI()
Dpf4GAWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'], menuText['WriteGA4'],\
cascadeName = menuText['WriteDpfMB'])
class Dpf4LSWriter(MVCommand):
""" allows user to choose an output filename and write it"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
#set self.vf.dpo['ligand_types']['value'] here
# from current values of self.vf.DPF_LIGAND_TYPES
# AND
# from current values of self.vf.DPF_FLEXRES_TYPES
autodock_types = self.vf.DPF_LIGAND_TYPES[:]
for t in self.vf.DPF_FLEXRES_TYPES:
if t not in autodock_types:
autodock_types.append(t)
if len(autodock_types)==0:
self.vf.warningMsg("You must choose a ligand before writing dpf")
return 'ERROR'
autodock_type_str = str(autodock_types[0])
if len(autodock_types)>1:
for t in autodock_types[1:]:
autodock_type_str = autodock_type_str + " " + str(t)
self.vf.dpo['ligand_types']['value'] = autodock_type_str
#if a rms reference file has been specified, write it to dpf
if self.vf.dpo['rmsref']['value']!=self.vf.dpo['move']['value']:
l = local_search_list4
ind = l.index('rmstol') + 1
l.insert(ind, 'rmsref')
self.vf.dpo.write4(outfile, local_search_list4)
#this is set when the dpo is written
#self.vf.dpo.dpf_filename = outfile
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
title = 'AutoDock4 LS Docking Parameter Output File:')
if outfile:
self.doitWrapper(outfile, log=1,redraw=0)
Dpf4LSWriterGUI=CommandGUI()
Dpf4LSWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'], menuText['WriteLS4'],\
cascadeName = menuText['WriteDpfMB'])
class Dpf4GALSWriter(MVCommand):
""" allows user to choose an output filename and write it"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
#set self.vf.dpo['ligand_types']['value'] here
# from current values of self.vf.DPF_LIGAND_TYPES
# AND
# from current values of self.vf.DPF_FLEXRES_TYPES
autodock_types = self.vf.DPF_LIGAND_TYPES[:]
for t in self.vf.DPF_FLEXRES_TYPES:
if t not in autodock_types:
autodock_types.append(t)
if len(autodock_types)==0:
self.vf.warningMsg("You must choose a ligand before writing dpf")
return 'ERROR'
autodock_type_str = str(autodock_types[0])
if len(autodock_types)>1:
for t in autodock_types[1:]:
autodock_type_str = autodock_type_str + " " + str(t)
self.vf.dpo['ligand_types']['value'] = autodock_type_str
#if a rms reference file has been specified, write it to dpf
if self.vf.dpo['rmsref']['value']!=self.vf.dpo['move']['value']:
l = genetic_algorithm_local_search_list4
ind = l.index('rmstol') + 1
l.insert(ind, 'rmsref')
self.vf.dpo.write4(outfile, genetic_algorithm_local_search_list4)
#this is set when the dpo is written
#self.vf.dpo.dpf_filename = outfile
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
title = 'AutoDock4 GALS Docking Parameter Output File:')
if outfile:
self.doitWrapper(outfile, log=1,redraw=0)
Dpf4GALSWriterGUI=CommandGUI()
Dpf4GALSWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'],\
menuText['WriteGALS4'], cascadeName = menuText['WriteDpfMB'])
class Dpf41SAWriter(MVCommand):
""" allows user to choose an output filename and write simulated annealing parameters"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
#set self.vf.dpo['ligand_types']['value'] here
# from current values of self.vf.DPF_LIGAND_TYPES
# AND
# from current values of self.vf.DPF_FLEXRES_TYPES
autodock_types = self.vf.DPF_LIGAND_TYPES[:]
for t in self.vf.DPF_FLEXRES_TYPES:
if t not in autodock_types:
autodock_types.append(t)
if len(autodock_types)==0:
self.vf.warningMsg("You must choose a ligand before writing dpf")
return 'ERROR'
autodock_type_str = str(autodock_types[0])
if len(autodock_types)>1:
for t in autodock_types[1:]:
autodock_type_str = autodock_type_str + " " + str(t)
self.vf.dpo['ligand_types']['value'] = autodock_type_str
#to remove pickle problem, assume dpo is current self.vf.dpo
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
#if a rms reference file has been specified, write it to dpf
if self.vf.dpo['rmsref']['value']!=self.vf.dpo['move']['value']:
l = simulated_annealing_list4
ind = l.index('rmstol') + 1
l.insert(ind, 'rmsref')
#if self.vf.dpo['parameter_file']['value']=="" or self.vf.dpo['parameter_file']['value']=="AD4_parameters.dat":
if self.vf.dpo['parameter_file']['value']=="AD4_parameters.dat":
self.vf.dpo['parameter_file']['value']="AD4.1_bound.dat"
self.vf.dpo['custom_parameter_file']['value']= 1
self.vf.dpo.write42(outfile, simulated_annealing_list4_2)
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
title = 'AutoDock4 SA Docking Parameter Output File:')
if outfile:
self.doitWrapper(outfile, log=1,redraw=0)
Dpf41SAWriterGUI=CommandGUI()
Dpf41SAWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'],\
menuText['WriteSA41'], cascadeName = menuText['WriteDpfMB'])
class Dpf41GAWriter(MVCommand):
""" allows user to choose an output filename and write it"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
#set self.vf.dpo['ligand_types']['value'] here
# from current values of self.vf.DPF_LIGAND_TYPES
# AND
# from current values of self.vf.DPF_FLEXRES_TYPES
autodock_types = self.vf.DPF_LIGAND_TYPES[:]
for t in self.vf.DPF_FLEXRES_TYPES:
if t not in autodock_types:
autodock_types.append(t)
if len(autodock_types)==0:
self.vf.warningMsg("You must choose a ligand before writing dpf")
return 'ERROR'
autodock_type_str = str(autodock_types[0])
if len(autodock_types)>1:
for t in autodock_types[1:]:
autodock_type_str = autodock_type_str + " " + str(t)
self.vf.dpo['ligand_types']['value'] = autodock_type_str
#if a rms reference file has been specified, write it to dpf
if self.vf.dpo['rmsref']['value']!=self.vf.dpo['move']['value']:
l = genetic_algorithm_list4_2
ind = l.index('rmstol') + 1
l.insert(ind, 'rmsref')
#if self.vf.dpo['parameter_file']['value']=="" or self.vf.dpo['parameter_file']['value']=="AD4_parameters.dat":
if self.vf.dpo['parameter_file']['value']=="AD4_parameters.dat":
self.vf.dpo['parameter_file']['value']="AD4.1_bound.dat"
self.vf.dpo['custom_parameter_file']['value']= 1
self.vf.dpo.write42(outfile, genetic_algorithm_list4_2)
#this is set when the dpo is written
#self.vf.dpo.dpf_filename = outfile
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
title = 'AutoDock4 GA Docking Parameter Output File:')
if outfile:
self.doitWrapper(outfile, log=1,redraw=0)
Dpf41GAWriterGUI=CommandGUI()
Dpf41GAWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'], menuText['WriteGA41'],\
cascadeName = menuText['WriteDpfMB'])
class Dpf41LSWriter(MVCommand):
""" allows user to choose an output filename and write it"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
#set self.vf.dpo['ligand_types']['value'] here
# from current values of self.vf.DPF_LIGAND_TYPES
# AND
# from current values of self.vf.DPF_FLEXRES_TYPES
autodock_types = self.vf.DPF_LIGAND_TYPES[:]
for t in self.vf.DPF_FLEXRES_TYPES:
if t not in autodock_types:
autodock_types.append(t)
if len(autodock_types)==0:
self.vf.warningMsg("You must choose a ligand before writing dpf")
return 'ERROR'
autodock_type_str = str(autodock_types[0])
if len(autodock_types)>1:
for t in autodock_types[1:]:
autodock_type_str = autodock_type_str + " " + str(t)
self.vf.dpo['ligand_types']['value'] = autodock_type_str
#if a rms reference file has been specified, write it to dpf
if self.vf.dpo['rmsref']['value']!=self.vf.dpo['move']['value']:
l = local_search_list4
ind = l.index('rmstol') + 1
l.insert(ind, 'rmsref')
if self.vf.dpo['parameter_file']['value']=="" or self.vf.dpo['parameter_file']['value']=="AD4_parameters.dat":
self.vf.dpo['parameter_file']['value']="AD4.1_bound.dat"
self.vf.dpo['custom_parameter_file']['value']= 1
self.vf.dpo.write42(outfile, local_search_list4_2)
#this is set when the dpo is written
#self.vf.dpo.dpf_filename = outfile
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
title = 'AutoDock4 LS Docking Parameter Output File:')
if outfile:
self.doitWrapper(outfile, log=1,redraw=0)
Dpf41LSWriterGUI=CommandGUI()
Dpf41LSWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'], menuText['WriteLS41'],\
cascadeName = menuText['WriteDpfMB'])
class Dpf41GALSWriter(MVCommand):
""" allows user to choose an output filename and write an autodock4.1 dpf to it"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
autodock_types = self.vf.DPF_LIGAND_TYPES[:]
for t in self.vf.DPF_FLEXRES_TYPES:
if t not in autodock_types:
autodock_types.append(t)
if len(autodock_types)==0:
self.vf.warningMsg("You must choose a ligand before writing dpf")
return 'ERROR'
autodock_type_str = str(autodock_types[0])
if len(autodock_types)>1:
for t in autodock_types[1:]:
autodock_type_str = autodock_type_str + " " + str(t)
self.vf.dpo['ligand_types']['value'] = autodock_type_str
#if a rms reference file has been specified, write it to dpf
if self.vf.dpo['rmsref']['value']!=self.vf.dpo['move']['value']:
l = genetic_algorithm_local_search_list4_1
ind = l.index('rmstol') + 1
l.insert(ind, 'rmsref')
if self.vf.dpo['parameter_file']['value']=="" or self.vf.dpo['parameter_file']['value']=="AD4_parameters.dat":
self.vf.dpo['parameter_file']['value']="AD4.1_bound.dat"
self.vf.dpo['custom_parameter_file']['value']= 1
self.vf.dpo.write42(outfile, genetic_algorithm_local_search_list4_2)
#this is set when the dpo is written
#self.vf.dpo.dpf_filename = outfile
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
title = 'AutoDock4.2 GALS Docking Parameter Output File:')
if outfile:
self.doitWrapper(outfile, log=1,redraw=0)
Dpf41GALSWriterGUI=CommandGUI()
Dpf41GALSWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'],\
menuText['WriteGALS41'], cascadeName = menuText['WriteDpfMB'])
class Dpf4ClusterWriter(MVCommand):
""" allows user to write a dpf to cluster many dockings """
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
def __call__(self, outfile, **kw):
self.doitWrapper(*(outfile,), **kw)
def doit(self, outfile):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
self.vf.dpo.write4(outfile, cluster_list4)
def guiCallback(self):
if not len(self.vf.dpo.receptor_stem):
self.vf.warningMsg("You must choose a macromolecule before writing dpf")
return 'ERROR'
outfile = self.vf.askFileSave(types=[('dpf file', '*.dpf')],
                title = ' AutoDock4 Cluster
import numpy as np
import pandas as pd
import logging
logger = logging.getLogger(__name__)
class SqFtProFormaConfig(object):
"""
This class encapsulates the configuration options for the square
foot based pro forma.
parcel_sizes : list
A list of parcel sizes to test. Interestingly, right now
the parcel sizes cancel in this style of pro forma computation so
you can set this to something reasonable for debugging purposes -
e.g. [10000]. All sizes can be feet or meters as long as they are
consistently used.
fars : list
A list of floor area ratios to use. FAR is a multiple of
the parcel size that is the total building bulk that is allowed by
zoning on the site. In this case, all of these ratios will be
tested regardless of zoning and the zoning test will be performed
later.
uses : list
A list of space uses to use within a building. These are
mixed into forms. Generally speaking, you should only have uses
for which you have an estimate (or observed) values for rents in
the building. By default, uses are retail, industrial, office,
and residential.
forms : dict
A dictionary where keys are names for the form and values
are also dictionaries where keys are uses and values are the
proportion of that use used in this form. The values of the
dictionary should sum to 1.0. For instance, a form called
"residential" might have a dict of space allocations equal to
{"residential": 1.0} while a form called "mixedresidential"
might have a dictionary of space allocations equal to
{"retail": .1, "residential" .9] which is 90% residential and
10% retail.
parking_rates : dict
A dict of rates per thousand square feet where keys are the uses
from the list specified in the attribute above. The ratios
are typically in the range 0.5 - 3.0 or similar. So for
instance, a key-value pair of "retail": 2.0 would be two parking
spaces per 1,000 square feet of retail. This is a per square
        foot pro forma, so the more typical parking ratio of spaces
per residential unit must be converted to square feet for use in
this pro forma.
sqft_per_rate : float
The number of square feet per unit for use in the
parking_rates above. By default this is set to 1,000 but can be
overridden.
parking_configs : list
An expert parameter and is usually unchanged. By default
        it is set to ['surface', 'deck', 'underground'] and somewhat
        different computations are performed for each of these
        parking configurations. Generally speaking it will break things
to change this array, but an item can be removed if that parking
configuration should not be tested.
parking_sqft_d : dict
A dictionary where keys are the three parking
configurations listed above and values are square foot uses of
parking spaces in that configuration. This is to capture the
fact that surface parking is usually more space intensive
than deck or underground parking.
parking_cost_d : dict
The parking cost for each parking configuration. Keys are the
name of the three parking configurations listed above and values
are dollars PER SQUARE FOOT for parking in that configuration.
Used to capture the fact that underground and deck are far more
expensive than surface parking.
    heights_for_costs : list
A list of "break points" as heights at which construction becomes
more expensive. Generally these are the heights at which
construction materials change from wood, to concrete, to steel.
Costs are also given as lists by use for each of these break
points and are considered to be valid up to the break point. A
list would look something like [15, 55, 120, np.inf].
costs : dict
The keys are uses from the attribute above and the values are a
list of floating point numbers of same length as the
        heights_for_costs attribute. A key-value pair of
"residential": [160.0, 175.0, 200.0, 230.0] would say that the
        residential use is $160/sqft up to 15ft in total height for the
building, $175/sqft up to 55ft, $200/sqft up to 120ft, and
        $230/sqft beyond. A final value in the heights_for_costs
array of np.inf is typical.
height_per_story : float
The per-story height for the building used to turn an
FAR into an actual height.
max_retail_height : float
The maximum height of retail buildings to consider.
max_industrial_height : float
The maximum height of industrial buildings to consider.
profit_factor : float
The ratio of profit a developer expects to make above the break
even rent. Should be greater than 1.0, e.g. a 10% profit would be
a profit factor of 1.1.
building_efficiency : float
The efficiency of the building. This turns total FAR into the
amount of space which gets a square foot rent. The entire building
gets the cost of course.
parcel_coverage : float
The ratio of the building footprint to the parcel size. Also used
to turn an FAR into a height to cost properly.
cap_rate : float
The rate an investor is willing to pay for a cash flow per year.
This means $1/year is equivalent to 1/cap_rate present dollars.
This is a macroeconomic input that is widely available on the
internet.
"""
def __init__(self):
self._reset_defaults()
def _reset_defaults(self):
self.parcel_sizes = [10000.0]
self.fars = [.1, .25, .5, .75, 1.0, 1.5, 1.8, 2.0, 2.25, 2.5, 2.75,
3.0, 3.25, 3.5, 3.75, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 9.0, 11.0]
self.uses = ['retail', 'industrial', 'office', 'residential']
self.residential_uses = [False, False, False, True]
self.forms = {
'retail': {
"retail": 1.0
},
'industrial': {
"industrial": 1.0
},
'office': {
"office": 1.0
},
'residential': {
"residential": 1.0
},
'mixedresidential': {
"retail": .1,
"residential": .9
},
'mixedoffice': {
"office": 0.7,
"residential": 0.3
}
}
self.profit_factor = 1.1
self.building_efficiency = .7
self.parcel_coverage = .8
self.cap_rate = .05
self.parking_rates = {
"retail": 2.0,
"industrial": .6,
"office": 1.0,
"residential": 1.0
}
self.sqft_per_rate = 1000.0
self.parking_configs = ['surface', 'deck', 'underground']
self.costs = {
"retail": [160.0, 175.0, 200.0, 230.0],
"industrial": [140.0, 175.0, 200.0, 230.0],
"office": [160.0, 175.0, 200.0, 230.0],
"residential": [170.0, 190.0, 210.0, 240.0]
}
self.heights_for_costs = [15, 55, 120, np.inf]
self.parking_sqft_d = {
'surface': 300.0,
'deck': 250.0,
'underground': 250.0
}
self.parking_cost_d = {
'surface': 30,
'deck': 90,
'underground': 110
}
self.height_per_story = 10.0
self.max_retail_height = 2.0
self.max_industrial_height = 2.0
def _convert_types(self):
"""
convert lists and dictionaries that are useful for users to
np vectors that are usable by machines
"""
self.fars = np.array(self.fars)
self.parking_rates = np.array([self.parking_rates[use] for use in self.uses])
self.res_ratios = {}
assert len(self.uses) == len(self.residential_uses)
for k, v in self.forms.iteritems():
self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses])
# normalize if not already
self.forms[k] /= self.forms[k].sum()
self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum()
self.costs = np.transpose(np.array([self.costs[use] for use in self.uses]))
@property
def tiled_parcel_sizes(self):
return np.reshape(np.repeat(self.parcel_sizes, self.fars.size), (-1, 1))
def check_is_reasonable(self):
fars = pd.Series(self.fars)
assert len(fars[fars > 20]) == 0
assert len(fars[fars <= 0]) == 0
for k, v in self.forms.iteritems():
assert isinstance(v, dict)
for k2, v2 in self.forms[k].iteritems():
assert isinstance(k2, str)
assert isinstance(v2, float)
for k2, v2 in self.forms[k].iteritems():
assert isinstance(k2, str)
assert isinstance(v2, float)
for k, v in self.parking_rates.iteritems():
assert isinstance(k, str)
assert k in self.uses
assert 0 <= v < 5
for k, v in self.parking_sqft_d.iteritems():
assert isinstance(k, str)
assert k in self.parking_configs
assert 50 <= v <= 1000
        for k, v in self.parking_cost_d.iteritems():
            assert isinstance(k, str)
            assert k in self.parking_configs
            assert 10 <= v <= 300
for v in self.heights_for_costs:
assert isinstance(v, int) or isinstance(v, float)
if np.isinf(v):
continue
assert 0 <= v <= 1000
for k, v in self.costs.iteritems():
assert isinstance(k, str)
assert k in self.uses
for i in v:
assert 10 < i < 1000
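# Illustrative sketch (assumptions, not part of the original module): a typical workflow is
# to instantiate SqFtProFormaConfig, override a few of the defaults documented above, then
# validate and convert the settings before handing the config to SqFtProForma below.
def _example_configure_pro_forma():
    conf = SqFtProFormaConfig()
    # add a hypothetical mixed-use form; values are proportions of each use and sum to 1.0
    conf.forms['mixedretail'] = {"retail": 0.4, "office": 0.6}
    conf.parking_rates["office"] = 1.5  # 1.5 spaces per 1,000 sqft of office
    conf.check_is_reasonable()          # asserts the settings are in sane ranges
    conf._convert_types()               # lists/dicts -> numpy vectors ordered by conf.uses
    return conf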
class SqFtProForma(object):
"""
Initialize the square foot based pro forma.
This pro forma has no representation of units - it does not
differentiate between the rent attained by 1BR, 2BR, or 3BR and change
the rents accordingly. This is largely because it is difficult to get
    information
Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
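# Worked example (illustrative): for x = [0, 1, 2] and y = x**2 = [0, 1, 4] the secant
# slopes are dydx = [1, 3], so the interior estimate is yp[1] = (1*1 + 3*1)/(1 + 1) = 2,
# and the end-point extrapolations give yp[0] = 2*1/1 - 2 = 0 and yp[2] = 2*3/1 - 2 = 4,
# i.e. slopes(x, y) -> [0., 2., 4.], matching the derivative 2*x at the sample points.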
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
<NAME>. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were:
not an academic journal but once in a while something serious
and original comes in adding that this was
"apparently a real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by <NAME>, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by <NAME>,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
# Make a closed polygon path
poly = Path( verts )
    # Check to see which points are contained within the Path
return [ idx for idx, p in enumerate(points) if poly.contains_point(p) ]
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
numpy = ma
else:
numpy = np
xs = numpy.asarray(xs)
ys = numpy.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*numpy.ones(2*Nx)
y = numpy.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
numpy = ma
else:
numpy = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*numpy.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*numpy.ones(Nx)
x = numpy.concatenate( (x, x[::-1]) )
y = numpy.concatenate( (yupper, ylower[::-1]) )
return x,y
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
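# Example (illustrative): contiguous_regions([False, True, True, False, True]) returns
# [(1, 3), (4, 5)], i.e. mask[1:3] and mask[4:5] are the runs of True values.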
def cross_from_below(x, threshold):
"""
return the indices into *x* where *x* crosses some threshold from
below, eg the i's where::
x[i-1]<threshold and x[i]>=threshold
Example code::
import matplotlib.pyplot as plt
t = np.arange(0.0, 2.0, 0.1)
s = np.sin(2*np.pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, '-o')
ax.axhline(0.5)
ax.axhline(-0.5)
ind = cross_from_below(s, 0.5)
ax.vlines(t[ind], -1, 1)
ind = cross_from_above(s, -0.5)
ax.vlines(t[ind], -1, 1)
plt.show()
.. seealso::
:func:`cross_from_above` and :func:`contiguous_regions`
"""
x = np.asarray(x)
threshold = threshold
ind = np.nonzero( (x[:-1]<threshold) & (x[1:]>=threshold))[0]
if len(ind): return ind+1
else: return ind
def cross_from_above(x, threshold):
"""
    return the indices into *x* where *x* crosses some threshold from
    above, eg the i's where::
x[i-1]>threshold and x[i]<=threshold
.. seealso::
:func:`cross_from_below` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero( (x[:-1]>=threshold) & (x[1:]<threshold))[0]
if len(ind): return ind+1
else: return ind
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distance between a set of successive points in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. The distances between
successive rows is computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
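# Example (illustrative): for X = np.array([[0., 0.], [3., 4.], [6., 8.]]) the successive
# segment lengths are [5., 5.], so distances_along_curve(X) -> [5., 5.] and
# path_length(X) -> [0., 5., 10.] (cumulative Euclidean distance along the polyline).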
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + | |
seqno, remote_cas, options, scope=None, collection=None):
# extra = struct.pack('>IIQQI', flags, exp, seqno, remote_cas, 0)
exp = 0
extra = struct.pack('>IIQQI', flags, exp, seqno, remote_cas, options)
scope, collection = self.collection_name(scope, collection)
return self._doCmd(cmd, key, value, extra, cas, scope, collection)
def setWithMeta(self, key, value, exp, flags, seqno, remote_cas, options=2, scope=None, collection=None):
"""Set a value and its meta data in the memcached server."""
scope, collection = self.collection_name(scope, collection)
return self._doMetaCmd(memcacheConstants.CMD_SET_WITH_META,
key, value, 0, exp, flags, seqno, remote_cas, options, scope, collection)
def setWithMetaInvalid(self, key, value, exp, flags, seqno, remote_cas, options=2, scope=None, collection=None):
"""Set a value with meta that can be an invalid number memcached server."""
exp = 0
scope, collection = self.collection_name(scope, collection)
# 'i' allows for signed integer as remote_cas value
extra = struct.pack('>IIQiI', flags, exp, seqno, remote_cas, options)
cmd = memcacheConstants.CMD_SET_WITH_META
return self._doCmd(cmd, key, value, extra, 0, scope, collection)
# set with meta using the LWW conflict resolution CAS
def setWithMetaLWW(self, key, value, exp, flags, cas, scope=None, collection=None):
"""Set a value and its meta data in the memcached server.
The format is described here https://github.com/couchbase/ep-engine/blob/master/docs/protocol/set_with_meta.md,
the first CAS will be 0 because that is the traditional CAS, and the CAS in the "extras" will be populated.
The sequence number will be 0 because as to the best of my understanding it is not used with LWW.
"""
#
SET_META_EXTRA_FMT = '>IIQQH' # flags (4), expiration (4), seqno (8), CAS (8), metalen (2)
META_LEN = 0
SEQNO = 0
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, -1)
return self._doCmd(memcacheConstants.CMD_SET_WITH_META, key, value,
struct.pack(memcacheConstants.META_EXTRA_FMT, flags, exp, SEQNO, cas),
scope=scope, collection=collection)
# set with meta using the LWW conflict resolution CAS
def delWithMetaLWW(self, key, exp, flags, cas, scope=None, collection=None):
"""Set a value and its meta data in the memcached server.
The format is described here https://github.com/couchbase/ep-engine/blob/master/docs/protocol/del_with_meta.md,
the first CAS will be 0 because that is the traditional CAS, and the CAS in the "extras" will be populated.
The sequence number will be 0 because as to the best of my understanding it is not used with LWW.
"""
META_LEN = 0
SEQNO = 0
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, -1, scope=scope, collection=collection)
return self._doCmd(memcacheConstants.CMD_DEL_WITH_META, key, '',
struct.pack('>IIQQI', flags, exp, SEQNO, cas, memcacheConstants.FORCE_ACCEPT_WITH_META_OPS),
scope=scope, collection=collection)
# struct.pack(memcacheConstants.META_EXTRA_FMT, flags, exp, SEQNO, cas))
    # hope to remove this and migrate existing calls to the above
def set_with_meta(self, key, exp, flags, seqno, cas, val, vbucket=-1, add_extended_meta_data=False,
adjusted_time=0, conflict_resolution_mode=0, scope=None, collection=None):
"""Set a value in the memcached server."""
self._set_vbucket(key, vbucket ,scope=scope, collection=collection)
scope, collection = self.collection_name(scope, collection)
return self._doCmd(memcacheConstants.CMD_SET_WITH_META,
key,
val,
struct.pack(memcacheConstants.SKIP_META_CMD_FMT,
flags,
exp,
seqno,
cas,
memcacheConstants.CR
), scope=scope, collection=collection)
# Extended meta data was a 4.0 and 4.5 era construct, and not supported in 4.6 Not sure if will ever be needed
# but leaving the code visible in case it is
"""
if add_extended_meta_data:
extended_meta_data = self.pack_the_extended_meta_data( adjusted_time, conflict_resolution_mode)
return self._doCmd(memcacheConstants.CMD_SET_WITH_META, key, val,
struct.pack(memcacheConstants.EXTENDED_META_CMD_FMT, flags, exp, seqno, cas, len(extended_meta_data)),
extended_meta_data=extended_meta_data)
else:
return self._doCmd(memcacheConstants.CMD_SET_WITH_META, key, val,
struct.pack(META_CMD_FMT, flags, exp, seqno, cas) )
"""
def del_with_meta(self, key, exp, flags, seqno, cas, vbucket=-1,
options=0,
add_extended_meta_data=False,
adjusted_time=0, conflict_resolution_mode=0, scope=None, collection=None):
"""Set a value in the memcached server."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
resp = self._doCmd(memcacheConstants.CMD_DELETE_WITH_META, key, '',
struct.pack(memcacheConstants.EXTENDED_META_CMD_FMT, flags,
exp, seqno, cas, options, 0), scope=scope, collection=collection)
return resp
def hello(self, feature_flag): # , key, exp, flags, val, vbucket= -1):
resp = self._doCmd(memcacheConstants.CMD_HELLO, '', struct.pack(">H", feature_flag))
result = struct.unpack('>H', resp[2])
if result[0] != feature_flag:
self.log.info("collections not supported")
else:
self.collections_supported = True
supported = resp[2]
for i in range(0, len(supported), struct.calcsize(">H")):
self.features.update(
struct.unpack_from(">H", supported, i))
if self.is_xerror_supported():
self.error_map = self.get_error_map()
def send_set(self, key, exp, flags, val, vbucket=-1, scope=None, collection=None):
"""Set a value in the memcached server without handling the response"""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
opaque = self.r.randint(0, 2 ** 32)
self._sendCmd(memcacheConstants.CMD_SET, key, val, opaque, struct.pack(SET_PKT_FMT, flags, exp), 0,
scope=scope, collection=collection)
def add(self, key, exp, flags, val, vbucket=-1, scope=None, collection=None):
"""Add a value in the memcached server iff it doesn't already exist."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
return self._mutate(memcacheConstants.CMD_ADD, key, exp, flags, 0, val, scope, collection)
def replace(self, key, exp, flags, val, vbucket=-1, scope=None, collection=None):
"""Replace a value in the memcached server iff it already exists."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
return self._mutate(memcacheConstants.CMD_REPLACE, key, exp, flags, 0,
val, scope, collection)
def observe(self, key, vbucket=-1, scope=None, collection=None):
"""Observe a key for persistence and replication."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
try:
key = key.encode()
except AttributeError:
pass
value = struct.pack('>HH', self.vbucketId, len(key)) + key
opaque, cas, data = self._doCmd(memcacheConstants.CMD_OBSERVE, '', value, scope=scope, collection=collection)
rep_time = (cas & 0xFFFFFFFF)
persist_time = (cas >> 32) & 0xFFFFFFFF
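        # The two lines above split the 64-bit cas value returned by CMD_OBSERVE: the low
        # 32 bits become rep_time and the high 32 bits persist_time. For example
        # (illustrative values), cas = (7 << 32) | 5 unpacks to rep_time = 5, persist_time = 7.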
persisted = struct.unpack('>B', bytes([data[4 + len(key)]]))
return opaque, rep_time, persist_time, persisted, cas
def observe_seqno(self, key, vbucket_uuid, vbucket=-1, scope=None, collection=None):
"""Observe a key for persistence and replication."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
value = struct.pack('>Q', vbucket_uuid)
opaque, cas, data = self._doCmd(memcacheConstants.CMD_OBSERVE_SEQNO, '', value,
scope=scope, collection=collection)
format_type = struct.unpack('>B', data[0])[0]
vbucket_id = struct.unpack('>H', data[1:3])[0]
r_vbucket_uuid = struct.unpack('>Q', data[3:11])[0]
last_persisted_seq_no = struct.unpack('>Q', data[11:19])[0]
current_seqno = struct.unpack('>Q', data[19:27])[0]
if format_type == 1:
old_vbucket_uuid = struct.unpack('>Q', data[27:35])[0]
last_seqno_received = struct.unpack('>Q', data[35:43])[0]
else:
old_vbucket_uuid = None
last_seqno_received = None
return opaque, format_type, vbucket_id, r_vbucket_uuid, last_persisted_seq_no, current_seqno,\
old_vbucket_uuid, last_seqno_received
def __parseGet(self, data, klen=0):
flags = struct.unpack(memcacheConstants.GET_RES_FMT, data[-1][:4])[0]
if klen == 0:
return flags, data[1], data[-1][4 + klen:]
else:
return flags, data[1], b"{" + data[-1].split(b'{')[-1] # take only the value and value starts with "{"
def get(self, key, vbucket=-1, scope=None, collection=None):
"""Get the value for a given key within the memcached server."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
parts = self._doCmd(memcacheConstants.CMD_GET, key, '', scope=scope, collection=collection)
return self.__parseGet(parts)
def send_get(self, key, vbucket=-1, scope=None, collection=None):
""" sends a get message without parsing the response """
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
opaque = self.r.randint(0, 2 ** 32)
self._sendCmd(memcacheConstants.CMD_GET, key, '', opaque, scope, collection)
def getl(self, key, exp=15, vbucket=-1, scope=None, collection=None):
"""Get the value for a given key within the memcached server."""
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
parts = self._doCmd(memcacheConstants.CMD_GET_LOCKED, key, '',
struct.pack(memcacheConstants.GETL_PKT_FMT, exp), scope=scope, collection=collection)
return self.__parseGet(parts)
def getr(self, key, vbucket=-1, scope=None, collection=None):
"""Get the value for a given key within the memcached server from a replica vbucket."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
parts = self._doCmd(memcacheConstants.CMD_GET_REPLICA, key, '', scope=scope, collection=collection)
return self.__parseGet(parts, len(key))
def getMeta(self, key, request_extended_meta_data=False, scope=None, collection=None):
"""Get the metadata for a given key within the memcached server."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, scope=scope, collection=collection)
if request_extended_meta_data:
extras = struct.pack('>B', 1)
else:
extras = ''
opaque, cas, data = self._doCmd(memcacheConstants.CMD_GET_META, key, '', extras,
scope=scope, collection=collection)
deleted = struct.unpack('>I', data[0:4])[0]
flags = struct.unpack('>I', data[4:8])[0]
exp = struct.unpack('>I', data[8:12])[0]
seqno = struct.unpack('>Q', data[12:20])[0]
if request_extended_meta_data:
conflict_res = struct.unpack('>B', data[20:21])[0]
return deleted, flags, exp, seqno, cas, conflict_res
else:
return deleted, flags, exp, seqno, cas
def get_adjusted_time(self, vbucket, scope=None, collection=None):
"""Get the value for a given key within the memcached server."""
scope, collection = self.collection_name(scope, collection)
self.vbucketId = vbucket
return self._doCmd(memcacheConstants.CMD_GET_ADJUSTED_TIME, '', '', scope=scope, collection=collection)
def set_time_drift_counter_state(self, vbucket, drift, state, scope=None, collection=None):
"""Get the value for a given key within the memcached server."""
scope, collection = self.collection_name(scope, collection)
self.vbucketId = vbucket
extras = struct.pack(memcacheConstants.SET_DRIFT_COUNTER_STATE_REQ_FMT, drift, state)
return self._doCmd(memcacheConstants.CMD_SET_DRIFT_COUNTER_STATE, '', '', extras,
scope=scope, collection=collection)
def set_time_sync_state(self, vbucket, state, scope=None, collection=None):
"""Get the value for a given key within the memcached server."""
scope, collection = self.collection_name(scope, collection)
self.vbucketId = vbucket
extras = struct.pack(memcacheConstants.SET_DRIFT_COUNTER_STATE_REQ_FMT, 0, state)
return self._doCmd(memcacheConstants.CMD_SET_DRIFT_COUNTER_STATE, '', '', extras,
scope=scope, collection=collection)
def cas(self, key, exp, flags, oldVal, val, vbucket=-1, scope=None, collection=None):
"""CAS in a new value for the given key and comparison value."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, scope=scope, collection=collection)
self._mutate(memcacheConstants.CMD_SET, key, exp, flags,
oldVal, val, scope, collection)
def touch(self, key, exp, vbucket=-1, scope=None, collection=None):
"""Touch a key in the memcached server."""
scope, collection = self.collection_name(scope, collection)
self._set_vbucket(key, vbucket, | |
<reponame>paleopresto/recommender
import json
import re
from sys import platform as _platform
import os
named_individuals, q_proxy_obs = None, None
periodic_table_elements, periodic_table_name = [], []
proxy_obs_map = {}
ignore_set = set()
unknown_proxy = set()
def initialize_input_data():
'''
q_proxy_obs stores the data queried from the linked earth wiki.
named_individuals stores the data from the linked earth ontology.
Returns
-------
None.
'''
global named_individuals, q_proxy_obs
named_individuals = 'Al/Ca Ar-Ar B/Ca Ba/Ca C Clay fraction Color d13C d15N d170 d180 d34S dD Density Diffuse spectral reflectance Faunal Fe/Ca Floral Grain size Historic Layer thickness Lead Isotope Li/Ca Lithics Luminescence Magnetic susceptibility Mg/Ca Mineral matter Mn/Ca Moisture Content N Neodymium Organic matter P Permeability Porosity Radiocarbon Resistivity Sand fraction Si Silt fraction Sr/Ca TEX86 U-Th Uk37\' Uk37 X-Ray diffraction X-ray fluorescence Zn/Ca'
named_individuals = set(named_individuals.split(' '))
q_proxy_obs = '''DiffuseSpectralReflectance
*JulianDay
*Al/Ca
*B/Ca
*Ba/Ca
*Mn/Ca
*Sr/Ca
*Zn/Ca
*Radiocarbon
*D18O
*Mg/Ca
*TEX86
*TRW
*Dust
*Chloride
*Sulfate
*Nitrate
*D13C
*Depth
*Age
*Mg
*Floral
*DD
*C
*N
*P
*Si
*Uk37
*Uk37Prime
*Density
*GhostMeasured
*Trsgi
*Mg Ca
*SampleCount
*Segment
*RingWidth
*Residual
*ARS
*Corrs
*RBar
*SD
*SE
*EPS
*Core
*Uk37prime
*Upper95
*Lower95
*Year old
*Thickness
*Na
*DeltaDensity
*Reflectance
*BlueIntensity
*VarveThickness
*Reconstructed
*AgeMin
*AgeMax
*SampleID
*Depth top
*Depth bottom
*R650 700
*R570 630
*R660 670
*RABD660 670
*WaterContent
*C N
*BSi
*MXD
*EffectiveMoisture
*Pollen
*Precipitation
*Unnamed
*Sr Ca
*Calcification1
*Calcification2
*Calcification3
*CalcificationRate
*Composite
*Calcification4
*Notes
*Notes1
*Calcification5
*Calcification
*Calcification6
*Calcification7
*Trsgi1
*Trsgi2
*Trsgi3
*Trsgi4
*IceAccumulation
*F
*Cl
*Ammonium
*K
*Ca
*Duration
*Hindex
*VarveProperty
*X radiograph dark layer
*D18O1
*SedAccumulation
*Massacum
*Melt
*SampleDensity
*37:2AlkenoneConcentration
*AlkenoneConcentration
*AlkenoneAbundance
*BIT
*238U
*Distance
*232Th
*230Th/232Th
*D234U
*230Th/238U
*230Th Age uncorrected
*230Th Age corrected
*D234U initial
*TotalOrganicCarbon
*CDGT
*C/N
*CaCO3
*PollenCount
*Weight
*DryBulkDensity
*37:3AlkenoneConcentration
*Min sample
*Max sample
*Age uncertainty
*Is date used original model
*238U content
*238U uncertainty
*232Th content
*232Th uncertainty
*230Th 232Th ratio
*230Th 232Th ratio uncertainty
*230Th 238U activity
*230Th 238U activity uncertainty
*Decay constants used
*Corrected age
*Corrected age unceratainty
*Modern reference
*Al
*S
*Ti
*Mn
*Fe
*Rb
*Sr
*Zr
*Ag
*Sn
*Te
*Ba
*NumberOfObservations
*Diffuse spectral reflectance
*Total Organic Carbon
*BSiO2
*CalciumCarbonate
*WetBulkDensity'''
q_proxy_obs = q_proxy_obs.split('\n *')
def get_periodic_elements():
'''
Get the atomic number and the atomic names for the elements from the periodic table from the file PeriodicTableJSON
Returns
-------
None.
'''
global periodic_table_elements, periodic_table_name
if _platform == "win32":
print(os.getcwd())
periodic_table_path = '..\\utils\\PeriodicTableJSON.json'
else:
periodic_table_path = '../utils/PeriodicTableJSON.json'
with open(periodic_table_path, 'r', encoding="utf8") as jsonfile:
element_json = json.load(jsonfile)
for ele in element_json['elements']:
periodic_table_elements.append(ele['symbol'])
periodic_table_name.append(ele['name'])
def manual_additions_to_map():
global proxy_obs_map, ignore_set
# MANUAL ADDITIONS TO THE PROXY OBS MAP
proxy_obs_map['Calcification'] = 'Calcification'
proxy_obs_map['Trsgi'] = 'Tree Ring Standardized Growth Index'
proxy_obs_map['C37.concentration'] = '37:2AlkenoneConcentration'
proxy_obs_map['trsgi'] = 'Tree Ring Standardized Growth Index'
proxy_obs_map['d-excess'] = 'D-Excess'
proxy_obs_map['deuteriumExcess'] = 'D-Excess'
proxy_obs_map['Deuteriumexcess'] = 'D-Excess'
proxy_obs_map['d2H'] = 'Dd'
proxy_obs_map['dD'] = 'Dd'
proxy_obs_map['d18o'] = 'd18O'
proxy_obs_map['d18O1'] = 'd18O'
proxy_obs_map['Mgca'] = 'Mg/Ca'
proxy_obs_map['Blueintensity'] = 'Blue Intensity'
proxy_obs_map['MXD'] = 'maximum latewood density'
proxy_obs_map['TRW'] = 'Tree Ring Width'
proxy_obs_map['Watercontent'] = 'Water Content'
proxy_obs_map['Samplecount'] = 'Sample Count'
proxy_obs_map['Ringwidth'] = 'Tree Ring Width'
proxy_obs_map['Effectivemoisture'] = 'Effective Moisture'
proxy_obs_map['EPS'] = 'Expressed Population Signal'
proxy_obs_map['TOC'] = 'Total Organic Carbon'
proxy_obs_map['TN'] = 'Total Nitrogen'
proxy_obs_map['Laminathickness'] = 'Lamina Thickness'
proxy_obs_map['Foram.Abundance'] = 'Foraminifera Abundance'
proxy_obs_map['SE'] = 'Standard Error'
proxy_obs_map['Bsi'] = 'Biogenic Silica'
proxy_obs_map['Massacum'] = 'Mass Flux'
proxy_obs_map['R650_700'] = 'Trough area between 650 and 700 nm wavelength'
proxy_obs_map['R570_630'] = 'Ratio between reflectance at 570 and 630 nm wavelength'
proxy_obs_map['R660_670'] = 'Ratio between reflectance at 660 and 670 nm wavelength'
proxy_obs_map['RABD660_670'] = 'relative absorption band depth from 660 to 670 nm'
proxy_obs_map['ARS'] = 'ARSTAN chronology'
proxy_obs_map['Rbar'] = 'Rbar (mean pair correlation)'
proxy_obs_map['D50'] = 'Median, grain size (D50)'
proxy_obs_map['Grainsizemode'] = 'Mode, grain size'
proxy_obs_map['DBD'] = 'Dry Bulk Density'
proxy_obs_map['Dry Bulk Density'] = 'Dry Bulk Density'
proxy_obs_map['Brgdgt'] = 'brGDGT'
proxy_obs_map['Brgdgtiiia'] = 'brGDGT'
proxy_obs_map['Brgdgtiiib'] = 'brGDGT'
proxy_obs_map['Brgdgtiia'] = 'brGDGT'
proxy_obs_map['Brgdgtiib'] = 'brGDGT'
proxy_obs_map['Brgdgtia'] = 'brGDGT'
proxy_obs_map['Brgdgtib'] = 'brGDGT'
proxy_obs_map['N C24'] = 'n-alkane 24 carbon chain'
proxy_obs_map['N C26'] = 'n-alkane 26 carbon chain'
proxy_obs_map['N C28'] = 'n-alkane 28 carbon chain'
proxy_obs_map['IRD'] = 'Ice-rafted debris'
ignore_set = {'Upper95', 'Lower95', 'Sampleid', 'Julianday', 'SD', 'Elevation Sample', 'Repeats', 'Age', 'age', 'Year', 'year', 'Depth', 'depth', 'Hindex', 'Stdev C24', 'Stdev C26', 'Stdev C28', 'Surface.Temp'}
def create_proxy_obs_map():
'''
Clean the input data from wiki and the ontology to develop a mapping of given input to cleaned output.
Several checks are involved. If the input has length <= 2 then it is checked if it is an element in the periodic table
Other reductions are made to create a particular way of writing data example d13C = D13C, Mg_Ca = Mg/Ca
Returns
-------
None.
'''
global proxy_obs_map, unknown_proxy
for proxy in q_proxy_obs:
if proxy in proxy_obs_map or proxy.title() in proxy_obs_map or proxy.lower() in proxy_obs_map or proxy.upper() in proxy_obs_map:
if proxy.title() in proxy_obs_map:
proxy_obs_map[proxy] = proxy_obs_map[proxy.title()]
elif proxy.lower() in proxy_obs_map:
proxy_obs_map[proxy] = proxy_obs_map[proxy.lower()]
elif proxy.upper() in proxy_obs_map:
proxy_obs_map[proxy] = proxy_obs_map[proxy.upper()]
continue
elif 'Uk37' in proxy:
pass
elif proxy[-1].isdigit() and proxy[:-1] in proxy_obs_map:
proxy_obs_map[proxy] = proxy_obs_map[proxy[:-1]]
continue
elif (not proxy.islower() and not proxy.isupper()) and '/' not in proxy and ':' not in proxy:
new_proxy = []
for c in proxy:
if c.isupper():
new_proxy.append(' ')
new_proxy.append(c)
new_proxy = ''.join(new_proxy).strip()
in_periodic = False
for n in new_proxy.split(' '):
if n in periodic_table_elements:
in_periodic = True
break
if not in_periodic:
if new_proxy in proxy_obs_map:
pass
else:
proxy_obs_map[proxy] = new_proxy
proxy_obs_map[new_proxy] = new_proxy
continue
if len(proxy) <= 2:
if proxy in periodic_table_elements:
proxy_obs_map[proxy] = proxy
else:
unknown_proxy.add(proxy)
elif '/' in proxy:
s = proxy.split('/')
if (s[1] == 'Ca' or s[1] == 'N') and s[0] in periodic_table_elements:
proxy_obs_map[proxy] = proxy
elif 'U' in s[0] and 'U' in s[1] or 'Th' in s[0] and 'Th' in s[1]:
proxy_obs_map[proxy] = proxy
else:
unknown_proxy.add(proxy)
elif proxy.startswith('D') or proxy.startswith('d'):
if proxy.isalpha():
proxy_obs_map[proxy] = proxy
else:
proxyl = str('d' + proxy[1:])
if proxyl in named_individuals:
proxy_obs_map[proxy] = proxyl
proxy_obs_map[proxyl] = proxyl
else:
proxy_obs_map[proxy] = proxyl
proxy_obs_map[proxyl] = proxyl
unknown_proxy.add(proxy)
elif proxy.startswith('R') and proxy.replace(' ', '_') in proxy_obs_map:
proxy_obs_map[proxy] = proxy_obs_map[proxy.replace(' ', '_')]
elif ' ' in proxy:
s = proxy.split(' ')
if len(s) > 2:
proxy_obs_map[proxy] = proxy
elif len(s[1]) > 2:
proxy_obs_map[proxy] = proxy
else:
num = s[0].title()
den = s[1].title()
if den in periodic_table_elements and str(num + '/' + den) in q_proxy_obs:
proxy_obs_map[proxy] = str(num + '/' + den)
elif 'prime' in proxy or 'Prime' in proxy:
if 'prime' in proxy:
s = proxy.split('prime')
else:
s = proxy.split('Prime')
if s[0] in named_individuals:
proxy_obs_map[proxy] = str(s[0] + "\'")
elif proxy.lower() in named_individuals or proxy in named_individuals:
proxy_obs_map[proxy] = proxy if proxy in named_individuals else proxy.lower()
else:
proxy_obs_map[proxy] = proxy
unknown_proxy.add(proxy)
# print(proxy_obs_map)
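# Illustrative usage sketch (assumption: this is how the module-level helpers above are
# meant to be chained; the wrapper name below is hypothetical, not part of the original):
def _example_build_and_query_proxy_map():
    initialize_input_data()        # load the wiki / ontology proxy observation strings
    get_periodic_elements()        # read PeriodicTableJSON.json into the element lists
    manual_additions_to_map()      # seed proxy_obs_map with hand-curated entries
    create_proxy_obs_map()         # derive the remaining mappings from the raw strings
    # predict_proxy_obs_type_from_variable_name (defined below) can then be queried;
    # e.g. 'd18o' resolves to 'd18O' via the manual additions above.
    return predict_proxy_obs_type_from_variable_name('d18o')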
def predict_proxy_obs_type_from_variable_name(vname):
'''
This method returns the corresponding mapping for the input string or the nearest corresponding value to the values in the cleaned data.
It uses the proxy_obs_map created using the ontology and the information from the wiki.
    Given the input string, it either finds the mapping in the proxy_obs_map or
    checks whether a partial string from the input is present in proxy_obs_map; it returns the corresponding result along with the remaining unused string.
Parameters
----------
vname : string
Proxy Observation Type being read from the LiPD file currently being processed.
Returns
-------
pred : string
Result from prediction using the proxy_obs_type or prediction using part of the input string
rem : string
Remaining part of input string if partially used for the prediction else 'NA'
'''
pred, rem = 'NA', 'NA'
if vname in proxy_obs_map:
pred = proxy_obs_map[vname]
elif vname.title() in proxy_obs_map:
pred = proxy_obs_map[vname.title()]
elif vname.isupper():
pass
elif '\'' in vname:
pred = vname
elif 'bubbleNumberDensity' in vname:
proxy_obs_map[vname] = vname
pred = vname
elif vname in periodic_table_name:
pred = vname
elif '_' in vname:
if 'Ca' in vname or 'N' in vname:
vname = vname.replace('_', '/')
pred = proxy_obs_map.get(vname, 'NA')
else:
vname = vname.replace('_', ' ')
pred = proxy_obs_map.get(vname, 'NA')
elif 'Planktonic.' in vname or 'Benthic.' in vname or 'planktonic.' in vname or 'benthic.' in vname or 'planktic' in vname or 'Planktic' in vname:
ind = vname.index('.')
| |
<filename>sigpy/block.py
# -*- coding: utf-8 -*-
"""Block reshape functions.
"""
import numpy as np
import numba as nb
from sigpy import backend, config, util
__all__ = ['array_to_blocks', 'blocks_to_array']
def array_to_blocks(input, blk_shape, blk_strides):
"""Extract blocks from an array in a sliding window manner.
Args:
input (array): input array of shape [..., N_1, ..., N_D]
blk_shape (tuple): block shape of length D, with D <= 4.
blk_strides (tuple): block strides of length D.
Returns:
array: array of shape [...] + num_blks + blk_shape, where
num_blks = (N - blk_shape + blk_strides) // blk_strides.
Example:
>>> input = np.array([0, 1, 2, 3, 4, 5])
>>> print(array_to_blocks(input, [2], [2]))
[[0, 1],
[2, 3],
[4, 5]]
"""
if len(blk_shape) != len(blk_strides):
raise ValueError('blk_shape must have the same length as blk_strides.')
D = len(blk_shape)
num_blks = [(i - b + s) // s for i, b,
s in zip(input.shape[-D:], blk_shape, blk_strides)]
batch_shape = list(input.shape[:-D])
batch_size = util.prod(batch_shape)
device = backend.get_device(input)
xp = device.xp
with device:
output = xp.zeros([batch_size] + num_blks + blk_shape,
dtype=input.dtype)
input = input.reshape([batch_size] + list(input.shape[-D:]))
if D == 1:
if device == backend.cpu_device:
_array_to_blocks1(output, input,
batch_size,
blk_shape[-1],
blk_strides[-1],
num_blks[-1])
else: # pragma: no cover
_array_to_blocks1_cuda(input,
batch_size,
blk_shape[-1],
blk_strides[-1],
num_blks[-1],
output,
size=batch_size *
num_blks[-1] * blk_shape[-1])
elif D == 2:
if device == backend.cpu_device:
_array_to_blocks2(output, input,
batch_size, blk_shape[-1], blk_shape[-2],
blk_strides[-1], blk_strides[-2],
num_blks[-1], num_blks[-2])
else: # pragma: no cover
_array_to_blocks2_cuda(input,
batch_size,
blk_shape[-1], blk_shape[-2],
blk_strides[-1], blk_strides[-2],
num_blks[-1], num_blks[-2],
output,
size=batch_size *
num_blks[-1] * num_blks[-2] *
blk_shape[-1] * blk_shape[-2])
elif D == 3:
if device == backend.cpu_device:
_array_to_blocks3(output,
input,
batch_size,
blk_shape[-1],
blk_shape[-2],
blk_shape[-3],
blk_strides[-1],
blk_strides[-2],
blk_strides[-3],
num_blks[-1],
num_blks[-2],
num_blks[-3])
else: # pragma: no cover
_array_to_blocks3_cuda(input,
batch_size,
blk_shape[-1], blk_shape[-2],
blk_shape[-3],
blk_strides[-1], blk_strides[-2],
blk_strides[-3],
num_blks[-1], num_blks[-2],
num_blks[-3],
output,
size=batch_size *
num_blks[-1] * num_blks[-2] *
num_blks[-3] *
blk_shape[-1] * blk_shape[-2] *
blk_shape[-3])
elif D == 4:
if device == backend.cpu_device:
_array_to_blocks4(output,
input,
batch_size,
blk_shape[-1],
blk_shape[-2],
blk_shape[-3],
blk_shape[-4],
blk_strides[-1],
blk_strides[-2],
blk_strides[-3],
blk_strides[-4],
num_blks[-1],
num_blks[-2],
num_blks[-3],
num_blks[-4])
else: # pragma: no cover
_array_to_blocks4_cuda(input,
batch_size,
blk_shape[-1], blk_shape[-2],
blk_shape[-3], blk_shape[-4],
blk_strides[-1], blk_strides[-2],
blk_strides[-3], blk_strides[-4],
num_blks[-1], num_blks[-2],
num_blks[-3], num_blks[-4],
output,
size=batch_size *
num_blks[-1] * num_blks[-2] *
num_blks[-3] * num_blks[-4] *
blk_shape[-1] * blk_shape[-2] *
blk_shape[-3] * blk_shape[-4])
else:
raise ValueError('Only support D <= 4, got {}'.format(D))
return output.reshape(batch_shape + num_blks + blk_shape)
def blocks_to_array(input, oshape, blk_shape, blk_strides):
"""Accumulate blocks into an array in a sliding window manner.
Args:
input (array): input array of shape [...] + num_blks + blk_shape
oshape (tuple): output shape.
blk_shape (tuple): block shape of length D.
blk_strides (tuple): block strides of length D.
Returns:
array: array of shape oshape.
"""
if len(blk_shape) != len(blk_strides):
raise ValueError('blk_shape must have the same length as blk_strides.')
D = len(blk_shape)
num_blks = input.shape[-(2 * D):-D]
batch_shape = list(oshape[:-D])
batch_size = util.prod(batch_shape)
device = backend.get_device(input)
xp = device.xp
with device:
output = xp.zeros([batch_size] + list(oshape[-D:]),
dtype=input.dtype)
input = input.reshape([batch_size] + list(input.shape[-2 * D:]))
if D == 1:
if device == backend.cpu_device:
_blocks_to_array1(output, input,
batch_size, blk_shape[-1],
blk_strides[-1],
num_blks[-1])
else: # pragma: no cover
if np.issubdtype(input.dtype, np.floating):
_blocks_to_array1_cuda(input,
batch_size, blk_shape[-1],
blk_strides[-1],
num_blks[-1],
output,
size=batch_size *
num_blks[-1] * blk_shape[-1])
else:
_blocks_to_array1_cuda_complex(input,
batch_size, blk_shape[-1],
blk_strides[-1],
num_blks[-1],
output,
size=batch_size
* num_blks[-1] *
blk_shape[-1])
elif D == 2:
if device == backend.cpu_device:
_blocks_to_array2(output, input,
batch_size, blk_shape[-1], blk_shape[-2],
blk_strides[-1], blk_strides[-2],
num_blks[-1], num_blks[-2])
else: # pragma: no cover
if np.issubdtype(input.dtype, np.floating):
_blocks_to_array2_cuda(input,
batch_size,
blk_shape[-1], blk_shape[-2],
blk_strides[-1], blk_strides[-2],
num_blks[-1], num_blks[-2],
output,
size=batch_size *
num_blks[-1] * num_blks[-2] *
blk_shape[-1] * blk_shape[-2])
else: # pragma: no cover
_blocks_to_array2_cuda_complex(
input,
batch_size, blk_shape[-1], blk_shape[-2],
blk_strides[-1], blk_strides[-2],
num_blks[-1], num_blks[-2],
output,
size=batch_size * num_blks[-1] * num_blks[-2] *
blk_shape[-1] * blk_shape[-2])
elif D == 3:
if device == backend.cpu_device:
_blocks_to_array3(output,
input,
batch_size, blk_shape[-1],
blk_shape[-2],
blk_shape[-3],
blk_strides[-1],
blk_strides[-2],
blk_strides[-3],
num_blks[-1],
num_blks[-2],
num_blks[-3])
else: # pragma: no cover
if np.issubdtype(input.dtype, np.floating):
_blocks_to_array3_cuda(
input,
batch_size,
blk_shape[-1], blk_shape[-2], blk_shape[-3],
blk_strides[-1], blk_strides[-2], blk_strides[-3],
num_blks[-1], num_blks[-2], num_blks[-3],
output,
size=batch_size * num_blks[-1] * num_blks[-2] *
num_blks[-3] * blk_shape[-1] * blk_shape[-2] *
blk_shape[-3])
else:
_blocks_to_array3_cuda_complex(
input,
batch_size,
blk_shape[-1], blk_shape[-2], blk_shape[-3],
blk_strides[-1], blk_strides[-2], blk_strides[-3],
num_blks[-1], num_blks[-2], num_blks[-3],
output,
size=batch_size *
num_blks[-1] * num_blks[-2] * num_blks[-3] *
blk_shape[-1] * blk_shape[-2] * blk_shape[-3])
elif D == 4:
if device == backend.cpu_device:
_blocks_to_array4(output,
input,
batch_size, blk_shape[-1],
blk_shape[-2],
blk_shape[-3],
blk_shape[-4],
blk_strides[-1],
blk_strides[-2],
blk_strides[-3],
blk_strides[-4],
num_blks[-1],
num_blks[-2],
num_blks[-3],
num_blks[-4])
else: # pragma: no cover
if np.issubdtype(input.dtype, np.floating):
_blocks_to_array4_cuda(
input,
batch_size, blk_shape[-1], blk_shape[-2],
blk_shape[-3], blk_shape[-4],
blk_strides[-1], blk_strides[-2],
blk_strides[-3], blk_strides[-4],
num_blks[-1], num_blks[-2],
num_blks[-3], num_blks[-4],
output,
size=batch_size *
num_blks[-1] * num_blks[-2] *
num_blks[-3] * num_blks[-4] *
blk_shape[-1] * blk_shape[-2] *
blk_shape[-3] * blk_shape[-4])
else:
_blocks_to_array4_cuda_complex(
input,
batch_size, blk_shape[-1], blk_shape[-2],
blk_shape[-3], blk_shape[-4],
blk_strides[-1], blk_strides[-2],
blk_strides[-3], blk_strides[-4],
num_blks[-1], num_blks[-2],
num_blks[-3], num_blks[-4],
output,
size=batch_size * num_blks[-1] * num_blks[-2] *
num_blks[-3] * num_blks[-4] *
blk_shape[-1] * blk_shape[-2] *
blk_shape[-3] * blk_shape[-4])
else:
raise ValueError('Only support D <= 4, got {}'.format(D))
return output.reshape(oshape)
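# Example (a minimal sketch, not executed at import time): with
# non-overlapping blocks (blk_strides == blk_shape) the two functions above
# invert each other, whereas overlapping blocks are summed on the way back,
# so blocks_to_array behaves as the adjoint of array_to_blocks rather than
# its inverse. The block-count arithmetic lives earlier in array_to_blocks
# and is assumed here.
#
#     import numpy as np
#     x = np.arange(8, dtype=np.float32)
#     b = array_to_blocks(x, [2], [2])        # shape num_blks + blk_shape = (4, 2)
#     y = blocks_to_array(b, [8], [2], [2])   # accumulates the blocks back
#     assert np.allclose(x, y)                # exact round trip when blocks do not overlap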
@nb.jit(nopython=True, cache=True) # pragma: no cover
def _array_to_blocks1(output, input, batch_size, Bx, Sx, Nx):
for b in range(batch_size):
for nx in range(Nx):
for bx in range(Bx):
ix = nx * Sx + bx
if ix < input.shape[-1]:
output[b, nx, bx] = input[b, ix]
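# How the indexing above works: block ``nx`` starts at input position
# ``nx * Sx``, so entry ``bx`` of that block is read from
# ``input[nx * Sx + bx]``. Positions falling past the end of the input are
# skipped, so those block entries keep whatever value the output buffer was
# initialised with. For instance, with Bx=3 and Sx=2, block 1 covers input
# indices 2, 3 and 4. The 2-D, 3-D and 4-D variants below apply the same
# mapping independently along each axis.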
@nb.jit(nopython=True, cache=True) # pragma: no cover
def _array_to_blocks2(output, input, batch_size, Bx, By, Sx, Sy, Nx, Ny):
for b in range(batch_size):
for ny in range(Ny):
for nx in range(Nx):
for by in range(By):
for bx in range(Bx):
iy = ny * Sy + by
ix = nx * Sx + bx
if ix < input.shape[-1] and iy < input.shape[-2]:
output[b, ny, nx, by, bx] = input[b, iy, ix]
@nb.jit(nopython=True, cache=True) # pragma: no cover
def _array_to_blocks3(output, input, batch_size, Bx, By, Bz,
Sx, Sy, Sz, Nx, Ny, Nz):
for b in range(batch_size):
for nz in range(Nz):
for ny in range(Ny):
for nx in range(Nx):
for bz in range(Bz):
for by in range(By):
for bx in range(Bx):
iz = nz * Sz + bz
iy = ny * Sy + by
ix = nx * Sx + bx
if (ix < input.shape[-1] and
iy < input.shape[-2] and
iz < input.shape[-3]):
output[b, nz, ny, nx, bz, by,
bx] = input[b, iz, iy, ix]
@nb.jit(nopython=True, cache=True) # pragma: no cover
def _array_to_blocks4(output, input, batch_size, Bx, By, Bz, Bt,
Sx, Sy, Sz, St,
Nx, Ny, Nz, Nt):
for b in range(batch_size):
for nt in range(Nt):
for nz in range(Nz):
for ny in range(Ny):
for nx in range(Nx):
for bt in range(Bt):
for bz in range(Bz):
for by in range(By):
for bx in range(Bx):
it = nt * St + bt
iz = nz * Sz + bz
iy = ny * Sy + by
ix = nx * Sx + bx
if (ix < input.shape[-1] and
iy < input.shape[-2] and
iz < input.shape[-3] and
it < input.shape[-4]):
output[b, nt, nz, ny, nx,
bt, bz, by,
bx] = input[b, it,
iz, iy, ix]
@nb.jit(nopython=True, cache=True) # pragma: no cover
def _blocks_to_array1(output, input, batch_size, Bx, Sx, Nx):
for b in range(batch_size):
for nx in range(Nx):
for bx in range(Bx):
ix = nx * Sx + bx
if ix < output.shape[-1]:
output[b, ix] += input[b, nx, bx]
@nb.jit(nopython=True, cache=True) # pragma: no cover
def _blocks_to_array2(output, input, batch_size, Bx, By, Sx, Sy, Nx, Ny):
for b in range(batch_size):
for ny in range(Ny):
for nx in range(Nx):
for by in range(By):
for bx in range(Bx):
iy = ny * Sy + by
ix = nx * Sx + bx
if ix < output.shape[-1] and iy < output.shape[-2]:
output[b, iy, ix] += input[b, ny, nx, by, bx]
@nb.jit(nopython=True, cache=True) # pragma: no cover
def _blocks_to_array3(output, input, batch_size, Bx, By, Bz, Sx, Sy, Sz,
Nx, Ny, Nz):
for b in range(batch_size):
for nz in range(Nz):
for ny in range(Ny):
for nx in range(Nx):
for bz in range(Bz):
for by in range(By):
for bx in range(Bx):
iz = nz * Sz + bz
iy = ny * Sy + by
ix = nx * Sx + bx
if (ix < output.shape[-1]
and iy < output.shape[-2]
and iz < output.shape[-3]):
output[b, iz, iy, ix] += input[b, nz,
ny, nx,
bz, by, bx]
@nb.jit(nopython=True, cache=True) # pragma: no cover
def _blocks_to_array4(output, input, batch_size, Bx, By, Bz, Bt,
                      Sx, Sy, Sz,
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class PowerShellOperations(object):
"""PowerShellOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API Version. Constant value: "2016-07-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-07-01-preview"
self.config = config
def list_session(
self, resource_group_name, node_name, session, custom_headers=None, raw=False, **operation_config):
"""Gets a list of the active sessions.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PowerShellSessionResources or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.servermanager.models.PowerShellSessionResources or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>`
"""
# Construct URL
url = self.list_session.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=3, pattern=r'[a-zA-Z0-9]+'),
'nodeName': self._serialize.url("node_name", node_name, 'str', max_length=256, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'session': self._serialize.url("session", session, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PowerShellSessionResources', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_session.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServerManagement/nodes/{nodeName}/sessions/{session}/features/powerShellConsole/pssessions'}
def _create_session_initial(
self, resource_group_name, node_name, session, pssession, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_session.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=3, pattern=r'[a-zA-Z0-9]+'),
'nodeName': self._serialize.url("node_name", node_name, 'str', max_length=256, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'session': self._serialize.url("session", session, 'str'),
'pssession': self._serialize.url("pssession", pssession, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PowerShellSessionResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_session(
self, resource_group_name, node_name, session, pssession, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a PowerShell session.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param pssession: The PowerShell sessionId from the user.
:type pssession: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
PowerShellSessionResource or
ClientRawResponse<PowerShellSessionResource> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.servermanager.models.PowerShellSessionResource]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.servermanager.models.PowerShellSessionResource]]
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>`
"""
raw_result = self._create_session_initial(
resource_group_name=resource_group_name,
node_name=node_name,
session=session,
pssession=pssession,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PowerShellSessionResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_session.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServerManagement/nodes/{nodeName}/sessions/{session}/features/powerShellConsole/pssessions/{pssession}'}
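    # Example (a minimal sketch): driving the long-running create_session
    # operation from a service client. The client construction and the
    # ``power_shell`` attribute name are assumptions for illustration; the
    # poller behaviour follows from the LROPoller returned above.
    #
    #     poller = client.power_shell.create_session(
    #         resource_group_name='my-rg', node_name='my-node',
    #         session='session-id', pssession='pssession-id')
    #     ps_session = poller.result()   # blocks until the operation finishes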
def get_command_status(
self, resource_group_name, node_name, session, pssession, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the status of a command.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param pssession: The PowerShell sessionId from the user.
:type pssession: str
:param expand: Gets current output from an ongoing call. Possible
values include: 'output'
:type expand: str or
~azure.mgmt.servermanager.models.PowerShellExpandOption
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PowerShellCommandStatus or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.servermanager.models.PowerShellCommandStatus or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>`
"""
# Construct URL
url = self.get_command_status.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=3, pattern=r'[a-zA-Z0-9]+'),
'nodeName': self._serialize.url("node_name", node_name, 'str', max_length=256, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'session': self._serialize.url("session", session, 'str'),
'pssession': self._serialize.url("pssession", pssession, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'PowerShellExpandOption')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PowerShellCommandStatus', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_command_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServerManagement/nodes/{nodeName}/sessions/{session}/features/powerShellConsole/pssessions/{pssession}'}
def _update_command_initial(
self, resource_group_name, node_name, session, pssession, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.update_command.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=3, pattern=r'[a-zA-Z0-9]+'),
'nodeName': self._serialize.url("node_name", node_name, 'str', max_length=256, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'session': self._serialize.url("session", session, 'str'),
'pssession': self._serialize.url("pssession", pssession, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PowerShellCommandResults', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_command(
self, resource_group_name, node_name, session, pssession, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a running PowerShell command with more data.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param pssession: The PowerShell sessionId from the user.
:type pssession: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
PowerShellCommandResults or
ClientRawResponse<PowerShellCommandResults> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.servermanager.models.PowerShellCommandResults]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.servermanager.models.PowerShellCommandResults]]
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>`
"""
raw_result = self._update_command_initial(
resource_group_name=resource_group_name,
node_name=node_name,
session=session,
pssession=pssession,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PowerShellCommandResults', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_command.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServerManagement/nodes/{nodeName}/sessions/{session}/features/powerShellConsole/pssessions/{pssession}'}
def _invoke_command_initial(
self, resource_group_name, node_name, session, pssession, command=None, custom_headers=None, raw=False, **operation_config):
power_shell_command_parameters = models.PowerShellCommandParameters(command=command)
# Construct URL
url = self.invoke_command.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=3, pattern=r'[a-zA-Z0-9]+'),
            'nodeName': self._serialize.url("node_name",
r"""
Permutations template
This file define high level operations on permutations (alphabet,
the different rauzy induction, ...) shared by reduced and labeled
permutations.
AUTHORS:
- <NAME> (2008-12-20): initial version
.. TODO::
- construct as options different string representations for a permutation
- the two intervals: str
- the two intervals on one line: str_one_line
- the separatrix diagram: str_separatrix_diagram
- twin[0] and twin[1] for reduced permutation
- nothing (useful for Rauzy diagram)
"""
#*****************************************************************************
# Copyright (C) 2008 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.sage_object import SageObject
from copy import copy
from sage.rings.integer import Integer
from sage.combinat.words.alphabet import Alphabet
from sage.graphs.graph import DiGraph
from sage.matrix.constructor import identity_matrix, matrix
from sage.misc.nested_class import NestedClassMetaclass
def interval_conversion(interval=None):
r"""
Converts the argument in 0 or 1.
INPUT:
    - ``interval`` - 'top' (or 't' or 0) or 'bottom' (or 'b' or 1)
OUTPUT:
integer -- 0 or 1
TESTS:
::
sage: from sage.dynamics.interval_exchanges.template import interval_conversion
sage: interval_conversion('top')
0
sage: interval_conversion('t')
0
sage: interval_conversion(0)
0
sage: interval_conversion('bottom')
1
sage: interval_conversion('b')
1
sage: interval_conversion(1)
1
.. Non admissible strings raise a ValueError::
sage: interval_conversion('')
Traceback (most recent call last):
...
ValueError: the interval can not be the empty string
sage: interval_conversion('right')
Traceback (most recent call last):
...
ValueError: 'right' can not be converted to interval
sage: interval_conversion('top_right')
Traceback (most recent call last):
...
ValueError: 'top_right' can not be converted to interval
"""
if isinstance(interval, (int, Integer)):
if interval != 0 and interval != 1:
raise ValueError("interval must be 0 or 1")
return interval
if isinstance(interval,str):
if interval == '':
raise ValueError("the interval can not be the empty string")
if 'top'.startswith(interval): return 0
if 'bottom'.startswith(interval): return 1
raise ValueError("'%s' can not be converted to interval" % (interval))
raise TypeError("'%s' is not an admissible type" % (str(interval)))
def side_conversion(side=None):
r"""
Converts the argument in 0 or -1.
INPUT:
- ``side`` - either 'left' (or 'l' or 0) or 'right' (or 'r' or -1)
OUTPUT:
integer -- 0 or -1
TESTS:
::
sage: from sage.dynamics.interval_exchanges.template import side_conversion
sage: side_conversion('left')
0
sage: side_conversion('l')
0
sage: side_conversion(0)
0
sage: side_conversion('right')
-1
sage: side_conversion('r')
-1
sage: side_conversion(1)
-1
sage: side_conversion(-1)
-1
.. Non admissible strings raise a ValueError::
sage: side_conversion('')
Traceback (most recent call last):
...
ValueError: no empty string for side
sage: side_conversion('top')
Traceback (most recent call last):
...
ValueError: 'top' can not be converted to a side
"""
if side is None: return -1
if isinstance(side,str):
if side == '':
raise ValueError("no empty string for side")
if 'left'.startswith(side): return 0
if 'right'.startswith(side): return -1
raise ValueError("'%s' can not be converted to a side" % (side))
if isinstance(side, (int,Integer)):
if side != 0 and side != 1 and side != -1:
raise ValueError("side must be 0 or 1")
if side == 0: return 0
return -1
raise TypeError("'%s' is not an admissible type" % (str(side)))
def twin_list_iet(a=None):
r"""
Returns the twin list of intervals.
    The twin list is the correspondence between positions of labels, in such
    a way that a[interval][position] is a[1-interval][twin[interval][position]].
INPUT:
- ``a`` - two lists of labels
OUTPUT:
list -- a list of two lists of integers
TESTS::
sage: from sage.dynamics.interval_exchanges.template import twin_list_iet
sage: twin_list_iet([['a','b','c'],['a','b','c']])
[[0, 1, 2], [0, 1, 2]]
sage: twin_list_iet([['a','b','c'],['a','c','b']])
[[0, 2, 1], [0, 2, 1]]
sage: twin_list_iet([['a','b','c'],['b','a','c']])
[[1, 0, 2], [1, 0, 2]]
sage: twin_list_iet([['a','b','c'],['b','c','a']])
[[2, 0, 1], [1, 2, 0]]
sage: twin_list_iet([['a','b','c'],['c','a','b']])
[[1, 2, 0], [2, 0, 1]]
sage: twin_list_iet([['a','b','c'],['c','b','a']])
[[2, 1, 0], [2, 1, 0]]
"""
if a is None : return [[],[]]
twin = [[0]*len(a[0]), [0]*len(a[1])]
for i in range(len(twin[0])) :
c = a[0][i]
j = a[1].index(c)
twin[0][i] = j
twin[1][j] = i
return twin
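# Example (a minimal sketch taken from the doctests above): the defining
# property of the twin list is that every label faces itself, i.e.
# a[i][j] == a[1 - i][twin[i][j]] for each interval i and position j.
#
#     a = [['a', 'b', 'c'], ['b', 'c', 'a']]
#     twin = twin_list_iet(a)          # [[2, 0, 1], [1, 2, 0]]
#     assert all(a[i][j] == a[1 - i][twin[i][j]]
#                for i in (0, 1) for j in range(3))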
def twin_list_li(a=None):
r"""
Returns the twin list of intervals
INPUT:
- ``a`` - two lists of labels
OUTPUT:
list -- a list of two lists of couples of integers
TESTS::
sage: from sage.dynamics.interval_exchanges.template import twin_list_li
sage: twin_list_li([['a','a','b','b'],[]])
[[(0, 1), (0, 0), (0, 3), (0, 2)], []]
sage: twin_list_li([['a','a','b'],['b']])
[[(0, 1), (0, 0), (1, 0)], [(0, 2)]]
sage: twin_list_li([['a','a'],['b','b']])
[[(0, 1), (0, 0)], [(1, 1), (1, 0)]]
sage: twin_list_li([['a'], ['a','b','b']])
[[(1, 0)], [(0, 0), (1, 2), (1, 1)]]
sage: twin_list_li([[], ['a','a','b','b']])
[[], [(1, 1), (1, 0), (1, 3), (1, 2)]]
"""
if a is None: return [[],[]]
twin = [
[(0,j) for j in range(len(a[0]))],
[(1,j) for j in range(len(a[1]))]]
for i in (0,1):
for j in range(len(twin[i])) :
if twin[i][j] == (i,j) :
if a[i][j] in a[i][j+1:] :
# two up or two down
j2 = (a[i][j+1:]).index(a[i][j]) + j + 1
twin[i][j] = (i,j2)
twin[i][j2] = (i,j)
else :
# one up, one down (here i=0)
j2 = a[1].index(a[i][j])
twin[0][j] = (1,j2)
twin[1][j2] = (0,j)
return twin
class Permutation(SageObject):
r"""
Template for all permutations.
.. WARNING::
Internal class! Do not use directly!
    This class implements generic algorithms (stratum, connected component, ...)
    and unifies all its children.
"""
def _repr_(self):
r"""
Representation method of self.
        Applies str to the attribute named by ``_repr_type``: if that
        attribute is callable it is called with ``*_repr_options``, otherwise
        the attribute itself is converted to a string.
TESTS:
::
sage: p = iet.Permutation('a b c','c b a')
sage: p._repr_type = 'str'
sage: p._repr_options = ('\n',)
sage: p #indirect doctest
a b c
c b a
sage: p._repr_options = (' / ',)
sage: p #indirect doctest
a b c / c b a
::
sage: p._repr_type = 'separatrix_diagram'
sage: p._repr_options = (False,)
sage: p #indirect doctest
[[('c', 'a'), 'b'], ['b', ('c', 'a')]]
sage: p._repr_options = (True,)
sage: p
[[(('c', 'a'), 'L'), ('b', 'R')], [('b', 'L'), (('c', 'a'), 'R')]]
::
sage: p._repr_type = '_twin'
sage: p #indirect doctest
[[2, 1, 0], [2, 1, 0]]
"""
if self._repr_type is None:
return ''
elif self._repr_type == 'reduced':
return ''.join(map(str,self[1]))
else:
f = getattr(self, self._repr_type)
if callable(f):
return str(f(*self._repr_options))
else:
return str(f)
def str(self, sep= "\n"):
r"""
A string representation of the generalized permutation.
INPUT:
- ``sep`` - (default: '\n') a separator for the two intervals
OUTPUT:
string -- the string that represents the permutation
EXAMPLES:
For permutations of iet::
sage: p = iet.Permutation('a b c','c b a')
sage: p.str()
'a b c\nc b a'
sage: p.str(sep=' | ')
'a b c | c b a'
..the permutation can be rebuilt from the standard string::
sage: p == iet.Permutation(p.str())
True
For permutations of li::
sage: p = iet.GeneralizedPermutation('a b b','c c a')
sage: p.str()
'a b b\nc c a'
sage: p.str(sep=' | ')
'a b b | c c a'
..the generalized permutation can be rebuilt from the standard string::
sage: p == iet.GeneralizedPermutation(p.str())
True
"""
l = self.list()
s0 = ' '.join(map(str,l[0]))
s1 = ' '.join(map(str,l[1]))
return s0 + sep + s1
_repr_type = 'str'
_repr_options = ("\n",)
def _set_alphabet(self, alphabet):
r"""
Sets the alphabet of self.
TESTS:
sage: p = iet.GeneralizedPermutation('a a','b b')
sage: p.alphabet([0,1]) #indirect doctest
sage: p.alphabet() == Alphabet([0,1])
True
sage: p
0 0
1 1
sage: p.alphabet("cd") #indirect doctest
sage: p.alphabet() == Alphabet(['c','d'])
True
sage: p
c c
d d
Tests with reduced permutations::
sage: p = iet.Permutation('a b','b a',reduced=True)
sage: p.alphabet([0,1]) #indirect doctest
sage: p.alphabet() == Alphabet([0,1])
True
sage: p
0 1
1 0
sage: p.alphabet("cd") #indirect doctest
sage: p.alphabet() == Alphabet(['c','d'])
True
sage: p
c d
d c
::
sage: p = iet.GeneralizedPermutation('a a','b b',reduced=True)
sage: p.alphabet([0,1]) #indirect doctest
sage: p.alphabet() == Alphabet([0,1])
True
sage: p
0 0
1 1
sage: p.alphabet("cd") #indirect doctest
sage: p.alphabet() == Alphabet(['c','d'])
True
sage: p
c c
d d
"""
alphabet = Alphabet(alphabet)
if alphabet.cardinality() < len(self):
raise ValueError("Your alphabet has not enough letters")
self._alphabet = alphabet
def alphabet(self, data=None):
r"""
Manages the alphabet of self.
If there is no argument, the method returns the alphabet used. If the
argument could be converted to an alphabet, this alphabet will be used.
INPUT:
- ``data`` - None or something that could be converted to an alphabet
OUTPUT:
-- either None or the current alphabet
EXAMPLES::
sage: p = iet.Permutation('a b','a b')
sage: p.alphabet([0,1])
sage: p.alphabet() == Alphabet([0,1])
True
sage: p
0 1
0 1
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"source_interface": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"source_address": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"client_cert": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"source_address_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dual_stack_mode": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"user_peer": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"default_portal": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"login_timeout": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"login_block_time": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv6_dns_server1": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dtls_tunnel": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"auto_tunnel_static_route": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"header_x_forwarded_for": {
"type": "string",
"options": [
{
"value": "pass",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "add",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "remove",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tunnel_connect_without_reauth": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"servercert": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_insert_empty_fragment": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_min_proto_ver": {
"type": "string",
"options": [
{
"value": "tls1-0",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "tls1-1",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "tls1-2",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "tls1-3",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
cutout.
"""
hdr = inhdr.copy()
wcskeys = ['ra', 'dec']
if self.wcsinfo is not None:
for j in range(1, self.wcsinfo.wcs.naxis + 1):
for key in ['ctype', 'crpix', 'crval', 'cunit', 'crota']:
wcskeys.append('%s%d' % (key, j))
for k in range(1, self.wcsinfo.wcs.naxis + 1):
for key in ['pc', 'cd']:
wcskeys.append('%s%d_%d' % (key, j, k))
for key in wcskeys:
if key.upper() in hdr.keys():
del hdr[key]
if debug:
print('Deleting original %s keyword' % key.upper())
""" Create a new output header, according to keeplist """
if keeplist != 'all':
tmphdu = pf.PrimaryHDU()
outhdr = tmphdu.header.copy()
for key in keeplist:
if key.upper() in hdr.keys():
outhdr[key] = hdr[key]
if debug:
print(key.upper())
else:
outhdr = hdr
""" Add the WCS information to the header """
if self.wcsinfo is not None:
            wcshdr = self.wcsinfo.to_header()
for key in wcshdr.keys():
outhdr[key] = wcshdr[key]
if debug:
print(key.upper(), wcshdr[key], outhdr[key])
return outhdr
# -----------------------------------------------------------------------
def update_crpix(self, crpixarr, verbose=True):
"""
Updates the CRPIX array in the wcsinfo structure
This method is no longer used, but it remains in the code for
legacy reasons.
Instead of using update_crpix, just set the crpix values directly:
e.g., myim.crpix = [1023.5, 1327.8]
This syntax will call the crpix attribute setter
Inputs:
crpixarr - a list, tuple, or numpy ndarray containing the new
CRPIX values. For most data, this parameter will
contain two elements, to replace CRPIX1 and CRPIX2
"""
self.crpix = crpixarr
# -----------------------------------------------------------------------
def update_cdelt(self, cdeltarr, verbose=True):
"""
Updates the CDELT array in the wcsinfo structure
Inputs:
cdeltarr - a list, tuple, or numpy ndarray containing the new
CDELT values. For most data, this parameter will
contain two elements, to replace CDELT1 and CDELT2
"""
""" Check dimensionality """
if len(cdeltarr) != len(self.wcsinfo.wcs.cdelt):
            raise IndexError(' Input cdelt array length does not match'
                             ' length of current cdelt array')
        """
        Update the CDELT array assuming that the input is in the correct
        format
        """
        if isinstance(cdeltarr, list) or isinstance(cdeltarr, tuple) \
                or isinstance(cdeltarr, np.ndarray):
            if verbose:
                print('Updating CDELT array')
            for i in range(len(cdeltarr)):
                if verbose:
                    print(' %8.2f --> %8.2f' % (self.wcsinfo.wcs.cdelt[i],
                                                cdeltarr[i]))
                self.wcsinfo.wcs.cdelt[i] = cdeltarr[i]
                self.header['cdelt%d' % (i+1)] = cdeltarr[i]
        else:
            raise TypeError('cdeltarr must be list, tuple, or ndarray')
# -----------------------------------------------------------------------
def update_crval(self, crvalarr):
"""
Updates the CRVAL array in the wcsinfo structure.
*** This has been supplemented by the crval @property code, but
is being kept in for legacy reasons ***
Inputs:
crvalarr - a list, tuple, or numpy ndarray containing the new
CRVAL values. For most data, this parameter will
contain two elements, to replace CRVAL1 and CRVAL2
"""
self.crval = crvalarr
# -----------------------------------------------------------------------
def copy_wcsinfo(self, wcshdu):
"""
Takes the wcsinfo from the given wcshdu and copies the information
into the appropriate locations
"""
self.wcsinfo = wcshdu.wcsinfo
self.raaxis = wcshdu.raaxis
self.decaxis = wcshdu.decaxis
self.pixscale = wcshdu.pixscale
self.impa = wcshdu.impa
self.radec = wcshdu.radec
# -----------------------------------------------------------------------
def flip(self, method):
"""
Performs a flip of the data, in order to correct for the way
that certain detectors read out.
Inputs:
method - method to utilize in order to flip the data. Possibilities
are:
'x' - flip the x-axis
'y' - flip the y-axis
'xy' - flip both x and y axes
'pfcam' - flip x and then rotate -90
"""
data = self.data.copy()
hdr = self.header
if 'CRPIX1' in hdr.keys() and 'CRPIX2' in hdr.keys():
do_update = True
crpix1 = hdr['crpix1']
crpix2 = hdr['crpix2']
else:
do_update = False
crpix1 = None
crpix2 = None
if method == 'x':
self.data = data[:, ::-1]
if do_update:
crpix1 = hdr['naxis1'] - hdr['crpix1']
crpix2 = hdr['crpix2']
elif method == 'y':
self.data = data[::-1, :]
if do_update:
crpix1 = hdr['crpix1']
crpix2 = hdr['naxis2'] - hdr['crpix2']
elif method == 'xy':
self.data = data[::-1, ::-1]
if do_update:
crpix1 = hdr['naxis1'] - hdr['crpix1']
crpix2 = hdr['naxis2'] - hdr['crpix2']
elif method == 'pfcam':
self.data = data.T[::-1,::-1]
# NOTE: Still missing correct setting of crpix values
else:
raise ValueError('Flip method %s is not recognized' % str(method))
if do_update:
self.crpix = [crpix1, crpix2]
# self.update_crpix([crpix1, crpix2], verbose=False)
# -----------------------------------------------------------------------
def sigma_clip(self, nsig=3., statsec=None, mask=None,
verbose=False):
"""
Runs a sigma-clipping on image data. After doing outlier rejection
the code returns the mean and rms of the clipped data.
This method is just a minimal wrapper for the sigclip method in the
cdfutils.datafuncs library.
NOTE: The region used for determining these image statistics is set
by the following decision path:
- if statsec is not None, use statsec
- else, use the entire image
for the second option, an optional mask can be used to
exclude known bad pixels from the calculation.
Optional inputs:
nsig - Number of sigma from the mean beyond which points are
rejected. Default=3.
statsec - Region of the input image to be used to determine the
image statistics, defined by the coordinates of its
corners (x1, y1, x2, y2).
If this variable is is None (the default value)
then the image statistics will be determined from:
- the subimage, if it has been set
- else, the entire image.
The format for statsec can be any of the following:
1. A 4-element numpy array
2. A 4-element list: [x1, y1, x2, y2]
3. A 4-element tuple: (x1, y1, x2, y2)
4. statsec=None. In this case, the region used for
determining the pixel statistics defaults to either
the subimage (if defined) or the full image (if no
subimage has been defined)
mask - If some of the input data are known to be bad, they can
be flagged before the inputs are computed by including
a mask. This mask must be set such that True
indicates good data and False indicates bad data
verbose - If False (the default) no information is printed
"""
""" Determine what the input data set is """
scdata = self.data.copy()
if statsec is not None:
x1, y1, x2, y2 = statsec
scdata = scdata[y1:y2, x1:x2]
""" Find the clipped mean and rms """
mu, sig = df.sigclip(scdata, nsig=nsig, mask=mask, verbose=verbose)
""" Store the results and clean up """
del scdata
self.found_rms = True
self.mean_clip = mu
self.rms_clip = sig
return
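    # Example (a minimal sketch, assuming ``myim`` is an instance of this
    # class): estimate the background level and noise from a source-free
    # corner of the image, then read the clipped statistics stored on the
    # object by the call above.
    #
    #     myim.sigma_clip(nsig=3., statsec=(0, 0, 200, 200))
    #     sky, rms = myim.mean_clip, myim.rms_clip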
# -----------------------------------------------------------------------
def smooth(self, size, smtype='median', invar=False):
"""
Smooths the data, using one of the following schemes:
gaussian
median filter
(more to come)
Inputs:
size - sets the smoothing scale. For a Gaussian, this is the
sigma, for median filter this is the length of the square
box being used
smtype - type of smoothing to be done. Options are: 'gauss',
'median'
"""
""" Smooth the data using the requested smoothing type """
if smtype.lower() == 'gauss' or smtype.lower() == 'gaussian':
smdata = filters.gaussian_filter(self.data, sigma=size)
elif smtype.lower() == 'median' or smtype.lower() == 'medfilt':
smdata = filters.median_filter(self.data, size=size)
else:
print('')
print('Smoothing type %s has not been implemented' % smtype)
print('')
raise NameError
""" Return the smoothed data set """
return smdata
# -----------------------------------------------------------------------
def normalize(self, method='sigclip', mask=None):
"""
Normalizes the data in the object. Allowed methods are:
'sigclip' - divide by the clipped mean (the default)
'median' - divide by the median
'mean' - divide by the mean
'average' - alternate way of indicating divide by mean
"""
method = method.lower()
if mask is not None:
data = self.data[mask]
else:
data = self.data
if method == 'median':
normfac = np.median(data)
elif method == 'mean' or method[:3] == 'ave':
normfac = data.mean()
elif method == 'sigclip':
self.sigma_clip(mask=mask)
normfac = self.mean_clip
else:
raise ValueError('method must be one of "sigclip", "median" '
'or "mean"')
self.data /= normfac
return normfac
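    # Example (a minimal sketch, ``myim`` again being an assumed instance):
    # a flat-field style normalisation that divides the image by its clipped
    # mean so the result is centred on unity.
    #
    #     normfac = myim.normalize(method='sigclip')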
# -----------------------------------------------------------------------
def sky_to_zero(self, method='sigclip', mask=None, verbose=False):
"""
Subtracts a constant "sky value" from data in the object.
The allowed methods for determining the sky value are:
'sigclip' - the clipped mean (the default)
'median' - the median
"""
method = method.lower()
if mask is not None:
data = self.data[mask]
else:
data = self.data
if method == 'median':
skyval = np.median(data)
elif method == 'sigclip':
self.sigma_clip(mask=mask)
# Repository: davidryanshay/AIT-Core
import os.path
from unittest import mock
from unittest import TestCase
import pytest
import ait.core.server
from ait.core import cfg
from ait.core.server.handlers import *
from ait.core.server.server import Server
def teardown_module():
ait.config.reload(filename=os.environ["AIT_CONFIG"])
@mock.patch.object(ait.core.log, "warn")
@mock.patch("ait.core.server.broker.Broker")
@mock.patch.object(ait.core.server.server.Server, "_load_streams_and_plugins")
@mock.patch.object(ait.core.server.server.Server, "_create_outbound_stream")
@mock.patch.object(ait.core.server.server.Server, "_create_inbound_stream")
class TestStreamConfigParsing(TestCase):
test_yaml_file = "/tmp/test.yaml"
def tearDown(self):
ait.config = cfg.AitConfig()
if os.path.exists(self.test_yaml_file):
os.remove(self.test_yaml_file)
def test_no_inbound_streams(
self,
create_inbound_stream_mock,
create_outbound_stream_mock,
server_stream_plugin_mock,
broker_class_mock,
log_warn_mock,
):
"""Tests that broker started with no inbound streams specified
and that warning is logged"""
yaml = """
default:
server:
inbound-streams:
outbound-streams:
- stream:
name: sle_data_stream_parallel
input:
- sle_data_stream_ccsds
handlers:
- a_handler
"""
rewrite_and_reload_config(self.test_yaml_file, yaml)
server = Server()
server._load_streams()
log_warn_mock.assert_called_with(
"No valid inbound stream configurations found. "
"No data will be received (or displayed)."
)
assert len(server.outbound_streams) == 1
def test_no_outbound_streams(
self,
create_inbound_stream_mock,
create_outbound_stream_mock,
server_stream_plugin_mock,
broker_class_mock,
log_warn_mock,
):
"""Tests that broker started with no outbound streams specified
and that warning is logged"""
yaml = """
default:
server:
inbound-streams:
- stream:
name: sle_data_stream_parallel
input:
- sle_data_stream_ccsds
handlers:
- a_handler
outbound-streams:
"""
rewrite_and_reload_config(self.test_yaml_file, yaml)
server = Server()
server._load_streams()
log_warn_mock.assert_called_with(
"No valid outbound stream configurations found. "
"No data will be published."
)
assert len(server.inbound_streams) == 1
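# For reference, a minimal sketch of a config carrying both stream
# directions, following the same shape as the YAML snippets used in the
# tests above (stream names, ports and packet names are placeholders):
#
#     default:
#         server:
#             inbound-streams:
#                 - stream:
#                     name: telem_in
#                     input:
#                         - 3076
#                     handlers:
#                         - name: ait.core.server.handlers.PacketHandler
#                           packet: HS_Packet
#             outbound-streams:
#                 - stream:
#                     name: telem_out
#                     input:
#                         - telem_in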
@mock.patch("ait.core.server.broker.Broker")
@mock.patch.object(ait.core.server.server.Server, "_load_streams_and_plugins")
@mock.patch.object(ait.core.server.server.Server, "_create_inbound_stream")
class TestAPITelemStreamCreation(TestCase):
test_yaml_file = "/tmp/test.yaml"
def tearDown(self):
ait.config = cfg.AitConfig()
if os.path.exists(self.test_yaml_file):
os.remove(self.test_yaml_file)
@mock.patch.object(ait.core.log, "warn")
@mock.patch.object(ait.core.log, "info")
def test_no_api_stream_config(
self,
log_info_mock,
log_warn_mock,
create_inbound_stream_mock,
server_stream_plugin_mock,
broker_class_mock,
):
"""Check handling of API stream setup without configuration provided"""
yaml = """
default:
server:
inbound-streams:
- stream:
name: log_stream
input:
- 2514
- stream:
name: telem_stream
input:
- 3076
handlers:
- name: ait.core.server.handlers.PacketHandler
packet: HS_Packet
- stream:
name: other_telem_stream
input:
- 3077
handlers:
- name: ait.core.server.handlers.PacketHandler
packet: Other_HS_Packet
"""
rewrite_and_reload_config(self.test_yaml_file, yaml)
server = Server()
server._create_api_telem_stream()
# Ensure server warns that we need to infer valid streams
log_warn_mock.assert_called_with(
"No configuration found for API Streams. Attempting to "
"determine valid streams to use as default."
)
# Server should let us know that 2 streams are valid
log_info_mock.assert_called_with(
"Located potentially valid streams. ['telem_stream', 'other_telem_stream'] "
"uses a compatible handler (['PacketHandler', 'CCSDSPacketHandler'])."
)
assert create_inbound_stream_mock.called
@mock.patch.object(ait.core.log, "error")
@mock.patch.object(ait.core.log, "warn")
def test_no_valid_defaults(
self,
log_warn_mock,
log_error_mock,
create_inbound_stream_mock,
server_stream_plugin_mock,
broker_class_mock,
):
"""Check no valid streams for API defaults case"""
yaml = """
default:
server:
inbound-streams:
- stream:
name: log_stream
input:
- 2514
"""
rewrite_and_reload_config(self.test_yaml_file, yaml)
server = Server()
server._create_api_telem_stream()
log_warn_mock.assert_called_with(
"Unable to find valid streams to use as API defaults."
)
log_error_mock.assert_called_with(
"No streams available for telemetry API. Ground scripts API "
"functionality will not work."
)
@mock.patch.object(ait.core.log, "warn")
    def test_invalid_stream_names(
self,
log_warn_mock,
create_inbound_stream_mock,
server_stream_plugin_mock,
broker_class_mock,
):
"""Check handling of invalid stream names during API telem setup"""
yaml = """
default:
server:
api-telemetry-streams:
- not_a_valid_stream_name
inbound-streams:
- stream:
name: log_stream
input:
- 2514
"""
rewrite_and_reload_config(self.test_yaml_file, yaml)
server = Server()
server._create_api_telem_stream()
log_warn_mock.assert_called_with(
"Invalid stream name not_a_valid_stream_name. Skipping ..."
)
@mock.patch.object(ait.core.log, "warn")
def test_no_inbound_stream(
self,
log_warn_mock,
create_inbound_stream_mock,
server_stream_plugin_mock,
broker_class_mock,
):
"""Ensure proper handling of a no-bound-stream config file"""
yaml = """"""
rewrite_and_reload_config(self.test_yaml_file, yaml)
server = Server()
server._create_api_telem_stream()
log_warn_mock.assert_called_with(
"Unable to setup API telemetry stream. No streams are configured"
)
@mock.patch("ait.core.server.broker.Broker")
@mock.patch.object(ait.core.server.server.Server, "_load_streams_and_plugins")
class TestStreamCreation(object):
def test_no_stream_config(self, server_stream_plugin_mock_mock, broker_class_mock):
"""Tests that a ValueError is raised when creating streams
with a config of None"""
server = Server()
with pytest.raises(ValueError, match="No stream config to create stream from."):
server._create_inbound_stream(None)
with pytest.raises(ValueError, match="No stream config to create stream from."):
server._create_outbound_stream(None)
def test_no_stream_name(self, server_stream_plugin_mock_mock, broker_class_mock):
"""Tests that a ValueError is raised when creating a stream
with no name specified in the config"""
config = {"input": "some_stream", "handlers": [{"name": "some-handler"}]}
server = Server()
with pytest.raises(
cfg.AitConfigMissing,
match="The parameter stream name is missing from config.yaml",
):
server._get_stream_name(config)
def test_duplicate_stream_name(
self, server_stream_plugin_mock_mock, broker_class_mock
):
"""Tests that a ValueError is raised when creating a stream with
a name that already belongs to another stream or plugin"""
server = Server()
config = {
"input": ["some_stream"],
"name": "myname",
"handlers": [{"name": "some-handler"}],
}
# Testing existing name in plugins
server.plugins = [FakeStream(name="myname")]
with pytest.raises(
ValueError,
match=(
'Duplicate stream name "{}" encountered.'
" Stream names must be unique."
).format("myname"),
):
server._get_stream_name(config)
# Testing existing name in inbound_streams
server.plugins = []
server.inbound_streams = [FakeStream(name="myname")]
with pytest.raises(
ValueError,
match=(
'Duplicate stream name "{}" encountered.'
" Stream names must be unique."
).format("myname"),
):
server._get_stream_name(config)
# Testing existing name in outbound_streams
server.inbound_streams = []
server.outbound_streams = [FakeStream(name="myname")]
with pytest.raises(
ValueError,
match=(
'Duplicate stream name "{}" encountered.'
" Stream names must be unique."
).format("myname"),
):
server._get_stream_name(config)
@mock.patch.object(ait.core.server.server.Server, "_create_handler")
def test_no_inbound_stream_input(
self, create_handler_mock, server_stream_plugin_mock_mock, broker_class_mock
):
"""Tests that a ValueError is raised when creating a stream with
no input specified in the config"""
server = Server()
config = {"name": "some_stream", "handlers": [{"name": "some-handler"}]}
with pytest.raises(
cfg.AitConfigMissing,
match="The parameter {} is missing from config.yaml".format(
"inbound stream {}'s input".format("some_stream")
),
):
server._create_inbound_stream(config)
@mock.patch.object(ait.core.server.server.Server, "_create_handler")
def test_successful_inbound_stream_creation(
self, create_handler_mock, server_stream_plugin_mock_mock, broker_class_mock
):
"""Tests that all types of inbound streams are successfully created"""
# Testing creation of inbound stream with ZMQ input/output
server = Server()
server.broker = ait.core.server.broker.Broker()
config = {
"name": "some_stream",
"input": ["some_input"],
"handlers": [{"name": "some-handler"}],
}
created_stream = server._create_inbound_stream(config)
assert type(created_stream) == ait.core.server.stream.ZMQStream
assert created_stream.name == "some_stream"
assert created_stream.inputs == ["some_input"]
assert type(created_stream.handlers) == list
# Testing creation of inbound stream with port input
config = cfg.AitConfig(config={"name": "some_stream", "input": [3333]})
created_stream = server._create_inbound_stream(config)
assert type(created_stream) == ait.core.server.stream.PortInputStream
assert created_stream.name == "some_stream"
assert created_stream.inputs == [3333]
assert created_stream.handlers == []
@mock.patch.object(ait.core.server.server.Server, "_create_handler")
def test_successful_outbound_stream_creation(
self, create_handler_mock, server_stream_plugin_mock_mock, broker_class_mock
):
"""Tests that all types of outbound streams are successfully created"""
# Testing creation of outbound stream with ZMQ input/output
server = Server()
server.broker = ait.core.server.broker.Broker()
config = {"name": "some_stream", "handlers": [{"name": "some-handler"}]}
created_stream = server._create_outbound_stream(config)
assert type(created_stream) == ait.core.server.stream.ZMQStream
assert created_stream.name == "some_stream"
assert type(created_stream.handlers) == list
# Testing creation of outbound stream with port output
config = cfg.AitConfig(config={"name": "some_stream", "output": 3333})
created_stream = server._create_outbound_stream(config)
assert type(created_stream) == ait.core.server.stream.PortOutputStream
assert created_stream.name == "some_stream"
assert created_stream.out_port == 3333
assert created_stream.handlers == []
@mock.patch("ait.core.server.broker.Broker")
@mock.patch.object(ait.core.server.server.Server, "_load_streams_and_plugins")
class TestHandlerCreation(object):
def test_no_handler_config(self, server_stream_plugin_mock_mock, broker_mock):
"""Tests that a ValueError is raised when creating a handler with
a config of None"""
server = Server()
with pytest.raises(
ValueError, match="No handler config to create handler from."
):
server._create_handler(None)
def test_handler_creation_with_no_configs(
self, server_stream_plugin_mock_mock, broker_mock
):
"""Tests handler is successfully created when it has no configs"""
server = Server()
config = {
"name": "ait.core.server.handlers.PacketHandler",
"packet": "CCSDS_HEADER",
}
handler = server._create_handler(config)
assert type(handler) == ait.core.server.handlers.PacketHandler
assert handler.input_type is None
assert handler.output_type is None
def test_handler_creation_with_configs(
self, server_stream_plugin_mock_mock, broker_mock
):
"""Tests handler is successfully created when it has configs"""
server = Server()
# config = {'name': 'ait.core.server.handlers.example_handler', 'input_type': 'int', 'output_type': 'int'}
config = {
"name": "ait.core.server.handlers.PacketHandler",
"input_type": "int",
"output_type": "int",
"packet": "CCSDS_HEADER",
}
handler = server._create_handler(config)
assert type(handler) == ait.core.server.handlers.PacketHandler
assert handler.input_type == "int"
assert handler.output_type == "int"
def test_handler_that_doesnt_exist(
self, server_stream_plugin_mock_mock, broker_mock
):
"""Tests that exception thrown if handler doesn't exist"""
server = Server()
config = {"name": "some_nonexistant_handler"}
with pytest.raises(
ImportError, match="No module named '{}'".format(config["name"])
):
server._create_handler(config)
@mock.patch.object(ait.core.log, "warn")
@mock.patch.object(ait.core.log, "error")
@mock.patch("ait.core.server.broker.Broker")
@mock.patch.object(ait.core.server.server.Server, "_load_streams_and_plugins")
class TestPluginConfigParsing(object):
test_yaml_file = "/tmp/test.yaml"
def tearDown(self):
ait.config = cfg.AitConfig()
if os.path.exists(self.test_yaml_file):
os.remove(self.test_yaml_file)
def test_no_plugins_listed(
self, server_stream_plugin_mock_mock, broker_mock, log_error_mock, log_warn_mock
):
"""Tests that warning logged if no plugins configured"""
server = Server()
yaml = """
default:
server:
plugins:
"""
rewrite_and_reload_config(self.test_yaml_file, yaml)
server._load_plugins()
log_warn_mock.assert_called_with("No plugins specified in config.")
@mock.patch("ait.core.server.broker.Broker")
@mock.patch.object(ait.core.server.server.Server, "_load_streams_and_plugins")
class TestPluginCreation(object):
def test_plugin_with_no_config(self, server_stream_plugin_mock_mock, broker_mock):
"""Tests that error raised if plugin not configured"""
server = Server()
config = None
with pytest.raises(ValueError, match="No plugin config to create plugin from."):
server._create_plugin(config)
def test_plugin_missing_name(self, server_stream_plugin_mock_mock, broker_mock):
"""Tests that error raised if plugin has no name"""
server = Server()
config = {"inputs": "some_inputs"}
with pytest.raises(
cfg.AitConfigMissing,
match="The parameter plugin name is missing from config.yaml",
):
server._create_plugin(config)
@mock.patch.object(ait.core.log, "warn")
def test_plugin_missing_inputs(
self, log_warn_mock, server_stream_plugin_mock_mock, broker_mock
):
"""Tests that warning logged if plugin has no inputs and
plugin created anyways"""
server = Server()
server.broker = ait.core.server.broker.Broker()
config = {
"name": "ait.core.server.plugins.TelemetryLimitMonitor",
"outputs": "some_stream",
}
server._create_plugin(config)
log_warn_mock.assert_called_with(
"No plugin inputs specified for ait.core.server.plugins.TelemetryLimitMonitor"
)
@mock.patch.object(ait.core.log, "warn")
def test_plugin_missing_outputs(
self, log_warn_mock, server_stream_plugin_mock_mock, broker_mock
):
"""Tests that warning logged if plugin has no inputs and
plugin created anyways"""
server = Server()
server.broker = ait.core.server.broker.Broker()
config = {
"name": "ait.core.server.plugins.TelemetryLimitMonitor",
"inputs": "some_stream",
}
server._create_plugin(config)
log_warn_mock.assert_called_with(
bilinear_test = form.test
else:
#
# Compare against reference test
#
assert bilinear_test.dofhandler() == form.test.dofhandler(),\
'The test functions of every bilinear form in the ' +\
'problem should have the same dofhandler.'
assert bilinear_test.subforest_flag()==form.test.subforest_flag(),\
'The test functions of every bilinear form in the '+\
'problem should be defined on the same sub-mesh.'
if bilinear_trial is None:
#
# Add reference trial function
#
bilinear_trial = form.trial
else:
#
# Compare against reference trial basis
#
assert bilinear_trial.dofhandler() == form.trial.dofhandler(),\
'The trial functions of every bilinear form in the '+\
'problem should have the same dofhandler.'
assert bilinear_trial.subforest_flag()==form.trial.subforest_flag(),\
'The trial functions of every bilinear form in the '+\
'problem should be defined on the same sub-mesh.'
#
# Check whether form is compatible with the assembler's mesh
#
for basis in form.basis():
#
# Check that the mesh is the same
#
basis_mesh = basis.mesh()
assert basis_mesh == self.mesh(), \
'The basis and assembler should be defined on the ' +\
'same mesh.'
#
# Check that the assembler mesh is a refinement of the basis
# mesh.
#
basis_flag = basis.subforest_flag()
own_flag = self.subforest_flag()
assert self.mesh().cells.is_contained_in(own_flag, basis_flag), \
'The assembler mesh should be a refinement of the '+\
'basis mesh.'
def mesh(self):
"""
Returns
-------
Mesh
Returns the mesh over which the forms are assembled.
"""
return self.__mesh
def subforest_flag(self):
"""
Returns
-------
string, int, or double
Returns a sub-mesh flag over which the form is assembled.
"""
return self.__subforest_flag
def assembled_forms(self, i_problem=0):
return self.__af[i_problem]
def assemble(self, keep_cellwise_data=False):
"""
Description
-----------
Assembles constant, linear, and bilinear forms over computational mesh.
Parameters
----------
keep_cellwise_data : bool, optional
If True, retain the cellwise contributions after they have been
distributed to the assembled forms (default: False).
The problems to assemble are taken from self.problems; each problem is a
list of constant, linear, and bilinear forms.
Output:
assembled forms (one per problem and dimension, see assembled_forms),
which contain:
A: double coo_matrix, system matrix determined by bilinear forms and
boundary conditions.
b: double, right hand side vector determined by linear forms and
boundary conditions.
Note: If a problem contains an integral form (IPForm), the assembly
uses a double loop over cells. This is inefficient if problems are mixed.
"""
t_shape_info = 0
#t_gauss_rules = 0
t_shape_eval = 0
t_form_eval = 0
#t_get_node_address = 0
#t_af_update = 0
#t_af_consolidate = 0
#t_reference_map = 0
#
# Assemble forms over mesh cells
#
sf = self.subforest_flag()
cells = self.mesh().cells.get_leaves(subforest_flag=sf)
for ci in cells:
#
# Compute shape functions on cell
#
tic = time.time()
xi_g, wi_g, phii, dofsi = self.shape_eval(ci)
t_shape_eval += time.time()-tic
#
# Assemble local forms and assign to global dofs
#
for problem, i_problem in zip(self.problems, range(self.n_problems())):
#
# Loop over problems
#
for form in problem:
#
# Loop over forms
#
# Get form dimension
dim = form.dim()
# Get assembled form
aform = self.assembled_forms(i_problem)[dim]
#
# Evaluate form
#
if not isinstance(form, IPForm):
#
# Not an integral form
#
# Evaluate local form
tic = time.time()
form_loc = form.eval(ci, xi_g, wi_g, phii, dofsi)
t_form_eval += time.time()-tic
# Update assembled form cellwise
if dim == 0:
#
# Constant form
#
aform.update_cellwise(ci, form_loc)
elif dim == 1:
#
# Linear form
#
dofs = [form.test.dofs(ci)]
aform.update_cellwise(ci, form_loc, dofs=dofs)
elif dim == 2:
#
# Bilinear form
#
# Trial dofs
dofs_trl = form.trial.dofs(ci)
# Test dofs
if isinstance(form, IIForm):
# Interpolatory Integral forms use all dofs
dofs_tst = form.test.dofs(None)
else:
dofs_tst = form.test.dofs(ci)
# Update assembled form
dofs = [dofs_tst, dofs_trl]
aform.update_cellwise(ci, form_loc, dofs=dofs)
if isinstance(form, IPForm):
#
# Form is Double Integral
#
for cj in cells:
#
# Compute shape function on cell
#
xj_g, wj_g, phij, dofsj = self.shape_eval(cj)
#
# Evaluate integral form
#
form_loc = form.eval((ci,cj), (xi_g,xj_g), \
(wi_g,wj_g), (phii,phij),\
(dofsi,dofsj))
# Test and trial dofs
dofs_tst = form.test.dofs(ci)
dofs_trl = form.trial.dofs(cj)
#
# Update Assembled Form
#
aform.update_cellwise(ci, form_loc,
dofs = [dofs_tst, dofs_trl])
#
# Special efficiency when kernel is symmetric
#
if form.kernel.is_symmetric():
if ci!=cj:
#
# Symmetric kernel, store the transpose
#
aform.update_cellwise(ci, form_loc.T,
dofs = [dofs_trl, dofs_tst])
else:
#
# Symmetric kernel: the inner loop stops at cj == ci, so only a
# triangular block of cell pairs is assembled; the transposed
# contributions stored above supply the remaining entries.
#
break
#
# Aggregate cellwise information
#
for i_problem in range(self.n_problems()):
# Get Dirichlet BC's
dir_bc = self.get_dirichlet(i_problem)
# Get hanging nodes
hng = self.get_hanging_nodes(i_problem)
for dim in range(3):
aform = self.assembled_forms(i_problem)[dim]
if aform is not None:
#
# Update aggregate
#
aform.distribute(ci, dir_bc=dir_bc, hng=hng)
#
# Delete cellwise information
#
if not keep_cellwise_data:
aform.clear_cellwise_data(ci)
#
# Consolidate arrays
#
for i_problem in range(self.n_problems()):
for dim in range(3):
aform = self.assembled_forms(i_problem)[dim]
if aform is not None:
aform.consolidate()
"""
for i_problem in range(len(self.problems)):
for form_type in self.af()[i_problem].keys():
#
# Iterate over assembled forms
#
af = self.af[i_problem][form_type]
#
# Consolidate assembly
#
tic = time.time()
af.consolidate(clear_cell_data=clear_cell_data)
t_af_consolidate += time.time()-tic
print('t_consolidate', t_af_consolidate)
print('Timings')
print('Shape infor',t_shape_info)
print('Shape Eval', t_shape_eval)
print('Form Eval', t_form_eval)
print('Get node address', t_get_node_address)
print('AF update', t_af_update)
print('AF consolidate', t_af_consolidate)
"""
'''
#
# Assemble forms over boundary edges
#
if isinstance(self.mesh, Mesh2D):
#
# Determine flags used to mark boundary edges
#
boundary_segments = \
self.mesh.get_boundary_segments(subforest_flag=subforest_flag)
for problem in problems:
for nc in problem['bc']['neumann']:
bnd_segs = self.mesh.get_boundary_segments(subforest_flag=subforest_flag, flag=nc['marker'])
'''
'''
if boundary_conditions is not None:
#
# Unpack boundary data
#
if 'dirichlet' in boundary_conditions:
bc_dirichlet = boundary_conditions['dirichlet']
else:
bc_dirichlet = None
if 'neumann' in boundary_conditions:
bc_neumann = boundary_conditions['neumann']
else:
bc_neumann = None
if 'robin' in boundary_conditions:
bc_robin = boundary_conditions['robin']
else:
bc_robin = None
rows = []
cols = []
dir_dofs_encountered = set()
for node in self.mesh.root_node().get_leaves():
node_dofs = self.dofhandler.get_global_dofs(node)
cell = node.cell()
#
# Assemble local system matrices/vectors
#
if bilinear_forms is not None:
bf_loc = np.zeros((n_dofs,n_dofs))
for bf in bilinear_forms:
bf_loc += self.form_eval(bf, node)
if linear_forms is not None:
lf_loc = np.zeros((n_dofs,))
for lf in linear_forms:
lf_loc += self.form_eval(lf, node)
if boundary_conditions:
#
# Boundary conditions
#
for direction in ['W','E','S','N']:
edge = cell.get_edges(direction)
#
# Check for Neumann conditions
#
neumann_edge = False
if bc_neumann is not None:
for bc_neu in bc_neumann:
m_neu,g_neu = bc_neu
if m_neu(edge):
# ---------------------------------------------
# Neumann edge
# ---------------------------------------------
neumann_edge = True
#
# Update local linear form
#
lf_loc += self.form_eval((g_neu,'v'),node, \
edge_loc=direction)
break
#
# Else Check Robin Edge
#
if not neumann_edge and bc_robin is not None:
for bc_rob in bc_robin:
m_rob, data_rob = bc_rob
if m_rob(edge):
# ---------------------------------------------
# Robin edge
# ---------------------------------------------
gamma_rob, g_rob = data_rob
#
# Update local bilinear form
#
bf_loc += \
gamma_rob*self.form_eval((1,'u','v'),\
node,\
edge_loc=direction)
#
# Update local linear form
#
lf_loc += \
gamma_rob*self.form_eval((g_rob,'v'),\
node,\
edge_loc=direction)
break
#
# Check for Dirichlet Nodes
#
x_ref = self.element.reference_nodes()
x_cell = self.rule_2d.map(cell,x=x_ref)
cell_dofs = np.arange(n_dofs)
if bc_dirichlet is not None:
list_dir_dofs_loc = []
for bc_dir in bc_dirichlet:
m_dir,g_dir = bc_dir
is_dirichlet = m_dir(x_cell[:,0],x_cell[:,1])
if is_dirichlet.any():
dir_nodes_loc = x_cell[is_dirichlet,:]
dir_dofs_loc = cell_dofs[is_dirichlet]
list_dir_dofs_loc.extend(dir_dofs_loc)
for j,x_dir in zip(dir_dofs_loc,dir_nodes_loc):
#
# Modify jth row
#
notj = np.arange(n_dofs)!=j
uj = g_dir(x_dir[0],x_dir[1])
if node_dofs[j] not in dir_dofs_encountered:
bf_loc[j,j] = 1.0
bf_loc[j,notj]=0.0
lf_loc[j] = uj
else:
bf_loc[j,:] = 0.0 # make entire row 0
lf_loc[j] = 0.0
#
# Modify jth column and right hand side
#
lf_loc[notj] -= bf_loc[notj,j]*uj
bf_loc[notj,j] = 0.0
for dof in list_dir_dofs_loc:
dir_dofs_encountered.add(dof)
#
# Local to global mapping
#
for i in range(n_dofs):
#
# Update right hand side
#
if linear_forms is not None:
linvec[node_dofs[i]] += lf_loc[i]
#
# Update system matrix
#
if bilinear_forms is not None:
for j in range(n_dofs):
rows.append(node_dofs[i])
cols.append(node_dofs[j])
bivals.append(bf_loc[i,j])
#
# Save results as a sparse matrix
#
out = []
if bilinear_forms is not None:
A = sparse.coo_matrix((bivals,(rows,cols)))
out.append(A)
if linear_forms is not None:
out.append(linvec)
if len(out) == 1:
return out[0]
elif len(out) == 2:
return tuple(out)
'''
'''
def map_to_global(self, form_loc, form, cell):
"""
Maps local form on a cell (in terms of local
list_constraints_for_portfolio(self, PortfolioId: str, AcceptLanguage: str = None, ProductId: str = None, PageSize: int = None, PageToken: str = None) -> Dict:
"""
Lists the constraints for the specified portfolio and product.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListConstraintsForPortfolio>`_
**Request Syntax**
::
response = client.list_constraints_for_portfolio(
AcceptLanguage='string',
PortfolioId='string',
ProductId='string',
PageSize=123,
PageToken='string'
)
**Response Syntax**
::
{
'ConstraintDetails': [
{
'ConstraintId': 'string',
'Type': 'string',
'Description': 'string',
'Owner': 'string'
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **ConstraintDetails** *(list) --*
Information about the constraints.
- *(dict) --*
Information about a constraint.
- **ConstraintId** *(string) --*
The identifier of the constraint.
- **Type** *(string) --*
The type of constraint.
* ``LAUNCH``
* ``NOTIFICATION``
* ``STACKSET``
* ``TEMPLATE``
- **Description** *(string) --*
The description of the constraint.
- **Owner** *(string) --*
The owner of the constraint.
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type PortfolioId: string
:param PortfolioId: **[REQUIRED]**
The portfolio identifier.
:type ProductId: string
:param ProductId:
The product identifier.
:type PageSize: integer
:param PageSize:
The maximum number of items to return with this call.
:type PageToken: string
:param PageToken:
The page token for the next set of results. To retrieve the first set of results, use null.
:rtype: dict
:returns:
"""
pass
def list_launch_paths(self, ProductId: str, AcceptLanguage: str = None, PageSize: int = None, PageToken: str = None) -> Dict:
"""
Lists the paths to the specified product. A path is how the user has access to a specified product, and is necessary when provisioning a product. A path also determines the constraints put on the product.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListLaunchPaths>`_
**Request Syntax**
::
response = client.list_launch_paths(
AcceptLanguage='string',
ProductId='string',
PageSize=123,
PageToken='string'
)
**Response Syntax**
::
{
'LaunchPathSummaries': [
{
'Id': 'string',
'ConstraintSummaries': [
{
'Type': 'string',
'Description': 'string'
},
],
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'Name': 'string'
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **LaunchPathSummaries** *(list) --*
Information about the launch path.
- *(dict) --*
Summary information about a product path for a user.
- **Id** *(string) --*
The identifier of the product path.
- **ConstraintSummaries** *(list) --*
The constraints on the portfolio-product relationship.
- *(dict) --*
Summary information about a constraint.
- **Type** *(string) --*
The type of constraint.
* ``LAUNCH``
* ``NOTIFICATION``
* ``STACKSET``
* ``TEMPLATE``
- **Description** *(string) --*
The description of the constraint.
- **Tags** *(list) --*
The tags associated with this product path.
- *(dict) --*
Information about a tag. A tag is a key-value pair. Tags are propagated to the resources created when provisioning a product.
- **Key** *(string) --*
The tag key.
- **Value** *(string) --*
The value for this key.
- **Name** *(string) --*
The name of the portfolio to which the user was assigned.
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type ProductId: string
:param ProductId: **[REQUIRED]**
The product identifier.
:type PageSize: integer
:param PageSize:
The maximum number of items to return with this call.
:type PageToken: string
:param PageToken:
The page token for the next set of results. To retrieve the first set of results, use null.
:rtype: dict
:returns:
"""
pass
def list_organization_portfolio_access(self, PortfolioId: str, OrganizationNodeType: str, AcceptLanguage: str = None, PageToken: str = None, PageSize: int = None) -> Dict:
"""
Lists the organization nodes that have access to the specified portfolio. This API can only be called by the master account in the organization.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListOrganizationPortfolioAccess>`_
**Request Syntax**
::
response = client.list_organization_portfolio_access(
AcceptLanguage='string',
PortfolioId='string',
OrganizationNodeType='ORGANIZATION'|'ORGANIZATIONAL_UNIT'|'ACCOUNT',
PageToken='string',
PageSize=123
)
**Response Syntax**
::
{
'OrganizationNodes': [
{
'Type': 'ORGANIZATION'|'ORGANIZATIONAL_UNIT'|'ACCOUNT',
'Value': 'string'
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **OrganizationNodes** *(list) --*
Displays information about the organization nodes.
- *(dict) --*
Information about the organization node.
- **Type** *(string) --*
The organization node type.
- **Value** *(string) --*
The identifier of the organization node.
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type PortfolioId: string
:param PortfolioId: **[REQUIRED]**
The portfolio identifier. For example, ``port-2abcdext3y5fk`` .
:type OrganizationNodeType: string
:param OrganizationNodeType: **[REQUIRED]**
The organization node type that will be returned in the output.
* ``ORGANIZATION`` - Organization that has access to the portfolio.
* ``ORGANIZATIONAL_UNIT`` - Organizational unit that has access to the portfolio within your organization.
* ``ACCOUNT`` - Account that has access to the portfolio within your organization.
:type PageToken: string
:param PageToken:
The page token for the next set of results. To retrieve the first set of results, use null.
:type PageSize: integer
:param PageSize:
The maximum number of items to return with this call.
:rtype: dict
:returns:
"""
pass
def list_portfolio_access(self, PortfolioId: str, AcceptLanguage: str = None) -> Dict:
"""
Lists the account IDs that have access to the specified portfolio.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListPortfolioAccess>`_
**Request Syntax**
::
response = client.list_portfolio_access(
AcceptLanguage='string',
PortfolioId='string'
)
**Response Syntax**
::
{
'AccountIds': [
'string',
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **AccountIds** *(list) --*
Information about the AWS accounts with access to the portfolio.
- *(string) --*
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type PortfolioId: string
:param PortfolioId: **[REQUIRED]**
The portfolio identifier.
:rtype: dict
:returns:
"""
pass
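# Illustrative pagination sketch (not part of the generated stubs). The
# paginated List* operations above and below all follow the same
# PageToken/NextPageToken pattern; 'client' is a hypothetical boto3
# Service Catalog client:
#
#   token = None
#   while True:
#       kwargs = {'PageSize': 20}
#       if token:
#           kwargs['PageToken'] = token
#       page = client.list_portfolios(**kwargs)
#       for portfolio in page['PortfolioDetails']:
#           ...
#       token = page.get('NextPageToken')
#       if not token:
#           break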
def list_portfolios(self, AcceptLanguage: str = None, PageToken: str = None, PageSize: int = None) -> Dict:
"""
Lists all portfolios in the catalog.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListPortfolios>`_
**Request Syntax**
::
response = client.list_portfolios(
AcceptLanguage='string',
PageToken='string',
PageSize=123
)
**Response Syntax**
::
{
'PortfolioDetails': [
{
'Id': 'string',
'ARN': 'string',
'DisplayName': 'string',
'Description': 'string',
'CreatedTime': datetime(2015, 1, 1),
'ProviderName': 'string'
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **PortfolioDetails** *(list) --*
Information about the portfolios.
- *(dict) --*
Information about a portfolio.
- **Id** *(string) --*
The portfolio identifier.
- **ARN** *(string) --*
The ARN assigned to the portfolio.
- **DisplayName** *(string) --*
The name to use for display purposes.
- **Description** *(string) --*
The description of the portfolio.
- **CreatedTime** *(datetime) --*
The UTC time stamp of the creation time.
- **ProviderName** *(string) --*
The name of the portfolio provider.
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type PageToken: string
:param PageToken:
The page token for the next set of results. To retrieve the first set of results, use null.
:type PageSize: integer
:param | |
'Secreted', 'Membrane', 'Protease', 'CAZyme']
header += uniqueNotes.keys()
header += ['Notes', 'gDNA', 'mRNA', 'CDS-transcript', 'Translation']
outfile.write('%s\n' % '\t'.join(header))
for k,v in sortedGenes.items():
for i in range(0,len(v['ids'])):
# for each new feature, start with empty lists
pfams = []
iprs = []
GOS = v['go_terms'][i]
nogs = []
cogs = []
merops = []
cazys = []
secreted = []
membrane = []
therest = []
buscos = []
ecnum = []
alias = []
for key,value in uniqueNotes.items():
uniqueNotes[key] = []
# now grab the data
for y in v['db_xref'][i]:
if y.startswith('PFAM:'):
hit = y.replace('PFAM:', '')
pfams.append(hit)
elif y.startswith('InterPro:'):
hit = y.replace('InterPro:', '')
# look up description in dictionary
desc = InterProDict.get(hit)
iprs.append('{:} {:}'.format(hit, desc))
for y in v['gene_synonym']:
alias.append(y)
for y in v['EC_number'][i]:
ecnum.append(y)
for y in v['note'][i]:
if y.startswith('EggNog:'):
hit = y.replace('EggNog:', '')
nogs.append(hit)
elif y.startswith('BUSCO:'):
hit = y.replace('BUSCO:', '')
buscos.append(hit)
elif y.startswith('MEROPS:'): # change to family name
hit = y.replace('MEROPS:', '')
if hit in meropsDict:
hit = meropsDict.get(hit)
merops.append(hit)
else:
log.error("MEROPS database inconsistency: %s not found" % hit)
elif y.startswith('CAZy:'):
hit = y.replace('CAZy:', '')
cazys.append(hit)
elif y.startswith('COG:'):
hit = y.replace('COG:', '')
hits = hit.split(',')
for x in hits:
desc = x + ':'+ resources.COGS.get(x)
cogs.append(desc)
elif y.startswith('SECRETED:'):
hit = y.replace('SECRETED:', '')
secreted.append(hit)
elif y.startswith('TransMembrane:'):
hit = y.replace('TransMembrane:', '')
membrane.append(hit)
elif y.startswith(tuple(uniqueNotes.keys())):
try:
n = y.split(':')[0]
hit = y.split(':', 1)[1]
uniqueNotes[n].append(hit)
except IndexError:
hit = y
therest.append(hit)
else: # capture everything else
hit = y
therest.append(hit)
# bring together output
result = [k, v['ids'][i], v['type'], v['contig'],
str(v['location'][0]), str(v['location'][1]),
v['strand'], v['name'],
v['product'][i],';'.join(alias),
';'.join(ecnum),';'.join(buscos),
';'.join(pfams),';'.join(iprs),
';'.join(nogs),';'.join(cogs),
';'.join(GOS),
';'.join(secreted),
';'.join(membrane),
';'.join(merops),
';'.join(cazys)
]
for key,value in uniqueNotes.items():
result.append(';'.join(value))
gDNA = getSeqRegions(SeqRecords, v['contig'], [v['location']])
try:
Transcript = str(v['transcript'][i])
except IndexError:
if v['cds_transcript'][i]:
Transcript = str(v['cds_transcript'][i])
else:
print('{:} has no mRNA or CDS transcript'.format(k))
pass
if v['type'] == 'mRNA':
CDSTranscript = str(v['cds_transcript'][i])
try:
Protein = v['protein'][i]
except IndexError:
Protein = ''
print('ERROR: No amino acid sequence exists for {}'.format(v['ids'][i]))
else:
CDSTranscript = ''
Protein = ''
if v['strand'] == '-':
gDNA = RevComp(gDNA)
Transcript = RevComp(Transcript)
CDSTranscript = RevComp(CDSTranscript)
result += [';'.join(therest), gDNA, Transcript,
CDSTranscript, Protein]
# convert any None's to empty string
result = ['' if x is None else x for x in result]
# write to file
outfile.write('%s\n' % '\t'.join(result))
def annotationtableOld(input, Database, output):
'''
Create a TSV annotation table from a GenBank file, capturing all
annotation in a parsable, tab-delimited format that can also be
imported into Excel.
'''
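# Illustrative call sketch (hypothetical paths, not part of the original
# module): annotationtableOld('genome.gbk', '/path/to/funannotate_db',
# 'annotations.tsv') writes one tab-delimited row per tRNA/ncRNA/rRNA/CDS
# feature using the header defined below.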
# convert merops on the fly, need database
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
# input should be fully annotation GBK file from funannotate
with open(output, 'w') as outfile:
header = ['GeneID', 'Feature', 'Contig', 'Start', 'Stop', 'Strand', 'Name', 'Product', 'BUSCO', 'PFAM',
'InterPro', 'EggNog', 'COG', 'GO Terms', 'Secreted', 'Membrane', 'Protease', 'CAZyme', 'Notes', 'Translation']
outfile.write('%s\n' % '\t'.join(header))
for record in SeqIO.parse(input, 'genbank'):
Contig = record.id
for f in record.features:
if f.type in ['tRNA', 'ncRNA', 'rRNA']:
ID = f.qualifiers['locus_tag'][0]
Start = f.location.nofuzzy_start
End = f.location.nofuzzy_end
strand = f.location.strand
if strand == 1:
Strand = '+'
elif strand == -1:
Strand = '-'
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = "None"
result = [ID, f.type, Contig, str(Start), str(
End), Strand, '', Product, '', '', '', '', '', '', '', '', '', '', '', '']
outfile.write('%s\n' % '\t'.join(result))
if f.type == 'CDS':
ID = f.qualifiers['locus_tag'][0]
Start = f.location.nofuzzy_start
End = f.location.nofuzzy_end
strand = f.location.strand
if strand == 1:
Strand = '+'
elif strand == -1:
Strand = '-'
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = 'hypothetical protein'
try:
Name = f.qualifiers['gene'][0]
except KeyError:
Name = ''
try:
Translation = f.qualifiers['translation'][0]
except KeyError:
Translation = ''
pfams = []
iprs = []
GOS = []
nogs = []
cogs = []
merops = []
cazys = []
secreted = []
membrane = []
therest = []
buscos = []
for k, v in list(f.qualifiers.items()):
if k == 'db_xref':
for i in v:
if i.startswith('PFAM:'):
hit = i.replace('PFAM:', '')
pfams.append(hit)
elif i.startswith('InterPro:'):
hit = i.replace('InterPro:', '')
iprs.append(hit)
elif k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('GO'):
go_term = i.split(' ')[1]
GOS.append(go_term)
elif i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
nogs.append(hit)
elif i.startswith('BUSCO:'):
hit = i.replace('BUSCO:', '')
buscos.append(hit)
elif i.startswith('MEROPS:'): # change to family name
hit = i.replace('MEROPS:', '')
if hit in meropsDict:
hit = meropsDict.get(hit)
merops.append(hit)
else:
log.error(
"MEROPS database inconsistency: %s not found" % hit)
elif i.startswith('CAZy:'):
hit = i.replace('CAZy:', '')
cazys.append(hit)
elif i.startswith('COG:'):
hit = i.replace('COG:', '')
hits = hit.split(',')
for x in hits:
desc = x + ':' + resources.COGS.get(x)
cogs.append(desc)
elif i.startswith('SECRETED:'):
hit = i.replace('SECRETED:', '')
secreted.append(hit)
elif i.startswith('TransMembrane:'):
hit = i.replace('TransMembrane:', '')
membrane.append(hit)
else: # capture everything else
hit = i
therest.append(hit)
result = [ID, 'CDS', Contig, str(Start), str(End), Strand, Name, Product, ';'.join(buscos), ';'.join(pfams), ';'.join(iprs), ';'.join(
nogs), ';'.join(cogs), ';'.join(GOS), ';'.join(secreted), ';'.join(membrane), ';'.join(merops), ';'.join(cazys), ';'.join(therest), Translation]
outfile.write('%s\n' % '\t'.join(result))
def ncbiCheckErrors(error, validation, genename, fixOut):
ncbi_error = 0
actual_error = 0
with open(error, 'r') as errors:
for line in errors:
line = line.strip()
if 'ERROR' in line:
num = line.split(' ')[0]
ncbi_error += int(num)
# if errors in summary, then parse validation report, only get errors with gene names
if ncbi_error > 0:
# see if we can get the gene models that need to be fixed
needFixing = {}
with open(validation, 'r') as validationFile:
for line in validationFile:
line = line.strip()
if line.startswith('ERROR') and genename in line:
actual_error += 1
parts = line.split(' ')
for x in parts:
if genename in x:
ID = x.split('|')[-1]
if '-' in ID:
ID = ID.split('-')[0]
reason = line.split(' FEATURE:')[0]
reason = reason.split('] ')[-1]
if not ID in needFixing:
needFixing[ID] = reason
if actual_error > 0:
log.info("There are %i gene models that need to be fixed." %
actual_error)
print('-------------------------------------------------------')
with open(fixOut, 'w') as fix:
fix.write('#GeneID\tError Message\n')
for k, v in natsorted(list(needFixing.items())):
fix.write('%s\t%s\n' % (k, v))
print(('%s\t%s' % (k, v)))
return actual_error
def convert2counts(input):
import pandas as pd
Counts = []
for i in range(0, len(input)):
dict = {}
for k, v in list(input[i].items()):
dict[k] = len(v)
Counts.append(dict)
df = pd.DataFrame(Counts)
df.fillna(0, inplace=True) # fill in zeros for missing data
return df
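# Illustrative example for convert2counts (hypothetical data, not part of
# the original module): each input dict maps an annotation term to the list
# of genes carrying it, and the result counts the list lengths per genome.
#
#   convert2counts([{'PF00001': ['g1', 'g2'], 'PF00002': ['g3']},
#                   {'PF00001': ['g4']}])
#   -> 2 rows; PF00001 = 2, 1 and PF00002 = 1, 0 (missing keys become 0)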
def gb2proteinortho(input, folder, name):
gffOut = os.path.join(folder, name+'.gff')
FastaOut = os.path.join(folder, name+'.faa')
Transcripts = os.path.join(folder, name+'.transcripts.fa')
genes = {}
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
gb_feature_add2dict(f, record, genes)
# now output the files you need
with open(gffOut, 'w') as gff:
with open(FastaOut, 'w') as fasta:
with open(Transcripts, 'w') as transcripts:
for k, v in natsorted(list(genes.items())):
if v['type'] == 'mRNA':
for i, item in enumerate(v['ids']):
transcripts.write(">{:} {:} codon_start={:} strand={:}\n{:}\n".format(
item, k, v['codon_start'][i], v['strand'], v['cds_transcript'][i]))
fasta.write(">%s %s\n%s\n" %
(item, k, v['protein'][i]))
gff.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], item, k, v['product'][i]))
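# Illustrative call sketch (hypothetical paths/names, not part of the
# original module): gb2proteinortho('genome.gbk', '/tmp/orthology', 'MySpecies')
# would write MySpecies.gff, MySpecies.faa and MySpecies.transcripts.fa into
# the given folder for downstream orthology analysis.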
def drawStackedBar(panda, type, labels, ymax, output, colors=False):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import numpy as np
from funannotate.stackedBarGraph import StackedBarGrapher
# stackedbargraph from summary data
SBG = StackedBarGrapher()
# labels
d_labels = panda.index.values
# y-ticks
ticks = np.linspace(0, ymax, 6)
ticks = list(ticks)
nums = [int(x) for x in ticks]
vals = [str(x) for x in nums]
yticks = [nums, vals]
# colors
if not colors:
color_palette = sns.hls_palette(
len(panda.columns), l=.4, s=.8).as_hex()
color_palette = [str(x).upper() for x in color_palette]
else:
color_palette = colors
# set up plot
sns.set_style('darkgrid')
sns.set_context('paper')
fig = plt.figure()
ax = fig.add_subplot(111)
YLabel = "Number of "+type
SBG.stackedBarPlot(ax, panda, color_palette, xLabels=panda.index.values,
endGaps=True, gap=0.25, xlabel="Genomes", ylabel=YLabel, yTicks=yticks)
plt.title(type+" summary")
# get the legend
legends = []
i = 0
for column in panda.columns:
legends.append(mpatches.Patch(
color=color_palette[i], label=panda.columns.values[i] + ": " + labels.get(panda.columns.values[i])))
i += 1
lgd = ax.legend(handles=legends, fontsize=6, loc='upper left',
bbox_to_anchor=(1.02, 1), borderaxespad=0)
plt.ylim([0, ymax])
# set the font size; ideally this would scale proportionally, but a fixed reasonable size is used.
for item in ax.get_xticklabels():
item.set_fontsize(8)
# setup the | |
# RESTAURANT DINING RESERVATION SYSTEM
# Program Description: This program utilizes the class RestaurantReservation to execute the user's desired choices.
# This program stores the restaurant reservations in the txt file named reservations.txt
# The txt file can be located at currentDirectory/reservations/reservations.txt
# Importing os so that the program will be able to create the folder which will house the reservations.txt
import os
# getting current directory where the program is currently located.
try:
currentDirectory = os.getcwd()
###print(currentDirectory)
except:
print ( " Error : Cannot find the Current Directory. " )
# This will facilitate the user's choices.
class RestaurantReservation():
def __init__( self ):
# If there is no reservations folder yet, the program will create one.
try:
self.reservationsList = [ line.strip() for line in open ( currentDirectory + "\\reservations\\reservations.txt" ) ]
except:
if not os.path.exists( currentDirectory + "\\reservations\\" ): # For creating the reservations folder which will house reservations.txt
os.makedirs( currentDirectory + "\\reservations\\" )
def viewReservations( self ): # This is for displaying the current reservations stored in the txt file.
try:
if not os.path.exists( currentDirectory + "\\reservations\\" ): # For creating the reservations folder which will house reservations.txt
os.makedirs( currentDirectory + "\\reservations\\" )
print ( "\n======================== VIEW RESERVATIONS =========================\n" )
self.reservationsList = [ line.strip() for line in open ( currentDirectory + "\\reservations\\reservations.txt" ) ]
self.fields = [ "#", "Date", "Time", "Name", "Adults", "Children" ]
self.splitToSix = lambda lst, sz: [ lst[i:i+sz] for i in range( 0, len(lst), sz)]
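# Each reservation occupies 5 consecutive lines in reservations.txt
# (date, time, name, adults, children), so the flat list is chunked
# into groups of 5; the extra displayed column is the running number.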
self.data = self.splitToSix( self.reservationsList , 5)
if len( self.reservationsList ) == 0:
print ( " There are no reservations. " )
print ( "\n====================================================================\n" )
return
else:
self.format_row = "{:<17}" * (len( self.fields ))
print( self.format_row.format( *self.fields ))
for i in range( len( self.data ) ):
print( self.format_row.format( i + 1, *self.data[i] ) )
print ( "\n====================================================================\n" )
return
except:
print ( " There are no reservations. " )
print ( "\n====================================================================\n" )
return
def deleteReservations( self ): # This is for deleting a reservation. The user will only have to choose the reservation number.
try:
if not os.path.exists( currentDirectory + "\\reservations\\" ): # For creating the reservations folder which will house reservations.txt
os.makedirs( currentDirectory + "\\reservations\\" )
print ( "\n======================= DELETE RESERVATIONS ========================\n" )
self.fields = [ "#", "Date", "Time", "Name", "Adults", "Children" ]
self.splitToSix = lambda lst, sz: [ lst[i:i+sz] for i in range( 0, len(lst), sz)]
try:
self.reservationsList = [ line.strip() for line in open ( currentDirectory + "\\reservations\\reservations.txt" ) ]
self.data = self.splitToSix( self.reservationsList , 5)
except:
self.reservationsList = []
if len( self.reservationsList ) == 0:
print ( " There are no reservations. " )
print ( "\n====================================================================\n" )
return
else:
self.format_row = "{:<17}" * (len( self.fields ))
print( self.format_row.format( *self.fields ))
for i in range( len( self.data ) ):
print( self.format_row.format( i + 1, *self.data[i] ) )
print ( "\n====================================================================\n" )
self.toDelete = int( input( " Enter reservation number: " ) )
if ( self.toDelete - 1 ) < ( len( self.data ) ) and self.toDelete > 0:
open( currentDirectory + "\\reservations\\reservations.txt" , 'w').close()
for i in range( len( self.data ) ):
if i == ( self.toDelete - 1 ):
pass
else:
reservationsTXT = open( currentDirectory + "\\reservations\\reservations.txt" , "a+" )
reservationsTXT.write( self.data[i][0] + "\n" )
reservationsTXT.write( self.data[i][1] + "\n" )
reservationsTXT.write( self.data[i][2] + "\n" )
reservationsTXT.write( str(self.data[i][3]) + "\n" )
reservationsTXT.write( str(self.data[i][4]) + "\n" )
reservationsTXT.close()
print ( "\n====================================================================\n" )
print ( " The reservation has been deleted. " )
print ( "\n====================================================================\n" )
else:
print ( " Invalid Input " )
print ( "\n====================================================================\n" )
except:
print ( " Invalid Input " )
print ( "\n====================================================================\n" )
return
def generateReport( self ): # This is for displaying a report. This function will calculate the total adults, children, subtotal, and grand total.
try:
if not os.path.exists( currentDirectory + "\\reservations\\" ): # For creating the reservations folder which will house reservations.txt
os.makedirs( currentDirectory + "\\reservations\\" )
print ( "\n============================= REPORTS ==============================\n" )
self.reservationsList = [ line.strip() for line in open ( currentDirectory + "\\reservations\\reservations.txt" ) ]
self.fields = [ "#", "Date", "Time", "Name", "Adults", "Children", "Subtotal" ]
self.splitToSix = lambda lst, sz: [ lst[i:i+sz] for i in range( 0, len(lst), sz)]
self.data = self.splitToSix( self.reservationsList , 5)
self.grandTotal = 0
self.totalAdults = 0
self.totalKids = 0
if len( self.reservationsList ) == 0:
print ( " There are no reservations. " )
print ( "\n====================================================================\n" )
return
else:
self.format_row = "{:<17}" * (len( self.fields ))
print( self.format_row.format( *self.fields ))
for i in range( len( self.data ) ):
self.subTotal = ( 500 * float( self.data[i][3] ) ) + ( 300 * float( self.data[i][4] ) )
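# Pricing used by this report (taken from the line above): PHP 500 per
# adult and PHP 300 per child, e.g. 2 adults + 1 child -> 500*2 + 300*1 = 1300.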
self.grandTotal = self.grandTotal + self.subTotal
self.totalAdults = self.totalAdults + int( self.data[i][3] )
self.totalKids = self.totalKids + int( self.data[i][4] )
print( self.format_row.format( i + 1, *self.data[i] , self.subTotal ) )
print ( "\n Total number of Adults: ", self.totalAdults )
print ( " Total number of Kids: " , self.totalKids )
print ( " Grand Total: PHP" , self.grandTotal )
print ( "\n...........................nothing follows.........................." )
except:
print ( " There are no reservations. " )
print ( "\n====================================================================\n" )
return
def makeReservations ( self ): # This is for creating a reservation.
try:
if not os.path.exists( currentDirectory + "\\reservations\\" ): # For creating the reservations folder which will house reservations.txt
os.makedirs( currentDirectory + "\\reservations\\" )
print ( "\n========================= MAKE RESERVATION =========================\n" )
self.name = str( input ( " Enter name: " ) )
self.date = str( input ( " Enter date: " ) )
self.time = str( input ( " Enter time: " ) )
self.nAdults = int( input ( " Enter number of Adults: " ) )
self.nChildren = int( input ( " Enter number of Children: " ) )
print ( "\n====================================================================\n" )
if self.name == "" or self.date == "" or self.time == "":
print ( " Empty inputted string detected. " )
elif ( all( x.isalpha() or x.isspace() for x in self.name ) or self.name.isalpha() ):
self.testDate = self.isDateFormat( self.date )
self.testTime = self.isTimeFormat( self.time )
if self.testDate == True and self.testTime == True :
reservationsTXT = open( currentDirectory + "\\reservations\\reservations.txt" , "a+" )
reservationsTXT.write( self.date + "\n" )
reservationsTXT.write( self.time + "\n" )
reservationsTXT.write( self.name + "\n" )
reservationsTXT.write( str(self.nAdults) + "\n" )
reservationsTXT.write( str(self.nChildren) + "\n" )
reservationsTXT.close()
print ( " {} has successfully made a reservation. ".format( self.name ) )
print ( "\n====================================================================\n" )
else:
pass
else:
print ( " Invalid Input in name. " )
print ( " Name must be in alpha form. " )
return
except:
print ( " Invalid Input. " )
return
def isDateFormat( self, checkDate ): # This is for checking if the user's inputted date follows the format. Example: November 8, 2000
# months_day maps each recognized month name (full or abbreviated) to its number of days
self.months_day = { "January" : 31, "February" : 28, "March" : 31, "April" : 30,
"May" : 31, "June" : 30, "July" : 31, "August" : 31, "September" : 30,
"October" : 31, "November" : 30, "December" : 31 ,
"Jan" : 31, "Feb" : 28, "Mar" : 31, "Apr" : 30,
"May" : 31, "Jun" : 30, "Jul" : 31, "Aug" : 31, "Sep" : 30,
"Oct" : 31, "Nov" : 30, "Dec" : 31}
self.checkDate = checkDate
try:
# The program will use .split to get determine which are the inputtedDay, inputtedMonth, and inputtedYear
self.splitDate = self.checkDate.split( ", " )
self.splitDate2 = self.splitDate[0].split( " " )
self.inputtedMonth = self.splitDate2[0]
self.inputtedDay = int(self.splitDate2[1])
self.inputtedYear = int(self.splitDate[1])
if ( self.inputtedMonth.capitalize() in self.months_day ): # Check if the user's inputted month is recognized by the program.
if ( self.inputtedDay <= self.months_day[self.inputtedMonth.capitalize()] ) and self.inputtedDay > 0: # check if the inputted day is within the month's number of days
if ( len( str( self.inputtedYear)) == 4 ): # | |
if self.headless:
options.add_argument('--headless')
self['seleniumrc'] = selenium.webdriver.Chrome(
options=options,
service_args=['--log-path=chromedriver.log'])
WD_LAYER = WebdriverLayer(name='WebdriverLayer', bases=(HTTP_LAYER,))
WEBDRIVER_LAYER = gocept.selenium.WebdriverSeleneseLayer(
name='WebdriverSeleneseLayer', bases=(WD_LAYER,))
# XXX Hopefully not necessary once we're on py3
class OutputChecker(zope.testing.renormalizing.RENormalizing):
string_prefix = re.compile(r"(\W|^)[uUbB]([rR]?[\'\"])", re.UNICODE)
# Strip out u'' and b'' literals, adapted from
# <https://stackoverflow.com/a/56507895>.
def remove_string_prefix(self, want, got):
return (re.sub(self.string_prefix, r'\1\2', want),
re.sub(self.string_prefix, r'\1\2', got))
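# Illustrative effect of the prefix stripping above (not part of the
# original module):
#   remove_string_prefix("u'foo'", "b'foo'") -> ("'foo'", "'foo'")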
def check_output(self, want, got, optionflags):
# `want` is already unicode, since we pass `encoding` to DocFileSuite.
if not isinstance(got, six.text_type):
got = got.decode('utf-8')
want, got = self.remove_string_prefix(want, got)
super_ = zope.testing.renormalizing.RENormalizing
return super_.check_output(self, want, got, optionflags)
def output_difference(self, example, got, optionflags):
if not isinstance(got, six.text_type):
got = got.decode('utf-8')
example.want, got = self.remove_string_prefix(example.want, got)
super_ = zope.testing.renormalizing.RENormalizing
return super_.output_difference(self, example, got, optionflags)
checker = OutputChecker([
(re.compile(r'\d{4} \d{1,2} \d{1,2} \d\d:\d\d:\d\d'), '<FORMATTED DATE>'),
(re.compile('0x[0-9a-f]+'), "0x..."),
(re.compile(r'/\+\+noop\+\+[0-9a-f]+'), ''),
(re.compile(
'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'),
"<GUID>"),
])
def remove_exception_module(msg):
"""Copy&paste so we keep the exception message and support multi-line."""
start, end = 0, len(msg)
name_end = msg.find(':', 0, end)
i = msg.rfind('.', 0, name_end)
if i >= 0:
start = i + 1
return msg[start:end]
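# Illustrative effect (not part of the original module): only the module
# path before the exception class name is dropped, the message is kept:
#   remove_exception_module('zope.interface.Invalid: not allowed')
#   -> 'Invalid: not allowed'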
if sys.version_info > (3,):
doctest._strip_exception_details = remove_exception_module
optionflags = (doctest.REPORT_NDIFF +
doctest.NORMALIZE_WHITESPACE +
doctest.ELLIPSIS +
doctest.IGNORE_EXCEPTION_DETAIL)
def DocFileSuite(*paths, **kw):
kw['package'] = doctest._normalize_module(kw.get('package'))
kw.setdefault('checker', checker)
kw.setdefault('optionflags', optionflags)
kw['encoding'] = 'utf-8'
return doctest.DocFileSuite(*paths, **kw)
def FunctionalDocFileSuite(*paths, **kw):
layer = kw.pop('layer', WSGI_LAYER)
kw['package'] = doctest._normalize_module(kw.get('package'))
globs = kw.setdefault('globs', {})
globs['getRootFolder'] = lambda: layer['zodbApp']
globs['layer'] = layer
kw.setdefault('checker', checker)
kw.setdefault('optionflags', optionflags)
kw['encoding'] = 'utf-8'
test = doctest.DocFileSuite(*paths, **kw)
test.layer = layer
return test
class RepositoryHelper(object):
@property
def repository(self):
import zeit.cms.repository.interfaces
with site(self.getRootFolder()):
return zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
@repository.setter
def repository(self, value):
self.__dict__['repository'] = value
class FunctionalTestCase(
unittest.TestCase,
gocept.testing.assertion.Ellipsis,
gocept.testing.assertion.Exceptions,
gocept.testing.assertion.String,
RepositoryHelper):
def getRootFolder(self):
"""Returns the Zope root folder."""
return self.layer['zodbApp']
def setUp(self):
super(FunctionalTestCase, self).setUp()
zope.component.hooks.setSite(self.getRootFolder())
self.principal = create_interaction(u'zope.user')
# XXX We should subclass instead of monkey-patch, but then I'd have
# to change all the layer declarations in the zeit.* packages, sigh.
def selenium_setup_authcache(self):
# NOTE: Massively kludgy workaround. It seems that Firefox has a timing
# issue with HTTP auth and AJAX calls: if you open a page that requires
# auth and has AJAX calls to further pages that require the same auth,
# sometimes those AJAX calls come back as 401 (nothing to do with
# Selenium, we've seen this against the actual server).
#
# It seems that opening a page and then giving it a little time
# to settle in is enough to work around this issue.
original_setup(self)
s = self['selenium']
self['http_auth_cache'] = True
# XXX It seems something is not ready immediately?!??
s.pause(1000)
# XXX Credentials are duplicated from SeleniumTestCase.open().
s.open('http://user:userpw@%s/++skin++vivi/@@test-setup-auth'
% self['http_address'])
# We don't really know how much time the browser needs until it's
# satisfied, or how we could determine this.
s.pause(1000)
original_setup = gocept.selenium.webdriver.WebdriverSeleneseLayer.setUp
gocept.selenium.webdriver.WebdriverSeleneseLayer.setUp = (
selenium_setup_authcache)
def selenium_teardown_authcache(self):
original_teardown(self)
del self['http_auth_cache']
original_teardown = gocept.selenium.webdriver.WebdriverSeleneseLayer.tearDown
gocept.selenium.webdriver.WebdriverSeleneseLayer.tearDown = (
selenium_teardown_authcache)
@pytest.mark.selenium
class SeleniumTestCase(gocept.selenium.WebdriverSeleneseTestCase,
FunctionalTestCase):
skin = 'cms'
log_errors = False
log_errors_ignore = ()
level = 2
TIMEOUT = int(os.environ.get('ZEIT_SELENIUM_TIMEOUT', 10))
window_width = 1100
window_height = 600
def setUp(self):
super(SeleniumTestCase, self).setUp()
self.layer['selenium'].setTimeout(self.TIMEOUT * 1000)
if self.log_errors:
error_log = zope.component.getUtility(
zope.error.interfaces.IErrorReportingUtility)
error_log.copy_to_zlog = True
error_log._ignored_exceptions = self.log_errors_ignore
self.log_handler = logging.StreamHandler(sys.stdout)
logging.root.addHandler(self.log_handler)
self.old_log_level = logging.root.level
logging.root.setLevel(logging.WARN)
transaction.commit()
self.original_windows = set(self.selenium.getAllWindowIds())
self.original_width = self.selenium.getEval('window.outerWidth')
self.original_height = self.selenium.getEval('window.outerHeight')
self.selenium.setWindowSize(self.window_width, self.window_height)
self.execute('window.localStorage.clear()')
def tearDown(self):
super(SeleniumTestCase, self).tearDown()
if self.log_errors:
logging.root.removeHandler(self.log_handler)
logging.root.setLevel(self.old_log_level)
current_windows = set(self.selenium.getAllWindowIds())
for window in current_windows - self.original_windows:
self.selenium.selectWindow(window)
self.selenium.close()
self.selenium.selectWindow()
self.selenium.setWindowSize(self.original_width, self.original_height)
# open a neutral page to stop all pending AJAX requests
self.open('/@@test-setup-auth')
def open(self, path, auth='user:userpw'):
if auth:
auth += '@'
self.selenium.open(
'http://%s%s/++skin++%s%s' % (
auth, self.selenium.server, self.skin, path))
def click_label(self, label):
self.selenium.click('//label[contains(string(.), %s)]' %
xml.sax.saxutils.quoteattr(label))
js_globals = """\
var document = window.document;
var zeit = window.zeit;
"""
def execute(self, text):
return self.selenium.selenium.execute_script(self.js_globals + text)
def eval(self, text):
return self.execute('return ' + text)
def wait_for_condition(self, text):
self.selenium.waitForCondition(self.js_globals + """\
return Boolean(%s);
""" % text)
def wait_for_dotted_name(self, dotted_name):
partial = []
for part in dotted_name.split('.'):
partial.append(part)
self.wait_for_condition('.'.join(partial))
def add_by_autocomplete(self, text, widget):
s = self.selenium
s.type(widget, text)
autocomplete_item = 'css=.ui-menu-item a'
s.waitForElementPresent(autocomplete_item)
s.waitForVisible(autocomplete_item)
s.click(autocomplete_item)
s.waitForNotVisible('css=.ui-menu')
def click_wo_redirect(browser, *args, **kwargs):
browser.follow_redirects = False
try:
browser.getLink(*args, **kwargs).click()
print((browser.headers['Status']))
print((browser.headers['Location']))
finally:
browser.follow_redirects = True
def set_site(site=None):
"""Encapsulation of the getSite/setSite-dance, with doctest support."""
globs = sys._getframe(1).f_locals
if site is None:
site = globs['getRootFolder']()
zope.component.hooks.setSite(site)
# XXX use zope.publisher.testing for the following two
def create_interaction(name='zope.user'):
name = six.text_type(name) # XXX At least zope.dublincore requires unicode
principal = zope.security.testing.Principal(
name, groups=['zope.Authenticated'], description=u'<EMAIL>')
request = zope.publisher.browser.TestRequest()
request.setPrincipal(principal)
zope.security.management.newInteraction(request)
return principal
@contextlib.contextmanager
def interaction(principal_id=u'zope.user'):
if zope.security.management.queryInteraction():
# There already is an interaction. Great. Leave it alone.
yield
else:
principal = create_interaction(principal_id)
yield principal
zope.security.management.endInteraction()
# XXX use zope.component.testing.site instead
@contextlib.contextmanager
def site(root):
old_site = zope.component.hooks.getSite()
zope.component.hooks.setSite(root)
yield
zope.component.hooks.setSite(old_site)
@zope.interface.implementer(zope.i18n.interfaces.IGlobalMessageCatalog)
class TestCatalog(object):
language = 'tt'
messages = {}
def queryMessage(self, msgid, default=None):
return self.messages.get(msgid, default)
getMessage = queryMessage
def getIdentifier(self):
return 'test'
def reload(self):
pass
def copy_inherited_functions(base, locals):
"""py.test annotates the test function object with data, e.g. required
fixtures. Normal inheritance means that there is only *one* function object
(in the base class), which means for example that subclasses cannot specify
different layers, since they would all aggregate on that one function
object, which would be completely wrong.
Usage: copy_inherited_functions(BaseClass, locals())
"""
def make_delegate(name):
def delegate(self):
return getattr(super(type(self), self), name)()
return delegate
for name in dir(base):
if not name.startswith('test_'):
continue
locals[name] = make_delegate(name)
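# Illustrative sketch of the pattern described in the docstring above
# (hypothetical classes; the layer names are the ones defined in this
# module):
#
#   class ExampleBase(FunctionalTestCase):
#       layer = ZOPE_LAYER
#       def test_something(self):
#           ...
#
#   class ExampleWithBrowser(ExampleBase):
#       layer = WSGI_LAYER
#       copy_inherited_functions(ExampleBase, locals())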
class BrowserAssertions(gocept.testing.assertion.Ellipsis):
# XXX backwards-compat method signature for existing tests, should probably
# be removed at some point
def assert_ellipsis(self, want, got=None):
if got is None:
got = self.browser.contents
self.assertEllipsis(want, got)
def assert_json(self, want, got=None):
if got is None:
got = self.browser.contents
data = json.loads(got)
self.assertEqual(want, data)
return data
class Browser(zope.testbrowser.browser.Browser):
follow_redirects = True
xml_strict = False
def __init__(self, wsgi_app):
super(Browser, self).__init__(wsgi_app=wsgi_app)
def login(self, username, password):
auth = base64.b64encode(
('%s:%s' % (username, password)).encode('utf-8'))
if sys.version_info > (3,):
auth = auth.decode('ascii')
self.addHeader('Authorization', 'Basic %s' % auth)
def reload(self):
# Don't know what the superclass is doing here, exactly, but it's not
# helpful at all, so we reimplement it in a hopefully more sane way.
if self._response is None:
raise zope.testbrowser.browser.BrowserStateError(
'No URL has yet been .open()ed')
self.open(self.url)
def _processRequest(self, url, make_request):
self._document = None
transaction.commit()
old_site = zope.component.hooks.getSite()
zope.component.hooks.setSite(None)
old_interaction = zope.security.management.queryInteraction()
zope.security.management.endInteraction()
try:
# No super call, since we had to copy&paste the whole method.
self._do_processRequest(url, make_request)
finally:
zope.component.hooks.setSite(old_site)
if old_interaction:
zope.security.management.thread_local.interaction = (
old_interaction)
# copy&paste from superclass _processRequest to plug in `follow_redirects`
def _do_processRequest(self, url, make_request):
with self._preparedRequest(url) as reqargs:
self._history.add(self._response)
resp = make_request(reqargs)
if self.follow_redirects:
remaining_redirects = 100 # infinite loops protection
while (remaining_redirects and
resp.status_int in zope.testbrowser.browser.REDIRECTS):
remaining_redirects -= 1
url = urljoin(url, resp.headers['location'])
with self._preparedRequest(url) as reqargs:
resp = self.testapp.get(url, **reqargs)
assert remaining_redirects > 0, "redirect chain looks infinite"
self._setResponse(resp)
self._checkStatus()
HTML_PARSER = lxml.html.HTMLParser(encoding='UTF-8')
_document = None
@property
def document(self):
"""Return an lxml.html.HtmlElement instance of the response body."""
if self._document is not None:
return self._document
if self.contents is not None:
if self.xml_strict:
self._document = lxml.etree.fromstring(self.contents)
else:
self._document = lxml.html.document_fromstring(
self.contents, parser=self.HTML_PARSER)
return self._document
def xpath(self, selector, **kw):
"""Return a list of lxml.HTMLElement instances that match a given
XPath selector.
"""
if self.document is not None:
return self.document.xpath(selector, **kw)
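# Illustrative sketch (not part of the original class): typical use of the
# lxml-backed helpers from a test. The URL and selector are placeholders.
def _example_browser_xpath(browser):
    browser.open('http://localhost/index.html')
    # `document` parses the response body once and caches the tree until the
    # next request; `xpath` evaluates a selector against that cached tree.
    return browser.xpath('//h1/text()')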
# Allow webtest to handle file download result iterators
webtest.lint.isinstance = zope.security.proxy.isinstance
class BrowserTestCase(FunctionalTestCase, BrowserAssertions):
login_as = ('user', 'userpw')
def setUp(self):
super(BrowserTestCase, self).setUp()
self.browser = Browser(self.layer['wsgi_app'])
if isinstance(self.login_as, six.string_types): # BBB:
self.login_as = self.login_as.split(':')
self.browser.login(*self.login_as)
# These ugly names are due to two reasons:
# 1. zeit.cms.testing contains both general test mechanics *and*
# specific test infrastructure/layers for zeit.cms itself
# 2. pytest does not allow for subclassing a TestCase and changing its layer
# (for the same reason as copy_inherited_functions above).
class ZeitCmsTestCase(FunctionalTestCase):
layer = ZOPE_LAYER
class ZeitCmsBrowserTestCase(BrowserTestCase):
layer = WSGI_LAYER
class JSLintTestCase(gocept.jslint.TestCase):
jshint_command = os.environ.get('JSHINT_COMMAND', '/bin/true')
options = {
'esversion': '6',
'evil': True,
'eqnull': True,
'multistr': True,
'sub': True,
'undef': True,
'browser': True,
'jquery': True,
'devel': True,
}
predefined = (
'zeit', 'gocept',
'application_url', 'context_url',
'DOMParser', 'escape', 'unescape',
'jsontemplate',
'MochiKit', '$$', 'forEach', 'filter', 'map', 'extend', 'bind',
'log', 'repr', 'logger', 'logDebug', 'logError', # XXX
'DIV', 'A', 'UL', 'LI', 'INPUT', 'IMG', 'SELECT', 'OPTION', 'BUTTON',
'SPAN', 'LABEL',
'isNull', 'isUndefined', 'isUndefinedOrNull',
'Uri',
'_', # js.underscore
)
ignore = (
"Functions | |
All input pixels are altered toward a set of
multiple points obtained from the two-dimensional
polynomial parabolic surface.
Third_Order-This technique tends to create a
smoother color change and uses less storage in
the auxiliary table, but it may take longer to
process compared to the color grid surface.
All input pixels are altered toward multiple
points obtained from the cubic surface.
------------------------------------ --------------------------------------------------------------------
target_image Optional. The image service you want to use to color balance
the images in the image collection.
It can be a portal Item or an image service URL or a URI
------------------------------------ --------------------------------------------------------------------
context Optional dictionary. It contains additional settings that allows
users to customize the statistics computation settings.
Example:
{"skipRows": 10, "skipCols": 10, "reCalculateStats": "OVERWRITE"}
------------------------------------ --------------------------------------------------------------------
gis Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
==================================== ====================================================================
:return:
The imagery layer url
'''
gis = arcgis.env.active_gis if gis is None else gis
params = {}
_set_image_collection_param(gis,params, image_collection)
color_correction_allowed_values = ['Dodging', 'Histogram', 'Standard_Deviation']
if [element.lower() for element in color_correction_allowed_values].count(color_correction_method.lower()) <= 0 :
raise RuntimeError('color_correction_method can only be one of the following: '+str(color_correction_allowed_values))
for element in color_correction_allowed_values:
if color_correction_method.lower() == element.lower():
params['colorCorrectionMethod'] = element
dodging_surface_type_allowed_values = ['Single_Color', 'Color_Grid', 'First_Order','Second_Order','Third_Order']
if [element.lower() for element in dodging_surface_type_allowed_values].count(dodging_surface_type.lower()) <= 0 :
raise RuntimeError('dodging_surface_type can only be one of the following: '+str(dodging_surface_type_allowed_values))
for element in dodging_surface_type_allowed_values:
if dodging_surface_type.lower() == element.lower():
params['dodgingSurface'] = element
if target_image is not None:
if isinstance(target_image, str):
if 'http:' in target_image or 'https:' in target_image:
params['targetImage'] = json.dumps({ 'url' : target_image })
else:
params['targetImage'] = json.dumps({ 'uri' : target_image })
elif isinstance(target_image, Item):
params['targetImage'] = json.dumps({ "itemId" : target_image.itemid })
else:
raise TypeError("target_image should be a string (url or uri) or Item")
_set_context(params, context)
task = 'ComputeColorCorrection'
job_values = _execute_task(gis, task, params)
return job_values["result"]["url"]
###################################################################################################
## Compute Control Points
###################################################################################################
def compute_control_points(image_collection, reference_image=None, image_location_accuracy="High", context = None, *, gis=None, **kwargs):
'''
This service tool is used for computing matching control points between images
within an image collection and/or matching control points between the image
collection images and the reference image.
http://pro.arcgis.com/en/pro-app/tool-reference/data-management/compute-control-points.htm
==================================== ====================================================================
**Argument** **Description**
------------------------------------ --------------------------------------------------------------------
image_collection Required. This is the image collection that will be adjusted.
The image_collection can be a portal Item or an image service URL or a URI
The image_collection must exist.
------------------------------------ --------------------------------------------------------------------
reference_image This is the reference image service that can be used to generate ground control
points set with the image service.
It can be a portal Item or an image service URL or a URI
------------------------------------ --------------------------------------------------------------------
image_location_accuracy Optional string. This option allows you to specify the GPS location accuracy
level of the source image. It determines how far the tool will search for
neighboring matching images for calculating tie points and block adjustments.
The following are the available options:
Low, Medium, High
Low- GPS accuracy of 20 to 50 meters, and the tool uses a maximum of 4
by 12 images.
Medium- GPS accuracy of 10 to 20 meters, and the tool uses a maximum of
4 by 6 images.
High- GPS accuracy of 0 to 10 meters, and the tool uses a maximum of 4 by 3 images.
If the image collection is created from satellite data, it will be automatically switched
to use RPC adjustment mode. In this case, the mode need not be explicitly set by the user.
Default is High
------------------------------------ --------------------------------------------------------------------
context Optional dictionary. Context contains additional environment settings that affect
output control points generation. Possible keys and their possible values are:
pointSimilarity- Sets LOW, MEDIUM, or HIGH tolerance for computing control points
with varying levels of potential error.
 LOW tolerance will produce the most control points, but may have a higher
 level of error.
 HIGH tolerance will produce the fewest control points,
 but each matching pair will have a lower level of error.
MEDIUM tolerance will set the similarity tolerance to medium.
pointDensity- Sets the number of tie points (LOW, MEDIUM, or HIGH), to be created.
LOW point density will create the fewest number of tie points.
MEDIUM point density will create a moderate number of tie points.
HIGH point density will create the highest number of tie points.
pointDistribution- Randomly generates points that are better for overlapping areas
with irregular shapes.
RANDOM- will generate points that are better for overlapping areas
with irregular shapes.
REGULAR- will generate points based on a
fixed pattern and uses the point density to determine how frequently to create points.
Example:
{
"pointSimilarity":"MEDIUM",
"pointDensity": "MEDIUM",
"pointDistribution": "RANDOM"
}
------------------------------------ --------------------------------------------------------------------
gis Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
==================================== ====================================================================
:return:
The imagery layer url
'''
gis = arcgis.env.active_gis if gis is None else gis
params = {}
_set_image_collection_param(gis, params, image_collection)
if reference_image is not None:
if isinstance(reference_image, str):
            if 'http:' in reference_image or 'https:' in reference_image:
params['referenceImage'] = json.dumps({ 'url' : reference_image })
else:
params['referenceImage'] = json.dumps({ 'uri' : reference_image })
elif isinstance(reference_image, Item):
params['referenceImage'] = json.dumps({ "itemId" : reference_image.itemid })
else:
raise TypeError("reference_image should be a string (url or uri) or Item")
image_location_accuracy_allowed_values = ['Low', 'Medium', 'High']
if [element.lower() for element in image_location_accuracy_allowed_values].count(image_location_accuracy.lower()) <= 0 :
raise RuntimeError('location_accuracy can only be one of the following:' +str(image_location_accuracy_allowed_values))
for element in image_location_accuracy_allowed_values:
if image_location_accuracy.lower() == element.lower():
params["imageLocationAccuracy"]=element
_set_context(params, context)
task = 'ComputeControlPoints'
job_values = _execute_task(gis, task, params)
return job_values["result"]
###################################################################################################
## Compute Seamlines
###################################################################################################
def compute_seamlines(image_collection,
seamlines_method,
context = None,
*,
gis=None,
**kwargs):
'''
Compute seamlines on the image collection. This service tool is used to compute
seamlines for the image collection, usually after the image collection has been
block adjusted. Seamlines are helpful for generating the seamless mosaicked
display of overlapped images in an image collection. The seamlines are computed
only for candidates that will eventually be used for generating the result
ortho-mosaicked image.
http://pro.arcgis.com/en/pro-app/tool-reference/data-management/build-seamlines.htm
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
image_collection Required, the input image collection that will be adjusted.
The image_collection can be a portal Item or an image service URL or a URI
The image_collection must exist.
------------------ --------------------------------------------------------------------
seamlines_method Required string. The supported methods for generating seamlines for the image collection.
VORONOI-Generate seamlines using the area Voronoi diagram.
DISPARITY-Generate seamlines based on the disparity images of stereo pairs.
GEOMETRY - Generate seamlines for overlapping areas based on the intersection
of footprints. Areas with no overlapping imagery will merge the footprints.
RADIOMETRY - Generate seamlines based on the spectral patterns of features
within the imagery.
EDGE_DETECTION - Generate seamlines over intersecting areas based on the
edges of features in the area.
This method can avoid seamlines cutting through buildings.
------------------ --------------------------------------------------------------------
context Optional dictionary. Context contains additional settings that allows users to customize
the seamlines generation.
Example:
{"minRegionSize": 100,
"pixelSize": "",
"blendType": "Both",
"blendWidth": null,
"blendUnit": "Pixels",
"requestSizeType": "Pixels",
"requestSize": 1000,
"minThinnessRatio": 0.05,
"maxSilverSize": 20
}
Allowed keys are:
"minRegionSize", "pixelSize", "blendType", "blendWidth",
"blendUnit", "requestSizeType", "requestSize",
"minThinnessRatio", "maxSilverSize"
------------------ --------------------------------------------------------------------
gis Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
================== ====================================================================
:return:
The Imagery layer url
'''
gis = arcgis.env.active_gis if gis is None else gis
params = {}
_set_image_collection_param(gis, params, image_collection)
contextAllowedValues= {"minRegionSize", "pixelSize", "blendType", "blendWidth",
"blendUnit", "requestSizeType", "requestSize",
"minThinnessRatio", "maxSilverSize"
}
seamlines_method_allowed_values = ['VORONOI', 'DISPARITY','GEOMETRY', 'RADIOMETRY', 'EDGE_DETECTION']
if [element.lower() for element in seamlines_method_allowed_values].count(seamlines_method.lower()) <= 0 :
raise RuntimeError('seamlines_method can only be one of the following: '+str(seamlines_method_allowed_values))
for element in seamlines_method_allowed_values:
if seamlines_method.lower() == element.lower():
params["seamlinesMethod"]=element
_set_context(params, context)
| |
_logger.error("Compartment not found: %s", cspec)
else:
cvols = comp.all_volumes()
vols += cvols
else:
# compartment specified with display name regexp
comps = sess.find_compartments(display_name=cspec)
if len(comps) == 0:
_logger.error("No compartments matching '%s' found", cspec)
else:
for comp in comps:
cvols = comp.all_volumes()
vols += cvols
except Exception as e:
_logger.error('Failed to get data for compartment %s: %s', cspec, str(e))
else:
#
# -C/--compartment option wasn't used, default to the instance's own
# compartment
try:
comp = sess.this_compartment()
avail_domain = sess.this_availability_domain()
if comp is not None:
vols = comp.all_volumes(availability_domain=avail_domain)
_title = "Other available storage volumes %s/%s" % (comp.get_display_name(), avail_domain)
else:
_logger.error("Compartment for this instance not found")
except Exception as e:
_logger.error('Failed to get data for this compartment: %s', str(e))
if len(vols) == 0:
_logger.info("No additional storage volumes found.")
return
_vols_to_be_displayed = []
for v in vols:
if v.is_attached() and not show_all:
continue
# display also the attached ones
_vols_to_be_displayed.append(v)
_vols_to_be_displayed.sort()
_display_oci_volume_list(_vols_to_be_displayed, output_mode, details, truncate)
def _do_attach_oci_block_volume(sess, ocid, chap=False):
"""
Make API calls to attach a volume with the given OCID to this instance.
Parameters
----------
sess : OCISession
An OCISession instance
ocid : str
The volume OCID
chap: bool
Set the Require Chap Credentials flag if True
Returns
-------
OCIVolume
Raise:
Exception if attachment failed
"""
_logger.debug('Attaching volume [%s]', ocid)
vol = sess.get_volume(ocid)
if vol is None:
raise Exception('Volume [%s] not found' % ocid)
if vol.is_attached():
if vol.get_instance().get_ocid() == sess.this_instance().get_ocid():
# attached to this instance already
_msg = 'Volume [%s] already attached to this instance' % ocid
else:
_msg = 'Volume [%s] already attached to instance %s [%s]' % (ocid,
vol.get_instance().get_ocid(),
vol.get_instance().get_display_name())
raise Exception(_msg)
    _logger.info('Attaching OCI Volume [%s] to this instance.', ocid)
# vol = vol.attach_to(instance_id=sess.this_instance().get_ocid(), wait=True)
vol = vol.attach_to(instance_id=sess.this_instance().get_ocid(), use_chap=chap, wait=True)
_logger.debug("Volume [%s] attached", ocid)
return vol
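# Illustrative sketch (not part of the original module): attach a volume by OCID
# with CHAP enabled and log the outcome; the session and OCID come from the caller.
def _example_attach_volume(sess, ocid):
    try:
        vol = _do_attach_oci_block_volume(sess, ocid, chap=True)
        _logger.info('Attached [%s] as %s', ocid, vol.get_display_name())
        return vol
    except Exception as e:
        _logger.error('Attach of volume [%s] failed: %s', ocid, str(e))
        return None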
def get_volume_by_iqn(sess, iqn):
"""
Gets a volume by given IQN
Parameters
----------
sess: OCISession
The OCISEssion instance..
iqn: str
The iSCSI qualified name.
Returns
-------
OCIVolume : the found volume or None
"""
_logger.debug('Looking for volume with IQN == %s', iqn)
#
# _GT_
# if not hasattr(get_volume_by_iqn, 'all_this_instance_volume'):
# _logger.debug('_GT_ attr A %s', sess.this_instance().all_volumes())
# get_volume_by_iqn.all_this_instance_volume = sess.this_instance().all_volumes()
# else:
# _logger.debug('_GT_ attr B %s', get_volume_by_iqn.all_this_instance_volume)
try:
if bool(sess):
get_volume_by_iqn.all_this_instance_volume = sess.this_instance().all_volumes()
for volume in get_volume_by_iqn.all_this_instance_volume:
if volume.get_iqn() == iqn:
_logger.debug('Found %s', str(volume))
return volume
else:
            _logger.info('Unable to get volume ocid and display name for iqn [%s]', iqn)
except Exception as e:
_logger.debug('Failed to get volume data for iqn [%s]: %s', iqn, str(e), stack_info=True, exc_info=True)
_logger.error('Failed to get volume data for iqn [%s]', iqn)
return None
def _get_iqn_from_ocid(sess, ocid):
"""
Try to get the value for the iqn for a volume identified by an ocid, if any.
Parameters
----------
sess: OCISession
The OCISession instance.
ocid: str
The ocid.
Returns
-------
str: the iqn.
"""
_logger.debug('Trying to find the iqn for volume [%s]', ocid)
this_compartment = sess.this_compartment()
this_availability_domain = sess.this_availability_domain()
all_volumes = this_compartment.all_volumes(this_availability_domain)
for vol in all_volumes:
try:
if vol.get_ocid() == ocid:
return vol.get_iqn()
except Exception as e:
continue
return None
def _is_iqn_attached(sess, iqn):
"""
Verify if oci volume with iqn is attached to this instance.
Parameters
----------
sess: OCISession
The OCISession instance.
iqn: str
The iSCSI qualified name.
Returns
-------
str: the ocid
"""
    _logger.debug('Verifying if [%s] is attached to this instance.', iqn)
volume_data = get_volume_by_iqn(sess, iqn)
if volume_data is None:
return None
if volume_data.is_attached():
return volume_data.get_ocid()
return None
def do_umount(mountpoint):
"""
Unmount the given mountpoint.
Parameters
----------
mountpoint: str
The mountpoint.
Returns
-------
bool
True on success, False otherwise.
"""
try:
_logger.info("Unmounting %s", mountpoint)
subprocess.check_output(['/usr/bin/umount', mountpoint], stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError as e:
_logger.error("Failed to unmount [%s]: %s", mountpoint, e.output)
return False
def unmount_device(session, iqn, disks):
"""
Unmount the partitions of the device with the specified iqn, if they are mounted.
Parameters
----------
session: iscsiadm session
iscsiadm.session()
iqn: str
The iSCSI qualified name.
disks: dict
List of block devices.
Returns
-------
bool
True for success or the device is not mounted.
False if the device is mount and unmounting failed.
"""
retval = True
# find mountpoints
device = session[iqn]['device']
if device not in disks:
return True
if 'partitions' not in disks[device]:
if disks[device]['mountpoint'] != '':
# volume has no partitions and is currently mounted
if not do_umount(disks[device]['mountpoint']):
retval = False
else:
_logger.debug('Volume %s not mounted', disks[device]['mountpoint'])
else:
partitions = disks[device]['partitions']
for part in list(partitions.keys()):
if partitions[part]['mountpoint'] != '':
# the partition is mounted
_logger.debug('Volume %s mounted', partitions[part]['mountpoint'])
if not do_umount(partitions[part]['mountpoint']):
retval = False
else:
_logger.debug('Volume %s not mounted', partitions[part]['mountpoint'])
return retval
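# Illustrative sketch (not part of the original module): the shape of the `disks`
# mapping unmount_device expects, inferred from how it is accessed above. Device
# names and mountpoints are made up.
_EXAMPLE_DISKS = {
    'sdb': {
        'partitions': {
            'sdb1': {'mountpoint': '/data'},   # mounted partition, will be unmounted
            'sdb2': {'mountpoint': ''},        # not mounted, skipped
        },
    },
    'sdc': {
        'mountpoint': '/scratch',              # whole-disk filesystem, no partitions
    },
}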
def do_create_volume(sess, size, display_name, attach_it, chap_credentials, mode):
"""
Create a new OCI volume and attach it to this instance.
Parameters
----------
sess: OCISession
The OCISession instance.
size: int
The volume size in GB.
display_name: str
The volume display name.
attach_it: boolean
Do we attach the newly created volume.
chap_credentials: boolean
Use Chap Credentials Required if True
mode: str
Show output in 0.11 compatibility mode is set to 'compat'
Returns
-------
nothing
Raises
------
Exception if something went wrong
"""
try:
_logger.info("Creating a new %d GB volume %s", size, display_name)
inst = sess.this_instance()
if inst is None:
raise Exception("OCI SDK error: couldn't get instance info")
_logger.debug('\n availability_domain %s\n compartment_id %s',
inst.get_availability_domain_name(), inst.get_compartment_id())
#
# GT
# vol = sess.create_volume(inst.get_compartment_id(),
vol = sess.create_volume(sess.this_compartment().get_ocid(),
inst.get_availability_domain_name(),
size=size,
display_name=display_name,
wait=True)
except Exception as e:
_logger.debug("Failed to create volume", exc_info=True)
raise Exception("Failed to create volume") from e
_logger.info("Volume [%s] created", vol.get_display_name())
if not attach_it:
return
compat_info_message(gen_msg="Attaching the volume to this instance", mode=mode)
try:
if chap_credentials:
vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=True)
else:
vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=False)
except Exception as e:
_logger.debug('Cannot attach BV', exc_info=True)
vol.destroy()
raise Exception('Cannot attach BV') from e
#
# attach using iscsiadm commands
compat_info_message(gen_msg="Attaching iSCSI device.", mode=mode)
vol_portal_ip = vol.get_portal_ip()
vol_portal_port = vol.get_portal_port()
vol_iqn = vol.get_iqn()
vol_username = vol.get_user()
vol_password = vol.get_password()
retval = iscsiadm.attach(ipaddr=vol_portal_ip,
port=vol_portal_port,
iqn=vol_iqn,
username=vol_username,
                             password=vol_password,
auto_startup=True)
compat_info_message(compat_msg="iscsiadm attach Result: %s" % iscsiadm.error_message_from_code(retval),
gen_msg="Volume [%s] is attached." % vol.get_display_name(), mode=mode)
if retval == 0:
_logger.debug('Creation successful')
if chap_credentials:
_logger.debug('Attachment OK: saving chap credentials.')
add_chap_secret(vol_iqn, vol_username, vol_password)
return
# here because of error case
try:
_logger.debug('Destroying the volume')
vol.destroy()
except Exception as e:
_logger.debug("Failed to destroy volume", exc_info=True)
_logger.error("Failed to destroy volume: %s", str(e))
raise Exception('Failed to attach created volume: %s' % iscsiadm.error_message_from_code(retval))
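# Illustrative usage sketch (not part of the original module): create a 50 GB
# volume with CHAP credentials and attach it; the display name is made up and
# mode='compat' selects the 0.11-compatible output described in the docstring.
def _example_create_and_attach(sess):
    do_create_volume(sess,
                     size=50,
                     display_name='example-volume',
                     attach_it=True,
                     chap_credentials=True,
                     mode='compat')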
def add_chap_secret(iqn, user, password):
"""
Save the login information for the given iqn in the chap secrets file.
Parameters
----------
iqn: str
The iSCSI qualified name.
user: str
The iscsiadm username.
password: str
        The iscsiadm password.
Returns
-------
No return value.
"""
_, chap_passwords = load_cache(oci_utils.__chap_password_file)
if chap_passwords is None:
chap_passwords = {}
chap_passwords[iqn] = (user, password)
write_cache(cache_content=chap_passwords, cache_fname=oci_utils.__chap_password_file, mode=0o600)
def remove_chap_secret(iqn_ocid):
"""
Remove the login information for a given iqn from the chap secrets file.
Parameters
----------
iqn_ocid: str
The iSCSI qualified name
Returns
-------
str: cache file timestamp on success, None otherwise
"""
_logger.debug('Remove %s from chap secret cache', iqn_ocid)
ret_value = None
_, chap_passwords = load_cache(oci_utils.__chap_password_file)
if not bool(chap_passwords):
return ret_value
    # Resolve an OCID to its IQN via the chap secrets cache; otherwise assume an IQN was passed.
    iqn = get_iqn_from_chap_secrets_cache(iqn_ocid)[0] if iqn_ocid.startswith(oci_volume_tag) else iqn_ocid
if iqn in chap_passwords.keys():
removed_values = chap_passwords.pop(iqn)
ret_value = write_cache(cache_content=chap_passwords,
cache_fname=oci_utils.__chap_password_file,
mode=0o600)
return ret_value
def get_chap_secret(iqn):
"""
Look for a saved (user,password) pair for iqn in the chap secrets file.
Parameters
----------
iqn: str
The iSCSI qualified name.
Returns
-------
tuple
The (timestamp, password) on success, (None,None) otherwise.
"""
_, chap_passwords = load_cache(oci_utils.__chap_password_file)
if chap_passwords is None:
return None, None
if iqn in chap_passwords:
return chap_passwords[iqn]
return None, None
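# Illustrative sketch (not part of the original module): save CHAP credentials
# for an IQN and read them back; the IQN and credentials are made up.
def _example_chap_round_trip():
    iqn = 'iqn.2015-12.com.oracleiaas:example-target'
    add_chap_secret(iqn, 'example-user', 'example-password')
    user, password = get_chap_secret(iqn)
    return user, password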
def get_portal_ip_from_iscsiadm_cache(iqn_x):
"""
Try to retrieve the portal ip from the iscsiadm cache.
Parameters
----------
iqn_x: str
The iqn
Returns
-------
str: the portal ip if found, None otherwise
"""
_, iscsi_cache = load_cache(iscsiadm.ISCSIADM_CACHE)
for portal in iscsi_cache:
for p_ip, iqn_list in portal.items():
if iqn_x in iqn_list:
return p_ip
return None
def get_iqn_from_chap_secrets_cache(ocid):
"""
Try to retrieve iqn and pw for volume ocid from chap secrets cache.
Parameters
----------
ocid: str
The ocid/username of the volume.
Returns
-------
tuple: (iqn, password) if found, (None, None) otherwise
"""
_, chap_passwords | |
profile layers_id
:type fp_id: str
:return: tuple
:rtype: (str, boolean, str, str)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionoshere_backend.py :: features_profile_layer_algorithms'
trace = 'none'
fail_msg = 'none'
# fp_details = None
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
layers_algorithms_table = None
try:
layers_algorithms_table, fail_msg, trace = layers_algorithms_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get layers_algorithms_table meta for fp_id %s details' % str(fp_layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
logger.info('%s :: layers_algorithms_table OK' % function_str)
es_condition = None
es_day = None
es_layer = ' [\'NOT ACTIVE - Es layer not created\']'
f1_from_time = None
f1_layer = ' [\'NOT ACTIVE - F1 layer not created\']'
f2_until_time = None
f2_layer = ' [\'NOT ACTIVE - F2 layer not created\']'
# @added 20170616 - Feature #2048: D1 ionosphere layer
d1_layer = ' [\'NOT ACTIVE - D1 layer not created\']'
d1_condition = 'none'
d1_boundary_limit = 'none'
d1_boundary_times = 'none'
try:
connection = engine.connect()
stmt = select([layers_algorithms_table]).where(layers_algorithms_table.c.layer_id == int(fp_layers_id))
result = connection.execute(stmt)
connection.close()
layer_algorithms_details_object = result
layer_active = '[\'ACTIVE\']'
for row in result:
layer = row['layer']
if layer == 'D':
d_condition = row['condition']
d_boundary_limit = row['layer_boundary']
# @added 20170616 - Feature #2048: D1 ionosphere layer
if layer == 'D1':
d1_condition = row['condition']
if str(d1_condition) != 'none':
d1_condition = row['condition']
d1_layer = ' [\'ACTIVE\']'
d1_boundary_limit = row['layer_boundary']
d1_boundary_times = row['times_in_row']
else:
d1_condition = 'none'
if layer == 'E':
e_condition = row['condition']
e_boundary_limit = row['layer_boundary']
e_boundary_times = row['times_in_row']
if layer == 'Es':
es_condition = row['condition']
es_day = row['layer_boundary']
es_layer = layer_active
if layer == 'F1':
f1_from_time = row['layer_boundary']
f1_layer = layer_active
if layer == 'F2':
f2_until_time = row['layer_boundary']
f2_layer = layer_active
layer_algorithms_details = '''
D layer :: if value %s %s :: [do not check] :: ['ACTIVE']
D1 layer :: if value %s %s in last %s values :: [do not check] :: %s
E layer :: if value %s %s in last %s values :: [not_anomalous, if active Es, F1 and F2 layers match] :: ['ACTIVE']
Es layer :: if day %s %s :: [not_anomalous, if active F1 and F2 layers match] :: %s
    F1 layer :: if from_time > %s :: [not_anomalous, if active F2 layer matches] :: %s
F2 layer :: if until_time < %s :: [not_anomalous] :: %s
''' % (str(d_condition), str(d_boundary_limit), str(d1_condition),
str(d1_boundary_limit), str(d1_boundary_times), str(d1_layer),
str(e_condition), str(e_boundary_limit), str(e_boundary_times),
str(es_condition), str(es_day),
str(es_layer), str(f1_from_time), str(f1_layer), str(f2_until_time),
str(f2_layer))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get layers_algorithms for layer_id %s from layers_algorithms DB table' % str(fp_layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
return layer_algorithms_details, True, fail_msg, trace, layer_algorithms_details_object
# @added 20170308 - Feature #1960: ionosphere_layers
# To present the operator with the existing layers and algorithms for the metric
def metric_layers_alogrithms(base_name):
"""
Get the Ionosphere layer algorithm details of a metric
:param base_name: the metric base_name
:type base_name: str
:return: tuple
:rtype: (str, boolean, str, str)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionoshere_backend.py :: metric_layers_alogrithms'
trace = 'none'
fail_msg = 'none'
metric_layers_algorithm_details = None
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise # to webapp to return in the UI
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('metrics_table OK')
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: failed to get metrics_table meta'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
metric_id = 0
try:
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.metric == base_name)
result = connection.execute(stmt)
connection.close()
for row in result:
metric_id = int(row['id'])
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: failed to get id for %s from metrics table' % str(base_name)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if not metric_id:
# @added 20181024 - Bug #2638: anomalies db table - anomalous_datapoint greater than DECIMAL
# For debugging
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: no id for %s' % str(base_name)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
ionosphere_layers_table = None
try:
ionosphere_layers_table, fail_msg, trace = ionosphere_layers_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_layers_table meta for %s details' % str(base_name)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
metric_layers_details = []
metric_layers_count = 0
metric_layers_matched_count = 0
try:
connection = engine.connect()
stmt = select([ionosphere_layers_table]).where(ionosphere_layers_table.c.metric_id == metric_id)
result = connection.execute(stmt)
connection.close()
for row in result:
try:
l_id = row['id']
l_fp_id = row['fp_id']
l_metric_id = row['metric_id']
l_matched_count = row['matched_count']
l_check_count = row['check_count']
l_label = str(row['label'])
metric_layers_details.append([l_id, l_fp_id, l_metric_id, l_matched_count, l_check_count, l_label])
metric_layers_count += 1
metric_layers_matched_count += int(l_matched_count)
logger.info('%s :: added layer id %s to layer count' % (function_str, str(l_id)))
except:
metric_layers_count += 0
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get layers ids for metric_id %s from ionosphere_layers DB table' % str(metric_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
layers_algorithms_table = None
try:
layers_algorithms_table, fail_msg, trace = layers_algorithms_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get layers_algorithms_table meta for base_name %s details' % str(base_name)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
metric_layers_algorithm_details = []
logger.info('%s :: layers_algorithms_table OK' % function_str)
try:
connection = engine.connect()
stmt = select([layers_algorithms_table]).where(layers_algorithms_table.c.metric_id == metric_id)
result = connection.execute(stmt)
connection.close()
for row in result:
la_id = row['id']
la_layer_id = row['layer_id']
la_fp_id = row['fp_id']
la_metric_id = row['metric_id']
la_layer = str(row['layer'])
la_type = str(row['type'])
la_condition = str(row['condition'])
la_layer_boundary = str(row['layer_boundary'])
la_times_in_a_row = row['times_in_row']
metric_layers_algorithm_details.append([la_id, la_layer_id, la_fp_id, la_metric_id, la_layer, la_type, la_condition, la_layer_boundary, la_times_in_a_row])
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get layers_algorithms for metric_id %s from layers_algorithms DB table' % str(metric_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
logger.info('metric_layers_details :: %s' % str(metric_layers_details))
logger.info('metric_layers_algorithm_details :: %s' % str(metric_layers_algorithm_details))
return metric_layers_details, metric_layers_algorithm_details, metric_layers_count, metric_layers_matched_count, True, fail_msg, trace
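# Illustrative sketch (not part of the original module): how a caller might
# summarise the layers of a metric; the base_name here is made up.
def _example_metric_layers_summary(base_name='stats.example.requests'):
    (layers, layer_algorithms, layers_count, matched_count,
     success, fail_msg, trace) = metric_layers_alogrithms(base_name)
    return {'layers': layers_count, 'matches': matched_count, 'success': success}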
# @added 20170327 - Feature #2004: Ionosphere layers - edit_layers
# Task #2002: Review and correct incorrectly defined layers
def edit_ionosphere_layers(layers_id):
"""
Edit a layers profile.
:param layers_id: the layer id to edit
:return: array
:rtype: array
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionoshere_backend.py :: edit_ionosphere_layers'
logger.info('updating layers for %s' % str(layers_id))
trace = 'none'
fail_msg = 'none'
value_conditions = ['<', '>', '==', '!=', '<=', '>=']
conditions = ['<', '>', '==', '!=', '<=', '>=', 'in', 'not in']
if 'd_condition' in request.args:
d_condition = request.args.get('d_condition', '==')
else:
logger.error('no d_condition argument passed')
fail_msg = 'error :: no d_condition argument passed'
return False, fail_msg, trace
if not str(d_condition) in conditions:
        logger.error('d_condition not a valid condition - %s' % str(d_condition))
        fail_msg = 'error :: d_condition not a valid condition - %s' % str(d_condition)
return False, fail_msg, trace
if 'd_boundary_limit' in request.args:
d_boundary_limit = request.args.get('d_boundary_limit', '0')
else:
logger.error('no d_boundary_limit argument passed')
fail_msg = 'error :: no d_boundary_limit argument passed'
return False, fail_msg, trace
try:
test_d_boundary_limit = float(d_boundary_limit) + 1
except:
trace = | |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tensor formatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class RichTextLinesTest(test_util.TensorFlowTestCase):
def setUp(self):
np.set_printoptions(
precision=8, threshold=1000, edgeitems=3, linewidth=75)
def _checkTensorMetadata(self, tensor, annotations):
self.assertEqual(
{"dtype": tensor.dtype, "shape": tensor.shape},
annotations["tensor_metadata"])
def _checkBeginIndices(self, expected_indices, annot):
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: expected_indices},
annot)
def _checkOmittedIndices(self, expected_indices, annot):
self.assertEqual({tensor_format.OMITTED_INDICES_KEY: expected_indices},
annot)
def testFormatTensor1DNoEllipsis(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
self.assertEqual([
"Tensor \"a\":",
"",
"array([ 0., 0., 0., 0., 0., 0.,",
" 0., 0., 0., 0., 0., 0.,",
" 0., 0., 0., 0., 0., 0.,",
" 0., 0.])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
self._checkBeginIndices([0], out.annotations[2])
self._checkBeginIndices([6], out.annotations[3])
self._checkBeginIndices([12], out.annotations[4])
self._checkBeginIndices([18], out.annotations[5])
def testFormatTensor2DNoEllipsisNoRowBreak(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a")
self.assertEqual([
"Tensor \"a\":",
"",
"array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
for i in xrange(2, 6):
self._checkBeginIndices([i - 2, 0], out.annotations[i])
def testFormatTensorSuppressingTensorName(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, None)
self.assertEqual([
"array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
for i in xrange(4):
self._checkBeginIndices([i, 0], out.annotations[i])
def testFormatTensorWithMetadata(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a", include_metadata=True)
self.assertEqual([
"Tensor \"a\":",
" dtype: float64",
" shape: (4, 4)",
"",
"array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
for i in xrange(4, 7):
self._checkBeginIndices([i - 4, 0], out.annotations[i])
def testFormatTensor2DNoEllipsisWithRowBreak(self):
a = np.linspace(0.0, 1.0 - 1.0 / 40.0, 40).reshape([2, 20])
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 50})
self.assertEqual(
{"dtype": a.dtype, "shape": a.shape},
out.annotations["tensor_metadata"])
self.assertEqual([
"Tensor \"a\":",
"",
"array([[ 0. , 0.025, 0.05 , 0.075, 0.1 ,",
" 0.125, 0.15 , 0.175, 0.2 , 0.225,",
" 0.25 , 0.275, 0.3 , 0.325, 0.35 ,",
" 0.375, 0.4 , 0.425, 0.45 , 0.475],",
" [ 0.5 , 0.525, 0.55 , 0.575, 0.6 ,",
" 0.625, 0.65 , 0.675, 0.7 , 0.725,",
" 0.75 , 0.775, 0.8 , 0.825, 0.85 ,",
" 0.875, 0.9 , 0.925, 0.95 , 0.975]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
self._checkBeginIndices([0, 0], out.annotations[2])
self._checkBeginIndices([0, 5], out.annotations[3])
self._checkBeginIndices([0, 10], out.annotations[4])
self._checkBeginIndices([0, 15], out.annotations[5])
self._checkBeginIndices([1, 0], out.annotations[6])
self._checkBeginIndices([1, 5], out.annotations[7])
self._checkBeginIndices([1, 10], out.annotations[8])
self._checkBeginIndices([1, 15], out.annotations[9])
def testFormatTensor3DNoEllipsis(self): # TODO(cais): Test name.
a = np.linspace(0.0, 1.0 - 1.0 / 24.0, 24).reshape([2, 3, 4])
out = tensor_format.format_tensor(a, "a")
self.assertEqual([
"Tensor \"a\":",
"",
"array([[[ 0. , 0.04166667, 0.08333333, 0.125 ],",
" [ 0.16666667, 0.20833333, 0.25 , 0.29166667],",
" [ 0.33333333, 0.375 , 0.41666667, 0.45833333]],",
"",
" [[ 0.5 , 0.54166667, 0.58333333, 0.625 ],",
" [ 0.66666667, 0.70833333, 0.75 , 0.79166667],",
" [ 0.83333333, 0.875 , 0.91666667, 0.95833333]]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
self._checkBeginIndices([0, 0, 0], out.annotations[2])
self._checkBeginIndices([0, 1, 0], out.annotations[3])
self._checkBeginIndices([0, 2, 0], out.annotations[4])
self.assertNotIn(5, out.annotations)
self._checkBeginIndices([1, 0, 0], out.annotations[6])
self._checkBeginIndices([1, 1, 0], out.annotations[7])
self._checkBeginIndices([1, 2, 0], out.annotations[8])
def testFormatTensorWithEllipses(self):
a = np.zeros([11, 11, 11])
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100, "edgeitems": 2})
self.assertEqual([
"Tensor \"a\":",
"",
"array([[[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" ..., ",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
for i in xrange(2):
self._checkBeginIndices([i, 0, 0], out.annotations[i * 6 + 2])
self._checkBeginIndices([i, 1, 0], out.annotations[i * 6 + 3])
self._checkOmittedIndices([i, 2, 0], out.annotations[i * 6 + 4])
self._checkBeginIndices([i, 9, 0], out.annotations[i * 6 + 5])
self._checkBeginIndices([i, 10, 0], out.annotations[i * 6 + 6])
self.assertNotIn(i * 6 + 7, out.annotations)
p = 15
for i in xrange(2):
self._checkBeginIndices([9 + i, 0, 0], out.annotations[p + i * 6])
self._checkBeginIndices([9 + i, 1, 0], out.annotations[p + i * 6 + 1])
self._checkOmittedIndices(
[9 + i, 2, 0], out.annotations[p + i * 6 + 2])
self._checkBeginIndices([9 + i, 9, 0], out.annotations[p + i * 6 + 3])
self._checkBeginIndices([9 + i, 10, 0], out.annotations[p + i * 6 + 4])
if i < 1:
self.assertNotIn(p + i * 6 + 5, out.annotations)
def testFormatNone(self):
out = tensor_format.format_tensor(None, "a")
self.assertEqual(
["Tensor \"a\":", "", "None"], out.lines)
def testLocateTensorElement1DNoEllipsis(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
self.assertEqual([
"Tensor \"a\":",
"",
"array([ 0., 0., 0., 0., 0., 0.,",
" 0., 0., 0., 0., 0., 0.,",
" 0., 0., 0., 0., 0., 0.,",
" 0., 0.])",
], out.lines)
is_omitted, row = tensor_format.locate_tensor_element(out, [0])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [5])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [6])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [11])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [12])
self.assertFalse(is_omitted)
self.assertEqual(4, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [18])
self.assertFalse(is_omitted)
self.assertEqual(5, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [19])
self.assertFalse(is_omitted)
self.assertEqual(5, row)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [20])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0, 0])
def testLocateTensorElement2DNoEllipsis(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a")
self.assertEqual([
"Tensor \"a\":",
"",
"array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])",
], out.lines)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 0])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 3])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [1, 0])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [1, 3])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [3, 3])
self.assertFalse(is_omitted)
self.assertEqual(5, row)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [1, 4])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 2])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0])
def testLocateTensorElement3DWithEllipses(self):
a = np.zeros([11, 11, 11])
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100, "edgeitems": 2})
self.assertEqual([
"Tensor \"a\":",
"",
"array([[[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., | |
ALTER VIRTUAL SCHEMA VS1 SET UNUSED = NULL
''')
self.query('''
SELECT PROPERTY_NAME, PROPERTY_VALUE FROM EXA_DBA_VIRTUAL_SCHEMA_PROPERTIES WHERE SCHEMA_NAME = 'VS1'
''')
self.assertEqual(0, self.rowcount())
def testDeleteOnlyOneProperty(self):
self.createFastAdapter(schemaName="ADAPTER", adapterName="FAST_ADAPTER")
self.query('DROP VIRTUAL SCHEMA IF EXISTS VS1 CASCADE')
self.query('CREATE VIRTUAL SCHEMA VS1 USING ADAPTER.FAST_ADAPTER')
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET UNUSED = 'default' UNUSED2 = 'default2'
''')
rows = self.query('''
SELECT PROPERTY_NAME, PROPERTY_VALUE FROM EXA_DBA_VIRTUAL_SCHEMA_PROPERTIES WHERE SCHEMA_NAME = 'VS1' ORDER BY PROPERTY_NAME
''')
self.assertRowsEqual([('UNUSED', 'default'), ('UNUSED2', 'default2')],rows)
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET UNUSED2 = 'Not deleted' UNUSED = NULL
''')
rows = self.query('''
SELECT PROPERTY_NAME, PROPERTY_VALUE FROM EXA_DBA_VIRTUAL_SCHEMA_PROPERTIES WHERE SCHEMA_NAME = 'VS1'
''')
self.assertRowsEqual([('UNUSED2', 'Not deleted')],rows)
def testDuplicatePropertyName(self):
self.createFastAdapter(schemaName="ADAPTER", adapterName="FAST_ADAPTER")
self.query('DROP VIRTUAL SCHEMA IF EXISTS VS1 CASCADE')
self.query('CREATE VIRTUAL SCHEMA VS1 USING ADAPTER.FAST_ADAPTER')
with self.assertRaisesRegexp(Exception, 'Duplicate property names \\(UNUSED\\) are not allowed.'):
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET UNUSED = 'default' UNUSED = 'default2'
''')
with self.assertRaisesRegexp(Exception, 'Duplicate property names \\(UNUSED\\) are not allowed.'):
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET UNUSED = null UNUSED = 'default2'
''')
with self.assertRaisesRegexp(Exception, 'Duplicate property names \\(UNUSED\\) are not allowed.'):
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET UNUSED = 'default' UNUSED = null
''')
with self.assertRaisesRegexp(Exception, 'Duplicate property names \\(UNUSED\\) are not allowed.'):
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET UNUSED = null UNUSED = null
''')
def testOldPropertiesInSchemaMetadataInfo(self):
self.createTestPropertyAdapter(schemaName="ADAPTER", adapterName="TEST_PROPERTY_ADAPTER")
self.query('DROP VIRTUAL SCHEMA IF EXISTS VS1 CASCADE')
self.query('CREATE VIRTUAL SCHEMA VS1 USING ADAPTER.TEST_PROPERTY_ADAPTER')
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET P1='1' P2='2'
''')
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET P1='1new' P2=null P3='3'
''')
def testInvalidPropertiesInSchemaMetadataInfo(self):
# Invalid properties => Add test with custom adapter for invalid properties displaying correct error message
self.createTestPropertyAdapter(schemaName="ADAPTER", adapterName="TEST_PROPERTY_ADAPTER")
self.query('DROP VIRTUAL SCHEMA IF EXISTS VS1 CASCADE')
self.query('CREATE VIRTUAL SCHEMA VS1 USING ADAPTER.TEST_PROPERTY_ADAPTER')
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET P2='2'
''')
with self.assertRaisesRegexp(Exception, 'Expected different values for old properties'):
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET P1='1'
''')
with self.assertRaisesRegexp(Exception, 'Expected different values for old properties'):
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET P1='42' P2='2'
''')
self.query('DROP VIRTUAL SCHEMA IF EXISTS VS1 CASCADE')
self.query('CREATE VIRTUAL SCHEMA VS1 USING ADAPTER.TEST_PROPERTY_ADAPTER')
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET P1='1' P2='2'
''')
with self.assertRaisesRegexp(Exception, 'Expected different values for new properties'):
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET P1=null P2=null P3='4'
''')
def createTestPropertyAdapter(self, schemaName="ADAPTER", adapterName="FAST_ADAPTER"):
self.dropOldAdapter(schemaName, adapterName)
self.query('CREATE SCHEMA {schema}'.format(schema=schemaName))
self.query(udf.fixindent('''
CREATE OR REPLACE PYTHON ADAPTER SCRIPT {schema}.{adapter} AS
import json
import string
def adapter_call(request):
# database expects utf-8 encoded string of type str. unicode not yet supported
root = json.loads(request)
if root["type"] == "createVirtualSchema":
res = {{
"type": "createVirtualSchema",
"schemaMetadata": {{
"tables": [
{{
"name": "DUMMY",
"columns": [{{
"name": "KEY",
"dataType": {{"type": "VARCHAR", "size": 2000000}}
}}]
}}]
}}
}}
return json.dumps(res).encode('utf-8')
elif root["type"] == "dropVirtualSchema":
return json.dumps({{"type": "dropVirtualSchema"}}).encode('utf-8')
elif root["type"] == "setProperties":
expectedOldProperties = {{'P1': '1', 'P2': '2'}}
expectedNewProperties = {{'P1': '1new', 'P2': None,'P3': '3'}}
if (root["schemaMetadataInfo"].get("properties", None) != None and len(root["schemaMetadataInfo"]["properties"]) > 0):
assert (len(root["schemaMetadataInfo"]["properties"]) == len(expectedOldProperties)), 'Expected different values for old properties. Expected: ' + str(expectedOldProperties) + ' Actual: ' + str(root["schemaMetadataInfo"]["properties"])
for propertyName, propertyValue in root["schemaMetadataInfo"]["properties"].iteritems():
assert (propertyName in expectedOldProperties), 'Expected different values for old properties. Expected: ' + str(expectedOldProperties) + ' actual: ' + str(root["schemaMetadataInfo"]["properties"])
assert (propertyValue == expectedOldProperties.get(propertyName, None)), 'Expected different values for old properties. Expected: ' + str(expectedOldProperties) + ' Actual: ' + str(root["schemaMetadataInfo"]["properties"])
assert (len(root["properties"]) == len(expectedNewProperties)), 'Expected different values for new properties. Expected: ' + str(expectedNewProperties) + ' Actual: ' + str(root["properties"])
for propertyName, propertyValue in root["properties"].iteritems():
assert (propertyName in expectedNewProperties), 'Expected different values for new properties. Expected: ' + str(expectedNewProperties) + ' Actual: ' + str(root["properties"])
assert (propertyValue == expectedNewProperties.get(propertyName, None)), 'Expected different values for new properties. Expected: ' + str(expectedNewProperties) + ' Actual: ' + str(root["properties"])
return json.dumps({{"type": "setProperties"}}).encode('utf-8')
else:
raise ValueError('Unsupported callback')
/
''').format(schema = schemaName, adapter = adapterName))
class SetPropertiesRefreshTest(VSchemaTest):
def setUp(self):
# Create a simple native schema with tables
self.createNative()
self.commit() # commit, otherwise adapter doesn't see tables
self.createJdbcAdapter(schemaName="ADAPTER", adapterName="JDBC_ADAPTER")
self.createVirtualSchemaJdbc("VS1", "NATIVE", "ADAPTER.JDBC_ADAPTER", True)
self.commit()
def testWithRefresh(self):
self.query('DROP TABLE NATIVE.T_DATETIME')
self.query('ALTER TABLE NATIVE.T ADD COLUMN d int')
self.query('CREATE TABLE NATIVE.T_NEW(a int)')
self.query('DROP SCHEMA IF EXISTS NATIVE_RENAMED CASCADE')
self.query('RENAME SCHEMA NATIVE TO NATIVE_RENAMED')
self.commit()
timeBefore = self.queryCurrentTimestamp()
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET SCHEMA_NAME='{remoteSchema}' CONNECTION_STRING='jdbc:exa:{host_port};schema={remoteSchema}'
'''.format(host_port = 'localhost:8888',remoteSchema='NATIVE_RENAMED'))
self.commit() # without this commit, the refresh time does not get updated
timeAfter = self.queryCurrentTimestamp()
rows = self.queryColumnMetadata('VS1')
self.assertRowsEqual(
[('G', 'K', 'DECIMAL(18,0)'), ('G', 'V1', 'DECIMAL(18,0)'), ('G', 'V2', 'VARCHAR(100) UTF8'),
('NUMBERS1', 'A', 'DECIMAL(18,0)'), ('NUMBERS1', 'B', 'DECIMAL(18,0)'), ('NUMBERS1', 'C', 'DECIMAL(18,0)'), ('NUMBERS1', 'D', 'DECIMAL(18,0)'),
('NUMBERS2', 'E', 'DECIMAL(18,0)'), ('NUMBERS2', 'F', 'DECIMAL(18,0)'), ('NUMBERS2', 'G', 'DECIMAL(18,0)'), ('NUMBERS2', 'H', 'DECIMAL(18,0)'),
('T', 'A', 'DECIMAL(18,0)'), ('T', 'B', 'VARCHAR(100) UTF8'), ('T', 'C', 'DOUBLE'), ('T', 'D', 'DECIMAL(18,0)'),
('TEST', 'A', 'TIMESTAMP WITH LOCAL TIME ZONE'),
('T_CONNECT', 'PARENT', 'DECIMAL(18,0)'),
('T_CONNECT', 'VAL', 'DECIMAL(18,0)'),
('T_DATATYPES', 'A1', 'DECIMAL(18,0)'),
('T_DATATYPES', 'A10', 'GEOMETRY(3857)'),
('T_DATATYPES', 'A11', 'DECIMAL(10,5)'),
('T_DATATYPES', 'A12', 'DOUBLE'),
('T_DATATYPES', 'A13', 'DECIMAL(36,0)'),
('T_DATATYPES', 'A14', 'DECIMAL(18,0)'),
('T_DATATYPES', 'A15', 'DECIMAL(29,0)'),
('T_DATATYPES', 'A16', 'DECIMAL(18,0)'),
('T_DATATYPES', 'A17', 'DECIMAL(25,0)'),
('T_DATATYPES', 'A18', 'DECIMAL(27,9)'),
('T_DATATYPES', 'A19', 'DOUBLE'),
('T_DATATYPES', 'A2', 'DOUBLE'),
('T_DATATYPES', 'A20', 'DECIMAL(18,0)'),
('T_DATATYPES', 'A21', 'DOUBLE'),
('T_DATATYPES', 'A22', 'DECIMAL(1,0)'),
('T_DATATYPES', 'A23', 'DECIMAL(3,2)'),
('T_DATATYPES', 'A24', 'DECIMAL(18,0)'),
('T_DATATYPES', 'A25', 'DECIMAL(6,0)'),
('T_DATATYPES', 'A26', 'DECIMAL(6,3)'),
('T_DATATYPES', 'A27', 'DOUBLE'),
('T_DATATYPES', 'A28', 'DECIMAL(9,0)'),
('T_DATATYPES', 'A29', 'DECIMAL(9,0)'),
('T_DATATYPES', 'A3', 'DATE'),
('T_DATATYPES', 'A30', 'DECIMAL(3,0)'),
('T_DATATYPES', 'A31', 'DATE'),
('T_DATATYPES', 'A32', 'TIMESTAMP WITH LOCAL TIME ZONE'),
('T_DATATYPES', 'A4', 'TIMESTAMP'),
('T_DATATYPES', 'A5', 'VARCHAR(3000) UTF8'),
('T_DATATYPES', 'A6', 'CHAR(10) UTF8'),
('T_DATATYPES', 'A7', 'BOOLEAN'),
('T_DATATYPES', 'A8', 'INTERVAL DAY(2) TO SECOND(3)'),
('T_DATATYPES', 'A9', 'INTERVAL YEAR(2) TO MONTH'),
('T_GEOMETRY', 'A', 'GEOMETRY(3857)'),
('T_GEOMETRY', 'ID', 'DECIMAL(18,0)'),
('T_INTERVAL', 'A', 'INTERVAL YEAR(2) TO MONTH'),
('T_INTERVAL', 'B', 'INTERVAL DAY(2) TO SECOND(3)'),
('T_NEW', 'A', 'DECIMAL(18,0)'),
('T_NULLS', 'A', 'DECIMAL(18,0)'),
('T_NULLS', 'B', 'VARCHAR(100) UTF8')], rows)
# Check refresh time
self.assertBetween(self.getLastSchemaRefresh('VS1'), timeBefore, timeAfter)
self.assertBetween(self.getLastTableRefresh ('VS1', 'T'), timeBefore, timeAfter)
self.assertBetween(self.getLastTableRefresh ('VS1', 'G'), timeBefore, timeAfter)
self.assertBetween(self.getLastTableRefresh ('VS1', 'T_NEW'), timeBefore, timeAfter)
def testWithoutRefresh(self):
schemaRefreshBefore = self.getLastSchemaRefresh('VS1')
tRefreshBefore = self.getLastTableRefresh ('VS1', 'T')
gRefreshBefore = self.getLastTableRefresh ('VS1', 'G')
        tDatetimeRefreshBefore = self.getLastTableRefresh ('VS1', 'T_DATETIME')
metaBefore = self.queryColumnMetadata('VS1')
# Change the source schema
self.query('DROP TABLE NATIVE.T_DATETIME')
self.query('ALTER TABLE NATIVE.T ADD COLUMN d int')
self.query('CREATE TABLE NATIVE.T_NEW(a int)')
# Setting this property should not refresh tables
timeBefore = self.queryCurrentTimestamp()
self.query('''
ALTER VIRTUAL SCHEMA VS1 SET IS_LOCAL='false'
        ''')
self.commit() # without this commit, the refresh time does not get updated
timeAfter = self.queryCurrentTimestamp()
self.assertBetween(self.getLastSchemaRefresh('VS1'), timeBefore, timeAfter)
self.assertEqual (self.getLastTableRefresh ('VS1', 'T'), tRefreshBefore)
self.assertEqual (self.getLastTableRefresh ('VS1', 'G'), gRefreshBefore)
        self.assertEqual   (self.getLastTableRefresh ('VS1', 'T_DATETIME'), tDatetimeRefreshBefore)
metaAfter = self.queryColumnMetadata('VS1')
self.assertRowsEqual(metaBefore, metaAfter)
class DropVSchemaTest(VSchemaTest):
def setUp(self):
self.createJdbcAdapter(schemaName="ADAPTER", adapterName="JDBC_ADAPTER")
def testDropVSchema(self):
self.createNative()
self.commit() # commit, otherwise adapter doesn't see tables
self.createVirtualSchemaJdbc("VS1", "NATIVE", "ADAPTER.JDBC_ADAPTER", True)
self.commit()
self.query('DROP VIRTUAL SCHEMA VS1 CASCADE')
rows = self.query("SELECT * FROM EXA_SCHEMAS WHERE SCHEMA_NAME = 'VS1' ")
self.assertEqual(0, self.rowcount())
def testDropEmptyVSchema(self):
self.query('DROP SCHEMA IF EXISTS NATIVE CASCADE')
self.query('CREATE SCHEMA NATIVE')
self.commit()
self.createVirtualSchemaJdbc("VS1", "NATIVE", "ADAPTER.JDBC_ADAPTER", True)
self.commit()
self.query('DROP VIRTUAL SCHEMA VS1')
rows = self.query("SELECT * FROM EXA_SCHEMAS WHERE SCHEMA_NAME = 'VS1' ")
self.assertEqual(0, self.rowcount())
def testDropVSchemaInvalidAdapterScript(self):
self.createNative()
self.commit()
self.createVirtualSchemaJdbc("VS1", "NATIVE", "ADAPTER.JDBC_ADAPTER", True)
self.commit()
self.query(udf.fixindent('''
CREATE OR REPLACE PYTHON ADAPTER SCRIPT ADAPTER.JDBC_ADAPTER AS
/
'''))
with self.assertRaisesRegexp(Exception, 'SyntaxError: invalid syntax \\(JDBC_ADAPTER, line 1\\)'):
self.query('DROP VIRTUAL SCHEMA VS1 CASCADE')
rows = self.query("SELECT * FROM EXA_SCHEMAS WHERE SCHEMA_NAME = 'VS1' ")
self.assertEqual(1, self.rowcount())
self.query('DROP FORCE VIRTUAL SCHEMA VS1 CASCADE')
rows = self.query("SELECT * FROM EXA_SCHEMAS WHERE SCHEMA_NAME = 'VS1' ")
self.assertEqual(0, self.rowcount())
def testDropVSchemaInvalidJson(self):
self.createNative()
self.commit()
self.createVirtualSchemaJdbc("VS1", "NATIVE", "ADAPTER.JDBC_ADAPTER", True)
self.commit()
self.query(udf.fixindent('''
CREATE OR REPLACE PYTHON ADAPTER SCRIPT ADAPTER.JDBC_ADAPTER AS
import json
def adapter_call(request):
# missing brackets
return """ "type": "dropVirtualSchema"} """
/
'''))
with self.assertRaisesRegexp(Exception, 'Unknown exception while parsing the response: in Json::Value::find\\(key, end, found\\): requires objectValue or nullValue'):
self.query('DROP VIRTUAL SCHEMA VS1 CASCADE')
rows = self.query("SELECT * FROM EXA_SCHEMAS WHERE SCHEMA_NAME = 'VS1' ")
self.assertEqual(1, self.rowcount())
self.query('DROP FORCE VIRTUAL SCHEMA VS1 CASCADE')
rows = self.query("SELECT * FROM EXA_SCHEMAS WHERE SCHEMA_NAME = 'VS1' ")
self.assertEqual(0, self.rowcount())
def testDropVSchemaMissingCascade(self):
self.createNative()
self.commit() # commit, otherwise adapter doesn't see tables
self.createVirtualSchemaJdbc("VS1", "NATIVE", "ADAPTER.JDBC_ADAPTER", True)
self.commit()
with self.assertRaisesRegexp(Exception, 'schema is not empty - use DROP VIRTUAL SCHEMA VS1 CASCADE to delete it'):
self.query('DROP VIRTUAL SCHEMA VS1')
from __future__ import print_function, division
from sympy.core import Basic, Dict, sympify
from sympy.core.compatibility import as_int, default_sort_key
from sympy.core.sympify import _sympify
from sympy.functions.combinatorial.numbers import bell
from sympy.matrices import zeros
from sympy.sets.sets import FiniteSet, Union
from sympy.utilities.iterables import flatten, group
from collections import defaultdict
class Partition(FiniteSet):
"""
This class represents an abstract partition.
A partition is a set of disjoint sets whose union equals a given set.
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
"""
_rank = None
_partition = None
def __new__(cls, *partition):
"""
Generates a new partition object.
This method also verifies if the arguments passed are
valid and raises a ValueError if they are not.
Examples
========
Creating Partition from Python lists:
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a
Partition(FiniteSet(1, 2), FiniteSet(3))
>>> a.partition
[[1, 2], [3]]
>>> len(a)
2
>>> a.members
(1, 2, 3)
Creating Partition from Python sets:
>>> Partition({1, 2, 3}, {4, 5})
Partition(FiniteSet(1, 2, 3), FiniteSet(4, 5))
Creating Partition from SymPy finite sets:
>>> from sympy.sets.sets import FiniteSet
>>> a = FiniteSet(1, 2, 3)
>>> b = FiniteSet(4, 5)
>>> Partition(a, b)
Partition(FiniteSet(1, 2, 3), FiniteSet(4, 5))
"""
args = []
dups = False
for arg in partition:
if isinstance(arg, list):
as_set = set(arg)
if len(as_set) < len(arg):
dups = True
break # error below
arg = as_set
args.append(_sympify(arg))
if not all(isinstance(part, FiniteSet) for part in args):
raise ValueError(
"Each argument to Partition should be " "a list, set, or a FiniteSet"
)
# sort so we have a canonical reference for RGS
U = Union(*args)
if dups or len(U) < sum(len(arg) for arg in args):
raise ValueError("Partition contained duplicate elements.")
obj = FiniteSet.__new__(cls, *args)
obj.members = tuple(U)
obj.size = len(U)
return obj
def sort_key(self, order=None):
"""Return a canonical key that can be used for sorting.
Ordering is based on the size and sorted elements of the partition
and ties are broken with the rank.
Examples
========
>>> from sympy.utilities.iterables import default_sort_key
>>> from sympy.combinatorics.partitions import Partition
>>> from sympy.abc import x
>>> a = Partition([1, 2])
>>> b = Partition([3, 4])
>>> c = Partition([1, x])
>>> d = Partition(list(range(4)))
>>> l = [d, b, a + 1, a, c]
>>> l.sort(key=default_sort_key); l
[Partition(FiniteSet(1, 2)), Partition(FiniteSet(1), FiniteSet(2)), Partition(FiniteSet(1, x)), Partition(FiniteSet(3, 4)), Partition(FiniteSet(0, 1, 2, 3))]
"""
if order is None:
members = self.members
else:
members = tuple(
sorted(self.members, key=lambda w: default_sort_key(w, order))
)
return tuple(map(default_sort_key, (self.size, members, self.rank)))
@property
def partition(self):
"""Return partition as a sorted list of lists.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition([1], [2, 3]).partition
[[1], [2, 3]]
"""
if self._partition is None:
self._partition = sorted(
[sorted(p, key=default_sort_key) for p in self.args]
)
return self._partition
def __add__(self, other):
"""
Return permutation whose rank is ``other`` greater than current rank,
(mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a + 1).rank
2
>>> (a + 100).rank
1
"""
other = as_int(other)
offset = self.rank + other
result = RGS_unrank((offset) % RGS_enum(self.size), self.size)
return Partition.from_rgs(result, self.members)
def __sub__(self, other):
"""
Return permutation whose rank is ``other`` less than current rank,
(mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a - 1).rank
0
>>> (a - 100).rank
1
"""
return self.__add__(-other)
def __le__(self, other):
"""
Checks if a partition is less than or equal to
the other based on rank.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a <= a
True
>>> a <= b
True
"""
return self.sort_key() <= sympify(other).sort_key()
def __lt__(self, other):
"""
Checks if a partition is less than the other.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a < b
True
"""
return self.sort_key() < sympify(other).sort_key()
@property
def rank(self):
"""
Gets the rank of a partition.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.rank
13
"""
if self._rank is not None:
return self._rank
self._rank = RGS_rank(self.RGS)
return self._rank
@property
def RGS(self):
"""
Returns the "restricted growth string" of the partition.
The RGS is returned as a list of indices, L, where L[i] indicates
the block in which element i appears. For example, in a partition
of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is
[1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.members
(1, 2, 3, 4, 5)
>>> a.RGS
(0, 0, 1, 2, 2)
>>> a + 1
Partition(FiniteSet(1, 2), FiniteSet(3), FiniteSet(4), FiniteSet(5))
>>> _.RGS
(0, 0, 1, 2, 3)
"""
rgs = {}
partition = self.partition
for i, part in enumerate(partition):
for j in part:
rgs[j] = i
return tuple(
[
rgs[i]
for i in sorted([i for p in partition for i in p], key=default_sort_key)
]
)
@classmethod
def from_rgs(self, rgs, elements):
"""
Creates a set partition from a restricted growth string.
The indices given in rgs are assumed to be the index
of the element as given in elements *as provided* (the
elements are not sorted by this routine). Block numbering
starts from 0. If any block was not referenced in ``rgs``
an error will be raised.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))
Partition(FiniteSet(c), FiniteSet(a, d), FiniteSet(b, e))
>>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))
Partition(FiniteSet(e), FiniteSet(a, c), FiniteSet(b, d))
>>> a = Partition([1, 4], [2], [3, 5])
>>> Partition.from_rgs(a.RGS, a.members)
Partition(FiniteSet(1, 4), FiniteSet(2), FiniteSet(3, 5))
"""
if len(rgs) != len(elements):
raise ValueError("mismatch in rgs and element lengths")
max_elem = max(rgs) + 1
partition = [[] for i in range(max_elem)]
j = 0
for i in rgs:
partition[i].append(elements[j])
j += 1
if not all(p for p in partition):
raise ValueError("some blocks of the partition were empty.")
return Partition(*partition)
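# Illustrative addition (not part of the original module): a minimal sketch showing how a
# Partition round-trips through its restricted growth string. The values follow the doctests
# above; the helper name is hypothetical and the function is never called at import time.
def _partition_rgs_roundtrip_example():
    p = Partition([1, 4], [2], [3, 5])
    rgs = p.RGS                              # (0, 1, 2, 0, 2): element -> index of its block
    q = Partition.from_rgs(rgs, p.members)   # rebuild the same partition from the RGS
    return p == q                            # True: the RGS fully determines the partition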
class IntegerPartition(Basic):
"""
This class represents an integer partition.
In number theory and combinatorics, a partition of a positive integer,
``n``, also called an integer partition, is a way of writing ``n`` as a
list of positive integers that sum to n. Two partitions that differ only
in the order of summands are considered to be the same partition; if order
matters then the partitions are referred to as compositions. For example,
4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
[2, 1, 1].
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
References
==========
https://en.wikipedia.org/wiki/Partition_%28number_theory%29
"""
_dict = None
_keys = None
def __new__(cls, partition, integer=None):
"""
Generates a new IntegerPartition object from a list or dictionary.
The partition can be given as a list of positive integers or a
dictionary of (integer, multiplicity) items. If the partition is
preceded by an integer an error will be raised if the partition
does not sum to that given integer.
Examples
========
>>> from sympy.combinatorics.partitions import IntegerPartition
>>> a = IntegerPartition([5, 4, 3, 1, 1])
>>> a
IntegerPartition(14, (5, 4, 3, 1, 1))
>>> print(a)
[5, 4, 3, 1, 1]
>>> IntegerPartition({1:3, 2:1})
IntegerPartition(5, (2, 1, 1, 1))
        If the value that the partition should sum to is given first, a check
        will be made and an error will be raised if there is a discrepancy:
>>> IntegerPartition(10, [5, 4, 3, 1])
Traceback (most recent call last):
...
ValueError: The partition is not valid
"""
        if integer is not None:
                # ... to the front
print(spi, 'oa/ica search deg', graph.nodes[neinodes[0]]['deg'], graph.nodes[neinodes[0]]['pos'],
graph.nodes[spi]['pos'])
if graph.nodes[neinodes[0]]['deg'] == 1 and graph.nodes[neinodes[0]]['pos'][1] < \
graph.nodes[spi]['pos'][1]:
# if prednodes[spi]==oanodetype:
# print('oa/ica prob',probnodes[spi][oanodetype])
if probnodes[spi][oanodetype] > maxprob:
# print('max',spi)
maxprob = probnodes[spi][oanodetype]
oaid = spi
if oaid != -1:
print(oanodetype, 'oai/ica node', oaid, 'gt', targetnodes[oaid])
node_type_to_id[oanodetype] = oaid
node_id_to_type[oaid] = oanodetype
# find ending node of oa
neinodeids = [i[1] for i in graph.edges(oaid) if i[1] not in sp]
assert len(neinodeids) == 1
visited = [oaid, neinodeids[0]]
exp_edge_type = matchvestype(oanodetype, oanodetype + 2)
# oa end id +2
oaendid = findmaxprob(graph, neinodeids[0], visited, oanodetype + 2, 1, probnodes, branch_dist_mean,
branch_dist_std, exp_edge_type)
print(oanodetype + 2, 'oai end node', oaendid)
node_type_to_id[oanodetype + 2] = oaendid
node_id_to_type[oaendid] = oanodetype + 2
# Check Pcomm
for pcommnodetype in [21, 22]:
# -2 is pcomm/p1/p2 node type
if pcommnodetype - 2 not in node_type_to_id.keys():
continue
# -18 is ICA/M1/A1 node type
antnodeid = pcommnodetype - 18
if antnodeid not in node_type_to_id.keys():
print('no ica/mca/aca')
antnodeid = pcommnodetype - 14
if antnodeid not in node_type_to_id.keys():
antnodeid = pcommnodetype - 16
if antnodeid not in node_type_to_id.keys():
print('no a1/2, m1/2 and ica/mca/aca, skip')
continue
if pcommnodetype - 20 not in node_type_to_id.keys():
print('no ica root, skip')
continue
try:
sp = nx.shortest_path(graph, node_type_to_id[pcommnodetype - 2], node_type_to_id[antnodeid])
# shortest path should not include PCA/BA
if 18 in node_type_to_id.keys() and node_type_to_id[18] in sp:
print('has path from p1/2 to anterior, but through pca/ba, skip')
continue
# if p1/2 exist in deg2, remove
if pcommnodetype - 2 in deg2node:
del deg2node[deg2node.index(pcommnodetype - 2)]
del deg2edge[deg2edge.index(pcommnodetype - 4)]
print('pcomm has path, remove deg 2 search for node p1/2')
except nx.NetworkXNoPath:
print(pcommnodetype, 'pcomm missing, and p1/2 deg2 search needed')
# no need to add deg2 for P1, as some p1/2 exist but not connect to pcomm
continue
spica = nx.shortest_path(graph, node_type_to_id[antnodeid], node_type_to_id[pcommnodetype - 20])
print('spica', spica, 'sp pos to ant', sp)
assert len(set(spica) & set(sp)) > 0
for pcommnodeid in sp:
if pcommnodeid in spica:
break
print(pcommnodetype, 'pcomm/ica node id', pcommnodeid)
node_type_to_id[pcommnodetype] = pcommnodeid
node_id_to_type[pcommnodeid] = pcommnodetype
# Check VA
if 17 in node_type_to_id.keys() and graph.nodes[node_type_to_id[17]]['deg'] == 3:
        # check existing conf node type 15 16 compatibility
for va_cf_type in [15, 16]:
if va_cf_type not in node_type_to_id.keys():
continue
try:
sp = nx.shortest_path(graph, node_type_to_id[17], node_type_to_id[va_cf_type])
except nx.NetworkXNoPath:
print('va through conf nodes are not connected, remove conf node va root')
del node_id_to_type[node_type_to_id[va_cf_type]]
del node_type_to_id[va_cf_type]
if 15 not in node_type_to_id.keys():
# BA/VA and PCA/BA
visited = list(node_type_to_id.values())
vaendid = findmaxprob(graph, node_type_to_id[17], visited, 15, 1, probnodes, branch_dist_mean,
branch_dist_std)
print(15, 'VAL end node', vaendid, 'gt', targetnodes[vaendid])
node_type_to_id[15] = vaendid
node_id_to_type[vaendid] = 15
if 16 not in node_type_to_id.keys():
visited = list(node_type_to_id.values())
vaendid = findmaxprob(graph, node_type_to_id[17], visited, 16, 1, probnodes, branch_dist_mean,
branch_dist_std)
print(16, 'VAR end node', vaendid, 'gt', targetnodes[vaendid])
node_type_to_id[16] = vaendid
node_id_to_type[vaendid] = 16
# check LR
if 15 in node_type_to_id.keys() and 16 in node_type_to_id.keys():
valnodeid = node_type_to_id[15]
varnodeid = node_type_to_id[16]
if graph.nodes[node_type_to_id[15]]['pos'][0] < graph.nodes[node_type_to_id[16]]['pos'][0]:
print('VALR swap')
node_type_to_id[15] = varnodeid
node_type_to_id[16] = valnodeid
node_id_to_type[valnodeid] = 16
node_id_to_type[varnodeid] = 15
# in full graph
# TODO
if len(deg2edge):
print('#Deg 2 search edge', deg2edge, 'node', deg2node)
if len(node_id_to_type) != len(node_type_to_id):
print('len(node_id_to_type)!=len(node_type_to_id), conflict of nodes')
node_id_to_type = {node_type_to_id[i]: i for i in node_type_to_id}
##########################################
# apply confident predictions to confoutput
confoutput = {}
confoutput['nodes'] = np.zeros(probnodes.shape)
confoutput['edges'] = np.zeros(probedges.shape)
for nodei in range(confoutput['nodes'].shape[0]):
if nodei in node_id_to_type.keys() and node_id_to_type[nodei] not in deg2node:
confoutput['nodes'][nodei][node_id_to_type[nodei]] = 1
else:
if nodei in node_id_to_type.keys() and node_id_to_type[nodei] in deg2node:
print('nodei in deg2node', node_id_to_type[nodei], 'skip setting node id')
if prednodes[nodei] == 0:
confoutput['nodes'][nodei] = probnodes[nodei]
else:
# set as non type if original max prob not nontype
confoutput['nodes'][nodei][0] = 1
# fill edge according to node
for nodetypei in range(len(nodeconnection)):
if len(nodeconnection[nodetypei]) == 0:
continue
if nodetypei not in node_id_to_type.values():
continue
for branchnodetypei in nodeconnection[nodetypei]:
if branchnodetypei not in node_id_to_type.values():
continue
edgetype = matchvestype(nodetypei, branchnodetypei)
if edgetype in [7, 8] and 11 in deg2edge and branchnodetypei in deg2node:
print('edge', edgetype, 'needs deg 2 prediction, set edge to distal type')
edgetype += 2
if edgetype in [17, 18] and edgetype in deg2edge and branchnodetypei in deg2node:
print('edge', edgetype, 'needs deg 2 prediction, set edge to distal type')
edgetype += 2
try:
sp = nx.shortest_path(graph, node_type_to_id[nodetypei], node_type_to_id[branchnodetypei])
            except nx.NetworkXNoPath:
                print('no shortest path between connection nodes, skip', nodetypei, branchnodetypei)
                continue  # otherwise sp from a previous iteration would be reused below
# print('sp',sp)
for spi in range(1, len(sp)):
edgei = findedgeid(graph, sp[spi - 1], sp[spi])
if edgei != -1:
confoutput['edges'][edgei][edgetype] = 1
else:
print('cannot find id for edge', sp[spi - 1], sp[spi])
# fill additional edges based on node types
for edgetype, nodetypes in edgemap.items():
if nodetypes[0] in node_type_to_id.keys() and nodetypes[1] in node_type_to_id.keys():
try:
sp = nx.shortest_path(graph, node_type_to_id[nodetypes[0]], node_type_to_id[nodetypes[1]])
for spid in range(1, len(sp)):
spi = sp[spid - 1]
if spi in list(set(node_type_to_id.keys()) - set(
[node_type_to_id[nodetypes[0]], node_type_to_id[nodetypes[1]]])):
# print(edgetype,nodetypes,'path through other confident nodes')
break
spj = sp[spid]
edgei = findedgeid(graph, spi, spj)
if edgei != -1:
print(edgetype, nodetypes, 'label edgei', edgei, 'edgetype', edgetype)
confoutput['edges'][edgei][edgetype] = 1
else:
print('cannot find edge id, possible?', spi, spj)
except nx.NetworkXNoPath:
print('no path between edgetype,nodetypes', edgetype, nodetypes)
continue
# fill M2 A2 P2
    # add keyids to visited list to avoid propagating through acomm/pcomm
keyids = []
for nodeids in node_type_to_id.values():
keyids.append(nodeids)
print('keyids', node_id_to_type, node_type_to_id)
print('keyids', keyids)
for fi in fillmap:
if fi[1] not in node_type_to_id.keys() or fi[0] not in node_type_to_id.keys():
continue
sp = nx.shortest_path(graph, node_type_to_id[fi[1]], node_type_to_id[fi[0]])
assert len(sp) >= 2
fillnodes, filledges = findallnei(graph, node_type_to_id[fi[1]], sp[:2] + keyids)
# print('fill node/edge',fi,fillnodes,filledges)
# node set 0
for nodeid in fillnodes:
if np.argmax(confoutput['nodes'][nodeid]) != 0:
print('node already assigned', nodeid, 'no fill needed')
continue
nodez = np.zeros((BOITYPENUM))
nodez[0] = 1
confoutput['nodes'][nodeid] = nodez
# edge set to edgetype
edgetype = fi[2]
for edgeid in filledges:
edgei = findedgeid(graph, edgeid[0], edgeid[1])
if np.argmax(confoutput['edges'][edgei]) != 0:
print(edgei, 'assign', edgetype, 'edge already assigned to ', np.argmax(confoutput['edges'][edgei]))
if np.argmax(confoutput['edges'][edgei]) in [9, 10] and edgetype in [9, 10]:
print('ERR NEED CORR, A2 touch')
# a2LR touches, compare dist to A1/2 L and R
edgenodei = list(graph.edges())[edgei][0]
cdistL = nodedist(graph, edgenodei, node_type_to_id[5])
cdistR = nodedist(graph, edgenodei, node_type_to_id[6])
edgez = np.zeros((VESTYPENUM))
if cdistL < cdistR:
print('A2LR touch, set to A2L')
edgez[9] = 1
else:
print('A2LR touch, set to A2R')
edgez[10] = 1
confoutput['edges'][edgei] = edgez
else:
edgez = np.zeros((VESTYPENUM))
edgez[edgetype] = 1
confoutput['edges'][edgei] = edgez
# fill remaining with prob if not exist, if pred non-type, force to set zero type
for edgei in range(confoutput['edges'].shape[0]):
# if unset, check connection to nodetype M12 A12 P12, set to closest
if np.max(confoutput['edges'][edgei]) == 0:
# if prednodes[nodei]==0:
cprobedge = probedges[edgei]
if np.argmax(cprobedge) == 12:
cprobedge[5] += cprobedge[12]
cprobedge[12] = 0
if np.argmax(cprobedge) == 13:
cprobedge[6] += cprobedge[13]
cprobedge[13] = 0
enodei = list(graph.edges())[edgei][0]
try:
sp = nx.shortest_path(graph, enodei, node_type_to_id[7])
if node_type_to_id[3] not in sp:
print(edgei, 'has loop, remaining edge set to m2l')
zprobedge = np.zeros((VESTYPENUM))
zprobedge[5] = 1
confoutput['edges'][edgei] = zprobedge
continue
except:
pass
try:
sp = nx.shortest_path(graph, enodei, node_type_to_id[8])
if node_type_to_id[4] not in sp:
print(edgei, 'has loop, remaining edge set to m2r')
zprobedge = np.zeros((VESTYPENUM))
zprobedge[6] = 1
confoutput['edges'][edgei] = zprobedge
continue
except:
pass
try:
sp = nx.shortest_path(graph, enodei, node_type_to_id[5])
if node_type_to_id[3] not in sp and node_type_to_id[4] not in sp:
print(edgei, 'has loop, remaining edge set to a2l')
zprobedge = np.zeros((VESTYPENUM))
zprobedge[7] = 1
confoutput['edges'][edgei] = zprobedge
continue
except:
pass
try:
sp = nx.shortest_path(graph, enodei, node_type_to_id[6])
if node_type_to_id[3] not in sp and node_type_to_id[4] not in sp:
print(edgei, 'has loop, remaining edge set to a2r')
zprobedge = np.zeros((VESTYPENUM))
zprobedge[8] = 1
confoutput['edges'][edgei] = zprobedge
continue
except:
pass
try:
sp = nx.shortest_path(graph, enodei, node_type_to_id[19])
if node_type_to_id[18] not in sp and node_type_to_id[3] not in sp:
print(edgei, 'has loop, remaining edge set to p2l')
zprobedge = np.zeros((VESTYPENUM))
zprobedge[19] = 1
confoutput['edges'][edgei] = zprobedge
continue
except:
pass
try:
sp = nx.shortest_path(graph, enodei, node_type_to_id[20])
if node_type_to_id[18] not in sp and node_type_to_id[4] not in sp:
print(edgei, 'has loop, remaining edge set to p2r')
zprobedge = np.zeros((VESTYPENUM))
# HEall.py
# AUTHOR: <NAME>
# CONTACT: <EMAIL>.
# --------------------------- LIBRARIES
import numpy as np
import pandas as pd
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve, factorized
np.set_printoptions(linewidth=2000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# --------------------------- END LIBRARIES
# --------------------------- INITIAL DATA: Y, SHUNTS AND Y0i
df_top = pd.read_excel('Dades_v1.xlsx', sheet_name='Topology') # DataFrame of the topology
busos_coneguts = [] # vector to store the indices of the found buses
[busos_coneguts.append(df_top.iloc[i, j]) for i in range(df_top.shape[0]) for j in range(0, 2) if
df_top.iloc[i, j] not in busos_coneguts]
n = len(busos_coneguts)
n_linies = df_top.shape[0]
A = np.zeros((n, n_linies), dtype=int)  # number of buses x number of lines
L = np.zeros((n_linies, n_linies), dtype=complex)
np.fill_diagonal(L, [1 / (df_top.iloc[i, 2] + df_top.iloc[i, 3] * 1j) for i in range(n_linies)])
A[df_top.iloc[range(n_linies), 0], range(n_linies)] = 1
A[df_top.iloc[range(n_linies), 1], range(n_linies)] = -1
Yx = np.dot(np.dot(A, L), np.transpose(A))
Y = np.zeros((n - 1, n - 1), dtype=complex) # admittance matrix without slack bus
Y[:, :] = Yx[1:, 1:]
vecx_shunts = np.zeros((n, 1), dtype=complex) # vector with shunt admittances
for i in range(df_top.shape[0]):  # go through all rows
vecx_shunts[df_top.iloc[i, 0], 0] = vecx_shunts[df_top.iloc[i, 0], 0] + df_top.iloc[
i, 4] * -1j # B/2 is in column 4. The sign is changed here
vecx_shunts[df_top.iloc[i, 1], 0] = vecx_shunts[df_top.iloc[i, 1], 0] + df_top.iloc[
i, 4] * -1j # B/2 is in column 4. The sign is changed here
vec_shunts = np.zeros((n - 1, 1), dtype=complex) # same vector, just to adapt
for i in range(n - 1):
vec_shunts[i, 0] = vecx_shunts[i + 1, 0]
# vec_shunts = --vec_shunts # no need to change the sign, already done
vec_Y0 = np.zeros((n - 1, 1), dtype=complex) # vector with admittances connecting to the slack
for i in range(df_top.shape[0]): # go through all rows
if df_top.iloc[i, 0] == 0: # if slack in the first column
vec_Y0[df_top.iloc[i, 1] - 1, 0] = 1 / (
df_top.iloc[i, 2] + df_top.iloc[i, 3] * 1j) # -1 so bus 1 goes to index 0
elif df_top.iloc[i, 1] == 0: # if slack in the second column
vec_Y0[df_top.iloc[i, 0] - 1, 0] = 1 / (df_top.iloc[i, 2] + df_top.iloc[i, 3] * 1j)
G = np.real(Y) # real parts of Yij
B = np.imag(Y) # imaginary parts of Yij
# --------------------------- INITIAL DATA: Y, SHUNTS AND Y0i. DONE
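# Added note (illustrative, not from the original script): for a single line between bus 0 and
# bus 1 with series impedance z = r + jx, the construction above reduces to
#   A = [[1], [-1]],  L = [[1/z]],  Yx = A @ L @ A.T = [[ 1/z, -1/z],
#                                                       [-1/z,  1/z]]
# i.e. the standard branch admittance stamp. The B/2 shunt terms are collected separately in
# vecx_shunts (with the sign flipped) and enter the recursion later through the U * vec_shunts
# terms rather than through the Y matrix itself.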
# --------------------------- INITIAL DATA: BUSES INFORMATION
df_bus = pd.read_excel('Data.xlsx', sheet_name='Buses') # dataframe of the buses
if df_bus.shape[0] != n:
    print("Error: the number of buses in 'Topology' and 'Buses' does not match")  # check if the number of buses is consistent
num_busos_PQ = 0 # initialize number of PQ buses
num_busos_PV = 0 # initialize number of PV buses
vec_busos_PQ = np.zeros([0], dtype=int) # vector to store the indices of PQ buses
vec_busos_PV = np.zeros([0], dtype=int) # vector to store the indices of PV buses
vec_P = np.zeros((n - 1, 1), dtype=float) # data of active power
vec_Q = np.zeros((n - 1, 1), dtype=float) # data of reactive power
vec_V = np.zeros((n - 1, 1), dtype=float) # data of voltage magnitude
vec_W = np.zeros((n - 1, 1), dtype=float) # voltage magnitude squared
V_slack = 1  # default slack voltage if none is specified
for i in range(df_bus.shape[0]):  # find the voltage specified for the slack
    if df_bus.iloc[i, 0] == 0:
        V_slack = df_bus.iloc[i, 3]
for i in range(df_bus.shape[0]): # store the data of both PQ and PV
vec_P[df_bus.iloc[i, 0] - 1] = df_bus.iloc[i, 1] # -1 to start at 0
if df_bus.iloc[i, 4] == 'PQ':
vec_Q[df_bus.iloc[i, 0] - 1] = df_bus.iloc[i, 2] # -1 to start at 0
vec_busos_PQ = np.append(vec_busos_PQ, df_bus.iloc[i, 0])
elif df_bus.iloc[i, 4] == 'PV':
vec_V[df_bus.iloc[i, 0] - 1] = df_bus.iloc[i, 3] # -1 to start at 0
vec_busos_PV = np.append(vec_busos_PV, df_bus.iloc[i, 0])
num_busos_PQ = len(vec_busos_PQ)
num_busos_PV = len(vec_busos_PV)
vec_W = vec_V**2
# --------------------------- INITIAL DATA: BUSES INFORMATION. DONE
# --------------------------- PREPARING IMPLEMENTATION
prof = 30 # depth
U = np.zeros((prof, n - 1), dtype=complex) # voltages
U_re = np.zeros((prof, n - 1), dtype=float) # real part of voltages
U_im = np.zeros((prof, n - 1), dtype=float) # imaginary part of voltages
X = np.zeros((prof, n - 1), dtype=complex) # X=1/conj(U)
X_re = np.zeros((prof, n - 1), dtype=float) # real part of X
X_im = np.zeros((prof, n - 1), dtype=float) # imaginary part of X
Q = np.zeros((prof, n - 1), dtype=complex) # unknown reactive powers
pqpv = np.r_[vec_busos_PQ, vec_busos_PV]
pq = vec_busos_PQ
pv = vec_busos_PV
pqpv = np.sort(pqpv)  # np.sort returns a copy, so the result must be assigned back
npq = len(pq)
npv = len(pv)
dimensions = 2 * npq + 3 * npv # number of unknowns
# .......................GUIDING VECTOR
lx = 0
index_Ure = []
index_Uim = []
index_Q = []
for i in range(n-1):
index_Ure.append(lx)
index_Uim.append(lx + 1)
if i + 1 in pq:
lx = lx + 2
else:
index_Q.append(lx + 2)
lx = lx + 3
# .......................GUIDING VECTOR. DONE
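# Added note (illustrative): for a hypothetical 3-bus case where bus 1 is PQ and bus 2 is PV,
# the unknown vector built by the loop above is laid out as
#   [U_re1, U_im1, U_re2, U_im2, Q2]
# so index_Ure = [0, 2], index_Uim = [1, 3] and index_Q = [4]; PQ buses contribute two unknowns
# per order and PV buses three (the extra one being the reactive power coefficient).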
# .......................CALCULATION OF TERMS [0]
Y = csc_matrix(Y)
U[0, :] = spsolve(Y, vec_Y0)
X[0, :] = 1 / np.conj(U[0, :])
U_re[0, :] = np.real(U[0, :])
U_im[0, :] = np.imag(U[0, :])
X_re[0, :] = np.real(X[0, :])
X_im[0, :] = np.imag(X[0, :])
# .......................CALCULATION OF TERMS [0]. DONE
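# Added note (illustrative): the order-0 coefficients solve the linear system Y @ U[0] = vec_Y0,
# i.e. the voltages obtained when all injections are switched off and only the slack feeds the
# network through the branches collected in vec_Y0; X[0] = 1/conj(U[0]) then seeds the recursion
# for the higher-order coefficients.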
# .......................CALCULATION OF TERMS [1]
valor = np.zeros(n - 1, dtype=complex)
valor[pq - 1] = (V_slack - 1) * vec_Y0[pq - 1, 0] + (vec_P[pq - 1, 0] - vec_Q[pq - 1, 0] * 1j) * X[0, pq - 1] + U[
0, pq - 1] * vec_shunts[pq - 1, 0]
valor[pv - 1] = (V_slack - 1) * vec_Y0[pv - 1, 0] + (vec_P[pv - 1, 0]) * X[0, pv - 1] + U[0, pv - 1] * vec_shunts[
pv - 1, 0]
RHSx = np.zeros((3, n - 1), dtype=float)
RHSx[0, pq - 1] = valor[pq - 1].real
RHSx[1, pq - 1] = valor[pq - 1].imag
RHSx[2, pq - 1] = np.nan # to later delete
RHSx[0, pv - 1] = valor[pv - 1].real
RHSx[1, pv - 1] = valor[pv - 1].imag
RHSx[2, pv - 1] = vec_W[pv - 1, 0] - 1
rhs = np.matrix.flatten(RHSx, 'f')
rhs = rhs[~np.isnan(rhs)] # delete dummy cells
mat = np.zeros((dimensions, dimensions), dtype=float) # constant matrix
k = 0 # index that will go through the rows
for i in range(n - 1): # fill the matrix
lx = 0
for j in range(n - 1):
mat[k, lx] = G[i, j]
mat[k + 1, lx] = B[i, j]
mat[k, lx + 1] = -B[i, j]
mat[k + 1, lx + 1] = G[i, j]
if (j == i) and (i + 1 in pv) and (j + 1 in pv):
mat[k + 2, lx] = 2 * U_re[0, i]
mat[k + 2, lx + 1] = 2 * U_im[0, i]
mat[k, lx + 2] = -X_im[0, i]
mat[k + 1, lx + 2] = X_re[0, i]
lx = lx + 2 if (j + 1 in pq) else lx + 3
k = k + 2 if (i + 1 in pq) else k + 3
mat_factorized = factorized(csc_matrix(mat))
lhs = mat_factorized(rhs)
U_re[1, :] = lhs[index_Ure]
U_im[1, :] = lhs[index_Uim]
Q[0, pv - 1] = lhs[index_Q]
U[1, :] = U_re[1, :] + U_im[1, :] * 1j
X[1, :] = (-X[0, :] * np.conj(U[1, :])) / np.conj(U[0, :])
X_re[1, :] = np.real(X[1, :])
X_im[1, :] = np.imag(X[1, :])
# .......................CALCULATION OF TERMS [1]. DONE
# .......................CALCULATION OF TERMS [>=2]
def conv(A, B, c, i, tipus):
if tipus == 1:
suma = [np.conj(A[k, i]) * B[c - k, i] for k in range(1, c + 1)]
return sum(suma)
elif tipus == 2:
suma = [A[k, i] * B[c - 1 - k, i] for k in range(1, c)]
return sum(suma)
elif tipus == 3:
suma = [A[k, i] * np.conj(B[c - k, i]) for k in range(1, c)]
return sum(suma).real
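# Added note (illustrative): the three convolution types used in the recursion are
#   tipus 1:  sum_{k=1..c}   conj(A[k,i]) * B[c-k,i]
#   tipus 2:  sum_{k=1..c-1} A[k,i]       * B[c-1-k,i]
#   tipus 3:  Re( sum_{k=1..c-1} A[k,i] * conj(B[c-k,i]) )
# e.g. conv(X, Q, c, pv - 1, 2) below pairs already-known lower-order X and Q coefficients to
# build the right-hand side for the current order c.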
for c in range(2, prof): # c defines the current depth
valor[pq - 1] = (vec_P[pq - 1, 0] - vec_Q[pq - 1, 0] * 1j) * X[c - 1, pq - 1] + U[c - 1, pq - 1] * vec_shunts[pq - 1, 0]
valor[pv - 1] = conv(X, Q, c, pv - 1, 2) * -1j + U[c - 1, pv - 1] * vec_shunts[pv - 1, 0] + X[c - 1, pv - 1] * vec_P[pv - 1, 0]
RHSx[0, pq - 1] = valor[pq - 1].real
RHSx[1, pq - 1] = valor[pq - 1].imag
    RHSx[2, pq - 1] = np.nan  # dummy value so it can be removed cleanly afterwards
RHSx[0, pv - 1] = valor[pv - 1].real
RHSx[1, pv - 1] = valor[pv - 1].imag
    RHSx[2, pv - 1] = -conv(U,
# Repository: ghostchoir/fairseq_old
import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from .lsqquantize_V1 import Round
class ALSQPlus(Function):
@staticmethod
def forward(ctx, weight, alpha, g, Qn, Qp, beta):
# assert alpha > 0, "alpha={}".format(alpha)
ctx.save_for_backward(weight, alpha, beta)
ctx.other = g, Qn, Qp
w_q = Round.apply(torch.div((weight - beta), alpha)).clamp(Qn, Qp)
w_q = w_q * alpha + beta
return w_q
@staticmethod
def backward(ctx, grad_weight):
weight, alpha, beta = ctx.saved_tensors
g, Qn, Qp = ctx.other
q_w = (weight - beta) / alpha
        smaller = (q_w < Qn).float()  # convert the boolean mask to float (1.0 or 0.0)
        bigger = (q_w > Qp).float()  # convert the boolean mask to float (1.0 or 0.0)
        between = 1.0 - smaller - bigger  # mask of entries inside the quantization range
grad_alpha = ((smaller * Qn + bigger * Qp +
between * Round.apply(q_w) - between * q_w) * grad_weight * g).sum().unsqueeze(dim=0)
grad_beta = ((smaller + bigger) * grad_weight * g).sum().unsqueeze(dim=0)
        # values outside the quantization range are clamped to constants, so their gradient is 0
grad_weight = between * grad_weight
        # the returned gradients must correspond one-to-one with the arguments of forward()
return grad_weight, grad_alpha, None, None, None, grad_beta
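# Illustrative usage sketch (added; the tensor sizes and bit-width are assumptions, not from the
# original code): fake-quantizing a weight vector with a learned scale/offset pair. The helper is
# never called at import time.
def _alsq_plus_example():
    w = torch.randn(16)
    alpha = torch.tensor([0.1], requires_grad=True)   # learned step size
    beta = torch.tensor([0.0], requires_grad=True)    # learned offset
    Qn, Qp = -8, 7                                    # signed 4-bit range
    g = 1.0 / math.sqrt(w.numel() * Qp)               # gradient scale used by the method
    w_q = ALSQPlus.apply(w, alpha, g, Qn, Qp, beta)   # fake-quantized weights
    return w_q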
class WLSQPlus(Function):
@staticmethod
def forward(ctx, weight, alpha, g, Qn, Qp, per_channel):
# assert alpha > 0, "alpha={}".format(alpha)
ctx.save_for_backward(weight, alpha)
ctx.other = g, Qn, Qp, per_channel
if per_channel:
sizes = weight.size()
weight = weight.contiguous().view(weight.size()[0], -1)
weight = torch.transpose(weight, 0, 1)
alpha = torch.broadcast_to(alpha, weight.size())
w_q = Round.apply(torch.div(weight, alpha)).clamp(Qn, Qp)
w_q = w_q * alpha
w_q = torch.transpose(w_q, 0, 1)
w_q = w_q.contiguous().view(sizes)
else:
w_q = Round.apply(torch.div(weight, alpha)).clamp(Qn, Qp)
w_q = w_q * alpha
return w_q
@staticmethod
def backward(ctx, grad_weight):
weight, alpha = ctx.saved_tensors
g, Qn, Qp, per_channel = ctx.other
if per_channel:
sizes = weight.size()
weight = weight.contiguous().view(weight.size()[0], -1)
weight = torch.transpose(weight, 0, 1)
alpha = torch.broadcast_to(alpha, weight.size())
q_w = weight / alpha
q_w = torch.transpose(q_w, 0, 1)
q_w = q_w.contiguous().view(sizes)
else:
q_w = weight / alpha
        smaller = (q_w < Qn).float()  # convert the boolean mask to float (1.0 or 0.0)
        bigger = (q_w > Qp).float()  # convert the boolean mask to float (1.0 or 0.0)
        between = 1.0 - smaller - bigger  # mask of entries inside the quantization range
if per_channel:
grad_alpha = ((smaller * Qn + bigger * Qp +
between * Round.apply(q_w) - between * q_w) * grad_weight * g)
grad_alpha = grad_alpha.contiguous().view(grad_alpha.size()[0], -1).sum(dim=1)
else:
grad_alpha = ((smaller * Qn + bigger * Qp +
between * Round.apply(q_w) - between * q_w) * grad_weight * g).sum().unsqueeze(dim=0)
        # values outside the quantization range are clamped to constants, so their gradient is 0
grad_weight = between * grad_weight
return grad_weight, grad_alpha, None, None, None, None
def grad_scale(x, scale):
y = x
y_grad = x * scale
return (y - y_grad).detach() + y_grad
def round_pass(x):
y = x.round()
y_grad = x
return (y - y_grad).detach() + y_grad
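# Added note (illustrative): both helpers use the "detach trick" (y - y_grad).detach() + y_grad,
# whose forward value equals y while the backward pass differentiates through y_grad only. So
# grad_scale(x, s) returns x but multiplies the incoming gradient by s, and round_pass(x) returns
# round(x) with a straight-through gradient of 1 -- e.g. round_pass on 1.4 evaluates to 1.0 yet
# d(out)/dx is 1.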
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor):
    batch_size = input.shape[0]
    lower_index = round(batch_size * (1 - lower_percentile * 0.01))
    upper_index = round(batch_size * (1 - upper_percentile * 0.01))
upper_bound = torch.kthvalue(input, k=upper_index).values
if lower_percentile == 0:
lower_bound = upper_bound * 0
else:
        lower_bound = -torch.kthvalue(-input, k=lower_index).values
    return lower_bound, upper_bound
# def update_scale_betas():
# for m in model.modules():
# if isinstance(m, nn.)
# Activation (feature) quantization
class LSQPlusActivationQuantizer(nn.Module):
def __init__(self, a_bits, all_positive=False, batch_init=20):
        # activations do not have a per-channel option
super(LSQPlusActivationQuantizer, self).__init__()
self.a_bits = a_bits
self.all_positive = all_positive
self.batch_init = batch_init
if self.all_positive:
# unsigned activation is quantized to [0, 2^b-1]
self.Qn = 0
self.Qp = 2 ** self.a_bits - 1
else:
# signed weight/activation is quantized to [-2^(b-1), 2^(b-1)-1]
self.Qn = - 2 ** (self.a_bits - 1)
self.Qp = 2 ** (self.a_bits - 1) - 1
self.s = torch.nn.Parameter(torch.ones(1), requires_grad=True)
self.beta = torch.nn.Parameter(torch.ones(0), requires_grad=True)
self.init_state = 0
    # quantize / de-quantize
def forward(self, activation):
if self.init_state == 0:
self.g = 1.0 / math.sqrt(activation.numel() * self.Qp)
mina = torch.min(activation.detach())
self.s.data = (torch.max(activation.detach()) - mina) / (self.Qp - self.Qn)
self.beta.data = mina - self.s.data * self.Qn
self.init_state += 1
elif self.init_state < self.batch_init:
mina = torch.min(activation.detach())
self.s.data = self.s.data * 0.9 + 0.1 * (torch.max(activation.detach()) - mina) / (self.Qp - self.Qn)
            self.beta.data = self.beta.data * 0.9 + 0.1 * (mina - self.s.data * self.Qn)
self.init_state += 1
elif self.init_state == self.batch_init:
# self.s = torch.nn.Parameter(self.s)
# self.beta = torch.nn.Parameter(self.beta)
self.init_state += 1
if self.a_bits == 32:
q_a = activation
elif self.a_bits == 1:
print('!Binary quantization is not supported !')
assert self.a_bits != 1
else:
q_a = ALSQPlus.apply(activation, self.s, self.g, self.Qn, self.Qp, self.beta)
return q_a
# Weight quantization
class LSQPlusWeightQuantizer(nn.Module):
def __init__(self, w_bits, all_positive=False, per_channel=False, batch_init=20):
super(LSQPlusWeightQuantizer, self).__init__()
self.w_bits = w_bits
self.all_positive = all_positive
self.batch_init = batch_init
if self.all_positive:
# unsigned activation is quantized to [0, 2^b-1]
self.Qn = 0
self.Qp = 2 ** w_bits - 1
else:
# signed weight/activation is quantized to [-2^(b-1), 2^(b-1)-1]
self.Qn = - 2 ** (w_bits - 1)
self.Qp = 2 ** (w_bits - 1) - 1
self.per_channel = per_channel
self.init_state = 0
self.s = torch.nn.Parameter(torch.ones(1), requires_grad=True)
    # quantize / de-quantize
def forward(self, weight):
'''
For this work, each layer of weights and each layer of activations has a distinct step size, represented
        as an fp32 value, initialized to 2*<|v|>/sqrt(Q_P), computed on either the initial weights values or the first
batch of activations, respectively
'''
if self.init_state == 0:
self.g = 1.0 / math.sqrt(weight.numel() * self.Qp)
self.div = 2 ** self.w_bits - 1
if self.per_channel:
weight_tmp = weight.detach().contiguous().view(weight.size()[0], -1)
mean = torch.mean(weight_tmp, dim=1)
std = torch.std(weight_tmp, dim=1)
self.s.data, _ = torch.max(torch.stack([torch.abs(mean - 3 * std), torch.abs(mean + 3 * std)]), dim=0)
self.s.data = self.s.data / self.div
else:
mean = torch.mean(weight.detach())
std = torch.std(weight.detach())
self.s.data = max([torch.abs(mean - 3 * std), torch.abs(mean + 3 * std)]) / self.div
self.init_state += 1
elif self.init_state < self.batch_init:
self.div = 2 ** self.w_bits - 1
if self.per_channel:
weight_tmp = weight.detach().contiguous().view(weight.size()[0], -1)
mean = torch.mean(weight_tmp, dim=1)
std = torch.std(weight_tmp, dim=1)
                new_s, _ = torch.max(torch.stack([torch.abs(mean - 3 * std), torch.abs(mean + 3 * std)]), dim=0)
                self.s.data = self.s.data * 0.9 + 0.1 * new_s / self.div
else:
mean = torch.mean(weight.detach())
std = torch.std(weight.detach())
self.s.data = self.s.data * 0.9 + 0.1 * max(
[torch.abs(mean - 3 * std), torch.abs(mean + 3 * std)]) / self.div
self.init_state += 1
elif self.init_state == self.batch_init:
# self.s = torch.nn.Parameter(self.s)
self.init_state += 1
if self.w_bits == 32:
output = weight
elif self.w_bits == 1:
print('!Binary quantization is not supported !')
assert self.w_bits != 1
else:
w_q = WLSQPlus.apply(weight, self.s, self.g, self.Qn, self.Qp, self.per_channel)
# alpha = grad_scale(self.s, g)
# w_q = Round.apply((weight/alpha).clamp(Qn, Qp)) * alpha
return w_q
class QuantConv1d(nn.Conv1d):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
a_bits=8,
w_bits=8,
quant_inference=False,
all_positive=False,
per_channel=False,
batch_init=20):
super(QuantConv1d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups,
bias, padding_mode)
self.quant_inference = quant_inference
self.activation_quantizer = LSQPlusActivationQuantizer(a_bits=a_bits, all_positive=all_positive,
batch_init=batch_init)
self.weight_quantizer = LSQPlusWeightQuantizer(w_bits=w_bits, all_positive=all_positive,
per_channel=per_channel, batch_init=batch_init)
def forward(self, input):
quant_input = self.activation_quantizer(input)
if not self.quant_inference:
quant_weight = self.weight_quantizer(self.weight)
else:
quant_weight = self.weight
output = F.conv1d(quant_input, quant_weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return output
class QuantConv2d(nn.Conv2d):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
a_bits=8,
w_bits=8,
quant_inference=False,
all_positive=False,
per_channel=False,
batch_init=20):
super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups,
bias, padding_mode)
self.quant_inference = quant_inference
self.activation_quantizer = LSQPlusActivationQuantizer(a_bits=a_bits, all_positive=all_positive,
batch_init=batch_init)
self.weight_quantizer = LSQPlusWeightQuantizer(w_bits=w_bits, all_positive=all_positive,
per_channel=per_channel, batch_init=batch_init)
def forward(self, input):
quant_input = self.activation_quantizer(input)
if not self.quant_inference:
quant_weight = self.weight_quantizer(self.weight)
else:
quant_weight = self.weight
output = F.conv2d(quant_input, quant_weight, self.bias, self.stride, self.padding, self.dilation,
self.groups)
return output
class QuantConvTranspose2d(nn.ConvTranspose2d):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
a_bits=8,
w_bits=8,
quant_inference=False,
all_positive=False,
per_channel=False,
batch_init=20):
super(QuantConvTranspose2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding,
output_padding,
dilation, groups, bias, padding_mode)
self.quant_inference = quant_inference
self.activation_quantizer = LSQPlusActivationQuantizer(a_bits=a_bits, all_positive=all_positive,
batch_init=batch_init)
self.weight_quantizer = LSQPlusWeightQuantizer(w_bits=w_bits, all_positive=all_positive,
per_channel=per_channel, batch_init=batch_init)
def forward(self, input):
quant_input = self.activation_quantizer(input)
if not self.quant_inference:
quant_weight = self.weight_quantizer(self.weight)
else:
quant_weight = self.weight
output = F.conv_transpose2d(quant_input, quant_weight, self.bias, self.stride, self.padding,
self.output_padding,
self.groups, self.dilation)
return output
class QuantLinear(nn.Linear):
def __init__(self,
in_features,
out_features,
bias=True,
a_bits=8,
w_bits=8,
quant_inference=False,
all_positive=False,
per_channel=False,
batch_init=20):
super(QuantLinear, self).__init__(in_features, out_features, bias)
self.quant_inference = quant_inference
self.activation_quantizer = LSQPlusActivationQuantizer(a_bits=a_bits, all_positive=all_positive,
batch_init=batch_init)
self.weight_quantizer = LSQPlusWeightQuantizer(w_bits=w_bits, all_positive=all_positive,
per_channel=per_channel, batch_init=batch_init)
def forward(self, input):
quant_input = self.activation_quantizer(input)
if not self.quant_inference:
quant_weight = self.weight_quantizer(self.weight)
else:
quant_weight = self.weight
output = F.linear(quant_input, quant_weight, self.bias)
return output
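# Illustrative usage sketch (added; the layer sizes and bit-widths are assumptions): using the
# class above as a drop-in replacement for nn.Linear with 8-bit weights and activations. The
# helper is never called at import time.
def _quant_linear_example():
    layer = QuantLinear(128, 64, bias=True, a_bits=8, w_bits=8,
                        all_positive=False, per_channel=True, batch_init=20)
    x = torch.randn(4, 128)
    return layer(x)  # the first calls initialize the step sizes, later calls refine them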
def add_quant_op(module, layer_counter, a_bits=8, w_bits=8,
quant_inference=False, all_positive=False, per_channel=False, batch_init=20):
for name, child in module.named_children():
if isinstance(child, nn.Conv2d):
layer_counter[0] += 1
            if layer_counter[0] >= 1:  # quantize the first layer as well
if child.bias is not None:
quant_conv = QuantConv2d(child.in_channels, child.out_channels,
child.kernel_size, stride=child.stride,
padding=child.padding, dilation=child.dilation,
groups=child.groups, bias=True, padding_mode=child.padding_mode,
a_bits=a_bits, w_bits=w_bits, quant_inference=quant_inference,
all_positive=all_positive, per_channel=per_channel, batch_init=batch_init)
                    quant_conv.bias.data
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~dev_center.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class DevBoxDefinition(TrackedResource):
"""Represents a definition for a Developer Machine.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~dev_center.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param image_reference: Image reference information.
:type image_reference: ~dev_center.models.ImageReference
:param sku: The SKU for Dev Boxes created using this definition.
:type sku: ~dev_center.models.Sku
:param os_storage_type: The storage type used for the Operating System disk of Dev Boxes
created using this definition.
:type os_storage_type: str
:ivar provisioning_state: The provisioning state of the resource.
:vartype provisioning_state: str
:ivar image_validation_status: Validation status of the configured image. Possible values
include: "Unknown", "Pending", "Succeeded", "Failed", "TimedOut".
:vartype image_validation_status: str or ~dev_center.models.ImageValidationStatus
:ivar image_validation_error_details: Details for image validator error. Populated when the
image validation is not successful.
:vartype image_validation_error_details: ~dev_center.models.ImageValidationErrorDetails
:ivar active_image_reference: Image reference information for the currently active image (only
populated during updates).
:vartype active_image_reference: ~dev_center.models.ImageReference
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'image_validation_status': {'readonly': True},
'image_validation_error_details': {'readonly': True},
'active_image_reference': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'image_reference': {'key': 'properties.imageReference', 'type': 'ImageReference'},
'sku': {'key': 'properties.sku', 'type': 'Sku'},
'os_storage_type': {'key': 'properties.osStorageType', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'image_validation_status': {'key': 'properties.imageValidationStatus', 'type': 'str'},
'image_validation_error_details': {'key': 'properties.imageValidationErrorDetails', 'type': 'ImageValidationErrorDetails'},
'active_image_reference': {'key': 'properties.activeImageReference', 'type': 'ImageReference'},
}
def __init__(
self,
**kwargs
):
super(DevBoxDefinition, self).__init__(**kwargs)
self.image_reference = kwargs.get('image_reference', None)
self.sku = kwargs.get('sku', None)
self.os_storage_type = kwargs.get('os_storage_type', None)
self.provisioning_state = None
self.image_validation_status = None
self.image_validation_error_details = None
self.active_image_reference = None
class DevBoxDefinitionListResult(msrest.serialization.Model):
"""Results of the Dev Box definition list operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Current page of results.
:vartype value: list[~dev_center.models.DevBoxDefinition]
:ivar next_link: URL to get the next set of results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DevBoxDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DevBoxDefinitionListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DevBoxDefinitionUpdateProperties(msrest.serialization.Model):
"""Properties of a Dev Box definition. These properties can be updated after the resource has been created.
:param image_reference: Image reference information.
:type image_reference: ~dev_center.models.ImageReference
:param sku: The SKU for Dev Boxes created using this definition.
:type sku: ~dev_center.models.Sku
:param os_storage_type: The storage type used for the Operating System disk of Dev Boxes
created using this definition.
:type os_storage_type: str
"""
_attribute_map = {
'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
'sku': {'key': 'sku', 'type': 'Sku'},
'os_storage_type': {'key': 'osStorageType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DevBoxDefinitionUpdateProperties, self).__init__(**kwargs)
self.image_reference = kwargs.get('image_reference', None)
self.sku = kwargs.get('sku', None)
self.os_storage_type = kwargs.get('os_storage_type', None)
class DevBoxDefinitionProperties(DevBoxDefinitionUpdateProperties):
"""Properties of a Dev Box definition.
Variables are only populated by the server, and will be ignored when sending a request.
:param image_reference: Image reference information.
:type image_reference: ~dev_center.models.ImageReference
:param sku: The SKU for Dev Boxes created using this definition.
:type sku: ~dev_center.models.Sku
:param os_storage_type: The storage type used for the Operating System disk of Dev Boxes
created using this definition.
:type os_storage_type: str
:ivar provisioning_state: The provisioning state of the resource.
:vartype provisioning_state: str
:ivar image_validation_status: Validation status of the configured image. Possible values
include: "Unknown", "Pending", "Succeeded", "Failed", "TimedOut".
:vartype image_validation_status: str or ~dev_center.models.ImageValidationStatus
:ivar image_validation_error_details: Details for image validator error. Populated when the
image validation is not successful.
:vartype image_validation_error_details: ~dev_center.models.ImageValidationErrorDetails
:ivar active_image_reference: Image reference information for the currently active image (only
populated during updates).
:vartype active_image_reference: ~dev_center.models.ImageReference
"""
_validation = {
'provisioning_state': {'readonly': True},
'image_validation_status': {'readonly': True},
'image_validation_error_details': {'readonly': True},
'active_image_reference': {'readonly': True},
}
_attribute_map = {
'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
'sku': {'key': 'sku', 'type': 'Sku'},
'os_storage_type': {'key': 'osStorageType', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'image_validation_status': {'key': 'imageValidationStatus', 'type': 'str'},
'image_validation_error_details': {'key': 'imageValidationErrorDetails', 'type': 'ImageValidationErrorDetails'},
'active_image_reference': {'key': 'activeImageReference', 'type': 'ImageReference'},
}
def __init__(
self,
**kwargs
):
super(DevBoxDefinitionProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.image_validation_status = None
self.image_validation_error_details = None
self.active_image_reference = None
class TrackedResourceUpdate(msrest.serialization.Model):
"""Base tracked resource type for PATCH updates.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: The geo-location where the resource lives.
:type location: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResourceUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs.get('location', None)
class DevBoxDefinitionUpdate(TrackedResourceUpdate):
"""Partial update of a Dev Box definition resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: The geo-location where the resource lives.
:type location: str
:param image_reference: Image reference information.
:type image_reference: ~dev_center.models.ImageReference
:param sku: The SKU for Dev Boxes created using this definition.
:type sku: ~dev_center.models.Sku
:param os_storage_type: The storage type used for the Operating System disk of Dev Boxes
created using this definition.
:type os_storage_type: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'image_reference': {'key': 'properties.imageReference', 'type': 'ImageReference'},
'sku': {'key': 'properties.sku', 'type': 'Sku'},
'os_storage_type': {'key': 'properties.osStorageType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DevBoxDefinitionUpdate, self).__init__(**kwargs)
self.image_reference = kwargs.get('image_reference', None)
self.sku = kwargs.get('sku', None)
self.os_storage_type = kwargs.get('os_storage_type', None)
class DevCenter(TrackedResource):
"""Represents a devcenter resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~dev_center.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: Managed identity properties.
:type identity: ~dev_center.models.ManagedServiceIdentity
:ivar provisioning_state: The provisioning state of the resource.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': | |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 17:18:39 2021
@author: Koustav
"""
import os
import glob
import matplotlib.pyplot as plt
import seaborn as sea
import numpy as np
import pandas as pan
import math
import collections
import matplotlib.ticker as mtick
from mpl_toolkits import mplot3d
from matplotlib.collections import LineCollection
from scipy.optimize import curve_fit
import powerlaw
def pow_law(x, a, expo):
return a*(np.power(x, expo))
def trunc_pow_law(x, a, expo, trunc_expo): #Truncated Power Law
return a*(np.power(x, expo))*np.exp(trunc_expo*x)
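# Illustrative sketch (not part of the original analysis): how the model
# functions above can be fit with scipy's curve_fit. The synthetic data,
# initial guesses, and helper name are assumptions for the demo only.
def _demo_trunc_pow_law_fit():
    xs = np.arange(1, 200, dtype=float)
    ys = trunc_pow_law(xs, 1.0, -0.75, -0.005)
    popt, pcov = curve_fit(trunc_pow_law, xs, ys, p0=[1.0, -0.5, -0.001], maxfev=5000)
    perr = np.sqrt(np.diag(pcov))  # one-sigma uncertainties of (a, expo, trunc_expo)
    return popt, perr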
def main_ind():
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0
for i in range(6,7):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
'''if(p == 0.728):
print("Skipped")
continue'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
#DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
print("Sorted del(s) PDF:")
print(DP_freqs)
'''DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)'''
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
'''if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")'''
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
#hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S \geq \Delta s)$"])
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S = \Delta s)$"])
fig = plt.figure(figsize=(6.4,4.8))
f = sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$")
f.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
#sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
#ax.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); plt.xscale('log')
plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
plt.savefig("0P(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
'''x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
perr = np.sqrt(np.diag(pcov))
print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
plt.plot(x1, trunc_pow_law(x1, *popt), 'm--', label=r'Fit: $ P (S \geq \Delta s) = %3.2f \times \Delta s^{(%4.3f \mp %4.3f)}\times e^{(%4.3f \mp %4.3f)\times \Delta s}$ ' % tukan )
plt.ylim(10**(-6.4), 10**(0.1)); plt.xlim(1, 10**5)
plt.legend()
plt.savefig("Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
#Saving best fit data.
gaol[float(round(CC,2))].append([L, p, -popt[1], perr[1], -popt[2], perr[2]])'''
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
#break;
#Saving as CSVs.
'''if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
K= [0.6, 0.7, 0.75, 0.8, 0.9, 0.95]
heado = 'L, p, alpha, SD(alpha), lambda, SD(lambda)'
for k in K:
np.savetxt("BestFitCDF_CC_%3.2F.csv" %(k), gaol[k], delimiter=',', header=heado, comments='#')
os.chdir(r"../../")'''
def main_ccdf_fit():
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0; crosc= 0.7
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\512" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( p == 0.678):
print(str(CC) + " " + str(p) + " shall be skipped.")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
fit = powerlaw.Fit(data_temp[:,5],discrete=True,estimate_discrete = False) #If you already know xmin pass it as an argument (xmin=value) for speed
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
print('x_min: ',fit.xmin)
print('alpha: ',fit.truncated_power_law.parameter1)
print('1/lambda: ',1/fit.truncated_power_law.parameter2)
tukan = (-fit.truncated_power_law.parameter1, -fit.truncated_power_law.parameter2)
fig = fit.plot_ccdf(color ='cornflowerblue', ls='-', linewidth=1.1, alpha=0.2)
fit.plot_ccdf(color='darkcyan',marker='o', linestyle='', ms=1.2, alpha=0.35, ax=fig)
#ax = fig.add_subplot(111)
fit.truncated_power_law.plot_ccdf(color='darkslateblue', linestyle='--', label=r'Fit: $ P (S \geq \Delta s) \propto \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan, ax=fig)
fig.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#x = fit.xmins
#y = fit.Ds
#plt.ylim(10**(-6.4), 10**(0.1));
plt.xlim(1, 10**5.3)
plt.xlabel(r"$|\Delta s|$")
plt.ylabel(r"$P (S \geq \Delta s)$")
plt.legend()
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
plt.savefig("Better Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
os.chdir("../../")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("Done with CDF Plots And Fits. Moving On To PDF Plots...")
fig = fit.plot_pdf(color='darkcyan',marker='o', linestyle='', ms=1.5, alpha=0.4)
#fit.plot_pdf(color='darkcyan',marker='o', linestyle='', ms=1.2, alpha=0.35, ax=fig)
#ax = fig.add_subplot(111)
fit.truncated_power_law.plot_pdf(color='darkslateblue', linestyle='--', label=r'Fit: $ P (S = \Delta s) \propto \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan, ax=fig)
fig.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#x = fit.xmins
#y = fit.Ds
#plt.ylim(10**(-6.4), 10**(0.1));
plt.xlim(1, 10**5.3)
plt.xlabel(r"$|\Delta s|$")
plt.ylabel(r"$P (S = \Delta s)$")
plt.legend()
plt.savefig("Better Fit PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
comparison_tpl_exp = fit.distribution_compare('truncated_power_law','exponential',normalized_ratio=True)
comparison_tpl_streched_exp = fit.distribution_compare('truncated_power_law','stretched_exponential',normalized_ratio=True)
comparison_tpl_log_normal = fit.distribution_compare('truncated_power_law','lognormal',normalized_ratio=True)
comparison_tpl_pl = fit.distribution_compare('truncated_power_law','power_law',normalized_ratio=True)
f = open("Taupe.txt", "w+")
f.write("LR (Power Law): " + str(comparison_tpl_pl[0]) +" p-value: "+ str(comparison_tpl_pl[1]) +"\n")
f.write("LR (Exponential): " + str(comparison_tpl_exp[0]) +" p-value: "+ str(comparison_tpl_exp[1]) +"\n")
f.write("LR (Log-Normal): " + str(comparison_tpl_log_normal[0]) +" p-value: "+ str(comparison_tpl_log_normal[1]) +"\n")
f.write("LR (Stretched-Exponential): " + str(comparison_tpl_streched_exp[0]) +" p-value: "+ str(comparison_tpl_streched_exp[1]) +"\n")
f.close()
print("LR (Power Law): ",comparison_tpl_pl[0]," p-value: ",comparison_tpl_pl[1])
print("LR (Exponential): ",comparison_tpl_exp[0]," p-value: ",comparison_tpl_exp[1])
print("LR (Log-Normal): ",comparison_tpl_log_normal[0]," p-value: ",comparison_tpl_log_normal[1])
print("LR (Stretched-Exponential): ",comparison_tpl_streched_exp[0]," p-value: ",comparison_tpl_streched_exp[1])
gaol[float(round(CC,2))].append([L, p, fit.xmin, fit.truncated_power_law.parameter1, 1/fit.truncated_power_law.parameter2])
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
K= [0.6, 0.7, 0.75, 0.8, 0.9, 0.95]
heado = 'L, p, x_min, alpha, 1/lambda'
for k in K:
np.savetxt("Nu_Pow_0_6_BestFitCDF_CC_%3.2F.csv" %(k), gaol[k], delimiter=',', header=heado, comments='#')
os.chdir(r"../../")
def main_cumulative():
p_c = 0.725194
crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
binder=[]; L=0;
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\512" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
print("Skipping dump file: " + file)
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
print(str(CC) + " shall be skipped.")
continue
if( p == 0.678):
print("Fuck You")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
on_error_event" %
(port, self))
else:
raise UnknownPortError("Unknown port '%s' in OutputThing %s" % (port, self)) from e
enq = self.__enqueue_fn__
if enq:
for s in connections:
enq(s.on_error, e)
else:
try:
for s in connections:
s.on_error(e)
except FatalError:
raise
except Exception as e:
raise ExcInDispatch("Unexpected exception when dispatching error '%s' to InputThing %s from OutputThing %s" %
(repr(e), s.input_thing, self)) from e
self._close_port(port)
def print_downstream(self):
"""Recursively print all the downstream paths. This is for debugging.
"""
def has_connections(thing):
if not hasattr(thing, '__connections__'):
return False
return thing._has_connections()
def print_from(current_seq, thing):
if has_connections(thing):
for (port, connections) in thing.__connections__.items():
for connection in connections:
if port=='default' and \
connection.input_port=='default':
next_seq = " => %s" % connection.input_thing
else:
next_seq = " [%s]=>[%s] %s" % \
(port, connection.input_port,
connection.input_thing)
print_from(current_seq + next_seq,
connection.input_thing)
else:
print(current_seq)
print("***** Dump of all paths from %s *****" % self.__str__())
print_from(" " + self.__str__(), self)
print("*"*(12+len(self.__str__())))
def trace_downstream(self):
"""Install wrappers that print a trace message for each
event on this thing and all downstream things.
"""
def has_connections(thing):
if not hasattr(thing, '__connections__'):
return False
return thing._has_connections()
def fmt(thing, port):
return '%s.%s' % (str(thing), port) if port!='default' \
else str(thing)
def trace_on_next(thing, output_port, connection, x):
print(" %s => (%s) => %s" %
(fmt(thing, output_port), str(x),
fmt(connection.input_thing,
connection.input_port)))
connection.on_next(x)
def trace_on_error(thing, output_port, connection, error):
print(" %s => on_error(%s) => %s" %
(fmt(thing, output_port), str(error),
fmt(connection.input_thing,
connection.input_port)))
connection.on_error(error)
def trace_on_completed(thing, output_port, connection):
print(" %s => on_completed => %s" %
(fmt(thing, output_port),
fmt(connection.input_thing,
connection.input_port)))
connection.on_completed()
def make_trace_connection(src_thing, output_port, old_connection):
return _Connection(
on_next=lambda x: trace_on_next(src_thing, output_port,
old_connection, x),
on_error=lambda e: trace_on_error(src_thing, output_port,
old_connection, e),
on_completed=lambda : trace_on_completed(src_thing,
output_port,
old_connection),
input_thing=old_connection.input_thing,
input_port=old_connection.input_port)
def trace_from(thing):
if has_connections(thing):
new_connections = {}
for (port, connections) in thing.__connections__.items():
connections_for_port = []
for connection in connections:
trace_from(connection.input_thing)
connections_for_port.append(make_trace_connection(thing,
port,
connection))
new_connections[port] = connections_for_port
thing.__connections__ = new_connections
trace_from(self)
print("***** installed tracing in all paths starting from %s" %
str(self))
def pp_connections(self):
"""pretty print the set of connections"""
h1 = "***** InputThings for %s *****" % self
print(h1)
for port in sorted(self.__connections__.keys()):
print(" Port %s" % port)
for s in self.__connections__[port]:
print(" [%s] => %s" % (s.input_port, s.input_thing))
print(" on_next: %s" % s.on_next)
print(" on_completed: %s" % s.on_completed)
print(" on_error: %s" % s.on_error)
print("*"*len(h1))
def __str__(self):
return self.__class__.__name__ + '()'
class Filter(OutputThing, InputThing):
"""A filter has a default input port and a default output port. It is
used for data transformations. The default implementations of on_next(),
on_completed(), and on_error() just pass the event on to the downstream
connection.
"""
def __init__(self, previous_in_chain):
super().__init__()
# connect to the previous filter
self.disconnect_from_upstream = previous_in_chain.connect(self)
def on_next(self, x):
self._dispatch_next(x)
def on_error(self, e):
self._dispatch_error(e)
def on_completed(self):
self._dispatch_completed()
def __str__(self):
return self.__class__.__name__ + '()'
class XformOrDropFilter(Filter):
"""Implements a slightly more complex filter protocol where events may be
transformed or dropped. Subclasses just need to implement the _filter() and
_complete() methods.
"""
def __init__(self, previous_in_chain):
super().__init__(previous_in_chain)
def on_next(self, x):
"""Calls _filter(x) to process
the event. If _filter() returns None, nothing further is done. Otherwise,
the return value is passed to the downstream connection. This allows you
to both transform as well as send only selected events.
Errors other than FatalError are handled gracefully by calling
self.on_error() and then disconnecting from the upstream OutputThing.
"""
try:
x_prime = self._filter(x)
except FatalError:
raise
except Exception as e:
logger.exception("Got an exception on %s._filter(%s)" %
(self, x))
self.on_error(e)
self.disconnect_from_upstream()
else:
if x_prime is not None:
self._dispatch_next(x_prime)
def _filter(self, x):
"""Filtering method to be implemented by subclasses.
"""
return x
def _complete(self):
"""Method to be overridden by subclasses. It is called as a part of
on_error() and on_completed() to give a chance to pass down a held-back
event. Return None if there is no such event.
You can also clean up any state in this method (e.g. close connections).
Should not throw any exceptions other than FatalError.
"""
return None
def on_error(self, e):
"""Passes on any final event and then passes the notification to the
next Thing.
If you need to clean up any state, do it in _complete().
"""
x = self._complete()
if x is not None:
self._dispatch_next(x)
self._dispatch_error(e)
def on_completed(self):
"""Passes on any final event and then passes the notification to the
next Thing.
If you need to clean up any state, do it in _complete().
"""
x = self._complete()
if x is not None:
self._dispatch_next(x)
self._dispatch_completed()
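# Hypothetical sketch (not in the original module): a minimal XformOrDropFilter
# subclass showing the _filter()/_complete() protocol described above. The
# class name and threshold logic are assumptions for illustration only.
class ThresholdFilter(XformOrDropFilter):
    def __init__(self, previous_in_chain, threshold):
        super().__init__(previous_in_chain)
        self.threshold = threshold
        self.dropped = 0
    def _filter(self, x):
        # Returning None drops the event; returning a value passes it downstream.
        if x < self.threshold:
            self.dropped += 1
            return None
        return x
    def _complete(self):
        # Final held-back event dispatched on completion or error (may be None).
        return ('dropped_count', self.dropped)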
class FunctionFilter(Filter):
"""Implement a filter by providing functions that implement the
on_next, on_completed, and on_error logic. This is useful
when the logic is really simple or when a more functional programming
style is more convenient.
Each function takes a "self" parameter, so it works almost like it was
defined as a bound method. The signatures are then::
on_next(self, x)
on_completed(self)
on_error(self, e)
If a function is not provided to __init__, we just dispatch the call downstream.
"""
def __init__(self, previous_in_chain,
on_next=None, on_completed=None,
on_error=None, name=None):
"""name is an option name to be used in __str__() calls.
"""
super().__init__(previous_in_chain)
self._on_next = on_next
self._on_error = on_error
self._on_completed = on_completed
if name:
self.name = name
def on_next(self, x):
try:
if self._on_next:
# we pass in an extra "self" since this is a function, not a method
self._on_next(self, x)
else:
self._dispatch_next(x)
except FatalError:
raise
except Exception as e:
logger.exception("Got an exception on %s.on_next(%s)" %
(self, x))
self.on_error(e)
self.disconnect_from_upstream() # stop from getting upstream events
def on_error(self, e):
if self._on_error:
self._on_error(self, e)
else:
self._dispatch_error(e)
def on_completed(self):
if self._on_completed:
self._on_completed(self)
else:
self._dispatch_completed()
def __str__(self):
if hasattr(self, 'name'):
return self.name
else:
return self.__class__.__name__ + '()'
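# Hypothetical usage sketch (not part of the original module): building a
# simple scaling filter from a plain function. 'upstream' is assumed to be
# an OutputThing; the helper and filter names are illustrative only.
def _demo_function_filter(upstream):
    def _scale(self, x):
        # The function receives the filter as an explicit 'self' argument.
        self._dispatch_next(x * 10)
    return FunctionFilter(upstream, on_next=_scale, name='scale10')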
def _is_thunk(t):
return hasattr(t, '__thunk__')
def _make_thunk(t):
setattr(t, '__thunk__', True)
class _ThunkBuilder:
"""This is used to create a thunk from a linq-style
method.
"""
def __init__(self, func):
self.func = func
self.__name__ = func.__name__
def __call__(self, *args, **kwargs):
if len(args)==0 and len(kwargs)==0:
_make_thunk(self.func)
return self.func
def apply(this):
return self.func(this, *args, **kwargs)
apply.__name__ = self.__name__
_make_thunk(apply)
return apply
def __repr__(self):
return "_ThunkBuilder(%s)" % self.__name__
def _connect_thunk(prev, thunk):
"""Connect the thunk to the previous in the chain. Handles
all the cases where we might be given a filter, a thunk,
a thunk builder (unevaluated linq function), or a bare callable."""
if callable(thunk):
if _is_thunk(thunk):
return thunk(prev)
elif isinstance(thunk, _ThunkBuilder):
real_thunk = thunk()
assert _is_thunk(real_thunk)
return real_thunk(prev)
else: # bare callable, will be wrapped by the connect() method
prev.connect(thunk)
return None
else:
return prev.connect(thunk) # assumed to be a filter
def filtermethod(base, alias=None):
"""Function decorator that creates a linq-style filter out of the
specified function. As described in the thingflow.linq documentation,
it should take a OutputThing as its first argument (the source of events)
and return a OutputThing (representing the end the filter sequence once
the filter is included. The returned OutputThing is typically an instance
of thingflow.base.Filter.
The specified function is used in two places:
1. A method with the specified name is added to the specified class
(usually the OutputThing base class). This is for the fluent (method
chaining) API.
2. A function is created in the local namespace for use in the functional API.
This function does not take the OutputThing as an argument. Instead,
it takes the remaining arguments and then returns a function which,
when passed an OutputThing, connects to it and returns a filter.
Decorator arguments:
* param T base: Base class to extend with method
(usually thingflow.base.OutputThing)
* param string alias: an alias for this function or list of aliases
(e.g. map for select, etc.).
* returns: A function that takes the class to be decorated.
* rtype: func -> func
This was adapted from the RxPy extensionmethod decorator.
"""
def inner(func):
"""This function is returned by the outer filtermethod()
:param types.FunctionType func: Function to be decorated
"""
func_names = [func.__name__,]
if alias:
aliases = alias if isinstance(alias, list) else [alias]
func_names += aliases
_thunk = _ThunkBuilder(func)
# For the primary name and all aliases, set the name on the
# base class as well as in the | |
import os
import sys
import pkg_resources
import numpy as np
from matplotlib.image import imread
import obrero.cal as ocal
import obrero.plot as oplot
import obrero.experimental.enso as oenso
# path where stored logo
DATA_PATH = pkg_resources.resource_filename('obrero', 'data/')
def _add_text_axes(axes, text):
"""Use a given axes to place given text."""
txt = axes.text(0.5, 0.5, text, ha='center', va='center')
axes.axis('off')
return txt
def _latex_authoring(title, author, affil, email):
"""Creates a text object with LaTeX code to include in plots
made with `video_udea`.
""" # noqa
texmsg = []
# lets build it
texmsg.append(r'\begin{center}')
# title
if isinstance(title, list):
for t in title:
texmsg.append(t + r'\\')
else:
texmsg.append(title + r'\\')
# a bit of space
texmsg.append(r'\vspace{1em}')
# authors
if isinstance(author, list):
for a in author:
texmsg.append(r'\tiny{' + a + r'}\\')
else:
texmsg.append(r'\tiny{' + author + r'}\\')
# authors
if isinstance(affil, list):
for a in affil:
texmsg.append(r'\tiny{' + a + r'}\\')
else:
texmsg.append(r'\tiny{' + affil + r'}\\')
# email
if isinstance(email, list):
for e in email:
texmsg.append(r'\tiny{' + e + r'}')
else:
texmsg.append(r'\tiny{' + email + r'}')
# finish
texmsg.append(r'\end{center}')
# join
latext = ' '.join(texmsg)
return latext
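# Illustrative sketch (values are assumptions): a typical call producing the
# authoring text box used by video_udea below.
def _demo_authoring_text():
    return _latex_authoring(
        title=['Some experiment', 'Monthly anomalies'],
        author='A. Author',
        affil='Universidad de Antioquia',
        email='author@example.com')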
def video_udea(dlist, slist, bbox, title, author, affil, email,
rotate, wpx=1920, hpx=1080, dpi=300, lon0=0, dg=1,
save_dir=None, smooth=False, winds=None, xhres=None):
"""Create video made for ExpoIngenieria 2018.
A very specific format was used to produce this video and to keep
it we created this function. It can only be used to produce such
video. In this case we need for sets of data arrays: a variable to
be plotted in an Orthographic projection rotating every `dg`
degrees, two lines of time series area average over a region to be
plotted and compared in an xy-plot, and sea surface temperature
(SST) values to include the ONI time series. The user can also
input horizontal wind fields U and V to have vectors plotted on
top of contours.
Parameters
----------
dlist: list of xarray.DataArray
This list must have the following order:
[variable_for_contours, first_time_series,
second_time_series, sst_array]
The first variable will be plotted in a rotating Orthographic
projection. The time series will be plotted together in an
xy-plot. And the SST array will be used to plot also an ONI
index axes.
slist: list of dict objects of specifications
This list must contain three dict objects: one for the contour
plot, one for the time series plot and one for the ONI index
plot. So the list must be:
[specifications_contours, specifications_time_series,
specifications_oni_index]
For the specifications of the contours see keywords of
function `plot_global_contour`, except keyword `axes`. For the
time series specifications see keywords of the function
`averages_video_udea`. And for the ONI plot see keywords in
the `oni_video_udea` function.
bbox: list of list objects
This is a list of two list objects which have corner
coordinates to plot a squared region: [xcorners,
ycorners]. This in case the user wants to highlight a squared
region somewhere in the Orthographic projection map. This
object can be obtained using function `bbox_linecoords`.
title: str or list of str
Title to be placed in a text-only axes. Input for
`_latex_authoring`. If multiple lines it should be a list of
str in which each str is a single line.
author: str or list of str
Author information to be placed in a text-only axes. Input for
`_latex_authoring`. If multiple lines it should be a list of
str in which each str is a single line.
affil: str or list of str
Affiliation information of author to be placed in a text-only
axes. Input for `_latex_authoring`. If multiple lines it
should be a list of str in which each str is a single line.
email: str or list of str
Author e-mail information to be placed in a text-only
axes. Input for `_latex_authoring`. If multiple lines it
should be a list of str in which each str is a single line.
rotate: list
In this list the user can specify when to rotate the
projection. To do this the user must use dates in the format:
'YYYY-MMM', using 3 letters for the month. So for example if:
rotate = ['1997-Jun', '1998-Dec']
It means that the Orthographic projection will rotate for
those two months only, in spite of the data arrays having more
time steps.
wpx: int, optional
Width in pixels for the images. Default is 1920 px.
hpx: int, optional
Height in pixels for the images. Default is 1080 px.
lon0: float, optional
Initial longitude at which to start rotating every time step.
Default is Greenwich meridian.
dg: float, optional
Degrees step to advance rotation. The maximum possible value is
dg = 360 which means no rotation at all. The slowest possible is
dg = 1. Default is 1.
save_dir: str, optional
If the user wants to save all plotted frames in a folder, they
can set this keyword to a folder name and figures will be
stored there. Otherwise figures will not be saved. Default is to
not save plots.
dpi: int, optional
Dots per inch for every frame. Default is 300.
smooth: bool, optional
Use this boolean flag to choose whether to smooth the time
series or not. The smoothing will be done using a rolling mean
every 3-time steps, so if it is monthly data, the user will
actually be plotting 3-monthly rolling averages. Default is
False.
winds: list of xarray.DataArray, optional
If the user has U and V winds data and wants to put vectors on
top of the contours in the Orthographic projection plot, then
they must use this option for input winds like so:
winds = [u, v]
For this to work the user must also use the `xhres` keyword
because the function needs the resolution of the grid in the x
direction to be able to avoid plotting vectors out of the
projection bounds.
xhres: float, optional
Grid resolution in the x direction. This keyword is only used
if `winds` is being used, in which case it is a mandatory
argument.
""" # noqa
# unpack data and specifications
vmap, vline1, vline2, sst = dlist
spec1, spec2, spec3 = slist
# check if wind wanted and given
if winds is not None:
u, v = winds
if xhres is None:
msg = ('if you want wind you must specify the ' +
       'horizontal x resolution with \'xhres\' keyword')
raise ValueError(msg)
# only lats in between will have wind
w_ymin = 4
w_ymax = 28
# longitudes will have wind
wlon = 9
# get longitudes as x
x = u.longitude.values
y = u.latitude.values
mlon = x.size
# smooth area averages if wanted
if smooth is True:
vline1 = (vline1.rolling(time=3, min_periods=2)
.mean(keep_attrs=True))
vline2 = (vline2.rolling(time=3, min_periods=2)
.mean(keep_attrs=True))
# get number of times
ntim = vmap.time.size
# get oni series from exp
oni = oenso.oni(sst).values.flatten()
# authoring message
msg = _latex_authoring(title, author, affil, email)
# get dates
dates = ocal.get_dates(vmap.time.values)
# guess number of maps
nmpr = int(360 / dg)
nrots = len(rotate)
totm = (ntim - nrots) + nrots * nmpr
# counter for names
c = 1
# create save directory
save_path = oplot.create_save_dir(save_dir)
# step every time
for t in range(ntim):
# rotate only for specified dates
dstr = dates[t].strftime('%Y-%b')
if dstr in rotate:
rotation = True
nrot = nmpr # number of maps per rotation
else:
rotation = False
nrot = 1
if winds is not None:
clon = x[(x >= lon0 - xhres / 2) & (x < lon0 + xhres / 2)]
idx = np.where(x == clon)[0][0]
# rotate or not
for i in range(nrot):
# create figure instance
fig = oplot.plt.figure(1, figsize=(wpx / dpi, hpx / dpi))
# projection
prj = oplot.ort(central_longitude=lon0)
# create axes for all
ax1 = oplot.plt.subplot2grid((3, 6), (0, 0), colspan=3,
rowspan=3, projection=prj)
ax2 = oplot.plt.subplot2grid((3, 6), (0, 3), colspan=3)
ax3 = oplot.plt.subplot2grid((3, 6), (1, 3), colspan=3)
ax4 = oplot.plt.subplot2grid((3, 6), (2, 3), colspan=2)
ax5 = oplot.plt.subplot2grid((3, 6), (2, 5))
| |
a position in a big, worth-while office. You’re not always going
to be a detective’s apprentice, are you?”
“You bet I am! Watcha talking about? Me leave F. Stone! Not on your
fleeting existence! But, never mind that part of the argument, I’ll
remember your offer, and some day, when I have a million dollars to
invest, I’ll ask your advice where to lose it. But, now, you tell me what
you want.”
“Only for you to hint to Mr. Stone that he’d better advise Miss Wheeler
not to marry Mr. Keefe.”
“So’s you can have him.”
“Never mind that. There are other reasons—truly there are.”
“Well, then, my orders are to advise F. Stone to advise M. Wheeler not to
wed one C. Keefe.”
“That’s just it. But don’t say it right out to him. Use tact, which I
know you have—though nobody’d guess it to look at you—and sort of argue
around, so he’ll see it’s wiser for her not to marry him——”
“Why?”
<NAME> stamped her foot impatiently. “I’m not saying why. That’s
enough for me to know. You’ll get along better not knowing.”
“Does he know she’s the—the——”
“I don’t wonder you can’t say it! I can’t, either. Yes, he knows
she’s—it—but he’s so crazy about her, he doesn’t care. What is there in
that girl that gets all the men!”
“It’s her sweetness,” said Fibsy, with a positive nod of his head, as if
he were simply stating an axiom. “Yep, Keefe is clean gone daffy over
her. I don’t blame him—though, of course my taste runs more to——”
“Don’t you dare!” cried Genevieve, coquettishly.
“To the rouged type,” Fibsy went on, placidly. “To my mind a complexion
dabbed on is far more attractive than nature’s tints.”
<NAME> burst into laughter and, far from offended, she said:
“You’re a darling boy, and I’ll never forget you—even in my will; now, to
come back to our dear old brass tacks. Will you tip a gentle hint to the
great Stone?”
“Oh, lord, yes—I’ll tip him a dozen—tactfully, too. Don’t worry as to my
discretion. But I don’t mind telling you I might as well tip the
Washington monument. You see, <NAME>. has made up his mind.”
“As to the murderer?”
“Yep.”
“Who is it?”
“Haven’t an idea—and if I had, I’d say I hadn’t. You see, I’m his
trusty.”
“Oh, well, in any case, you can put in a word against Mr. Keefe, can’t
you?”
But Genevieve had lost interest in her project. She realized if <NAME>
had accomplished his purpose and had solved the murder mystery he would
be apt to take small interest in the love affairs of herself or Maida
Wheeler, either.
“He won’t think much of his cherished trusty, if you don’t do the errand
he sent you on,” she said, rather crossly.
Fibsy gave her a reproachful glance. “This, from you!” he said,
dramatically. “Farewell, fair but false! I go to seek a fairer maiden,
and I know where to find her!”
He went flying across the lawn, for he had caught a glimpse of Maida in
the garden.
“<NAME>,” he said, as he reached her, “will you please come now to
see <NAME>? He wants you.”
“Certainly,” she replied, and turning, followed him.
Genevieve joined them, and the three went to Stone’s rooms.
“<NAME>,” the detective said, without preamble, “I want you to tell
me a few things, please. You’ll excuse me if my questions seem rather
pointed, also, if they seem to be queries already answered. Did you kill
<NAME>?”
“Yes,” said Maida, speaking wearily, as if tired of making the assertion.
“You know no one believes that statement?”
“I can’t help that, Mr. Stone,” she said, with a listless manner.
“That is, no one but one person—your father. He believes it.”
“Father!” exclaimed the girl in evident amazement.
“Yes; he believes you for the best of all possible reasons: He saw you
shoot.”
“What, <NAME>? My father! Saw me shoot <NAME>!”
“Yes; he says so. That is not strange, when, as you say, you fired the
pistol from where you stood in the bay window, and <NAME> stood by
or near the victim.”
“But—I don’t understand. You say, father says he _saw_ me?”
“Yes, he told me that.”
Maida was silent, but she was evidently thinking deeply and rapidly.
“This is a trap of some sort, Mr. Stone,” she said at last. “My father
didn’t see me shoot—he couldn’t have seen me, and consequently he
couldn’t say he did! He wouldn’t lie about it!”
“But he said, at one time, that he did the shooting himself. Was not that
an untruth?”
“Of a quite different sort. He said that in a justifiable effort to save
me. But this other matter—for him to say he saw me shoot—when he
didn’t—he couldn’t——”
“Why couldn’t he, <NAME>? Why was it so impossible for your father
to see you commit that crime, when he was right there?”
“Because—because—oh, Mr. Stone, I don’t know what to say! I feel sure I
mustn’t say anything, or I shall regret it.”
“Would you like your father to come here and tell us about it?”
“No;—or, yes. Oh, I don’t know. Jeffrey, help me!”
Allen had sat silently brooding all through this conversation. He had not
looked at Maida, keeping his gaze turned out of the window. He was sorely
hurt at her attitude in the Keefe matter; he was puzzled at her speech
regarding her father; and he was utterly uncertain as to his own duty or
privilege in the whole affair. But at her appeal, he turned joyfully
toward her.
“Oh, Maida,” he cried, “let me help you. Do get your father here, now,
and settle this question. Then, we’ll see what next.”
“Call him, then,” said Maida, but she turned very white, and paid no
further attention to Allen. She was still lost in thought, when her
father arrived and joined the group.
“You said, <NAME>,” Stone began at once, “that you saw your daughter
fire the shot that killed Mr. Appleby?”
“I did say that,” <NAME> replied, “because it is true. And
because I am convinced that the truth will help us all better than any
further endeavor to prove a falsehood. I did see you, Maida darling, and
I tried very hard to take the blame myself. But it has been proved to me
by Mr. Stone that my pretence is useless, and so I’ve concluded that the
fact must come out, in hope of a better result than from concealment. Do
not fear, my darling, no harm shall come to you.”
“And you said you did it, father, and mother said she did it.”
“Yes, of course, I told your mother the truth, and we plotted—yes,
plotted for each of us to confess to the deed, in a wild hope of somehow
saving our little girl.”
“And you saw me shoot, father?”
“Why, yes, dear—that is, I heard the shot, and looked up to see you
standing there with consternation and guilt on your dear face. Your arm
had then dropped to your side, but your whole attitude was unmistakable.
I couldn’t shut my eyes to the evident fact that there was no one else
who could have done the deed.”
“There must have been, father—for—I didn’t do it.”
“I knew you didn’t! Oh, Maida!” With a bound Allen was at her side and
his arm went round her. But she moved away from him, and went on
talking—still in a strained, unnatural voice, but steadily and
straightforwardly.
“No; I didn’t shoot <NAME>. I’ve been saying so, to shield my
father. I thought he did it.”
“Maida! Is it possible?” and <NAME> looked perplexed. “But, oh,
I’m so glad to hear your statement.”
“But who did do it, then?” <NAME> asked, bluntly.
“Who cares, so long as it wasn’t any of the Wheelers!” exclaimed Jeffrey
Allen, unable to contain his gladness. “Oh, Maida——”
But again she waved him away from her.
“I don’t understand, <NAME>,” she began; “I don’t know where these
disclosures will lead. I hope, not back to my mother——”
“No, Maida,” said her father, “there’s no fear of that.”
Reassured, Maida went on. “Perhaps I can’t be believed now, after my
previous insistence on my guilt, but God knows it is the truth; I am
utterly innocent of the crime.”
“I believe it,” said Mr. Stone. “There was little evidence against
you, except your own confession. Now you’ve retracted that it only
remains for me to find the real criminal.”
“Can | |
# casper4/simple_casper.v.py
# Information about validators
validators: public({
# Amount of wei the validator holds
deposit: wei_value,
# The dynasty the validator is joining
dynasty_start: num,
# The dynasty the validator joined for the first time
original_dynasty_start: num,
# The dynasty the validator is leaving
dynasty_end: num,
# The timestamp at which the validator can withdraw
withdrawal_epoch: num,
# The address which the validator's signatures must verify to (to be later replaced with validation code)
addr: address,
# Address to withdraw to
withdrawal_addr: address,
# Previous epoch in which this validator committed
prev_commit_epoch: num
}[num])
# The current dynasty (validator set changes between dynasties)
dynasty: public(num)
# Amount of wei added to the total deposits in the next dynasty
next_dynasty_wei_delta: wei_value
# Amount of wei added to the total deposits in the dynasty after that
second_next_dynasty_wei_delta: wei_value
# Total deposits during this dynasty
total_deposits: public(wei_value[num])
# Mapping of dynasty to start epoch of that dynasty
dynasty_start_epoch: public(num[num])
# Mapping of epoch to what dynasty it is
dynasty_in_epoch: public(num[num])
# Information for use in processing cryptoeconomic commitments
consensus_messages: public({
# How many prepares are there for this hash (hash of message hash + view source) from the current dynasty
prepares: wei_value[bytes32],
# Bitmap of which validator IDs have already prepared
prepare_bitmap: num256[num][bytes32],
# From the previous dynasty
prev_dyn_prepares: wei_value[bytes32],
# Is a prepare referencing the given ancestry hash justified?
ancestry_hash_justified: bool[bytes32],
# Is a commit on the given hash justified?
hash_justified: bool[bytes32],
# How many commits are there for this hash
commits: wei_value[bytes32],
# And from the previous dynasty
prev_dyn_commits: wei_value[bytes32],
# Was the block committed?
committed: bool,
# Value used to calculate the per-epoch fee that validators should be charged
deposit_scale_factor: decimal
}[num]) # index: epoch
# A bitmap, where the ith bit of dynasty_mask[arg1][arg2] shows
# whether or not validator arg1 is active during dynasty arg2*256+i
dynasty_mask: num256[num][num]
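# For example (illustrative, mirroring the lookup used in
# check_eligible_in_epoch below), the bit for dynasty d of validator v is read as:
#   bitwise_and(self.dynasty_mask[v][d / 256], shift(as_num256(1), d % 256))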
# ancestry[x][y] = k > 0: x is a kth generation ancestor of y
ancestry: public(num[bytes32][bytes32])
# Number of validators
nextValidatorIndex: public(num)
# Time between blocks
block_time: timedelta
# Length of an epoch in blocks
epoch_length: num
# Withdrawal delay
withdrawal_delay: timedelta
# Delay after which a message can be slashed due to absence of justification
insufficiency_slash_delay: timedelta
# Current epoch
current_epoch: public(num)
# Can withdraw destroyed deposits
owner: address
# Total deposits destroyed
total_destroyed: wei_value
# Sighash calculator library address
sighasher: address
# Purity checker library address
purity_checker: address
# Reward for preparing or committing, as fraction of deposit size
reward_factor: public(decimal)
# Desired total ether given out assuming 1M ETH deposited
reward_at_1m_eth: decimal
# Have I already been initialized?
initialized: bool
# Log topic for prepare
prepare_log_topic: bytes32
# Log topic for commit
commit_log_topic: bytes32
def initiate():
assert not self.initialized
self.initialized = True
# Set Casper parameters
self.block_time = 14
self.epoch_length = 100
# Only ~11.5 days, for testing purposes
self.withdrawal_delay = 1000000
# Only ~1 day, for testing purposes
self.insufficiency_slash_delay = 86400
# Temporary backdoor for testing purposes (to allow recovering destroyed deposits)
self.owner = 0x1Db3439a222C519ab44bb1144fC28167b4Fa6EE6
# Add an initial validator
self.validators[0] = {
deposit: as_wei_value(3, ether),
dynasty_start: 0,
dynasty_end: 1000000000000000000000000000000,
original_dynasty_start: 0,
withdrawal_epoch: 1000000000000000000000000000000,
addr: 0x1Db3439a222C519ab44bb1144fC28167b4Fa6EE6,
withdrawal_addr: 0x1Db3439a222C519ab44bb1144fC28167b4Fa6EE6,
prev_commit_epoch: 0,
}
self.nextValidatorIndex = 1
# Initialize the epoch counter
self.current_epoch = block.number / self.epoch_length
# Set the sighash calculator address
self.sighasher = 0x476c2cA9a7f3B16FeCa86512276271FAf63B6a24
# Set the purity checker address
self.purity_checker = 0xD7a3BD6C9eA32efF147d067f907AE6b22d436F91
# Set an initial root of the epoch hash chain
self.consensus_messages[0].ancestry_hash_justified[0x0000000000000000000000000000000000000000000000000000000000000000] = True
# self.consensus_messages[0].committed = True
# Set initial total deposit counter
self.total_deposits[0] = as_wei_value(3, ether)
# Set deposit scale factor
self.consensus_messages[0].deposit_scale_factor = 1000000000000000000.0
# Total ETH given out assuming 1m ETH deposits
self.reward_at_1m_eth = 12.5
# Log topics for prepare and commit
self.prepare_log_topic = sha3("prepare()")
self.commit_log_topic = sha3("commit()")
# Called at the start of any epoch
def initialize_epoch(epoch: num):
# Check that the epoch actually has started
computed_current_epoch = block.number / self.epoch_length
assert epoch <= computed_current_epoch and epoch == self.current_epoch + 1
# Set the epoch number
self.current_epoch = epoch
# Increment the dynasty
if self.consensus_messages[epoch - 1].committed:
self.dynasty += 1
self.total_deposits[self.dynasty] = self.total_deposits[self.dynasty - 1] + self.next_dynasty_wei_delta
self.next_dynasty_wei_delta = self.second_next_dynasty_wei_delta
self.second_next_dynasty_wei_delta = 0
self.dynasty_start_epoch[self.dynasty] = epoch
self.dynasty_in_epoch[epoch] = self.dynasty
# Compute square root factor
ether_deposited_as_number = self.total_deposits[self.dynasty] / as_wei_value(1, ether)
sqrt = ether_deposited_as_number / 2.0
for i in range(20):
sqrt = (sqrt + (ether_deposited_as_number / sqrt)) / 2
# Reward factor is the reward given for preparing or committing as a
# fraction of that validator's deposit size
base_coeff = 1.0 / sqrt * (self.reward_at_1m_eth / 1000)
# Rules:
# * You are penalized 2x per epoch
# * If you prepare, you get 1.5x, and if you commit you get another 1.5x
# Hence, assuming 100% performance, your reward per epoch is x
self.reward_factor = 1.5 * base_coeff
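    # Worked example (illustrative numbers): with base_coeff = 0.0001 a validator
    # is debited 2 * 0.0001 of its deposit for the epoch, and earns back
    # 1.5 * 0.0001 for preparing plus 1.5 * 0.0001 for committing, i.e. a net of
    # +0.0001 (= base_coeff) at 100% performance.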
self.consensus_messages[epoch].deposit_scale_factor = self.consensus_messages[epoch - 1].deposit_scale_factor * (1 - 2 * base_coeff)
# Send a deposit to join the validator set
def deposit(validation_addr: address, withdrawal_addr: address):
assert self.current_epoch == block.number / self.epoch_length
assert extract32(raw_call(self.purity_checker, concat('\xa1\x90>\xab', as_bytes32(validation_addr)), gas=500000, outsize=32), 0) != as_bytes32(0)
self.validators[self.nextValidatorIndex] = {
deposit: msg.value,
dynasty_start: self.dynasty + 2,
original_dynasty_start: self.dynasty + 2,
dynasty_end: 1000000000000000000000000000000,
withdrawal_epoch: 1000000000000000000000000000000,
addr: validation_addr,
withdrawal_addr: withdrawal_addr,
prev_commit_epoch: 0,
}
self.nextValidatorIndex += 1
self.second_next_dynasty_wei_delta += msg.value
# Log in or log out from the validator set. A logged-out validator can log
# back in later; if they do not log back in for an entire withdrawal period,
# they can withdraw their money.
def flick_status(logout_msg: bytes <= 1024):
assert self.current_epoch == block.number / self.epoch_length
# Get hash for signature, and implicitly assert that it is an RLP list
# consisting solely of RLP elements
sighash = extract32(raw_call(self.sighasher, logout_msg, gas=200000, outsize=32), 0)
# Extract parameters
values = RLPList(logout_msg, [num, num, bool, bytes])
validator_index = values[0]
epoch = values[1]
login_flag = values[2]
sig = values[3]
assert self.current_epoch == epoch
# Signature check
assert extract32(raw_call(self.validators[validator_index].addr, concat(sighash, sig), gas=500000, outsize=32), 0) == as_bytes32(1)
# Logging in
if login_flag:
# Check that we are logged out
assert self.validators[validator_index].dynasty_end < self.dynasty
# Check that we logged out for less than 3840 dynasties (min: ~2 months)
assert self.validators[validator_index].dynasty_end >= self.dynasty - 3840
# Apply the per-epoch deposit penalty
prev_login_epoch = self.dynasty_start_epoch[self.validators[validator_index].dynasty_start]
prev_logout_epoch = self.dynasty_start_epoch[self.validators[validator_index].dynasty_end + 1]
self.validators[validator_index].deposit = \
floor(self.validators[validator_index].deposit *
(self.consensus_messages[prev_logout_epoch].deposit_scale_factor /
self.consensus_messages[prev_login_epoch].deposit_scale_factor))
# Log back in
# Go through the dynasty mask to clear out the ineligible dynasties
old_ds = self.validators[validator_index].dynasty_end
new_ds = self.dynasty + 2
for i in range(old_ds / 256, old_ds / 256 + 16):
if old_ds > i * 256:
s = old_ds % 256
else:
s = 0
if new_ds < i * 256 + 256:
e = new_ds % 256
else:
e = 256
self.dynasty_mask[validator_index][i] = num256_sub(shift(as_num256(1), e), shift(as_num256(1), s))
if e < 256:
break
self.validators[validator_index].dynasty_start = new_ds
self.validators[validator_index].dynasty_end = 1000000000000000000000000000000
self.second_next_dynasty_wei_delta += self.validators[validator_index].deposit
# Logging out
else:
# Check that we haven't already withdrawn
assert self.validators[validator_index].dynasty_end >= self.dynasty + 2
# Set the end dynasty
self.validators[validator_index].dynasty_end = self.dynasty + 2
self.second_next_dynasty_wei_delta -= self.validators[validator_index].deposit
# Set the withdrawal date
self.validators[validator_index].withdrawal_epoch = self.current_epoch + self.withdrawal_delay / self.block_time / self.epoch_length
# Removes a validator from the validator pool
def delete_validator(validator_index: num):
self.validators[validator_index] = {
deposit: 0,
dynasty_start: 0,
dynasty_end: 0,
original_dynasty_start: 0,
withdrawal_epoch: 0,
addr: None,
withdrawal_addr: None,
prev_commit_epoch: 0,
}
# Withdraw deposited ether
def withdraw(validator_index: num):
# Check that we can withdraw
assert self.current_epoch >= self.validators[validator_index].withdrawal_epoch
# Apply the per-epoch deposit penalty
prev_login_epoch = self.dynasty_start_epoch[self.validators[validator_index].dynasty_start]
prev_logout_epoch = self.dynasty_start_epoch[self.validators[validator_index].dynasty_end + 1]
self.validators[validator_index].deposit = \
floor(self.validators[validator_index].deposit *
(self.consensus_messages[prev_logout_epoch].deposit_scale_factor /
self.consensus_messages[prev_login_epoch].deposit_scale_factor))
# Withdraw
send(self.validators[validator_index].withdrawal_addr, self.validators[validator_index].deposit)
self.delete_validator(validator_index)
# Checks if a given validator could have prepared in a given epoch
def check_eligible_in_epoch(validator_index: num, epoch: num) -> num(const):
# Time limit for submitting a prepare
assert epoch > self.current_epoch - 3840
# Original starting dynasty of the validator; fail if before
do = self.validators[validator_index].original_dynasty_start
# Ending dynasty of the current login period
de = self.validators[validator_index].dynasty_end
# Dynasty of the prepare
dc = self.dynasty_in_epoch[epoch]
# Dynasty before the prepare (for prev dynasty checking)
dp = dc - 1
# Check against mask to see if the dynasty was eligible before
cur_in_mask = bitwise_and(self.dynasty_mask[validator_index][dc / 256], shift(as_num256(1), dc % 256))
prev_in_mask = bitwise_and(self.dynasty_mask[validator_index][dp / 256], shift(as_num256(1), dp % 256))
o = 0
# Return |