code (stringlengths 10–805k) | def_use_chains (sequencelengths 0–667)
---|---|
from __future__ import unicode_literals
import Queue
import datetime
import errno
import gettext
import itertools
import json
import locale
import os
import subprocess
import sys
import threading
import wx
import openslides
from openslides.utils.main import (
detect_openslides_type,
filesystem2unicode,
unicode2filesystem,
get_default_user_data_path,
get_port,
PortableDirNotWritable,
)
# NOTE: django's translation module can't be used here since it requires
# a defined settings module
_translations = gettext.NullTranslations()
_ = lambda text: _translations.ugettext(text)
ungettext = lambda msg1, msg2, n: _translations.ungettext(msg1, msg2, n)
def get_data_path(*args):
path = filesystem2unicode(__file__)
return os.path.join(os.path.dirname(path), "data", *args)
class RunCmdEvent(wx.PyCommandEvent):
def __init__(self, evt_type, evt_id):
super(RunCmdEvent, self).__init__(evt_type, evt_id)
self.running = False
self.exitcode = None
EVT_RUN_CMD_ID = wx.NewEventType()
EVT_RUN_CMD = wx.PyEventBinder(EVT_RUN_CMD_ID, 1)
class RunCommandControl(wx.Panel):
UPDATE_INTERVAL = 500
def __init__(self, parent):
super(RunCommandControl, self).__init__(parent)
self.child_process = None
self.output_queue = Queue.Queue()
self.output_read_thread = None
self.canceled = False
self.output_mutex = threading.RLock()
vbox = wx.BoxSizer(wx.VERTICAL)
self.te_output = wx.TextCtrl(
self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
vbox.Add(self.te_output, 1, wx.EXPAND)
self.update_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_update_timer, self.update_timer)
self.SetSizerAndFit(vbox)
def _read_output(self):
while True:
# NOTE: don't use iterator interface since it uses an
# internal buffer and we don't see output in a timely fashion
line = self.child_process.stdout.readline()
if not line:
break
self.output_queue.put(line)
def is_alive(self):
if self.child_process is None:
return False
return self.child_process.poll() is None
def run_command(self, *args):
if self.is_alive():
raise ValueError("already running a command")
cmd = [sys.executable, "-u", "-m", "openslides"]
cmd.extend(args)
        # XXX: subprocess on Windows only handles byte strings;
        # with Python 3 this will hopefully no longer be the case
cmd = [unicode2filesystem(x) for x in cmd]
creationflags = getattr(subprocess, "CREATE_NEW_PROCESS_GROUP", 0)
self.child_process = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, creationflags=creationflags)
self.child_process.stdin.close()
self.output_read_thread = threading.Thread(target=self._read_output)
self.output_read_thread.start()
self.update_timer.Start(self.UPDATE_INTERVAL)
evt = RunCmdEvent(EVT_RUN_CMD_ID, self.GetId())
evt.running = True
self.GetEventHandler().ProcessEvent(evt)
def cancel_command(self):
if not self.is_alive():
return
# TODO: try sigint first, then get more aggressive if user insists
self.child_process.kill()
self.canceled = True
def on_update_timer(self, evt):
is_alive = self.is_alive()
if not is_alive:
# join thread to make sure everything was read
self.output_read_thread.join()
self.output_read_thread = None
for line_no in itertools.count():
try:
data = self.output_queue.get(block=False)
except Queue.Empty:
break
else:
                # XXX: check whether django uses utf-8 or locale for
                # its cli output
text = data.decode("utf-8", errors="replace")
with self.output_mutex:
self.te_output.AppendText(text)
# avoid waiting too long here if child is still alive
if is_alive and line_no > 10:
break
if not is_alive:
exitcode = self.child_process.returncode
self.update_timer.Stop()
self.child_process = None
evt = RunCmdEvent(EVT_RUN_CMD_ID, self.GetId())
evt.running = False
evt.exitcode = exitcode
self.GetEventHandler().ProcessEvent(evt)
def append_message(self, text, newline="\n"):
with self.output_mutex:
self.te_output.AppendText(text + newline)
class SettingsDialog(wx.Dialog):
def __init__(self, parent):
super(SettingsDialog, self).__init__(parent, wx.ID_ANY, _("Settings"))
grid = wx.GridBagSizer(5, 5)
row = 0
lb_host = wx.StaticText(self, label=_("&Host:"))
grid.Add(lb_host, pos=(row, 0))
self.tc_host = wx.TextCtrl(self)
grid.Add(self.tc_host, pos=(row, 1), flag=wx.EXPAND)
row += 1
lb_port = wx.StaticText(self, label=_("&Port:"))
grid.Add(lb_port, pos=(row, 0))
self.tc_port = wx.TextCtrl(self)
grid.Add(self.tc_port, pos=(row, 1), flag=wx.EXPAND)
row += 1
sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        if sizer is not None:
grid.Add((0, 0), pos=(row, 0), span=(1, 2))
row += 1
grid.Add(sizer, pos=(row, 0), span=(1, 2))
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=5, proportion=1)
self.SetSizerAndFit(box)
@property
def host(self):
return self.tc_host.GetValue()
@host.setter
def host(self, host):
self.tc_host.SetValue(host)
@property
def port(self):
return self.tc_port.GetValue()
@port.setter
def port(self, port):
self.tc_port.SetValue(port)
class BackupSettingsDialog(wx.Dialog):
# NOTE: keep order in sync with _update_interval_choices()
_INTERVAL_UNITS = ["second", "minute", "hour"]
def __init__(self, parent):
super(BackupSettingsDialog, self).__init__(
parent, wx.ID_ANY, _("Database backup"))
self._interval_units = {}
grid = wx.GridBagSizer(5, 5)
row = 0
self.cb_backup = wx.CheckBox(
self, label=_("&Regularly backup database"))
self.cb_backup.SetValue(True)
self.cb_backup.Bind(wx.EVT_CHECKBOX, self.on_backup_checked)
grid.Add(self.cb_backup, pos=(row, 0), span=(1, 3))
row += 1
lb_dest = wx.StaticText(self, label=_("&Destination:"))
grid.Add(lb_dest, pos=(row, 0))
style = wx.FLP_SAVE | wx.FLP_USE_TEXTCTRL
self.fp_dest = wx.FilePickerCtrl(self, style=style)
grid.Add(self.fp_dest, pos=(row, 1), span=(1, 2), flag=wx.EXPAND)
row += 1
lb_interval = wx.StaticText(self, label=_("&Every"))
grid.Add(lb_interval, pos=(row, 0))
self.sb_interval = wx.SpinCtrl(self, min=1, initial=1)
self.sb_interval.Bind(wx.EVT_SPINCTRL, self.on_interval_changed)
grid.Add(self.sb_interval, pos=(row, 1))
self.ch_interval_unit = wx.Choice(self)
grid.Add(self.ch_interval_unit, pos=(row, 2))
row += 1
grid.AddGrowableCol(1)
sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        if sizer is not None:
grid.Add((0, 0), pos=(row, 0), span=(1, 3))
row += 1
grid.Add(sizer, pos=(row, 0), span=(1, 3))
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=5, proportion=1)
self.SetSizerAndFit(box)
self._update_interval_choices()
self._update_backup_enabled()
@property
def backupdb_enabled(self):
return self.cb_backup.GetValue()
@backupdb_enabled.setter
def backupdb_enabled(self, enabled):
self.cb_backup.SetValue(enabled)
self._update_backup_enabled()
@property
def backupdb_destination(self):
return self.fp_dest.GetPath()
@backupdb_destination.setter
def backupdb_destination(self, path):
self.fp_dest.SetPath(path)
@property
def interval(self):
return self.sb_interval.GetValue()
@interval.setter
def interval(self, value):
self.sb_interval.SetValue(value)
self._update_interval_choices()
@property
def interval_unit(self):
return self._INTERVAL_UNITS[self.ch_interval_unit.GetSelection()]
@interval_unit.setter
def interval_unit(self, unit):
try:
idx = self._INTERVAL_UNITS.index(unit)
        except ValueError:
raise ValueError("Unknown unit {0}".format(unit))
self.ch_interval_unit.SetSelection(idx)
def _update_interval_choices(self):
count = self.sb_interval.GetValue()
choices = [
ungettext("second", "seconds", count),
ungettext("minute", "minutes", count),
ungettext("hour", "hours", count),
]
current = self.ch_interval_unit.GetSelection()
if current == wx.NOT_FOUND:
current = 2 # default to hour
self.ch_interval_unit.Clear()
self.ch_interval_unit.AppendItems(choices)
self.ch_interval_unit.SetSelection(current)
def _update_backup_enabled(self):
checked = self.cb_backup.IsChecked()
self.fp_dest.Enable(checked)
self.sb_interval.Enable(checked)
self.ch_interval_unit.Enable(checked)
def on_backup_checked(self, evt):
self._update_backup_enabled()
def on_interval_changed(self, evt):
self._update_interval_choices()
# TODO: validate settings on close (e.g. non-empty path if backup is
# enabled)
class MainWindow(wx.Frame):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent, title="OpenSlides")
icons = wx.IconBundleFromFile(
get_data_path("openslides.ico"),
wx.BITMAP_TYPE_ICO)
self.SetIcons(icons)
self.server_running = False
self.gui_settings_path = None
self.gui_initialized = False
self.backupdb_enabled = False
self.backupdb_destination = ""
self.backupdb_interval = 15
self.backupdb_interval_unit = "minute"
self.last_backup = None
self.backup_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_backup_timer, self.backup_timer)
spacing = 5
panel = wx.Panel(self)
grid = wx.GridBagSizer(spacing, spacing)
# logo & about button
logo_box = wx.BoxSizer(wx.HORIZONTAL)
grid.Add(logo_box, pos=(0, 0), flag=wx.EXPAND)
row = 0
fp = get_data_path("openslides-logo_wide.png")
with open(fp, "rb") as f:
logo_wide_bmp = wx.ImageFromStream(f).ConvertToBitmap()
logo_wide = wx.StaticBitmap(panel, wx.ID_ANY, logo_wide_bmp)
logo_box.AddSpacer(2 * spacing)
logo_box.Add(logo_wide)
logo_box.AddStretchSpacer()
version_str = _("Version {0}").format(openslides.get_version())
lb_version = wx.StaticText(panel, label=version_str)
font = lb_version.GetFont()
font.SetPointSize(8)
lb_version.SetFont(font)
logo_box.Add(lb_version, flag=wx.ALIGN_CENTER_VERTICAL)
self.bt_about = wx.Button(panel, label=_("&About..."))
self.bt_about.Bind(wx.EVT_BUTTON, self.on_about_clicked)
grid.Add(self.bt_about, pos=(row, 1), flag=wx.ALIGN_CENTER_VERTICAL)
row += 1
grid.Add((0, spacing), pos=(row, 0), span=(1, 2))
row += 1
# server settings
server_settings = wx.StaticBox(panel, wx.ID_ANY, _("Server Settings"))
server_box = wx.StaticBoxSizer(server_settings, wx.VERTICAL)
grid.Add(server_box, pos=(row, 0), flag=wx.EXPAND)
self._host = None
self._port = None
hbox = wx.BoxSizer(wx.HORIZONTAL)
server_box.Add(hbox, flag=wx.EXPAND)
self.lb_host = wx.StaticText(panel)
hbox.Add(self.lb_host, flag=wx.ALIGN_CENTER_VERTICAL)
hbox.AddStretchSpacer()
self.lb_port = wx.StaticText(panel)
hbox.Add(self.lb_port, flag=wx.ALIGN_CENTER_VERTICAL)
hbox.AddStretchSpacer()
self.bt_settings = wx.Button(panel, label=_("S&ettings..."))
self.bt_settings.Bind(wx.EVT_BUTTON, self.on_settings_clicked)
hbox.Add(self.bt_settings)
server_box.AddSpacer(spacing)
self.cb_start_browser = wx.CheckBox(
panel, label=_("Automatically open &browser"))
self.cb_start_browser.SetValue(True)
server_box.Add(self.cb_start_browser)
server_box.AddStretchSpacer()
server_box.AddSpacer(spacing)
self.bt_server = wx.Button(panel, label=_("&Start server"))
self.bt_server.Bind(wx.EVT_BUTTON, self.on_start_server_clicked)
server_box.Add(self.bt_server, flag=wx.EXPAND)
self.host = "0.0.0.0"
self.port = unicode(get_port(self.host, 80))
# "action" buttons
action_vbox = wx.BoxSizer(wx.VERTICAL)
action_vbox.AddSpacer(3 * spacing)
grid.Add(action_vbox, pos=(row, 1))
self.bt_backup = wx.Button(panel, label=_("&Backup database..."))
self.bt_backup.Bind(wx.EVT_BUTTON, self.on_backup_clicked)
action_vbox.Add(self.bt_backup)
action_vbox.AddSpacer(spacing)
self.bt_sync_db = wx.Button(panel, label=_("S&ync database"))
self.bt_sync_db.Bind(wx.EVT_BUTTON, self.on_syncdb_clicked)
action_vbox.Add(self.bt_sync_db)
action_vbox.AddSpacer(spacing)
self.bt_reset_admin = wx.Button(panel, label=_("&Reset admin"))
self.bt_reset_admin.Bind(wx.EVT_BUTTON, self.on_reset_admin_clicked)
action_vbox.Add(self.bt_reset_admin)
row += 1
# command output
self.cmd_run_ctrl = RunCommandControl(panel)
self.cmd_run_ctrl.Bind(EVT_RUN_CMD, self.on_run_cmd_changed)
grid.Add(
self.cmd_run_ctrl,
pos=(row, 0), span=(1, 2),
flag=wx.EXPAND)
grid.AddGrowableCol(0)
grid.AddGrowableRow(3)
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=spacing, proportion=1)
panel.SetSizerAndFit(box)
self.Fit()
self.SetMinSize(self.ClientToWindowSize(box.GetMinSize()))
self.SetInitialSize(wx.Size(500, 400))
self.Bind(wx.EVT_CLOSE, self.on_close)
def initialize_gui(self):
if self.gui_initialized:
return True
        # Set the path for the GUI settings to the default user data directory
        # for this OpenSlides type. This does not depend on any command line
        # argument the user might pass.
openslides_type = detect_openslides_type()
try:
default_user_data_path = get_default_user_data_path(openslides_type)
except PortableDirNotWritable:
wx.MessageBox(
_("The portable directory is not writable. Please copy the "
"openslides portable to a writeable location and start it "
"again from there"),
_("Error: Portable directory not writable"),
wx.OK | wx.ICON_ERROR)
return False
self.gui_settings_path = os.path.join(
default_user_data_path, 'openslides', 'gui_settings.json')
self.load_gui_settings()
self.apply_backup_settings()
self.gui_initialized = True
return True
@property
def backup_interval_seconds(self):
if self.backupdb_interval_unit == "second":
factor = 1
elif self.backupdb_interval_unit == "minute":
factor = 60
elif self.backupdb_interval_unit == "hour":
factor = 3600
return self.backupdb_interval * factor
@property
def host(self):
return self._host
@host.setter
def host(self, host):
self._host = host
self.lb_host.SetLabel(_("Host: {0}").format(host))
@property
def port(self):
return self._port
@port.setter
def port(self, port):
self._port = port
self.lb_port.SetLabel(_("Port: {0}").format(port))
def load_gui_settings(self):
if self.gui_settings_path is None:
return
try:
f = open(self.gui_settings_path, "rb")
except IOError as e:
if e.errno == errno.ENOENT:
return
raise
with f:
settings = json.load(f)
def setattr_unless_none(attr, value):
            if value is not None:
setattr(self, attr, value)
backup_settings = settings.get("database_backup", {})
setattr_unless_none("backupdb_enabled", backup_settings.get("enabled"))
setattr_unless_none(
"backupdb_destination", backup_settings.get("destination"))
setattr_unless_none(
"backupdb_interval", backup_settings.get("interval"))
setattr_unless_none(
"backupdb_interval_unit", backup_settings.get("interval_unit"))
last_backup = backup_settings.get("last_backup")
        if last_backup is not None:
self.last_backup = datetime.datetime.strptime(
last_backup, "%Y-%m-%d %H:%M:%S")
server_settings = settings.get("server_settings", {})
setattr_unless_none("host", server_settings.get("host"))
setattr_unless_none("port", server_settings.get("port"))
def save_gui_settings(self):
if self.last_backup is None:
last_backup = None
else:
last_backup = self.last_backup.strftime("%Y-%m-%d %H:%M:%S")
settings = {
"database_backup": {
"enabled": self.backupdb_enabled,
"destination": self.backupdb_destination,
"internal": self.backupdb_interval,
"interval_unit": self.backupdb_interval_unit,
"last_backup": last_backup
},
"server_settings": {
"host": self.host,
"port": self.port,
},
}
dp = os.path.dirname(self.gui_settings_path)
if not os.path.exists(dp):
os.makedirs(dp)
with open(self.gui_settings_path, "wb") as f:
json.dump(settings, f, ensure_ascii=False, indent=4)
def apply_backup_settings(self):
if self.backupdb_enabled and self.server_running:
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=self.backup_interval_seconds)
ref = self.last_backup
if ref is None:
ref = now
ref += delta
d = ref - now
seconds = d.days * 86400 + d.seconds
if seconds < 1:
                seconds = 30  # avoid backup immediately after start
self.backup_timer.Start(seconds * 1000, True)
else:
self.backup_timer.Stop()
def do_backup(self):
cmd = [
sys.executable, "-u", "-m", "openslides", "backupdb",
self.backupdb_destination,
]
p = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.stdin.close()
output = p.stdout.read().strip()
exitcode = p.wait()
if output:
self.cmd_run_ctrl.append_message(output)
time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if exitcode == 0:
self.cmd_run_ctrl.append_message(
_("{0}: Database backup successful.").format(time))
else:
self.cmd_run_ctrl.append_message(
_("{0}: Database backup failed!").format(time))
self.last_backup = datetime.datetime.utcnow()
def on_syncdb_clicked(self, evt):
self.cmd_run_ctrl.append_message(_("Syncing database..."))
self.cmd_run_ctrl.run_command("syncdb")
def on_reset_admin_clicked(self, evt):
self.cmd_run_ctrl.append_message(_("Resetting admin user..."))
self.cmd_run_ctrl.run_command("createsuperuser")
def on_about_clicked(self, evt):
info = wx.AboutDialogInfo()
info.SetName("OpenSlides")
info.SetVersion(openslides.get_version())
info.SetDescription(_(
"OpenSlides is a free web based presentation and "
"assembly system.\n"
"OpenSlides is free software; licensed under the MIT license."
).replace(u" ", u"\u00a0"))
info.SetCopyright(_(u"\u00a9 2011-2014 by OpenSlides team"))
info.SetWebSite(("http://www.openslides.org/", "www.openslides.org"))
# XXX: at least on wxgtk this has no effect
info.SetIcon(self.GetIcon())
wx.AboutBox(info)
def on_start_server_clicked(self, evt):
if self.server_running:
self.cmd_run_ctrl.cancel_command()
return
if self._host == "0.0.0.0":
args = ["--port", self._port]
else:
args = ["--address", self._host, "--port", self._port]
if not self.cb_start_browser.GetValue():
args.append("--no-browser")
self.server_running = True
self.cmd_run_ctrl.run_command("start", *args)
# initiate backup_timer if backup is enabled
self.apply_backup_settings()
self.bt_server.SetLabel(_("&Stop server"))
def on_settings_clicked(self, evt):
dlg = SettingsDialog(self)
dlg.host = self._host
dlg.port = self._port
if dlg.ShowModal() == wx.ID_OK:
self.host = dlg.host
self.port = dlg.port
def on_backup_clicked(self, evt):
dlg = BackupSettingsDialog(self)
dlg.backupdb_enabled = self.backupdb_enabled
dlg.backupdb_destination = self.backupdb_destination
dlg.interval = self.backupdb_interval
dlg.interval_unit = self.backupdb_interval_unit
if dlg.ShowModal() == wx.ID_OK:
self.backupdb_enabled = dlg.backupdb_enabled
self.backupdb_destination = dlg.backupdb_destination
self.backupdb_interval = dlg.interval
self.backupdb_interval_unit = dlg.interval_unit
self.apply_backup_settings()
def on_run_cmd_changed(self, evt):
show_completion_msg = not evt.running
if self.server_running and not evt.running:
self.bt_server.SetLabel(_("&Start server"))
self.server_running = False
self.backup_timer.Stop()
if self.backupdb_enabled:
self.do_backup()
# no operation completed msg when stopping server
show_completion_msg = False
self.bt_settings.Enable(not evt.running)
self.bt_backup.Enable(not evt.running)
self.bt_sync_db.Enable(not evt.running)
self.bt_reset_admin.Enable(not evt.running)
self.bt_server.Enable(self.server_running or not evt.running)
if show_completion_msg:
if evt.exitcode == 0:
text = _("Operation successfully completed.")
else:
text = _("Operation failed (exit code = {0})").format(
evt.exitcode)
self.cmd_run_ctrl.append_message(text)
def on_backup_timer(self, evt):
if not self.backupdb_enabled:
return
self.do_backup()
self.backup_timer.Start(1000 * self.backup_interval_seconds, True)
def on_close(self, ev):
self.cmd_run_ctrl.cancel_command()
self.save_gui_settings()
self.Destroy()
class OpenslidesApp(wx.App):
def __init__(self):
super(OpenslidesApp, self).__init__(False)
def OnInit(self):
window = MainWindow()
self.SetTopWindow(window)
if not window.initialize_gui():
self.Exit()
return False
window.Show()
return True
def main():
locale.setlocale(locale.LC_ALL, "")
lang = locale.getdefaultlocale()[0]
if lang:
global _translations
localedir = filesystem2unicode(openslides.__file__)
localedir = os.path.dirname(localedir)
localedir = os.path.join(localedir, "locale")
_translations = gettext.translation(
"django", localedir, [lang], fallback=True)
app = OpenslidesApp()
app.MainLoop()
if __name__ == "__main__":
main()
| [def_use_chains offset pairs omitted] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import aldryn_apphooks_config.fields
import app_data.fields
import djangocms_text_ckeditor.fields
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '__first__'),
('djangocms_blog', '0009_latestpostsplugin_tags_new'),
]
operations = [
migrations.CreateModel(
name='BlogConfig',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('type', models.CharField(verbose_name='type', max_length=100)),
('namespace', models.CharField(default=None, verbose_name='instance namespace', unique=True, max_length=100)),
('app_data', app_data.fields.AppDataField(editable=False, default='{}')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BlogConfigTranslation',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('language_code', models.CharField(db_index=True, verbose_name='Language', max_length=15)),
('app_title', models.CharField(verbose_name='application title', max_length=234)),
('master', models.ForeignKey(editable=False, to='djangocms_blog.BlogConfig', related_name='translations', null=True)),
],
options={
'verbose_name': 'blog config Translation',
'db_table': 'djangocms_blog_blogconfig_translation',
'default_permissions': (),
'db_tablespace': '',
'managed': True,
},
),
migrations.CreateModel(
name='GenericBlogPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, serialize=False, primary_key=True, auto_created=True, to='cms.CMSPlugin')),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(verbose_name='app. config', blank=True, to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.AlterField(
model_name='posttranslation',
name='abstract',
field=djangocms_text_ckeditor.fields.HTMLField(default='', verbose_name='abstract', blank=True),
),
migrations.AddField(
model_name='authorentriesplugin',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(default=None, blank=True, verbose_name='app. config', to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='blogcategory',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(default=None, verbose_name='app. config', to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='latestpostsplugin',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(default=None, blank=True, verbose_name='app. config', to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(default=None, verbose_name='app. config', to='djangocms_blog.BlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default', null=True),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='blogconfigtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterField(
model_name='post',
name='sites',
field=models.ManyToManyField(to='sites.Site', help_text='Select sites in which to show the post. If none is set it will be visible in all the configured sites.', blank=True, verbose_name='Site(s)'),
),
]
| [def_use_chains offset pairs omitted] |
import copy
import re
from collections import defaultdict
from typing import List, Dict
from .substitution_augmenter import SubstitutionAugmenter
from ..actions import Chemical
from ..utils import extract_chemicals
from paragraph2actions.misc import TextWithActions
class CompoundNameAugmenter(SubstitutionAugmenter):
"""
Augments data by substituting compound names.
"""
def __init__(self, probability: float, compounds: List[str]):
"""
Args:
probability: probability with which to switch the compound name
compounds: list of names to use for substitution
"""
super().__init__(probability=probability, values=compounds)
def augment(self, sample: TextWithActions) -> TextWithActions:
sample = copy.deepcopy(sample)
chemicals = extract_chemicals(sample.actions)
# Build a dictionary of compound names and associated chemicals
# (necessary if the same chemical is present twice)
cpd_dict: Dict[str, List[Chemical]] = defaultdict(list)
for c in chemicals:
cpd_dict[c.name].append(c)
        # remove compound names that are contained in others; with this, if both '3-ethyltoluene' and
        # '2-bromo-3-ethyltoluene' are present as compounds, we will never substitute the shorter one.
for chemical_name in list(cpd_dict.keys()):
if any(chemical_name in cpd for cpd in cpd_dict.keys() if chemical_name != cpd):
cpd_dict.pop(chemical_name)
# For each chemical name, try substitution
for cpd_name in cpd_dict:
if not self.random_draw_passes() or cpd_name not in sample.text:
continue
new_name = self.draw_value()
sample.text = self.replace_in_text(
text=sample.text, compound=cpd_name, new_name=new_name
)
for c in cpd_dict[cpd_name]:
c.name = new_name
return sample
def replace_in_text(self, text: str, compound: str, new_name: str) -> str:
# We replace only at word boundaries, to avoid things like 'H2SO4 -> waterSO4' when replacing 'H2' by 'water'
pattern = re.compile(rf'\b{re.escape(compound)}\b')
return pattern.sub(new_name, text)
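# Illustrative usage sketch (not part of the original module): the compound
# names below are arbitrary, and the sample is assumed to be a TextWithActions
# instance produced elsewhere (e.g. by the paragraph2actions pipeline).
def _example_augmentation(sample: TextWithActions) -> TextWithActions:
    augmenter = CompoundNameAugmenter(
        probability=0.5,
        compounds=["acetone", "toluene", "diethyl ether"],
    )
    # Each compound name appearing both in sample.actions and verbatim in
    # sample.text is, with probability 0.5, replaced by a randomly drawn name.
    return augmenter.augment(sample)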
| [def_use_chains offset pairs omitted] |
# Generated by Django 3.1.13 on 2021-09-02 17:31
import django.contrib.postgres.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('stac_api', '0015_data_collection_summaries'),
]
operations = [
migrations.AddField(
model_name='collection',
name='summaries_geoadmin_lang',
field=django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=2),
blank=True,
default=list,
editable=False,
size=None
),
),
migrations.AlterField(
model_name='asset',
name='media_type',
field=models.CharField(
choices=[
(
'application/x.ascii-grid+zip',
'Zipped ESRI ASCII raster format (.asc) (application/x.ascii-grid+zip)'
),
(
'application/x.ascii-xyz+zip',
'Zipped XYZ (.xyz) (application/x.ascii-xyz+zip)'
), ('application/x.e00+zip', 'Zipped e00 (application/x.e00+zip)'), (
'image/tiff; application=geotiff',
'GeoTIFF (image/tiff; application=geotiff)'
), ('application/x.geotiff+zip', 'Zipped GeoTIFF (application/x.geotiff+zip)'),
('application/x.tiff+zip', 'Zipped TIFF (application/x.tiff+zip)'),
('application/x.png+zip', 'Zipped PNG (application/x.png+zip)'),
('application/x.jpeg+zip', 'Zipped JPEG (application/x.jpeg+zip)'),
(
'application/vnd.google-earth.kml+xml',
'KML (application/vnd.google-earth.kml+xml)'
),
(
'application/vnd.google-earth.kmz',
'Zipped KML (application/vnd.google-earth.kmz)'
), ('application/x.dxf+zip', 'Zipped DXF (application/x.dxf+zip)'),
('application/gml+xml', 'GML (application/gml+xml)'),
('application/x.gml+zip', 'Zipped GML (application/x.gml+zip)'),
('application/vnd.las', 'LIDAR (application/vnd.las)'),
('application/vnd.laszip', 'Zipped LIDAR (application/vnd.laszip)'), (
'application/x.shapefile+zip',
'Zipped Shapefile (application/x.shapefile+zip)'
),
(
'application/x.filegdb+zip',
'Zipped File Geodatabase (application/x.filegdb+zip)'
),
(
'application/x.ms-access+zip',
'Zipped Personal Geodatabase (application/x.ms-access+zip)'
), ('application/x.ms-excel+zip', 'Zipped Excel (application/x.ms-excel+zip)'),
('application/x.tab+zip', 'Zipped Mapinfo-TAB (application/x.tab+zip)'),
(
'application/x.tab-raster+zip',
'Zipped Mapinfo-Raster-TAB (application/x.tab-raster+zip)'
), ('application/x.csv+zip',
'Zipped CSV (application/x.csv+zip)'), ('text/csv', 'CSV (text/csv)'), (
'application/geopackage+sqlite3',
'Geopackage (application/geopackage+sqlite3)'
),
(
'application/x.geopackage+zip',
'Zipped Geopackage (application/x.geopackage+zip)'
), ('application/geo+json', 'GeoJSON (application/geo+json)'),
('application/x.geojson+zip', 'Zipped GeoJSON (application/x.geojson+zip)'),
(
'application/x.interlis; version=2.3',
'Interlis 2 (application/x.interlis; version=2.3)'
),
(
'application/x.interlis+zip; version=2.3',
'Zipped XTF (2.3) (application/x.interlis+zip; version=2.3)'
),
(
'application/x.interlis; version=1',
'Interlis 1 (application/x.interlis; version=1)'
),
(
'application/x.interlis+zip; version=1',
'Zipped ITF (application/x.interlis+zip; version=1)'
),
(
'image/tiff; application=geotiff; profile=cloud-optimized',
'Cloud Optimized GeoTIFF (COG) (image/tiff; application=geotiff; profile=cloud-optimized)'
), ('application/pdf', 'PDF (application/pdf)'),
('application/x.pdf+zip', 'Zipped PDF (application/x.pdf+zip)'),
('application/json', 'JSON (application/json)'),
('application/x.json+zip', 'Zipped JSON (application/x.json+zip)'),
('application/x-netcdf', 'NetCDF (application/x-netcdf)'),
('application/x.netcdf+zip', 'Zipped NetCDF (application/x.netcdf+zip)'),
('application/xml', 'XML (application/xml)'),
('application/x.xml+zip', 'Zipped XML (application/x.xml+zip)'),
(
'application/vnd.mapbox-vector-tile',
'mbtiles (application/vnd.mapbox-vector-tile)'
), ('text/plain', 'Text (text/plain)'),
('text/x.plain+zip', 'Zipped text (text/x.plain+zip)'),
('application/x.dwg+zip', 'Zipped DWG (application/x.dwg+zip)')
],
max_length=200
),
),
]
| [def_use_chains offset pairs omitted] |
SOURCE_DOC_PATH = ''
| [def_use_chains offset pairs omitted] |
import numpy as np
from i3Deep import utils
from tqdm import tqdm
import os
# name = "KGU-53317EB91645"
# load_mask = "D:/Datasets/medical_data/ExportKGU/3D Slicer 2/" + name + "/mask.nii.gz"
# load_label_table = "D:/Datasets/medical_data/ExportKGU/3D Slicer 2/" + name + "/label_table.txt"
# save_mask = "D:/Datasets/medical_data/ExportKGU/3D Slicer 2/" + name + "/mask2.nii.gz"
load_path = "D:/Datasets/medical_data/ExportKGU/3D Slicer 2/"
def rename(case_path):
filenames = utils.load_filenames(case_path + "/", extensions=None)
for filename in filenames:
name = os.path.basename(filename)
if "label" in name and ".nii.gz" in name:
os.rename(filename, case_path + "/mask.nii.gz")
elif ".txt" in name:
os.rename(filename, case_path + "/label_table.txt")
elif ".nii.gz" in name:
os.rename(filename, case_path + "/image.nii.gz")
def get_labels(load_label_table):
with open(load_label_table) as f:
label_table = f.readlines()
label_table = np.asarray(label_table)
ggo = []
cons = []
pe = []
for line in label_table:
label = line.split()[0]
if label.isnumeric():
if "Background" in line or "background" in line:
continue
infection = line.split("_")[1]
keywords = ["ggo", "gg"]
if any(x in infection.lower() for x in keywords):
ggo.append(int(label))
keywords = ["cons", "cns", "con", "cos", "co"]
if any(x in infection.lower() for x in keywords):
cons.append(int(label))
keywords = ["pe", "pes"]
if any(x in infection.lower() for x in keywords):
pe.append(int(label))
return ggo, cons, pe
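# Illustrative (hypothetical) label-table lines that get_labels() would parse;
# the exact 3D Slicer export format is assumed here, not taken from the data:
#   "1 lesion_ggo 255 0 0 255"  -> label 1 collected into ggo
#   "2 lesion_cons 0 255 0 255" -> label 2 collected into cons
#   "3 lesion_pe 0 0 255 255"   -> label 3 collected into pe
# Only the leading numeric label and the substring after the first "_" are
# used; lines containing "Background"/"background" are skipped.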
def merge_labels(load_mask, save_mask, load_label_table):
mask, affine, spacing, header = utils.load_nifty(load_mask)
mask = mask.astype(int)
ggo, cons, pe = get_labels(load_label_table)
for label in tqdm(np.concatenate((ggo, cons, pe), axis=0), disable=True):
mask[mask == label] = -label
for label in tqdm(ggo, disable=True):
mask[mask == -label] = 1
for label in tqdm(cons, disable=True):
mask[mask == -label] = 2
for label in tqdm(pe, disable=True):
mask[mask == -label] = 3
mask = np.rint(mask)
mask = mask.astype(int)
utils.save_nifty(save_mask, mask, affine, spacing, header)
def round_mask(filename):
mask, affine, spacing, header = utils.load_nifty(filename)
mask = np.rint(mask)
mask = mask.astype(int)
utils.save_nifty(filename, mask, affine, spacing, header)
def tmp2(filename):
mask, affine, spacing, header = utils.load_nifty(filename)
print(mask[46-1][155-1][116-1])
if __name__ == '__main__':
# filenames = utils.load_filenames(load_path, extensions=None)
# for filename in tqdm(filenames):
# if os.path.isfile(filename + "/mask2.nii.gz"):
# continue
# rename(filename)
# load_mask = filename + "/mask.nii.gz"
# save_mask = filename + "/mask2.nii.gz"
# load_label_table = filename + "/label_table.txt"
# merge_labels(load_mask, save_mask, load_label_table)
# for filename in tqdm(filenames):
# old_mask = filename + "/mask.nii.gz"
# new_mask = filename + "/mask2.nii.gz"
# label_table = filename + "/label_table.txt"
# if os.path.exists(new_mask):
# os.remove(old_mask)
# os.rename(new_mask, old_mask)
# os.remove(label_table)
# filenames = utils.load_filenames("/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/Task79_frankfurt3/labelsTr/", extensions=None)
# for filename in tqdm(filenames):
# mask, affine, spacing, header = utils.load_nifty(filename)
# mask = np.rint(mask)
# mask = mask.astype(np.uint8)
# utils.save_nifty(filename, mask, affine, spacing, header)
# filename = "/gris/gris-f/homelv/kgotkows/datasets/covid19/UK_Frankfurt3/KGU-E9EC0F06F1D6/mask.nii.gz"
# mask, affine, spacing, header = utils.load_nifty(filename)
# mask[mask == 5] = 2
# mask[mask == 6] = 2
# utils.save_nifty(filename, mask, affine, spacing, header)
#tmp("/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task077_frankfurt3Guided/imagesTr/0001_0001.nii.gz")
tmp2("/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/Task77_frankfurt3Guided/tmp/900.nii.gz") | [
[
[
7,
18
],
[
1069,
1071
],
[
2066,
2068
],
[
2411,
2413
],
[
2625,
2627
]
],
[
[
39,
44
],
[
499,
504
],
[
1934,
1939
],
[
2461,
2466
],
[
2586,
2591
],
[
2673,
2678
],
[
2791,
2796
]
],
[
[
63,
67
],
[
2061,
2065
],
[
2180,
2184
],
[
2259,
2263
],
[
2339,
2343
]
],
[
[
76,
78
],
[
602,
604
],
[
693,
695
],
[
784,
786
],
[
882,
884
]
],
[
[
392,
401
]
],
[
[
463,
469
]
],
[
[
940,
950
],
[
2012,
2022
]
],
[
[
1843,
1855
]
],
[
[
2527,
2537
]
],
[
[
2738,
2742
],
[
4542,
4546
]
]
] |
import base64
import json
import requests
import urllib3
from contextlib import contextmanager
from packaging.version import Version
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from requests.exceptions import HTTPError
from mlflow import __version__
from mlflow.protos import databricks_pb2
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, ENDPOINT_NOT_FOUND, ErrorCode
from mlflow.utils.proto_json_utils import parse_dict
from mlflow.utils.string_utils import strip_suffix
from mlflow.exceptions import MlflowException, RestException
RESOURCE_DOES_NOT_EXIST = "RESOURCE_DOES_NOT_EXIST"
_REST_API_PATH_PREFIX = "/api/2.0"
_DEFAULT_HEADERS = {"User-Agent": "mlflow-python-client/%s" % __version__}
# Response codes that generally indicate transient network failures and merit client retries,
# based on guidance from cloud service providers
# (https://docs.microsoft.com/en-us/azure/architecture/best-practices/retry-service-specific#general-rest-and-retry-guidelines)
_TRANSIENT_FAILURE_RESPONSE_CODES = frozenset(
[
408, # Request Timeout
429, # Too Many Requests
500, # Internal Server Error
502, # Bad Gateway
503, # Service Unavailable
504, # Gateway Timeout
]
)
def _get_http_response_with_retries(
method, url, max_retries, backoff_factor, retry_codes, **kwargs
):
"""
Performs an HTTP request using Python's `requests` module with an automatic retry policy.
:param method: a string indicating the method to use, e.g. "GET", "POST", "PUT".
:param url: the target URL address for the HTTP request.
:param max_retries: Maximum total number of retries.
:param backoff_factor: a time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
:param retry_codes: a list of HTTP response error codes that qualifies for retry.
:param kwargs: Additional keyword arguments to pass to `requests.Session.request()`
:return: requests.Response object.
"""
assert 0 <= max_retries < 10
assert 0 <= backoff_factor < 120
retry_kwargs = {
"total": max_retries,
"connect": max_retries,
"read": max_retries,
"redirect": max_retries,
"status": max_retries,
"status_forcelist": retry_codes,
"backoff_factor": backoff_factor,
}
if Version(urllib3.__version__) >= Version("1.26.0"):
retry_kwargs["allowed_methods"] = None
else:
retry_kwargs["method_whitelist"] = None
retry = Retry(**retry_kwargs)
adapter = HTTPAdapter(max_retries=retry)
with requests.Session() as http:
http.mount("https://", adapter)
http.mount("http://", adapter)
response = http.request(method, url, **kwargs)
return response
def http_request(
host_creds,
endpoint,
method,
max_retries=5,
backoff_factor=2,
retry_codes=_TRANSIENT_FAILURE_RESPONSE_CODES,
timeout=120,
**kwargs,
):
"""
Makes an HTTP request with the specified method to the specified hostname/endpoint. Transient
    errors such as rate limiting (429), service unavailable (503) and internal errors (500) are
    retried with an exponential backoff of backoff_factor * (1, 2, 4, ...) seconds.
    The raw requests.Response object is returned; parsing the (usually JSON) body is left to the caller.
:param host_creds: A :py:class:`mlflow.rest_utils.MlflowHostCreds` object containing
hostname and optional authentication.
:param endpoint: a string for service endpoint, e.g. "/path/to/object".
:param method: a string indicating the method to use, e.g. "GET", "POST", "PUT".
:param max_retries: maximum number of retries before throwing an exception.
:param backoff_factor: a time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
:param retry_codes: a list of HTTP response error codes that qualifies for retry.
:param timeout: wait for timeout seconds for response from remote server for connect and
read request.
:param kwargs: Additional keyword arguments to pass to `requests.Session.request()`
:return: requests.Response object.
"""
hostname = host_creds.host
auth_str = None
if host_creds.username and host_creds.password:
basic_auth_str = ("%s:%s" % (host_creds.username, host_creds.password)).encode("utf-8")
auth_str = "Basic " + base64.standard_b64encode(basic_auth_str).decode("utf-8")
elif host_creds.token:
auth_str = "Bearer %s" % host_creds.token
from mlflow.tracking.request_header.registry import resolve_request_headers
headers = dict({**_DEFAULT_HEADERS, **resolve_request_headers()})
if auth_str:
headers["Authorization"] = auth_str
if host_creds.server_cert_path is None:
verify = not host_creds.ignore_tls_verification
else:
verify = host_creds.server_cert_path
if host_creds.client_cert_path is not None:
kwargs["cert"] = host_creds.client_cert_path
cleaned_hostname = strip_suffix(hostname, "/")
url = "%s%s" % (cleaned_hostname, endpoint)
try:
return _get_http_response_with_retries(
method,
url,
max_retries,
backoff_factor,
retry_codes,
headers=headers,
verify=verify,
timeout=timeout,
**kwargs,
)
except Exception as e:
raise MlflowException("API request to %s failed with exception %s" % (url, e))
def _can_parse_as_json(string):
try:
json.loads(string)
return True
except Exception:
return False
def http_request_safe(host_creds, endpoint, method, **kwargs):
"""
Wrapper around ``http_request`` that also verifies that the request succeeds with code 200.
"""
response = http_request(host_creds=host_creds, endpoint=endpoint, method=method, **kwargs)
return verify_rest_response(response, endpoint)
def verify_rest_response(response, endpoint):
"""Verify the return code and format, raise exception if the request was not successful."""
if response.status_code != 200:
if _can_parse_as_json(response.text):
raise RestException(json.loads(response.text))
else:
base_msg = "API request to endpoint %s failed with error code " "%s != 200" % (
endpoint,
response.status_code,
)
raise MlflowException("%s. Response body: '%s'" % (base_msg, response.text))
# Skip validation for endpoints (e.g. DBFS file-download API) which may return a non-JSON
# response
if endpoint.startswith(_REST_API_PATH_PREFIX) and not _can_parse_as_json(response.text):
base_msg = (
"API request to endpoint was successful but the response body was not "
"in a valid JSON format"
)
raise MlflowException("%s. Response body: '%s'" % (base_msg, response.text))
return response
def augmented_raise_for_status(response):
"""Wrap the standard `requests.response.raise_for_status()` method and return reason"""
try:
response.raise_for_status()
except HTTPError as e:
if response.text:
raise HTTPError(f"{e}. Response text: {response.text}")
else:
raise e
def _get_path(path_prefix, endpoint_path):
return "{}{}".format(path_prefix, endpoint_path)
def extract_api_info_for_service(service, path_prefix):
"""Return a dictionary mapping each API method to a tuple (path, HTTP method)"""
service_methods = service.DESCRIPTOR.methods
res = {}
for service_method in service_methods:
endpoints = service_method.GetOptions().Extensions[databricks_pb2.rpc].endpoints
endpoint = endpoints[0]
endpoint_path = _get_path(path_prefix, endpoint.path)
res[service().GetRequestClass(service_method)] = (endpoint_path, endpoint.method)
return res
def extract_all_api_info_for_service(service, path_prefix):
"""Return a dictionary mapping each API method to a list of tuples [(path, HTTP method)]"""
service_methods = service.DESCRIPTOR.methods
res = {}
for service_method in service_methods:
endpoints = service_method.GetOptions().Extensions[databricks_pb2.rpc].endpoints
res[service().GetRequestClass(service_method)] = [
(_get_path(path_prefix, endpoint.path), endpoint.method) for endpoint in endpoints
]
return res
def call_endpoint(host_creds, endpoint, method, json_body, response_proto):
# Convert json string to json dictionary, to pass to requests
if json_body:
json_body = json.loads(json_body)
if method == "GET":
response = http_request(
host_creds=host_creds, endpoint=endpoint, method=method, params=json_body
)
else:
response = http_request(
host_creds=host_creds, endpoint=endpoint, method=method, json=json_body
)
response = verify_rest_response(response, endpoint)
js_dict = json.loads(response.text)
parse_dict(js_dict=js_dict, message=response_proto)
return response_proto
def call_endpoints(host_creds, endpoints, json_body, response_proto):
# The order that the endpoints are called in is defined by the order
# specified in ModelRegistryService in model_registry.proto
for i, (endpoint, method) in enumerate(endpoints):
try:
return call_endpoint(host_creds, endpoint, method, json_body, response_proto)
except RestException as e:
if e.error_code != ErrorCode.Name(ENDPOINT_NOT_FOUND) or i == len(endpoints) - 1:
raise e
@contextmanager
def cloud_storage_http_request(
method,
url,
max_retries=5,
backoff_factor=2,
retry_codes=_TRANSIENT_FAILURE_RESPONSE_CODES,
timeout=None,
**kwargs,
):
"""
Performs an HTTP PUT/GET request using Python's `requests` module with automatic retry.
    :param method: a string, either 'PUT' or 'GET', specifying whether to perform an HTTP PUT or GET
:param url: the target URL address for the HTTP request.
:param max_retries: maximum number of retries before throwing an exception.
:param backoff_factor: a time factor for exponential backoff. e.g. value 5 means the HTTP
request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the
exponential backoff.
:param retry_codes: a list of HTTP response error codes that qualifies for retry.
:param timeout: wait for timeout seconds for response from remote server for connect and
read request. Default to None owing to long duration operation in read / write.
:param kwargs: Additional keyword arguments to pass to `requests.Session.request()`
    :return: requests.Response object.
"""
if method.lower() not in ("put", "get"):
raise ValueError("Illegal http method: " + method)
try:
with _get_http_response_with_retries(
method, url, max_retries, backoff_factor, retry_codes, timeout=timeout, **kwargs
) as response:
yield response
except Exception as e:
raise MlflowException("API request failed with exception %s" % e)
class MlflowHostCreds(object):
"""
Provides a hostname and optional authentication for talking to an MLflow tracking server.
:param host: Hostname (e.g., http://localhost:5000) to MLflow server. Required.
:param username: Username to use with Basic authentication when talking to server.
If this is specified, password must also be specified.
:param password: Password to use with Basic authentication when talking to server.
If this is specified, username must also be specified.
:param token: Token to use with Bearer authentication when talking to server.
If provided, user/password authentication will be ignored.
:param ignore_tls_verification: If true, we will not verify the server's hostname or TLS
certificate. This is useful for certain testing situations, but should never be
true in production.
If this is set to true ``server_cert_path`` must not be set.
:param client_cert_path: Path to ssl client cert file (.pem).
Sets the cert param of the ``requests.request``
function (see https://requests.readthedocs.io/en/master/api/).
:param server_cert_path: Path to a CA bundle to use.
Sets the verify param of the ``requests.request``
function (see https://requests.readthedocs.io/en/master/api/).
If this is set ``ignore_tls_verification`` must be false.
"""
def __init__(
self,
host,
username=None,
password=None,
token=None,
ignore_tls_verification=False,
client_cert_path=None,
server_cert_path=None,
):
if not host:
raise MlflowException(
message="host is a required parameter for MlflowHostCreds",
error_code=INVALID_PARAMETER_VALUE,
)
if ignore_tls_verification and (server_cert_path is not None):
raise MlflowException(
message=(
"When 'ignore_tls_verification' is true then 'server_cert_path' "
"must not be set! This error may have occurred because the "
"'MLFLOW_TRACKING_INSECURE_TLS' and 'MLFLOW_TRACKING_SERVER_CERT_PATH' "
"environment variables are both set - only one of these environment "
"variables may be set."
),
error_code=INVALID_PARAMETER_VALUE,
)
self.host = host
self.username = username
self.password = password
self.token = token
self.ignore_tls_verification = ignore_tls_verification
self.client_cert_path = client_cert_path
self.server_cert_path = server_cert_path
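# Illustrative usage sketch (not part of mlflow itself): the host URL and the
# endpoint path below are placeholders, and the call is wrapped in a helper so
# that nothing is executed at import time.
def _example_rest_call():
    """Hedged sketch: issue a GET with the retry policy defined above."""
    creds = MlflowHostCreds(host="http://localhost:5000")
    endpoint = "/api/2.0/mlflow/experiments/list"
    response = http_request(
        host_creds=creds,
        endpoint=endpoint,
        method="GET",
        max_retries=3,
        backoff_factor=1,
    )
    return verify_rest_response(response, endpoint)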
| [def_use_chains offset pairs omitted] |
"""
sampling.py
We sample Metropolis-Hastings:
* Random walk proposals
* Langevin proposals
* Langevin proposals with preconditioning
* Hamiltonian MC
* Hamiltonian MC with preconditioning
NOTE:
The functionality of this module is restricted to log-densities,
i.e. densities of the form p(s) = exp(-E(s)). We work with E(s) only.
The reason is that in Bayesian inference, direct evaluations of exp(-E(s))
are often numerically unstable.
"""
import collections
from abc import ABC, abstractmethod
import numpy as np
from difflikelihoods import logdensity
def metropolishastings_rw(logpdf, nsamps, initstate, pwidth, ninits):
"""
Convenience function for Metropolis-Hastings sampling with
random walk proposal kernel.
"""
logdens = logdensity.LogDensity(logpdf)
rwmh = RandomWalkMH(logdens)
return rwmh.sample_nd(nsamps, initstate, pwidth, ninits)
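# Illustrative usage sketch (not part of the original module): sampling a 1-d
# standard normal, for which E(s) = 0.5 * ||s||^2 up to an additive constant.
# It assumes that logdensity.LogDensity simply wraps and evaluates the given
# callable; the parameter values below are arbitrary.
def _example_random_walk_sampling():
    def neg_log_density(s):
        # E(s) of a standard normal; returns a numpy scalar
        return 0.5 * np.dot(s, s)
    samples, neg_logdens, acc_ratio = metropolishastings_rw(
        logpdf=neg_log_density,
        nsamps=1000,
        initstate=np.zeros(1),
        pwidth=0.5,
        ninits=100,
    )
    return samples, acc_ratio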
def metropolishastings_lang(logpdf, loggrad, nsamps, initstate, pwidth, ninits):
"""
Convenience function for Metropolis-Hastings sampling with
Langevin dynamics proposal kernel.
"""
logdens = logdensity.LogDensity(logpdf, loggrad)
langmh = LangevinMH(logdens)
return langmh.sample_nd(nsamps, initstate, pwidth, ninits)
def metropolishastings_plang(
logpdf, loggrad, loghess, nsamps, initstate, pwidth, ninits
):
"""
Convenience function for Metropolis-Hastings sampling with
Riemannian (preconditioned) Langevin dynamics proposal kernel.
"""
logdens = logdensity.LogDensity(logpdf, loggrad, loghess)
plangmh = PrecondLangevinMH(logdens)
return plangmh.sample_nd(nsamps, initstate, pwidth, ninits)
def metropolishastings_ham(
logpdf, loggrad, nsamps, initstate, stepsize, nsteps, ninits
):
"""
Convenience function for Hamiltonian MCMC.
"""
logdens = logdensity.LogDensity(logpdf, loggrad)
hmc = HamiltonianMC(logdens, nsteps)
return hmc.sample_nd(nsamps, initstate, stepsize, ninits)
def metropolishastings_pham(
logpdf, loggrad, loghess, nsamps, initstate, stepsize, nsteps, ninits
):
"""
Convenience function for preconditioned Hamiltonian MCMC.
"""
logdens = logdensity.LogDensity(logpdf, loggrad, loghess)
phmc = PrecondHamiltonianMC(logdens, nsteps)
return phmc.sample_nd(nsamps, initstate, stepsize, ninits)
# Convenience data structure.
MCMCState = collections.namedtuple("MCMCState", "state logdens loggrad loghess")
class MetropolisHastings(ABC):
"""
Abstract Metropolis-Hastings class. Contains everything but the
proposal kernels.
"""
def __init__(self, logdens):
"""
Initialise MH sampler with a log-density function.
Args:
logdens: LogDensity object, evaluations of a negative log-
density and derivatives
"""
self.logdens = logdens
def sample_nd(self, nsamps, init_state, pwidth, ninits=None, *optional):
"""
"""
assert init_state_is_array(
init_state
), "Please enter a (d,) dimensional initial state"
states, logprobs = np.zeros((nsamps, len(init_state))), np.zeros(nsamps)
accepted = 0
if ninits is None:
ninits = 0
currstate = self.evaluate_logdens(init_state)
states[0], logprobs[0] = currstate.state, currstate.logdens
for idx in range(1, nsamps):
if idx < ninits:
proposal, corrfact = self.generate_proposal(currstate, pwidth)
else:
proposal, corrfact = self.generate_proposal(currstate, 0.2 * pwidth)
currstate, is_accept = self.accept_or_reject(
currstate, proposal, corrfact, idx, ninits
)
states[idx], logprobs[idx] = (
currstate.state.copy(),
currstate.logdens.copy(),
)
if idx >= ninits:
accepted = accepted + int(is_accept)
ratio = accepted / nsamps
return states, logprobs, ratio
def evaluate_logdens(self, loc):
"""
"""
logdenseval = self.logdens.eval(loc)
if self.logdens.has_gradient:
gradeval = self.logdens.gradeval(loc)
else:
gradeval = 0
if self.logdens.has_hessian:
hesseval = self.logdens.hesseval(loc)
else:
hesseval = 0
return MCMCState(
state=loc, logdens=logdenseval, loggrad=gradeval, loghess=hesseval
)
def accept_or_reject(self, currstate, proposal, corrfact, idx, ninits):
"""
"""
logaccprob = self.get_logaccprob(currstate, proposal, corrfact, idx, ninits)
if logaccprob < 0 or logaccprob < -np.log(np.random.rand()):
state = proposal
is_accept = True
else:
state = currstate
is_accept = False
return state, is_accept
def get_logaccprob(self, currstate, proposal, corrfact, idx, ninits):
"""
Returns NEGATIVE log acceptance probability, i.e.
corrected proposal - corrected currstate
"""
if idx < ninits:
corrfact = -corrfact
return (corrfact) + (proposal.logdens - currstate.logdens)
@abstractmethod
def generate_proposal(self, *args):
"""
"""
pass
def init_state_is_array(init_state):
"""
Checks whether init_state is compliant with an Nd algorithm.
That is, whether init_state is an (d,) np.ndarray.
"""
assert isinstance(init_state, np.ndarray), "Please enter init_state of shape (d,)"
assert len(init_state.shape) == 1, "Please enter init_state of shape (d,)"
return True
class RandomWalkMH(MetropolisHastings):
"""
"""
def __init__(self, logdens):
"""
"""
MetropolisHastings.__init__(self, logdens)
def generate_proposal(self, currstate, pwidth):
"""
"""
newloc = self.sample_randomwalk(currstate.state, pwidth)
proposal = self.evaluate_logdens(newloc)
corrfact = 0
return proposal, corrfact
def sample_randomwalk(self, mean, var):
"""
"""
return mean + np.sqrt(var) * np.random.randn(len(mean))
class LangevinMH(MetropolisHastings):
"""
"""
def __init__(self, logdens):
"""
"""
MetropolisHastings.__init__(self, logdens)
def generate_proposal(self, currstate, pwidth):
"""
"""
newloc = self.sample_langevin(currstate, pwidth)
proposal = self.evaluate_logdens(newloc)
corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth)
return proposal, corrfact
def sample_langevin(self, currstate, pwidth):
"""
"""
noise = np.random.randn(len(currstate.state))
return (
currstate.state - pwidth * currstate.loggrad + np.sqrt(2 * pwidth) * noise
)
def compute_corrfact_langevin(self, currstate, proposal, pwidth):
"""
"""
lognomin = self.kernel_langevin(currstate, proposal, pwidth)
logdenom = self.kernel_langevin(proposal, currstate, pwidth)
return lognomin - logdenom
def kernel_langevin(self, state1, state2, pwidth):
"""
"""
state2_dyn = state2.state - pwidth * state2.loggrad
dist = np.linalg.norm(state1.state - state2_dyn) ** 2
return 0.5 * dist / (2 * pwidth)
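# Hedged note (added for clarity, not part of the original module): with energy
# E = -log p and step size h = pwidth, LangevinMH proposes
#     x' = x - h * grad E(x) + sqrt(2 * h) * xi,   xi ~ N(0, I),
# so kernel_langevin(state1, state2, h) is the negative log proposal density
#     -log q(state1 | state2) = ||state1 - state2 + h * grad E(state2)||^2 / (4 * h)
# up to an additive constant, and compute_corrfact_langevin returns
# log q(x' | x) - log q(x | x'), the term that turns the energy difference into the
# full MALA negative log acceptance probability.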
class PrecondLangevinMH(MetropolisHastings):
"""
Preconditioning with (inverse) Hessian.
"""
def __init__(self, logdens):
"""
precondeval returns M (and not M^{-1}) as used in Cald&Gir
"""
MetropolisHastings.__init__(self, logdens)
def generate_proposal(self, currstate, pwidth):
"""
"""
newloc = self.sample_langevin(currstate, pwidth)
proposal = self.evaluate_logdens(newloc)
corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth)
return proposal, corrfact
def sample_langevin(self, currstate, pwidth):
"""
"""
noise = np.random.multivariate_normal(
np.zeros(len(currstate.loghess)), np.linalg.inv(currstate.loghess)
)
prec_dyn = np.linalg.solve(currstate.loghess, currstate.loggrad)
return currstate.state - pwidth * prec_dyn + np.sqrt(2 * pwidth) * noise
def compute_corrfact_langevin(self, currstate, proposal, pwidth):
"""
"""
lognomin = self.kernel_langevin(currstate, proposal, pwidth)
logdenom = self.kernel_langevin(proposal, currstate, pwidth)
return lognomin - logdenom
def kernel_langevin(self, state1, state2, pwidth):
"""
"""
prec_dyn = np.linalg.solve(state2.loghess, state2.loggrad)
state2_dyn = state2.state - pwidth * prec_dyn
difference = state1.state - state2_dyn
return 0.5 * difference.dot(np.dot(state2.loghess, difference)) / (2 * pwidth)
class HamiltonianMC(MetropolisHastings):
"""
"""
def __init__(self, logdens, nsteps):
"""
"""
MetropolisHastings.__init__(self, logdens)
self.nsteps = nsteps
def generate_proposal(self, currstate, pwidth):
"""
pwidth is used as stepsize for self.nsteps leapfrog steps.
        The correction factor is the difference of the kinetic-energy terms
        (the log-ratio of the momentum densities).
"""
momentum = np.random.multivariate_normal(
np.zeros(len(currstate.state)), np.eye(len(currstate.state))
)
# hamilt = self.evaluate_hamiltonian(momentum, currstate)
momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth)
# prop_hamilt = self.evaluate_hamiltonian(momentum_new, proposal)
corrfact = self.get_corrfact(momentum, momentum_new)
return proposal, corrfact
def leapfrog_dynamics(self, momentum, currstate, pwidth):
"""
"""
proposal = currstate
for idx in range(self.nsteps):
momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth)
return momentum, proposal
def compute_next_lfstep(self, momentum, proposal, pwidth):
"""
"""
momentum = momentum - 0.5 * pwidth * proposal.loggrad
pstate = proposal.state + pwidth * momentum
proposal = self.evaluate_logdens(pstate)
momentum = momentum - 0.5 * pwidth * proposal.loggrad
return momentum, proposal
    def get_corrfact(self, mom, mom_new):
        """
        Kinetic-energy difference between the leapfrog end-point momentum mom_new
        and the freshly sampled momentum mom (same convention as
        PrecondHamiltonianMC.get_corrfact).
        """
return 0.5 * (mom_new.T @ mom_new - mom.T @ mom)
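# Hedged note (added for clarity, not part of the original module): one leapfrog
# step with step size h = pwidth on the energy E = -log p reads
#     p_half = p - (h / 2) * grad E(x)
#     x_new  = x + h * p_half
#     p_new  = p_half - (h / 2) * grad E(x_new),
# which is what compute_next_lfstep implements; get_corrfact then contributes the
# kinetic-energy difference 0.5 * (||p_new||^2 - ||p||^2) so that the acceptance
# step compares full Hamiltonians H = E + 0.5 * ||p||^2.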
class PrecondHamiltonianMC(MetropolisHastings):
"""
In fact, the true name would be either
* Riemannian-Gaussian HMC: if the preconditioner depends on the state
* Euclidean-Gaussian HMC: if the preconditioner is constant
[Girolami and Calderhead, 2011; Betancourt, 2018]
"""
def __init__(self, logdens, nsteps):
"""
evalprecond returns M (and not M^{-1}) as used in Cald&Gir.
M is the Hessian
"""
MetropolisHastings.__init__(self, logdens)
self.nsteps = nsteps
def generate_proposal(self, currstate, pwidth):
"""
pwidth is used as stepsize for self.nsteps leapfrog steps.
        The correction factor is the difference of the kinetic-energy terms
        (the log-ratio of the momentum densities), including the log-determinant terms.
"""
momentum = np.random.multivariate_normal(
np.zeros(len(currstate.state)), currstate.loghess
)
momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth)
corrfact = self.get_corrfact(momentum, momentum_new, currstate, proposal)
return proposal, corrfact
def leapfrog_dynamics(self, momentum, currstate, pwidth):
"""
"""
proposal = currstate
for idx in range(self.nsteps):
momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth)
return momentum, proposal
def compute_next_lfstep(self, momentum, proposal, pwidth):
"""
"""
momentum = momentum - 0.5 * pwidth * proposal.loggrad
pstate = proposal.state + pwidth * np.linalg.solve(proposal.loghess, momentum)
proposal = self.evaluate_logdens(pstate)
momentum = momentum - 0.5 * pwidth * proposal.loggrad
return momentum, proposal
def get_corrfact(self, mom, mom_new, currstate, proposal):
"""
"""
return 0.5 * (
mom_new.T @ np.linalg.solve(proposal.loghess, mom_new)
+ np.log(np.linalg.det(proposal.loghess))
- mom.T @ np.linalg.solve(currstate.loghess, mom)
- np.log(np.linalg.det(currstate.loghess))
)
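# Hedged usage sketch (illustration only, not part of the original module): the
# convenience functions above expect a NEGATIVE log-density ("energy") and, for the
# gradient-based samplers, its derivatives. The standard-normal target, chain length
# and proposal widths below are arbitrary demo choices, and it is assumed that
# logdensity.LogDensity wraps plain Python callables returning NumPy values.
if __name__ == "__main__":
    _energy = lambda x: 0.5 * x @ x  # negative log-density of a standard normal
    _energy_grad = lambda x: x  # gradient of the energy
    _samples, _logdens, _ratio = metropolishastings_rw(
        _energy, nsamps=2000, initstate=np.zeros(2), pwidth=0.5, ninits=200
    )
    print("random-walk MH acceptance ratio:", _ratio)
    _samples, _logdens, _ratio = metropolishastings_lang(
        _energy, _energy_grad, nsamps=2000, initstate=np.zeros(2), pwidth=0.05, ninits=200
    )
    print("Langevin MH acceptance ratio:", _ratio)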
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomCropAndResize op in DE
"""
import numpy as np
import cv2
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.utils as mode
import mindspore.dataset as ds
from mindspore import log as logger
from util import diff_mse, save_and_check_md5, visualize_list, \
config_get_set_seed, config_get_set_num_parallel_workers
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
GENERATE_GOLDEN = False
def test_random_crop_and_resize_op_c(plot=False):
"""
Test RandomCropAndResize op in c transforms
"""
logger.info("test_random_crop_and_resize_op_c")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
# With these inputs we expect the code to crop the whole image
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3))
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(input_columns=["image"], operations=decode_op)
num_iter = 0
crop_and_resize_images = []
original_images = []
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
crop_and_resize = item1["image"]
original = item2["image"]
        # Note: resize the original image to the same size as the RandomResizedCrop() output
original = cv2.resize(original, (512, 256))
mse = diff_mse(crop_and_resize, original)
assert mse == 0
logger.info("random_crop_and_resize_op_{}, mse: {}".format(num_iter + 1, mse))
num_iter += 1
crop_and_resize_images.append(crop_and_resize)
original_images.append(original)
if plot:
visualize_list(original_images, crop_and_resize_images)
def test_random_crop_and_resize_op_py(plot=False):
"""
Test RandomCropAndResize op in py transforms
"""
logger.info("test_random_crop_and_resize_op_py")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# With these inputs we expect the code to crop the whole image
transforms1 = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)),
py_vision.ToTensor()
]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1)
data1 = data1.map(input_columns=["image"], operations=transform1)
    # Second dataset for comparison
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms2 = [
py_vision.Decode(),
py_vision.ToTensor()
]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2)
data2 = data2.map(input_columns=["image"], operations=transform2)
num_iter = 0
crop_and_resize_images = []
original_images = []
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
crop_and_resize = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
original = cv2.resize(original, (512, 256))
mse = diff_mse(crop_and_resize, original)
# Due to rounding error the mse for Python is not exactly 0
assert mse <= 0.05
logger.info("random_crop_and_resize_op_{}, mse: {}".format(num_iter + 1, mse))
num_iter += 1
crop_and_resize_images.append(crop_and_resize)
original_images.append(original)
if plot:
visualize_list(original_images, crop_and_resize_images)
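# Hedged sketch (illustration only, not part of the test-suite): the py_transforms
# used above can also be applied to a single image outside a dataset pipeline. It is
# assumed that the py_vision ops accept a PIL.Image directly (as they do inside
# Compose); the random 300x400 input and the helper name are made up for the example.
def _example_single_image_crop_and_resize():
    from PIL import Image
    image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
    cropped = py_vision.RandomResizedCrop((256, 512), (0.5, 1.0), (0.75, 1.333))(image)
    tensor = py_vision.ToTensor()(cropped)  # CHW float32 in [0, 1]
    return tensor.shape  # expected to be (3, 256, 512)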
def test_random_crop_and_resize_01():
"""
Test RandomCropAndResize with md5 check, expected to pass
"""
logger.info("test_random_crop_and_resize_01")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1))
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1)),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(input_columns=["image"], operations=transform)
filename1 = "random_crop_and_resize_01_c_result.npz"
filename2 = "random_crop_and_resize_01_py_result.npz"
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_02():
"""
    Test RandomCropAndResize with md5 check: image interpolation mode is Inter.NEAREST,
expected to pass
"""
logger.info("test_random_crop_and_resize_02")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST)
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(input_columns=["image"], operations=transform)
filename1 = "random_crop_and_resize_02_c_result.npz"
filename2 = "random_crop_and_resize_02_py_result.npz"
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_03():
"""
Test RandomCropAndResize with md5 check: max_attempts is 1, expected to pass
"""
logger.info("test_random_crop_and_resize_03")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), max_attempts=1)
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), max_attempts=1),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(input_columns=["image"], operations=transform)
filename1 = "random_crop_and_resize_03_c_result.npz"
filename2 = "random_crop_and_resize_03_py_result.npz"
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_04_c():
"""
    Test RandomCropAndResize with c_transforms: invalid range of scale (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_04_c")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
# If input range of scale is not in the order of (min, max), ValueError will be raised.
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5))
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_and_resize_op)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required interval of (0 to 16777216)." in str(e)
def test_random_crop_and_resize_04_py():
"""
Test RandomCropAndResize with py_transforms: invalid range of scale (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_04_py")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
# If input range of scale is not in the order of (min, max), ValueError will be raised.
py_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5)),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data = data.map(input_columns=["image"], operations=transform)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required interval of (0 to 16777216)." in str(e)
def test_random_crop_and_resize_05_c():
"""
Test RandomCropAndResize with c_transforms: invalid range of ratio (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_c")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5))
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_and_resize_op)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required interval of (0 to 16777216)." in str(e)
def test_random_crop_and_resize_05_py():
"""
Test RandomCropAndResize with py_transforms: invalid range of ratio (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_py")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
py_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5)),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data = data.map(input_columns=["image"], operations=transform)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required interval of (0 to 16777216)." in str(e)
def test_random_crop_and_resize_comp(plot=False):
"""
Test RandomCropAndResize and compare between python and c image augmentation
"""
logger.info("test_random_crop_and_resize_comp")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5))
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)),
py_vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(input_columns=["image"], operations=transform)
image_c_cropped = []
image_py_cropped = []
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
c_image = item1["image"]
py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_c_cropped.append(c_image)
image_py_cropped.append(py_image)
mse = diff_mse(c_image, py_image)
assert mse < 0.02 # rounding error
if plot:
visualize_list(image_c_cropped, image_py_cropped, visualize_mode=2)
def test_random_crop_and_resize_06():
"""
Test RandomCropAndResize with c_transforms: invalid values for scale,
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_c")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), scale="", ratio=(1, 0.5))
data = data.map(input_columns=["image"], operations=decode_op)
data.map(input_columns=["image"], operations=random_crop_and_resize_op)
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Argument scale with value \"\" is not of type (<class 'tuple'>,)" in str(e)
try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), scale=(1, "2"), ratio=(1, 0.5))
data = data.map(input_columns=["image"], operations=decode_op)
data.map(input_columns=["image"], operations=random_crop_and_resize_op)
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Argument scale[1] with value 2 is not of type (<class 'float'>, <class 'int'>)." in str(e)
if __name__ == "__main__":
test_random_crop_and_resize_op_c(True)
test_random_crop_and_resize_op_py(True)
test_random_crop_and_resize_01()
test_random_crop_and_resize_02()
test_random_crop_and_resize_03()
test_random_crop_and_resize_04_c()
test_random_crop_and_resize_04_py()
test_random_crop_and_resize_05_c()
test_random_crop_and_resize_05_py()
test_random_crop_and_resize_06()
test_random_crop_and_resize_comp(True)
import pytest
import torch
from torch.testing import assert_allclose
from kornia.augmentation.random_generator import (
random_prob_generator,
random_color_jitter_generator,
random_perspective_generator,
random_affine_generator,
random_rotation_generator,
random_crop_generator,
random_crop_size_generator,
random_rectangles_params_generator,
center_crop_generator,
random_motion_blur_generator,
random_solarize_generator,
random_posterize_generator,
random_sharpness_generator,
random_mixup_generator,
random_cutmix_generator,
)
class RandomGeneratorBaseTests():
def test_valid_param_combinations(self, device, dtype):
raise NotImplementedError
def test_invalid_param_combinations(self, device, dtype):
raise NotImplementedError
def test_random_gen(self, device, dtype):
raise NotImplementedError
def test_same_on_batch(self, device, dtype):
raise NotImplementedError
class TestRandomProbGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('p', [0., 0.5, 1.])
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, p, batch_size, same_on_batch, device, dtype):
random_prob_generator(batch_size=batch_size, p=p, same_on_batch=same_on_batch)
@pytest.mark.parametrize(
'p',
[
            # Should fail if p > 1. or p < 0.
(-1.),
(2.)
]
)
def test_invalid_param_combinations(self, p, device, dtype):
with pytest.raises(Exception):
random_prob_generator(batch_size=8, p=p)
@pytest.mark.parametrize(
'p,expected',
[(0., [False] * 8), (0.5, [False, False, True, False, True, False, True, False]), (1., [True] * 8)]
)
def test_random_gen(self, p, expected, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_prob_generator(batch_size=batch_size, p=p)
assert (res == torch.tensor(expected)).long().sum() == batch_size
@pytest.mark.parametrize("seed,expected", [
(42, [False] * 8),
(0, [True] * 8),
])
def test_same_on_batch(self, seed, expected, device, dtype):
torch.manual_seed(seed)
batch_size = 8
res = random_prob_generator(batch_size=batch_size, p=.5, same_on_batch=True)
assert (res == torch.tensor(expected)).long().sum() == batch_size
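# Hedged usage sketch (illustration only, not part of the test-suite): as the cases
# above suggest, random_prob_generator yields one boolean-like entry per batch
# element, typically used as a mask that decides which samples receive an
# augmentation. The horizontal flip and the helper name below are made up.
def _apply_with_prob(images: torch.Tensor, p: float = 0.5) -> torch.Tensor:
    to_apply = random_prob_generator(batch_size=images.shape[0], p=p).bool()
    out = images.clone()
    out[to_apply] = out[to_apply].flip(-1)  # flip only the selected samples
    return out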
class TestColorJitterGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('brightness', [None, torch.tensor([0.8, 1.2])])
@pytest.mark.parametrize('contrast', [None, torch.tensor([0.8, 1.2])])
@pytest.mark.parametrize('saturation', [None, torch.tensor([0.8, 1.2])])
@pytest.mark.parametrize('hue', [None, torch.tensor([-0.1, 0.1])])
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(
self, brightness, contrast, saturation, hue, batch_size, same_on_batch, device, dtype
):
random_color_jitter_generator(
batch_size,
brightness.to(device=device, dtype=dtype) if brightness is not None else None,
contrast.to(device=device, dtype=dtype) if contrast is not None else None,
saturation.to(device=device, dtype=dtype) if saturation is not None else None,
hue.to(device=device, dtype=dtype) if hue is not None else None, same_on_batch
)
@pytest.mark.parametrize(
'brightness,contrast,saturation,hue',
[
            # Should fail if a value is out of bounds or tensor.shape != [1, 2]
(torch.tensor([-1., 2.]), None, None, None),
(torch.tensor([0., 3.]), None, None, None),
(torch.tensor(0.), None, None, None),
(torch.tensor([0.]), None, None, None),
(torch.tensor([0., 1., 2.]), None, None, None),
(None, torch.tensor([-1., 2.]), None, None),
(None, torch.tensor(0.), None, None),
(None, torch.tensor([0.]), None, None),
(None, torch.tensor([0., 1., 2.]), None, None),
(None, None, torch.tensor([-1., 2.]), None),
(None, None, torch.tensor(0.), None),
(None, None, torch.tensor([0.]), None),
(None, None, torch.tensor([0., 1., 2.]), None),
(None, None, None, torch.tensor([-1., 0.])),
(None, None, None, torch.tensor([0, 1.])),
(None, None, None, torch.tensor(0.)),
(None, None, None, torch.tensor([0.])),
(None, None, None, torch.tensor([0., 1., 2.])),
]
)
def test_invalid_param_combinations(self, brightness, contrast, saturation, hue, device, dtype):
with pytest.raises(Exception):
random_color_jitter_generator(
8,
brightness.to(device=device, dtype=dtype) if brightness is not None else None,
contrast.to(device=device, dtype=dtype) if contrast is not None else None,
saturation.to(device=device, dtype=dtype) if saturation is not None else None,
hue.to(device=device, dtype=dtype) if hue is not None else None
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
jitter_params = random_color_jitter_generator(
batch_size,
brightness=torch.tensor([0.8, 1.2], device=device, dtype=dtype),
contrast=torch.tensor([0.7, 1.3], device=device, dtype=dtype),
saturation=torch.tensor([0.6, 1.4], device=device, dtype=dtype),
hue=torch.tensor([-0.1, 0.1], device=device, dtype=dtype)
)
expected_jitter_params = {
'brightness_factor': torch.tensor(
[1.1529, 1.1660, 0.9531, 1.1837, 0.9562, 1.0404, 0.9026, 1.1175], device=device, dtype=dtype
),
'contrast_factor': torch.tensor(
[1.2645, 0.7799, 1.2608, 1.0561, 1.2216, 1.0406, 1.1447, 0.9576], device=device, dtype=dtype
),
'hue_factor': torch.tensor(
[0.0771, 0.0148, -0.0467, 0.0255, -0.0461, -0.0117, -0.0406, 0.0663], device=device, dtype=dtype
),
'saturation_factor': torch.tensor(
[0.6843, 0.8156, 0.8871, 0.7595, 1.0378, 0.6049, 1.3612, 0.6602], device=device, dtype=dtype
),
'order': torch.tensor([3, 2, 0, 1], device=device, dtype=dtype)
}
assert set(list(jitter_params.keys())) == set([
'brightness_factor', 'contrast_factor', 'hue_factor', 'saturation_factor', 'order']), \
"Redundant keys found apart from \
'brightness_factor', 'contrast_factor', 'hue_factor', 'saturation_factor', 'order'"
assert_allclose(
jitter_params['brightness_factor'], expected_jitter_params['brightness_factor'], rtol=1e-4, atol=1e-4
)
assert_allclose(
jitter_params['contrast_factor'], expected_jitter_params['contrast_factor'], rtol=1e-4, atol=1e-4
)
assert_allclose(jitter_params['hue_factor'], expected_jitter_params['hue_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(
jitter_params['saturation_factor'], expected_jitter_params['saturation_factor'], rtol=1e-4, atol=1e-4
)
assert_allclose(jitter_params['order'].to(dtype), expected_jitter_params['order'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
jitter_params = random_color_jitter_generator(
batch_size,
brightness=torch.tensor([0.8, 1.2], device=device, dtype=dtype),
contrast=torch.tensor([0.7, 1.3], device=device, dtype=dtype),
saturation=torch.tensor([0.6, 1.4], device=device, dtype=dtype),
hue=torch.tensor([-0.1, 0.1], device=device, dtype=dtype),
same_on_batch=True
)
expected_res = {
'brightness_factor': torch.tensor([1.1529] * batch_size, device=device, dtype=dtype),
'contrast_factor': torch.tensor([1.2490] * batch_size, device=device, dtype=dtype),
'hue_factor': torch.tensor([-0.0234] * batch_size, device=device, dtype=dtype),
'saturation_factor': torch.tensor([1.3674] * batch_size, device=device, dtype=dtype),
'order': torch.tensor([2, 3, 0, 1], device=device, dtype=dtype)
}
assert_allclose(jitter_params['brightness_factor'], expected_res['brightness_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(jitter_params['contrast_factor'], expected_res['contrast_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(jitter_params['hue_factor'], expected_res['hue_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(jitter_params['saturation_factor'], expected_res['saturation_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(jitter_params['order'].to(dtype), expected_res['order'], rtol=1e-4, atol=1e-4)
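# Hedged note (illustration only): random_color_jitter_generator returns one factor
# per batch element for each of brightness / contrast / saturation / hue plus an
# 'order' tensor that permutes the four adjustments; the consumer (e.g.
# kornia.augmentation.ColorJitter) is expected to apply them in that order. Which
# index maps to which adjustment is an assumption of this note, not checked here.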
class TestRandomPerspectiveGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('height,width', [(200, 200)])
@pytest.mark.parametrize('distortion_scale', [torch.tensor(0.), torch.tensor(0.5), torch.tensor(1.)])
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, height, width, distortion_scale, batch_size, same_on_batch, device, dtype):
random_perspective_generator(
batch_size=8,
height=height,
width=width,
distortion_scale=distortion_scale.to(device=device, dtype=dtype),
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'height,width,distortion_scale',
[
            # Should fail if distortion_scale > 1. or distortion_scale < 0.
(-100, 100, torch.tensor(0.5)),
(100, -100, torch.tensor(0.5)),
(100, 100, torch.tensor(-0.5)),
(100, 100, torch.tensor(1.5)),
(100, 100, torch.tensor([0., 0.5])),
]
)
def test_invalid_param_combinations(self, height, width, distortion_scale, device, dtype):
with pytest.raises(Exception):
random_perspective_generator(
batch_size=8,
height=height,
width=width,
distortion_scale=distortion_scale.to(device=device, dtype=dtype)
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 2
res = random_perspective_generator(batch_size, 200, 200, torch.tensor(0.5, device=device, dtype=dtype))
expected = dict(
start_points=torch.tensor(
[[[0., 0.], [199., 0.], [199., 199.], [0., 199.]], [[0., 0.], [199., 0.], [199., 199.], [0., 199.]]],
device=device,
dtype=dtype
),
end_points=torch.tensor(
[
[[44.1135, 45.7502], [179.8568, 47.9653], [179.4776, 168.9552], [12.8286, 159.3179]],
[[47.0386, 6.6593], [152.2701, 29.6790], [155.5298, 170.6142], [37.0547, 177.5298]]
],
device=device,
dtype=dtype
),
)
assert res.keys() == expected.keys()
assert_allclose(res['start_points'], expected['start_points'])
assert_allclose(res['end_points'], expected['end_points'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 2
res = random_perspective_generator(
batch_size, 200, 200, torch.tensor(0.5, device=device, dtype=dtype), same_on_batch=True
)
expected = dict(
start_points=torch.tensor([[[0., 0.], [199., 0.], [199., 199.], [0., 199.]]], device=device,
dtype=dtype).repeat(2, 1, 1),
end_points=torch.tensor(
[[[44.1135, 45.7502], [179.8568, 47.9653], [179.4776, 168.9552], [12.8286, 159.3179]]],
device=device,
dtype=dtype
).repeat(2, 1, 1),
)
assert res.keys() == expected.keys()
assert_allclose(res['start_points'], expected['start_points'])
assert_allclose(res['end_points'], expected['end_points'])
class TestRandomAffineGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 4])
@pytest.mark.parametrize('height', [200])
@pytest.mark.parametrize('width', [300])
@pytest.mark.parametrize('degrees', [torch.tensor([0, 30])])
@pytest.mark.parametrize('translate', [None, torch.tensor([0.1, 0.1])])
@pytest.mark.parametrize('scale', [None, torch.tensor([0.7, 1.2])])
@pytest.mark.parametrize('shear', [None, torch.tensor([[0, 20], [0, 20]])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(
self, batch_size, height, width, degrees, translate, scale, shear, same_on_batch, device, dtype
):
random_affine_generator(
batch_size=batch_size,
height=height,
width=width,
degrees=degrees.to(device=device, dtype=dtype),
translate=translate.to(device=device, dtype=dtype) if translate is not None else None,
scale=scale.to(device=device, dtype=dtype) if scale is not None else None,
shear=shear.to(device=device, dtype=dtype) if shear is not None else None,
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'height,width,degrees,translate,scale,shear', [
(-100, 100, torch.tensor([10, 20]), None, None, None),
(100, -100, torch.tensor([10, 20]), None, None, None),
(100, 100, 0.5, None, None, None),
(100, 100, torch.tensor([10, 20, 30]), None, None, None),
(100, 100, torch.tensor([10, 20]), torch.tensor([0.1]), None, None),
(10, 10, torch.tensor([1, 2]), torch.tensor([0.1, 0.2, 0.3]), None, None),
(100, 100, torch.tensor([10, 20]), None, torch.tensor([1]), None),
(100, 100, torch.tensor([10, 20]), None, torch.tensor([1, 2, 3]), None),
(100, 100, torch.tensor([10, 20]), None, None, torch.tensor([1])),
(100, 100, torch.tensor([10, 20]), None, None, torch.tensor([1, 2])),
(10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3])),
(10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3, 4])),
(10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3, 4, 5])),
]
)
def test_invalid_param_combinations(self, height, width, degrees, translate, scale, shear, device, dtype):
with pytest.raises(Exception):
random_affine_generator(
batch_size=8,
height=height,
width=width,
degrees=degrees.to(device=device, dtype=dtype),
translate=translate.to(device=device, dtype=dtype) if translate is not None else None,
scale=scale.to(device=device, dtype=dtype) if scale is not None else None,
shear=shear.to(device=device, dtype=dtype) if shear is not None else None
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20], device=device, dtype=dtype)
translate = torch.tensor([0.1, 0.1], device=device, dtype=dtype)
scale = torch.tensor([0.7, 1.2], device=device, dtype=dtype)
shear = torch.tensor([[10, 20], [10, 20]], device=device, dtype=dtype)
res = random_affine_generator(
batch_size=2,
height=200,
width=200,
degrees=degrees,
translate=translate,
scale=scale,
shear=shear,
same_on_batch=False
)
expected = dict(
translations=torch.tensor([[-4.3821, -9.7371], [4.0358, 11.7457]], device=device, dtype=dtype),
center=torch.tensor([[99.5000, 99.5000], [99.5000, 99.5000]], device=device, dtype=dtype),
scale=torch.tensor([[0.8914, 0.8914], [1.1797, 1.1797]], device=device, dtype=dtype),
angle=torch.tensor([18.8227, 19.1500], device=device, dtype=dtype),
sx=torch.tensor([19.4077, 11.3319], device=device, dtype=dtype),
sy=torch.tensor([19.3460, 15.9358], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['translations'], expected['translations'], rtol=1e-4, atol=1e-4)
assert_allclose(res['center'], expected['center'], rtol=1e-4, atol=1e-4)
assert_allclose(res['scale'], expected['scale'], rtol=1e-4, atol=1e-4)
assert_allclose(res['angle'], expected['angle'], rtol=1e-4, atol=1e-4)
assert_allclose(res['sx'], expected['sx'], rtol=1e-4, atol=1e-4)
assert_allclose(res['sy'], expected['sy'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20], device=device, dtype=dtype)
translate = torch.tensor([0.1, 0.1], device=device, dtype=dtype)
scale = torch.tensor([0.7, 1.2], device=device, dtype=dtype)
shear = torch.tensor([[10, 20], [10, 20]], device=device, dtype=dtype)
res = random_affine_generator(
batch_size=2,
height=200,
width=200,
degrees=degrees,
translate=translate,
scale=scale,
shear=shear,
same_on_batch=True
)
expected = dict(
translations=torch.tensor([[-4.6854, 18.3722], [-4.6854, 18.3722]], device=device, dtype=dtype),
center=torch.tensor([[99.5000, 99.5000], [99.5000, 99.5000]], device=device, dtype=dtype),
scale=torch.tensor([[1.1575, 1.1575], [1.1575, 1.1575]], device=device, dtype=dtype),
angle=torch.tensor([18.8227, 18.8227], device=device, dtype=dtype),
sx=torch.tensor([13.9045, 13.9045], device=device, dtype=dtype),
sy=torch.tensor([16.0090, 16.0090], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['translations'], expected['translations'], rtol=1e-4, atol=1e-4)
assert_allclose(res['center'], expected['center'], rtol=1e-4, atol=1e-4)
assert_allclose(res['scale'], expected['scale'], rtol=1e-4, atol=1e-4)
assert_allclose(res['angle'], expected['angle'], rtol=1e-4, atol=1e-4)
assert_allclose(res['sx'], expected['sx'], rtol=1e-4, atol=1e-4)
assert_allclose(res['sy'], expected['sy'], rtol=1e-4, atol=1e-4)
class TestRandomRotationGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('degrees', [torch.tensor([0, 30])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, degrees, same_on_batch, device, dtype):
random_rotation_generator(
batch_size=batch_size, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=same_on_batch
)
@pytest.mark.parametrize('degrees', [(torch.tensor(10)), (torch.tensor([10])), (torch.tensor([10, 20, 30]))])
def test_invalid_param_combinations(self, degrees, device, dtype):
batch_size = 8
with pytest.raises(Exception):
random_rotation_generator(batch_size=batch_size, degrees=degrees.to(device=device, dtype=dtype))
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20])
res = random_rotation_generator(
batch_size=2, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=False
)
expected = dict(degrees=torch.tensor([18.8227, 19.1500], device=device, dtype=dtype))
assert res.keys() == expected.keys()
assert_allclose(res['degrees'], expected['degrees'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20])
res = random_rotation_generator(
batch_size=2, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=True
)
expected = dict(degrees=torch.tensor([18.8227, 18.8227], device=device, dtype=dtype))
assert res.keys() == expected.keys()
assert_allclose(res['degrees'], expected['degrees'])
class TestRandomCropGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 2])
@pytest.mark.parametrize('input_size', [(200, 200)])
@pytest.mark.parametrize('size', [(100, 100), torch.tensor([50, 50])])
@pytest.mark.parametrize('resize_to', [None, (100, 100)])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, input_size, size, resize_to, same_on_batch, device, dtype):
if isinstance(size, torch.Tensor):
size = size.repeat(batch_size, 1).to(device=device, dtype=dtype)
random_crop_generator(
batch_size=batch_size,
input_size=input_size,
size=size,
resize_to=resize_to,
same_on_batch=same_on_batch,
device=device,
dtype=dtype
)
@pytest.mark.parametrize(
'input_size,size,resize_to', [
((-300, 300), (200, 200), (100, 100)),
((200, 200), torch.tensor([50, 50]), (100, 100)),
]
)
def test_invalid_param_combinations(self, input_size, size, resize_to, device, dtype):
batch_size = 2
with pytest.raises(Exception):
random_crop_generator(
batch_size=batch_size,
input_size=input_size,
size=size.to(device=device, dtype=dtype) if isinstance(size, torch.Tensor) else size,
resize_to=resize_to
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20], device=device, dtype=dtype)
res = random_crop_generator(
batch_size=2,
input_size=(100, 100),
size=torch.tensor([[50, 60], [70, 80]], device=device, dtype=dtype),
resize_to=(200, 200)
)
expected = dict(
src=torch.tensor(
[[[36, 19], [95, 19], [95, 68], [36, 68]], [[19, 29], [98, 29], [98, 98], [19, 98]]],
device=device,
dtype=dtype
),
dst=torch.tensor(
[[[0, 0], [199, 0], [199, 199], [0, 199]], [[0, 0], [199, 0], [199, 199], [0, 199]]],
device=device,
dtype=dtype
),
input_size=torch.tensor([[100, 100], [100, 100]], device=device, dtype=torch.long)
)
assert res.keys() == expected.keys()
assert_allclose(res['src'], expected['src'])
assert_allclose(res['dst'], expected['dst'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20], device=device, dtype=dtype)
res = random_crop_generator(
batch_size=2,
input_size=(100, 100),
size=torch.tensor([[50, 60], [70, 80]], device=device, dtype=dtype),
resize_to=(200, 200),
same_on_batch=True
)
expected = dict(
src=torch.tensor(
[[[36, 46], [95, 46], [95, 95], [36, 95]], [[36, 46], [115, 46], [115, 115], [36, 115]]],
device=device,
dtype=dtype
),
dst=torch.tensor(
[[[0, 0], [199, 0], [199, 199], [0, 199]], [[0, 0], [199, 0], [199, 199], [0, 199]]],
device=device,
dtype=dtype
),
input_size=torch.tensor([[100, 100], [100, 100]], device=device, dtype=torch.long)
)
assert res.keys() == expected.keys()
assert_allclose(res['src'], expected['src'])
assert_allclose(res['dst'], expected['dst'])
class TestRandomCropSizeGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('size', [(200, 200)])
@pytest.mark.parametrize('scale', [torch.tensor([.7, 1.3])])
@pytest.mark.parametrize('ratio', [torch.tensor([.9, 1.1])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, size, scale, ratio, same_on_batch, device, dtype):
random_crop_size_generator(
batch_size=batch_size,
size=size,
scale=scale.to(device=device, dtype=dtype),
ratio=ratio.to(device=device, dtype=dtype),
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'size,scale,ratio', [
((100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1])),
((100, 100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1])),
((100, 100), torch.tensor([.7]), torch.tensor([.9, 1.1])),
((100, 100), torch.tensor([.7, 1.3, 1.5]), torch.tensor([.9, 1.1])),
((100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9])),
((100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1, 1.3])),
]
)
def test_invalid_param_combinations(self, size, scale, ratio, device, dtype):
batch_size = 2
with pytest.raises(Exception):
random_crop_size_generator(
batch_size=batch_size,
size=size,
scale=scale.to(device=device, dtype=dtype),
ratio=ratio.to(device=device, dtype=dtype),
                same_on_batch=False
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
res = random_crop_size_generator(
batch_size=8,
size=(100, 100),
scale=torch.tensor([0.7, 1.3], device=device, dtype=dtype),
ratio=torch.tensor([0.9, 1.1], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
size=torch.tensor(
[[99, 94], [91, 95], [90, 96], [87, 86], [94, 98], [87, 81], [85, 93], [83, 90]],
device=device,
dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['size'], expected['size'])
res = random_crop_size_generator(
batch_size=100,
size=(100, 100),
scale=torch.tensor([0.999, 1.], device=device, dtype=dtype),
ratio=torch.tensor([1., 1.], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(size=torch.tensor([[100, 100]], device=device, dtype=dtype).repeat(100, 1))
assert res.keys() == expected.keys()
assert_allclose(res['size'], expected['size'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
degrees = torch.tensor([10, 20])
res = random_crop_size_generator(
batch_size=8,
size=(100, 100),
scale=torch.tensor([0.7, 1.3], device=device, dtype=dtype),
ratio=torch.tensor([0.9, 1.1], device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
size=torch.tensor(
[[99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95]],
device=device,
dtype=dtype
),
)
assert res.keys() == expected.keys()
assert_allclose(res['size'], expected['size'])
class TestRandomRectangleGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('height', [200])
@pytest.mark.parametrize('width', [300])
@pytest.mark.parametrize('scale', [torch.tensor([.7, 1.1])])
@pytest.mark.parametrize('ratio', [torch.tensor([.7, 1.1])])
@pytest.mark.parametrize('value', [0])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(
self, batch_size, height, width, scale, ratio, value, same_on_batch, device, dtype
):
random_rectangles_params_generator(
batch_size=batch_size,
height=height,
width=width,
scale=scale.to(device=device, dtype=dtype),
ratio=ratio.to(device=device, dtype=dtype),
value=value,
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'height,width,scale,ratio,value', [
(-100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 0),
(100, -100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 0),
(100, -100, torch.tensor([0.7]), torch.tensor([0.7, 1.3]), 0),
(100, 100, torch.tensor([0.7, 1.3, 1.5]), torch.tensor([0.7, 1.3]), 0),
(100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7]), 0),
(100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3, 1.5]), 0),
(100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), -1),
(100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 2),
(100, 100, torch.tensor([.5, .7]), torch.tensor([.7, .9]), torch.tensor(0.5)),
]
)
def test_invalid_param_combinations(self, height, width, scale, ratio, value, device, dtype):
batch_size = 8
with pytest.raises(Exception):
random_rectangles_params_generator(
batch_size=batch_size,
height=height,
width=width,
scale=scale.to(device=device, dtype=dtype),
ratio=ratio.to(device=device, dtype=dtype),
value=value,
                same_on_batch=False
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
width, height = 100, 150
scale = torch.tensor([0.7, 1.3], device=device, dtype=dtype)
ratio = torch.tensor([0.7, 1.3], device=device, dtype=dtype)
value = 0.5
res = random_rectangles_params_generator(
batch_size=2, height=height, width=width, scale=scale, ratio=ratio, value=value, same_on_batch=False
)
expected = dict(
widths=torch.tensor([100, 100], device=device, dtype=dtype),
heights=torch.tensor([0, 0], device=device, dtype=dtype),
xs=torch.tensor([0, 0], device=device, dtype=dtype),
ys=torch.tensor([6, 8], device=device, dtype=dtype),
values=torch.tensor([0.5000, 0.5000], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['widths'], expected['widths'])
        assert_allclose(res['heights'], expected['heights'])
assert_allclose(res['xs'], expected['xs'])
assert_allclose(res['ys'], expected['ys'])
assert_allclose(res['values'], expected['values'])
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
width, height = 100, 150
scale = torch.tensor([0.7, 1.3], device=device, dtype=dtype)
ratio = torch.tensor([0.7, 1.3], device=device, dtype=dtype)
value = 0.5
res = random_rectangles_params_generator(
batch_size=2, height=height, width=width, scale=scale, ratio=ratio, value=value, same_on_batch=True
)
expected = dict(
widths=torch.tensor([100, 100], device=device, dtype=dtype),
heights=torch.tensor([0, 0], device=device, dtype=dtype),
xs=torch.tensor([0, 0], device=device, dtype=dtype),
ys=torch.tensor([10, 10], device=device, dtype=dtype),
values=torch.tensor([0.5000, 0.5000], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['widths'], expected['widths'])
        assert_allclose(res['heights'], expected['heights'])
assert_allclose(res['xs'], expected['xs'])
assert_allclose(res['ys'], expected['ys'])
assert_allclose(res['values'], expected['values'])
class TestCenterCropGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 2])
@pytest.mark.parametrize('height', [200])
@pytest.mark.parametrize('width', [200])
@pytest.mark.parametrize('size', [(100, 100)])
def test_valid_param_combinations(self, batch_size, height, width, size, device, dtype):
center_crop_generator(batch_size=batch_size, height=height, width=width, size=size)
@pytest.mark.parametrize(
'height,width,size', [
(200, -200, (100, 100)),
(-200, 200, (100, 100)),
(100, 100, (120, 120)),
(150, 100, (120, 120)),
(100, 150, (120, 120)),
]
)
def test_invalid_param_combinations(self, height, width, size, device, dtype):
batch_size = 2
with pytest.raises(Exception):
center_crop_generator(batch_size=batch_size, height=height, width=width, size=size)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
res = center_crop_generator(batch_size=2, height=200, width=200, size=(120, 150))
expected = dict(
src=torch.tensor(
[[[25, 40], [174, 40], [174, 159], [25, 159]], [[25, 40], [174, 40], [174, 159], [25, 159]]],
device=device,
dtype=torch.long
),
dst=torch.tensor(
[[[0, 0], [149, 0], [149, 119], [0, 119]], [[0, 0], [149, 0], [149, 119], [0, 119]]],
device=device,
dtype=torch.long
),
input_size=torch.tensor([[200, 200], [200, 200]], device=device, dtype=torch.long)
)
assert res.keys() == expected.keys()
assert_allclose(res['src'].to(device=device), expected['src'])
assert_allclose(res['dst'].to(device=device), expected['dst'])
def test_same_on_batch(self, device, dtype):
pass
class TestRandomMotionBlur(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('kernel_size', [3, (3, 5)])
@pytest.mark.parametrize('angle', [torch.tensor([10, 30])])
@pytest.mark.parametrize('direction', [torch.tensor([-1, -1]), torch.tensor([1, 1])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, kernel_size, angle, direction, same_on_batch, device, dtype):
random_motion_blur_generator(
batch_size=batch_size,
kernel_size=kernel_size,
angle=angle.to(device=device, dtype=dtype),
direction=direction.to(device=device, dtype=dtype),
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'kernel_size,angle,direction', [
(4, torch.tensor([30, 100]), torch.tensor([-1, 1])),
(1, torch.tensor([30, 100]), torch.tensor([-1, 1])),
((1, 2, 3), torch.tensor([30, 100]), torch.tensor([-1, 1])),
(3, torch.tensor([30, 100]), torch.tensor([-2, 1])),
(3, torch.tensor([30, 100]), torch.tensor([-1, 2])),
]
)
def test_invalid_param_combinations(self, kernel_size, angle, direction, device, dtype):
with pytest.raises(Exception):
random_motion_blur_generator(
batch_size=8,
kernel_size=kernel_size,
angle=angle.to(device=device, dtype=dtype),
direction=direction.to(device=device, dtype=dtype)
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
angle = torch.tensor([30, 90])
direction = torch.tensor([-1, 1])
res = random_motion_blur_generator(
batch_size=2,
kernel_size=3,
angle=angle.to(device=device, dtype=dtype),
direction=direction.to(device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
ksize_factor=torch.tensor([3, 3], device=device, dtype=torch.int32),
angle_factor=torch.tensor([82.9362, 84.9002], device=device, dtype=dtype),
direction_factor=torch.tensor([-0.2343, 0.9186], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['ksize_factor'], expected['ksize_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['angle_factor'], expected['angle_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['direction_factor'], expected['direction_factor'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
angle = torch.tensor([30, 90])
direction = torch.tensor([-1, 1])
res = random_motion_blur_generator(
batch_size=2,
kernel_size=3,
angle=angle.to(device=device, dtype=dtype),
direction=direction.to(device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
ksize_factor=torch.tensor([3, 3], device=device, dtype=torch.int32),
angle_factor=torch.tensor([82.9362, 82.9362], device=device, dtype=dtype),
direction_factor=torch.tensor([0.8300, 0.8300], device=device, dtype=dtype)
)
assert res.keys() == expected.keys()
assert_allclose(res['ksize_factor'], expected['ksize_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['angle_factor'], expected['angle_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['direction_factor'], expected['direction_factor'], rtol=1e-4, atol=1e-4)
class TestRandomSolarizeGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('thresholds', [torch.tensor([0, 1]), torch.tensor([0.4, 0.6])])
@pytest.mark.parametrize('additions', [torch.tensor([-0.5, 0.5])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, thresholds, additions, same_on_batch, device, dtype):
random_solarize_generator(
batch_size=batch_size,
thresholds=thresholds.to(device=device, dtype=dtype),
additions=additions.to(device=device, dtype=dtype),
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'thresholds,additions', [
(torch.tensor([0, 2]), torch.tensor([-0.5, 0.5])),
(torch.tensor([-1, 1]), torch.tensor([-0.5, 0.5])),
([0, 1], torch.tensor([-0.5, 0.5])),
(torch.tensor([0, 1]), torch.tensor([-0.5, 1])),
(torch.tensor([0, 1]), torch.tensor([-1, 0.5])),
(torch.tensor([0, 1]), [-0.5, 0.5]),
]
)
def test_invalid_param_combinations(self, thresholds, additions, device, dtype):
with pytest.raises(Exception):
random_solarize_generator(
                batch_size=8,
thresholds=thresholds.to(device=device, dtype=dtype),
additions=additions.to(device=device, dtype=dtype)
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_solarize_generator(
batch_size=batch_size,
thresholds=torch.tensor([0, 1], device=device, dtype=dtype),
additions=torch.tensor([-0.5, 0.5], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
thresholds_factor=torch.tensor(
[0.8823, 0.9150, 0.3829, 0.9593, 0.3904, 0.6009, 0.2566, 0.7936], device=device, dtype=dtype
),
additions_factor=torch.tensor(
[0.4408, -0.3668, 0.4346, 0.0936, 0.3694, 0.0677, 0.2411, -0.0706], device=device, dtype=dtype
),
)
assert res.keys() == expected.keys()
assert_allclose(res['thresholds_factor'], expected['thresholds_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['additions_factor'], expected['additions_factor'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_solarize_generator(
batch_size=batch_size,
thresholds=torch.tensor([0, 1], device=device, dtype=dtype),
additions=torch.tensor([-0.5, 0.5], device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
thresholds_factor=torch.tensor(
[0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823], device=device, dtype=dtype
),
additions_factor=torch.tensor(
[0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150], device=device, dtype=dtype
),
)
assert res.keys() == expected.keys()
assert_allclose(res['thresholds_factor'], expected['thresholds_factor'], rtol=1e-4, atol=1e-4)
assert_allclose(res['additions_factor'], expected['additions_factor'], rtol=1e-4, atol=1e-4)
class TestRandomPosterizeGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('bits', [torch.tensor([0, 8])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, bits, same_on_batch, device, dtype):
random_posterize_generator(
batch_size=batch_size, bits=bits.to(device=device, dtype=dtype), same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'bits', [
(torch.tensor([-1, 1])),
(torch.tensor([0, 9])),
(torch.tensor([3])),
([0, 8]),
]
)
def test_invalid_param_combinations(self, bits, device, dtype):
with pytest.raises(Exception):
random_posterize_generator(
                batch_size=8, bits=bits.to(device=device, dtype=dtype)
)
def test_random_gen(self, device, dtype):
torch.manual_seed(9)
batch_size = 8
res = random_posterize_generator(
batch_size=batch_size, bits=torch.tensor([0, 8], device=device, dtype=dtype), same_on_batch=False
)
expected = dict(bits_factor=torch.tensor([5, 2, 3, 6, 7, 7, 2, 7], device=device, dtype=torch.int32))
assert res.keys() == expected.keys()
assert_allclose(res['bits_factor'], expected['bits_factor'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(9)
batch_size = 8
res = random_posterize_generator(
batch_size=batch_size, bits=torch.tensor([0, 8], device=device, dtype=dtype), same_on_batch=True
)
expected = dict(bits_factor=torch.tensor([5, 5, 5, 5, 5, 5, 5, 5], device=device, dtype=torch.int32))
assert res.keys() == expected.keys()
assert_allclose(res['bits_factor'], expected['bits_factor'], rtol=1e-4, atol=1e-4)
class TestRandomSharpnessGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('sharpness', [torch.tensor([0., 1.])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, sharpness, same_on_batch, device, dtype):
random_sharpness_generator(
batch_size=batch_size, sharpness=sharpness.to(device=device, dtype=dtype), same_on_batch=same_on_batch
)
@pytest.mark.parametrize('sharpness', [
(torch.tensor([-1, 5])),
(torch.tensor([3])),
([0, 1.]),
])
def test_invalid_param_combinations(self, sharpness, device, dtype):
with pytest.raises(Exception):
random_sharpness_generator(
                batch_size=8, sharpness=sharpness.to(device=device, dtype=dtype)
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_sharpness_generator(
batch_size=batch_size, sharpness=torch.tensor([0., 1.], device=device, dtype=dtype), same_on_batch=False
)
expected = dict(
            sharpness_factor=torch.tensor(
                [0.8823, 0.9150, 0.3829, 0.9593, 0.3904, 0.6009, 0.2566, 0.7936], device=device, dtype=dtype
            )
)
assert res.keys() == expected.keys()
assert_allclose(res['sharpness_factor'], expected['sharpness_factor'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_sharpness_generator(
batch_size=batch_size, sharpness=torch.tensor([0., 1.], device=device, dtype=dtype), same_on_batch=True
)
expected = dict(
            sharpness_factor=torch.tensor(
                [0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823], device=device, dtype=dtype
            )
)
assert res.keys() == expected.keys()
assert_allclose(res['sharpness_factor'], expected['sharpness_factor'], rtol=1e-4, atol=1e-4)
class TestRandomMixUpGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('p', [0., 0.5, 1.])
@pytest.mark.parametrize('lambda_val', [None, torch.tensor([0., 1.])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(self, batch_size, p, lambda_val, same_on_batch, device, dtype):
random_mixup_generator(
batch_size=batch_size,
p=p,
lambda_val=lambda_val.to(device=device, dtype=dtype) if isinstance(lambda_val,
(torch.Tensor)) else lambda_val,
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'lambda_val', [
(torch.tensor([-1, 1])),
(torch.tensor([0, 2])),
(torch.tensor([0, 0.5, 1])),
([0., 1.]),
]
)
def test_invalid_param_combinations(self, lambda_val, device, dtype):
with pytest.raises(Exception):
random_mixup_generator(batch_size=8, lambda_val=lambda_val.to(device=device, dtype=dtype))
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 8
res = random_mixup_generator(
batch_size=batch_size,
p=0.5,
lambda_val=torch.tensor([0., 1.], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
mixup_pairs=torch.tensor([6, 1, 0, 7, 2, 5, 3, 4], device=device, dtype=torch.long),
mixup_lambdas=torch.tensor(
[0.0000, 0.0000, 0.5739, 0.0000, 0.6274, 0.0000, 0.4414, 0.0000], device=device, dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['mixup_pairs'], expected['mixup_pairs'], rtol=1e-4, atol=1e-4)
assert_allclose(res['mixup_lambdas'], expected['mixup_lambdas'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(9)
batch_size = 8
res = random_mixup_generator(
batch_size=batch_size,
p=.9999,
lambda_val=torch.tensor([0., 1.], device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
mixup_pairs=torch.tensor([4, 6, 7, 5, 0, 1, 3, 2], device=device, dtype=torch.long),
mixup_lambdas=torch.tensor(
[0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804], device=device, dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['mixup_pairs'], expected['mixup_pairs'], rtol=1e-4, atol=1e-4)
assert_allclose(res['mixup_lambdas'], expected['mixup_lambdas'], rtol=1e-4, atol=1e-4)
class TestRandomCutMixGen(RandomGeneratorBaseTests):
@pytest.mark.parametrize('batch_size', [0, 1, 8])
@pytest.mark.parametrize('p', [0, 0.5, 1.])
@pytest.mark.parametrize('width,height', [(200, 200)])
@pytest.mark.parametrize('num_mix', [1, 3])
@pytest.mark.parametrize('beta', [None, torch.tensor(1e-15), torch.tensor(1.)])
@pytest.mark.parametrize('cut_size', [None, torch.tensor([0., 1.]), torch.tensor([0.3, 0.6])])
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_valid_param_combinations(
self, batch_size, p, width, height, num_mix, beta, cut_size, same_on_batch, device, dtype
):
random_cutmix_generator(
batch_size=batch_size,
p=p,
width=width,
height=height,
num_mix=num_mix,
beta=beta.to(device=device, dtype=dtype) if isinstance(beta, (torch.Tensor)) else beta,
cut_size=cut_size.to(device=device, dtype=dtype) if isinstance(cut_size, (torch.Tensor)) else cut_size,
same_on_batch=same_on_batch
)
@pytest.mark.parametrize(
'width,height,num_mix,beta,cut_size', [
(200, -200, 1, None, None),
(-200, 200, 1, None, None),
(200, 200, 0, None, None),
(200, 200, 1.5, None, None),
(200, 200, 1, torch.tensor([0., 1.]), None),
(200, 200, 1, None, torch.tensor([-1., 1.])),
(200, 200, 1, None, torch.tensor([0., 2.])),
]
)
@pytest.mark.parametrize('same_on_batch', [True, False])
def test_invalid_param_combinations(self, width, height, num_mix, beta, cut_size, same_on_batch, device, dtype):
with pytest.raises(Exception):
random_cutmix_generator(
batch_size=8,
p=0.5,
width=width,
height=height,
num_mix=num_mix,
beta=beta.to(device=device, dtype=dtype) if isinstance(beta, (torch.Tensor)) else beta,
                cut_size=cut_size.to(device=device, dtype=dtype) if isinstance(cut_size, (torch.Tensor)) else cut_size,
same_on_batch=same_on_batch
)
def test_random_gen(self, device, dtype):
torch.manual_seed(42)
batch_size = 2
res = random_cutmix_generator(
batch_size=batch_size,
width=200,
height=200,
p=0.5,
num_mix=1,
beta=torch.tensor(1., device=device, dtype=dtype),
cut_size=torch.tensor([0., 1.], device=device, dtype=dtype),
same_on_batch=False
)
expected = dict(
mix_pairs=torch.tensor([[0, 1]], device=device, dtype=torch.long),
crop_src=torch.tensor(
[[[[71, 108], [70, 108], [70, 107], [71, 107]], [[39, 1], [38, 1], [38, 0], [39, 0]]]],
device=device,
dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['mix_pairs'], expected['mix_pairs'], rtol=1e-4, atol=1e-4)
assert_allclose(res['crop_src'], expected['crop_src'], rtol=1e-4, atol=1e-4)
def test_same_on_batch(self, device, dtype):
torch.manual_seed(42)
batch_size = 2
res = random_cutmix_generator(
batch_size=batch_size,
width=200,
height=200,
p=0.5,
num_mix=1,
beta=torch.tensor(1., device=device, dtype=dtype),
cut_size=torch.tensor([0., 1.], device=device, dtype=dtype),
same_on_batch=True
)
expected = dict(
mix_pairs=torch.tensor([[1, 0]], device=device, dtype=torch.long),
crop_src=torch.tensor(
[[[[114, 53], [113, 53], [113, 52], [114, 52]], [[114, 53], [113, 53], [113, 52], [114, 52]]]],
device=device,
dtype=dtype
)
)
assert res.keys() == expected.keys()
assert_allclose(res['mix_pairs'], expected['mix_pairs'], rtol=1e-4, atol=1e-4)
assert_allclose(res['crop_src'], expected['crop_src'], rtol=1e-4, atol=1e-4)
| [
[
[
7,
13
],
[
1043,
1049
],
[
1092,
1098
],
[
1146,
1152
],
[
1385,
1391
],
[
1699,
1705
],
[
2116,
2122
],
[
2557,
2563
],
[
2634,
2640
],
[
2709,
2715
],
[
2786,
2792
],
[
2857,
2863
],
[
2911,
2917
],
[
3546,
3552
],
[
9189,
9195
],
[
9248,
9254
],
[
9354,
9360
],
[
9408,
9414
],
[
9834,
9840
],
[
12551,
12557
],
[
12605,
12611
],
[
12651,
12657
],
[
12696,
12702
],
[
12761,
12767
],
[
12837,
12843
],
[
12909,
12915
],
[
12989,
12995
],
[
13704,
13710
],
[
18968,
18974
],
[
19022,
19028
],
[
19087,
19093
],
[
19401,
19407
],
[
20739,
20745
],
[
20790,
20796
],
[
20847,
20853
],
[
20922,
20928
],
[
20984,
20990
],
[
21541,
21547
],
[
24376,
24382
],
[
24430,
24436
],
[
24481,
24487
],
[
24546,
24552
],
[
24611,
24617
],
[
25036,
25042
],
[
27978,
27984
],
[
28032,
28038
],
[
28078,
28084
],
[
28123,
28129
],
[
28188,
28194
],
[
28253,
28259
],
[
28296,
28302
],
[
28813,
28819
],
[
32504,
32510
],
[
32555,
32561
],
[
32601,
32607
],
[
32646,
32652
],
[
32883,
32889
],
[
34417,
34423
],
[
34471,
34477
],
[
34528,
34534
],
[
34592,
34598
],
[
34682,
34688
],
[
35142,
35148
],
[
38089,
38095
],
[
38143,
38149
],
[
38236,
38242
],
[
38307,
38313
],
[
38729,
38735
],
[
41546,
41552
],
[
41600,
41606
],
[
41661,
41667
],
[
41967,
41973
],
[
43504,
43510
],
[
43558,
43564
],
[
43626,
43632
],
[
43947,
43953
],
[
45619,
45625
],
[
45673,
45679
],
[
45722,
45728
],
[
45797,
45803
],
[
46298,
46304
],
[
48451,
48457
],
[
48505,
48511
],
[
48553,
48559
],
[
48612,
48618
],
[
48660,
48666
],
[
48744,
48750
],
[
48843,
48849
],
[
49481,
49487
],
[
49907,
49913
],
[
1614,
1620
],
[
4824,
4830
],
[
10339,
10345
],
[
14921,
14927
],
[
19617,
19623
],
[
21861,
21867
],
[
25681,
25687
],
[
29759,
29765
],
[
33256,
33262
],
[
35663,
35669
],
[
39249,
39255
],
[
42235,
42241
],
[
44160,
44166
],
[
46588,
46594
],
[
50093,
50099
]
],
[
[
21,
26
],
[
2602,
2607
],
[
2677,
2682
],
[
2754,
2759
],
[
2824,
2829
],
[
3720,
3725
],
[
3777,
3782
],
[
3833,
3838
],
[
3883,
3888
],
[
3935,
3940
],
[
4001,
4006
],
[
4058,
4063
],
[
4108,
4113
],
[
4160,
4165
],
[
4226,
4231
],
[
4283,
4288
],
[
4333,
4338
],
[
4385,
4390
],
[
4451,
4456
],
[
4508,
4513
],
[
4563,
4568
],
[
4613,
4618
],
[
4665,
4670
],
[
9293,
9298
],
[
9311,
9316
],
[
9330,
9335
],
[
10015,
10020
],
[
10059,
10064
],
[
10102,
10107
],
[
10146,
10151
],
[
10189,
10194
],
[
12732,
12737
],
[
12805,
12810
],
[
12877,
12882
],
[
12949,
12954
],
[
13809,
13814
],
[
13876,
13881
],
[
13989,
13994
],
[
14059,
14064
],
[
14083,
14088
],
[
14138,
14143
],
[
14160,
14165
],
[
14227,
14232
],
[
14257,
14262
],
[
14306,
14311
],
[
14336,
14341
],
[
14391,
14396
],
[
14427,
14432
],
[
14470,
14475
],
[
14506,
14511
],
[
14550,
14555
],
[
14584,
14589
],
[
14631,
14636
],
[
14665,
14670
],
[
14715,
14720
],
[
14749,
14754
],
[
19058,
19063
],
[
19438,
19443
],
[
19458,
19463
],
[
19480,
19485
],
[
20892,
20897
],
[
21681,
21686
],
[
24515,
24520
],
[
24580,
24585
],
[
25111,
25116
],
[
25136,
25141
],
[
25192,
25197
],
[
25217,
25222
],
[
25268,
25273
],
[
25288,
25293
],
[
25339,
25344
],
[
25369,
25374
],
[
25420,
25425
],
[
25445,
25450
],
[
25491,
25496
],
[
25516,
25521
],
[
28157,
28162
],
[
28222,
28227
],
[
28906,
28911
],
[
28932,
28937
],
[
28986,
28991
],
[
29012,
29017
],
[
29066,
29071
],
[
29087,
29092
],
[
29140,
29145
],
[
29171,
29176
],
[
29224,
29229
],
[
29250,
29255
],
[
29298,
29303
],
[
29324,
29329
],
[
29382,
29387
],
[
29408,
29413
],
[
29462,
29467
],
[
29488,
29493
],
[
29541,
29546
],
[
29565,
29570
],
[
29589,
29594
],
[
34562,
34567
],
[
34630,
34635
],
[
34654,
34659
],
[
35224,
35229
],
[
35249,
35254
],
[
35289,
35294
],
[
35314,
35319
],
[
35362,
35367
],
[
35387,
35392
],
[
35427,
35432
],
[
35452,
35457
],
[
35492,
35497
],
[
35517,
35522
],
[
38182,
38187
],
[
38204,
38209
],
[
38274,
38279
],
[
38801,
38806
],
[
38823,
38828
],
[
38864,
38869
],
[
38887,
38892
],
[
38936,
38941
],
[
38977,
38982
],
[
38999,
39004
],
[
39038,
39043
],
[
39060,
39065
],
[
39099,
39104
],
[
41633,
41638
],
[
42023,
42028
],
[
42060,
42065
],
[
42096,
42101
],
[
43596,
43601
],
[
43995,
44000
],
[
44028,
44033
],
[
45767,
45772
],
[
46360,
46365
],
[
46397,
46402
],
[
46433,
46438
],
[
48699,
48704
],
[
48720,
48725
],
[
48787,
48792
],
[
48811,
48816
],
[
49740,
49745
],
[
49803,
49808
],
[
49861,
49866
],
[
1927,
1932
],
[
2059,
2064
],
[
2291,
2296
],
[
2446,
2451
],
[
5342,
5347
],
[
5489,
5494
],
[
5564,
5569
],
[
5641,
5646
],
[
5711,
5716
],
[
5844,
5849
],
[
6013,
6018
],
[
6177,
6182
],
[
6352,
6357
],
[
6511,
6516
],
[
7608,
7613
],
[
7755,
7760
],
[
7830,
7835
],
[
7907,
7912
],
[
7977,
7982
],
[
8132,
8137
],
[
8228,
8233
],
[
8319,
8324
],
[
8418,
8423
],
[
8504,
8509
],
[
10647,
10652
],
[
10757,
10762
],
[
10854,
10859
],
[
11083,
11088
],
[
11669,
11674
],
[
11792,
11797
],
[
11918,
11923
],
[
12089,
12094
],
[
15491,
15496
],
[
15531,
15536
],
[
15602,
15607
],
[
15671,
15676
],
[
15740,
15745
],
[
16119,
16124
],
[
16221,
16226
],
[
16323,
16328
],
[
16421,
16426
],
[
16498,
16503
],
[
16575,
16580
],
[
17227,
17232
],
[
17267,
17272
],
[
17338,
17343
],
[
17407,
17412
],
[
17476,
17481
],
[
17854,
17859
],
[
17957,
17962
],
[
18059,
18064
],
[
18157,
18162
],
[
18234,
18239
],
[
18311,
18316
],
[
19807,
19812
],
[
19847,
19852
],
[
20047,
20052
],
[
20273,
20278
],
[
20313,
20318
],
[
20512,
20517
],
[
21184,
21189
],
[
22077,
22082
],
[
22207,
22212
],
[
22247,
22252
],
[
22413,
22418
],
[
22561,
22566
],
[
22767,
22772
],
[
22980,
22985
],
[
23040,
23045
],
[
23271,
23276
],
[
23311,
23316
],
[
23477,
23482
],
[
23657,
23662
],
[
23867,
23872
],
[
24080,
24085
],
[
24140,
24145
],
[
26046,
26051
],
[
26183,
26188
],
[
26255,
26260
],
[
26393,
26398
],
[
26806,
26811
],
[
26879,
26884
],
[
27002,
27007
],
[
27231,
27236
],
[
27271,
27276
],
[
27409,
27414
],
[
27481,
27486
],
[
27618,
27623
],
[
30194,
30199
],
[
30265,
30270
],
[
30334,
30339
],
[
30624,
30629
],
[
30698,
30703
],
[
30763,
30768
],
[
30828,
30833
],
[
30897,
30902
],
[
31348,
31353
],
[
31419,
31424
],
[
31488,
31493
],
[
31777,
31782
],
[
31851,
31856
],
[
31916,
31921
],
[
31981,
31986
],
[
32052,
32057
],
[
33433,
33438
],
[
33586,
33591
],
[
33763,
33768
],
[
33805,
33810
],
[
33974,
33979
],
[
34023,
34028
],
[
34083,
34088
],
[
35998,
36003
],
[
36036,
36041
],
[
36079,
36084
],
[
36410,
36415
],
[
36452,
36457
],
[
36491,
36496
],
[
36582,
36587
],
[
37042,
37047
],
[
37080,
37085
],
[
37123,
37128
],
[
37453,
37458
],
[
37495,
37500
],
[
37534,
37539
],
[
37625,
37630
],
[
39559,
39564
],
[
39703,
39708
],
[
39775,
39780
],
[
39927,
39932
],
[
40094,
40099
],
[
40551,
40556
],
[
40695,
40700
],
[
40767,
40772
],
[
40918,
40923
],
[
41085,
41090
],
[
42479,
42484
],
[
42605,
42610
],
[
42721,
42726
],
[
42781,
42786
],
[
42989,
42994
],
[
43115,
43120
],
[
43230,
43235
],
[
43290,
43295
],
[
44414,
44419
],
[
44546,
44551
],
[
44682,
44687
],
[
45016,
45021
],
[
45148,
45153
],
[
45283,
45288
],
[
46210,
46215
],
[
46772,
46777
],
[
46932,
46937
],
[
47075,
47080
],
[
47135,
47140
],
[
47174,
47179
],
[
47610,
47615
],
[
47771,
47776
],
[
47913,
47918
],
[
47973,
47978
],
[
48012,
48017
],
[
49283,
49288
],
[
49395,
49400
],
[
50380,
50385
],
[
50492,
50497
],
[
50635,
50640
],
[
50860,
50865
],
[
50927,
50932
],
[
51068,
51073
],
[
51112,
51117
],
[
51146,
51151
],
[
51622,
51627
],
[
51847,
51852
],
[
51914,
51919
],
[
52054,
52059
],
[
52098,
52103
],
[
52132,
52137
]
],
[
[
53,
68
],
[
6889,
6904
],
[
7038,
7053
],
[
7183,
7198
],
[
7296,
7311
],
[
7445,
7460
],
[
8578,
8593
],
[
8695,
8710
],
[
8808,
8823
],
[
8911,
8926
],
[
9028,
9043
],
[
11481,
11496
],
[
11552,
11567
],
[
12360,
12375
],
[
12431,
12446
],
[
16699,
16714
],
[
16792,
16807
],
[
16873,
16888
],
[
16952,
16967
],
[
17031,
17046
],
[
17104,
17119
],
[
18435,
18450
],
[
18528,
18543
],
[
18609,
18624
],
[
18688,
18703
],
[
18767,
18782
],
[
18840,
18855
],
[
20162,
20177
],
[
20627,
20642
],
[
23115,
23130
],
[
23168,
23183
],
[
24215,
24230
],
[
24268,
24283
],
[
26641,
26656
],
[
27126,
27141
],
[
27867,
27882
],
[
31019,
31034
],
[
31078,
31093
],
[
31137,
31152
],
[
31188,
31203
],
[
31239,
31254
],
[
32174,
32189
],
[
32233,
32248
],
[
32292,
32307
],
[
32343,
32358
],
[
32394,
32409
],
[
34158,
34173
],
[
34229,
34244
],
[
36705,
36720
],
[
36798,
36813
],
[
36891,
36906
],
[
37747,
37762
],
[
37840,
37855
],
[
37933,
37948
],
[
40297,
40312
],
[
40400,
40415
],
[
41286,
41301
],
[
41389,
41404
],
[
42848,
42863
],
[
43357,
43372
],
[
44865,
44880
],
[
45466,
45481
],
[
47374,
47389
],
[
47465,
47480
],
[
48212,
48227
],
[
48303,
48318
],
[
51400,
51415
],
[
51487,
51502
],
[
52394,
52409
],
[
52481,
52496
]
],
[
[
125,
146
],
[
1300,
1321
],
[
1652,
1673
],
[
1986,
2007
],
[
2352,
2373
]
],
[
[
152,
181
],
[
3115,
3144
],
[
4862,
4891
],
[
5411,
5440
],
[
7677,
7706
]
],
[
[
187,
215
],
[
9592,
9620
],
[
10377,
10405
],
[
10706,
10734
],
[
11728,
11756
]
],
[
[
221,
244
],
[
13203,
13226
],
[
14959,
14982
],
[
15817,
15840
],
[
17553,
17576
]
],
[
[
250,
275
],
[
19247,
19272
],
[
19655,
19680
],
[
19884,
19909
],
[
20350,
20375
]
],
[
[
281,
302
],
[
21284,
21305
],
[
21899,
21920
],
[
22312,
22333
],
[
23376,
23397
]
],
[
[
308,
334
],
[
24782,
24808
],
[
25719,
25745
],
[
26082,
26108
],
[
26703,
26729
],
[
27308,
27334
]
],
[
[
340,
374
],
[
28497,
28531
],
[
29797,
29831
],
[
30421,
30455
],
[
31575,
31609
]
],
[
[
380,
401
],
[
32793,
32814
],
[
33294,
33315
],
[
33469,
33490
]
],
[
[
407,
435
],
[
34864,
34892
],
[
35701,
35729
],
[
36115,
36143
],
[
37159,
37187
]
],
[
[
441,
466
],
[
38481,
38506
],
[
39287,
39312
],
[
39618,
39643
],
[
40610,
40635
]
],
[
[
472,
498
],
[
41818,
41844
],
[
42273,
42299
],
[
42537,
42563
],
[
43047,
43073
]
],
[
[
504,
530
],
[
43788,
43814
],
[
44198,
44224
],
[
44473,
44499
],
[
45075,
45101
]
],
[
[
536,
558
],
[
45963,
45985
],
[
46626,
46648
],
[
46831,
46853
],
[
47668,
47690
]
],
[
[
564,
587
],
[
49051,
49074
],
[
50131,
50154
],
[
50694,
50717
],
[
51681,
51704
]
],
[
[
599,
623
],
[
1010,
1034
],
[
2524,
2548
],
[
9156,
9180
],
[
12518,
12542
],
[
18935,
18959
],
[
20706,
20730
],
[
24343,
24367
],
[
27945,
27969
],
[
32471,
32495
],
[
34384,
34408
],
[
38056,
38080
],
[
41513,
41537
],
[
43471,
43495
],
[
45586,
45610
],
[
48418,
48442
]
],
[
[
992,
1009
]
],
[
[
2505,
2523
]
],
[
[
9131,
9155
]
],
[
[
12498,
12517
]
],
[
[
18913,
18934
]
],
[
[
20688,
20705
]
],
[
[
24321,
24342
]
],
[
[
27922,
27944
]
],
[
[
32453,
32470
]
],
[
[
34363,
34383
]
],
[
[
38034,
38055
]
],
[
[
41490,
41512
]
],
[
[
43448,
43470
]
],
[
[
45567,
45585
]
],
[
[
48398,
48417
]
]
] |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.alexa.presentation.apl.command import Command
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_sdk_model.interfaces.alexa.presentation.apl.command import Command as Command_bc5ff832
class ParallelCommand(Command):
"""
Execute a series of commands in parallel. The parallel command starts executing all child command simultaneously. The parallel command is considered finished when all of its child commands have finished. When the parallel command is terminated early, all currently executing commands are terminated.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param when: If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
:param commands: An un-ordered array of commands to execute in parallel. Once all commands have finished executing the parallel command finishes. Please note that the delay of parallel command and the delay of each command are additive.
:type commands: (optional) list[ask_sdk_model.interfaces.alexa.presentation.apl.command.Command]
"""
deserialized_types = {
'object_type': 'str',
'delay': 'int',
'description': 'str',
'when': 'bool',
'commands': 'list[ask_sdk_model.interfaces.alexa.presentation.apl.command.Command]'
} # type: Dict
attribute_map = {
'object_type': 'type',
'delay': 'delay',
'description': 'description',
'when': 'when',
'commands': 'commands'
} # type: Dict
supports_multiple_types = False
def __init__(self, delay=None, description=None, when=None, commands=None):
# type: (Union[int, str, None], Optional[str], Optional[bool], Optional[List[Command_bc5ff832]]) -> None
"""Execute a series of commands in parallel. The parallel command starts executing all child command simultaneously. The parallel command is considered finished when all of its child commands have finished. When the parallel command is terminated early, all currently executing commands are terminated.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param when: If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
:param commands: An un-ordered array of commands to execute in parallel. Once all commands have finished executing the parallel command finishes. Please note that the delay of parallel command and the delay of each command are additive.
:type commands: (optional) list[ask_sdk_model.interfaces.alexa.presentation.apl.command.Command]
"""
self.__discriminator_value = "Parallel" # type: str
self.object_type = self.__discriminator_value
super(ParallelCommand, self).__init__(object_type=self.__discriminator_value, delay=delay, description=description, when=when)
self.commands = commands
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ParallelCommand):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
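
# Minimal usage sketch (assumption: the ask_sdk_model package and six are
# importable, as the imports above require; the concrete values are
# hypothetical). It nests one ParallelCommand inside another and serializes
# the result; per the docstring, child commands start simultaneously and the
# delay of the parent and the delay of each child are additive.
if __name__ == "__main__":
    inner = ParallelCommand(delay=500, description="inner no-op", commands=[])
    outer = ParallelCommand(delay=0, description="outer", commands=[inner])
    print(outer.to_dict())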
| [
[
[
586,
592
],
[
5310,
5316
]
],
[
[
600,
602
]
],
[
[
624,
627
],
[
4235,
4238
]
],
[
[
635,
641
],
[
745,
751
]
],
[
[
659,
663
],
[
4607,
4611
],
[
4513,
4517
],
[
5037,
5041
]
],
[
[
732,
739
],
[
981,
988
]
],
[
[
790,
794
]
],
[
[
796,
800
]
],
[
[
802,
810
]
],
[
[
812,
817
]
],
[
[
819,
822
]
],
[
[
848,
856
]
],
[
[
929,
956
]
],
[
[
965,
980
],
[
3906,
3921
],
[
5609,
5624
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Test Coin Super Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various net timeouts.
- Create three testcoinsuperd nodes:
no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second
- Assert that we're connected
- Send a ping to no_verack_node and no_version_node
- Wait 30 seconds
- Assert that we're still connected
- Send a ping to no_verack_node and no_version_node
- Wait 31 seconds
- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
"""
from time import sleep
from test_framework.messages import msg_ping
from test_framework.mininode import P2PInterface
from test_framework.test_framework import TestCoinSuperTestFramework
class TestP2PConn(P2PInterface):
def on_version(self, message):
# Don't send a verack in response
pass
class TimeoutsTest(TestCoinSuperTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
# Setup the p2p connections
no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn())
no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)
no_send_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False, wait_for_verack=False)
sleep(1)
assert no_verack_node.is_connected
assert no_version_node.is_connected
assert no_send_node.is_connected
no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())
sleep(30)
assert "version" in no_verack_node.last_message
assert no_verack_node.is_connected
assert no_version_node.is_connected
assert no_send_node.is_connected
no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())
sleep(31)
assert not no_verack_node.is_connected
assert not no_version_node.is_connected
assert not no_send_node.is_connected
if __name__ == '__main__':
TimeoutsTest().main()
| [
[
[
833,
838
],
[
1646,
1651
],
[
1891,
1896
],
[
2194,
2199
]
],
[
[
876,
884
],
[
1821,
1829
],
[
1870,
1878
],
[
2124,
2132
],
[
2173,
2181
]
],
[
[
921,
933
],
[
1022,
1034
]
],
[
[
976,
1002
],
[
1147,
1173
]
],
[
[
1010,
1021
],
[
1391,
1402
],
[
1465,
1476
],
[
1579,
1590
]
],
[
[
1134,
1146
],
[
2377,
2389
]
]
] |
MRPYTHON_VERSION_MAJOR = 3
MRPYTHON_VERSION_MINOR = 0
MRPYTHON_VERSION_PATCH = 9
MRPYTHON_VERSION_TAG = "beta"
def version_string():
return "{}.{}.{}{}".format(MRPYTHON_VERSION_MAJOR,
MRPYTHON_VERSION_MINOR,
MRPYTHON_VERSION_PATCH,
"" if MRPYTHON_VERSION_TAG == "" else ("-" + MRPYTHON_VERSION_TAG))
| [
[
[
1,
23
],
[
167,
189
]
],
[
[
28,
50
],
[
222,
244
]
],
[
[
55,
77
],
[
277,
299
]
],
[
[
82,
102
],
[
338,
358
],
[
377,
397
]
],
[
[
118,
132
]
]
] |
import torch
import torch.optim as optim
def set_optimizer(model, cfg):
r"""Sets the optimizer
"""
if cfg.optimizer == 'SGD':
optimizer = optim.SGD(model.parameters(), lr=cfg.lr,
momentum=cfg.momentum, weight_decay=cfg.weight_decay,
nesterov=cfg.nesterov)
elif cfg.optimizer == 'Adam':
optimizer = optim.Adam(model.parameters(), lr=cfg.lr,
betas=(cfg.momentum, 0.999),
weight_decay=cfg.weight_decay)
    else:
        raise ValueError("Unsupported optimizer: {}".format(cfg.optimizer))
    return optimizer
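
# Minimal usage sketch (assumption: cfg may be any namespace-like object that
# exposes the attributes read above; the project's real config class may differ).
if __name__ == "__main__":
    from types import SimpleNamespace
    model = torch.nn.Linear(4, 2)
    cfg = SimpleNamespace(optimizer='SGD', lr=0.1, momentum=0.9,
                          weight_decay=5e-4, nesterov=True)
    print(set_optimizer(model, cfg))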
| [
[
[
7,
12
]
],
[
[
20,
40
],
[
160,
165
],
[
392,
397
]
],
[
[
47,
60
]
]
] |
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from datetime import datetime
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union
from uuid import UUID, uuid4
from pydantic import BaseModel, Field, root_validator, validator
from pydantic.dataclasses import dataclass
from .consts import ONE_HOUR, SEVEN_DAYS
from .enums import (
OS,
Architecture,
Compare,
ContainerPermission,
ContainerType,
ErrorCode,
GithubIssueSearchMatch,
GithubIssueState,
HeartbeatType,
JobState,
NodeState,
NodeTaskState,
PoolState,
ScalesetState,
StatsFormat,
TaskDebugFlag,
TaskFeature,
TaskState,
TaskType,
VmState,
)
from .primitives import Container, PoolName, Region
class UserInfo(BaseModel):
application_id: Optional[UUID]
object_id: Optional[UUID]
upn: Optional[str]
# Stores the address of a secret
class SecretAddress(BaseModel):
# keyvault address of a secret
url: str
T = TypeVar("T")
# This class allows us to store some data that are intended to be secret
# The secret field stores either the raw data or the address of that data
# This class allows us to maintain backward compatibility with existing
# NotificationTemplate classes
@dataclass
class SecretData(Generic[T]):
secret: Union[T, SecretAddress]
def __init__(self, secret: Union[T, SecretAddress]):
if isinstance(secret, dict):
self.secret = SecretAddress.parse_obj(secret)
else:
self.secret = secret
def __str__(self) -> str:
return self.__repr__()
def __repr__(self) -> str:
if isinstance(self.secret, SecretAddress):
return str(self.secret)
else:
return "[REDACTED]"
class EnumModel(BaseModel):
@root_validator(pre=True)
def exactly_one(cls: Any, values: Any) -> Any:
some = []
for field, val in values.items():
if val is not None:
some.append(field)
if not some:
raise ValueError("no variant set for enum")
if len(some) > 1:
raise ValueError("multiple values set for enum: %s" % some)
return values
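
# Minimal sketch (hypothetical subclass, not part of this module): models derived
# from EnumModel must be built with exactly one field populated, which is how the
# WorkerEvent/NodeEvent/NodeCommand variants below are meant to be used.
if __name__ == "__main__":
    class _DemoEvent(EnumModel):
        started: Optional[str]
        stopped: Optional[str]

    print(_DemoEvent(started="task-1"))
    try:
        _DemoEvent(started="a", stopped="b")
    except ValueError as err:
        print(err)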
class Error(BaseModel):
code: ErrorCode
errors: List[str]
OkType = TypeVar("OkType")
Result = Union[OkType, Error]
class FileEntry(BaseModel):
container: Container
filename: str
sas_url: Optional[str]
class Authentication(BaseModel):
password: str
public_key: str
private_key: str
class JobConfig(BaseModel):
project: str
name: str
build: str
duration: int
@validator("duration", allow_reuse=True)
def check_duration(cls, value: int) -> int:
if value < ONE_HOUR or value > SEVEN_DAYS:
raise ValueError("invalid duration")
return value
class ReproConfig(BaseModel):
container: Container
path: str
duration: int
@validator("duration", allow_reuse=True)
def check_duration(cls, value: int) -> int:
if value < ONE_HOUR or value > SEVEN_DAYS:
raise ValueError("invalid duration")
return value
class TaskDetails(BaseModel):
type: TaskType
duration: int
target_exe: Optional[str]
target_env: Optional[Dict[str, str]]
target_options: Optional[List[str]]
target_workers: Optional[int]
target_options_merge: Optional[bool]
check_asan_log: Optional[bool]
check_debugger: Optional[bool] = Field(default=True)
check_retry_count: Optional[int]
check_fuzzer_help: Optional[bool]
expect_crash_on_failure: Optional[bool]
rename_output: Optional[bool]
supervisor_exe: Optional[str]
supervisor_env: Optional[Dict[str, str]]
supervisor_options: Optional[List[str]]
supervisor_input_marker: Optional[str]
generator_exe: Optional[str]
generator_env: Optional[Dict[str, str]]
generator_options: Optional[List[str]]
analyzer_exe: Optional[str]
analyzer_env: Optional[Dict[str, str]]
analyzer_options: Optional[List[str]]
wait_for_files: Optional[ContainerType]
stats_file: Optional[str]
stats_format: Optional[StatsFormat]
reboot_after_setup: Optional[bool]
target_timeout: Optional[int]
ensemble_sync_delay: Optional[int]
preserve_existing_outputs: Optional[bool]
report_list: Optional[List[str]]
minimized_stack_depth: Optional[int]
@validator("check_retry_count", allow_reuse=True)
def validate_check_retry_count(cls, value: int) -> int:
if value is not None:
if value < 0:
raise ValueError("invalid check_retry_count")
return value
@validator("target_timeout", allow_reuse=True)
def check_target_timeout(cls, value: Optional[int]) -> Optional[int]:
if value is not None:
if value < 1:
raise ValueError("invalid target_timeout")
return value
@validator("duration", allow_reuse=True)
def check_duration(cls, value: int) -> int:
if value < ONE_HOUR or value > SEVEN_DAYS:
raise ValueError("invalid duration")
return value
class TaskPool(BaseModel):
count: int
pool_name: PoolName
class TaskVm(BaseModel):
region: Region
sku: str
image: str
count: int = Field(default=1)
spot_instances: bool = Field(default=False)
reboot_after_setup: Optional[bool]
@validator("count", allow_reuse=True)
def check_count(cls, value: int) -> int:
if value <= 0:
raise ValueError("invalid count")
return value
class TaskContainers(BaseModel):
type: ContainerType
name: Container
class TaskConfig(BaseModel):
job_id: UUID
prereq_tasks: Optional[List[UUID]]
task: TaskDetails
vm: Optional[TaskVm]
pool: Optional[TaskPool]
containers: List[TaskContainers]
tags: Dict[str, str]
debug: Optional[List[TaskDebugFlag]]
colocate: Optional[bool]
class BlobRef(BaseModel):
account: str
container: Container
name: str
class Report(BaseModel):
input_url: Optional[str]
input_blob: Optional[BlobRef]
executable: str
crash_type: str
crash_site: str
call_stack: List[str]
call_stack_sha256: str
input_sha256: str
asan_log: Optional[str]
task_id: UUID
job_id: UUID
scariness_score: Optional[int]
scariness_description: Optional[str]
minimized_stack: Optional[List[str]]
minimized_stack_sha256: Optional[str]
minimized_stack_function_names: Optional[List[str]]
minimized_stack_function_names_sha256: Optional[str]
class NoReproReport(BaseModel):
input_sha256: str
input_blob: Optional[BlobRef]
executable: str
task_id: UUID
job_id: UUID
tries: int
error: Optional[str]
class CrashTestResult(BaseModel):
crash_report: Optional[Report]
no_repro: Optional[NoReproReport]
class RegressionReport(BaseModel):
crash_test_result: CrashTestResult
original_crash_test_result: Optional[CrashTestResult]
class ADODuplicateTemplate(BaseModel):
increment: List[str]
comment: Optional[str]
set_state: Dict[str, str]
ado_fields: Dict[str, str]
class ADOTemplate(BaseModel):
base_url: str
auth_token: SecretData[str]
project: str
type: str
unique_fields: List[str]
comment: Optional[str]
ado_fields: Dict[str, str]
on_duplicate: ADODuplicateTemplate
# validator needed for backward compatibility
@validator("auth_token", pre=True, always=True)
def validate_auth_token(cls, v: Any) -> SecretData:
if isinstance(v, str):
return SecretData(secret=v)
elif isinstance(v, SecretData):
return v
elif isinstance(v, dict):
return SecretData(secret=v["secret"])
else:
raise TypeError(f"invalid datatype {type(v)}")
class TeamsTemplate(BaseModel):
url: SecretData[str]
# validator needed for backward compatibility
@validator("url", pre=True, always=True)
def validate_url(cls, v: Any) -> SecretData:
if isinstance(v, str):
return SecretData(secret=v)
elif isinstance(v, SecretData):
return v
elif isinstance(v, dict):
return SecretData(secret=v["secret"])
else:
raise TypeError(f"invalid datatype {type(v)}")
class ContainerDefinition(BaseModel):
type: ContainerType
compare: Compare
value: int
permissions: List[ContainerPermission]
class VmDefinition(BaseModel):
compare: Compare
value: int
class TaskDefinition(BaseModel):
features: List[TaskFeature]
containers: List[ContainerDefinition]
monitor_queue: Optional[ContainerType]
vm: VmDefinition
# TODO: service shouldn't pass SyncedDir, but just the url and let the agent
# come up with paths
class SyncedDir(BaseModel):
path: str
url: str
CONTAINER_DEF = Optional[Union[SyncedDir, List[SyncedDir]]]
class ClientCredentials(BaseModel):
client_id: UUID
client_secret: str
class AgentConfig(BaseModel):
client_credentials: Optional[ClientCredentials]
onefuzz_url: str
pool_name: PoolName
heartbeat_queue: Optional[str]
instance_telemetry_key: Optional[str]
microsoft_telemetry_key: Optional[str]
multi_tenant_domain: Optional[str]
instance_id: UUID
class TaskUnitConfig(BaseModel):
instance_id: UUID
job_id: UUID
task_id: UUID
task_type: TaskType
instance_telemetry_key: Optional[str]
microsoft_telemetry_key: Optional[str]
heartbeat_queue: str
# command_queue: str
input_queue: Optional[str]
supervisor_exe: Optional[str]
supervisor_env: Optional[Dict[str, str]]
supervisor_options: Optional[List[str]]
supervisor_input_marker: Optional[str]
target_exe: Optional[str]
target_env: Optional[Dict[str, str]]
target_options: Optional[List[str]]
target_timeout: Optional[int]
target_options_merge: Optional[bool]
target_workers: Optional[int]
check_asan_log: Optional[bool]
check_debugger: Optional[bool]
check_retry_count: Optional[int]
check_fuzzer_help: Optional[bool]
expect_crash_on_failure: Optional[bool]
rename_output: Optional[bool]
generator_exe: Optional[str]
generator_env: Optional[Dict[str, str]]
generator_options: Optional[List[str]]
wait_for_files: Optional[str]
analyzer_exe: Optional[str]
analyzer_env: Optional[Dict[str, str]]
analyzer_options: Optional[List[str]]
stats_file: Optional[str]
stats_format: Optional[StatsFormat]
ensemble_sync_delay: Optional[int]
report_list: Optional[List[str]]
minimized_stack_depth: Optional[int]
# from here forwards are Container definitions. These need to be inline
# with TaskDefinitions and ContainerTypes
analysis: CONTAINER_DEF
coverage: CONTAINER_DEF
crashes: CONTAINER_DEF
inputs: CONTAINER_DEF
no_repro: CONTAINER_DEF
readonly_inputs: CONTAINER_DEF
reports: CONTAINER_DEF
tools: CONTAINER_DEF
unique_inputs: CONTAINER_DEF
unique_reports: CONTAINER_DEF
regression_reports: CONTAINER_DEF
class Forward(BaseModel):
src_port: int
dst_ip: str
dst_port: int
class ProxyConfig(BaseModel):
url: str
notification: str
region: Region
forwards: List[Forward]
instance_telemetry_key: Optional[str]
microsoft_telemetry_key: Optional[str]
instance_id: UUID
class ProxyHeartbeat(BaseModel):
region: Region
forwards: List[Forward]
timestamp: datetime = Field(default_factory=datetime.utcnow)
class Files(BaseModel):
files: List[str]
class WorkUnit(BaseModel):
job_id: UUID
task_id: UUID
task_type: TaskType
# JSON-serialized `TaskUnitConfig`.
config: str
class WorkSet(BaseModel):
reboot: bool
setup_url: str
script: bool
work_units: List[WorkUnit]
class WorkUnitSummary(BaseModel):
job_id: UUID
task_id: UUID
task_type: TaskType
class WorkSetSummary(BaseModel):
work_units: List[WorkUnitSummary]
class GithubIssueDuplicate(BaseModel):
comment: Optional[str]
labels: List[str]
reopen: bool
class GithubIssueSearch(BaseModel):
author: Optional[str]
state: Optional[GithubIssueState]
field_match: List[GithubIssueSearchMatch]
string: str
class GithubAuth(BaseModel):
user: str
personal_access_token: str
class GithubIssueTemplate(BaseModel):
auth: SecretData[GithubAuth]
organization: str
repository: str
title: str
body: str
unique_search: GithubIssueSearch
assignees: List[str]
labels: List[str]
on_duplicate: GithubIssueDuplicate
# validator needed for backward compatibility
@validator("auth", pre=True, always=True)
def validate_auth(cls, v: Any) -> SecretData:
if isinstance(v, str):
return SecretData(secret=v)
elif isinstance(v, SecretData):
return v
elif isinstance(v, dict):
try:
return SecretData(GithubAuth.parse_obj(v))
except Exception:
return SecretData(secret=v["secret"])
else:
raise TypeError(f"invalid datatype {type(v)}")
NotificationTemplate = Union[ADOTemplate, TeamsTemplate, GithubIssueTemplate]
class Notification(BaseModel):
container: Container
notification_id: UUID = Field(default_factory=uuid4)
config: NotificationTemplate
class JobTaskInfo(BaseModel):
task_id: UUID
type: TaskType
state: TaskState
class Job(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
job_id: UUID = Field(default_factory=uuid4)
state: JobState = Field(default=JobState.init)
config: JobConfig
error: Optional[str]
end_time: Optional[datetime] = None
task_info: Optional[List[JobTaskInfo]]
user_info: Optional[UserInfo]
class TaskHeartbeatEntry(BaseModel):
task_id: UUID
job_id: Optional[UUID]
machine_id: UUID
data: List[Dict[str, HeartbeatType]]
class NodeHeartbeatEntry(BaseModel):
node_id: UUID
data: List[Dict[str, HeartbeatType]]
class Node(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
pool_name: PoolName
machine_id: UUID
state: NodeState = Field(default=NodeState.init)
scaleset_id: Optional[UUID] = None
tasks: Optional[List[Tuple[UUID, NodeTaskState]]] = None
heartbeat: Optional[datetime]
version: str = Field(default="1.0.0")
reimage_requested: bool = Field(default=False)
delete_requested: bool = Field(default=False)
debug_keep_node: bool = Field(default=False)
class ScalesetSummary(BaseModel):
scaleset_id: UUID
state: ScalesetState
class NodeTasks(BaseModel):
machine_id: UUID
task_id: UUID
state: NodeTaskState = Field(default=NodeTaskState.init)
class AutoScaleConfig(BaseModel):
image: str
max_size: Optional[int] # max size of pool
min_size: int = Field(default=0) # min size of pool
region: Optional[Region]
scaleset_size: int # Individual scaleset size
spot_instances: bool = Field(default=False)
ephemeral_os_disks: bool = Field(default=False)
vm_sku: str
@validator("scaleset_size", allow_reuse=True)
def check_scaleset_size(cls, value: int) -> int:
if value < 1 or value > 1000:
raise ValueError("invalid scaleset size")
return value
@root_validator()
def check_data(cls, values: Any) -> Any:
if (
"max_size" in values
and values.get("max_size")
and values.get("min_size") > values.get("max_size")
):
raise ValueError("The pool min_size is greater than max_size")
return values
@validator("max_size", allow_reuse=True)
def check_max_size(cls, value: Optional[int]) -> Optional[int]:
if value and value < 1:
raise ValueError("Autoscale sizes are not defined properly")
return value
@validator("min_size", allow_reuse=True)
def check_min_size(cls, value: int) -> int:
if value < 0 or value > 1000:
raise ValueError("Invalid pool min_size")
return value
class Pool(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
name: PoolName
pool_id: UUID = Field(default_factory=uuid4)
os: OS
managed: bool
autoscale: Optional[AutoScaleConfig]
arch: Architecture
state: PoolState = Field(default=PoolState.init)
client_id: Optional[UUID]
nodes: Optional[List[Node]]
config: Optional[AgentConfig]
# work_queue is explicitly not saved to Tables (see save_exclude). This is
# intended to be used to pass the information to the CLI when the CLI asks
# for information about what work is in the queue for the pool.
work_queue: Optional[List[WorkSetSummary]]
# explicitly excluded from Tables
scaleset_summary: Optional[List[ScalesetSummary]]
class ScalesetNodeState(BaseModel):
machine_id: UUID
instance_id: str
state: Optional[NodeState]
class Scaleset(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
pool_name: PoolName
scaleset_id: UUID = Field(default_factory=uuid4)
state: ScalesetState = Field(default=ScalesetState.init)
auth: Optional[Authentication]
vm_sku: str
image: str
region: Region
size: int
spot_instances: bool
ephemeral_os_disks: bool = Field(default=False)
needs_config_update: bool = Field(default=False)
error: Optional[Error]
nodes: Optional[List[ScalesetNodeState]]
client_id: Optional[UUID]
client_object_id: Optional[UUID]
tags: Dict[str, str] = Field(default_factory=lambda: {})
@validator("size", allow_reuse=True)
def check_size(cls, value: int) -> int:
if value < 0:
raise ValueError("Invalid scaleset size")
return value
class NotificationConfig(BaseModel):
config: NotificationTemplate
class Repro(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
vm_id: UUID = Field(default_factory=uuid4)
task_id: UUID
config: ReproConfig
state: VmState = Field(default=VmState.init)
auth: Optional[Authentication]
os: OS
error: Optional[Error]
ip: Optional[str]
end_time: Optional[datetime]
user_info: Optional[UserInfo]
class ExitStatus(BaseModel):
code: Optional[int]
signal: Optional[int]
success: bool
class ProcessOutput(BaseModel):
exit_status: ExitStatus
stderr: str
stdout: str
class WorkerRunningEvent(BaseModel):
task_id: UUID
class WorkerDoneEvent(BaseModel):
task_id: UUID
exit_status: ExitStatus
stderr: str
stdout: str
class WorkerEvent(EnumModel):
done: Optional[WorkerDoneEvent]
running: Optional[WorkerRunningEvent]
class NodeSettingUpEventData(BaseModel):
tasks: List[UUID]
class NodeDoneEventData(BaseModel):
error: Optional[str]
script_output: Optional[ProcessOutput]
NodeStateData = Union[NodeSettingUpEventData, NodeDoneEventData]
class NodeStateUpdate(BaseModel):
state: NodeState
data: Optional[NodeStateData]
@root_validator(pre=False, skip_on_failure=True)
def check_data(cls, values: Any) -> Any:
data = values.get("data")
if data:
state = values["state"]
if state == NodeState.setting_up:
if isinstance(data, NodeSettingUpEventData):
return values
if state == NodeState.done:
if isinstance(data, NodeDoneEventData):
return values
raise ValueError(
"data for node state update event does not match state = %s" % state
)
else:
# For now, `data` is always optional.
return values
class NodeEvent(EnumModel):
state_update: Optional[NodeStateUpdate]
worker_event: Optional[WorkerEvent]
# Temporary shim type to support hot upgrade of 1.0.0 nodes.
#
# We want future variants to use an externally-tagged repr.
NodeEventShim = Union[NodeStateUpdate, NodeEvent, WorkerEvent]
class NodeEventEnvelope(BaseModel):
machine_id: UUID
event: NodeEventShim
class StopNodeCommand(BaseModel):
pass
class StopTaskNodeCommand(BaseModel):
task_id: UUID
class NodeCommandAddSshKey(BaseModel):
public_key: str
class NodeCommand(EnumModel):
stop: Optional[StopNodeCommand]
stop_task: Optional[StopTaskNodeCommand]
add_ssh_key: Optional[NodeCommandAddSshKey]
class NodeCommandEnvelope(BaseModel):
command: NodeCommand
message_id: str
class TaskEvent(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
task_id: UUID
machine_id: UUID
event_data: WorkerEvent
class TaskEventSummary(BaseModel):
timestamp: Optional[datetime]
event_data: str
event_type: str
class NodeAssignment(BaseModel):
node_id: UUID
scaleset_id: Optional[UUID]
state: NodeTaskState
class Task(BaseModel):
timestamp: Optional[datetime] = Field(alias="Timestamp")
job_id: UUID
task_id: UUID = Field(default_factory=uuid4)
state: TaskState = Field(default=TaskState.init)
os: OS
config: TaskConfig
error: Optional[Error]
auth: Optional[Authentication]
heartbeat: Optional[datetime]
end_time: Optional[datetime]
events: Optional[List[TaskEventSummary]]
nodes: Optional[List[NodeAssignment]]
user_info: Optional[UserInfo]
| [
[
[
119,
127
],
[
11615,
11623
],
[
11582,
11590
],
[
13630,
13638
],
[
13836,
13844
],
[
14223,
14231
],
[
14482,
14490
],
[
16287,
16295
],
[
17166,
17174
],
[
18075,
18083
],
[
18368,
18376
],
[
20745,
20753
],
[
20910,
20918
],
[
21119,
21127
],
[
21395,
21403
],
[
21428,
21436
]
],
[
[
147,
150
],
[
1917,
1920
],
[
1896,
1899
],
[
1909,
1912
],
[
7587,
7590
],
[
8080,
8083
],
[
12845,
12848
],
[
15529,
15532
],
[
15521,
15524
],
[
19309,
19312
],
[
19301,
19304
]
],
[
[
152,
156
],
[
3308,
3312
],
[
3747,
3751
],
[
3911,
3915
],
[
4029,
4033
],
[
5898,
5902
],
[
7163,
7167
],
[
7194,
7198
],
[
7394,
7398
],
[
9727,
9731
],
[
9885,
9889
],
[
10334,
10338
],
[
10486,
10490
],
[
14050,
14054
],
[
14148,
14152
],
[
17719,
17723
]
],
[
[
158,
165
],
[
1335,
1342
]
],
[
[
167,
171
],
[
2308,
2312
],
[
3353,
3357
],
[
3796,
3800
],
[
3959,
3963
],
[
4076,
4080
],
[
4385,
4389
],
[
5763,
5767
],
[
5867,
5871
],
[
5933,
5937
],
[
6233,
6237
],
[
6461,
6465
],
[
6559,
6563
],
[
7111,
7115
],
[
7341,
7345
],
[
8506,
8510
],
[
8650,
8654
],
[
8684,
8688
],
[
8973,
8977
],
[
9776,
9780
],
[
9930,
9934
],
[
10382,
10386
],
[
10533,
10537
],
[
10679,
10683
],
[
11364,
11368
],
[
11553,
11557
],
[
11669,
11673
],
[
11921,
11925
],
[
12082,
12086
],
[
12184,
12188
],
[
12330,
12334
],
[
12647,
12651
],
[
12669,
12673
],
[
13877,
13881
],
[
14045,
14049
],
[
14143,
14147
],
[
14417,
14421
],
[
16588,
16592
],
[
16887,
16891
],
[
16979,
16983
],
[
17617,
17621
],
[
18940,
18944
],
[
21459,
21463
],
[
21503,
21507
]
],
[
[
173,
181
],
[
853,
861
],
[
883,
891
],
[
907,
915
],
[
2463,
2471
],
[
3269,
3277
],
[
3299,
3307
],
[
3344,
3352
],
[
3384,
3392
],
[
3424,
3432
],
[
3459,
3467
],
[
3494,
3502
],
[
3554,
3562
],
[
3591,
3599
],
[
3635,
3643
],
[
3669,
3677
],
[
3704,
3712
],
[
3738,
3746
],
[
3787,
3795
],
[
3836,
3844
],
[
3869,
3877
],
[
3902,
3910
],
[
3950,
3958
],
[
3988,
3996
],
[
4020,
4028
],
[
4067,
4075
],
[
4107,
4115
],
[
4147,
4155
],
[
4179,
4187
],
[
4225,
4233
],
[
4260,
4268
],
[
4299,
4307
],
[
4344,
4352
],
[
4376,
4384
],
[
4423,
4431
],
[
5416,
5424
],
[
5754,
5762
],
[
5805,
5813
],
[
5832,
5840
],
[
5924,
5932
],
[
5968,
5976
],
[
6109,
6117
],
[
6139,
6147
],
[
6306,
6314
],
[
6376,
6384
],
[
6417,
6425
],
[
6452,
6460
],
[
6500,
6508
],
[
6550,
6558
],
[
6613,
6621
],
[
6699,
6707
],
[
6798,
6806
],
[
6866,
6874
],
[
6897,
6905
],
[
7029,
7037
],
[
7134,
7142
],
[
7364,
7372
],
[
8729,
8737
],
[
8947,
8955
],
[
9128,
9136
],
[
9222,
9230
],
[
9264,
9272
],
[
9307,
9315
],
[
9346,
9354
],
[
9526,
9534
],
[
9569,
9577
],
[
9650,
9658
],
[
9684,
9692
],
[
9718,
9726
],
[
9767,
9775
],
[
9816,
9824
],
[
9846,
9854
],
[
9876,
9884
],
[
9921,
9929
],
[
9961,
9969
],
[
10001,
10009
],
[
10036,
10044
],
[
10070,
10078
],
[
10105,
10113
],
[
10143,
10151
],
[
10180,
10188
],
[
10224,
10232
],
[
10258,
10266
],
[
10292,
10300
],
[
10325,
10333
],
[
10373,
10381
],
[
10413,
10421
],
[
10445,
10453
],
[
10477,
10485
],
[
10524,
10532
],
[
10560,
10568
],
[
10592,
10600
],
[
10639,
10647
],
[
10670,
10678
],
[
10717,
10725
],
[
11406,
11414
],
[
11449,
11457
],
[
12158,
12166
],
[
12261,
12269
],
[
12286,
12294
],
[
13621,
13629
],
[
13799,
13807
],
[
13827,
13835
],
[
13868,
13876
],
[
13911,
13919
],
[
13999,
14007
],
[
14214,
14222
],
[
14375,
14383
],
[
14408,
14416
],
[
14473,
14481
],
[
14962,
14970
],
[
15065,
15073
],
[
16278,
16286
],
[
16436,
16444
],
[
16553,
16561
],
[
16579,
16587
],
[
16612,
16620
],
[
16878,
16886
],
[
16970,
16978
],
[
17093,
17101
],
[
17157,
17165
],
[
17351,
17359
],
[
17581,
17589
],
[
17608,
17616
],
[
17657,
17665
],
[
17694,
17702
],
[
18066,
18074
],
[
18260,
18268
],
[
18307,
18315
],
[
18331,
18339
],
[
18359,
18367
],
[
18393,
18401
],
[
18453,
18461
],
[
18479,
18487
],
[
18818,
18826
],
[
18857,
18865
],
[
19000,
19008
],
[
19033,
19041
],
[
19191,
19199
],
[
19943,
19951
],
[
19987,
19995
],
[
20487,
20495
],
[
20528,
20536
],
[
20575,
20583
],
[
20736,
20744
],
[
20901,
20909
],
[
21030,
21038
],
[
21110,
21118
],
[
21320,
21328
],
[
21346,
21354
],
[
21386,
21394
],
[
21419,
21427
],
[
21450,
21458
],
[
21494,
21502
],
[
21540,
21548
],
[
4802,
4810
],
[
4784,
4792
],
[
15890,
15898
],
[
15872,
15880
]
],
[
[
183,
188
],
[
14422,
14427
]
],
[
[
190,
197
],
[
1042,
1049
],
[
2329,
2336
]
],
[
[
199,
204
],
[
1360,
1365
],
[
2356,
2361
],
[
8956,
8961
],
[
13289,
13294
],
[
19075,
19080
],
[
20150,
20155
],
[
1416,
1421
]
],
[
[
222,
226
],
[
862,
866
],
[
892,
896
],
[
5731,
5735
],
[
5768,
5772
],
[
6333,
6337
],
[
6350,
6354
],
[
6750,
6754
],
[
6767,
6771
],
[
9044,
9048
],
[
9377,
9381
],
[
9434,
9438
],
[
9451,
9455
],
[
9469,
9473
],
[
11480,
11484
],
[
11720,
11724
],
[
11738,
11742
],
[
11984,
11988
],
[
12002,
12006
],
[
13423,
13427
],
[
13537,
13541
],
[
13679,
13683
],
[
13982,
13986
],
[
14008,
14012
],
[
14030,
14034
],
[
14128,
14132
],
[
14300,
14304
],
[
14384,
14388
],
[
14428,
14432
],
[
14737,
14741
],
[
14813,
14817
],
[
14831,
14835
],
[
16356,
16360
],
[
16562,
16566
],
[
17056,
17060
],
[
17244,
17248
],
[
17666,
17670
],
[
17703,
17707
],
[
18123,
18127
],
[
18172,
18176
],
[
18657,
18661
],
[
18711,
18715
],
[
18945,
18949
],
[
20251,
20255
],
[
20379,
20383
],
[
20795,
20799
],
[
20816,
20820
],
[
21008,
21012
],
[
21039,
21043
],
[
21168,
21172
],
[
21186,
21190
]
],
[
[
228,
233
],
[
13452,
13457
],
[
13708,
13713
],
[
16385,
16390
],
[
17273,
17278
],
[
18152,
18157
],
[
21215,
21220
]
],
[
[
256,
265
],
[
821,
830
],
[
976,
985
],
[
1829,
1838
],
[
2264,
2273
],
[
2395,
2404
],
[
2500,
2509
],
[
2589,
2598
],
[
2900,
2909
],
[
3204,
3213
],
[
5185,
5194
],
[
5251,
5260
],
[
5632,
5641
],
[
5707,
5716
],
[
5999,
6008
],
[
6082,
6091
],
[
6649,
6658
],
[
6836,
6845
],
[
6946,
6955
],
[
7084,
7093
],
[
7229,
7238
],
[
7918,
7927
],
[
8417,
8426
],
[
8553,
8562
],
[
8624,
8633
],
[
8890,
8899
],
[
9017,
9026
],
[
9092,
9101
],
[
9405,
9414
],
[
11200,
11209
],
[
11284,
11293
],
[
11508,
11517
],
[
11646,
11655
],
[
11696,
11705
],
[
11840,
11849
],
[
11960,
11969
],
[
12054,
12063
],
[
12133,
12142
],
[
12237,
12246
],
[
12394,
12403
],
[
12479,
12488
],
[
13365,
13374
],
[
13512,
13521
],
[
13594,
13603
],
[
13957,
13966
],
[
14103,
14112
],
[
14187,
14196
],
[
14708,
14717
],
[
14785,
14794
],
[
14921,
14930
],
[
16251,
16260
],
[
17028,
17037
],
[
17130,
17139
],
[
17980,
17989
],
[
18039,
18048
],
[
18431,
18440
],
[
18533,
18542
],
[
18632,
18641
],
[
18686,
18695
],
[
18917,
18926
],
[
18977,
18986
],
[
19148,
19157
],
[
20223,
20232
],
[
20305,
20314
],
[
20354,
20363
],
[
20413,
20422
],
[
20634,
20643
],
[
20709,
20718
],
[
20874,
20883
],
[
20983,
20992
],
[
21083,
21092
]
],
[
[
267,
272
],
[
3511,
3516
],
[
5327,
5332
],
[
5371,
5376
],
[
11593,
11598
],
[
13430,
13435
],
[
13642,
13647
],
[
13686,
13691
],
[
13737,
13742
],
[
14235,
14240
],
[
14328,
14333
],
[
14511,
14516
],
[
14564,
14569
],
[
14614,
14619
],
[
14663,
14668
],
[
14863,
14868
],
[
15016,
15021
],
[
15160,
15165
],
[
15212,
15217
],
[
16299,
16304
],
[
16363,
16368
],
[
16508,
16513
],
[
17178,
17183
],
[
17251,
17256
],
[
17307,
17312
],
[
17496,
17501
],
[
17549,
17554
],
[
17736,
17741
],
[
18087,
18092
],
[
18130,
18135
],
[
18222,
18227
],
[
20757,
20762
],
[
21131,
21136
],
[
21193,
21198
],
[
21245,
21250
]
],
[
[
274,
288
],
[
1846,
1860
],
[
15472,
15486
],
[
19221,
19235
]
],
[
[
290,
299
],
[
2671,
2680
],
[
2975,
2984
],
[
4443,
4452
],
[
4697,
4706
],
[
4959,
4968
],
[
5437,
5446
],
[
7504,
7513
],
[
8011,
8020
],
[
12774,
12783
],
[
15255,
15264
],
[
15797,
15806
],
[
16037,
16046
],
[
17776,
17785
]
],
[
[
333,
342
],
[
1308,
1317
]
],
[
[
364,
372
],
[
2778,
2786
],
[
3082,
3090
],
[
5066,
5074
]
],
[
[
374,
384
],
[
2798,
2808
],
[
3102,
3112
],
[
5086,
5096
]
],
[
[
410,
412
],
[
16400,
16402
],
[
18293,
18295
],
[
21283,
21285
]
],
[
[
418,
430
],
[
16472,
16484
]
],
[
[
436,
443
],
[
8466,
8473
],
[
8578,
8585
]
],
[
[
449,
468
],
[
8511,
8530
]
],
[
[
474,
487
],
[
4116,
4129
],
[
5654,
5667
],
[
8439,
8452
],
[
8738,
8751
]
],
[
[
493,
502
],
[
2286,
2295
]
],
[
[
508,
530
],
[
12335,
12357
]
],
[
[
536,
552
],
[
12295,
12311
]
],
[
[
558,
571
],
[
14060,
14073
],
[
14158,
14171
]
],
[
[
577,
585
],
[
13751,
13759
],
[
13726,
13734
]
],
[
[
591,
600
],
[
14342,
14351
],
[
14316,
14325
],
[
17102,
17111
],
[
19171,
19180
],
[
19427,
19436
],
[
19569,
19578
]
],
[
[
606,
619
],
[
14434,
14447
],
[
14877,
14890
],
[
14847,
14860
],
[
21056,
21069
]
],
[
[
625,
634
],
[
16522,
16531
],
[
16496,
16505
]
],
[
[
640,
653
],
[
14753,
14766
],
[
17321,
17334
],
[
17291,
17304
]
],
[
[
659,
670
],
[
4188,
4199
],
[
10601,
10612
]
],
[
[
676,
689
],
[
5938,
5951
]
],
[
[
695,
706
],
[
8655,
8666
]
],
[
[
712,
721
],
[
13572,
13581
],
[
21259,
21268
],
[
21233,
21242
]
],
[
[
727,
735
],
[
3226,
3234
],
[
9489,
9497
],
[
11758,
11766
],
[
12022,
12030
],
[
13552,
13560
]
],
[
[
741,
748
],
[
18236,
18243
],
[
18212,
18219
]
],
[
[
776,
785
],
[
2422,
2431
],
[
2927,
2936
],
[
5678,
5687
],
[
6043,
6052
],
[
13392,
13401
]
],
[
[
787,
795
],
[
5227,
5235
],
[
9192,
9200
],
[
14275,
14283
],
[
16334,
16342
],
[
17218,
17226
]
],
[
[
797,
803
],
[
5275,
5281
],
[
11343,
11349
],
[
11532,
11538
],
[
15074,
15080
],
[
17419,
17425
]
],
[
[
812,
820
],
[
13920,
13928
],
[
18402,
18410
],
[
21549,
21557
]
],
[
[
962,
975
],
[
1369,
1382
],
[
1425,
1438
],
[
1505,
1518
],
[
1713,
1726
]
],
[
[
1038,
1039
],
[
1343,
1344
],
[
1366,
1367
],
[
1422,
1423
]
],
[
[
1324,
1334
],
[
7275,
7285
],
[
7939,
7949
],
[
12501,
12511
],
[
7595,
7605
],
[
7657,
7667
],
[
7705,
7715
],
[
7792,
7802
],
[
8088,
8098
],
[
8150,
8160
],
[
8198,
8208
],
[
8285,
8295
],
[
12853,
12863
],
[
12915,
12925
],
[
12963,
12973
],
[
13071,
13081
],
[
13160,
13170
]
],
[
[
1819,
1828
],
[
18796,
18805
],
[
19913,
19922
],
[
20465,
20474
]
],
[
[
2258,
2263
],
[
2370,
2375
],
[
17590,
17595
],
[
18316,
18321
],
[
21329,
21334
]
],
[
[
2320,
2326
],
[
2362,
2368
]
],
[
[
2347,
2353
]
],
[
[
2385,
2394
]
],
[
[
2485,
2499
],
[
17360,
17374
],
[
18269,
18283
],
[
21355,
21369
]
],
[
[
2579,
2588
],
[
13778,
13787
]
],
[
[
2888,
2899
],
[
18189,
18200
]
],
[
[
3192,
3203
],
[
5785,
5796
]
],
[
[
5176,
5184
],
[
5841,
5849
]
],
[
[
5244,
5250
],
[
5814,
5820
]
],
[
[
5617,
5631
],
[
5872,
5886
]
],
[
[
5696,
5706
],
[
21298,
21308
]
],
[
[
5991,
5998
],
[
6148,
6155
],
[
6708,
6715
]
],
[
[
6075,
6081
],
[
6875,
6881
]
],
[
[
6635,
6648
],
[
6906,
6919
]
],
[
[
6820,
6835
],
[
6981,
6996
],
[
7038,
7053
]
],
[
[
6929,
6945
]
],
[
[
7063,
7083
],
[
7427,
7447
]
],
[
[
7217,
7228
],
[
13295,
13306
]
],
[
[
7904,
7917
],
[
13308,
13321
]
],
[
[
8397,
8416
],
[
8689,
8708
]
],
[
[
8540,
8552
],
[
8761,
8773
]
],
[
[
8609,
8623
]
],
[
[
8880,
8889
],
[
8962,
8971
],
[
8978,
8987
]
],
[
[
8931,
8944
],
[
10869,
10882
],
[
10897,
10910
],
[
10924,
10937
],
[
10950,
10963
],
[
10978,
10991
],
[
11013,
11026
],
[
11040,
11053
],
[
11065,
11078
],
[
11098,
11111
],
[
11132,
11145
],
[
11170,
11183
]
],
[
[
8999,
9016
],
[
9137,
9154
]
],
[
[
9080,
9091
],
[
16621,
16632
]
],
[
[
9390,
9404
]
],
[
[
11192,
11199
],
[
11369,
11376
],
[
11558,
11565
]
],
[
[
11272,
11283
]
],
[
[
11493,
11507
]
],
[
[
11640,
11645
]
],
[
[
11687,
11695
],
[
11926,
11934
]
],
[
[
11832,
11839
]
],
[
[
11944,
11959
],
[
12087,
12102
]
],
[
[
12039,
12053
],
[
16892,
16906
]
],
[
[
12112,
12132
],
[
12697,
12717
]
],
[
[
12219,
12236
],
[
12614,
12631
]
],
[
[
12383,
12393
],
[
12512,
12522
],
[
13082,
13092
]
],
[
[
12459,
12478
],
[
13323,
13342
]
],
[
[
13266,
13286
],
[
13471,
13491
],
[
18004,
18024
]
],
[
[
13352,
13364
]
],
[
[
13500,
13511
],
[
13882,
13893
]
],
[
[
13590,
13593
]
],
[
[
13938,
13956
]
],
[
[
14084,
14102
]
],
[
[
14182,
14186
],
[
16593,
16597
]
],
[
[
14692,
14707
],
[
16984,
16999
]
],
[
[
14775,
14784
]
],
[
[
14905,
14920
],
[
16445,
16460
]
],
[
[
16246,
16250
]
],
[
[
17010,
17027
],
[
17622,
17639
]
],
[
[
17121,
17129
]
],
[
[
17961,
17979
]
],
[
[
18033,
18038
]
],
[
[
18420,
18430
],
[
18562,
18572
],
[
18733,
18743
]
],
[
[
18519,
18532
],
[
19042,
19055
]
],
[
[
18613,
18631
],
[
18866,
18884
]
],
[
[
18670,
18685
],
[
18827,
18842
]
],
[
[
18784,
18795
],
[
19996,
20007
],
[
20184,
20195
],
[
20837,
20848
]
],
[
[
18894,
18916
],
[
19081,
19103
],
[
19485,
19507
]
],
[
[
18959,
18976
],
[
19105,
19122
],
[
19621,
19638
]
],
[
[
19059,
19072
],
[
19200,
19213
]
],
[
[
19132,
19147
],
[
19952,
19967
],
[
20156,
20171
]
],
[
[
19903,
19912
],
[
20173,
20182
]
],
[
[
20134,
20147
],
[
20267,
20280
]
],
[
[
20205,
20222
]
],
[
[
20289,
20304
],
[
20496,
20511
]
],
[
[
20334,
20353
],
[
20537,
20556
]
],
[
[
20392,
20412
],
[
20584,
20604
]
],
[
[
20453,
20464
],
[
20659,
20670
]
],
[
[
20614,
20633
]
],
[
[
20699,
20708
]
],
[
[
20857,
20873
],
[
21464,
21480
]
],
[
[
20968,
20982
],
[
21508,
21522
]
],
[
[
21078,
21082
]
]
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(873, 697)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.graphicsView = QtWidgets.QGraphicsView(self.centralwidget)
self.graphicsView.setObjectName("graphicsView")
self.gridLayout.addWidget(self.graphicsView, 0, 0, 1, 1)
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setEnabled(True)
self.tabWidget.setMinimumSize(QtCore.QSize(251, 489))
self.tabWidget.setMaximumSize(QtCore.QSize(251, 16777215))
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.groupSetting = QtWidgets.QGroupBox(self.tab)
self.groupSetting.setGeometry(QtCore.QRect(10, 10, 221, 110))
self.groupSetting.setMinimumSize(QtCore.QSize(221, 110))
self.groupSetting.setMaximumSize(QtCore.QSize(221, 110))
self.groupSetting.setObjectName("groupSetting")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.groupSetting)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 20, 201, 31))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.labelFruit = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.labelFruit.setObjectName("labelFruit")
self.horizontalLayout.addWidget(self.labelFruit)
self.comboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget)
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.horizontalLayout.addWidget(self.comboBox)
self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.groupSetting)
self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(10, 60, 199, 31))
self.horizontalLayoutWidget_3.setObjectName("horizontalLayoutWidget_3")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_3)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.pushWifi = QtWidgets.QPushButton(self.horizontalLayoutWidget_3)
self.pushWifi.setObjectName("pushWifi")
self.horizontalLayout_3.addWidget(self.pushWifi)
self.labelWifi = QtWidgets.QLabel(self.horizontalLayoutWidget_3)
self.labelWifi.setObjectName("labelWifi")
self.horizontalLayout_3.addWidget(self.labelWifi)
self.groupCurve = QtWidgets.QGroupBox(self.tab)
self.groupCurve.setGeometry(QtCore.QRect(10, 130, 221, 211))
self.groupCurve.setMinimumSize(QtCore.QSize(221, 211))
self.groupCurve.setMaximumSize(QtCore.QSize(221, 211))
self.groupCurve.setObjectName("groupCurve")
self.verticalLayoutWidget = QtWidgets.QWidget(self.groupCurve)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 30, 201, 168))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.labelScanTimes = QtWidgets.QLabel(self.verticalLayoutWidget)
self.labelScanTimes.setMaximumSize(QtCore.QSize(16777215, 28))
self.labelScanTimes.setObjectName("labelScanTimes")
self.horizontalLayout_2.addWidget(self.labelScanTimes)
self.spinBox = QtWidgets.QSpinBox(self.verticalLayoutWidget)
self.spinBox.setMinimum(1)
self.spinBox.setMaximum(20)
self.spinBox.setProperty("value", 3)
self.spinBox.setObjectName("spinBox")
self.horizontalLayout_2.addWidget(self.spinBox)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.pushDetection = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushDetection.setObjectName("pushDetection")
self.verticalLayout_2.addWidget(self.pushDetection)
self.pushOriginal = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushOriginal.setObjectName("pushOriginal")
self.verticalLayout_2.addWidget(self.pushOriginal)
self.pushDerivative = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushDerivative.setObjectName("pushDerivative")
self.verticalLayout_2.addWidget(self.pushDerivative)
self.pushIntegral = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushIntegral.setObjectName("pushIntegral")
self.verticalLayout_2.addWidget(self.pushIntegral)
self.tableWidget = QtWidgets.QTableWidget(self.tab)
self.tableWidget.setGeometry(QtCore.QRect(10, 350, 221, 261))
self.tableWidget.setMinimumSize(QtCore.QSize(221, 0))
self.tableWidget.setMaximumSize(QtCore.QSize(221, 16777215))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(1)
self.tableWidget.setRowCount(6)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(7)
item.setFont(font)
self.tableWidget.setHorizontalHeaderItem(0, item)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.groupLine_1 = QtWidgets.QGroupBox(self.tab_2)
self.groupLine_1.setGeometry(QtCore.QRect(10, 10, 221, 141))
self.groupLine_1.setObjectName("groupLine_1")
self.formLayoutWidget = QtWidgets.QWidget(self.groupLine_1)
self.formLayoutWidget.setGeometry(QtCore.QRect(20, 20, 181, 101))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.labelLineWidth_1 = QtWidgets.QLabel(self.formLayoutWidget)
self.labelLineWidth_1.setObjectName("labelLineWidth_1")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelLineWidth_1)
self.horizontalSlider_1 = QtWidgets.QSlider(self.formLayoutWidget)
self.horizontalSlider_1.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_1.setObjectName("horizontalSlider_1")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.horizontalSlider_1)
self.labelColor_1 = QtWidgets.QLabel(self.formLayoutWidget)
self.labelColor_1.setObjectName("labelColor_1")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelColor_1)
self.comboColor_1 = QtWidgets.QComboBox(self.formLayoutWidget)
self.comboColor_1.setObjectName("comboColor_1")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.comboColor_1.addItem("")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboColor_1)
self.checkVisible_1 = QtWidgets.QCheckBox(self.formLayoutWidget)
self.checkVisible_1.setChecked(True)
self.checkVisible_1.setObjectName("checkVisible_1")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.checkVisible_1)
self.groupLine_2 = QtWidgets.QGroupBox(self.tab_2)
self.groupLine_2.setGeometry(QtCore.QRect(10, 170, 221, 141))
self.groupLine_2.setObjectName("groupLine_2")
self.formLayoutWidget_3 = QtWidgets.QWidget(self.groupLine_2)
self.formLayoutWidget_3.setGeometry(QtCore.QRect(20, 20, 181, 101))
self.formLayoutWidget_3.setObjectName("formLayoutWidget_3")
self.formLayout_3 = QtWidgets.QFormLayout(self.formLayoutWidget_3)
self.formLayout_3.setContentsMargins(0, 0, 0, 0)
self.formLayout_3.setObjectName("formLayout_3")
self.labelLineWidth_2 = QtWidgets.QLabel(self.formLayoutWidget_3)
self.labelLineWidth_2.setObjectName("labelLineWidth_2")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelLineWidth_2)
self.horizontalSlider_3 = QtWidgets.QSlider(self.formLayoutWidget_3)
self.horizontalSlider_3.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_3.setObjectName("horizontalSlider_3")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.horizontalSlider_3)
self.labelColor_2 = QtWidgets.QLabel(self.formLayoutWidget_3)
self.labelColor_2.setObjectName("labelColor_2")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelColor_2)
self.comboColor_2 = QtWidgets.QComboBox(self.formLayoutWidget_3)
self.comboColor_2.setObjectName("comboColor_2")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.comboColor_2.addItem("")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboColor_2)
self.checkVisible_2 = QtWidgets.QCheckBox(self.formLayoutWidget_3)
self.checkVisible_2.setChecked(True)
self.checkVisible_2.setObjectName("checkVisible_2")
self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.checkVisible_2)
self.groupLine_3 = QtWidgets.QGroupBox(self.tab_2)
self.groupLine_3.setGeometry(QtCore.QRect(10, 330, 221, 141))
self.groupLine_3.setObjectName("groupLine_3")
self.formLayoutWidget_4 = QtWidgets.QWidget(self.groupLine_3)
self.formLayoutWidget_4.setGeometry(QtCore.QRect(20, 20, 181, 101))
self.formLayoutWidget_4.setObjectName("formLayoutWidget_4")
self.formLayout_4 = QtWidgets.QFormLayout(self.formLayoutWidget_4)
self.formLayout_4.setContentsMargins(0, 0, 0, 0)
self.formLayout_4.setObjectName("formLayout_4")
self.labelLineWidth_3 = QtWidgets.QLabel(self.formLayoutWidget_4)
self.labelLineWidth_3.setObjectName("labelLineWidth_3")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelLineWidth_3)
self.horizontalSlider_4 = QtWidgets.QSlider(self.formLayoutWidget_4)
self.horizontalSlider_4.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_4.setObjectName("horizontalSlider_4")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.horizontalSlider_4)
self.labelColor_3 = QtWidgets.QLabel(self.formLayoutWidget_4)
self.labelColor_3.setObjectName("labelColor_3")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelColor_3)
self.comboColor_3 = QtWidgets.QComboBox(self.formLayoutWidget_4)
self.comboColor_3.setObjectName("comboColor_3")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.comboColor_3.addItem("")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboColor_3)
self.checkVisible_3 = QtWidgets.QCheckBox(self.formLayoutWidget_4)
self.checkVisible_3.setChecked(True)
self.checkVisible_3.setObjectName("checkVisible_3")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.checkVisible_3)
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout.addWidget(self.tabWidget, 0, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 873, 26))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuSave = QtWidgets.QMenu(self.menuFile)
self.menuSave.setObjectName("menuSave")
self.menuSettings = QtWidgets.QMenu(self.menubar)
self.menuSettings.setObjectName("menuSettings")
self.menuView = QtWidgets.QMenu(self.menubar)
self.menuView.setObjectName("menuView")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionNew = QtWidgets.QAction(MainWindow)
self.actionNew.setObjectName("actionNew")
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionSave_Data = QtWidgets.QAction(MainWindow)
self.actionSave_Data.setObjectName("actionSave_Data")
self.actionSave_Graph = QtWidgets.QAction(MainWindow)
self.actionSave_Graph.setObjectName("actionSave_Graph")
self.actionLine = QtWidgets.QAction(MainWindow)
self.actionLine.setObjectName("actionLine")
self.actionUsage = QtWidgets.QAction(MainWindow)
self.actionUsage.setObjectName("actionUsage")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionCopyright = QtWidgets.QAction(MainWindow)
self.actionCopyright.setObjectName("actionCopyright")
self.actionWi_Fi_Setting = QtWidgets.QAction(MainWindow)
self.actionWi_Fi_Setting.setObjectName("actionWi_Fi_Setting")
self.menuSave.addAction(self.actionSave_Data)
self.menuSave.addAction(self.actionSave_Graph)
self.menuFile.addAction(self.actionNew)
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.menuSave.menuAction())
self.menuSettings.addAction(self.actionLine)
self.menuSettings.addAction(self.actionWi_Fi_Setting)
self.menuHelp.addAction(self.actionUsage)
self.menuHelp.addAction(self.actionAbout)
self.menuHelp.addAction(self.actionCopyright)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuSettings.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
self.checkVisible_1.clicked.connect(self.checkVisible_1.click)
self.checkVisible_2.clicked.connect(self.checkVisible_2.click)
self.checkVisible_3.clicked.connect(self.checkVisible_3.click)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "水果光谱检测"))  # "Fruit spectral detection"
self.groupSetting.setTitle(_translate("MainWindow", "Setting"))
self.labelFruit.setText(_translate("MainWindow", "Fruit"))
self.comboBox.setItemText(0, _translate("MainWindow", "None"))
self.comboBox.setItemText(1, _translate("MainWindow", "Apple"))
self.pushWifi.setText(_translate("MainWindow", "Wi-Fi"))
self.labelWifi.setText(_translate("MainWindow", "unconnected"))
self.groupCurve.setTitle(_translate("MainWindow", "Curve"))
self.labelScanTimes.setText(_translate("MainWindow", "ScanTimes"))
self.pushDetection.setText(_translate("MainWindow", "Spectral Detection"))
self.pushOriginal.setText(_translate("MainWindow", "Original Time"))
self.pushDerivative.setText(_translate("MainWindow", "Derivative Time"))
self.pushIntegral.setText(_translate("MainWindow", "Integral Time"))
item = self.tableWidget.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Energy"))
item = self.tableWidget.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Carbohydrates"))
item = self.tableWidget.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "-Sugars"))
item = self.tableWidget.verticalHeaderItem(3)
item.setText(_translate("MainWindow", "Protein"))
item = self.tableWidget.verticalHeaderItem(4)
item.setText(_translate("MainWindow", "New Row"))
item = self.tableWidget.verticalHeaderItem(5)
item.setText(_translate("MainWindow", "Sodium"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Per 100g"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "检测"))  # "Detection"
self.groupLine_1.setTitle(_translate("MainWindow", "Line1"))
self.labelLineWidth_1.setText(_translate("MainWindow", "Width"))
self.labelColor_1.setText(_translate("MainWindow", "Color"))
self.comboColor_1.setItemText(0, _translate("MainWindow", "Black"))
self.comboColor_1.setItemText(1, _translate("MainWindow", "Gray"))
self.comboColor_1.setItemText(2, _translate("MainWindow", "White"))
self.comboColor_1.setItemText(3, _translate("MainWindow", "Red"))
self.comboColor_1.setItemText(4, _translate("MainWindow", "Green"))
self.comboColor_1.setItemText(5, _translate("MainWindow", "Blue"))
self.checkVisible_1.setText(_translate("MainWindow", "Visible"))
self.groupLine_2.setTitle(_translate("MainWindow", "Line2"))
self.labelLineWidth_2.setText(_translate("MainWindow", "Width"))
self.labelColor_2.setText(_translate("MainWindow", "Color"))
self.comboColor_2.setItemText(0, _translate("MainWindow", "Green"))
self.comboColor_2.setItemText(1, _translate("MainWindow", "Black"))
self.comboColor_2.setItemText(2, _translate("MainWindow", "Gray"))
self.comboColor_2.setItemText(3, _translate("MainWindow", "White"))
self.comboColor_2.setItemText(4, _translate("MainWindow", "Red"))
self.comboColor_2.setItemText(5, _translate("MainWindow", "Blue"))
self.checkVisible_2.setText(_translate("MainWindow", "Visible"))
self.groupLine_3.setTitle(_translate("MainWindow", "Line3"))
self.labelLineWidth_3.setText(_translate("MainWindow", "Width"))
self.labelColor_3.setText(_translate("MainWindow", "Color"))
self.comboColor_3.setItemText(0, _translate("MainWindow", "Red"))
self.comboColor_3.setItemText(1, _translate("MainWindow", "Black"))
self.comboColor_3.setItemText(2, _translate("MainWindow", "Gray"))
self.comboColor_3.setItemText(3, _translate("MainWindow", "White"))
self.comboColor_3.setItemText(4, _translate("MainWindow", "Green"))
self.comboColor_3.setItemText(5, _translate("MainWindow", "Blue"))
self.checkVisible_3.setText(_translate("MainWindow", "Visible"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "设置"))  # "Settings"
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuSave.setTitle(_translate("MainWindow", "Save"))
self.menuSettings.setTitle(_translate("MainWindow", "Settings"))
self.menuView.setTitle(_translate("MainWindow", "View"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionNew.setText(_translate("MainWindow", "New"))
self.actionOpen.setText(_translate("MainWindow", "Open"))
self.actionSave_Data.setText(_translate("MainWindow", "Save Data"))
self.actionSave_Graph.setText(_translate("MainWindow", "Save Graph"))
self.actionLine.setText(_translate("MainWindow", "Line Setting"))
self.actionUsage.setText(_translate("MainWindow", "Usage"))
self.actionAbout.setText(_translate("MainWindow", "About"))
self.actionCopyright.setText(_translate("MainWindow", "Copyright"))
self.actionWi_Fi_Setting.setText(_translate("MainWindow", "Wi-Fi Setting"))
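# A minimal, hedged launcher sketch: standard PyQt5 boilerplate for showing the
# generated Ui_MainWindow. It is not produced by pyuic5 and is added here only as
# an illustration of how the class is typically used.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())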
| [
[
[
305,
311
],
[
1053,
1059
],
[
1115,
1121
],
[
1433,
1439
],
[
1506,
1512
],
[
1571,
1577
],
[
1774,
1780
],
[
2641,
2647
],
[
3425,
3431
],
[
3497,
3503
],
[
3560,
3566
],
[
3753,
3759
],
[
4306,
4312
],
[
5686,
5692
],
[
5759,
5765
],
[
5821,
5827
],
[
7507,
7513
],
[
7703,
7709
],
[
8328,
8334
],
[
9535,
9541
],
[
9736,
9742
],
[
10381,
10387
],
[
11602,
11608
],
[
11803,
11809
],
[
12448,
12454
],
[
13824,
13830
],
[
16750,
16756
],
[
16863,
16869
]
],
[
[
313,
318
],
[
6046,
6051
],
[
6231,
6236
],
[
6416,
6421
],
[
6601,
6606
],
[
6786,
6791
],
[
6971,
6976
],
[
7156,
7161
]
],
[
[
320,
329
],
[
508,
517
],
[
622,
631
],
[
744,
753
],
[
934,
943
],
[
1182,
1191
],
[
1279,
1288
],
[
1365,
1374
],
[
1689,
1698
],
[
1913,
1922
],
[
2115,
2124
],
[
2294,
2303
],
[
2554,
2563
],
[
2786,
2795
],
[
2994,
3003
],
[
3177,
3186
],
[
3359,
3368
],
[
3672,
3681
],
[
3889,
3898
],
[
4097,
4106
],
[
4219,
4228
],
[
4480,
4489
],
[
4838,
4847
],
[
5033,
5042
],
[
5227,
5236
],
[
5425,
5434
],
[
5616,
5625
],
[
6002,
6011
],
[
6187,
6196
],
[
6372,
6381
],
[
6557,
6566
],
[
6742,
6751
],
[
6927,
6936
],
[
7112,
7121
],
[
7349,
7358
],
[
7438,
7447
],
[
7625,
7634
],
[
7825,
7834
],
[
8009,
8018
],
[
8150,
8159
],
[
8240,
8249
],
[
8455,
8464
],
[
8541,
8550
],
[
8674,
8683
],
[
8754,
8763
],
[
9118,
9127
],
[
9200,
9209
],
[
9385,
9394
],
[
9466,
9475
],
[
9656,
9665
],
[
9864,
9873
],
[
10056,
10065
],
[
10201,
10210
],
[
10291,
10300
],
[
10510,
10519
],
[
10596,
10605
],
[
10733,
10742
],
[
10813,
10822
],
[
11181,
11190
],
[
11263,
11272
],
[
11452,
11461
],
[
11533,
11542
],
[
11723,
11732
],
[
11931,
11940
],
[
12123,
12132
],
[
12268,
12277
],
[
12358,
12367
],
[
12577,
12586
],
[
12663,
12672
],
[
12800,
12809
],
[
12880,
12889
],
[
13248,
13257
],
[
13330,
13339
],
[
13519,
13528
],
[
13760,
13769
],
[
13923,
13932
],
[
14025,
14034
],
[
14132,
14141
],
[
14242,
14251
],
[
14344,
14353
],
[
14491,
14500
],
[
14647,
14656
],
[
14753,
14762
],
[
14866,
14875
],
[
14990,
14999
],
[
15110,
15119
],
[
15219,
15228
],
[
15330,
15339
],
[
15445,
15454
],
[
15572,
15581
]
],
[
[
338,
351
]
]
] |
from .hn import NewsClient | [
[
[
16,
26
]
]
] |
from functools import reduce
from itertools import chain
from operator import add
from typing import Iterable, Optional, TypeVar
from lhotse.audio import Recording, RecordingSet
from lhotse.cut import Cut, CutSet, MixedCut
from lhotse.features import FeatureSet, Features
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike, load_yaml
ManifestItem = TypeVar('ManifestItem', Recording, SupervisionSegment, Features, Cut, MixedCut)
Manifest = TypeVar('Manifest', RecordingSet, SupervisionSet, FeatureSet, CutSet)
def combine(*manifests: Manifest) -> Manifest:
"""Combine multiple manifests of the same type into one."""
return reduce(add, manifests)
def to_manifest(items: Iterable[ManifestItem]) -> Optional[Manifest]:
"""
    Take an iterable of Lhotse data types such as Recording, SupervisionSegment or Cut, and create the manifest of the
corresponding type. When the iterable is empty, returns None.
"""
items = iter(items)
try:
first_item = next(items)
except StopIteration:
return None
items = chain([first_item], items)
if isinstance(first_item, Recording):
return RecordingSet.from_recordings(items)
if isinstance(first_item, SupervisionSegment):
return SupervisionSet.from_segments(items)
if isinstance(first_item, (Cut, MixedCut)):
return CutSet.from_cuts(items)
if isinstance(first_item, Features):
raise ValueError("FeatureSet generic construction from iterable is not possible, as the config information "
"would have been lost. Call FeatureSet.from_features() directly instead.")
raise ValueError(f"Unknown type of manifest item: {first_item}")
def load_manifest(path: Pathlike) -> Manifest:
"""Generic utility for reading an arbitrary manifest."""
raw_data = load_yaml(path)
data_set = None
for manifest_type in [RecordingSet, SupervisionSet, FeatureSet, CutSet]:
try:
data_set = manifest_type.from_dicts(raw_data)
except Exception:
pass
if data_set is None:
raise ValueError(f'Unknown type of manifest: {path}')
return data_set
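# A minimal, hedged usage sketch of the helpers above; 'recordings.yml' and
# 'supervisions.yml' are hypothetical manifest paths, not files shipped with lhotse.
#
#   recordings = load_manifest('recordings.yml')       # -> RecordingSet
#   supervisions = load_manifest('supervisions.yml')   # -> SupervisionSet
#   merged = combine(recordings, recordings)           # arguments must share one manifest type
#   rebuilt = to_manifest(iter(recordings))            # RecordingSet again, or None when empty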
| [
[
[
22,
28
],
[
685,
691
]
],
[
[
51,
56
],
[
1107,
1112
]
],
[
[
78,
81
],
[
692,
695
]
],
[
[
101,
109
],
[
733,
741
]
],
[
[
111,
119
],
[
760,
768
]
],
[
[
121,
128
],
[
400,
407
],
[
491,
498
]
],
[
[
155,
164
],
[
424,
433
],
[
1165,
1174
]
],
[
[
166,
178
],
[
511,
523
],
[
1192,
1204
],
[
1932,
1944
]
],
[
[
202,
205
],
[
465,
468
],
[
1361,
1364
]
],
[
[
207,
213
],
[
553,
559
],
[
1393,
1399
],
[
1974,
1980
]
],
[
[
215,
223
],
[
470,
478
],
[
1366,
1374
]
],
[
[
252,
262
],
[
541,
551
],
[
1962,
1972
]
],
[
[
264,
272
],
[
455,
463
],
[
1447,
1455
]
],
[
[
304,
322
],
[
435,
453
],
[
1258,
1276
]
],
[
[
324,
338
],
[
525,
539
],
[
1294,
1308
],
[
1946,
1960
]
],
[
[
364,
372
],
[
1771,
1779
]
],
[
[
374,
383
],
[
1870,
1879
]
],
[
[
385,
397
],
[
742,
754
]
],
[
[
480,
488
],
[
600,
608
],
[
587,
595
],
[
769,
777
],
[
1784,
1792
]
],
[
[
567,
574
]
],
[
[
714,
725
]
],
[
[
1751,
1764
]
]
] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Melody-over-chords RNN generation code as a SequenceGenerator interface."""
from functools import partial
# internal imports
from magenta.models.improv_rnn import improv_rnn_model
import magenta.music as mm
class ImprovRnnSequenceGenerator(mm.BaseSequenceGenerator):
"""Improv RNN generation code as a SequenceGenerator interface."""
def __init__(self, model, details, steps_per_quarter=4, checkpoint=None,
bundle=None):
"""Creates an ImprovRnnSequenceGenerator.
Args:
model: Instance of ImprovRnnModel.
details: A generator_pb2.GeneratorDetails for this generator.
steps_per_quarter: What precision to use when quantizing the melody and
chords. How many steps per quarter note.
checkpoint: Where to search for the most recent model checkpoint. Mutually
exclusive with `bundle`.
bundle: A GeneratorBundle object that includes both the model checkpoint
and metagraph. Mutually exclusive with `checkpoint`.
"""
super(ImprovRnnSequenceGenerator, self).__init__(
model, details, checkpoint, bundle)
self.steps_per_quarter = steps_per_quarter
def _generate(self, input_sequence, generator_options):
if len(generator_options.input_sections) > 1:
raise mm.SequenceGeneratorException(
'This model supports at most one input_sections message, but got %s' %
len(generator_options.input_sections))
if len(generator_options.generate_sections) != 1:
raise mm.SequenceGeneratorException(
'This model supports only 1 generate_sections message, but got %s' %
len(generator_options.generate_sections))
qpm = (input_sequence.tempos[0].qpm
if input_sequence and input_sequence.tempos
else mm.DEFAULT_QUARTERS_PER_MINUTE)
steps_per_second = mm.steps_per_quarter_to_steps_per_second(
self.steps_per_quarter, qpm)
generate_section = generator_options.generate_sections[0]
if generator_options.input_sections:
# Use primer melody from input section only. Take backing chords from
# beginning of input section through end of generate section.
input_section = generator_options.input_sections[0]
primer_sequence = mm.trim_note_sequence(
input_sequence, input_section.start_time, input_section.end_time)
backing_sequence = mm.trim_note_sequence(
input_sequence, input_section.start_time, generate_section.end_time)
input_start_step = mm.quantize_to_step(
input_section.start_time, steps_per_second, quantize_cutoff=0.0)
else:
# No input section. Take primer melody from the beginning of the sequence
# up until the start of the generate section.
primer_sequence = mm.trim_note_sequence(
input_sequence, 0.0, generate_section.start_time)
backing_sequence = mm.trim_note_sequence(
input_sequence, 0.0, generate_section.end_time)
input_start_step = 0
last_end_time = (max(n.end_time for n in primer_sequence.notes)
if primer_sequence.notes else 0)
if last_end_time >= generate_section.start_time:
raise mm.SequenceGeneratorException(
'Got GenerateSection request for section that is before or equal to '
'the end of the input section. This model can only extend melodies. '
'Requested start time: %s, Final note end time: %s' %
(generate_section.start_time, last_end_time))
# Quantize the priming and backing sequences.
quantized_primer_sequence = mm.quantize_note_sequence(
primer_sequence, self.steps_per_quarter)
quantized_backing_sequence = mm.quantize_note_sequence(
backing_sequence, self.steps_per_quarter)
# Setting gap_bars to infinite ensures that the entire input will be used.
extracted_melodies, _ = mm.extract_melodies(
quantized_primer_sequence, search_start_step=input_start_step,
min_bars=0, min_unique_pitches=1, gap_bars=float('inf'),
ignore_polyphonic_notes=True)
assert len(extracted_melodies) <= 1
start_step = mm.quantize_to_step(
generate_section.start_time, steps_per_second, quantize_cutoff=0.0)
# Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
# always rounds down. This avoids generating a sequence that ends at 5.0
# seconds when the requested end time is 4.99.
end_step = mm.quantize_to_step(
generate_section.end_time, steps_per_second, quantize_cutoff=1.0)
if extracted_melodies and extracted_melodies[0]:
melody = extracted_melodies[0]
else:
# If no melody could be extracted, create an empty melody that starts 1
# step before the request start_step. This will result in 1 step of
# silence when the melody is extended below.
steps_per_bar = int(
mm.steps_per_bar_in_quantized_sequence(quantized_primer_sequence))
melody = mm.Melody([],
start_step=max(0, start_step - 1),
steps_per_bar=steps_per_bar,
steps_per_quarter=self.steps_per_quarter)
extracted_chords, _ = mm.extract_chords(quantized_backing_sequence)
chords = extracted_chords[0]
# Make sure that chords and melody start on the same step.
if chords.start_step < melody.start_step:
chords.set_length(len(chords) - melody.start_step + chords.start_step)
assert chords.end_step == end_step
# Ensure that the melody extends up to the step we want to start generating.
melody.set_length(start_step - melody.start_step)
# Extract generation arguments from generator options.
arg_types = {
'temperature': lambda arg: arg.float_value,
'beam_size': lambda arg: arg.int_value,
'branch_factor': lambda arg: arg.int_value,
'steps_per_iteration': lambda arg: arg.int_value
}
args = dict((name, value_fn(generator_options.args[name]))
for name, value_fn in arg_types.items()
if name in generator_options.args)
generated_melody = self._model.generate_melody(melody, chords, **args)
generated_lead_sheet = mm.LeadSheet(generated_melody, chords)
generated_sequence = generated_lead_sheet.to_sequence(qpm=qpm)
assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
return generated_sequence
def get_generator_map():
"""Returns a map from the generator ID to a SequenceGenerator class creator.
Binds the `config` argument so that the arguments match the
BaseSequenceGenerator class constructor.
Returns:
Map from the generator ID to its SequenceGenerator class creator with a
bound `config` argument.
"""
def create_sequence_generator(config, **kwargs):
return ImprovRnnSequenceGenerator(
improv_rnn_model.ImprovRnnModel(config), config.details,
steps_per_quarter=config.steps_per_quarter, **kwargs)
return {key: partial(create_sequence_generator, config)
for (key, config) in improv_rnn_model.default_configs.items()}
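# A minimal, hedged usage sketch; the checkpoint path is a placeholder and the
# available map keys are whatever improv_rnn_model.default_configs defines.
#
#   generator_map = get_generator_map()
#   create_fn = next(iter(generator_map.values()))
#   generator = create_fn(checkpoint='/path/to/checkpoint', bundle=None)  # hypothetical path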
| [
[
[
697,
704
],
[
7543,
7550
]
],
[
[
764,
780
],
[
7617,
7633
],
[
7408,
7424
]
],
[
[
788,
807
],
[
843,
845
],
[
1871,
1873
],
[
2098,
2100
],
[
2372,
2374
],
[
2427,
2429
],
[
2836,
2838
],
[
2960,
2962
],
[
3087,
3089
],
[
3349,
3351
],
[
3457,
3459
],
[
3753,
3755
],
[
4147,
4149
],
[
4256,
4258
],
[
4441,
4443
],
[
4694,
4696
],
[
5012,
5014
],
[
5448,
5450
],
[
5530,
5532
],
[
5752,
5754
],
[
6760,
6762
]
],
[
[
816,
842
],
[
1615,
1641
],
[
7372,
7398
]
],
[
[
6981,
6998
]
]
] |
from csrv.model import actions
from csrv.model.actions import play_run_event
from csrv.model import cost
from csrv.model import events
from csrv.model import timing_phases
from csrv.model.cards import card_info
from csrv.model.cards import event
class TrashForFree(actions.TrashOnAccess):
COST_CLASS = cost.NullCost
def is_usable(self):
return actions.TrashOnAccess.is_usable(self) and self.card.is_being_accessed
class Card01003Action(play_run_event.PlayRunEvent):
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
play_run_event.PlayRunEvent.resolve(
self, response, ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
self.game.register_choice_provider(
timing_phases.AccessCard, self, 'access_card_actions')
self.game.register_listener(events.RunEnds, self)
def access_card_actions(self):
card = self.game.current_phase().card # blech
return [TrashForFree(self.game, self.player, card)]
def on_run_ends(self, sender, event):
self.game.deregister_choice_provider(
timing_phases.AccessCard, self, 'access_card_actions')
self.game.deregister_listener(events.RunEnds, self)
class Card01003(event.Event):
NAME = u'Card01003'
SET = card_info.CORE
NUMBER = 3
SIDE = card_info.RUNNER
FACTION = card_info.ANARCH
INFLUENCE = 2
UNIQUE = False
KEYWORDS = set([
card_info.RUN,
card_info.SABOTAGE,
])
COST = 2
IMAGE_SRC = '01003.png'
def build_actions(self):
event.Event.build_actions(self)
self._play_event_action = Card01003Action(self.game, self.player, self)
| [
[
[
23,
30
],
[
267,
274
],
[
355,
362
]
],
[
[
62,
76
],
[
449,
463
],
[
565,
579
]
],
[
[
100,
104
],
[
306,
310
]
],
[
[
128,
134
],
[
833,
839
],
[
1176,
1182
]
],
[
[
158,
171
],
[
746,
759
],
[
1087,
1100
]
],
[
[
201,
210
],
[
1261,
1270
],
[
1298,
1307
],
[
1327,
1336
],
[
1402,
1411
],
[
1423,
1432
]
],
[
[
240,
245
],
[
1216,
1221
],
[
1517,
1522
]
],
[
[
254,
266
],
[
952,
964
]
],
[
[
433,
448
],
[
1579,
1594
]
],
[
[
1206,
1215
]
]
] |
from __future__ import print_function
import copy
import time
from pprint import pformat
import uuid
import ast
import urllib
import urllib3
import requests.exceptions
from demisto_client.demisto_api.rest import ApiException
import demisto_client
import json
from Tests.test_utils import print_error, print_warning, print_color, LOG_COLORS, Docker
from Tests.scripts.constants import PB_Status
# Disable insecure warnings
urllib3.disable_warnings()
# ----- Constants ----- #
DEFAULT_TIMEOUT = 60
DEFAULT_INTERVAL = 20
ENTRY_TYPE_ERROR = 4
# ----- Functions ----- #
# get integration configuration
def __get_integration_config(client, integration_name, prints_manager, thread_index=0):
body = {
'page': 0, 'size': 100, 'query': 'name:' + integration_name
}
try:
res_raw = demisto_client.generic_request_func(self=client, path='/settings/integration/search',
method='POST', body=body)
except ApiException as conn_error:
prints_manager.add_print_job(conn_error, print, thread_index)
return None
res = ast.literal_eval(res_raw[0])
TIMEOUT = 180
SLEEP_INTERVAL = 5
total_sleep = 0
while 'configurations' not in res:
if total_sleep == TIMEOUT:
error_message = "Timeout - failed to get integration {} configuration. Error: {}".format(integration_name,
res)
prints_manager.add_print_job(error_message, print_error, thread_index)
return None
time.sleep(SLEEP_INTERVAL)
total_sleep += SLEEP_INTERVAL
all_configurations = res['configurations']
match_configurations = [x for x in all_configurations if x['name'] == integration_name]
if not match_configurations or len(match_configurations) == 0:
prints_manager.add_print_job('integration was not found', print_error, thread_index)
return None
return match_configurations[0]
# __test_integration_instance
def __test_integration_instance(client, module_instance, prints_manager, thread_index=0):
connection_retries = 3
response_code = 0
prints_manager.add_print_job("trying to connect.", print_warning, thread_index)
for i in range(connection_retries):
try:
response_data, response_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/settings/integration/test',
body=module_instance,
_request_timeout=120)
break
except ApiException as conn_err:
error_msg = 'Failed to test integration instance, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_msg, print_error, thread_index)
return False, None
except urllib3.exceptions.ReadTimeoutError:
warning_msg = "Could not connect. Trying to connect for the {} time".format(i + 1)
prints_manager.add_print_job(warning_msg, print_warning, thread_index)
if int(response_code) != 200:
test_failed_msg = 'Integration-instance test ("Test" button) failed.\nBad status code: ' + str(
response_code)
prints_manager.add_print_job(test_failed_msg, print_error, thread_index)
return False, None
result_object = ast.literal_eval(response_data)
success, failure_message = bool(result_object.get('success')), result_object.get('message')
if not success:
if failure_message:
test_failed_msg = 'Test integration failed.\nFailure message: {}'.format(failure_message)
prints_manager.add_print_job(test_failed_msg, print_error, thread_index)
else:
test_failed_msg = 'Test integration failed\nNo failure message.'
prints_manager.add_print_job(test_failed_msg, print_error, thread_index)
return success, failure_message
# return the module instance if creation succeeds, None (plus a failure message) otherwise
def __create_integration_instance(client, integration_name, integration_instance_name,
integration_params, is_byoi, prints_manager, validate_test=True, thread_index=0):
start_message = 'Configuring instance for {} (instance name: {}, ' \
'validate "Test": {})'.format(integration_name, integration_instance_name, validate_test)
prints_manager.add_print_job(start_message, print, thread_index)
    # get the integration configuration (used later for the REST API call)
configuration = __get_integration_config(client, integration_name, prints_manager,
thread_index=thread_index)
if not configuration:
return None, 'No configuration', None
module_configuration = configuration['configuration']
if not module_configuration:
module_configuration = []
instance_name = '{}_test_{}'.format(integration_instance_name.replace(' ', '_'),
str(uuid.uuid4()))
# define module instance
module_instance = {
'brand': configuration['name'],
'category': configuration['category'],
'configuration': configuration,
'data': [],
'enabled': "true",
'engine': '',
'id': '',
'isIntegrationScript': is_byoi,
'name': instance_name,
'passwordProtected': False,
'version': 0
}
# set module params
for param_conf in module_configuration:
if param_conf['display'] in integration_params or param_conf['name'] in integration_params:
# param defined in conf
key = param_conf['display'] if param_conf['display'] in integration_params else param_conf['name']
if key == 'credentials':
credentials = integration_params[key]
param_value = {
'credential': '',
'identifier': credentials['identifier'],
'password': credentials['password'],
'passwordChanged': False
}
else:
param_value = integration_params[key]
param_conf['value'] = param_value
param_conf['hasvalue'] = True
elif param_conf['defaultValue']:
# param is required - take default value
param_conf['value'] = param_conf['defaultValue']
module_instance['data'].append(param_conf)
try:
res = demisto_client.generic_request_func(self=client, method='PUT',
path='/settings/integration',
body=module_instance)
except ApiException as conn_err:
error_message = 'Error trying to create instance for integration: {0}:\n {1}'.format(
integration_name, conn_err
)
prints_manager.add_print_job(error_message, print_error, thread_index)
return None, error_message, None
if res[1] != 200:
error_message = 'create instance failed with status code ' + str(res[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.add_print_job(pformat(res[0]), print_error, thread_index)
return None, error_message, None
integration_config = ast.literal_eval(res[0])
module_instance['id'] = integration_config['id']
# test integration
if validate_test:
test_succeed, failure_message = __test_integration_instance(client, module_instance, prints_manager,
thread_index=thread_index)
else:
print_warning(
"Skipping test validation for integration: {} (it has test_validate set to false)".format(integration_name)
)
test_succeed = True
if not test_succeed:
__disable_integrations_instances(client, [module_instance], prints_manager, thread_index=thread_index)
return None, failure_message, None
docker_image = Docker.get_integration_image(integration_config)
return module_instance, '', docker_image
def __disable_integrations_instances(client, module_instances, prints_manager, thread_index=0):
for configured_instance in module_instances:
        # tested with POSTMAN; these are the minimum fields required for the request.
module_instance = {
key: configured_instance[key] for key in ['id', 'brand', 'name', 'data', 'isIntegrationScript', ]
}
module_instance['enable'] = "false"
module_instance['version'] = -1
try:
res = demisto_client.generic_request_func(self=client, method='PUT',
path='/settings/integration',
body=module_instance)
except ApiException as conn_err:
error_message = 'Failed to disable integration instance, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
if res[1] != 200:
error_message = 'disable instance failed with status code ' + str(res[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.add_print_job(pformat(res), print_error, thread_index)
def __enable_integrations_instances(client, module_instances):
for configured_instance in module_instances:
        # tested with POSTMAN; these are the minimum fields required for the request.
module_instance = {
key: configured_instance[key] for key in ['id', 'brand', 'name', 'data', 'isIntegrationScript', ]
}
module_instance['enable'] = "true"
module_instance['version'] = -1
try:
res = demisto_client.generic_request_func(self=client, method='PUT',
path='/settings/integration',
body=module_instance)
except ApiException as conn_err:
print_error(
'Failed to enable integration instance, error trying to communicate with demisto '
'server: {} '.format(conn_err)
)
if res[1] != 200:
print_error('Enabling instance failed with status code ' + str(res[1]) + '\n' + pformat(res))
# create incident with given name & playbook, and then fetch & return the incident
def __create_incident_with_playbook(client, name, playbook_id, integrations, prints_manager, thread_index=0):
# create incident
create_incident_request = demisto_client.demisto_api.CreateIncidentRequest()
create_incident_request.create_investigation = True
create_incident_request.playbook_id = playbook_id
create_incident_request.name = name
try:
response = client.create_incident(create_incident_request=create_incident_request)
except ApiException as err:
prints_manager.add_print_job(str(err), print_error, thread_index)
try:
inc_id = response.id
except: # noqa: E722
inc_id = 'incCreateErr'
# inc_id = response_json.get('id', 'incCreateErr')
if inc_id == 'incCreateErr':
integration_names = [integration['name'] for integration in integrations if
'name' in integration]
error_message = 'Failed to create incident for integration names: {} and playbookID: {}.' \
'Possible reasons are:\nMismatch between playbookID in conf.json and ' \
'the id of the real playbook you were trying to use,' \
'or schema problems in the TestPlaybook.'.format(str(integration_names), playbook_id)
prints_manager.add_print_job(error_message, print_error, thread_index)
return False, -1
# get incident
search_filter = demisto_client.demisto_api.SearchIncidentsData()
inc_filter = demisto_client.demisto_api.IncidentFilter()
inc_filter.query = 'id:' + str(inc_id)
# inc_filter.query
search_filter.filter = inc_filter
try:
incidents = client.search_incidents(filter=search_filter)
except ApiException as err:
prints_manager.add_print_job(err, print, thread_index)
incidents = {'total': 0}
# poll the incidents queue for a max time of 120 seconds
timeout = time.time() + 120
while incidents['total'] != 1:
try:
incidents = client.search_incidents(filter=search_filter)
except ApiException as err:
prints_manager.add_print_job(err, print, thread_index)
if time.time() > timeout:
error_message = 'Got timeout for searching incident with id {}, ' \
'got {} incidents in the search'.format(inc_id, incidents['total'])
prints_manager.add_print_job(error_message, print_error, thread_index)
return False, -1
time.sleep(1)
return incidents['data'][0], inc_id
# returns current investigation playbook state - 'inprogress'/'failed'/'completed'
def __get_investigation_playbook_state(client, inv_id, prints_manager, thread_index=0):
try:
investigation_playbook_raw = demisto_client.generic_request_func(self=client, method='GET',
path='/inv-playbook/' + inv_id)
investigation_playbook = ast.literal_eval(investigation_playbook_raw[0])
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to get investigation playbook state, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
return PB_Status.FAILED
try:
state = investigation_playbook['state']
return state
except: # noqa: E722
return PB_Status.NOT_SUPPORTED_VERSION
# return True if delete-incident succeeded, False otherwise
def __delete_incident(client, incident, prints_manager, thread_index=0):
try:
body = {
'ids': [incident['id']],
'filter': {},
'all': False
}
res = demisto_client.generic_request_func(self=client, method='POST',
path='/incident/batchDelete', body=body)
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to delete incident, error trying to communicate with demisto server: {} ' \
''.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
return False
if int(res[1]) != 200:
error_message = 'delete incident failed\nStatus code' + str(res[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.add_print_job(pformat(res), print_error, thread_index)
return False
return True
# return True if delete-integration-instance succeeded, False otherwise
def __delete_integration_instance(client, instance_id, prints_manager, thread_index=0):
try:
res = demisto_client.generic_request_func(self=client, method='DELETE',
path='/settings/integration/' + urllib.quote(
instance_id))
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to delete integration instance, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
return False
if int(res[1]) != 200:
error_message = 'delete integration instance failed\nStatus code' + str(res[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.add_print_job(pformat(res), print_error, thread_index)
return False
return True
# delete all integration instances, return True only if all deletions succeed
def __delete_integrations_instances(client, module_instances, prints_manager, thread_index=0):
succeed = True
for module_instance in module_instances:
succeed = __delete_integration_instance(client, module_instance['id'], thread_index=thread_index,
prints_manager=prints_manager) and succeed
return succeed
def __print_investigation_error(client, playbook_id, investigation_id, prints_manager, color=LOG_COLORS.RED,
thread_index=0):
try:
empty_json = {"pageSize": 1000}
res = demisto_client.generic_request_func(self=client, method='POST',
path='/investigation/' + urllib.quote(
investigation_id), body=empty_json)
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to print investigation error, error trying to communicate with demisto ' \
'server: {} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
if res and int(res[1]) == 200:
resp_json = ast.literal_eval(res[0])
entries = resp_json['entries']
prints_manager.add_print_job('Playbook ' + playbook_id + ' has failed:', print_color, thread_index,
message_color=color)
for entry in entries:
if entry['type'] == ENTRY_TYPE_ERROR and entry['parentContent']:
prints_manager.add_print_job('- Task ID: ' + entry['taskId'].encode('utf-8'), print_color, thread_index,
message_color=color)
prints_manager.add_print_job(' Command: ' + entry['parentContent'].encode('utf-8'), print_color,
thread_index, message_color=color)
body_contents_str = ' Body:\n' + entry['contents'].encode('utf-8') + '\n'
prints_manager.add_print_job(body_contents_str, print_color,
thread_index, message_color=color)
# Configure integrations to work with mock
def configure_proxy_unsecure(integration_params):
"""Copies the integration parameters dictionary.
Set proxy and insecure integration parameters to true.
Args:
integration_params: dict of the integration parameters.
"""
integration_params_copy = copy.deepcopy(integration_params)
for param in ('proxy', 'useProxy', 'insecure', 'unsecure'):
integration_params[param] = True
return integration_params_copy
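# A minimal, hedged sketch of configure_proxy_unsecure's behaviour; the parameter
# values are hypothetical and not taken from any real integration.
#
#   params = {'url': 'https://example.com', 'insecure': False}
#   original = configure_proxy_unsecure(params)
#   # params now has proxy/useProxy/insecure/unsecure forced to True (in place),
#   # while `original` still holds the pre-modification values.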
# 1. create integrations instances
# 2. create incident with playbook
# 3. wait for playbook to finish run
# 4. if the test passes - delete incident & instances
# return playbook status
def test_integration(client, server_url, integrations, playbook_id, prints_manager, options=None, is_mock_run=False,
thread_index=0):
options = options if options is not None else {}
# create integrations instances
module_instances = []
test_docker_images = set()
with open("./Tests/conf.json", 'r') as conf_file:
docker_thresholds = json.load(conf_file).get('docker_thresholds', {}).get('images', {})
for integration in integrations:
integration_name = integration.get('name', None)
integration_instance_name = integration.get('instance_name', '')
integration_params = integration.get('params', None)
is_byoi = integration.get('byoi', True)
validate_test = integration.get('validate_test', True)
if is_mock_run:
configure_proxy_unsecure(integration_params)
module_instance, failure_message, docker_image = __create_integration_instance(client, integration_name,
integration_instance_name,
integration_params,
is_byoi, prints_manager,
validate_test=validate_test,
thread_index=thread_index)
if module_instance is None:
failure_message = failure_message if failure_message else 'No failure message could be found'
msg = 'Failed to create instance: {}'.format(failure_message)
prints_manager.add_print_job(msg, print_error, thread_index) # disable-secrets-detection
__delete_integrations_instances(client, module_instances, prints_manager, thread_index=thread_index)
return False, -1
module_instances.append(module_instance)
if docker_image:
test_docker_images.update(docker_image)
prints_manager.add_print_job('Create integration {} succeed'.format(integration_name), print, thread_index)
# create incident with playbook
incident, inc_id = __create_incident_with_playbook(client, 'inc_{}'.format(playbook_id, ),
playbook_id, integrations, prints_manager,
thread_index=thread_index)
if not incident:
return False, -1
investigation_id = incident['investigationId']
if investigation_id is None or len(investigation_id) == 0:
incident_id_not_found_msg = 'Failed to get investigation id of incident:' + incident
prints_manager.add_print_job(incident_id_not_found_msg, print_error, thread_index) # disable-secrets-detection
return False, -1
prints_manager.add_print_job('Investigation URL: {}/#/WorkPlan/{}'.format(server_url, investigation_id), print,
thread_index)
timeout_amount = options['timeout'] if 'timeout' in options else DEFAULT_TIMEOUT
timeout = time.time() + timeout_amount
i = 1
# wait for playbook to finish run
while True:
# give playbook time to run
time.sleep(1)
# fetch status
playbook_state = __get_investigation_playbook_state(client, investigation_id, prints_manager,
thread_index=thread_index)
if playbook_state in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION):
break
if playbook_state == PB_Status.FAILED:
if is_mock_run:
prints_manager.add_print_job(playbook_id + ' failed with error/s', print_warning, thread_index)
__print_investigation_error(client, playbook_id, investigation_id, prints_manager,
LOG_COLORS.YELLOW, thread_index=thread_index)
else:
prints_manager.add_print_job(playbook_id + ' failed with error/s', print_error, thread_index)
__print_investigation_error(client, playbook_id, investigation_id, prints_manager,
thread_index=thread_index)
break
if time.time() > timeout:
prints_manager.add_print_job(playbook_id + ' failed on timeout', print_error, thread_index)
break
if i % DEFAULT_INTERVAL == 0:
loop_number_message = 'loop no. {}, playbook state is {}'.format(
i // DEFAULT_INTERVAL, playbook_state)
prints_manager.add_print_job(loop_number_message, print, thread_index)
i = i + 1
__disable_integrations_instances(client, module_instances, prints_manager, thread_index=thread_index)
if test_docker_images:
memory_threshold = options.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE)
pids_threshold = options.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
error_message = Docker.check_resource_usage(server_url=server_url,
docker_images=test_docker_images,
def_memory_threshold=memory_threshold,
def_pid_threshold=pids_threshold,
docker_thresholds=docker_thresholds)
if error_message:
prints_manager.add_print_job(error_message, print_error, thread_index)
return PB_Status.FAILED_DOCKER_TEST, inc_id
else:
prints_manager.add_print_job("Skipping docker container memory resource check for test {}".format(playbook_id),
print_warning, thread_index)
test_pass = playbook_state in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
if test_pass:
# delete incident
__delete_incident(client, incident, prints_manager, thread_index=thread_index)
# delete integration instance
__delete_integrations_instances(client, module_instances, prints_manager, thread_index=thread_index)
return playbook_state, inc_id
def disable_all_integrations(demisto_api_key, server, prints_manager, thread_index=0):
"""
Disable all enabled integrations. Should be called at the start of a test loop so each run starts clean.
Arguments:
demisto_api_key -- API key used to build the demisto py client
server -- base URL of the demisto server
prints_manager -- manager used to queue print jobs
thread_index -- index of the calling thread (defaults to 0)
"""
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
try:
body = {'size': 1000}
int_resp = demisto_client.generic_request_func(self=client, method='POST',
path='/settings/integration/search',
body=body)
int_instances = ast.literal_eval(int_resp[0])
except requests.exceptions.RequestException as conn_err:
error_message = 'Failed to disable all integrations, error trying to communicate with demisto server: ' \
'{} '.format(conn_err)
prints_manager.add_print_job(error_message, print_error, thread_index)
return
if int(int_resp[1]) != 200:
error_message = 'Get all integration instances failed with status code: {}'.format(int_resp[1])
prints_manager.add_print_job(error_message, print_error, thread_index)
return
if 'instances' not in int_instances:
prints_manager.add_print_job("No integrations instances found to disable all", print, thread_index)
return
to_disable = []
for instance in int_instances['instances']:
if instance.get('enabled') == 'true' and instance.get("isIntegrationScript"):
add_to_disable_message = "Adding to disable list. Name: {}. Brand: {}".format(instance.get("name"),
instance.get("brand"))
prints_manager.add_print_job(add_to_disable_message, print, thread_index)
to_disable.append(instance)
if len(to_disable) > 0:
__disable_integrations_instances(client, to_disable, prints_manager, thread_index=thread_index)
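# --- Hedged usage sketch (illustration only, not part of the original module).
# Assumptions: `client` is a demisto py client built with demisto_client.configure,
# `prints_manager` is the print-job manager used throughout this file, and
# `integrations`/`playbook_id` come from the test configuration; the names below
# are placeholders.
#
#   disable_all_integrations(demisto_api_key, server_url, prints_manager)
#   playbook_state, inc_id = test_integration(client, server_url, integrations,
#                                             playbook_id, prints_manager,
#                                             options={'timeout': 300})
#   if playbook_state == PB_Status.COMPLETED:
#       prints_manager.add_print_job('{} passed'.format(playbook_id), print, 0)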
| [
[
[
23,
37
]
],
[
[
45,
49
],
[
18963,
18967
]
],
[
[
57,
61
],
[
1616,
1620
],
[
12724,
12728
],
[
12974,
12978
],
[
13294,
13298
],
[
22550,
22554
],
[
22688,
22692
],
[
23722,
23726
]
],
[
[
81,
88
],
[
7462,
7469
],
[
9639,
9646
],
[
10709,
10716
],
[
15271,
15278
],
[
16321,
16328
]
],
[
[
96,
100
],
[
5263,
5267
]
],
[
[
108,
111
],
[
1111,
1114
],
[
3637,
3640
],
[
7573,
7576
],
[
13769,
13772
],
[
17673,
17676
],
[
26311,
26314
]
],
[
[
119,
125
],
[
15683,
15689
],
[
17212,
17218
]
],
[
[
133,
140
],
[
424,
431
],
[
3127,
3134
]
],
[
[
148,
167
],
[
13828,
13836
],
[
14749,
14757
],
[
15776,
15784
],
[
17327,
17335
],
[
26352,
26360
]
],
[
[
212,
224
],
[
982,
994
],
[
2814,
2826
],
[
6953,
6965
],
[
9120,
9132
],
[
10379,
10391
],
[
11283,
11295
],
[
12531,
12543
],
[
12875,
12887
]
],
[
[
232,
246
],
[
805,
819
],
[
2392,
2406
],
[
6727,
6741
],
[
8882,
8896
],
[
10141,
10155
],
[
10970,
10984
],
[
12230,
12244
],
[
12296,
12310
],
[
13568,
13582
],
[
14583,
14597
],
[
15535,
15549
],
[
17073,
17087
],
[
25922,
25936
],
[
26065,
26079
]
],
[
[
254,
258
],
[
19703,
19707
]
],
[
[
289,
300
],
[
1556,
1567
],
[
1955,
1966
],
[
3054,
3065
],
[
3562,
3573
],
[
3973,
3984
],
[
4149,
4160
],
[
7174,
7185
],
[
7398,
7409
],
[
7479,
7490
],
[
9375,
9386
],
[
9571,
9582
],
[
9653,
9664
],
[
10417,
10428
],
[
10629,
10640
],
[
11351,
11362
],
[
12138,
12149
],
[
13229,
13240
],
[
14099,
14110
],
[
15003,
15014
],
[
15207,
15218
],
[
15285,
15296
],
[
16042,
16053
],
[
16257,
16268
],
[
16335,
16346
],
[
17591,
17602
],
[
21132,
21143
],
[
22205,
22216
],
[
23496,
23507
],
[
23822,
23833
],
[
24973,
24984
],
[
26615,
26626
],
[
26845,
26856
]
],
[
[
302,
315
],
[
2264,
2277
],
[
3313,
3326
],
[
7919,
7932
],
[
23177,
23190
],
[
25223,
25236
]
],
[
[
317,
328
],
[
17818,
17829
],
[
18104,
18115
],
[
18298,
18309
],
[
18546,
18557
]
],
[
[
330,
340
],
[
16945,
16955
],
[
23349,
23359
]
],
[
[
342,
348
],
[
8292,
8298
],
[
24333,
24339
],
[
24426,
24432
],
[
24487,
24493
]
],
[
[
385,
394
],
[
14141,
14150
],
[
14278,
14287
],
[
22946,
22955
],
[
22967,
22976
],
[
23048,
23057
],
[
25019,
25028
],
[
25288,
25297
],
[
25309,
25318
]
],
[
[
478,
493
],
[
22520,
22535
]
],
[
[
499,
515
],
[
23883,
23899
],
[
24004,
24020
]
],
[
[
521,
537
],
[
17965,
17981
]
],
[
[
607,
631
],
[
4796,
4820
]
],
[
[
2074,
2101
],
[
7737,
7764
]
],
[
[
4268,
4297
],
[
20251,
20280
]
],
[
[
8393,
8425
],
[
8126,
8158
],
[
24144,
24176
],
[
27592,
27624
]
],
[
[
9686,
9717
]
],
[
[
10812,
10843
],
[
21634,
21665
]
],
[
[
13438,
13472
],
[
22751,
22785
]
],
[
[
14376,
14393
],
[
25394,
25411
]
],
[
[
15428,
15457
],
[
16652,
16681
]
],
[
[
16479,
16510
],
[
21200,
21231
],
[
25520,
25551
]
],
[
[
16856,
16883
],
[
23222,
23249
],
[
23539,
23566
]
],
[
[
18688,
18712
],
[
20148,
20172
]
],
[
[
19323,
19339
]
],
[
[
25662,
25686
]
]
] |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "PyData Sphinx Theme"
copyright = "2019, PyData Community"
author = "PyData Community"
# The full version, including alpha/beta/rc tags
release = "0.0.1dev0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"numpydoc",
"recommonmark",
"jupyter_sphinx",
]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_sidebars = {
"contributing": ["sidebar-search-bs.html", "custom-template.html"],
"changelog": [],
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
html_logo = "_static/pandas.svg"
html_theme_options = {
"external_links": [
{"url": "https://pandas.pydata.org/pandas-docs/stable/", "name": "Pandas Docs"}
],
"github_url": "https://github.com/pydata/pydata-sphinx-theme",
"twitter_url": "https://twitter.com/pandas_dev",
"icon_links": [
{
"name": "PyPI",
"url": "https://pypi.org/project/pydata-sphinx-theme",
"icon": "fas fa-box",
}
],
"use_edit_page_button": True,
"show_toc_level": 1,
# "navbar_align": "right", # For testing that the navbar items align properly
}
html_context = {
"github_user": "pandas-dev",
"github_repo": "pydata-sphinx-theme",
"github_version": "master",
"doc_path": "docs",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Auto-convert markdown pages to demo --------------------------------------
import recommonmark
from recommonmark.transform import AutoStructify
def setup(app):
app.add_transform(AutoStructify)
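# A commented-out sketch of an alternative setup (assumption: the documented
# recommonmark AutoStructify options such as `enable_auto_toc_tree`); keep it
# commented unless Markdown pages should get an automatic toctree:
#
# def setup(app):
#     app.add_config_value(
#         "recommonmark_config",
#         {"enable_auto_toc_tree": True, "auto_toc_tree_section": "Contents"},
#         True,
#     )
#     app.add_transform(AutoStructify)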
| [
[
[
703,
710
]
],
[
[
735,
744
]
],
[
[
772,
778
]
],
[
[
850,
857
]
],
[
[
1103,
1113
]
],
[
[
1235,
1255
]
],
[
[
1337,
1351
]
],
[
[
1562,
1578
]
],
[
[
1619,
1632
]
],
[
[
1919,
1929
]
],
[
[
1954,
1963
]
],
[
[
1988,
2006
]
],
[
[
2571,
2583
]
],
[
[
2952,
2968
]
],
[
[
3072,
3084
]
],
[
[
3120,
3133
],
[
3174,
3187
]
],
[
[
3140,
3145
]
]
] |
from charms.reactive import Endpoint, when, set_flag, clear_flag
import charmhelpers.core.hookenv as hookenv
from charmhelpers.core.hookenv import log
class GearmanRequires(Endpoint):
@when('endpoint.{endpoint_name}.joined')
def joined(self):
# if any(unit.received['port'] for unit in self.all_joined_units):
set_flag(self.expand_name('available'))
@when('endpoint.{endpoint_name}.changed')
def changed(self):
# if any(unit.received['port'] for unit in self.all_joined_units):
set_flag(self.expand_name('available'))
def address(self):
"""Get the address to access Gearman over."""
for relation in self.relations:
for unit in relation.joined_units:
log("Unit: {}".format(unit.received))
address = unit.received['ingress-address']
if address is not None:
return address
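# --- Hedged consumer sketch (illustration only; a handler like this belongs in
# the charm layer, not in this interface file). Assumes the endpoint is named
# "gearman" in metadata.yaml, so the flag set above expands to
# 'endpoint.gearman.available'.
#
#   from charms.reactive import when, endpoint_from_flag
#
#   @when('endpoint.gearman.available')
#   def configure_worker():
#       gearman = endpoint_from_flag('endpoint.gearman.available')
#       hookenv.log("Gearman server address: {}".format(gearman.address()))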
| [
[
[
28,
36
],
[
174,
182
]
],
[
[
38,
42
],
[
191,
195
],
[
382,
386
]
],
[
[
44,
52
],
[
336,
344
],
[
529,
537
]
],
[
[
54,
64
]
],
[
[
72,
108
]
],
[
[
147,
150
],
[
750,
753
]
],
[
[
158,
173
]
]
] |
import csv
import sys
from flee import flee
from flee import SimulationSettings
class InputGeography:
"""
Class which reads in Geographic information.
"""
def __init__(self):
self.locations = []
self.links = []
def ReadLocationsFromCSV(self, csv_name, columns=["name", "region", "country", "gps_x", "gps_y", "location_type", "conflict_date", "pop/cap"]):
"""
Converts a CSV file to a locations information table
"""
self.locations = []
c = {} # column map
c["location_type"] = 0
c["conflict_date"] = 0
c["country"] = 0
c["region"] = 0
for i in range(0, len(columns)):
c[columns[i]] = i
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if row[0][0] == "#":
pass
else:
# print(row)
self.locations.append([row[c["name"]], row[c["pop/cap"]], row[c["gps_x"]], row[c["gps_y"]], row[
c["location_type"]], row[c["conflict_date"]], row[c["region"]], row[c["country"]]])
def ReadLinksFromCSV(self, csv_name, name1_col=0, name2_col=1, dist_col=2):
"""
Converts a CSV file to a links information table
"""
self.links = []
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if row[0][0] == "#":
pass
else:
# print(row)
self.links.append(
[row[name1_col], row[name2_col], row[dist_col]])
def ReadClosuresFromCSV(self, csv_name):
"""
Read the closures.csv file. Format is:
closure_type,name1,name2,closure_start,closure_end
"""
self.closures = []
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if row[0][0] == "#":
pass
else:
# print(row)
self.closures.append(row)
def StoreInputGeographyInEcosystem(self, e):
"""
Store the geographic information in this class in a FLEE simulation,
overwriting existing entries.
"""
lm = {}
for l in self.locations:
# if population field is empty, just set it to 0.
if len(l[1]) < 1:
l[1] = "0"
# if country field is empty, just set it to "unknown".
if len(l[7]) < 1:
l[7] = "unknown"
#print(l, file=sys.stderr)
movechance = l[4]
if "conflict" in l[4].lower() and int(l[5]) > 0:
movechance = "town"
if "camp" in l[4].lower():
lm[l[0]] = e.addLocation(l[0], movechance=movechance, capacity=int(
l[1]), x=l[2], y=l[3], country=l[7], region=l[6])
else:
lm[l[0]] = e.addLocation(l[0], movechance=movechance, pop=int(
l[1]), x=l[2], y=l[3], country=l[7], region=l[6])
for l in self.links:
if (len(l) > 3):
if int(l[3]) == 1:
e.linkUp(l[0], l[1], int(l[2]), True)
if int(l[3]) == 2:
e.linkUp(l[1], l[0], int(l[2]), True)
else:
e.linkUp(l[0], l[1], int(l[2]), False)
else:
e.linkUp(l[0], l[1], int(l[2]), False)
e.closures = []
for l in self.closures:
e.closures.append([l[0], l[1], l[2], int(l[3]), int(l[4])])
return e, lm
def AddNewConflictZones(self, e, time):
for l in self.locations:
if "conflict" in l[4].lower() and int(l[5]) == time:
print("Time = %s. Adding a new conflict zone [%s]" % (
time, l[0]), file=sys.stderr)
e.add_conflict_zone(l[0])
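# --- Hedged usage sketch (illustration only). Assumes flee.Ecosystem() is the
# ecosystem consumed by StoreInputGeographyInEcosystem above; the CSV file names
# are placeholders.
if __name__ == "__main__":
    ig = InputGeography()
    ig.ReadLocationsFromCSV("locations.csv")
    ig.ReadLinksFromCSV("routes.csv")
    ig.ReadClosuresFromCSV("closures.csv")
    e, location_map = ig.StoreInputGeographyInEcosystem(flee.Ecosystem())
    print("Loaded {} locations".format(len(location_map)), file=sys.stderr)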
| [
[
[
7,
10
],
[
796,
799
],
[
1472,
1475
],
[
2030,
2033
]
],
[
[
18,
21
],
[
4058,
4061
]
],
[
[
39,
43
]
],
[
[
61,
79
]
],
[
[
88,
102
]
]
] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'], random_state=42)
# Average CV score on the training set was: 0.9996457287206185
exported_pipeline = KNeighborsClassifier(n_neighbors=2, p=1, weights="distance")
# Fix random state in exported estimator
if hasattr(exported_pipeline, 'random_state'):
setattr(exported_pipeline, 'random_state', 42)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
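# A hedged addition (not part of the TPOT export): report hold-out accuracy of
# the fitted pipeline with scikit-learn's metrics.
from sklearn.metrics import accuracy_score
print("Hold-out accuracy: {:.4f}".format(accuracy_score(testing_target, results)))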
| [
[
[
7,
18
],
[
298,
300
]
],
[
[
26,
38
],
[
235,
237
]
],
[
[
75,
91
],
[
439,
455
]
],
[
[
122,
142
],
[
588,
608
]
],
[
[
223,
232
],
[
321,
330
],
[
466,
475
]
],
[
[
310,
318
],
[
456,
464
]
],
[
[
354,
371
],
[
811,
828
]
],
[
[
373,
389
],
[
883,
899
]
],
[
[
391,
406
],
[
830,
845
]
],
[
[
408,
422
]
],
[
[
568,
585
],
[
701,
718
],
[
749,
766
],
[
789,
806
],
[
857,
874
]
],
[
[
847,
854
]
]
] |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <[email protected]>`
'''
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.mock import MagicMock, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import brew
# Global Variables
brew.__context__ = {}
brew.__salt__ = {}
TAPS_STRING = 'homebrew/dupes\nhomebrew/science\nhomebrew/x11'
TAPS_LIST = ['homebrew/dupes', 'homebrew/science', 'homebrew/x11']
HOMEBREW_BIN = '/usr/local/bin/brew'
class BrewTestCase(TestCase):
'''
TestCase for salt.modules.brew module
'''
# '_list_taps' function tests: 1
def test_list_taps(self):
'''
Tests the return of the list of taps
'''
mock_taps = MagicMock(return_value=TAPS_STRING)
with patch.dict(brew.__salt__, {'cmd.run': mock_taps}):
self.assertEqual(brew._list_taps(), TAPS_LIST)
# '_tap' function tests: 3
@patch('salt.modules.brew._list_taps', MagicMock(return_value=TAPS_LIST))
def test_tap_installed(self):
'''
Tests if tap argument is already installed or not
'''
self.assertTrue(brew._tap('homebrew/science'))
@patch('salt.modules.brew._list_taps', MagicMock(return_value={}))
def test_tap_failure(self):
'''
Tests if the tap installation failed
'''
mock_failure = MagicMock(return_value=1)
with patch.dict(brew.__salt__, {'cmd.retcode': mock_failure}):
self.assertFalse(brew._tap('homebrew/test'))
@patch('salt.modules.brew._list_taps', MagicMock(return_value=TAPS_LIST))
def test_tap(self):
'''
Tests adding unofficial Github repos to the list of brew taps
'''
mock_success = MagicMock(return_value=0)
with patch.dict(brew.__salt__, {'cmd.retcode': mock_success}):
self.assertTrue(brew._tap('homebrew/test'))
# '_homebrew_bin' function tests: 1
def test_homebrew_bin(self):
'''
Tests the path to the homebrew binary
'''
mock_path = MagicMock(return_value='/usr/local')
with patch.dict(brew.__salt__, {'cmd.run': mock_path}):
self.assertEqual(brew._homebrew_bin(), '/usr/local/bin/brew')
# 'list_pkgs' function tests: 2
# Only tested a few basics
# Full functionality should be tested in integration phase
def test_list_pkgs_removed(self):
'''
Tests removed implementation
'''
self.assertEqual(brew.list_pkgs(removed=True), {})
def test_list_pkgs_versions_true(self):
'''
Tests if pkg.list_pkgs is already in context and is a list
'''
mock_context = {'foo': ['bar']}
with patch.dict(brew.__context__, {'pkg.list_pkgs': mock_context}):
self.assertEqual(brew.list_pkgs(versions_as_list=True),
mock_context)
# 'version' function tests: 1
def test_version(self):
'''
Tests version name returned
'''
mock_version = MagicMock(return_value='0.1.5')
with patch.dict(brew.__salt__, {'pkg_resource.version': mock_version}):
self.assertEqual(brew.version('foo'), '0.1.5')
# 'latest_version' function tests: 0
# It has not been fully implemented
# 'remove' function tests: 1
# Only tested a few basics
# Full functionality should be tested in integration phase
@patch('salt.modules.brew.list_pkgs',
MagicMock(return_value={'test': '0.1.5'}))
def test_remove(self):
'''
Tests if package to be removed exists
'''
mock_params = MagicMock(return_value=({'foo': None}, 'repository'))
with patch.dict(brew.__salt__,
{'pkg_resource.parse_targets': mock_params}):
self.assertEqual(brew.remove('foo'), {})
# 'refresh_db' function tests: 2
@patch('salt.modules.brew._homebrew_bin',
MagicMock(return_value=HOMEBREW_BIN))
def test_refresh_db_failure(self):
'''
Tests an update of homebrew package repository failure
'''
mock_user = MagicMock(return_value='foo')
mock_failure = MagicMock(return_value=1)
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.retcode': mock_failure}):
self.assertFalse(brew.refresh_db())
@patch('salt.modules.brew._homebrew_bin',
MagicMock(return_value=HOMEBREW_BIN))
def test_refresh_db(self):
'''
Tests a successful update of homebrew package repository
'''
mock_user = MagicMock(return_value='foo')
mock_success = MagicMock(return_value=0)
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.retcode': mock_success}):
self.assertTrue(brew.refresh_db())
# 'install' function tests: 1
# Only tested a few basics
# Full functionality should be tested in integration phase
def test_install(self):
'''
Tests if package to be installed exists
'''
mock_params = MagicMock(return_value=[None, None])
with patch.dict(brew.__salt__,
{'pkg_resource.parse_targets': mock_params}):
self.assertEqual(brew.install('name=foo'), {})
if __name__ == '__main__':
from integration import run_tests
run_tests(BrewTestCase, needs_daemon=False)
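# --- Hedged illustration (not part of the test suite): the patch.dict pattern
# used above temporarily injects a mock into brew.__salt__, so the module under
# test calls the mock instead of shelling out to the real `brew` binary.
#
#   mock_run = MagicMock(return_value=TAPS_STRING)
#   with patch.dict(brew.__salt__, {'cmd.run': mock_run}):
#       assert brew._list_taps() == TAPS_LIST
#   assert mock_run.call_count == 1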
| [
[
[
147,
155
],
[
581,
589
]
],
[
[
185,
194
],
[
1043,
1052
],
[
1293,
1302
],
[
1643,
1652
],
[
3547,
3556
],
[
4021,
4030
],
[
4529,
4538
],
[
808,
817
],
[
1445,
1454
],
[
1819,
1828
],
[
2137,
2146
],
[
3112,
3121
],
[
3709,
3718
],
[
4205,
4214
],
[
4258,
4267
],
[
4707,
4716
],
[
4760,
4769
],
[
5224,
5233
]
],
[
[
196,
201
],
[
1005,
1010
],
[
1255,
1260
],
[
1605,
1610
],
[
3499,
3504
],
[
3969,
3974
],
[
4477,
4482
],
[
857,
862
],
[
1484,
1489
],
[
1858,
1863
],
[
2187,
2192
],
[
2791,
2796
],
[
3157,
3162
],
[
3776,
3781
],
[
4297,
4302
],
[
4799,
4804
],
[
5274,
5279
]
],
[
[
234,
251
],
[
253,
270
]
],
[
[
326,
330
],
[
351,
355
],
[
373,
377
],
[
868,
872
],
[
937,
941
],
[
1218,
1222
],
[
1495,
1499
],
[
1571,
1575
],
[
1869,
1873
],
[
1944,
1948
],
[
2198,
2202
],
[
2267,
2271
],
[
2568,
2572
],
[
2802,
2806
],
[
2883,
2887
],
[
3168,
3172
],
[
3253,
3257
],
[
3787,
3791
],
[
3901,
3905
],
[
4308,
4312
],
[
4452,
4456
],
[
4810,
4814
],
[
4953,
4957
],
[
5285,
5289
],
[
5399,
5403
]
],
[
[
393,
404
],
[
831,
842
]
],
[
[
456,
465
],
[
1066,
1075
],
[
1666,
1675
],
[
956,
965
]
],
[
[
523,
535
],
[
4044,
4056
],
[
4552,
4564
]
],
[
[
568,
580
],
[
5510,
5522
]
],
[
[
5486,
5495
],
[
5500,
5509
]
]
] |
# coding: utf-8
# (C) Copyright IBM Corp. 2019, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IBM Watson™ Compare and Comply analyzes governing documents to provide details about
critical aspects of the documents.
"""
import json
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from .common import get_sdk_headers
from datetime import date
from datetime import datetime
from enum import Enum
from ibm_cloud_sdk_core import BaseService
from ibm_cloud_sdk_core import DetailedResponse
from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from typing import BinaryIO
from typing import Dict
from typing import List
##############################################################################
# Service
##############################################################################
class CompareComplyV1(BaseService):
"""The Compare Comply V1 service."""
DEFAULT_SERVICE_URL = 'https://gateway.watsonplatform.net/compare-comply/api'
DEFAULT_SERVICE_NAME = 'compare_comply'
def __init__(
self,
version: str,
authenticator: Authenticator = None,
service_name: str = DEFAULT_SERVICE_NAME,
) -> None:
"""
Construct a new client for the Compare Comply service.
:param str version: The API version date to use with the service, in
"YYYY-MM-DD" format. Whenever the API is changed in a backwards
incompatible way, a new minor version of the API is released.
The service uses the API version for the date you specify, or
the most recent version before that date. Note that you should
not programmatically specify the current date at runtime, in
case the API has been updated since your application's release.
Instead, specify a version date that is compatible with your
application, and don't change it until your application is
ready for a later version.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
if not authenticator:
authenticator = get_authenticator_from_environment(service_name)
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator,
disable_ssl_verification=False)
self.version = version
self.configure_service(service_name)
#########################
# HTML conversion
#########################
def convert_to_html(self,
file: BinaryIO,
*,
file_content_type: str = None,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Convert document to HTML.
Converts a document to HTML.
:param TextIO file: The document to convert.
:param str file_content_type: (optional) The content type of file.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file is None:
raise ValueError('file must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='convert_to_html')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
form_data = []
form_data.append(('file', (None, file, file_content_type or
'application/octet-stream')))
url = '/v1/html_conversion'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
#########################
# Element classification
#########################
def classify_elements(self,
file: BinaryIO,
*,
file_content_type: str = None,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Classify the elements of a document.
Analyzes the structural and semantic elements of a document.
:param TextIO file: The document to classify.
:param str file_content_type: (optional) The content type of file.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file is None:
raise ValueError('file must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='classify_elements')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
form_data = []
form_data.append(('file', (None, file, file_content_type or
'application/octet-stream')))
url = '/v1/element_classification'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
#########################
# Tables
#########################
def extract_tables(self,
file: BinaryIO,
*,
file_content_type: str = None,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Extract a document's tables.
Analyzes the tables in a document.
:param TextIO file: The document on which to run table extraction.
:param str file_content_type: (optional) The content type of file.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file is None:
raise ValueError('file must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='extract_tables')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
form_data = []
form_data.append(('file', (None, file, file_content_type or
'application/octet-stream')))
url = '/v1/tables'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
#########################
# Comparison
#########################
def compare_documents(self,
file_1: BinaryIO,
file_2: BinaryIO,
*,
file_1_content_type: str = None,
file_2_content_type: str = None,
file_1_label: str = None,
file_2_label: str = None,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Compare two documents.
Compares two input documents. Documents must be in the same format.
:param TextIO file_1: The first document to compare.
:param TextIO file_2: The second document to compare.
:param str file_1_content_type: (optional) The content type of file_1.
:param str file_2_content_type: (optional) The content type of file_2.
:param str file_1_label: (optional) A text label for the first document.
:param str file_2_label: (optional) A text label for the second document.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file_1 is None:
raise ValueError('file_1 must be provided')
if file_2 is None:
raise ValueError('file_2 must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='compare_documents')
headers.update(sdk_headers)
params = {
'version': self.version,
'file_1_label': file_1_label,
'file_2_label': file_2_label,
'model': model
}
form_data = []
form_data.append(('file_1', (None, file_1, file_1_content_type or
'application/octet-stream')))
form_data.append(('file_2', (None, file_2, file_2_content_type or
'application/octet-stream')))
url = '/v1/comparison'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
#########################
# Feedback
#########################
def add_feedback(self,
feedback_data: 'FeedbackDataInput',
*,
user_id: str = None,
comment: str = None,
**kwargs) -> 'DetailedResponse':
"""
Add feedback.
Adds feedback in the form of _labels_ from a subject-matter expert (SME) to a
governing document.
**Important:** Feedback is not immediately incorporated into the training model,
nor is it guaranteed to be incorporated at a later date. Instead, submitted
feedback is used to suggest future updates to the training model.
:param FeedbackDataInput feedback_data: Feedback data for submission.
:param str user_id: (optional) An optional string identifying the user.
:param str comment: (optional) An optional comment on or description of the
feedback.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if feedback_data is None:
raise ValueError('feedback_data must be provided')
feedback_data = self._convert_model(feedback_data)
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='add_feedback')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'feedback_data': feedback_data,
'user_id': user_id,
'comment': comment
}
url = '/v1/feedback'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request)
return response
def list_feedback(self,
*,
feedback_type: str = None,
before: date = None,
after: date = None,
document_title: str = None,
model_id: str = None,
model_version: str = None,
category_removed: str = None,
category_added: str = None,
category_not_changed: str = None,
type_removed: str = None,
type_added: str = None,
type_not_changed: str = None,
page_limit: int = None,
cursor: str = None,
sort: str = None,
include_total: bool = None,
**kwargs) -> 'DetailedResponse':
"""
List the feedback in a document.
Lists the feedback in a document.
:param str feedback_type: (optional) An optional string that filters the
output to include only feedback with the specified feedback type. The only
permitted value is `element_classification`.
:param date before: (optional) An optional string in the format
`YYYY-MM-DD` that filters the output to include only feedback that was
added before the specified date.
:param date after: (optional) An optional string in the format `YYYY-MM-DD`
that filters the output to include only feedback that was added after the
specified date.
:param str document_title: (optional) An optional string that filters the
output to include only feedback from the document with the specified
`document_title`.
:param str model_id: (optional) An optional string that filters the output
to include only feedback with the specified `model_id`. The only permitted
value is `contracts`.
:param str model_version: (optional) An optional string that filters the
output to include only feedback with the specified `model_version`.
:param str category_removed: (optional) An optional string in the form of a
comma-separated list of categories. If it is specified, the service filters
the output to include only feedback that has at least one category from the
list removed.
:param str category_added: (optional) An optional string in the form of a
comma-separated list of categories. If this is specified, the service
filters the output to include only feedback that has at least one category
from the list added.
:param str category_not_changed: (optional) An optional string in the form
of a comma-separated list of categories. If this is specified, the service
filters the output to include only feedback that has at least one category
from the list unchanged.
:param str type_removed: (optional) An optional string of comma-separated
`nature`:`party` pairs. If this is specified, the service filters the
output to include only feedback that has at least one `nature`:`party` pair
from the list removed.
:param str type_added: (optional) An optional string of comma-separated
`nature`:`party` pairs. If this is specified, the service filters the
output to include only feedback that has at least one `nature`:`party` pair
from the list added.
:param str type_not_changed: (optional) An optional string of
comma-separated `nature`:`party` pairs. If this is specified, the service
filters the output to include only feedback that has at least one
`nature`:`party` pair from the list unchanged.
:param int page_limit: (optional) An optional integer specifying the number
of documents that you want the service to return.
:param str cursor: (optional) An optional string that returns the set of
documents after the previous set. Use this parameter with the `page_limit`
parameter.
:param str sort: (optional) An optional comma-separated list of fields in
the document to sort on. You can optionally specify the sort direction by
prefixing the value of the field with `-` for descending order or `+` for
ascending order (the default). Currently permitted sorting fields are
`created`, `user_id`, and `document_title`.
:param bool include_total: (optional) An optional boolean value. If
specified as `true`, the `pagination` object in the output includes a value
called `total` that gives the total count of feedback created.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_feedback')
headers.update(sdk_headers)
params = {
'version': self.version,
'feedback_type': feedback_type,
'before': before,
'after': after,
'document_title': document_title,
'model_id': model_id,
'model_version': model_version,
'category_removed': category_removed,
'category_added': category_added,
'category_not_changed': category_not_changed,
'type_removed': type_removed,
'type_added': type_added,
'type_not_changed': type_not_changed,
'page_limit': page_limit,
'cursor': cursor,
'sort': sort,
'include_total': include_total
}
url = '/v1/feedback'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_feedback(self, feedback_id: str, *, model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Get a specified feedback entry.
Gets a feedback entry with a specified `feedback_id`.
:param str feedback_id: A string that specifies the feedback entry to be
included in the output.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if feedback_id is None:
raise ValueError('feedback_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_feedback')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
url = '/v1/feedback/{0}'.format(*self._encode_path_vars(feedback_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def delete_feedback(self, feedback_id: str, *, model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Delete a specified feedback entry.
Deletes a feedback entry with a specified `feedback_id`.
:param str feedback_id: A string that specifies the feedback entry to be
deleted from the document.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if feedback_id is None:
raise ValueError('feedback_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_feedback')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
url = '/v1/feedback/{0}'.format(*self._encode_path_vars(feedback_id))
request = self.prepare_request(method='DELETE',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
#########################
# Batches
#########################
def create_batch(self,
function: str,
input_credentials_file: BinaryIO,
input_bucket_location: str,
input_bucket_name: str,
output_credentials_file: BinaryIO,
output_bucket_location: str,
output_bucket_name: str,
*,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Submit a batch-processing request.
Run Compare and Comply methods over a collection of input documents.
**Important:** Batch processing requires the use of the [IBM Cloud Object Storage
service](https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-about#about-ibm-cloud-object-storage).
The use of IBM Cloud Object Storage with Compare and Comply is discussed at [Using
batch
processing](https://cloud.ibm.com/docs/services/compare-comply?topic=compare-comply-batching#before-you-batch).
:param str function: The Compare and Comply method to run across the
submitted input documents.
:param TextIO input_credentials_file: A JSON file containing the input
Cloud Object Storage credentials. At a minimum, the credentials must enable
`READ` permissions on the bucket defined by the `input_bucket_name`
parameter.
:param str input_bucket_location: The geographical location of the Cloud
Object Storage input bucket as listed on the **Endpoint** tab of your Cloud
Object Storage instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:param str input_bucket_name: The name of the Cloud Object Storage input
bucket.
:param TextIO output_credentials_file: A JSON file that lists the Cloud
Object Storage output credentials. At a minimum, the credentials must
enable `READ` and `WRITE` permissions on the bucket defined by the
`output_bucket_name` parameter.
:param str output_bucket_location: The geographical location of the Cloud
Object Storage output bucket as listed on the **Endpoint** tab of your
Cloud Object Storage instance; for example, `us-geo`, `eu-geo`, or
`ap-geo`.
:param str output_bucket_name: The name of the Cloud Object Storage output
bucket.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if function is None:
raise ValueError('function must be provided')
if input_credentials_file is None:
raise ValueError('input_credentials_file must be provided')
if input_bucket_location is None:
raise ValueError('input_bucket_location must be provided')
if input_bucket_name is None:
raise ValueError('input_bucket_name must be provided')
if output_credentials_file is None:
raise ValueError('output_credentials_file must be provided')
if output_bucket_location is None:
raise ValueError('output_bucket_location must be provided')
if output_bucket_name is None:
raise ValueError('output_bucket_name must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_batch')
headers.update(sdk_headers)
params = {'version': self.version, 'function': function, 'model': model}
form_data = []
form_data.append(('input_credentials_file',
(None, input_credentials_file, 'application/json')))
input_bucket_location = str(input_bucket_location)
form_data.append(('input_bucket_location', (None, input_bucket_location,
'text/plain')))
input_bucket_name = str(input_bucket_name)
form_data.append(
('input_bucket_name', (None, input_bucket_name, 'text/plain')))
form_data.append(('output_credentials_file',
(None, output_credentials_file, 'application/json')))
output_bucket_location = str(output_bucket_location)
form_data.append(('output_bucket_location',
(None, output_bucket_location, 'text/plain')))
output_bucket_name = str(output_bucket_name)
form_data.append(
('output_bucket_name', (None, output_bucket_name, 'text/plain')))
url = '/v1/batches'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
def list_batches(self, **kwargs) -> 'DetailedResponse':
"""
List submitted batch-processing jobs.
Lists batch-processing jobs submitted by users.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_batches')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v1/batches'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_batch(self, batch_id: str, **kwargs) -> 'DetailedResponse':
"""
Get information about a specific batch-processing job.
Gets information about a batch-processing job with a specified ID.
:param str batch_id: The ID of the batch-processing job whose information
you want to retrieve.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if batch_id is None:
raise ValueError('batch_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_batch')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v1/batches/{0}'.format(*self._encode_path_vars(batch_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def update_batch(self,
batch_id: str,
action: str,
*,
model: str = None,
**kwargs) -> 'DetailedResponse':
"""
Update a pending or active batch-processing job.
Updates a pending or active batch-processing job. You can rescan the input bucket
to check for new documents or cancel a job.
:param str batch_id: The ID of the batch-processing job you want to update.
:param str action: The action you want to perform on the specified
batch-processing job.
:param str model: (optional) The analysis model to be used by the service.
For the **Element classification** and **Compare two documents** methods,
the default is `contracts`. For the **Extract tables** method, the default
is `tables`. These defaults apply to the standalone methods as well as to
the methods' use in batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if batch_id is None:
raise ValueError('batch_id must be provided')
if action is None:
raise ValueError('action must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_batch')
headers.update(sdk_headers)
params = {'version': self.version, 'action': action, 'model': model}
url = '/v1/batches/{0}'.format(*self._encode_path_vars(batch_id))
request = self.prepare_request(method='PUT',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
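# --- Hedged usage sketch (illustration only, not part of the generated SDK).
# The API key, service URL, file name and version date below are placeholders.
#
#   from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
#
#   authenticator = IAMAuthenticator('YOUR_APIKEY')
#   compare_comply = CompareComplyV1(version='2018-10-15',
#                                    authenticator=authenticator)
#   compare_comply.set_service_url('YOUR_SERVICE_URL')
#   with open('contract.pdf', 'rb') as contract:
#       result = compare_comply.convert_to_html(
#           file=contract, file_content_type='application/pdf').get_result()
#   print(json.dumps(result, indent=2))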
class ConvertToHtmlEnums(object):
class FileContentType(Enum):
"""
The content type of file.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
TEXT_PLAIN = 'text/plain'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class ClassifyElementsEnums(object):
class FileContentType(Enum):
"""
The content type of file.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class ExtractTablesEnums(object):
class FileContentType(Enum):
"""
The content type of file.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
TEXT_PLAIN = 'text/plain'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class CompareDocumentsEnums(object):
class File1ContentType(Enum):
"""
The content type of file_1.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_JSON = 'application/json'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
class File2ContentType(Enum):
"""
The content type of file_2.
"""
APPLICATION_PDF = 'application/pdf'
APPLICATION_JSON = 'application/json'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
IMAGE_BMP = 'image/bmp'
IMAGE_GIF = 'image/gif'
IMAGE_JPEG = 'image/jpeg'
IMAGE_PNG = 'image/png'
IMAGE_TIFF = 'image/tiff'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class GetFeedbackEnums(object):
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class DeleteFeedbackEnums(object):
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class CreateBatchEnums(object):
class Function(Enum):
"""
The Compare and Comply method to run across the submitted input documents.
"""
HTML_CONVERSION = 'html_conversion'
ELEMENT_CLASSIFICATION = 'element_classification'
TABLES = 'tables'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
class UpdateBatchEnums(object):
class Action(Enum):
"""
The action you want to perform on the specified batch-processing job.
"""
RESCAN = 'rescan'
CANCEL = 'cancel'
class Model(Enum):
"""
The analysis model to be used by the service. For the **Element classification**
and **Compare two documents** methods, the default is `contracts`. For the
**Extract tables** method, the default is `tables`. These defaults apply to the
standalone methods as well as to the methods' use in batch-processing requests.
"""
CONTRACTS = 'contracts'
TABLES = 'tables'
##############################################################################
# Models
##############################################################################
class Address():
"""
A party's address.
:attr str text: (optional) A string listing the address.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, text: str = None,
location: 'Location' = None) -> None:
"""
Initialize an Address object.
:param str text: (optional) A string listing the address.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Address':
"""Initialize a Address object from a json dictionary."""
args = {}
valid_keys = ['text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Address: ' +
', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Address object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Address object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Address') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Address') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
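# A hedged round-trip example for the Address model above (illustration only;
# assumes a Location dictionary with integer 'begin'/'end' keys, as described in
# the class docstring):
#
#   addr = Address.from_dict({'text': '123 Main Street',
#                             'location': {'begin': 10, 'end': 25}})
#   assert addr.to_dict()['text'] == '123 Main Street'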
class AlignedElement():
"""
AlignedElement.
:attr List[ElementPair] element_pair: (optional) Identifies two elements that
semantically align between the compared documents.
:attr bool identical_text: (optional) Specifies whether the aligned element is
identical. Elements are considered identical despite minor differences such as
leading punctuation, end-of-sentence punctuation, whitespace, the presence or
absence of definite or indefinite articles, and others.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr bool significant_elements: (optional) Indicates that the elements aligned
are contractual clauses of significance.
"""
def __init__(self,
*,
element_pair: List['ElementPair'] = None,
identical_text: bool = None,
provenance_ids: List[str] = None,
significant_elements: bool = None) -> None:
"""
Initialize an AlignedElement object.
:param List[ElementPair] element_pair: (optional) Identifies two elements
that semantically align between the compared documents.
:param bool identical_text: (optional) Specifies whether the aligned
element is identical. Elements are considered identical despite minor
differences such as leading punctuation, end-of-sentence punctuation,
whitespace, the presence or absence of definite or indefinite articles, and
others.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param bool significant_elements: (optional) Indicates that the elements
aligned are contractual clauses of significance.
"""
self.element_pair = element_pair
self.identical_text = identical_text
self.provenance_ids = provenance_ids
self.significant_elements = significant_elements
@classmethod
def from_dict(cls, _dict: Dict) -> 'AlignedElement':
"""Initialize a AlignedElement object from a json dictionary."""
args = {}
valid_keys = [
'element_pair', 'identical_text', 'provenance_ids',
'significant_elements'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class AlignedElement: '
+ ', '.join(bad_keys))
if 'element_pair' in _dict:
args['element_pair'] = [
ElementPair._from_dict(x) for x in (_dict.get('element_pair'))
]
if 'identical_text' in _dict:
args['identical_text'] = _dict.get('identical_text')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'significant_elements' in _dict:
args['significant_elements'] = _dict.get('significant_elements')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a AlignedElement object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'element_pair') and self.element_pair is not None:
_dict['element_pair'] = [x._to_dict() for x in self.element_pair]
if hasattr(self, 'identical_text') and self.identical_text is not None:
_dict['identical_text'] = self.identical_text
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'significant_elements'
) and self.significant_elements is not None:
_dict['significant_elements'] = self.significant_elements
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this AlignedElement object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'AlignedElement') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'AlignedElement') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Attribute():
"""
    A document attribute identified in the input, such as a currency amount, date, or organization.
:attr str type: (optional) The type of attribute.
:attr str text: (optional) The text associated with the attribute.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
type: str = None,
text: str = None,
location: 'Location' = None) -> None:
"""
        Initialize an Attribute object.
:param str type: (optional) The type of attribute.
:param str text: (optional) The text associated with the attribute.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.type = type
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Attribute':
"""Initialize a Attribute object from a json dictionary."""
args = {}
valid_keys = ['type', 'text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Attribute: '
+ ', '.join(bad_keys))
if 'type' in _dict:
args['type'] = _dict.get('type')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Attribute object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Attribute object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Attribute') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Attribute') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TypeEnum(Enum):
"""
The type of attribute.
"""
CURRENCY = "Currency"
DATETIME = "DateTime"
DEFINEDTERM = "DefinedTerm"
DURATION = "Duration"
LOCATION = "Location"
NUMBER = "Number"
ORGANIZATION = "Organization"
PERCENTAGE = "Percentage"
PERSON = "Person"
class BatchStatus():
"""
The batch-request status.
:attr str function: (optional) The method to be run against the documents.
Possible values are `html_conversion`, `element_classification`, and `tables`.
:attr str input_bucket_location: (optional) The geographical location of the
Cloud Object Storage input bucket as listed on the **Endpoint** tab of your COS
instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:attr str input_bucket_name: (optional) The name of the Cloud Object Storage
input bucket.
:attr str output_bucket_location: (optional) The geographical location of the
Cloud Object Storage output bucket as listed on the **Endpoint** tab of your COS
instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:attr str output_bucket_name: (optional) The name of the Cloud Object Storage
output bucket.
:attr str batch_id: (optional) The unique identifier for the batch request.
:attr DocCounts document_counts: (optional) Document counts.
:attr str status: (optional) The status of the batch request.
:attr datetime created: (optional) The creation time of the batch request.
:attr datetime updated: (optional) The time of the most recent update to the
batch request.
"""
def __init__(self,
*,
function: str = None,
input_bucket_location: str = None,
input_bucket_name: str = None,
output_bucket_location: str = None,
output_bucket_name: str = None,
batch_id: str = None,
document_counts: 'DocCounts' = None,
status: str = None,
created: datetime = None,
updated: datetime = None) -> None:
"""
Initialize a BatchStatus object.
:param str function: (optional) The method to be run against the documents.
Possible values are `html_conversion`, `element_classification`, and
`tables`.
:param str input_bucket_location: (optional) The geographical location of
the Cloud Object Storage input bucket as listed on the **Endpoint** tab of
your COS instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:param str input_bucket_name: (optional) The name of the Cloud Object
Storage input bucket.
:param str output_bucket_location: (optional) The geographical location of
the Cloud Object Storage output bucket as listed on the **Endpoint** tab of
your COS instance; for example, `us-geo`, `eu-geo`, or `ap-geo`.
:param str output_bucket_name: (optional) The name of the Cloud Object
Storage output bucket.
:param str batch_id: (optional) The unique identifier for the batch
request.
:param DocCounts document_counts: (optional) Document counts.
:param str status: (optional) The status of the batch request.
:param datetime created: (optional) The creation time of the batch request.
:param datetime updated: (optional) The time of the most recent update to
the batch request.
"""
self.function = function
self.input_bucket_location = input_bucket_location
self.input_bucket_name = input_bucket_name
self.output_bucket_location = output_bucket_location
self.output_bucket_name = output_bucket_name
self.batch_id = batch_id
self.document_counts = document_counts
self.status = status
self.created = created
self.updated = updated
@classmethod
def from_dict(cls, _dict: Dict) -> 'BatchStatus':
"""Initialize a BatchStatus object from a json dictionary."""
args = {}
valid_keys = [
'function', 'input_bucket_location', 'input_bucket_name',
'output_bucket_location', 'output_bucket_name', 'batch_id',
'document_counts', 'status', 'created', 'updated'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class BatchStatus: '
+ ', '.join(bad_keys))
if 'function' in _dict:
args['function'] = _dict.get('function')
if 'input_bucket_location' in _dict:
args['input_bucket_location'] = _dict.get('input_bucket_location')
if 'input_bucket_name' in _dict:
args['input_bucket_name'] = _dict.get('input_bucket_name')
if 'output_bucket_location' in _dict:
args['output_bucket_location'] = _dict.get('output_bucket_location')
if 'output_bucket_name' in _dict:
args['output_bucket_name'] = _dict.get('output_bucket_name')
if 'batch_id' in _dict:
args['batch_id'] = _dict.get('batch_id')
if 'document_counts' in _dict:
args['document_counts'] = DocCounts._from_dict(
_dict.get('document_counts'))
if 'status' in _dict:
args['status'] = _dict.get('status')
if 'created' in _dict:
args['created'] = string_to_datetime(_dict.get('created'))
if 'updated' in _dict:
args['updated'] = string_to_datetime(_dict.get('updated'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a BatchStatus object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'function') and self.function is not None:
_dict['function'] = self.function
if hasattr(self, 'input_bucket_location'
) and self.input_bucket_location is not None:
_dict['input_bucket_location'] = self.input_bucket_location
if hasattr(self,
'input_bucket_name') and self.input_bucket_name is not None:
_dict['input_bucket_name'] = self.input_bucket_name
if hasattr(self, 'output_bucket_location'
) and self.output_bucket_location is not None:
_dict['output_bucket_location'] = self.output_bucket_location
if hasattr(
self,
'output_bucket_name') and self.output_bucket_name is not None:
_dict['output_bucket_name'] = self.output_bucket_name
if hasattr(self, 'batch_id') and self.batch_id is not None:
_dict['batch_id'] = self.batch_id
if hasattr(self,
'document_counts') and self.document_counts is not None:
_dict['document_counts'] = self.document_counts._to_dict()
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = datetime_to_string(self.updated)
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this BatchStatus object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'BatchStatus') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'BatchStatus') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FunctionEnum(Enum):
"""
The method to be run against the documents. Possible values are `html_conversion`,
`element_classification`, and `tables`.
"""
ELEMENT_CLASSIFICATION = "element_classification"
HTML_CONVERSION = "html_conversion"
TABLES = "tables"
class Batches():
"""
The results of a successful **List Batches** request.
:attr List[BatchStatus] batches: (optional) A list of the status of all batch
requests.
"""
def __init__(self, *, batches: List['BatchStatus'] = None) -> None:
"""
Initialize a Batches object.
:param List[BatchStatus] batches: (optional) A list of the status of all
batch requests.
"""
self.batches = batches
@classmethod
def from_dict(cls, _dict: Dict) -> 'Batches':
"""Initialize a Batches object from a json dictionary."""
args = {}
valid_keys = ['batches']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Batches: ' +
', '.join(bad_keys))
if 'batches' in _dict:
args['batches'] = [
BatchStatus._from_dict(x) for x in (_dict.get('batches'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Batches object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'batches') and self.batches is not None:
_dict['batches'] = [x._to_dict() for x in self.batches]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Batches object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Batches') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Batches') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class BodyCells():
"""
Cells that are not table header, column header, or row header cells.
:attr str cell_id: (optional) The unique ID of the cell in the current table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The textual contents of this cell from the input
document without associated markup content.
:attr int row_index_begin: (optional) The `begin` index of this cell's `row`
location in the current table.
:attr int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:attr int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:attr int column_index_end: (optional) The `end` index of this cell's `column`
location in the current table.
:attr List[str] row_header_ids: (optional) An array that contains the `id` value
of a row header that is applicable to this body cell.
:attr List[str] row_header_texts: (optional) An array that contains the `text`
value of a row header that is applicable to this body cell.
:attr List[str] row_header_texts_normalized: (optional) If you provide
customization input, the normalized version of the row header texts according to
the customization; otherwise, the same value as `row_header_texts`.
:attr List[str] column_header_ids: (optional) An array that contains the `id`
value of a column header that is applicable to the current cell.
:attr List[str] column_header_texts: (optional) An array that contains the
`text` value of a column header that is applicable to the current cell.
:attr List[str] column_header_texts_normalized: (optional) If you provide
customization input, the normalized version of the column header texts according
to the customization; otherwise, the same value as `column_header_texts`.
    :attr List[Attribute] attributes: (optional) A list of document attributes
    identified in the cell.
"""
def __init__(self,
*,
cell_id: str = None,
location: 'Location' = None,
text: str = None,
row_index_begin: int = None,
row_index_end: int = None,
column_index_begin: int = None,
column_index_end: int = None,
row_header_ids: List[str] = None,
row_header_texts: List[str] = None,
row_header_texts_normalized: List[str] = None,
column_header_ids: List[str] = None,
column_header_texts: List[str] = None,
column_header_texts_normalized: List[str] = None,
attributes: List['Attribute'] = None) -> None:
"""
Initialize a BodyCells object.
:param str cell_id: (optional) The unique ID of the cell in the current
table.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The textual contents of this cell from the
input document without associated markup content.
:param int row_index_begin: (optional) The `begin` index of this cell's
`row` location in the current table.
:param int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:param int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:param int column_index_end: (optional) The `end` index of this cell's
`column` location in the current table.
:param List[str] row_header_ids: (optional) An array that contains the `id`
value of a row header that is applicable to this body cell.
:param List[str] row_header_texts: (optional) An array that contains the
`text` value of a row header that is applicable to this body cell.
:param List[str] row_header_texts_normalized: (optional) If you provide
customization input, the normalized version of the row header texts
according to the customization; otherwise, the same value as
`row_header_texts`.
:param List[str] column_header_ids: (optional) An array that contains the
`id` value of a column header that is applicable to the current cell.
:param List[str] column_header_texts: (optional) An array that contains the
`text` value of a column header that is applicable to the current cell.
:param List[str] column_header_texts_normalized: (optional) If you provide
customization input, the normalized version of the column header texts
according to the customization; otherwise, the same value as
`column_header_texts`.
        :param List[Attribute] attributes: (optional) A list of document
        attributes identified in the cell.
"""
self.cell_id = cell_id
self.location = location
self.text = text
self.row_index_begin = row_index_begin
self.row_index_end = row_index_end
self.column_index_begin = column_index_begin
self.column_index_end = column_index_end
self.row_header_ids = row_header_ids
self.row_header_texts = row_header_texts
self.row_header_texts_normalized = row_header_texts_normalized
self.column_header_ids = column_header_ids
self.column_header_texts = column_header_texts
self.column_header_texts_normalized = column_header_texts_normalized
self.attributes = attributes
@classmethod
def from_dict(cls, _dict: Dict) -> 'BodyCells':
"""Initialize a BodyCells object from a json dictionary."""
args = {}
valid_keys = [
'cell_id', 'location', 'text', 'row_index_begin', 'row_index_end',
'column_index_begin', 'column_index_end', 'row_header_ids',
'row_header_texts', 'row_header_texts_normalized',
'column_header_ids', 'column_header_texts',
'column_header_texts_normalized', 'attributes'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class BodyCells: '
+ ', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
if 'row_header_ids' in _dict:
args['row_header_ids'] = _dict.get('row_header_ids')
if 'row_header_texts' in _dict:
args['row_header_texts'] = _dict.get('row_header_texts')
if 'row_header_texts_normalized' in _dict:
args['row_header_texts_normalized'] = _dict.get(
'row_header_texts_normalized')
if 'column_header_ids' in _dict:
args['column_header_ids'] = _dict.get('column_header_ids')
if 'column_header_texts' in _dict:
args['column_header_texts'] = _dict.get('column_header_texts')
if 'column_header_texts_normalized' in _dict:
args['column_header_texts_normalized'] = _dict.get(
'column_header_texts_normalized')
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a BodyCells object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'row_index_begin') and self.row_index_begin is not None:
_dict['row_index_begin'] = self.row_index_begin
if hasattr(self, 'row_index_end') and self.row_index_end is not None:
_dict['row_index_end'] = self.row_index_end
if hasattr(
self,
'column_index_begin') and self.column_index_begin is not None:
_dict['column_index_begin'] = self.column_index_begin
if hasattr(self,
'column_index_end') and self.column_index_end is not None:
_dict['column_index_end'] = self.column_index_end
if hasattr(self, 'row_header_ids') and self.row_header_ids is not None:
_dict['row_header_ids'] = self.row_header_ids
if hasattr(self,
'row_header_texts') and self.row_header_texts is not None:
_dict['row_header_texts'] = self.row_header_texts
if hasattr(self, 'row_header_texts_normalized'
) and self.row_header_texts_normalized is not None:
_dict[
'row_header_texts_normalized'] = self.row_header_texts_normalized
if hasattr(self,
'column_header_ids') and self.column_header_ids is not None:
_dict['column_header_ids'] = self.column_header_ids
if hasattr(
self,
'column_header_texts') and self.column_header_texts is not None:
_dict['column_header_texts'] = self.column_header_texts
if hasattr(self, 'column_header_texts_normalized'
) and self.column_header_texts_normalized is not None:
_dict[
'column_header_texts_normalized'] = self.column_header_texts_normalized
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this BodyCells object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'BodyCells') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'BodyCells') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
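# Illustrative sketch (not part of the generated models): a body cell records
# both its grid position (row/column index ranges) and the header cells that
# apply to it, as parallel `*_ids` and `*_texts` arrays. All values below are
# hypothetical; only the keys come from `valid_keys` above.
def _example_body_cell() -> 'BodyCells':
    """Sketch: one body cell covered by a single row header and a single column header."""
    return BodyCells.from_dict({
        'cell_id': 'bodyCell-1234-1250',
        'location': {'begin': 1234, 'end': 1250},
        'text': '$725,000',
        'row_index_begin': 1,
        'row_index_end': 1,
        'column_index_begin': 1,
        'column_index_end': 1,
        'row_header_ids': ['rowHeader-100-110'],
        'row_header_texts': ['Total price'],
        'column_header_ids': ['colHeader-50-60'],
        'column_header_texts': ['Amount'],
    })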
class Category():
"""
Information defining an element's subject matter.
:attr str label: (optional) The category of the associated element.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
"""
def __init__(self, *, label: str = None,
provenance_ids: List[str] = None) -> None:
"""
Initialize a Category object.
:param str label: (optional) The category of the associated element.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
"""
self.label = label
self.provenance_ids = provenance_ids
@classmethod
def from_dict(cls, _dict: Dict) -> 'Category':
"""Initialize a Category object from a json dictionary."""
args = {}
valid_keys = ['label', 'provenance_ids']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Category: '
+ ', '.join(bad_keys))
if 'label' in _dict:
args['label'] = _dict.get('label')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Category object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Category object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Category') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Category') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class LabelEnum(Enum):
"""
The category of the associated element.
"""
AMENDMENTS = "Amendments"
ASSET_USE = "Asset Use"
ASSIGNMENTS = "Assignments"
AUDITS = "Audits"
BUSINESS_CONTINUITY = "Business Continuity"
COMMUNICATION = "Communication"
CONFIDENTIALITY = "Confidentiality"
DELIVERABLES = "Deliverables"
DELIVERY = "Delivery"
DISPUTE_RESOLUTION = "Dispute Resolution"
FORCE_MAJEURE = "Force Majeure"
INDEMNIFICATION = "Indemnification"
INSURANCE = "Insurance"
INTELLECTUAL_PROPERTY = "Intellectual Property"
LIABILITY = "Liability"
ORDER_OF_PRECEDENCE = "Order of Precedence"
PAYMENT_TERMS_BILLING = "Payment Terms & Billing"
PRICING_TAXES = "Pricing & Taxes"
PRIVACY = "Privacy"
RESPONSIBILITIES = "Responsibilities"
SAFETY_AND_SECURITY = "Safety and Security"
SCOPE_OF_WORK = "Scope of Work"
SUBCONTRACTS = "Subcontracts"
TERM_TERMINATION = "Term & Termination"
WARRANTIES = "Warranties"
class CategoryComparison():
"""
Information defining an element's subject matter.
:attr str label: (optional) The category of the associated element.
"""
def __init__(self, *, label: str = None) -> None:
"""
Initialize a CategoryComparison object.
:param str label: (optional) The category of the associated element.
"""
self.label = label
@classmethod
def from_dict(cls, _dict: Dict) -> 'CategoryComparison':
"""Initialize a CategoryComparison object from a json dictionary."""
args = {}
valid_keys = ['label']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class CategoryComparison: '
+ ', '.join(bad_keys))
if 'label' in _dict:
args['label'] = _dict.get('label')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CategoryComparison object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CategoryComparison object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'CategoryComparison') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CategoryComparison') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class LabelEnum(Enum):
"""
The category of the associated element.
"""
AMENDMENTS = "Amendments"
ASSET_USE = "Asset Use"
ASSIGNMENTS = "Assignments"
AUDITS = "Audits"
BUSINESS_CONTINUITY = "Business Continuity"
COMMUNICATION = "Communication"
CONFIDENTIALITY = "Confidentiality"
DELIVERABLES = "Deliverables"
DELIVERY = "Delivery"
DISPUTE_RESOLUTION = "Dispute Resolution"
FORCE_MAJEURE = "Force Majeure"
INDEMNIFICATION = "Indemnification"
INSURANCE = "Insurance"
INTELLECTUAL_PROPERTY = "Intellectual Property"
LIABILITY = "Liability"
ORDER_OF_PRECEDENCE = "Order of Precedence"
PAYMENT_TERMS_BILLING = "Payment Terms & Billing"
PRICING_TAXES = "Pricing & Taxes"
PRIVACY = "Privacy"
RESPONSIBILITIES = "Responsibilities"
SAFETY_AND_SECURITY = "Safety and Security"
SCOPE_OF_WORK = "Scope of Work"
SUBCONTRACTS = "Subcontracts"
TERM_TERMINATION = "Term & Termination"
WARRANTIES = "Warranties"
class ClassifyReturn():
"""
The analysis of objects returned by the **Element classification** method.
:attr Document document: (optional) Basic information about the input document.
:attr str model_id: (optional) The analysis model used to classify the input
document. For the **Element classification** method, the only valid value is
`contracts`.
:attr str model_version: (optional) The version of the analysis model identified
by the value of the `model_id` key.
:attr List[Element] elements: (optional) Document elements identified by the
service.
:attr List[EffectiveDates] effective_dates: (optional) The date or dates on
which the document becomes effective.
:attr List[ContractAmts] contract_amounts: (optional) The monetary amounts that
identify the total amount of the contract that needs to be paid from one party
to another.
:attr List[TerminationDates] termination_dates: (optional) The dates on which
the document is to be terminated.
:attr List[ContractTypes] contract_types: (optional) The contract type as
declared in the document.
:attr List[ContractTerms] contract_terms: (optional) The durations of the
contract.
:attr List[PaymentTerms] payment_terms: (optional) The document's payment
durations.
:attr List[ContractCurrencies] contract_currencies: (optional) The contract
currencies as declared in the document.
:attr List[Tables] tables: (optional) Definition of tables identified in the
input document.
:attr DocStructure document_structure: (optional) The structure of the input
document.
:attr List[Parties] parties: (optional) Definitions of the parties identified in
the input document.
"""
def __init__(self,
*,
document: 'Document' = None,
model_id: str = None,
model_version: str = None,
elements: List['Element'] = None,
effective_dates: List['EffectiveDates'] = None,
contract_amounts: List['ContractAmts'] = None,
termination_dates: List['TerminationDates'] = None,
contract_types: List['ContractTypes'] = None,
contract_terms: List['ContractTerms'] = None,
payment_terms: List['PaymentTerms'] = None,
contract_currencies: List['ContractCurrencies'] = None,
tables: List['Tables'] = None,
document_structure: 'DocStructure' = None,
parties: List['Parties'] = None) -> None:
"""
Initialize a ClassifyReturn object.
:param Document document: (optional) Basic information about the input
document.
:param str model_id: (optional) The analysis model used to classify the
input document. For the **Element classification** method, the only valid
value is `contracts`.
:param str model_version: (optional) The version of the analysis model
identified by the value of the `model_id` key.
:param List[Element] elements: (optional) Document elements identified by
the service.
:param List[EffectiveDates] effective_dates: (optional) The date or dates
on which the document becomes effective.
:param List[ContractAmts] contract_amounts: (optional) The monetary amounts
that identify the total amount of the contract that needs to be paid from
one party to another.
:param List[TerminationDates] termination_dates: (optional) The dates on
which the document is to be terminated.
:param List[ContractTypes] contract_types: (optional) The contract type as
declared in the document.
:param List[ContractTerms] contract_terms: (optional) The durations of the
contract.
:param List[PaymentTerms] payment_terms: (optional) The document's payment
durations.
:param List[ContractCurrencies] contract_currencies: (optional) The
contract currencies as declared in the document.
:param List[Tables] tables: (optional) Definition of tables identified in
the input document.
:param DocStructure document_structure: (optional) The structure of the
input document.
:param List[Parties] parties: (optional) Definitions of the parties
identified in the input document.
"""
self.document = document
self.model_id = model_id
self.model_version = model_version
self.elements = elements
self.effective_dates = effective_dates
self.contract_amounts = contract_amounts
self.termination_dates = termination_dates
self.contract_types = contract_types
self.contract_terms = contract_terms
self.payment_terms = payment_terms
self.contract_currencies = contract_currencies
self.tables = tables
self.document_structure = document_structure
self.parties = parties
@classmethod
def from_dict(cls, _dict: Dict) -> 'ClassifyReturn':
"""Initialize a ClassifyReturn object from a json dictionary."""
args = {}
valid_keys = [
'document', 'model_id', 'model_version', 'elements',
'effective_dates', 'contract_amounts', 'termination_dates',
'contract_types', 'contract_terms', 'payment_terms',
'contract_currencies', 'tables', 'document_structure', 'parties'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ClassifyReturn: '
+ ', '.join(bad_keys))
if 'document' in _dict:
args['document'] = Document._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'elements' in _dict:
args['elements'] = [
Element._from_dict(x) for x in (_dict.get('elements'))
]
if 'effective_dates' in _dict:
args['effective_dates'] = [
EffectiveDates._from_dict(x)
for x in (_dict.get('effective_dates'))
]
if 'contract_amounts' in _dict:
args['contract_amounts'] = [
ContractAmts._from_dict(x)
for x in (_dict.get('contract_amounts'))
]
if 'termination_dates' in _dict:
args['termination_dates'] = [
TerminationDates._from_dict(x)
for x in (_dict.get('termination_dates'))
]
if 'contract_types' in _dict:
args['contract_types'] = [
ContractTypes._from_dict(x)
for x in (_dict.get('contract_types'))
]
if 'contract_terms' in _dict:
args['contract_terms'] = [
ContractTerms._from_dict(x)
for x in (_dict.get('contract_terms'))
]
if 'payment_terms' in _dict:
args['payment_terms'] = [
PaymentTerms._from_dict(x) for x in (_dict.get('payment_terms'))
]
if 'contract_currencies' in _dict:
args['contract_currencies'] = [
ContractCurrencies._from_dict(x)
for x in (_dict.get('contract_currencies'))
]
if 'tables' in _dict:
args['tables'] = [
Tables._from_dict(x) for x in (_dict.get('tables'))
]
if 'document_structure' in _dict:
args['document_structure'] = DocStructure._from_dict(
_dict.get('document_structure'))
if 'parties' in _dict:
args['parties'] = [
Parties._from_dict(x) for x in (_dict.get('parties'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ClassifyReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'elements') and self.elements is not None:
_dict['elements'] = [x._to_dict() for x in self.elements]
if hasattr(self,
'effective_dates') and self.effective_dates is not None:
_dict['effective_dates'] = [
x._to_dict() for x in self.effective_dates
]
if hasattr(self,
'contract_amounts') and self.contract_amounts is not None:
_dict['contract_amounts'] = [
x._to_dict() for x in self.contract_amounts
]
if hasattr(self,
'termination_dates') and self.termination_dates is not None:
_dict['termination_dates'] = [
x._to_dict() for x in self.termination_dates
]
if hasattr(self, 'contract_types') and self.contract_types is not None:
_dict['contract_types'] = [
x._to_dict() for x in self.contract_types
]
if hasattr(self, 'contract_terms') and self.contract_terms is not None:
_dict['contract_terms'] = [
x._to_dict() for x in self.contract_terms
]
if hasattr(self, 'payment_terms') and self.payment_terms is not None:
_dict['payment_terms'] = [x._to_dict() for x in self.payment_terms]
if hasattr(
self,
'contract_currencies') and self.contract_currencies is not None:
_dict['contract_currencies'] = [
x._to_dict() for x in self.contract_currencies
]
if hasattr(self, 'tables') and self.tables is not None:
_dict['tables'] = [x._to_dict() for x in self.tables]
if hasattr(
self,
'document_structure') and self.document_structure is not None:
_dict['document_structure'] = self.document_structure._to_dict()
if hasattr(self, 'parties') and self.parties is not None:
_dict['parties'] = [x._to_dict() for x in self.parties]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ClassifyReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ClassifyReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ClassifyReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
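# Illustrative helper (not part of the generated models): a ClassifyReturn is
# normally obtained by parsing the JSON body of an Element classification
# response. The summary below uses only attributes defined on this class.
def _example_classification_overview(result: 'ClassifyReturn') -> Dict:
    """Sketch: pull top-level metadata out of an Element classification result."""
    return {
        'model': '{0} {1}'.format(result.model_id, result.model_version),
        'element_count': len(result.elements or []),
        'table_count': len(result.tables or []),
        'party_count': len(result.parties or []),
    }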
class ColumnHeaders():
"""
    Column-level cells of the current table, each of which applies as a header to other
    cells in the same column.
:attr str cell_id: (optional) The unique ID of the cell in the current table.
:attr object location: (optional) The location of the column header cell in the
    current table as defined by its `begin` and `end` offsets, respectively, in the
input document.
:attr str text: (optional) The textual contents of this cell from the input
document without associated markup content.
:attr str text_normalized: (optional) If you provide customization input, the
normalized version of the cell text according to the customization; otherwise,
the same value as `text`.
:attr int row_index_begin: (optional) The `begin` index of this cell's `row`
location in the current table.
:attr int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:attr int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:attr int column_index_end: (optional) The `end` index of this cell's `column`
location in the current table.
"""
def __init__(self,
*,
cell_id: str = None,
location: object = None,
text: str = None,
text_normalized: str = None,
row_index_begin: int = None,
row_index_end: int = None,
column_index_begin: int = None,
column_index_end: int = None) -> None:
"""
Initialize a ColumnHeaders object.
:param str cell_id: (optional) The unique ID of the cell in the current
table.
:param object location: (optional) The location of the column header cell
in the current table as defined by its `begin` and `end` offsets,
        respectively, in the input document.
:param str text: (optional) The textual contents of this cell from the
input document without associated markup content.
:param str text_normalized: (optional) If you provide customization input,
the normalized version of the cell text according to the customization;
otherwise, the same value as `text`.
:param int row_index_begin: (optional) The `begin` index of this cell's
`row` location in the current table.
:param int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:param int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:param int column_index_end: (optional) The `end` index of this cell's
`column` location in the current table.
"""
self.cell_id = cell_id
self.location = location
self.text = text
self.text_normalized = text_normalized
self.row_index_begin = row_index_begin
self.row_index_end = row_index_end
self.column_index_begin = column_index_begin
self.column_index_end = column_index_end
@classmethod
def from_dict(cls, _dict: Dict) -> 'ColumnHeaders':
"""Initialize a ColumnHeaders object from a json dictionary."""
args = {}
valid_keys = [
'cell_id', 'location', 'text', 'text_normalized', 'row_index_begin',
'row_index_end', 'column_index_begin', 'column_index_end'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ColumnHeaders: '
+ ', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = _dict.get('location')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ColumnHeaders object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self,
'row_index_begin') and self.row_index_begin is not None:
_dict['row_index_begin'] = self.row_index_begin
if hasattr(self, 'row_index_end') and self.row_index_end is not None:
_dict['row_index_end'] = self.row_index_end
if hasattr(
self,
'column_index_begin') and self.column_index_begin is not None:
_dict['column_index_begin'] = self.column_index_begin
if hasattr(self,
'column_index_end') and self.column_index_end is not None:
_dict['column_index_end'] = self.column_index_end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ColumnHeaders object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ColumnHeaders') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ColumnHeaders') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
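# Note (illustrative, not part of the generated models): unlike BodyCells,
# ColumnHeaders declares `location` as a plain object, so from_dict/to_dict
# pass the raw dictionary through unchanged instead of building a Location
# model. The values below are hypothetical.
def _example_column_header_location_passthrough() -> Dict:
    """Sketch: the column header's location dict round-trips verbatim."""
    header = ColumnHeaders.from_dict({
        'cell_id': 'colHeader-50-60',
        'location': {'begin': 50, 'end': 60},
        'text': 'Amount',
    })
    return header.to_dict()['location']  # the same {'begin': 50, 'end': 60} dict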
class CompareReturn():
"""
The comparison of the two submitted documents.
:attr str model_id: (optional) The analysis model used to compare the input
documents. For the **Compare two documents** method, the only valid value is
`contracts`.
:attr str model_version: (optional) The version of the analysis model identified
by the value of the `model_id` key.
:attr List[Document] documents: (optional) Information about the documents being
compared.
:attr List[AlignedElement] aligned_elements: (optional) A list of pairs of
elements that semantically align between the compared documents.
:attr List[UnalignedElement] unaligned_elements: (optional) A list of elements
that do not semantically align between the compared documents.
"""
def __init__(self,
*,
model_id: str = None,
model_version: str = None,
documents: List['Document'] = None,
aligned_elements: List['AlignedElement'] = None,
unaligned_elements: List['UnalignedElement'] = None) -> None:
"""
Initialize a CompareReturn object.
:param str model_id: (optional) The analysis model used to compare the
input documents. For the **Compare two documents** method, the only valid
value is `contracts`.
:param str model_version: (optional) The version of the analysis model
identified by the value of the `model_id` key.
:param List[Document] documents: (optional) Information about the documents
being compared.
:param List[AlignedElement] aligned_elements: (optional) A list of pairs of
elements that semantically align between the compared documents.
:param List[UnalignedElement] unaligned_elements: (optional) A list of
elements that do not semantically align between the compared documents.
"""
self.model_id = model_id
self.model_version = model_version
self.documents = documents
self.aligned_elements = aligned_elements
self.unaligned_elements = unaligned_elements
@classmethod
def from_dict(cls, _dict: Dict) -> 'CompareReturn':
"""Initialize a CompareReturn object from a json dictionary."""
args = {}
valid_keys = [
'model_id', 'model_version', 'documents', 'aligned_elements',
'unaligned_elements'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class CompareReturn: '
+ ', '.join(bad_keys))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'documents' in _dict:
args['documents'] = [
Document._from_dict(x) for x in (_dict.get('documents'))
]
if 'aligned_elements' in _dict:
args['aligned_elements'] = [
AlignedElement._from_dict(x)
for x in (_dict.get('aligned_elements'))
]
if 'unaligned_elements' in _dict:
args['unaligned_elements'] = [
UnalignedElement._from_dict(x)
for x in (_dict.get('unaligned_elements'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CompareReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'documents') and self.documents is not None:
_dict['documents'] = [x._to_dict() for x in self.documents]
if hasattr(self,
'aligned_elements') and self.aligned_elements is not None:
_dict['aligned_elements'] = [
x._to_dict() for x in self.aligned_elements
]
if hasattr(
self,
'unaligned_elements') and self.unaligned_elements is not None:
_dict['unaligned_elements'] = [
x._to_dict() for x in self.unaligned_elements
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CompareReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'CompareReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CompareReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
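# Illustrative helper (not part of the generated models): summarizing a
# comparison result using only the attributes defined on CompareReturn and
# AlignedElement above.
def _example_comparison_overview(result: 'CompareReturn') -> Dict:
    """Sketch: count aligned element pairs, identical pairs, and unaligned elements."""
    aligned = result.aligned_elements or []
    return {
        'aligned': len(aligned),
        'identical': sum(1 for pair in aligned if pair.identical_text),
        'unaligned': len(result.unaligned_elements or []),
    }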
class Contact():
"""
A contact.
:attr str name: (optional) A string listing the name of the contact.
:attr str role: (optional) A string listing the role of the contact.
"""
def __init__(self, *, name: str = None, role: str = None) -> None:
"""
Initialize a Contact object.
:param str name: (optional) A string listing the name of the contact.
:param str role: (optional) A string listing the role of the contact.
"""
self.name = name
self.role = role
@classmethod
def from_dict(cls, _dict: Dict) -> 'Contact':
"""Initialize a Contact object from a json dictionary."""
args = {}
valid_keys = ['name', 'role']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Contact: ' +
', '.join(bad_keys))
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'role' in _dict:
args['role'] = _dict.get('role')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Contact object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'role') and self.role is not None:
_dict['role'] = self.role
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Contact object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Contact') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Contact') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Contexts():
"""
Text that is related to the contents of the table and that precedes or follows the
current table.
:attr str text: (optional) The related text.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, text: str = None,
location: 'Location' = None) -> None:
"""
Initialize a Contexts object.
:param str text: (optional) The related text.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Contexts':
"""Initialize a Contexts object from a json dictionary."""
args = {}
valid_keys = ['text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Contexts: '
+ ', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Contexts object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Contexts object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Contexts') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Contexts') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ContractAmts():
"""
A monetary amount identified in the input document.
:attr str confidence_level: (optional) The confidence level in the
identification of the contract amount.
:attr str text: (optional) The monetary amount.
:attr str text_normalized: (optional) The normalized form of the amount, which
is listed as a string. This element is optional; it is returned only if
normalized text exists.
:attr Interpretation interpretation: (optional) The details of the normalized
text, if applicable. This element is optional; it is returned only if normalized
text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
interpretation: 'Interpretation' = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a ContractAmts object.
:param str confidence_level: (optional) The confidence level in the
identification of the contract amount.
:param str text: (optional) The monetary amount.
:param str text_normalized: (optional) The normalized form of the amount,
which is listed as a string. This element is optional; it is returned only
if normalized text exists.
:param Interpretation interpretation: (optional) The details of the
normalized text, if applicable. This element is optional; it is returned
only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.interpretation = interpretation
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContractAmts':
"""Initialize a ContractAmts object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'interpretation',
'provenance_ids', 'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ContractAmts: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'interpretation' in _dict:
args['interpretation'] = Interpretation._from_dict(
_dict.get('interpretation'))
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContractAmts object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'interpretation') and self.interpretation is not None:
_dict['interpretation'] = self.interpretation._to_dict()
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContractAmts object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ContractAmts') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContractAmts') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the contract amount.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class ContractCurrencies():
"""
The contract currencies that are declared in the document.
:attr str confidence_level: (optional) The confidence level in the
identification of the contract currency.
:attr str text: (optional) The contract currency.
:attr str text_normalized: (optional) The normalized form of the contract
currency, which is listed as a string in
[ISO-4217](https://www.iso.org/iso-4217-currency-codes.html) format. This
element is optional; it is returned only if normalized text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a ContractCurrencies object.
:param str confidence_level: (optional) The confidence level in the
identification of the contract currency.
:param str text: (optional) The contract currency.
:param str text_normalized: (optional) The normalized form of the contract
currency, which is listed as a string in
[ISO-4217](https://www.iso.org/iso-4217-currency-codes.html) format. This
element is optional; it is returned only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContractCurrencies':
"""Initialize a ContractCurrencies object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'provenance_ids',
'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ContractCurrencies: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContractCurrencies object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContractCurrencies object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ContractCurrencies') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContractCurrencies') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the contract currency.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class ContractTerms():
"""
The duration or durations of the contract.
:attr str confidence_level: (optional) The confidence level in the
identification of the contract term.
:attr str text: (optional) The contract term (duration).
:attr str text_normalized: (optional) The normalized form of the contract term,
which is listed as a string. This element is optional; it is returned only if
normalized text exists.
:attr Interpretation interpretation: (optional) The details of the normalized
text, if applicable. This element is optional; it is returned only if normalized
text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
interpretation: 'Interpretation' = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a ContractTerms object.
:param str confidence_level: (optional) The confidence level in the
identification of the contract term.
:param str text: (optional) The contract term (duration).
:param str text_normalized: (optional) The normalized form of the contract
term, which is listed as a string. This element is optional; it is returned
only if normalized text exists.
:param Interpretation interpretation: (optional) The details of the
normalized text, if applicable. This element is optional; it is returned
only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.interpretation = interpretation
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContractTerms':
"""Initialize a ContractTerms object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'interpretation',
'provenance_ids', 'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ContractTerms: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'interpretation' in _dict:
args['interpretation'] = Interpretation._from_dict(
_dict.get('interpretation'))
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContractTerms object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'interpretation') and self.interpretation is not None:
_dict['interpretation'] = self.interpretation._to_dict()
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContractTerms object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ContractTerms') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContractTerms') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the contract term.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class ContractTypes():
"""
The contract type identified in the input document.
:attr str confidence_level: (optional) The confidence level in the
identification of the contract type.
:attr str text: (optional) The contract type.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a ContractTypes object.
:param str confidence_level: (optional) The confidence level in the
identification of the contract type.
:param str text: (optional) The contract type.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContractTypes':
"""Initialize a ContractTypes object from a json dictionary."""
args = {}
valid_keys = ['confidence_level', 'text', 'provenance_ids', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ContractTypes: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContractTypes object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContractTypes object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ContractTypes') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContractTypes') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the contract type.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class DocCounts():
"""
Document counts.
:attr int total: (optional) Total number of documents.
:attr int pending: (optional) Number of pending documents.
:attr int successful: (optional) Number of documents successfully processed.
:attr int failed: (optional) Number of documents not successfully processed.
"""
def __init__(self,
*,
total: int = None,
pending: int = None,
successful: int = None,
failed: int = None) -> None:
"""
Initialize a DocCounts object.
:param int total: (optional) Total number of documents.
:param int pending: (optional) Number of pending documents.
:param int successful: (optional) Number of documents successfully
processed.
:param int failed: (optional) Number of documents not successfully
processed.
"""
self.total = total
self.pending = pending
self.successful = successful
self.failed = failed
@classmethod
def from_dict(cls, _dict: Dict) -> 'DocCounts':
"""Initialize a DocCounts object from a json dictionary."""
args = {}
valid_keys = ['total', 'pending', 'successful', 'failed']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class DocCounts: '
+ ', '.join(bad_keys))
if 'total' in _dict:
args['total'] = _dict.get('total')
if 'pending' in _dict:
args['pending'] = _dict.get('pending')
if 'successful' in _dict:
args['successful'] = _dict.get('successful')
if 'failed' in _dict:
args['failed'] = _dict.get('failed')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DocCounts object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'total') and self.total is not None:
_dict['total'] = self.total
if hasattr(self, 'pending') and self.pending is not None:
_dict['pending'] = self.pending
if hasattr(self, 'successful') and self.successful is not None:
_dict['successful'] = self.successful
if hasattr(self, 'failed') and self.failed is not None:
_dict['failed'] = self.failed
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DocCounts object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'DocCounts') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DocCounts') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
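
# A minimal illustrative sketch (not part of the service API, never called by
# the SDK itself): a plain round trip through DocCounts.from_dict()/to_dict();
# the counts are made up.
def _sketch_doc_counts_round_trip() -> None:
    counts = DocCounts.from_dict(
        {'total': 4, 'pending': 1, 'successful': 2, 'failed': 1})
    assert counts.to_dict() == {
        'total': 4, 'pending': 1, 'successful': 2, 'failed': 1}
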
class DocInfo():
"""
Information about the parsed input document.
:attr str html: (optional) The full text of the parsed document in HTML format.
:attr str title: (optional) The title of the parsed document. If the service did
not detect a title, the value of this element is `null`.
:attr str hash: (optional) The MD5 hash of the input document.
"""
def __init__(self, *, html: str = None, title: str = None,
hash: str = None) -> None:
"""
Initialize a DocInfo object.
:param str html: (optional) The full text of the parsed document in HTML
format.
:param str title: (optional) The title of the parsed document. If the
service did not detect a title, the value of this element is `null`.
:param str hash: (optional) The MD5 hash of the input document.
"""
self.html = html
self.title = title
self.hash = hash
@classmethod
def from_dict(cls, _dict: Dict) -> 'DocInfo':
"""Initialize a DocInfo object from a json dictionary."""
args = {}
valid_keys = ['html', 'title', 'hash']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class DocInfo: ' +
', '.join(bad_keys))
if 'html' in _dict:
args['html'] = _dict.get('html')
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'hash' in _dict:
args['hash'] = _dict.get('hash')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DocInfo object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'html') and self.html is not None:
_dict['html'] = self.html
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'hash') and self.hash is not None:
_dict['hash'] = self.hash
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DocInfo object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'DocInfo') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DocInfo') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
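
# A minimal illustrative sketch (not part of the service API, never called by
# the SDK itself): fields absent from the input dictionary stay None and are
# dropped again by to_dict(); the html and hash values are made up.
def _sketch_doc_info_optional_fields() -> None:
    info = DocInfo.from_dict({'html': '<html></html>', 'hash': '9e107d9d'})
    assert info.title is None              # never supplied
    assert 'title' not in info.to_dict()   # and therefore not serialized
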
class DocStructure():
"""
The structure of the input document.
:attr List[SectionTitles] section_titles: (optional) An array containing one
object per section or subsection identified in the input document.
:attr List[LeadingSentence] leading_sentences: (optional) An array containing
one object per section or subsection, in parallel with the `section_titles`
array, that details the leading sentences in the corresponding section or
subsection.
:attr List[Paragraphs] paragraphs: (optional) An array containing one object per
paragraph, in parallel with the `section_titles` and `leading_sentences` arrays.
"""
def __init__(self,
*,
section_titles: List['SectionTitles'] = None,
leading_sentences: List['LeadingSentence'] = None,
paragraphs: List['Paragraphs'] = None) -> None:
"""
Initialize a DocStructure object.
:param List[SectionTitles] section_titles: (optional) An array containing
one object per section or subsection identified in the input document.
:param List[LeadingSentence] leading_sentences: (optional) An array
containing one object per section or subsection, in parallel with the
`section_titles` array, that details the leading sentences in the
corresponding section or subsection.
:param List[Paragraphs] paragraphs: (optional) An array containing one
object per paragraph, in parallel with the `section_titles` and
`leading_sentences` arrays.
"""
self.section_titles = section_titles
self.leading_sentences = leading_sentences
self.paragraphs = paragraphs
@classmethod
def from_dict(cls, _dict: Dict) -> 'DocStructure':
"""Initialize a DocStructure object from a json dictionary."""
args = {}
valid_keys = ['section_titles', 'leading_sentences', 'paragraphs']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class DocStructure: '
+ ', '.join(bad_keys))
if 'section_titles' in _dict:
args['section_titles'] = [
SectionTitles._from_dict(x)
for x in (_dict.get('section_titles'))
]
if 'leading_sentences' in _dict:
args['leading_sentences'] = [
LeadingSentence._from_dict(x)
for x in (_dict.get('leading_sentences'))
]
if 'paragraphs' in _dict:
args['paragraphs'] = [
Paragraphs._from_dict(x) for x in (_dict.get('paragraphs'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DocStructure object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'section_titles') and self.section_titles is not None:
_dict['section_titles'] = [
x._to_dict() for x in self.section_titles
]
if hasattr(self,
'leading_sentences') and self.leading_sentences is not None:
_dict['leading_sentences'] = [
x._to_dict() for x in self.leading_sentences
]
if hasattr(self, 'paragraphs') and self.paragraphs is not None:
_dict['paragraphs'] = [x._to_dict() for x in self.paragraphs]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DocStructure object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'DocStructure') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DocStructure') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
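
# A minimal illustrative sketch (not part of the service API, never called by
# the SDK itself): DocStructure parses its three list attributes element by
# element through the SectionTitles, LeadingSentence and Paragraphs models
# defined elsewhere in this module; empty lists keep the sketch self-contained.
def _sketch_doc_structure_nested_lists() -> None:
    structure = DocStructure.from_dict(
        {'section_titles': [], 'leading_sentences': [], 'paragraphs': []})
    assert structure.to_dict() == {
        'section_titles': [], 'leading_sentences': [], 'paragraphs': []}
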
class Document():
"""
Basic information about the input document.
:attr str title: (optional) Document title, if detected.
:attr str html: (optional) The input document converted into HTML format.
:attr str hash: (optional) The MD5 hash value of the input document.
:attr str label: (optional) The label applied to the input document with the
calling method's `file_1_label` or `file_2_label` value. This field is specified
only in the output of the **Comparing two documents** method.
"""
def __init__(self,
*,
title: str = None,
html: str = None,
hash: str = None,
label: str = None) -> None:
"""
Initialize a Document object.
:param str title: (optional) Document title, if detected.
:param str html: (optional) The input document converted into HTML format.
:param str hash: (optional) The MD5 hash value of the input document.
:param str label: (optional) The label applied to the input document with
the calling method's `file_1_label` or `file_2_label` value. This field is
specified only in the output of the **Comparing two documents** method.
"""
self.title = title
self.html = html
self.hash = hash
self.label = label
@classmethod
def from_dict(cls, _dict: Dict) -> 'Document':
"""Initialize a Document object from a json dictionary."""
args = {}
valid_keys = ['title', 'html', 'hash', 'label']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Document: '
+ ', '.join(bad_keys))
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'html' in _dict:
args['html'] = _dict.get('html')
if 'hash' in _dict:
args['hash'] = _dict.get('hash')
if 'label' in _dict:
args['label'] = _dict.get('label')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Document object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'html') and self.html is not None:
_dict['html'] = self.html
if hasattr(self, 'hash') and self.hash is not None:
_dict['hash'] = self.hash
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Document object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Document') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Document') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
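
# A minimal illustrative sketch (not part of the service API, never called by
# the SDK itself): Document equality is a field-by-field comparison of
# __dict__, so key order in the source dictionary does not matter; the titles
# and hash are made up.
def _sketch_document_equality() -> None:
    a = Document.from_dict({'title': 'MSA', 'hash': 'abc123'})
    b = Document.from_dict({'hash': 'abc123', 'title': 'MSA'})
    assert a == b
    assert a != Document.from_dict({'title': 'NDA'})
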
class EffectiveDates():
"""
An effective date.
:attr str confidence_level: (optional) The confidence level in the
identification of the effective date.
:attr str text: (optional) The effective date, listed as a string.
:attr str text_normalized: (optional) The normalized form of the effective date,
which is listed as a string. This element is optional; it is returned only if
normalized text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
        Initialize an EffectiveDates object.
:param str confidence_level: (optional) The confidence level in the
identification of the effective date.
:param str text: (optional) The effective date, listed as a string.
:param str text_normalized: (optional) The normalized form of the effective
date, which is listed as a string. This element is optional; it is returned
only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'EffectiveDates':
"""Initialize a EffectiveDates object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'provenance_ids',
'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class EffectiveDates: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a EffectiveDates object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this EffectiveDates object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'EffectiveDates') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'EffectiveDates') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the effective date.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class Element():
"""
A component part of the document.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text of the element.
:attr List[TypeLabel] types: (optional) Description of the action specified by
the element and whom it affects.
:attr List[Category] categories: (optional) List of functional categories into
which the element falls; in other words, the subject matter of the element.
:attr List[Attribute] attributes: (optional) List of document attributes.
"""
def __init__(self,
*,
location: 'Location' = None,
text: str = None,
types: List['TypeLabel'] = None,
categories: List['Category'] = None,
attributes: List['Attribute'] = None) -> None:
"""
        Initialize an Element object.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text of the element.
:param List[TypeLabel] types: (optional) Description of the action
specified by the element and whom it affects.
:param List[Category] categories: (optional) List of functional categories
into which the element falls; in other words, the subject matter of the
element.
:param List[Attribute] attributes: (optional) List of document attributes.
"""
self.location = location
self.text = text
self.types = types
self.categories = categories
self.attributes = attributes
@classmethod
def from_dict(cls, _dict: Dict) -> 'Element':
"""Initialize a Element object from a json dictionary."""
args = {}
valid_keys = ['location', 'text', 'types', 'categories', 'attributes']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Element: ' +
', '.join(bad_keys))
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Element object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Element object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Element') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Element') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
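
# A minimal illustrative sketch (not part of the service API, never called by
# the SDK itself): an Element nests a Location, a model defined elsewhere in
# this module; the only assumption made here is that it carries the
# `begin`/`end` pair described in the docstring above. Text and offsets are
# made up.
def _sketch_element_with_location() -> None:
    element = Element.from_dict(
        {'text': 'The term of this agreement is two years.',
         'location': {'begin': 100, 'end': 140}})
    assert element.text.startswith('The term')
    assert element.to_dict()['location']['begin'] == 100
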
class ElementLocations():
"""
A list of `begin` and `end` indexes that indicate the locations of the elements in the
input document.
:attr int begin: (optional) An integer that indicates the starting position of
the element in the input document.
:attr int end: (optional) An integer that indicates the ending position of the
element in the input document.
"""
def __init__(self, *, begin: int = None, end: int = None) -> None:
"""
        Initialize an ElementLocations object.
:param int begin: (optional) An integer that indicates the starting
position of the element in the input document.
:param int end: (optional) An integer that indicates the ending position of
the element in the input document.
"""
self.begin = begin
self.end = end
@classmethod
def from_dict(cls, _dict: Dict) -> 'ElementLocations':
"""Initialize a ElementLocations object from a json dictionary."""
args = {}
valid_keys = ['begin', 'end']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ElementLocations: '
+ ', '.join(bad_keys))
if 'begin' in _dict:
args['begin'] = _dict.get('begin')
if 'end' in _dict:
args['end'] = _dict.get('end')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ElementLocations object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'begin') and self.begin is not None:
_dict['begin'] = self.begin
if hasattr(self, 'end') and self.end is not None:
_dict['end'] = self.end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ElementLocations object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ElementLocations') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ElementLocations') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
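
# A minimal illustrative sketch (not part of the service API, never called by
# the SDK itself): an ElementLocations round trip with made-up offsets.
def _sketch_element_locations_round_trip() -> None:
    span = ElementLocations.from_dict({'begin': 2340, 'end': 2410})
    assert (span.begin, span.end) == (2340, 2410)
    assert span.to_dict() == {'begin': 2340, 'end': 2410}
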
class ElementPair():
"""
Details of semantically aligned elements.
:attr str document_label: (optional) The label of the document (that is, the
value of either the `file_1_label` or `file_2_label` parameters) in which the
element occurs.
:attr str text: (optional) The contents of the element.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr List[TypeLabelComparison] types: (optional) Description of the action
specified by the element and whom it affects.
:attr List[CategoryComparison] categories: (optional) List of functional
categories into which the element falls; in other words, the subject matter of
the element.
:attr List[Attribute] attributes: (optional) List of document attributes.
"""
def __init__(self,
*,
document_label: str = None,
text: str = None,
location: 'Location' = None,
types: List['TypeLabelComparison'] = None,
categories: List['CategoryComparison'] = None,
attributes: List['Attribute'] = None) -> None:
"""
        Initialize an ElementPair object.
:param str document_label: (optional) The label of the document (that is,
the value of either the `file_1_label` or `file_2_label` parameters) in
which the element occurs.
:param str text: (optional) The contents of the element.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param List[TypeLabelComparison] types: (optional) Description of the
action specified by the element and whom it affects.
:param List[CategoryComparison] categories: (optional) List of functional
categories into which the element falls; in other words, the subject matter
of the element.
:param List[Attribute] attributes: (optional) List of document attributes.
"""
self.document_label = document_label
self.text = text
self.location = location
self.types = types
self.categories = categories
self.attributes = attributes
@classmethod
def from_dict(cls, _dict: Dict) -> 'ElementPair':
"""Initialize a ElementPair object from a json dictionary."""
args = {}
valid_keys = [
'document_label', 'text', 'location', 'types', 'categories',
'attributes'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ElementPair: '
+ ', '.join(bad_keys))
if 'document_label' in _dict:
args['document_label'] = _dict.get('document_label')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'types' in _dict:
args['types'] = [
TypeLabelComparison._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
CategoryComparison._from_dict(x)
for x in (_dict.get('categories'))
]
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ElementPair object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_label') and self.document_label is not None:
_dict['document_label'] = self.document_label
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ElementPair object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ElementPair') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ElementPair') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FeedbackDataInput():
"""
Feedback data for submission.
:attr str feedback_type: The type of feedback. The only permitted value is
`element_classification`.
:attr ShortDoc document: (optional) Brief information about the input document.
:attr str model_id: (optional) An optional string identifying the model ID. The
only permitted value is `contracts`.
:attr str model_version: (optional) An optional string identifying the version
of the model used.
:attr Location location: The numeric location of the identified element in the
document, represented with two integers labeled `begin` and `end`.
:attr str text: The text on which to submit feedback.
:attr OriginalLabelsIn original_labels: The original labeling from the input
document, without the submitted feedback.
:attr UpdatedLabelsIn updated_labels: The updated labeling from the input
document, accounting for the submitted feedback.
"""
def __init__(self,
feedback_type: str,
location: 'Location',
text: str,
original_labels: 'OriginalLabelsIn',
updated_labels: 'UpdatedLabelsIn',
*,
document: 'ShortDoc' = None,
model_id: str = None,
model_version: str = None) -> None:
"""
Initialize a FeedbackDataInput object.
:param str feedback_type: The type of feedback. The only permitted value is
`element_classification`.
:param Location location: The numeric location of the identified element in
the document, represented with two integers labeled `begin` and `end`.
:param str text: The text on which to submit feedback.
:param OriginalLabelsIn original_labels: The original labeling from the
input document, without the submitted feedback.
:param UpdatedLabelsIn updated_labels: The updated labeling from the input
document, accounting for the submitted feedback.
:param ShortDoc document: (optional) Brief information about the input
document.
:param str model_id: (optional) An optional string identifying the model
ID. The only permitted value is `contracts`.
:param str model_version: (optional) An optional string identifying the
version of the model used.
"""
self.feedback_type = feedback_type
self.document = document
self.model_id = model_id
self.model_version = model_version
self.location = location
self.text = text
self.original_labels = original_labels
self.updated_labels = updated_labels
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackDataInput':
"""Initialize a FeedbackDataInput object from a json dictionary."""
args = {}
valid_keys = [
'feedback_type', 'document', 'model_id', 'model_version',
'location', 'text', 'original_labels', 'updated_labels'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackDataInput: '
+ ', '.join(bad_keys))
if 'feedback_type' in _dict:
args['feedback_type'] = _dict.get('feedback_type')
else:
raise ValueError(
'Required property \'feedback_type\' not present in FeedbackDataInput JSON'
)
if 'document' in _dict:
args['document'] = ShortDoc._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
else:
raise ValueError(
'Required property \'location\' not present in FeedbackDataInput JSON'
)
if 'text' in _dict:
args['text'] = _dict.get('text')
else:
raise ValueError(
'Required property \'text\' not present in FeedbackDataInput JSON'
)
if 'original_labels' in _dict:
args['original_labels'] = OriginalLabelsIn._from_dict(
_dict.get('original_labels'))
else:
raise ValueError(
'Required property \'original_labels\' not present in FeedbackDataInput JSON'
)
if 'updated_labels' in _dict:
args['updated_labels'] = UpdatedLabelsIn._from_dict(
_dict.get('updated_labels'))
else:
raise ValueError(
'Required property \'updated_labels\' not present in FeedbackDataInput JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackDataInput object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_type') and self.feedback_type is not None:
_dict['feedback_type'] = self.feedback_type
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'original_labels') and self.original_labels is not None:
_dict['original_labels'] = self.original_labels._to_dict()
if hasattr(self, 'updated_labels') and self.updated_labels is not None:
_dict['updated_labels'] = self.updated_labels._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackDataInput object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackDataInput') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackDataInput') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
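
# A minimal illustrative sketch (not part of the service API, never called by
# the SDK itself): unlike the purely optional models above,
# FeedbackDataInput.from_dict() enforces its required properties and raises
# ValueError when one is missing.
def _sketch_feedback_data_input_required_fields() -> None:
    try:
        FeedbackDataInput.from_dict(
            {'feedback_type': 'element_classification'})
    except ValueError as err:
        assert "Required property 'location'" in str(err)
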
class FeedbackDataOutput():
"""
Information returned from the **Add Feedback** method.
    :attr str feedback_type: (optional) The type of feedback. The only permitted
    value is `element_classification`.
:attr ShortDoc document: (optional) Brief information about the input document.
:attr str model_id: (optional) An optional string identifying the model ID. The
only permitted value is `contracts`.
:attr str model_version: (optional) An optional string identifying the version
of the model used.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text to which the feedback applies.
:attr OriginalLabelsOut original_labels: (optional) The original labeling from
the input document, without the submitted feedback.
:attr UpdatedLabelsOut updated_labels: (optional) The updated labeling from the
input document, accounting for the submitted feedback.
:attr Pagination pagination: (optional) Pagination details, if required by the
length of the output.
"""
def __init__(self,
*,
feedback_type: str = None,
document: 'ShortDoc' = None,
model_id: str = None,
model_version: str = None,
location: 'Location' = None,
text: str = None,
original_labels: 'OriginalLabelsOut' = None,
updated_labels: 'UpdatedLabelsOut' = None,
pagination: 'Pagination' = None) -> None:
"""
Initialize a FeedbackDataOutput object.
        :param str feedback_type: (optional) The type of feedback. The only
        permitted value is `element_classification`.
:param ShortDoc document: (optional) Brief information about the input
document.
:param str model_id: (optional) An optional string identifying the model
ID. The only permitted value is `contracts`.
:param str model_version: (optional) An optional string identifying the
version of the model used.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text to which the feedback applies.
:param OriginalLabelsOut original_labels: (optional) The original labeling
from the input document, without the submitted feedback.
:param UpdatedLabelsOut updated_labels: (optional) The updated labeling
from the input document, accounting for the submitted feedback.
:param Pagination pagination: (optional) Pagination details, if required by
the length of the output.
"""
self.feedback_type = feedback_type
self.document = document
self.model_id = model_id
self.model_version = model_version
self.location = location
self.text = text
self.original_labels = original_labels
self.updated_labels = updated_labels
self.pagination = pagination
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackDataOutput':
"""Initialize a FeedbackDataOutput object from a json dictionary."""
args = {}
valid_keys = [
'feedback_type', 'document', 'model_id', 'model_version',
'location', 'text', 'original_labels', 'updated_labels',
'pagination'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackDataOutput: '
+ ', '.join(bad_keys))
if 'feedback_type' in _dict:
args['feedback_type'] = _dict.get('feedback_type')
if 'document' in _dict:
args['document'] = ShortDoc._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'original_labels' in _dict:
args['original_labels'] = OriginalLabelsOut._from_dict(
_dict.get('original_labels'))
if 'updated_labels' in _dict:
args['updated_labels'] = UpdatedLabelsOut._from_dict(
_dict.get('updated_labels'))
if 'pagination' in _dict:
args['pagination'] = Pagination._from_dict(_dict.get('pagination'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackDataOutput object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_type') and self.feedback_type is not None:
_dict['feedback_type'] = self.feedback_type
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'original_labels') and self.original_labels is not None:
_dict['original_labels'] = self.original_labels._to_dict()
if hasattr(self, 'updated_labels') and self.updated_labels is not None:
_dict['updated_labels'] = self.updated_labels._to_dict()
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackDataOutput object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackDataOutput') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackDataOutput') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FeedbackDeleted():
"""
The status and message of the deletion request.
:attr int status: (optional) HTTP return code.
:attr str message: (optional) Status message returned from the service.
"""
def __init__(self, *, status: int = None, message: str = None) -> None:
"""
Initialize a FeedbackDeleted object.
:param int status: (optional) HTTP return code.
:param str message: (optional) Status message returned from the service.
"""
self.status = status
self.message = message
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackDeleted':
"""Initialize a FeedbackDeleted object from a json dictionary."""
args = {}
valid_keys = ['status', 'message']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackDeleted: '
+ ', '.join(bad_keys))
if 'status' in _dict:
args['status'] = _dict.get('status')
if 'message' in _dict:
args['message'] = _dict.get('message')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackDeleted object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'message') and self.message is not None:
_dict['message'] = self.message
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackDeleted object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackDeleted') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackDeleted') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FeedbackList():
"""
The results of a successful **List Feedback** request for all feedback.
:attr List[GetFeedback] feedback: (optional) A list of all feedback for the
document.
"""
def __init__(self, *, feedback: List['GetFeedback'] = None) -> None:
"""
Initialize a FeedbackList object.
:param List[GetFeedback] feedback: (optional) A list of all feedback for
the document.
"""
self.feedback = feedback
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackList':
"""Initialize a FeedbackList object from a json dictionary."""
args = {}
valid_keys = ['feedback']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackList: '
+ ', '.join(bad_keys))
if 'feedback' in _dict:
args['feedback'] = [
GetFeedback._from_dict(x) for x in (_dict.get('feedback'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackList object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback') and self.feedback is not None:
_dict['feedback'] = [x._to_dict() for x in self.feedback]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackList object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackList') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackList') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FeedbackReturn():
"""
Information about the document and the submitted feedback.
:attr str feedback_id: (optional) The unique ID of the feedback object.
:attr str user_id: (optional) An optional string identifying the person
submitting feedback.
:attr str comment: (optional) An optional comment from the person submitting the
feedback.
:attr datetime created: (optional) Timestamp listing the creation time of the
feedback submission.
:attr FeedbackDataOutput feedback_data: (optional) Information returned from the
**Add Feedback** method.
"""
def __init__(self,
*,
feedback_id: str = None,
user_id: str = None,
comment: str = None,
created: datetime = None,
feedback_data: 'FeedbackDataOutput' = None) -> None:
"""
Initialize a FeedbackReturn object.
:param str feedback_id: (optional) The unique ID of the feedback object.
:param str user_id: (optional) An optional string identifying the person
submitting feedback.
:param str comment: (optional) An optional comment from the person
submitting the feedback.
:param datetime created: (optional) Timestamp listing the creation time of
the feedback submission.
:param FeedbackDataOutput feedback_data: (optional) Information returned
from the **Add Feedback** method.
"""
self.feedback_id = feedback_id
self.user_id = user_id
self.comment = comment
self.created = created
self.feedback_data = feedback_data
@classmethod
def from_dict(cls, _dict: Dict) -> 'FeedbackReturn':
"""Initialize a FeedbackReturn object from a json dictionary."""
args = {}
valid_keys = [
'feedback_id', 'user_id', 'comment', 'created', 'feedback_data'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class FeedbackReturn: '
+ ', '.join(bad_keys))
if 'feedback_id' in _dict:
args['feedback_id'] = _dict.get('feedback_id')
if 'user_id' in _dict:
args['user_id'] = _dict.get('user_id')
if 'comment' in _dict:
args['comment'] = _dict.get('comment')
if 'created' in _dict:
args['created'] = string_to_datetime(_dict.get('created'))
if 'feedback_data' in _dict:
args['feedback_data'] = FeedbackDataOutput._from_dict(
_dict.get('feedback_data'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FeedbackReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_id') and self.feedback_id is not None:
_dict['feedback_id'] = self.feedback_id
if hasattr(self, 'user_id') and self.user_id is not None:
_dict['user_id'] = self.user_id
if hasattr(self, 'comment') and self.comment is not None:
_dict['comment'] = self.comment
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'feedback_data') and self.feedback_data is not None:
_dict['feedback_data'] = self.feedback_data._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FeedbackReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'FeedbackReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FeedbackReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
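
# A minimal illustrative sketch (not part of the service API, never called by
# the SDK itself): FeedbackReturn converts `created` with
# string_to_datetime()/datetime_to_string(), helpers that are used unqualified
# above and therefore provided elsewhere in this module; the timestamp is made
# up and the serialized form is assumed to remain ISO 8601.
def _sketch_feedback_return_created_timestamp() -> None:
    ret = FeedbackReturn.from_dict(
        {'feedback_id': 'f-1', 'created': '2018-02-22T19:04:07Z'})
    assert isinstance(ret.created, datetime)
    assert ret.to_dict()['created'].startswith('2018-02-22')
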
class GetFeedback():
"""
The results of a successful **Get Feedback** request for a single feedback entry.
:attr str feedback_id: (optional) A string uniquely identifying the feedback
entry.
:attr datetime created: (optional) A timestamp identifying the creation time of
the feedback entry.
:attr str comment: (optional) A string containing the user's comment about the
feedback entry.
:attr FeedbackDataOutput feedback_data: (optional) Information returned from the
**Add Feedback** method.
"""
def __init__(self,
*,
feedback_id: str = None,
created: datetime = None,
comment: str = None,
feedback_data: 'FeedbackDataOutput' = None) -> None:
"""
Initialize a GetFeedback object.
:param str feedback_id: (optional) A string uniquely identifying the
feedback entry.
:param datetime created: (optional) A timestamp identifying the creation
time of the feedback entry.
:param str comment: (optional) A string containing the user's comment about
the feedback entry.
:param FeedbackDataOutput feedback_data: (optional) Information returned
from the **Add Feedback** method.
"""
self.feedback_id = feedback_id
self.created = created
self.comment = comment
self.feedback_data = feedback_data
@classmethod
def from_dict(cls, _dict: Dict) -> 'GetFeedback':
"""Initialize a GetFeedback object from a json dictionary."""
args = {}
valid_keys = ['feedback_id', 'created', 'comment', 'feedback_data']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class GetFeedback: '
+ ', '.join(bad_keys))
if 'feedback_id' in _dict:
args['feedback_id'] = _dict.get('feedback_id')
if 'created' in _dict:
args['created'] = string_to_datetime(_dict.get('created'))
if 'comment' in _dict:
args['comment'] = _dict.get('comment')
if 'feedback_data' in _dict:
args['feedback_data'] = FeedbackDataOutput._from_dict(
_dict.get('feedback_data'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a GetFeedback object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_id') and self.feedback_id is not None:
_dict['feedback_id'] = self.feedback_id
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'comment') and self.comment is not None:
_dict['comment'] = self.comment
if hasattr(self, 'feedback_data') and self.feedback_data is not None:
_dict['feedback_data'] = self.feedback_data._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this GetFeedback object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'GetFeedback') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'GetFeedback') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class HTMLReturn():
"""
The HTML converted from an input document.
:attr str num_pages: (optional) The number of pages in the input document.
:attr str author: (optional) The author of the input document, if identified.
:attr str publication_date: (optional) The publication date of the input
document, if identified.
:attr str title: (optional) The title of the input document, if identified.
:attr str html: (optional) The HTML version of the input document.
"""
def __init__(self,
*,
num_pages: str = None,
author: str = None,
publication_date: str = None,
title: str = None,
html: str = None) -> None:
"""
Initialize an HTMLReturn object.
:param str num_pages: (optional) The number of pages in the input document.
:param str author: (optional) The author of the input document, if
identified.
:param str publication_date: (optional) The publication date of the input
document, if identified.
:param str title: (optional) The title of the input document, if
identified.
:param str html: (optional) The HTML version of the input document.
"""
self.num_pages = num_pages
self.author = author
self.publication_date = publication_date
self.title = title
self.html = html
@classmethod
def from_dict(cls, _dict: Dict) -> 'HTMLReturn':
"""Initialize a HTMLReturn object from a json dictionary."""
args = {}
valid_keys = [
'num_pages', 'author', 'publication_date', 'title', 'html'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class HTMLReturn: '
+ ', '.join(bad_keys))
if 'num_pages' in _dict:
args['num_pages'] = _dict.get('num_pages')
if 'author' in _dict:
args['author'] = _dict.get('author')
if 'publication_date' in _dict:
args['publication_date'] = _dict.get('publication_date')
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'html' in _dict:
args['html'] = _dict.get('html')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a HTMLReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'num_pages') and self.num_pages is not None:
_dict['num_pages'] = self.num_pages
if hasattr(self, 'author') and self.author is not None:
_dict['author'] = self.author
if hasattr(self,
'publication_date') and self.publication_date is not None:
_dict['publication_date'] = self.publication_date
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'html') and self.html is not None:
_dict['html'] = self.html
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this HTMLReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'HTMLReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'HTMLReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
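# Usage sketch (illustrative values; all fields are optional keyword arguments):
#
#     html_result = HTMLReturn(num_pages='12', title='Master Services Agreement')
#     html_result.to_dict()   # -> {'num_pages': '12', 'title': 'Master Services Agreement'}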
class Interpretation():
"""
The details of the normalized text, if applicable. This element is optional; it is
returned only if normalized text exists.
:attr str value: (optional) The value that was located in the normalized text.
:attr float numeric_value: (optional) An integer or float expressing the numeric
value of the `value` key.
:attr str unit: (optional) A string listing the unit of the value that was found
in the normalized text.
**Note:** The value of `unit` is the [ISO-4217 currency
code](https://www.iso.org/iso-4217-currency-codes.html) identified for the
currency amount (for example, `USD` or `EUR`). If the service cannot
disambiguate a currency symbol (for example, `$` or `£`), the value of `unit`
contains the ambiguous symbol as-is.
"""
def __init__(self,
*,
value: str = None,
numeric_value: float = None,
unit: str = None) -> None:
"""
Initialize an Interpretation object.
:param str value: (optional) The value that was located in the normalized
text.
:param float numeric_value: (optional) An integer or float expressing the
numeric value of the `value` key.
:param str unit: (optional) A string listing the unit of the value that was
found in the normalized text.
**Note:** The value of `unit` is the [ISO-4217 currency
code](https://www.iso.org/iso-4217-currency-codes.html) identified for the
currency amount (for example, `USD` or `EUR`). If the service cannot
disambiguate a currency symbol (for example, `$` or `£`), the value of
`unit` contains the ambiguous symbol as-is.
"""
self.value = value
self.numeric_value = numeric_value
self.unit = unit
@classmethod
def from_dict(cls, _dict: Dict) -> 'Interpretation':
"""Initialize a Interpretation object from a json dictionary."""
args = {}
valid_keys = ['value', 'numeric_value', 'unit']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Interpretation: '
+ ', '.join(bad_keys))
if 'value' in _dict:
args['value'] = _dict.get('value')
if 'numeric_value' in _dict:
args['numeric_value'] = _dict.get('numeric_value')
if 'unit' in _dict:
args['unit'] = _dict.get('unit')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Interpretation object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
if hasattr(self, 'numeric_value') and self.numeric_value is not None:
_dict['numeric_value'] = self.numeric_value
if hasattr(self, 'unit') and self.unit is not None:
_dict['unit'] = self.unit
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Interpretation object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Interpretation') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Interpretation') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
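# Usage sketch (hypothetical currency amount, per the ISO-4217 note above):
#
#     interp = Interpretation.from_dict({
#         'value': '$10,000',
#         'numeric_value': 10000,
#         'unit': 'USD',
#     })
#     interp.numeric_value   # -> 10000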
class Key():
"""
A key in a key-value pair.
:attr str cell_id: (optional) The unique ID of the key in the table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text content of the table cell without HTML
markup.
"""
def __init__(self,
*,
cell_id: str = None,
location: 'Location' = None,
text: str = None) -> None:
"""
Initialize a Key object.
:param str cell_id: (optional) The unique ID of the key in the table.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text content of the table cell without HTML
markup.
"""
self.cell_id = cell_id
self.location = location
self.text = text
@classmethod
def from_dict(cls, _dict: Dict) -> 'Key':
"""Initialize a Key object from a json dictionary."""
args = {}
valid_keys = ['cell_id', 'location', 'text']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Key: ' +
', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Key object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Key object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Key') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Key') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class KeyValuePair():
"""
Key-value pairs detected across cell boundaries.
:attr Key key: (optional) A key in a key-value pair.
:attr List[Value] value: (optional) A list of values in a key-value pair.
"""
def __init__(self, *, key: 'Key' = None,
value: List['Value'] = None) -> None:
"""
Initialize a KeyValuePair object.
:param Key key: (optional) A key in a key-value pair.
:param List[Value] value: (optional) A list of values in a key-value pair.
"""
self.key = key
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'KeyValuePair':
"""Initialize a KeyValuePair object from a json dictionary."""
args = {}
valid_keys = ['key', 'value']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class KeyValuePair: '
+ ', '.join(bad_keys))
if 'key' in _dict:
args['key'] = Key._from_dict(_dict.get('key'))
if 'value' in _dict:
args['value'] = [Value._from_dict(x) for x in (_dict.get('value'))]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a KeyValuePair object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key._to_dict()
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = [x._to_dict() for x in self.value]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this KeyValuePair object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'KeyValuePair') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'KeyValuePair') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
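# Usage sketch (illustrative; only the 'key' side is populated here, and the
# nested dictionary uses the Key fields documented above with hypothetical values):
#
#     kvp = KeyValuePair.from_dict({
#         'key': {'cell_id': 'keyCellId-0', 'text': 'Term of agreement'}
#     })
#     kvp.key.text   # -> 'Term of agreement'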
class Label():
"""
A pair of `nature` and `party` objects. The `nature` object identifies the effect of
the element on the identified `party`, and the `party` object identifies the affected
party.
:attr str nature: The identified `nature` of the element.
:attr str party: The identified `party` of the element.
"""
def __init__(self, nature: str, party: str) -> None:
"""
Initialize a Label object.
:param str nature: The identified `nature` of the element.
:param str party: The identified `party` of the element.
"""
self.nature = nature
self.party = party
@classmethod
def from_dict(cls, _dict: Dict) -> 'Label':
"""Initialize a Label object from a json dictionary."""
args = {}
valid_keys = ['nature', 'party']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Label: ' +
', '.join(bad_keys))
if 'nature' in _dict:
args['nature'] = _dict.get('nature')
else:
raise ValueError(
'Required property \'nature\' not present in Label JSON')
if 'party' in _dict:
args['party'] = _dict.get('party')
else:
raise ValueError(
'Required property \'party\' not present in Label JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Label object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'nature') and self.nature is not None:
_dict['nature'] = self.nature
if hasattr(self, 'party') and self.party is not None:
_dict['party'] = self.party
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Label object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Label') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Label') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
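# Usage sketch: both properties are required, so an incomplete dictionary raises
# ValueError (values below are hypothetical):
#
#     label = Label.from_dict({'nature': 'Obligation', 'party': 'Supplier'})
#     Label.from_dict({'nature': 'Obligation'})   # raises ValueError: 'party' not present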
class LeadingSentence():
"""
The leading sentences in a section or subsection of the input document.
:attr str text: (optional) The text of the leading sentence.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr List[ElementLocations] element_locations: (optional) An array of
`location` objects that lists the locations of detected leading sentences.
"""
def __init__(self,
*,
text: str = None,
location: 'Location' = None,
element_locations: List['ElementLocations'] = None) -> None:
"""
Initialize a LeadingSentence object.
:param str text: (optional) The text of the leading sentence.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param List[ElementLocations] element_locations: (optional) An array of
`location` objects that lists the locations of detected leading sentences.
"""
self.text = text
self.location = location
self.element_locations = element_locations
@classmethod
def from_dict(cls, _dict: Dict) -> 'LeadingSentence':
"""Initialize a LeadingSentence object from a json dictionary."""
args = {}
valid_keys = ['text', 'location', 'element_locations']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class LeadingSentence: '
+ ', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'element_locations' in _dict:
args['element_locations'] = [
ElementLocations._from_dict(x)
for x in (_dict.get('element_locations'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a LeadingSentence object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self,
'element_locations') and self.element_locations is not None:
_dict['element_locations'] = [
x._to_dict() for x in self.element_locations
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this LeadingSentence object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'LeadingSentence') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'LeadingSentence') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Location():
"""
The numeric location of the identified element in the document, represented with two
integers labeled `begin` and `end`.
:attr int begin: The element's `begin` index.
:attr int end: The element's `end` index.
"""
def __init__(self, begin: int, end: int) -> None:
"""
Initialize a Location object.
:param int begin: The element's `begin` index.
:param int end: The element's `end` index.
"""
self.begin = begin
self.end = end
@classmethod
def from_dict(cls, _dict: Dict) -> 'Location':
"""Initialize a Location object from a json dictionary."""
args = {}
valid_keys = ['begin', 'end']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Location: '
+ ', '.join(bad_keys))
if 'begin' in _dict:
args['begin'] = _dict.get('begin')
else:
raise ValueError(
'Required property \'begin\' not present in Location JSON')
if 'end' in _dict:
args['end'] = _dict.get('end')
else:
raise ValueError(
'Required property \'end\' not present in Location JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Location object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'begin') and self.begin is not None:
_dict['begin'] = self.begin
if hasattr(self, 'end') and self.end is not None:
_dict['end'] = self.end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Location object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Location') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Location') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
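# Usage sketch: 'begin' and 'end' are required character offsets (hypothetical
# values shown):
#
#     loc = Location.from_dict({'begin': 1403, 'end': 1468})
#     loc.to_dict()   # -> {'begin': 1403, 'end': 1468}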
class Mention():
"""
A mention of a party.
:attr str text: (optional) The name of the party.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, text: str = None,
location: 'Location' = None) -> None:
"""
Initialize a Mention object.
:param str text: (optional) The name of the party.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Mention':
"""Initialize a Mention object from a json dictionary."""
args = {}
valid_keys = ['text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Mention: ' +
', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Mention object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Mention object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Mention') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Mention') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class OriginalLabelsIn():
"""
The original labeling from the input document, without the submitted feedback.
:attr List[TypeLabel] types: Description of the action specified by the element
and whom it affects.
:attr List[Category] categories: List of functional categories into which the
element falls; in other words, the subject matter of the element.
"""
def __init__(self, types: List['TypeLabel'],
categories: List['Category']) -> None:
"""
Initialize an OriginalLabelsIn object.
:param List[TypeLabel] types: Description of the action specified by the
element and whom it affects.
:param List[Category] categories: List of functional categories into which
the element falls; in other words, the subject matter of the element.
"""
self.types = types
self.categories = categories
@classmethod
def from_dict(cls, _dict: Dict) -> 'OriginalLabelsIn':
"""Initialize a OriginalLabelsIn object from a json dictionary."""
args = {}
valid_keys = ['types', 'categories']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class OriginalLabelsIn: '
+ ', '.join(bad_keys))
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
else:
raise ValueError(
'Required property \'types\' not present in OriginalLabelsIn JSON'
)
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
else:
raise ValueError(
'Required property \'categories\' not present in OriginalLabelsIn JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a OriginalLabelsIn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this OriginalLabelsIn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'OriginalLabelsIn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'OriginalLabelsIn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class OriginalLabelsOut():
"""
The original labeling from the input document, without the submitted feedback.
:attr List[TypeLabel] types: (optional) Description of the action specified by
the element and whom it affects.
:attr List[Category] categories: (optional) List of functional categories into
which the element falls; in other words, the subject matter of the element.
:attr str modification: (optional) A string identifying the type of modification
made to the feedback entry in the `updated_labels` array. Possible values are
`added`, `not_changed`, and `removed`.
"""
def __init__(self,
*,
types: List['TypeLabel'] = None,
categories: List['Category'] = None,
modification: str = None) -> None:
"""
Initialize an OriginalLabelsOut object.
:param List[TypeLabel] types: (optional) Description of the action
specified by the element and whom it affects.
:param List[Category] categories: (optional) List of functional categories
into which the element falls; in other words, the subject matter of the
element.
:param str modification: (optional) A string identifying the type of
modification made to the feedback entry in the `updated_labels` array.
Possible values are `added`, `not_changed`, and `removed`.
"""
self.types = types
self.categories = categories
self.modification = modification
@classmethod
def from_dict(cls, _dict: Dict) -> 'OriginalLabelsOut':
"""Initialize a OriginalLabelsOut object from a json dictionary."""
args = {}
valid_keys = ['types', 'categories', 'modification']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class OriginalLabelsOut: '
+ ', '.join(bad_keys))
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
if 'modification' in _dict:
args['modification'] = _dict.get('modification')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a OriginalLabelsOut object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'modification') and self.modification is not None:
_dict['modification'] = self.modification
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this OriginalLabelsOut object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'OriginalLabelsOut') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'OriginalLabelsOut') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ModificationEnum(Enum):
"""
A string identifying the type of modification made to the feedback entry in the
`updated_labels` array. Possible values are `added`, `not_changed`, and `removed`.
"""
ADDED = "added"
NOT_CHANGED = "not_changed"
REMOVED = "removed"
class Pagination():
"""
Pagination details, if required by the length of the output.
:attr str refresh_cursor: (optional) A token identifying the current page of
results.
:attr str next_cursor: (optional) A token identifying the next page of results.
:attr str refresh_url: (optional) The URL that returns the current page of
results.
:attr str next_url: (optional) The URL that returns the next page of results.
:attr int total: (optional) Reserved for future use.
"""
def __init__(self,
*,
refresh_cursor: str = None,
next_cursor: str = None,
refresh_url: str = None,
next_url: str = None,
total: int = None) -> None:
"""
Initialize a Pagination object.
:param str refresh_cursor: (optional) A token identifying the current page
of results.
:param str next_cursor: (optional) A token identifying the next page of
results.
:param str refresh_url: (optional) The URL that returns the current page of
results.
:param str next_url: (optional) The URL that returns the next page of
results.
:param int total: (optional) Reserved for future use.
"""
self.refresh_cursor = refresh_cursor
self.next_cursor = next_cursor
self.refresh_url = refresh_url
self.next_url = next_url
self.total = total
@classmethod
def from_dict(cls, _dict: Dict) -> 'Pagination':
"""Initialize a Pagination object from a json dictionary."""
args = {}
valid_keys = [
'refresh_cursor', 'next_cursor', 'refresh_url', 'next_url', 'total'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Pagination: '
+ ', '.join(bad_keys))
if 'refresh_cursor' in _dict:
args['refresh_cursor'] = _dict.get('refresh_cursor')
if 'next_cursor' in _dict:
args['next_cursor'] = _dict.get('next_cursor')
if 'refresh_url' in _dict:
args['refresh_url'] = _dict.get('refresh_url')
if 'next_url' in _dict:
args['next_url'] = _dict.get('next_url')
if 'total' in _dict:
args['total'] = _dict.get('total')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Pagination object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'refresh_cursor') and self.refresh_cursor is not None:
_dict['refresh_cursor'] = self.refresh_cursor
if hasattr(self, 'next_cursor') and self.next_cursor is not None:
_dict['next_cursor'] = self.next_cursor
if hasattr(self, 'refresh_url') and self.refresh_url is not None:
_dict['refresh_url'] = self.refresh_url
if hasattr(self, 'next_url') and self.next_url is not None:
_dict['next_url'] = self.next_url
if hasattr(self, 'total') and self.total is not None:
_dict['total'] = self.total
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Pagination object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Pagination') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Pagination') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
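# Usage sketch (hypothetical cursor and URL values; all fields are optional):
#
#     page = Pagination.from_dict({
#         'refresh_cursor': 'aW5kZXgtMA',
#         'next_cursor': 'aW5kZXgtMQ',
#         'next_url': '/v1/feedback?cursor=aW5kZXgtMQ',
#     })
#     page.next_cursor   # -> 'aW5kZXgtMQ'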
class Paragraphs():
"""
The locations of each paragraph in the input document.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, location: 'Location' = None) -> None:
"""
Initialize a Paragraphs object.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'Paragraphs':
"""Initialize a Paragraphs object from a json dictionary."""
args = {}
valid_keys = ['location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Paragraphs: '
+ ', '.join(bad_keys))
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Paragraphs object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Paragraphs object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Paragraphs') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Paragraphs') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Parties():
"""
A party and its corresponding role, including address and contact information if
identified.
:attr str party: (optional) The normalized form of the party's name.
:attr str role: (optional) A string identifying the party's role.
:attr str importance: (optional) A string that identifies the importance of the
party.
:attr List[Address] addresses: (optional) A list of the party's address or
addresses.
:attr List[Contact] contacts: (optional) A list of the names and roles of
contacts identified in the input document.
:attr List[Mention] mentions: (optional) A list of the party's mentions in the
input document.
"""
def __init__(self,
*,
party: str = None,
role: str = None,
importance: str = None,
addresses: List['Address'] = None,
contacts: List['Contact'] = None,
mentions: List['Mention'] = None) -> None:
"""
Initialize a Parties object.
:param str party: (optional) The normalized form of the party's name.
:param str role: (optional) A string identifying the party's role.
:param str importance: (optional) A string that identifies the importance
of the party.
:param List[Address] addresses: (optional) A list of the party's address or
addresses.
:param List[Contact] contacts: (optional) A list of the names and roles of
contacts identified in the input document.
:param List[Mention] mentions: (optional) A list of the party's mentions in
the input document.
"""
self.party = party
self.role = role
self.importance = importance
self.addresses = addresses
self.contacts = contacts
self.mentions = mentions
@classmethod
def from_dict(cls, _dict: Dict) -> 'Parties':
"""Initialize a Parties object from a json dictionary."""
args = {}
valid_keys = [
'party', 'role', 'importance', 'addresses', 'contacts', 'mentions'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Parties: ' +
', '.join(bad_keys))
if 'party' in _dict:
args['party'] = _dict.get('party')
if 'role' in _dict:
args['role'] = _dict.get('role')
if 'importance' in _dict:
args['importance'] = _dict.get('importance')
if 'addresses' in _dict:
args['addresses'] = [
Address._from_dict(x) for x in (_dict.get('addresses'))
]
if 'contacts' in _dict:
args['contacts'] = [
Contact._from_dict(x) for x in (_dict.get('contacts'))
]
if 'mentions' in _dict:
args['mentions'] = [
Mention._from_dict(x) for x in (_dict.get('mentions'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Parties object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'party') and self.party is not None:
_dict['party'] = self.party
if hasattr(self, 'role') and self.role is not None:
_dict['role'] = self.role
if hasattr(self, 'importance') and self.importance is not None:
_dict['importance'] = self.importance
if hasattr(self, 'addresses') and self.addresses is not None:
_dict['addresses'] = [x._to_dict() for x in self.addresses]
if hasattr(self, 'contacts') and self.contacts is not None:
_dict['contacts'] = [x._to_dict() for x in self.contacts]
if hasattr(self, 'mentions') and self.mentions is not None:
_dict['mentions'] = [x._to_dict() for x in self.mentions]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Parties object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Parties') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Parties') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ImportanceEnum(Enum):
"""
A string that identifies the importance of the party.
"""
PRIMARY = "Primary"
UNKNOWN = "Unknown"
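# Usage sketch (hypothetical party; nested 'mentions' entries use the Mention
# fields documented above, and 'importance' is one of the ImportanceEnum values):
#
#     party = Parties.from_dict({
#         'party': 'ACME Corporation',
#         'role': 'Supplier',
#         'importance': 'Primary',
#         'mentions': [{'text': 'ACME', 'location': {'begin': 120, 'end': 124}}],
#     })
#     party.mentions[0].location.end   # -> 124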
class PaymentTerms():
"""
The document's payment duration or durations.
:attr str confidence_level: (optional) The confidence level in the
identification of the payment term.
:attr str text: (optional) The payment term (duration).
:attr str text_normalized: (optional) The normalized form of the payment term,
which is listed as a string. This element is optional; it is returned only if
normalized text exists.
:attr Interpretation interpretation: (optional) The details of the normalized
text, if applicable. This element is optional; it is returned only if normalized
text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
interpretation: 'Interpretation' = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a PaymentTerms object.
:param str confidence_level: (optional) The confidence level in the
identification of the payment term.
:param str text: (optional) The payment term (duration).
:param str text_normalized: (optional) The normalized form of the payment
term, which is listed as a string. This element is optional; it is returned
only if normalized text exists.
:param Interpretation interpretation: (optional) The details of the
normalized text, if applicable. This element is optional; it is returned
only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.interpretation = interpretation
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'PaymentTerms':
"""Initialize a PaymentTerms object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'interpretation',
'provenance_ids', 'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class PaymentTerms: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'interpretation' in _dict:
args['interpretation'] = Interpretation._from_dict(
_dict.get('interpretation'))
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a PaymentTerms object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'interpretation') and self.interpretation is not None:
_dict['interpretation'] = self.interpretation._to_dict()
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this PaymentTerms object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'PaymentTerms') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'PaymentTerms') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the payment term.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
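# Usage sketch (hypothetical payment term; 'interpretation' and 'location' use
# the nested models documented above, and 'confidence_level' is one of the
# ConfidenceLevelEnum values):
#
#     term = PaymentTerms.from_dict({
#         'confidence_level': 'High',
#         'text': 'net 30 days',
#         'text_normalized': 'P30D',
#         'interpretation': {'value': 'P30D', 'numeric_value': 30},
#         'location': {'begin': 7819, 'end': 7830},
#     })
#     term.interpretation.numeric_value   # -> 30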
class RowHeaders():
"""
Row-level cells of the current table, each applicable as a header to other cells
in the same row.
:attr str cell_id: (optional) The unique ID of the cell in the current table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The textual contents of this cell from the input
document without associated markup content.
:attr str text_normalized: (optional) If you provide customization input, the
normalized version of the cell text according to the customization; otherwise,
the same value as `text`.
:attr int row_index_begin: (optional) The `begin` index of this cell's `row`
location in the current table.
:attr int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:attr int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:attr int column_index_end: (optional) The `end` index of this cell's `column`
location in the current table.
"""
def __init__(self,
*,
cell_id: str = None,
location: 'Location' = None,
text: str = None,
text_normalized: str = None,
row_index_begin: int = None,
row_index_end: int = None,
column_index_begin: int = None,
column_index_end: int = None) -> None:
"""
Initialize a RowHeaders object.
:param str cell_id: (optional) The unique ID of the cell in the current
table.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The textual contents of this cell from the
input document without associated markup content.
:param str text_normalized: (optional) If you provide customization input,
the normalized version of the cell text according to the customization;
otherwise, the same value as `text`.
:param int row_index_begin: (optional) The `begin` index of this cell's
`row` location in the current table.
:param int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:param int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:param int column_index_end: (optional) The `end` index of this cell's
`column` location in the current table.
"""
self.cell_id = cell_id
self.location = location
self.text = text
self.text_normalized = text_normalized
self.row_index_begin = row_index_begin
self.row_index_end = row_index_end
self.column_index_begin = column_index_begin
self.column_index_end = column_index_end
@classmethod
def from_dict(cls, _dict: Dict) -> 'RowHeaders':
"""Initialize a RowHeaders object from a json dictionary."""
args = {}
valid_keys = [
'cell_id', 'location', 'text', 'text_normalized', 'row_index_begin',
'row_index_end', 'column_index_begin', 'column_index_end'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class RowHeaders: '
+ ', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a RowHeaders object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self,
'row_index_begin') and self.row_index_begin is not None:
_dict['row_index_begin'] = self.row_index_begin
if hasattr(self, 'row_index_end') and self.row_index_end is not None:
_dict['row_index_end'] = self.row_index_end
if hasattr(
self,
'column_index_begin') and self.column_index_begin is not None:
_dict['column_index_begin'] = self.column_index_begin
if hasattr(self,
'column_index_end') and self.column_index_end is not None:
_dict['column_index_end'] = self.column_index_end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this RowHeaders object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'RowHeaders') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'RowHeaders') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class SectionTitle():
"""
The table's section title, if identified.
:attr str text: (optional) The text of the section title, if identified.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self, *, text: str = None,
location: 'Location' = None) -> None:
"""
Initialize a SectionTitle object.
:param str text: (optional) The text of the section title, if identified.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.text = text
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'SectionTitle':
"""Initialize a SectionTitle object from a json dictionary."""
args = {}
valid_keys = ['text', 'location']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class SectionTitle: '
+ ', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a SectionTitle object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this SectionTitle object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'SectionTitle') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'SectionTitle') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class SectionTitles():
"""
An array containing one object per section or subsection detected in the input
document. Sections and subsections are not nested; instead, they are flattened out and
can be placed back in order by using the `begin` and `end` values of the element and
the `level` value of the section.
:attr str text: (optional) The text of the section title, if identified.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr int level: (optional) An integer indicating the level at which the section
is located in the input document. For example, `1` represents a top-level
section, `2` represents a subsection within the level `1` section, and so forth.
:attr List[ElementLocations] element_locations: (optional) An array of
`location` objects that lists the locations of detected section titles.
"""
def __init__(self,
*,
text: str = None,
location: 'Location' = None,
level: int = None,
element_locations: List['ElementLocations'] = None) -> None:
"""
Initialize a SectionTitles object.
:param str text: (optional) The text of the section title, if identified.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param int level: (optional) An integer indicating the level at which the
section is located in the input document. For example, `1` represents a
top-level section, `2` represents a subsection within the level `1`
section, and so forth.
:param List[ElementLocations] element_locations: (optional) An array of
`location` objects that lists the locations of detected section titles.
"""
self.text = text
self.location = location
self.level = level
self.element_locations = element_locations
@classmethod
def from_dict(cls, _dict: Dict) -> 'SectionTitles':
"""Initialize a SectionTitles object from a json dictionary."""
args = {}
valid_keys = ['text', 'location', 'level', 'element_locations']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class SectionTitles: '
+ ', '.join(bad_keys))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'level' in _dict:
args['level'] = _dict.get('level')
if 'element_locations' in _dict:
args['element_locations'] = [
ElementLocations._from_dict(x)
for x in (_dict.get('element_locations'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a SectionTitles object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'level') and self.level is not None:
_dict['level'] = self.level
if hasattr(self,
'element_locations') and self.element_locations is not None:
_dict['element_locations'] = [
x._to_dict() for x in self.element_locations
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this SectionTitles object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'SectionTitles') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'SectionTitles') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
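# Usage sketch (hypothetical section title; a 'level' of 1 indicates a top-level
# section, per the class documentation):
#
#     section = SectionTitles.from_dict({
#         'text': '1. Definitions',
#         'location': {'begin': 512, 'end': 527},
#         'level': 1,
#     })
#     section.level   # -> 1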
class ShortDoc():
"""
Brief information about the input document.
:attr str title: (optional) The title of the input document, if identified.
:attr str hash: (optional) The MD5 hash of the input document.
"""
def __init__(self, *, title: str = None, hash: str = None) -> None:
"""
Initialize a ShortDoc object.
:param str title: (optional) The title of the input document, if
identified.
:param str hash: (optional) The MD5 hash of the input document.
"""
self.title = title
self.hash = hash
@classmethod
def from_dict(cls, _dict: Dict) -> 'ShortDoc':
"""Initialize a ShortDoc object from a json dictionary."""
args = {}
valid_keys = ['title', 'hash']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ShortDoc: '
+ ', '.join(bad_keys))
if 'title' in _dict:
args['title'] = _dict.get('title')
if 'hash' in _dict:
args['hash'] = _dict.get('hash')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ShortDoc object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'hash') and self.hash is not None:
_dict['hash'] = self.hash
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ShortDoc object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ShortDoc') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ShortDoc') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
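# Usage sketch (hypothetical title and MD5 hash values):
#
#     doc = ShortDoc.from_dict({
#         'title': 'Master Services Agreement',
#         'hash': '91e1f9a9e2bde2e0b4e7f8c6d5a3b1c2',
#     })
#     doc.to_dict() == {'title': 'Master Services Agreement',
#                       'hash': '91e1f9a9e2bde2e0b4e7f8c6d5a3b1c2'}   # -> True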
class TableHeaders():
"""
The contents of the current table's header.
:attr str cell_id: (optional) The unique ID of the cell in the current table.
:attr object location: (optional) The location of the table header cell in the
current table as defined by its `begin` and `end` offsets, respectively, in the
input document.
:attr str text: (optional) The textual contents of the cell from the input
document without associated markup content.
:attr int row_index_begin: (optional) The `begin` index of this cell's `row`
location in the current table.
:attr int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:attr int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:attr int column_index_end: (optional) The `end` index of this cell's `column`
location in the current table.
"""
def __init__(self,
*,
cell_id: str = None,
location: object = None,
text: str = None,
row_index_begin: int = None,
row_index_end: int = None,
column_index_begin: int = None,
column_index_end: int = None) -> None:
"""
Initialize a TableHeaders object.
:param str cell_id: (optional) The unique ID of the cell in the current
table.
:param object location: (optional) The location of the table header cell in
the current table as defined by its `begin` and `end` offsets,
        respectively, in the input document.
:param str text: (optional) The textual contents of the cell from the input
document without associated markup content.
:param int row_index_begin: (optional) The `begin` index of this cell's
`row` location in the current table.
:param int row_index_end: (optional) The `end` index of this cell's `row`
location in the current table.
:param int column_index_begin: (optional) The `begin` index of this cell's
`column` location in the current table.
:param int column_index_end: (optional) The `end` index of this cell's
`column` location in the current table.
"""
self.cell_id = cell_id
self.location = location
self.text = text
self.row_index_begin = row_index_begin
self.row_index_end = row_index_end
self.column_index_begin = column_index_begin
self.column_index_end = column_index_end
@classmethod
def from_dict(cls, _dict: Dict) -> 'TableHeaders':
"""Initialize a TableHeaders object from a json dictionary."""
args = {}
valid_keys = [
'cell_id', 'location', 'text', 'row_index_begin', 'row_index_end',
'column_index_begin', 'column_index_end'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TableHeaders: '
+ ', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = _dict.get('location')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TableHeaders object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'row_index_begin') and self.row_index_begin is not None:
_dict['row_index_begin'] = self.row_index_begin
if hasattr(self, 'row_index_end') and self.row_index_end is not None:
_dict['row_index_end'] = self.row_index_end
if hasattr(
self,
'column_index_begin') and self.column_index_begin is not None:
_dict['column_index_begin'] = self.column_index_begin
if hasattr(self,
'column_index_end') and self.column_index_end is not None:
_dict['column_index_end'] = self.column_index_end
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TableHeaders object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TableHeaders') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TableHeaders') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TableReturn():
"""
The analysis of the document's tables.
:attr DocInfo document: (optional) Information about the parsed input document.
:attr str model_id: (optional) The ID of the model used to extract the table
contents. The value for table extraction is `tables`.
:attr str model_version: (optional) The version of the `tables` model ID.
:attr List[Tables] tables: (optional) Definitions of the tables identified in
the input document.
"""
def __init__(self,
*,
document: 'DocInfo' = None,
model_id: str = None,
model_version: str = None,
tables: List['Tables'] = None) -> None:
"""
Initialize a TableReturn object.
:param DocInfo document: (optional) Information about the parsed input
document.
:param str model_id: (optional) The ID of the model used to extract the
table contents. The value for table extraction is `tables`.
:param str model_version: (optional) The version of the `tables` model ID.
:param List[Tables] tables: (optional) Definitions of the tables identified
in the input document.
"""
self.document = document
self.model_id = model_id
self.model_version = model_version
self.tables = tables
@classmethod
def from_dict(cls, _dict: Dict) -> 'TableReturn':
"""Initialize a TableReturn object from a json dictionary."""
args = {}
valid_keys = ['document', 'model_id', 'model_version', 'tables']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TableReturn: '
+ ', '.join(bad_keys))
if 'document' in _dict:
args['document'] = DocInfo._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'tables' in _dict:
args['tables'] = [
Tables._from_dict(x) for x in (_dict.get('tables'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TableReturn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'tables') and self.tables is not None:
_dict['tables'] = [x._to_dict() for x in self.tables]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TableReturn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TableReturn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TableReturn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TableTitle():
"""
If identified, the title or caption of the current table of the form `Table x.: ...`.
Empty when no title is identified. When exposed, the `title` is also excluded from the
`contexts` array of the same table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text of the identified table title or caption.
"""
def __init__(self, *, location: 'Location' = None,
text: str = None) -> None:
"""
Initialize a TableTitle object.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text of the identified table title or
caption.
"""
self.location = location
self.text = text
@classmethod
def from_dict(cls, _dict: Dict) -> 'TableTitle':
"""Initialize a TableTitle object from a json dictionary."""
args = {}
valid_keys = ['location', 'text']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TableTitle: '
+ ', '.join(bad_keys))
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TableTitle object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TableTitle object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TableTitle') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TableTitle') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Tables():
"""
The contents of the tables extracted from a document.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The textual contents of the current table from the
input document without associated markup content.
:attr SectionTitle section_title: (optional) The table's section title, if
identified.
:attr TableTitle title: (optional) If identified, the title or caption of the
current table of the form `Table x.: ...`. Empty when no title is identified.
When exposed, the `title` is also excluded from the `contexts` array of the same
table.
:attr List[TableHeaders] table_headers: (optional) An array of table-level cells
that apply as headers to all the other cells in the current table.
:attr List[RowHeaders] row_headers: (optional) An array of row-level cells, each
applicable as a header to other cells in the same row as itself, of the current
table.
:attr List[ColumnHeaders] column_headers: (optional) An array of column-level
cells, each applicable as a header to other cells in the same column as itself,
of the current table.
:attr List[BodyCells] body_cells: (optional) An array of cells that are neither
table header nor column header nor row header cells, of the current table with
corresponding row and column header associations.
:attr List[Contexts] contexts: (optional) An array of objects that list text
that is related to the table contents and that precedes or follows the current
table.
:attr List[KeyValuePair] key_value_pairs: (optional) An array of key-value pairs
identified in the current table.
"""
def __init__(self,
*,
location: 'Location' = None,
text: str = None,
section_title: 'SectionTitle' = None,
title: 'TableTitle' = None,
table_headers: List['TableHeaders'] = None,
row_headers: List['RowHeaders'] = None,
column_headers: List['ColumnHeaders'] = None,
body_cells: List['BodyCells'] = None,
contexts: List['Contexts'] = None,
key_value_pairs: List['KeyValuePair'] = None) -> None:
"""
Initialize a Tables object.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The textual contents of the current table from
the input document without associated markup content.
:param SectionTitle section_title: (optional) The table's section title, if
identified.
:param TableTitle title: (optional) If identified, the title or caption of
the current table of the form `Table x.: ...`. Empty when no title is
identified. When exposed, the `title` is also excluded from the `contexts`
array of the same table.
:param List[TableHeaders] table_headers: (optional) An array of table-level
cells that apply as headers to all the other cells in the current table.
:param List[RowHeaders] row_headers: (optional) An array of row-level
cells, each applicable as a header to other cells in the same row as
itself, of the current table.
:param List[ColumnHeaders] column_headers: (optional) An array of
column-level cells, each applicable as a header to other cells in the same
column as itself, of the current table.
:param List[BodyCells] body_cells: (optional) An array of cells that are
neither table header nor column header nor row header cells, of the current
table with corresponding row and column header associations.
:param List[Contexts] contexts: (optional) An array of objects that list
text that is related to the table contents and that precedes or follows the
current table.
:param List[KeyValuePair] key_value_pairs: (optional) An array of key-value
pairs identified in the current table.
"""
self.location = location
self.text = text
self.section_title = section_title
self.title = title
self.table_headers = table_headers
self.row_headers = row_headers
self.column_headers = column_headers
self.body_cells = body_cells
self.contexts = contexts
self.key_value_pairs = key_value_pairs
@classmethod
def from_dict(cls, _dict: Dict) -> 'Tables':
"""Initialize a Tables object from a json dictionary."""
args = {}
valid_keys = [
'location', 'text', 'section_title', 'title', 'table_headers',
'row_headers', 'column_headers', 'body_cells', 'contexts',
'key_value_pairs'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Tables: ' +
', '.join(bad_keys))
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'section_title' in _dict:
args['section_title'] = SectionTitle._from_dict(
_dict.get('section_title'))
if 'title' in _dict:
args['title'] = TableTitle._from_dict(_dict.get('title'))
if 'table_headers' in _dict:
args['table_headers'] = [
TableHeaders._from_dict(x) for x in (_dict.get('table_headers'))
]
if 'row_headers' in _dict:
args['row_headers'] = [
RowHeaders._from_dict(x) for x in (_dict.get('row_headers'))
]
if 'column_headers' in _dict:
args['column_headers'] = [
ColumnHeaders._from_dict(x)
for x in (_dict.get('column_headers'))
]
if 'body_cells' in _dict:
args['body_cells'] = [
BodyCells._from_dict(x) for x in (_dict.get('body_cells'))
]
if 'contexts' in _dict:
args['contexts'] = [
Contexts._from_dict(x) for x in (_dict.get('contexts'))
]
if 'key_value_pairs' in _dict:
args['key_value_pairs'] = [
KeyValuePair._from_dict(x)
for x in (_dict.get('key_value_pairs'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Tables object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'section_title') and self.section_title is not None:
_dict['section_title'] = self.section_title._to_dict()
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title._to_dict()
if hasattr(self, 'table_headers') and self.table_headers is not None:
_dict['table_headers'] = [x._to_dict() for x in self.table_headers]
if hasattr(self, 'row_headers') and self.row_headers is not None:
_dict['row_headers'] = [x._to_dict() for x in self.row_headers]
if hasattr(self, 'column_headers') and self.column_headers is not None:
_dict['column_headers'] = [
x._to_dict() for x in self.column_headers
]
if hasattr(self, 'body_cells') and self.body_cells is not None:
_dict['body_cells'] = [x._to_dict() for x in self.body_cells]
if hasattr(self, 'contexts') and self.contexts is not None:
_dict['contexts'] = [x._to_dict() for x in self.contexts]
if hasattr(self,
'key_value_pairs') and self.key_value_pairs is not None:
_dict['key_value_pairs'] = [
x._to_dict() for x in self.key_value_pairs
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Tables object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Tables') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Tables') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TerminationDates():
"""
Termination dates identified in the input document.
:attr str confidence_level: (optional) The confidence level in the
identification of the termination date.
:attr str text: (optional) The termination date.
:attr str text_normalized: (optional) The normalized form of the termination
date, which is listed as a string. This element is optional; it is returned only
if normalized text exists.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
def __init__(self,
*,
confidence_level: str = None,
text: str = None,
text_normalized: str = None,
provenance_ids: List[str] = None,
location: 'Location' = None) -> None:
"""
Initialize a TerminationDates object.
:param str confidence_level: (optional) The confidence level in the
identification of the termination date.
:param str text: (optional) The termination date.
:param str text_normalized: (optional) The normalized form of the
termination date, which is listed as a string. This element is optional; it
is returned only if normalized text exists.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
"""
self.confidence_level = confidence_level
self.text = text
self.text_normalized = text_normalized
self.provenance_ids = provenance_ids
self.location = location
@classmethod
def from_dict(cls, _dict: Dict) -> 'TerminationDates':
"""Initialize a TerminationDates object from a json dictionary."""
args = {}
valid_keys = [
'confidence_level', 'text', 'text_normalized', 'provenance_ids',
'location'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TerminationDates: '
+ ', '.join(bad_keys))
if 'confidence_level' in _dict:
args['confidence_level'] = _dict.get('confidence_level')
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'text_normalized' in _dict:
args['text_normalized'] = _dict.get('text_normalized')
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TerminationDates object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'confidence_level') and self.confidence_level is not None:
_dict['confidence_level'] = self.confidence_level
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TerminationDates object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TerminationDates') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TerminationDates') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfidenceLevelEnum(Enum):
"""
The confidence level in the identification of the termination date.
"""
HIGH = "High"
MEDIUM = "Medium"
LOW = "Low"
class TypeLabel():
"""
Identification of a specific type.
:attr Label label: (optional) A pair of `nature` and `party` objects. The
`nature` object identifies the effect of the element on the identified `party`,
and the `party` object identifies the affected party.
:attr List[str] provenance_ids: (optional) Hashed values that you can send to
IBM to provide feedback or receive support.
"""
def __init__(self,
*,
label: 'Label' = None,
provenance_ids: List[str] = None) -> None:
"""
Initialize a TypeLabel object.
:param Label label: (optional) A pair of `nature` and `party` objects. The
`nature` object identifies the effect of the element on the identified
`party`, and the `party` object identifies the affected party.
:param List[str] provenance_ids: (optional) Hashed values that you can send
to IBM to provide feedback or receive support.
"""
self.label = label
self.provenance_ids = provenance_ids
@classmethod
def from_dict(cls, _dict: Dict) -> 'TypeLabel':
"""Initialize a TypeLabel object from a json dictionary."""
args = {}
valid_keys = ['label', 'provenance_ids']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TypeLabel: '
+ ', '.join(bad_keys))
if 'label' in _dict:
args['label'] = Label._from_dict(_dict.get('label'))
if 'provenance_ids' in _dict:
args['provenance_ids'] = _dict.get('provenance_ids')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TypeLabel object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label._to_dict()
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TypeLabel object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TypeLabel') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TypeLabel') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TypeLabelComparison():
"""
Identification of a specific type.
:attr Label label: (optional) A pair of `nature` and `party` objects. The
`nature` object identifies the effect of the element on the identified `party`,
and the `party` object identifies the affected party.
"""
def __init__(self, *, label: 'Label' = None) -> None:
"""
Initialize a TypeLabelComparison object.
:param Label label: (optional) A pair of `nature` and `party` objects. The
`nature` object identifies the effect of the element on the identified
`party`, and the `party` object identifies the affected party.
"""
self.label = label
@classmethod
def from_dict(cls, _dict: Dict) -> 'TypeLabelComparison':
"""Initialize a TypeLabelComparison object from a json dictionary."""
args = {}
valid_keys = ['label']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class TypeLabelComparison: '
+ ', '.join(bad_keys))
if 'label' in _dict:
args['label'] = Label._from_dict(_dict.get('label'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TypeLabelComparison object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'label') and self.label is not None:
_dict['label'] = self.label._to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TypeLabelComparison object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'TypeLabelComparison') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TypeLabelComparison') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class UnalignedElement():
"""
Element that does not align semantically between two compared documents.
:attr str document_label: (optional) The label assigned to the document by the
value of the `file_1_label` or `file_2_label` parameters on the **Compare two
documents** method.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text of the element.
:attr List[TypeLabelComparison] types: (optional) Description of the action
specified by the element and whom it affects.
:attr List[CategoryComparison] categories: (optional) List of functional
categories into which the element falls; in other words, the subject matter of
the element.
:attr List[Attribute] attributes: (optional) List of document attributes.
"""
def __init__(self,
*,
document_label: str = None,
location: 'Location' = None,
text: str = None,
types: List['TypeLabelComparison'] = None,
categories: List['CategoryComparison'] = None,
attributes: List['Attribute'] = None) -> None:
"""
        Initialize an UnalignedElement object.
:param str document_label: (optional) The label assigned to the document by
the value of the `file_1_label` or `file_2_label` parameters on the
**Compare two documents** method.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text of the element.
:param List[TypeLabelComparison] types: (optional) Description of the
action specified by the element and whom it affects.
:param List[CategoryComparison] categories: (optional) List of functional
categories into which the element falls; in other words, the subject matter
of the element.
:param List[Attribute] attributes: (optional) List of document attributes.
"""
self.document_label = document_label
self.location = location
self.text = text
self.types = types
self.categories = categories
self.attributes = attributes
@classmethod
def from_dict(cls, _dict: Dict) -> 'UnalignedElement':
"""Initialize a UnalignedElement object from a json dictionary."""
args = {}
valid_keys = [
'document_label', 'location', 'text', 'types', 'categories',
'attributes'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class UnalignedElement: '
+ ', '.join(bad_keys))
if 'document_label' in _dict:
args['document_label'] = _dict.get('document_label')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'types' in _dict:
args['types'] = [
TypeLabelComparison._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
CategoryComparison._from_dict(x)
for x in (_dict.get('categories'))
]
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UnalignedElement object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_label') and self.document_label is not None:
_dict['document_label'] = self.document_label
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UnalignedElement object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'UnalignedElement') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UnalignedElement') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class UpdatedLabelsIn():
"""
The updated labeling from the input document, accounting for the submitted feedback.
:attr List[TypeLabel] types: Description of the action specified by the element
and whom it affects.
:attr List[Category] categories: List of functional categories into which the
element falls; in other words, the subject matter of the element.
"""
def __init__(self, types: List['TypeLabel'],
categories: List['Category']) -> None:
"""
        Initialize an UpdatedLabelsIn object.
:param List[TypeLabel] types: Description of the action specified by the
element and whom it affects.
:param List[Category] categories: List of functional categories into which
the element falls; in other words, the subject matter of the element.
"""
self.types = types
self.categories = categories
@classmethod
def from_dict(cls, _dict: Dict) -> 'UpdatedLabelsIn':
"""Initialize a UpdatedLabelsIn object from a json dictionary."""
args = {}
valid_keys = ['types', 'categories']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class UpdatedLabelsIn: '
+ ', '.join(bad_keys))
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
else:
raise ValueError(
'Required property \'types\' not present in UpdatedLabelsIn JSON'
)
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
else:
raise ValueError(
'Required property \'categories\' not present in UpdatedLabelsIn JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UpdatedLabelsIn object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UpdatedLabelsIn object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'UpdatedLabelsIn') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UpdatedLabelsIn') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class UpdatedLabelsOut():
"""
The updated labeling from the input document, accounting for the submitted feedback.
:attr List[TypeLabel] types: (optional) Description of the action specified by
the element and whom it affects.
:attr List[Category] categories: (optional) List of functional categories into
which the element falls; in other words, the subject matter of the element.
    :attr str modification: (optional) The type of modification of the feedback
    entry in the `updated_labels` array. Possible values are `added`, `not_changed`, and
`removed`.
"""
def __init__(self,
*,
types: List['TypeLabel'] = None,
categories: List['Category'] = None,
modification: str = None) -> None:
"""
        Initialize an UpdatedLabelsOut object.
:param List[TypeLabel] types: (optional) Description of the action
specified by the element and whom it affects.
:param List[Category] categories: (optional) List of functional categories
into which the element falls; in other words, the subject matter of the
element.
        :param str modification: (optional) The type of modification of the
        feedback entry in the `updated_labels` array. Possible values are `added`,
`not_changed`, and `removed`.
"""
self.types = types
self.categories = categories
self.modification = modification
@classmethod
def from_dict(cls, _dict: Dict) -> 'UpdatedLabelsOut':
"""Initialize a UpdatedLabelsOut object from a json dictionary."""
args = {}
valid_keys = ['types', 'categories', 'modification']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class UpdatedLabelsOut: '
+ ', '.join(bad_keys))
if 'types' in _dict:
args['types'] = [
TypeLabel._from_dict(x) for x in (_dict.get('types'))
]
if 'categories' in _dict:
args['categories'] = [
Category._from_dict(x) for x in (_dict.get('categories'))
]
if 'modification' in _dict:
args['modification'] = _dict.get('modification')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UpdatedLabelsOut object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'modification') and self.modification is not None:
_dict['modification'] = self.modification
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UpdatedLabelsOut object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'UpdatedLabelsOut') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UpdatedLabelsOut') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ModificationEnum(Enum):
"""
    The type of modification of the feedback entry in the `updated_labels` array.
Possible values are `added`, `not_changed`, and `removed`.
"""
ADDED = "added"
NOT_CHANGED = "not_changed"
REMOVED = "removed"
class Value():
"""
A value in a key-value pair.
:attr str cell_id: (optional) The unique ID of the value in the table.
:attr Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:attr str text: (optional) The text content of the table cell without HTML
markup.
"""
def __init__(self,
*,
cell_id: str = None,
location: 'Location' = None,
text: str = None) -> None:
"""
Initialize a Value object.
:param str cell_id: (optional) The unique ID of the value in the table.
:param Location location: (optional) The numeric location of the identified
element in the document, represented with two integers labeled `begin` and
`end`.
:param str text: (optional) The text content of the table cell without HTML
markup.
"""
self.cell_id = cell_id
self.location = location
self.text = text
@classmethod
def from_dict(cls, _dict: Dict) -> 'Value':
"""Initialize a Value object from a json dictionary."""
args = {}
valid_keys = ['cell_id', 'location', 'text']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class Value: ' +
', '.join(bad_keys))
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Value object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cell_id') and self.cell_id is not None:
_dict['cell_id'] = self.cell_id
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Value object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'Value') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Value') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
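# Minimal usage sketch (not part of the generated model classes above): every
# class in this module follows the same from_dict/to_dict round trip and raises
# ValueError when a dictionary contains unrecognized keys. The sample values
# below are invented, and `Location` is assumed to be the model class defined
# earlier in this module with `begin`/`end` attributes.
if __name__ == '__main__':
    _sample = {
        'cell_id': 'valueCell-1',
        'location': {'begin': 120, 'end': 128},
        'text': '42',
    }
    _value = Value.from_dict(_sample)    # unknown keys would raise ValueError
    assert _value.to_dict() == _sample   # the round trip reproduces the input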
import torch
from torch.nn import functional as F
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import count_num_param
from dassl.engine import TRAINER_REGISTRY, TrainerX
from dassl.engine.trainer import SimpleNet
@TRAINER_REGISTRY.register()
class CrossGrad(TrainerX):
"""Cross-gradient training.
https://arxiv.org/abs/1804.10745.
"""
def __init__(self, cfg):
super().__init__(cfg)
self.eps_f = cfg.TRAINER.CG.EPS_F
self.eps_d = cfg.TRAINER.CG.EPS_D
self.alpha_f = cfg.TRAINER.CG.ALPHA_F
self.alpha_d = cfg.TRAINER.CG.ALPHA_D
def build_model(self):
cfg = self.cfg
print('Building F')
self.F = SimpleNet(cfg, cfg.MODEL, self.num_classes)
self.F.to(self.device)
print('# params: {:,}'.format(count_num_param(self.F)))
self.optim_F = build_optimizer(self.F, cfg.OPTIM)
self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)
self.register_model('F', self.F, self.optim_F, self.sched_F)
print('Building D')
self.D = SimpleNet(cfg, cfg.MODEL, self.dm.num_source_domains)
self.D.to(self.device)
print('# params: {:,}'.format(count_num_param(self.D)))
self.optim_D = build_optimizer(self.D, cfg.OPTIM)
self.sched_D = build_lr_scheduler(self.optim_D, cfg.OPTIM)
self.register_model('D', self.D, self.optim_D, self.sched_D)
def forward_backward(self, batch):
input, label, domain = self.parse_batch_train(batch)
input.requires_grad = True
# Compute domain perturbation
loss_d = F.cross_entropy(self.D(input), domain)
loss_d.backward()
grad_d = torch.clamp(input.grad.data, min=-0.1, max=0.1)
input_d = input.data + self.eps_f * grad_d
# Compute label perturbation
input.grad.data.zero_()
loss_f = F.cross_entropy(self.F(input), label)
loss_f.backward()
grad_f = torch.clamp(input.grad.data, min=-0.1, max=0.1)
input_f = input.data + self.eps_d * grad_f
input = input.detach()
# Update label net
loss_f1 = F.cross_entropy(self.F(input), label)
loss_f2 = F.cross_entropy(self.F(input_d), label)
loss_f = (1 - self.alpha_f) * loss_f1 + self.alpha_f * loss_f2
self.model_backward_and_update(loss_f, 'F')
# Update domain net
loss_d1 = F.cross_entropy(self.D(input), domain)
loss_d2 = F.cross_entropy(self.D(input_f), domain)
loss_d = (1 - self.alpha_d) * loss_d1 + self.alpha_d * loss_d2
self.model_backward_and_update(loss_d, 'D')
output_dict = {
'loss_f': loss_f.item(),
'loss_d': loss_d.item(),
'lr': self.optim_F.param_groups[0]['lr']
}
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
return output_dict
def model_inference(self, input):
return self.F(input)
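# Standalone sketch (not part of Dassl) of the perturbation step used in
# forward_backward above: the input is nudged along the clamped gradient of the
# *other* network's loss, which is what makes the training "cross-gradient".
# The two linear layers are toy stand-ins for the label net F and domain net D.
if __name__ == '__main__':
    x = torch.randn(8, 16, requires_grad=True)   # toy batch of 8 feature vectors
    label = torch.randint(0, 4, (8,))            # class labels
    domain = torch.randint(0, 3, (8,))           # domain labels
    net_f = torch.nn.Linear(16, 4)               # stand-in for the label net F
    net_d = torch.nn.Linear(16, 3)               # stand-in for the domain net D
    eps_f = 1.0
    # Gradient of the domain loss w.r.t. the input, clamped as in the trainer
    loss_d = F.cross_entropy(net_d(x), domain)
    loss_d.backward()
    grad_d = torch.clamp(x.grad.data, min=-0.1, max=0.1)
    x_d = x.data + eps_f * grad_d                # domain-perturbed input
    # The label net is then trained on both x and x_d (see forward_backward)
    loss_f = F.cross_entropy(net_f(x_d), label)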
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class DeleteGlobalAccelerationInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'DeleteGlobalAccelerationInstance','vpc')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_GlobalAccelerationInstanceId(self):
return self.get_query_params().get('GlobalAccelerationInstanceId')
def set_GlobalAccelerationInstanceId(self,GlobalAccelerationInstanceId):
self.add_query_param('GlobalAccelerationInstanceId',GlobalAccelerationInstanceId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
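# Usage sketch (not part of the generated request class): the request is
# normally sent through aliyunsdkcore's AcsClient. The credentials, region, and
# instance ID below are placeholders, so running this requires a real account.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = DeleteGlobalAccelerationInstanceRequest()
    request.set_GlobalAccelerationInstanceId('<ga-instance-id>')
    response = client.do_action_with_exception(request)
    print(response)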
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import itertools
import collections
import matplotlib.pyplot as plt
# Read in data
df = pd.read_csv("Chinese_Names_Corpus_Gender(120W).txt", header=2)
df = df[df.sex != "未知"]
names = df["dict"].values
# Compute character frequency
chars = [list(name) for name in names]
chars_flatten = list(itertools.chain(*chars))
freq = collections.Counter(chars_flatten)
freq = pd.DataFrame(freq.items(), columns=["char", "freq"])
freq = freq.sort_values(by="freq", ascending=False)
# Power law (?)
char_rank = np.arange(freq.shape[0])
char_freq = freq["freq"].values
plt.plot(char_rank, char_freq)
plt.plot(np.log(1.0 + char_rank), np.log(char_freq))
# Prepare data
dict_size = 500
dict = list(freq["char"].values[:dict_size])
dict_set = set(dict)
filtered = list(filter(lambda item: set(item[1]).issubset(dict_set), enumerate(names)))
ind = [idx for idx, name in filtered]
dat = df.iloc[ind]
dat["y"] = np.where(dat["sex"] == "男", 0, 1)
# Split training set and test set
# train = dat.sample(frac=0.8, random_state=123)
# test = dat.drop(train.index)
train = dat.sample(n=10000, random_state=123)
test = dat.sample(n=1000, random_state=321)
# One-hot encoding
def char2index(char):
return dict.index(char)
def name2index(name):
return [char2index(char) for char in name]
def name2tensor(name):
tensor = torch.zeros(len(name), 1, dict_size)
for i, char in enumerate(name):
tensor[i, 0, char2index(char)] = 1
return tensor
char2index("李")
name2index("李兴")
name2tensor("李兴")
# Build model
class RNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.h2o = nn.Linear(hidden_size, 1)
def forward(self, input, hidden):
combined = torch.cat((input, hidden), dim=1)
hidden = torch.tanh(self.i2h(combined))
output = torch.sigmoid(self.h2o(hidden))
return output, hidden
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
# n_hidden = 128
# rnn = RNN(dict_size, n_hidden)
# input = name2tensor("李兴")
# hidden = rnn.init_hidden()
# output, next_hidden = rnn(input[0], hidden)
np.random.seed(123)
torch.random.manual_seed(123)
n = train.shape[0]
n_hidden = 64
nepoch = 5
bs = 100
rnn = RNN(dict_size, n_hidden)
opt = torch.optim.Adam(rnn.parameters(), lr=0.001)
train_ind = np.arange(n)
losses = []
t1 = time.time()
for k in range(nepoch):
np.random.shuffle(train_ind)
# Update on mini-batches
for j in range(0, n, bs):
# Create mini-batch
mb = train.iloc[train_ind[j:(j + bs)]]
mb_size = mb.shape[0]
loss = 0.0
# Loop over each name in the mini-batch
for i in range(mb_size):
name = mb["dict"].values[i]
input = name2tensor(name)
hidden = rnn.init_hidden()
y = mb["y"].values[i]
for s in range(input.shape[0]):
output, hidden = rnn(input[s], hidden)
loss = loss - y * torch.log(output) - (1.0 - y) * torch.log(1.0 - output)
loss = loss / mb_size
opt.zero_grad()
loss.backward()
opt.step()
losses.append(loss.item())
if j // bs % 10 == 0:
print(f"epoch {k}, batch {j // bs}, loss = {loss.item()}")
t2 = time.time()
print(t2 - t1)
plt.plot(losses)
# Prediction on test set
ntest = test.shape[0]
true_label = test["y"].values
pred = np.zeros(ntest)
rnn.eval()
for i in range(ntest):
input = name2tensor(test["dict"].values[i])
hidden = rnn.init_hidden()
with torch.no_grad():
for s in range(input.shape[0]):
output, hidden = rnn(input[s], hidden)
pred[i] = output.item()
if i % 100 == 0:
print(f"processed {i}")
loss = -np.mean(true_label * np.log(pred) + (1.0 - true_label) * np.log(1.0 - pred))
print(loss)
pred_label = (pred > 0.5).astype(int)
print(np.mean(pred_label == true_label))
# Random cases
np.random.seed(123)
torch.random.manual_seed(123)
ind = np.random.choice(ntest, 10)
ypred = 1 * (pred[ind] > 0.5)
print(test.iloc[ind])
print(test["y"].values[ind])
print(ypred)
names = ["李", "李雪", "李雪峰"]
for name in names:
input = name2tensor(name)
hidden = rnn.init_hidden()
with torch.no_grad():
for s in range(input.shape[0]):
output, hidden = rnn(input[s], hidden)
pred = output.item()
print(f"namae: {name}, P(female) = {pred}")
| [
[
[
7,
11
],
[
2641,
2645
],
[
3577,
3581
]
],
[
[
20,
31
],
[
609,
611
],
[
709,
711
],
[
734,
736
],
[
1016,
1018
],
[
2398,
2400
],
[
2607,
2609
],
[
2683,
2685
],
[
3715,
3717
],
[
4061,
4063
],
[
4082,
4084
],
[
4118,
4120
],
[
4197,
4199
],
[
4251,
4253
],
[
4309,
4311
]
],
[
[
40,
52
],
[
185,
187
],
[
471,
473
]
],
[
[
61,
66
],
[
2419,
2424
],
[
2549,
2554
],
[
3271,
3276
],
[
3303,
3308
],
[
3858,
3863
],
[
4272,
4277
],
[
4562,
4567
],
[
1448,
1453
],
[
1988,
1993
],
[
2040,
2045
],
[
2089,
2094
],
[
2198,
2203
]
],
[
[
75,
89
],
[
1673,
1675
],
[
1832,
1834
],
[
1901,
1903
]
],
[
[
98,
107
],
[
395,
404
]
],
[
[
116,
127
],
[
428,
439
]
],
[
[
136,
160
],
[
668,
671
],
[
700,
703
],
[
3608,
3611
]
],
[
[
180,
182
],
[
254,
256
],
[
257,
259
]
],
[
[
249,
251
],
[
282,
284
],
[
991,
993
]
],
[
[
274,
279
],
[
366,
371
],
[
936,
941
]
],
[
[
334,
339
],
[
412,
417
]
],
[
[
374,
387
],
[
448,
461
]
],
[
[
421,
425
],
[
484,
488
]
],
[
[
464,
468
],
[
532,
536
]
],
[
[
525,
529
],
[
619,
623
],
[
647,
651
],
[
801,
805
]
],
[
[
597,
606
],
[
677,
686
],
[
722,
731
]
],
[
[
635,
644
],
[
688,
697
],
[
741,
750
]
],
[
[
772,
781
],
[
822,
831
],
[
2521,
2530
],
[
1474,
1483
]
],
[
[
789,
793
],
[
850,
854
],
[
1318,
1322
]
],
[
[
835,
843
],
[
915,
923
]
],
[
[
857,
865
],
[
974,
982
]
],
[
[
946,
949
],
[
999,
1002
]
],
[
[
985,
988
],
[
1025,
1028
],
[
1005,
1008
],
[
1178,
1181
],
[
1224,
1227
]
],
[
[
1170,
1175
],
[
2456,
2461
],
[
2816,
2821
]
],
[
[
1217,
1221
],
[
3662,
3666
],
[
3690,
3694
],
[
3792,
3796
],
[
4375,
4379
],
[
4398,
4402
]
],
[
[
1288,
1298
],
[
1588,
1598
],
[
1373,
1383
],
[
1544,
1554
]
],
[
[
1342,
1352
],
[
1605,
1615
]
],
[
[
1415,
1426
],
[
1623,
1634
],
[
3046,
3057
],
[
3780,
3791
],
[
4502,
4513
]
],
[
[
1669,
1672
],
[
2517,
2520
],
[
1750,
1753
]
],
[
[
2452,
2453
],
[
2617,
2618
],
[
2765,
2766
]
],
[
[
2472,
2480
],
[
2532,
2540
]
],
[
[
2487,
2493
],
[
2669,
2675
]
],
[
[
2499,
2501
],
[
2768,
2770
],
[
2844,
2846
],
[
3485,
3487
],
[
3543,
3545
]
],
[
[
2511,
2514
],
[
2566,
2569
],
[
3086,
3089
],
[
3218,
3221
],
[
3732,
3735
],
[
3830,
3833
],
[
3946,
3949
],
[
4534,
4537
],
[
4650,
4653
]
],
[
[
2543,
2546
],
[
3369,
3372
],
[
3419,
3422
]
],
[
[
2595,
2604
],
[
2701,
2710
],
[
2827,
2836
]
],
[
[
2621,
2627
],
[
3441,
3447
],
[
3617,
3623
]
],
[
[
2636,
2638
],
[
3601,
3603
]
],
[
[
2658,
2659
],
[
3527,
3528
]
],
[
[
2751,
2752
],
[
2837,
2838
],
[
2840,
2841
],
[
3480,
3481
],
[
3538,
3539
]
],
[
[
2811,
2813
],
[
2869,
2871
],
[
3004,
3006
],
[
3121,
3123
]
],
[
[
2859,
2866
],
[
2974,
2981
],
[
3352,
3359
]
],
[
[
2890,
2894
],
[
3260,
3264
],
[
3345,
3349
]
],
[
[
2963,
2964
],
[
3022,
3023
],
[
3136,
3137
]
],
[
[
2997,
3001
],
[
3058,
3062
]
],
[
[
3038,
3043
],
[
3167,
3172
],
[
3222,
3227
]
],
[
[
3077,
3083
],
[
3232,
3238
]
],
[
[
3117,
3118
],
[
3267,
3268
],
[
3298,
3299
]
],
[
[
3156,
3157
],
[
3228,
3229
]
],
[
[
3201,
3207
],
[
3281,
3287
],
[
3319,
3325
],
[
3983,
3989
],
[
4684,
4690
]
],
[
[
3209,
3215
],
[
3232,
3238
]
],
[
[
3253,
3257
],
[
3260,
3264
],
[
3345,
3349
]
],
[
[
3338,
3342
],
[
3394,
3398
],
[
3455,
3459
],
[
3556,
3560
]
],
[
[
3572,
3574
],
[
3596,
3598
]
],
[
[
3654,
3659
],
[
3724,
3729
],
[
3759,
3764
],
[
4326,
4331
]
],
[
[
3677,
3687
],
[
4069,
4079
],
[
4104,
4114
],
[
4219,
4229
]
],
[
[
3708,
3712
],
[
3973,
3977
],
[
4089,
4093
],
[
4131,
4135
],
[
4166,
4170
],
[
4351,
4355
]
],
[
[
3748,
3749
],
[
3812,
3813
],
[
3978,
3979
],
[
4005,
4006
],
[
4047,
4048
]
],
[
[
3772,
3777
],
[
3899,
3904
],
[
3950,
3955
]
],
[
[
3821,
3827
],
[
3960,
3966
]
],
[
[
3888,
3889
],
[
3956,
3957
]
],
[
[
3929,
3935
],
[
3983,
3989
],
[
4684,
4690
]
],
[
[
3937,
3943
],
[
3960,
3966
]
],
[
[
4053,
4057
],
[
4145,
4149
]
],
[
[
4152,
4162
],
[
4205,
4215
]
],
[
[
4303,
4306
],
[
4356,
4359
],
[
4385,
4388
],
[
4415,
4418
]
],
[
[
4338,
4343
],
[
4428,
4433
]
],
[
[
4442,
4447
],
[
4482,
4487
]
],
[
[
4474,
4478
],
[
4514,
4518
],
[
4719,
4723
]
],
[
[
4494,
4499
],
[
4603,
4608
],
[
4654,
4659
]
],
[
[
4525,
4531
],
[
4664,
4670
]
],
[
[
4592,
4593
],
[
4660,
4661
]
],
[
[
4633,
4639
],
[
4684,
4690
]
],
[
[
4641,
4647
],
[
4664,
4670
]
],
[
[
4677,
4681
],
[
4739,
4743
]
]
] |
import sys
import numpy as np
import pennylane as qml
import pytest
import qiskit
from pennylane_qiskit import AerDevice, BasicAerDevice
from conftest import state_backends
pldevices = [("qiskit.aer", qiskit.Aer), ("qiskit.basicaer", qiskit.BasicAer)]
class TestDeviceIntegration:
"""Test the devices work correctly from the PennyLane frontend."""
@pytest.mark.parametrize("d", pldevices)
def test_load_device(self, d, backend):
"""Test that the qiskit device loads correctly"""
dev = qml.device(d[0], wires=2, backend=backend, shots=1024)
assert dev.num_wires == 2
assert dev.shots == 1024
assert dev.short_name == d[0]
assert dev.provider == d[1]
def test_incorrect_backend(self):
"""Test that exception is raised if name is incorrect"""
with pytest.raises(ValueError, match="Backend 'none' does not exist"):
qml.device("qiskit.aer", wires=2, backend="none")
def test_incorrect_backend_wires(self):
"""Test that exception is raised if number of wires is too large"""
with pytest.raises(ValueError, match=r"Backend 'statevector\_simulator' supports maximum"):
qml.device("qiskit.aer", wires=100, backend="statevector_simulator")
def test_args(self):
"""Test that the device requires correct arguments"""
with pytest.raises(TypeError, match="missing 1 required positional argument"):
qml.device("qiskit.aer")
with pytest.raises(qml.DeviceError, match="specified number of shots needs to be at least 1"):
qml.device("qiskit.aer", backend="qasm_simulator", wires=1, shots=0)
@pytest.mark.parametrize("d", pldevices)
@pytest.mark.parametrize("analytic", [True, False])
@pytest.mark.parametrize("shots", [8192])
def test_one_qubit_circuit(self, shots, analytic, d, backend, tol):
"""Test that devices provide correct result for a simple circuit"""
if backend not in state_backends and analytic:
pytest.skip("Hardware simulators do not support analytic mode")
dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)
a = 0.543
b = 0.123
c = 0.987
@qml.qnode(dev)
def circuit(x, y, z):
"""Reference QNode"""
qml.BasisState(np.array([1]), wires=0)
qml.Hadamard(wires=0)
qml.Rot(x, y, z, wires=0)
return qml.expval(qml.PauliZ(0))
assert np.allclose(circuit(a, b, c), np.cos(a) * np.sin(b), **tol)
@pytest.mark.parametrize("d", pldevices)
@pytest.mark.parametrize("analytic", [False])
@pytest.mark.parametrize("shots", [8192])
    def test_basis_state_and_rot(self, shots, analytic, d, backend, tol):
        """Integration test for the BasisState and Rot operations when analytic
        is False"""
dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)
a = 0
b = 0
c = np.pi
expected = 1
@qml.qnode(dev)
def circuit(x, y, z):
"""Reference QNode"""
qml.BasisState(np.array([0]), wires=0)
qml.Rot(x, y, z, wires=0)
return qml.expval(qml.PauliZ(0))
assert np.allclose(circuit(a, b, c), expected, **tol)
def test_gradient_for_tensor_product(self):
"""Test that the gradient of a circuit containing a tensor product is
computed without any errors."""
n_qubits = 2
depth = 2
def ansatz(weights):
weights = weights.reshape(depth, n_qubits)
qml.RX(weights[0][0], wires=[0])
qml.RZ(weights[0][1], wires=[0])
qml.RX(weights[1][0], wires=[0])
qml.RZ(weights[1][1], wires=[0])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
dev_qsk = qml.device(
"qiskit.aer",
wires=n_qubits,
shots=1000,
backend="qasm_simulator",
)
weights = np.random.random((depth, n_qubits)).flatten()
# Want to get expectation value and gradient
exp_sampled = qml.QNode(ansatz, dev_qsk, diff_method="parameter-shift")
grad_shift = qml.grad(exp_sampled, argnum=0)
exp_sampled(weights)
grad_shift(weights)
class TestKeywordArguments:
"""Test keyword argument logic is correct"""
@pytest.mark.parametrize("d", pldevices)
def test_compile_backend(self, d):
"""Test that the compile backend argument is properly
extracted"""
dev = qml.device(d[0], wires=2, compile_backend="test value")
assert dev.compile_backend == "test value"
def test_noise_model(self):
"""Test that the noise model argument is properly
extracted if the backend supports it"""
dev = qml.device("qiskit.aer", wires=2, noise_model="test value")
assert dev.noise_model == "test value"
def test_invalid_noise_model(self):
"""Test that the noise model argument causes an exception to be raised
if the backend does not support it"""
with pytest.raises(ValueError, match="does not support noisy simulations"):
dev = qml.device("qiskit.basicaer", wires=2, noise_model="test value")
def test_overflow_kwargs(self):
"""Test all overflow kwargs are extracted for the AerDevice"""
dev = qml.device('qiskit.aer', wires=2, k1="v1", k2="v2")
assert dev.run_args["k1"] == "v1"
assert dev.run_args["k2"] == "v2"
class TestLoadIntegration:
"""Integration tests for the PennyLane load function. This test ensures that the PennyLane-Qiskit
specific load functions integrate properly with the PennyLane-Qiskit plugin."""
hadamard_qasm = 'OPENQASM 2.0;' \
'include "qelib1.inc";' \
'qreg q[1];' \
'h q[0];'
def test_load_qiskit_circuit(self):
"""Test that the default load function works correctly."""
theta = qiskit.circuit.Parameter('θ')
qc = qiskit.QuantumCircuit(2)
qc.rx(theta, 0)
my_template = qml.load(qc, format='qiskit')
dev = qml.device('default.qubit', wires=2)
angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])
@qml.qnode(dev)
def loaded_quantum_circuit(angle):
my_template({theta: angle})
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit(angle):
qml.RX(angle, wires=[0])
return qml.expval(qml.PauliZ(0))
for x in angles:
assert np.allclose(loaded_quantum_circuit(x), quantum_circuit(x))
def test_load_from_qasm_string(self):
"""Test that quantum circuits can be loaded from a qasm string."""
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def loaded_quantum_circuit():
qml.from_qasm(TestLoadIntegration.hadamard_qasm)(wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit():
qml.Hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
assert np.allclose(loaded_quantum_circuit(), quantum_circuit())
@pytest.mark.skipif(sys.version_info < (3, 6), reason="tmpdir fixture requires Python >=3.6")
def test_load_qasm_from_file(self, tmpdir):
"""Test that quantum circuits can be loaded from a qasm file."""
apply_hadamard = tmpdir.join("hadamard.qasm")
with open(apply_hadamard, "w") as f:
f.write(TestLoadIntegration.hadamard_qasm)
hadamard = qml.from_qasm_file(apply_hadamard)
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def loaded_quantum_circuit():
hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit():
qml.Hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
assert np.allclose(loaded_quantum_circuit(), quantum_circuit())
class TestPLOperations:
"""Integration tests for checking certain PennyLane specific operations."""
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_rotation(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the QubitStateVector and Rot operations are decomposed using a
Qiskit device with statevector backend"""
dev = state_vector_device(1)
if dev.backend_name == "unitary_simulator":
pytest.skip("Test only runs for backends that are not the unitary simulator.")
state = init_state(1)
a = 0.542
b = 1.3432
c = -0.654
I = np.eye(2)
Y = np.array([[0, -1j], [1j, 0]]) #: Pauli-Y matrix
Z = np.array([[1, 0], [0, -1]]) #: Pauli-Z matrix
def ry(theta):
return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Y
def rz(theta):
return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Z
@qml.qnode(dev)
def qubitstatevector_and_rot():
qml.QubitStateVector(state, wires=[0])
qml.Rot(a, b, c, wires=[0])
return qml.expval(qml.Identity(0))
qubitstatevector_and_rot()
assert np.allclose(np.abs(dev.state) ** 2, np.abs(rz(c) @ ry(b) @ rz(a) @ state) ** 2, **tol)
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_basisstate(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the Basisstate is decomposed using a Qiskit device with
statevector backend"""
dev = state_vector_device(2)
state = np.array([1, 0])
@qml.qnode(dev)
def basisstate():
qml.BasisState(state, wires=[0, 1])
return qml.expval(qml.Identity(0))
basisstate()
expected_state = np.zeros(2**dev.num_wires)
expected_state[2] = 1
assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_basisstate_init_all_zero_states(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the Basisstate that receives the all zero state is decomposed using
a Qiskit device with statevector backend"""
dev = state_vector_device(4)
state = np.array([0, 0, 0, 0])
@qml.qnode(dev)
def basisstate():
qml.BasisState(state, wires=[0, 1, 2, 3])
return qml.expval(qml.Identity(0))
basisstate()
expected_state = np.zeros(2**dev.num_wires)
expected_state[0] = 1
assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)
class TestInverses:
"""Integration tests checking that the inverse of the operations are applied."""
def test_inverse_of_operation(self):
"""Test that the inverse of operations works as expected
by comparing a simple circuit with default.qubit."""
dev = qml.device('default.qubit', wires=2)
dev2 = qml.device('qiskit.aer', backend='statevector_simulator', shots=5, wires=2, analytic=True)
angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])
@qml.qnode(dev)
def circuit_with_inverses(angle):
qml.Hadamard(0).inv()
qml.RX(angle, wires=0).inv()
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev2)
def circuit_with_inverses_default_qubit(angle):
qml.Hadamard(0).inv()
qml.RX(angle, wires=0).inv()
return qml.expval(qml.PauliZ(0))
for x in angles:
assert np.allclose(circuit_with_inverses(x), circuit_with_inverses_default_qubit(x))
| [
[
[
7,
10
],
[
7323,
7326
]
],
[
[
19,
30
],
[
2505,
2507
],
[
2535,
2537
],
[
2547,
2549
],
[
3013,
3015
],
[
3279,
3281
],
[
4074,
4076
],
[
6287,
6289
],
[
6679,
6681
],
[
7241,
7243
],
[
8073,
8075
],
[
8840,
8842
],
[
8862,
8864
],
[
8923,
8925
],
[
9415,
9417
],
[
9427,
9429
],
[
9451,
9453
],
[
9853,
9855
],
[
10064,
10066
],
[
10137,
10139
],
[
10149,
10151
],
[
10173,
10175
],
[
10614,
10616
],
[
10837,
10839
],
[
10910,
10912
],
[
10922,
10924
],
[
10946,
10948
],
[
11433,
11435
],
[
11918,
11920
],
[
2348,
2350
],
[
3156,
3158
],
[
9013,
9015
],
[
9042,
9044
],
[
9108,
9110
],
[
9137,
9139
]
],
[
[
38,
54
],
[
520,
523
],
[
911,
914
],
[
1194,
1197
],
[
1450,
1453
],
[
1503,
1506
],
[
1591,
1594
],
[
2102,
2105
],
[
2242,
2245
],
[
2897,
2900
],
[
3050,
3053
],
[
3877,
3880
],
[
4196,
4199
],
[
4275,
4278
],
[
4624,
4627
],
[
4884,
4887
],
[
5259,
5262
],
[
5446,
5449
],
[
6187,
6190
],
[
6232,
6235
],
[
6348,
6351
],
[
6501,
6504
],
[
6871,
6874
],
[
6918,
6921
],
[
7098,
7101
],
[
7693,
7696
],
[
7743,
7746
],
[
7790,
7793
],
[
7930,
7933
],
[
9170,
9173
],
[
9880,
9883
],
[
10647,
10650
],
[
11271,
11274
],
[
11324,
11327
],
[
11494,
11497
],
[
11681,
11684
],
[
2333,
2336
],
[
2384,
2387
],
[
2418,
2421
],
[
2463,
2466
],
[
2474,
2477
],
[
3141,
3144
],
[
3192,
3195
],
[
3237,
3240
],
[
3248,
3251
],
[
3629,
3632
],
[
3674,
3677
],
[
3719,
3722
],
[
3764,
3767
],
[
3816,
3819
],
[
3827,
3830
],
[
3843,
3846
],
[
6465,
6468
],
[
6476,
6479
],
[
6564,
6567
],
[
6608,
6611
],
[
6619,
6622
],
[
6983,
6986
],
[
7062,
7065
],
[
7073,
7076
],
[
7156,
7159
],
[
7199,
7202
],
[
7210,
7213
],
[
7894,
7897
],
[
7905,
7908
],
[
7988,
7991
],
[
8031,
8034
],
[
8042,
8045
],
[
9237,
9240
],
[
9288,
9291
],
[
9335,
9338
],
[
9346,
9349
],
[
9933,
9936
],
[
9988,
9991
],
[
9999,
10002
],
[
10700,
10703
],
[
10761,
10764
],
[
10772,
10775
],
[
11563,
11566
],
[
11597,
11600
],
[
11645,
11648
],
[
11656,
11659
],
[
11765,
11768
],
[
11799,
11802
],
[
11847,
11850
],
[
11858,
11861
]
],
[
[
62,
68
],
[
364,
370
],
[
1666,
1672
],
[
1711,
1717
],
[
1767,
1773
],
[
2571,
2577
],
[
2616,
2622
],
[
2666,
2672
],
[
4448,
4454
],
[
7304,
7310
],
[
8242,
8248
],
[
8288,
8294
],
[
9508,
9514
],
[
9554,
9560
],
[
10215,
10221
],
[
10261,
10267
],
[
833,
839
],
[
1095,
1101
],
[
1364,
1370
],
[
1489,
1495
],
[
2023,
2029
],
[
5170,
5176
],
[
8660,
8666
]
],
[
[
76,
82
],
[
205,
211
],
[
238,
244
],
[
6071,
6077
],
[
6115,
6121
]
],
[
[
113,
122
]
],
[
[
124,
138
]
],
[
[
161,
175
],
[
1982,
1996
]
],
[
[
177,
186
],
[
393,
402
],
[
1695,
1704
],
[
2600,
2609
],
[
4477,
4486
]
],
[
[
264,
285
]
],
[
[
4371,
4391
]
],
[
[
5590,
5609
],
[
7638,
7657
],
[
6997,
7016
]
],
[
[
8138,
8154
]
],
[
[
10990,
11002
]
]
] |
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util import."""
# pylint: disable=wildcard-import
from tensorflow_datasets.core.utils.image_utils import *
from tensorflow_datasets.core.utils.py_utils import *
from tensorflow_datasets.core.utils.tf_utils import *
from tensorflow_datasets.core.utils.tqdm_utils import *
from tensorflow_datasets.core.utils.version import Experiment
from tensorflow_datasets.core.utils.version import Version
# pylint: enable=wildcard-import
| [
[
[
721,
722
]
],
[
[
775,
776
]
],
[
[
829,
830
]
],
[
[
885,
886
]
],
[
[
938,
948
]
],
[
[
1000,
1007
]
]
] |
# -*- coding: utf-8 -*-
""" rwlock.py
A class to implement read-write locks on top of the standard threading
library.
This is implemented with two mutexes (threading.Lock instances) as per this
wikipedia pseudocode:
https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Using_two_mutexes
Code written by Tyler Neylon at Unbox Research.
This file is public domain.
Modified to add a w_demote function to convert a writer lock to a reader lock
"""
# _______________________________________________________________________
# Imports
from contextlib import contextmanager
from threading import Lock
# _______________________________________________________________________
# Class
class RWLock(object):
""" RWLock class; this is meant to allow an object to be read from by
multiple threads, but only written to by a single thread at a time. See:
https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock
Usage:
from rwlock import RWLock
my_obj_rwlock = RWLock()
# When reading from my_obj:
with my_obj_rwlock.r_locked():
do_read_only_things_with(my_obj)
# When writing to my_obj:
with my_obj_rwlock.w_locked():
mutate(my_obj)
"""
def __init__(self):
self.w_lock = Lock()
self.num_r_lock = Lock()
self.num_r = 0
# The d_lock is needed to handle the demotion case,
# so that the writer can become a reader without releasing the w_lock.
# the d_lock is held by the writer, and prevents any other thread from taking the
# num_r_lock during that time, which means the writer thread is able to take the
# num_r_lock to update the num_r.
self.d_lock = Lock()
# ___________________________________________________________________
# Reading methods.
def r_acquire(self):
self.d_lock.acquire()
self.num_r_lock.acquire()
self.num_r += 1
if self.num_r == 1:
self.w_lock.acquire()
self.num_r_lock.release()
self.d_lock.release()
def r_release(self):
assert self.num_r > 0
self.num_r_lock.acquire()
self.num_r -= 1
if self.num_r == 0:
self.w_lock.release()
self.num_r_lock.release()
@contextmanager
def r_locked(self):
""" This method is designed to be used via the `with` statement. """
try:
self.r_acquire()
yield
finally:
self.r_release()
# ___________________________________________________________________
# Writing methods.
def w_acquire(self):
self.d_lock.acquire()
self.w_lock.acquire()
def w_acquire_non_blocking(self):
# if d_lock and w_lock can be acquired without blocking, acquire and return True,
# else immediately return False.
if self.d_lock.acquire(blocking=False):
if self.w_lock.acquire(blocking=False):
return True
else:
self.d_lock.release()
return False
def w_release(self):
self.w_lock.release()
self.d_lock.release()
def w_demote(self):
"""demote a writer lock to a reader lock"""
# the d_lock is already held from w_acquire.
# releasing the d_lock at the end of this function allows multiple readers.
# incrementing num_r makes this thread one of those readers.
self.num_r_lock.acquire()
self.num_r += 1
self.num_r_lock.release()
self.d_lock.release()
@contextmanager
def w_locked(self):
""" This method is designed to be used via the `with` statement. """
try:
self.w_acquire()
yield
finally:
self.w_release()
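# A minimal usage sketch of the demotion path described above; the `shared` and
# `snapshot` names are illustrative only and are not part of the original module.
if __name__ == "__main__":
    lock = RWLock()
    shared = {"value": 0}
    lock.w_acquire()            # exclusive writer lock
    shared["value"] += 1        # mutate while no reader can enter
    lock.w_demote()             # convert to a reader lock; other readers may now enter
    snapshot = shared["value"]  # read under the reader lock
    lock.r_release()            # the last reader out releases the underlying w_lock
    print(snapshot)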
| [
[
[
598,
612
],
[
2374,
2388
],
[
3653,
3667
]
],
[
[
635,
639
],
[
1364,
1368
],
[
1397,
1401
],
[
1810,
1814
]
],
[
[
732,
738
]
]
] |
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 11:36:58 2020
@author: nastavirs
"""
import numpy as np
import tensorflow as tf
# Method fragment: expects to be bound to a class that provides neural_net,
# self.weights and self.biases.
def net_u(self, x, t):
    u = self.neural_net(tf.concat([x, t], 1), self.weights, self.biases)
return u | [
[
[
101,
112
]
],
[
[
121,
137
],
[
193,
195
]
],
[
[
143,
148
]
]
] |
# flake8: noqa
# DEV: Skip linting, we lint with Python 2, we'll get SyntaxErrors from `yield from`
# stdlib
import time
import asyncio
# 3p
import aiopg
from psycopg2 import extras
# project
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.aiopg.patch import patch, unpatch
from ddtrace import Pin
# testing
from tests.opentracer.utils import init_tracer
from tests.contrib.config import POSTGRES_CONFIG
from tests.test_tracer import get_dummy_tracer
from tests.contrib.asyncio.utils import AsyncioTestCase, mark_asyncio
TEST_PORT = str(POSTGRES_CONFIG['port'])
class AiopgTestCase(AsyncioTestCase):
# default service
TEST_SERVICE = 'postgres'
def setUp(self):
super().setUp()
self._conn = None
patch()
def tearDown(self):
super().tearDown()
if self._conn and not self._conn.closed:
self._conn.close()
unpatch()
@asyncio.coroutine
def _get_conn_and_tracer(self):
conn = self._conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(tracer=self.tracer).onto(conn)
return conn, self.tracer
@asyncio.coroutine
def assert_conn_is_traced(self, tracer, db, service):
# ensure the trace aiopg client doesn't add non-standard
# methods
try:
yield from db.execute('select \'foobar\'')
except AttributeError:
pass
writer = tracer.writer
# Ensure we can run a query and it's correctly traced
q = 'select \'foobarblah\''
start = time.time()
cursor = yield from db.cursor()
yield from cursor.execute(q)
rows = yield from cursor.fetchall()
end = time.time()
assert rows == [('foobarblah',)]
assert rows
spans = writer.pop()
assert spans
assert len(spans) == 1
span = spans[0]
assert span.name == 'postgres.query'
assert span.resource == q
assert span.service == service
assert span.meta['sql.query'] == q
assert span.error == 0
assert span.span_type == 'sql'
assert start <= span.start <= end
assert span.duration <= end - start
# Ensure OpenTracing compatibility
ot_tracer = init_tracer('aiopg_svc', tracer)
with ot_tracer.start_active_span('aiopg_op'):
cursor = yield from db.cursor()
yield from cursor.execute(q)
rows = yield from cursor.fetchall()
assert rows == [('foobarblah',)]
spans = writer.pop()
assert len(spans) == 2
ot_span, dd_span = spans
# confirm the parenting
assert ot_span.parent_id == None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.name == 'aiopg_op'
assert ot_span.service == 'aiopg_svc'
assert dd_span.name == 'postgres.query'
assert dd_span.resource == q
assert dd_span.service == service
assert dd_span.meta['sql.query'] == q
assert dd_span.error == 0
assert dd_span.span_type == 'sql'
# run a query with an error and ensure all is well
q = 'select * from some_non_existant_table'
cur = yield from db.cursor()
try:
yield from cur.execute(q)
except Exception:
pass
else:
assert 0, 'should have an error'
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
span = spans[0]
assert span.name == 'postgres.query'
assert span.resource == q
assert span.service == service
assert span.meta['sql.query'] == q
assert span.error == 1
# assert span.meta['out.host'] == 'localhost'
assert span.meta['out.port'] == TEST_PORT
assert span.span_type == 'sql'
@mark_asyncio
def test_disabled_execute(self):
conn, tracer = yield from self._get_conn_and_tracer()
tracer.enabled = False
# these calls were crashing with a previous version of the code.
yield from (yield from conn.cursor()).execute(query='select \'blah\'')
yield from (yield from conn.cursor()).execute('select \'blah\'')
assert not tracer.writer.pop()
@mark_asyncio
def test_manual_wrap_extension_types(self):
conn, _ = yield from self._get_conn_and_tracer()
# NOTE: this will crash if it doesn't work.
# _ext.register_type(_ext.UUID, conn_or_curs)
# TypeError: argument 2 must be a connection, cursor or None
extras.register_uuid(conn_or_curs=conn)
@mark_asyncio
def test_connect_factory(self):
tracer = get_dummy_tracer()
services = ['db', 'another']
for service in services:
conn, _ = yield from self._get_conn_and_tracer()
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from self.assert_conn_is_traced(tracer, conn, service)
conn.close()
# ensure we have the service types
service_meta = tracer.writer.pop_services()
expected = {}
assert service_meta == expected
@mark_asyncio
def test_patch_unpatch(self):
tracer = get_dummy_tracer()
writer = tracer.writer
# Test patch idempotence
patch()
patch()
service = 'fo'
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
# Test unpatch
unpatch()
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert not spans, spans
# Test patch again
patch()
conn = yield from aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
yield from (yield from conn.cursor()).execute('select \'blah\'')
conn.close()
spans = writer.pop()
assert spans, spans
assert len(spans) == 1
class AiopgAnalyticsTestCase(AiopgTestCase):
@asyncio.coroutine
def trace_spans(self):
service = 'db'
conn, _ = yield from self._get_conn_and_tracer()
Pin.get_from(conn).clone(service='db', tracer=self.tracer).onto(conn)
cursor = yield from conn.cursor()
yield from cursor.execute('select \'foobar\'')
rows = yield from cursor.fetchall()
assert rows
return self.get_spans()
@mark_asyncio
def test_analytics_default(self):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY))
@mark_asyncio
def test_analytics_with_rate(self):
with self.override_config(
'aiopg',
dict(analytics_enabled=True, analytics_sample_rate=0.5)
):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
@mark_asyncio
def test_analytics_without_rate(self):
with self.override_config(
'aiopg',
dict(analytics_enabled=True)
):
spans = yield from self.trace_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
| [
[
[
116,
120
],
[
1585,
1589
],
[
1732,
1736
]
],
[
[
128,
135
],
[
933,
940
],
[
1163,
1170
],
[
6385,
6392
]
],
[
[
149,
154
],
[
1026,
1031
],
[
5420,
5425
],
[
5781,
5786
],
[
6041,
6046
]
],
[
[
176,
182
],
[
4584,
4590
]
],
[
[
224,
249
],
[
6973,
6998
],
[
7338,
7363
],
[
7684,
7709
]
],
[
[
290,
295
],
[
768,
773
],
[
5345,
5350
],
[
5361,
5366
],
[
6006,
6011
]
],
[
[
297,
304
],
[
917,
924
],
[
5744,
5751
]
],
[
[
325,
328
],
[
1067,
1070
],
[
4859,
4862
],
[
5461,
5464
],
[
6082,
6085
],
[
6519,
6522
]
],
[
[
375,
386
],
[
2291,
2302
]
],
[
[
420,
435
],
[
571,
586
],
[
1042,
1057
],
[
5436,
5451
],
[
5797,
5812
],
[
6057,
6072
]
],
[
[
466,
482
],
[
4696,
4712
],
[
5253,
5269
]
],
[
[
523,
538
],
[
618,
633
]
],
[
[
540,
552
],
[
3866,
3878
],
[
4279,
4291
],
[
4630,
4642
],
[
5189,
5201
],
[
6790,
6802
],
[
7007,
7019
],
[
7377,
7389
]
],
[
[
555,
564
],
[
3811,
3820
]
],
[
[
604,
617
],
[
6364,
6377
]
],
[
[
6341,
6363
]
]
] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VisionDataset that allows user to get example indices."""
import torchvision
class VisionDatasetWithIndices(torchvision.datasets.vision.VisionDataset):
"""VisionDataset that allows user to get example indices.
Dataset that returns a triple (data, targets, indices)
instead of just (data, targets). Indices of training examples can be
used to track model performance on individual examples, for instance to find
training examples that are learned faster than others.
"""
def __init__(self, dataset):
super(VisionDatasetWithIndices, self).__init__(None)
self.dataset = dataset
def __getitem__(self, index):
data, target = self.dataset.__getitem__(index)
return data, target, index
def __len__(self):
return len(self.dataset)
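# Illustrative usage sketch (not part of the original module): wrap any indexable
# torchvision dataset and get the example index back alongside (data, target).
# torchvision.datasets.FakeData is used here purely as a stand-in dataset.
if __name__ == "__main__":
    from torchvision import transforms
    base = torchvision.datasets.FakeData(
        size=10, image_size=(3, 32, 32), num_classes=2,
        transform=transforms.ToTensor())
    wrapped = VisionDatasetWithIndices(base)
    data, target, index = wrapped[3]
    assert index == 3                  # the example index is passed through
    assert len(wrapped) == len(base)   # length is delegated to the wrapped dataset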
| [
[
[
677,
688
],
[
722,
733
]
],
[
[
697,
721
],
[
1139,
1163
]
]
] |
# import riaps
from riaps.run.comp import Component
import logging
class Hello(Component):
def __init__(self):
super(Hello, self).__init__()
def on_clock(self):
now = self.clock.recv_pyobj() # Receive time.time() as float
self.logger.info('on_clock(): %s' % str(now))
| [
[
[
42,
51
],
[
80,
89
]
],
[
[
59,
66
]
],
[
[
74,
79
],
[
130,
135
]
]
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['DataConnector']
class DataConnector(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_connector_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'DataConnectorKind']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Data connector.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_connector_id: Connector ID
:param pulumi.Input[str] etag: Etag of the azure resource
:param pulumi.Input[Union[str, 'DataConnectorKind']] kind: The data connector kind
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data_connector_id'] = data_connector_id
__props__['etag'] = etag
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__['kind'] = kind
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights:DataConnector"), pulumi.Alias(type_="azure-nextgen:securityinsights/latest:DataConnector")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataConnector, __self__).__init__(
'azure-nextgen:securityinsights/v20200101:DataConnector',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataConnector':
"""
Get an existing DataConnector resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return DataConnector(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The data connector kind
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
[
[
176,
184
],
[
1669,
1677
],
[
1827,
1835
]
],
[
[
192,
198
]
],
[
[
206,
220
],
[
389,
395
],
[
4434,
4440
],
[
4619,
4625
],
[
4791,
4797
],
[
4959,
4965
],
[
509,
515
],
[
586,
592
],
[
645,
651
],
[
704,
710
],
[
806,
812
],
[
875,
881
],
[
1995,
2001
],
[
2052,
2058
],
[
3233,
3239
],
[
3265,
3271
],
[
3333,
3339
],
[
3424,
3430
],
[
3725,
3731
],
[
3771,
3777
],
[
4243,
4249
],
[
4278,
4284
],
[
4470,
4476
],
[
4574,
4580
],
[
4655,
4661
],
[
4746,
4752
],
[
4827,
4833
],
[
4914,
4920
],
[
4995,
5001
],
[
5082,
5088
]
],
[
[
240,
243
]
],
[
[
245,
252
]
],
[
[
254,
262
],
[
500,
508
],
[
577,
585
],
[
636,
644
],
[
695,
703
],
[
797,
805
],
[
866,
874
],
[
3762,
3770
],
[
4484,
4492
]
],
[
[
264,
272
]
],
[
[
274,
279
],
[
717,
722
]
],
[
[
296,
306
],
[
2227,
2237
]
],
[
[
308,
315
],
[
5170,
5177
],
[
5284,
5291
]
],
[
[
336,
337
]
],
[
[
339,
346
]
],
[
[
375,
388
],
[
3485,
3498
],
[
4353,
4366
]
]
] |
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes):
model = Sequential()
inputShape = (height, width, depth)
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
# CONV => RELU => POOL #1
model.add(Conv2D(20, (5, 5), padding='same', input_shape=inputShape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
# CONV => RELU => POOL #2
model.add(Conv2D(50, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# FC => RELU
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
        # softmax classifier
model.add(Dense(classes))
model.add(Activation('softmax'))
return model
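# Illustrative usage sketch (not part of the original module): build and compile
# a LeNet for 28x28 single-channel inputs with 10 output classes.
if __name__ == "__main__":
    model = LeNet.build(width=28, height=28, depth=1, classes=10)
    model.compile(loss="categorical_crossentropy", optimizer="adam",
                  metrics=["accuracy"])
    model.summary()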
| [
[
[
25,
35
],
[
375,
385
]
],
[
[
75,
81
],
[
588,
594
],
[
804,
810
]
],
[
[
121,
133
],
[
704,
716
],
[
896,
908
]
],
[
[
165,
175
],
[
666,
676
],
[
858,
868
],
[
1043,
1053
],
[
1141,
1151
]
],
[
[
206,
213
],
[
984,
991
]
],
[
[
244,
249
],
[
1013,
1018
],
[
1107,
1112
]
],
[
[
268,
280
],
[
444,
445
]
],
[
[
288,
293
]
]
] |
# Copyright 2021 Torsten Mehnert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from complatecpp import Stream
class TestStream(Stream):
__test__ = False
def __init__(self, *args, **kwargs):
# Don't use super()
Stream.__init__(self, *args, **kwargs)
self.data = str()
def write(self, string, length):
self.data += string[0:length]
def writeln(self, string, length):
self.data += string[0:length]
self.data += '\n'
def flush(self):
pass
def str(self):
return self.data
| [
[
[
609,
615
],
[
635,
641
],
[
743,
749
]
],
[
[
624,
634
]
]
] |
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
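# For example (illustrative): del_none({"a": None, "b": {"c": None, "d": 1}})
# returns {"b": {"d": 1}} -- None-valued keys are removed recursively, in place.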
def multiple_line_items():
clientReferenceInformationCode = "addressEg"
clientReferenceInformationComments = "dav-All fields"
clientReferenceInformation = Riskv1decisionsClientReferenceInformation(
code = clientReferenceInformationCode,
comments = clientReferenceInformationComments
)
orderInformationBillToAddress1 = "12301 research st"
orderInformationBillToAddress2 = "1"
orderInformationBillToAddress3 = "2"
orderInformationBillToAddress4 = "3"
orderInformationBillToAdministrativeArea = "TX"
orderInformationBillToCountry = "US"
orderInformationBillToLocality = "Austin"
orderInformationBillToPostalCode = "78759"
orderInformationBillTo = Riskv1addressverificationsOrderInformationBillTo(
address1 = orderInformationBillToAddress1,
address2 = orderInformationBillToAddress2,
address3 = orderInformationBillToAddress3,
address4 = orderInformationBillToAddress4,
administrative_area = orderInformationBillToAdministrativeArea,
country = orderInformationBillToCountry,
locality = orderInformationBillToLocality,
postal_code = orderInformationBillToPostalCode
)
orderInformationShipToAddress1 = "PO Box 9088"
orderInformationShipToAddress2 = ""
orderInformationShipToAddress3 = ""
orderInformationShipToAddress4 = ""
orderInformationShipToAdministrativeArea = "CA"
orderInformationShipToCountry = "US"
orderInformationShipToLocality = "San Jose"
orderInformationShipToPostalCode = "95132"
orderInformationShipTo = Riskv1addressverificationsOrderInformationShipTo(
address1 = orderInformationShipToAddress1,
address2 = orderInformationShipToAddress2,
address3 = orderInformationShipToAddress3,
address4 = orderInformationShipToAddress4,
administrative_area = orderInformationShipToAdministrativeArea,
country = orderInformationShipToCountry,
locality = orderInformationShipToLocality,
postal_code = orderInformationShipToPostalCode
)
orderInformationLineItems = []
orderInformationLineItems1 = Riskv1addressverificationsOrderInformationLineItems(
unit_price = "120.50",
quantity = 3,
product_sku = "9966223",
product_name = "headset",
product_code = "electronix"
)
orderInformationLineItems.append(orderInformationLineItems1.__dict__)
orderInformationLineItems2 = Riskv1addressverificationsOrderInformationLineItems(
unit_price = "10.50",
quantity = 2,
product_sku = "9966226",
product_name = "wwrdf",
product_code = "electronic"
)
orderInformationLineItems.append(orderInformationLineItems2.__dict__)
orderInformation = Riskv1addressverificationsOrderInformation(
bill_to = orderInformationBillTo.__dict__,
ship_to = orderInformationShipTo.__dict__,
line_items = orderInformationLineItems
)
buyerInformationMerchantCustomerId = "QWERTY"
buyerInformation = Riskv1addressverificationsBuyerInformation(
merchant_customer_id = buyerInformationMerchantCustomerId
)
requestObj = VerifyCustomerAddressRequest(
client_reference_information = clientReferenceInformation.__dict__,
order_information = orderInformation.__dict__,
buyer_information = buyerInformation.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = VerificationApi(client_config)
return_data, status, body = api_instance.verify_customer_address(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling VerificationApi->verify_customer_address: %s\n" % e)
if __name__ == "__main__":
multiple_line_items()
| [
[
[
24,
25
],
[
645,
686
],
[
1191,
1239
],
[
2067,
2115
],
[
2624,
2675
],
[
2948,
2999
],
[
3259,
3301
],
[
3532,
3574
],
[
3666,
3694
],
[
4116,
4131
]
],
[
[
33,
35
],
[
112,
114
],
[
125,
127
]
],
[
[
43,
47
],
[
3953,
3957
]
],
[
[
80,
96
],
[
182,
198
]
],
[
[
98,
109
],
[
214,
225
]
],
[
[
166,
179
],
[
4008,
4021
]
],
[
[
297,
305
],
[
448,
456
],
[
3906,
3914
]
],
[
[
482,
501
],
[
4507,
4526
]
]
] |
# Write a program that reads several numbers and puts them in a list.
# Then create two extra lists that hold only the even values
# and the odd values entered, respectively.
# At the end, show the contents of the three generated lists.
principal = []
par = []
impar = []
while True:
n = int(input('Digite um valor: '))
principal.append(n)
if n % 2 == 0:
par.append(n)
else:
impar.append(n)
while True:
opção = str(input('Quer continuar? [S/N]: ')).upper()
if opção == 'S':
break
elif opção == 'N':
break
elif opção not in 'SN':
print('Opção inválida. Digite apenas S ou N')
if opção == 'N':
break
print(f'Lista principal de números: {principal}')
print(f'Lista dos números pares: {par}')
print(f'Lista dos números impares: {impar}')
| [
[
[
248,
257
],
[
339,
348
],
[
763,
772
]
],
[
[
263,
266
],
[
386,
389
],
[
810,
813
]
],
[
[
272,
277
],
[
418,
423
],
[
853,
858
]
],
[
[
299,
300
],
[
356,
357
],
[
366,
367
],
[
397,
398
],
[
431,
432
]
],
[
[
458,
465
],
[
523,
530
],
[
568,
575
],
[
613,
620
],
[
697,
704
]
]
] |
# -*- coding: utf-8 -*-
import sys
import os
import re
sys.path.append('../') # noqa
from jinja2 import Template
from cli_bdd.core.steps import (
command,
environment,
file as file_steps,
)
BASE_PATH = os.path.dirname(os.path.normpath(__file__))
TEMPLATES_PATH = os.path.join(BASE_PATH, 'templates')
STEPS_MODULES = [
command,
environment,
file_steps,
]
def _prepare_docstring(value):
if not value:
return ''
remove_spaces = 0
for line in value.split('\n')[1:]:
if line:
for char in line:
if char != ' ':
break
else:
remove_spaces += 1
break
return re.sub(
r'^ {%s}' % remove_spaces,
'',
unicode(value),
flags=re.MULTILINE
).strip()
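# For example (illustrative): _prepare_docstring('first\n    second\n    third')
# returns 'first\nsecond\nthird' -- the common indentation measured on the first
# indented continuation line is stripped from every line.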
def _render_and_save_template(path, dest, context):
template_path = os.path.join(TEMPLATES_PATH, path + '.tpl')
destination_path = os.path.join(BASE_PATH, dest + '.md')
with open(destination_path, 'wt') as dest_file:
dest_file.write(
Template(open(template_path).read()).render(context)
)
def generate_api_reference():
generate_steps_reference()
def generate_steps_reference():
steps_by_types = []
for step_module in STEPS_MODULES:
name = step_module.__name__.split('.')[-1]
steps_by_types.append({
'name': name,
'module': step_module.__name__,
'base_steps': step_module.base_steps
})
steps_dir = os.path.join(BASE_PATH, 'steps/')
if not os.path.exists(steps_dir):
os.makedirs(steps_dir)
for step_type in steps_by_types:
_render_and_save_template(
'steps',
'steps/' + step_type['name'],
{
'step_type': step_type,
'prepare_docstring': _prepare_docstring
}
)
| [
[
[
31,
34
],
[
55,
58
]
],
[
[
42,
44
],
[
219,
221
],
[
235,
237
],
[
280,
282
],
[
905,
907
],
[
972,
974
],
[
1552,
1554
],
[
1597,
1599
],
[
1632,
1634
]
],
[
[
52,
54
],
[
711,
713
],
[
804,
806
]
],
[
[
106,
114
],
[
1099,
1107
]
],
[
[
153,
160
],
[
341,
348
]
],
[
[
166,
177
],
[
354,
365
]
],
[
[
183,
201
],
[
371,
381
]
],
[
[
207,
216
],
[
293,
302
],
[
985,
994
],
[
1565,
1574
]
],
[
[
263,
277
],
[
918,
932
]
],
[
[
319,
332
],
[
1307,
1320
]
],
[
[
391,
409
],
[
1882,
1900
]
],
[
[
837,
862
],
[
1701,
1726
]
],
[
[
1168,
1190
]
],
[
[
1231,
1255
],
[
1198,
1222
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Michaellaoliu Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import MichaellaoliuTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import shutil
import os
class WalletHDTest(MichaellaoliuTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ['-keypool=0']]
    def run_test(self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet')
self.start_node(1)
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete regtest directory
# otherwise node1 would auto-recover all funds in flag the keypool keys as used
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat"))
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Needs rescan
self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# Try a RPC based rescan
self.stop_node(1)
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallet.dat"))
self.start_node(1, extra_args=self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
out = self.nodes[1].rescanblockchain(0, 1)
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], 1)
out = self.nodes[1].rescanblockchain()
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], self.nodes[1].getblockcount())
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
    WalletHDTest().main()
| [
[
[
318,
344
],
[
465,
491
]
],
[
[
383,
395
],
[
1079,
1091
],
[
1279,
1291
],
[
2066,
2078
],
[
2136,
2148
],
[
2535,
2547
],
[
2649,
2661
],
[
3447,
3459
],
[
3519,
3531
],
[
3581,
3593
],
[
3809,
3821
],
[
4357,
4369
],
[
4402,
4414
],
[
4493,
4505
],
[
4538,
4550
],
[
4610,
4622
],
[
5126,
5138
]
],
[
[
401,
417
],
[
918,
934
],
[
3620,
3636
],
[
4239,
4255
]
],
[
[
428,
434
],
[
2933,
2939
],
[
3001,
3007
],
[
3073,
3079
],
[
3935,
3941
],
[
4003,
4009
],
[
4075,
4081
]
],
[
[
442,
444
],
[
2947,
2949
],
[
3015,
3017
],
[
3089,
3091
],
[
3121,
3123
],
[
3949,
3951
],
[
4017,
4019
],
[
4091,
4093
],
[
4123,
4125
]
],
[
[
452,
464
],
[
5196,
5208
]
]
] |
"""Utilities for working with data.
These are intended to be used predominantly by the filing cabinet
in order to read and write datasets appropriately.
"""
from typing import Any
def _get_handlers(location:str=""):
"""Returns available file handlers.
This checks for a handlers folder at this location and will
scrape the handlers available, returning them, along with all
the handlers in this package, as a dictionary keyed by the file
type.
Parameters
----------
location: str = ""
The location where *extra* handlers should be examined for.
Note that any malformed handlers will simply be skipped.
"""
| [
[
[
178,
181
]
],
[
[
187,
200
]
]
] |
from __future__ import print_function
import yaml
import subprocess
import re
import argparse
from keras.models import model_from_yaml
from betago.model import KerasBot
from betago.processor import SevenPlaneProcessor
from betago.gtp.board import gtp_position_to_coords, coords_to_gtp_position
argparser = argparse.ArgumentParser()
argparser.add_argument('handicap', type=int, nargs=1)
argparser.add_argument('output_sgf', nargs='?', default='output.sgf')
args = argparser.parse_args()
processor = SevenPlaneProcessor()
bot_name = '100_epochs_cnn'
model_file = 'model_zoo/' + bot_name + '_bot.yml'
weight_file = 'model_zoo/' + bot_name + '_weights.hd5'
with open(model_file, 'r') as f:
yml = yaml.load(f)
model = model_from_yaml(yaml.dump(yml))
# Note that in Keras 1.0 we have to recompile the model explicitly
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.load_weights(weight_file)
bot = KerasBot(model=model, processor=processor)
pachi_cmd = ["pachi"]
p = subprocess.Popen(pachi_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def send_command(gtpStream, cmd):
gtpStream.stdin.write(cmd)
print(cmd.strip())
def get_response(gtpStream):
succeeded = False
result = ''
while succeeded == False:
line = gtpStream.stdout.readline()
if line[0] == '=':
succeeded = True
line = line.strip()
print("Response is: " + line)
result = re.sub('^= ?', '', line)
return result
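# For example (illustrative): a successful GTP reply line such as '= A19\n'
# comes back as 'A19' once the '= ' prefix is stripped.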
letters = 'abcdefghijklmnopqrs'
def sgfCoord(coords):
row, col = coords
return letters[col] + letters[18 - row]
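# For example (illustrative): sgfCoord((0, 0)) -> 'as' and sgfCoord((18, 18)) -> 'sa';
# the column picks the first SGF letter and the row is flipped because SGF
# coordinates count from the top of the board.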
# deal with handicap. Parse multi-stone response to see where it was placed.
handicap = args.handicap[0]
send_command(p, "boardsize 19\n")
get_response(p)
sgf = "(;GM[1]FF[4]CA[UTF-8]SZ[19]RU[Chinese]\n"
if(handicap == 0):
send_command(p, "komi 7.5\n")
get_response(p)
sgf = sgf + "KM[7.5]\n"
else:
send_command(p, "fixed_handicap " + str(handicap) + "\n")
stones = get_response(p)
sgf_handicap = "HA[" + str(handicap) + "]AB"
for pos in stones.split(" "):
move = gtp_position_to_coords(pos)
bot.apply_move('b', move)
sgf_handicap = sgf_handicap + "[" + sgfCoord(move) + "]"
sgf = sgf + sgf_handicap + "\n"
passes = 0
our_color = 'b' # assume we are black for now
their_color = 'w' # assume we are black for now
last_color = 'w'
if(handicap > 1):
last_color = 'b'
colors = {}
colors['w'] = 'white'
colors['b'] = 'black'
while passes < 2:
if(last_color != our_color):
move = bot.select_move(our_color) # applies the move too
if move is None:
send_command(p, "play " + colors[our_color] + " pass\n")
sgf = sgf + ";" + our_color.upper() + "[]\n"
passes = passes + 1
else:
pos = coords_to_gtp_position(move)
send_command(p, "play " + colors[our_color] + " " + pos + "\n")
sgf = sgf + ";" + our_color.upper() + "[" + sgfCoord(move) + "]\n"
passes = 0
resp = get_response(p)
last_color = our_color
else:
send_command(p, "genmove " + colors[their_color] + "\n")
pos = get_response(p)
if(pos == 'resign'):
passes = 2
elif(pos == 'pass'):
sgf = sgf + ";" + their_color.upper() + "[]\n"
passes = passes + 1
else:
move = gtp_position_to_coords(pos)
bot.apply_move(their_color, move)
sgf = sgf + ";" + their_color.upper() + "[" + sgfCoord(move) + "]\n"
passes = 0
last_color = their_color
sgf = sgf + ")\n"
with open(args.output_sgf, 'w') as out_h:
out_h.write(sgf)
| [
[
[
23,
37
]
],
[
[
45,
49
],
[
701,
705
],
[
742,
746
]
],
[
[
57,
67
],
[
1037,
1047
],
[
1071,
1081
],
[
1095,
1105
]
],
[
[
75,
77
],
[
1495,
1497
]
],
[
[
85,
93
],
[
308,
316
]
],
[
[
120,
135
],
[
726,
741
]
],
[
[
161,
169
],
[
967,
975
]
],
[
[
199,
218
],
[
501,
520
]
],
[
[
248,
270
],
[
2166,
2188
],
[
3465,
3487
]
],
[
[
272,
294
],
[
2886,
2908
]
],
[
[
296,
305
],
[
334,
343
],
[
388,
397
],
[
465,
474
]
],
[
[
458,
462
],
[
1751,
1755
],
[
3706,
3710
]
],
[
[
489,
498
],
[
999,
1008
]
],
[
[
524,
532
],
[
580,
588
],
[
631,
639
]
],
[
[
552,
562
],
[
668,
678
]
],
[
[
602,
613
],
[
947,
958
]
],
[
[
688,
689
],
[
711,
712
]
],
[
[
695,
698
],
[
752,
755
]
],
[
[
718,
723
],
[
833,
838
],
[
928,
933
],
[
982,
987
]
],
[
[
961,
964
],
[
2202,
2205
],
[
2620,
2623
],
[
3505,
3508
]
],
[
[
1011,
1020
],
[
1054,
1063
]
],
[
[
1033,
1034
],
[
1782,
1783
],
[
1816,
1817
],
[
1906,
1907
],
[
1940,
1941
],
[
1994,
1995
],
[
2065,
2066
],
[
2721,
2722
],
[
2940,
2941
],
[
3121,
3122
],
[
3186,
3187
],
[
3257,
3258
]
],
[
[
1118,
1130
],
[
1769,
1781
],
[
1893,
1905
],
[
1981,
1993
],
[
2708,
2720
],
[
2927,
2939
],
[
3173,
3185
]
],
[
[
1208,
1220
],
[
1803,
1815
],
[
1927,
1939
],
[
2052,
2064
],
[
3108,
3120
],
[
3244,
3256
]
],
[
[
1539,
1546
],
[
1628,
1635
],
[
1643,
1650
]
],
[
[
1577,
1585
],
[
2272,
2280
],
[
3047,
3055
],
[
3597,
3605
]
],
[
[
1740,
1748
],
[
1873,
1881
],
[
2021,
2029
],
[
2099,
2107
],
[
2460,
2468
]
],
[
[
1820,
1823
],
[
1953,
1956
],
[
2303,
2306
]
],
[
[
1947,
1950
],
[
2783,
2786
],
[
3009,
3012
],
[
3359,
3362
],
[
3557,
3560
],
[
3683,
3686
]
],
[
[
2043,
2049
],
[
2132,
2138
]
],
[
[
2072,
2084
],
[
2251,
2263
],
[
2309,
2321
]
],
[
[
2125,
2128
],
[
2189,
2192
]
],
[
[
2159,
2163
],
[
2222,
2226
],
[
2281,
2285
]
],
[
[
2236,
2248
],
[
2251,
2263
],
[
2309,
2321
]
],
[
[
2297,
2300
],
[
2783,
2786
],
[
3009,
3012
],
[
3359,
3362
],
[
3557,
3560
],
[
3683,
3686
]
],
[
[
2330,
2336
],
[
2560,
2566
],
[
2843,
2849
],
[
3421,
3427
]
],
[
[
2341,
2350
],
[
2593,
2602
],
[
2636,
2645
],
[
2741,
2750
],
[
2795,
2804
],
[
2960,
2969
],
[
3021,
3030
],
[
3145,
3154
]
],
[
[
2389,
2400
],
[
3209,
3220
],
[
3371,
3382
],
[
3520,
3531
],
[
3569,
3580
],
[
3664,
3675
]
],
[
[
2439,
2449
],
[
2579,
2589
]
],
[
[
2479,
2489
],
[
2579,
2589
]
],
[
[
2497,
2503
],
[
2509,
2515
],
[
2531,
2537
],
[
2734,
2740
],
[
2953,
2959
],
[
3202,
3208
]
],
[
[
2613,
2617
],
[
2682,
2686
],
[
2909,
2913
],
[
3056,
3060
]
],
[
[
2777,
2780
],
[
2783,
2786
],
[
3009,
3012
],
[
3359,
3362
],
[
3557,
3560
],
[
3683,
3686
]
],
[
[
2834,
2840
],
[
2560,
2566
],
[
2843,
2849
],
[
3421,
3427
]
],
[
[
2880,
2883
],
[
2979,
2982
]
],
[
[
3003,
3006
],
[
2783,
2786
],
[
3009,
3012
],
[
3359,
3362
],
[
3557,
3560
],
[
3683,
3686
]
],
[
[
3082,
3088
],
[
2560,
2566
],
[
2843,
2849
],
[
3421,
3427
]
],
[
[
3101,
3105
]
],
[
[
3132,
3142
],
[
2579,
2589
]
],
[
[
3238,
3241
],
[
3271,
3274
],
[
3325,
3328
],
[
3488,
3491
]
],
[
[
3301,
3307
],
[
2560,
2566
],
[
2843,
2849
],
[
3421,
3427
]
],
[
[
3353,
3356
],
[
2783,
2786
],
[
3009,
3012
],
[
3359,
3362
],
[
3557,
3560
],
[
3683,
3686
]
],
[
[
3412,
3418
],
[
2560,
2566
],
[
2843,
2849
],
[
3421,
3427
]
],
[
[
3458,
3462
],
[
3533,
3537
],
[
3606,
3610
]
],
[
[
3551,
3554
],
[
2783,
2786
],
[
3009,
3012
],
[
3359,
3362
],
[
3557,
3560
],
[
3683,
3686
]
],
[
[
3632,
3638
],
[
2560,
2566
],
[
2843,
2849
],
[
3421,
3427
]
],
[
[
3651,
3661
],
[
2579,
2589
]
],
[
[
3677,
3680
],
[
3754,
3757
]
],
[
[
3731,
3736
],
[
3742,
3747
]
]
] |
import os
import sys
from time import sleep
import threading
import window
import comunication
import DisplayActions
debugMode = True
def main():
print("starting...",end='')
state = True
print("[" + ("OK" if state else "ERROR" ) + "]")
# create the window thread before starting it; the original order raised
# UnboundLocalError because winThread was used before it was assigned
winThread = threading.Thread(target=window.start)
winThread.start()
comunication.start()
if __name__ == '__main__':
main() | [
[
[
7,
9
]
],
[
[
17,
20
]
],
[
[
38,
43
]
],
[
[
52,
61
],
[
316,
325
]
],
[
[
69,
75
],
[
340,
346
]
],
[
[
83,
95
],
[
281,
293
]
],
[
[
103,
117
]
],
[
[
118,
127
]
],
[
[
143,
147
],
[
387,
391
]
],
[
[
304,
313
],
[
259,
268
]
]
] |
# Write a script that generates daily edit AND view counts for Panama Papers over its first 30 days of existence, and prints them to a CSV or TSV file in reverse-chronological order. Your file should have three columns with the headers "date", "edits" and "views".
import csv
import json
import requests
import operator
from urllib.parse import quote
ENDPOINT = 'https://en.wikipedia.org/w/api.php'
parameters = { 'action' : 'query',
'prop' : 'revisions',
'titles' : 'Panama_Papers',
'format' : 'json',
'rvdir' : 'newer',
'rvstart': '2016-04-03T17:59:05Z',
'rvend' : '2016-05-03T00:00:00Z',
'rvlimit' : 500,
'continue' : '' }
days = {}
done = False
while not done:
wp_call = requests.get(ENDPOINT, params=parameters)
response = wp_call.json()
pages = response['query']['pages']
for page_id in pages:
page = pages[page_id]
revisions = page['revisions']
for rev in revisions:
revday = rev['timestamp'][:10].replace("-","")
revhour = rev['timestamp'][11:13]
if revday in days.keys():
if revhour in days[revday].keys():
days[revday][revhour] += 1
else:
days[revday][revhour] = 1
else:
days[revday] = {}
days[revday][revhour] = 1
if 'continue' in response:
parameters['continue'] = response['continue']['continue']
parameters['rvcontinue'] = response['continue']['rvcontinue']
else:
done = True
# print(days)
for dkey, dval in days.items():
daily_edits = 0
for hkey, hval in dval.items():
daily_edits += hval
days[dkey]['total'] = daily_edits
# print(days)
ENDPOINT = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/'
wp_code = 'en.wikipedia'
access = 'all-access'
agents = 'all-agents'
page_title = 'Panama Papers'
period = 'daily'
start_date = '20160403'
end_date = '20160502'
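# For reference, with the values above the assembled per-article pageviews URL is:
# https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents/Panama%20Papers/daily/20160403/20160502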
wp_call = requests.get(ENDPOINT + wp_code + '/' + access + '/' + agents + '/' + quote(page_title, safe='') + '/' + period + '/' + start_date + '/' + end_date)
response = wp_call.json()
# print(json.dumps(response, indent=4))
for dv in response['items']:
# print(dv['timestamp'])
ts = dv['timestamp'][:-2]
if ts in days.keys():
days[ts]['views'] = dv['views']
# print(json.dumps(days, indent=4))
days_sorted = sorted(days.items(), key=operator.itemgetter(0), reverse=True)
print(days_sorted)
with open('pp30days_views_edits.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(('date', 'edits', 'views'))
for n in days_sorted:
writer.writerow((n[0], n[1]['total'], n[1]['views'],)) | [
[
[
271,
274
],
[
2653,
2656
]
],
[
[
282,
286
]
],
[
[
294,
302
],
[
801,
809
],
[
2085,
2093
]
],
[
[
310,
318
],
[
2533,
2541
]
],
[
[
344,
349
],
[
2155,
2160
]
],
[
[
351,
359
],
[
814,
822
]
],
[
[
400,
410
],
[
831,
841
],
[
1491,
1501
],
[
1557,
1567
]
],
[
[
748,
752
],
[
1178,
1182
],
[
1221,
1225
],
[
1262,
1266
],
[
1331,
1335
],
[
1391,
1395
],
[
1425,
1429
],
[
1682,
1686
],
[
1784,
1788
],
[
2403,
2407
],
[
2424,
2428
],
[
2515,
2519
]
],
[
[
758,
762
],
[
781,
785
]
],
[
[
791,
798
],
[
858,
865
]
],
[
[
847,
855
],
[
885,
893
],
[
1473,
1481
],
[
1516,
1524
],
[
1584,
1592
]
],
[
[
877,
882
],
[
931,
936
],
[
953,
958
]
],
[
[
920,
927
],
[
959,
966
]
],
[
[
946,
950
],
[
988,
992
]
],
[
[
976,
985
],
[
1025,
1034
]
],
[
[
1018,
1021
],
[
1057,
1060
],
[
1117,
1120
]
],
[
[
1048,
1054
],
[
1168,
1174
],
[
1226,
1232
],
[
1267,
1273
],
[
1336,
1342
],
[
1396,
1402
],
[
1430,
1436
]
],
[
[
1107,
1114
],
[
1210,
1217
],
[
1275,
1282
],
[
1344,
1351
],
[
1438,
1445
]
],
[
[
1637,
1641
],
[
781,
785
]
],
[
[
1668,
1672
],
[
1789,
1793
]
],
[
[
1674,
1678
],
[
1738,
1742
]
],
[
[
1700,
1711
],
[
1760,
1771
],
[
1806,
1817
]
],
[
[
1724,
1728
]
],
[
[
1730,
1734
],
[
1775,
1779
]
],
[
[
1834,
1842
],
[
2098,
2106
]
],
[
[
1913,
1920
],
[
2109,
2116
]
],
[
[
1938,
1944
],
[
2125,
2131
]
],
[
[
1960,
1966
],
[
2140,
2146
]
],
[
[
1982,
1992
],
[
2161,
2171
]
],
[
[
2011,
2017
],
[
2190,
2196
]
],
[
[
2028,
2038
],
[
2205,
2215
]
],
[
[
2052,
2060
],
[
2224,
2232
]
],
[
[
2075,
2082
],
[
2245,
2252
]
],
[
[
2234,
2242
],
[
2312,
2320
]
],
[
[
2306,
2308
],
[
2369,
2371
],
[
2444,
2446
]
],
[
[
2364,
2366
],
[
2397,
2399
],
[
2429,
2431
]
],
[
[
2494,
2505
],
[
2577,
2588
],
[
2728,
2739
]
],
[
[
2637,
2638
],
[
2664,
2665
]
],
[
[
2644,
2650
],
[
2671,
2677
],
[
2750,
2756
]
],
[
[
2723,
2724
],
[
2767,
2768
],
[
2773,
2774
],
[
2788,
2789
]
]
] |
from unittest.mock import MagicMock
from twisted.web.resource import NoResource
from txweb.resources import RoutingResource
from txweb import App
from txweb.http_codes import Unrenderable
from txweb.resources import ViewClassResource
from unittest.mock import sentinel
import typing as T
from .helper import RequestRetval
import pytest
def test_instantiates_without_error():
class FakeSite:
pass
fake_site = FakeSite()
resource = RoutingResource(fake_site)
def test_how_head_requests_are_handled(dummy_request:RequestRetval):
app = App(__name__)
@app.add("/foo", methods=["POST"])
def handle_foo(request):
return b"123"
dummy_request.request.site = app.site
dummy_request.channel.site = app.site
dummy_request.request.requestReceived(b"HEAD", b"/foo", b"HTTP/1.1")
assert dummy_request.request.code == 405
assert dummy_request.request.code_message == b"Method not allowed"
def test_ensure_blows_up_with_a_bad_add():
app = App(__name__)
bad_asset = sentinel
with pytest.raises(ValueError) as excinfo:
app.add("/trash")(bad_asset)
assert "expected callable|Object|twisted.web.resource.Resource" in str(excinfo.value)
def test_ensure_blowsup_with_a_class_that_has_no_way_to_render():
app = App(__name__)
with pytest.raises(Unrenderable):
@app.add("/trash")
class BaseClass(object):
pass
def test_ensure_a_classic_like_class_is_routed():
app = App(__name__)
@app.add("/trash")
class GoodClass(object):
def render(self, request):
return b"Rendered"
first_key = next(iter(app.router.iter_rules()))
endpoint = app.router._endpoints[first_key.endpoint]
assert isinstance(endpoint, ViewClassResource)
debug = 1
def test_ensure_resource_is_added():
app = App(__name__)
app.add_resource("/404", resource=NoResource())
first_key = next(iter(app.router.iter_rules()))
endpoint = app.router._endpoints[first_key.endpoint]
assert isinstance(endpoint, NoResource)
debug = 1
def test_handle_add_slashes(dummy_request:RequestRetval):
app = App(__name__)
mock = MagicMock()
app.route("/js/")(mock)
dummy_request.request.site = app.site
dummy_request.channel.site = app.site
dummy_request.request.requestReceived(b"GET", b"/js", b"HTTP/1.1")
assert dummy_request.request.code == 308
assert dummy_request.request.code_message == b"Permanent Redirect"
assert dummy_request.request.responseHeaders.getRawHeaders(b"location") == [b"http://10.0.0.1/js/"]
assert mock.call_count == 0
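# (follows from the asserts above) the request to "/js" is answered with a
# permanent redirect to the registered "/js/" route, so the routed handler
# (mock) is never invoked and its call_count stays at 0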
| [
[
[
26,
35
],
[
2188,
2197
]
],
[
[
70,
80
],
[
1909,
1919
],
[
2065,
2075
]
],
[
[
110,
125
],
[
460,
475
]
],
[
[
144,
147
],
[
569,
572
],
[
1005,
1008
],
[
1304,
1307
],
[
1497,
1500
],
[
1856,
1859
],
[
2162,
2165
]
],
[
[
177,
189
],
[
1342,
1354
]
],
[
[
218,
235
],
[
1774,
1791
]
],
[
[
263,
271
],
[
1035,
1043
]
],
[
[
279,
290
]
],
[
[
312,
325
],
[
542,
555
],
[
2135,
2148
]
],
[
[
334,
340
],
[
1055,
1061
],
[
1328,
1334
]
],
[
[
347,
378
]
],
[
[
493,
527
]
],
[
[
955,
990
]
],
[
[
1231,
1289
]
],
[
[
1439,
1481
]
],
[
[
1812,
1841
]
],
[
[
2097,
2120
]
]
] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Test that maximizing the likelihood correctly recovers the distribution parameters for all
distributions exposed to the user.
"""
# Standard library imports
from typing import Iterable, List, Tuple
# Third-party imports
import mxnet as mx
import numpy as np
import pytest
from pydantic import PositiveFloat, PositiveInt
# First-party imports
from gluonts.model.common import NPArrayLike
from gluonts.distribution.box_cox_tranform import (
InverseBoxCoxTransform,
InverseBoxCoxTransformOutput,
)
from gluonts.distribution import (
DistributionOutput,
StudentT,
StudentTOutput,
MultivariateGaussian,
MultivariateGaussianOutput,
LowrankMultivariateGaussian,
LowrankMultivariateGaussianOutput,
NegativeBinomial,
NegativeBinomialOutput,
Laplace,
LaplaceOutput,
Gaussian,
GaussianOutput,
PiecewiseLinear,
PiecewiseLinearOutput,
Binned,
BinnedOutput,
)
from gluonts.distribution.transformed_distribution_output import (
TransformedDistributionOutput,
)
from gluonts.distribution.transformed_distribution import (
TransformedDistribution,
)
NUM_SAMPLES = 2000
BATCH_SIZE = 32
TOL = 0.3
START_TOL_MULTIPLE = 1
np.random.seed(1)
mx.random.seed(1)
def inv_softplus(y: NPArrayLike) -> np.ndarray:
# y = log(1 + exp(x)) ==> x = log(exp(y) - 1)
return np.log(np.exp(y) - 1)
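# Worked example (a sketch, not part of the test suite): softplus(1.0) =
# log(1 + e**1.0) ~= 1.3133, and inv_softplus(1.3133) = log(e**1.3133 - 1) ~= 1.0,
# so inv_softplus recovers the pre-activation value used to initialize biases.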
def maximum_likelihood_estimate_sgd(
distr_output: DistributionOutput,
samples: mx.ndarray,
init_biases: List[mx.ndarray.NDArray] = None,
num_epochs: PositiveInt = PositiveInt(5),
learning_rate: PositiveFloat = PositiveFloat(1e-2),
hybridize: bool = True,
) -> Iterable[float]:
model_ctx = mx.cpu()
arg_proj = distr_output.get_args_proj()
arg_proj.initialize()
if hybridize:
arg_proj.hybridize()
if init_biases is not None:
for param, bias in zip(arg_proj.proj, init_biases):
param.params[param.prefix + "bias"].initialize(
mx.initializer.Constant(bias), force_reinit=True
)
trainer = mx.gluon.Trainer(
arg_proj.collect_params(),
"sgd",
{"learning_rate": learning_rate, "clip_gradient": 10.0},
)
# The input data to our model is one-dimensional
dummy_data = mx.nd.array(np.ones((len(samples), 1)))
train_data = mx.gluon.data.DataLoader(
mx.gluon.data.ArrayDataset(dummy_data, samples),
batch_size=BATCH_SIZE,
shuffle=True,
)
for e in range(num_epochs):
cumulative_loss = 0
num_batches = 0
# inner loop
for i, (data, sample_label) in enumerate(train_data):
data = data.as_in_context(model_ctx)
sample_label = sample_label.as_in_context(model_ctx)
with mx.autograd.record():
distr_args = arg_proj(data)
distr = distr_output.distribution(distr_args)
loss = distr.loss(sample_label)
if not hybridize:
assert loss.shape == distr.batch_shape
loss.backward()
trainer.step(BATCH_SIZE)
num_batches += 1
cumulative_loss += mx.nd.mean(loss).asscalar()
print("Epoch %s, loss: %s" % (e, cumulative_loss / num_batches))
return [
param[0].asnumpy() for param in arg_proj(mx.nd.array(np.ones((1, 1))))
]
@pytest.mark.parametrize("mu, sigma, nu", [(2.3, 0.7, 6.0)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_studentT_likelihood(
mu: float, sigma: float, nu: float, hybridize: bool
) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# generate samples
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma
nus = mx.nd.zeros((NUM_SAMPLES,)) + nu
distr = StudentT(mus, sigmas, nus)
samples = distr.sample()
# nu takes very long to learn, so we initialize it at the true value.
# transform used is softplus(x) + 2
init_bias = [
mu - START_TOL_MULTIPLE * TOL * mu,
inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
inv_softplus(nu - 2),
]
mu_hat, sigma_hat, nu_hat = maximum_likelihood_estimate_sgd(
StudentTOutput(),
samples,
init_biases=init_bias,
hybridize=hybridize,
num_epochs=PositiveInt(10),
learning_rate=PositiveFloat(1e-2),
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(sigma_hat - sigma) < TOL * sigma
), f"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
assert (
np.abs(nu_hat - nu) < TOL * nu
), "nu0 did not match: nu0 = %s, nu_hat = %s" % (nu, nu_hat)
@pytest.mark.parametrize("mu, sigma", [(1.0, 0.1)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_gaussian_likelihood(mu: float, sigma: float, hybridize: bool):
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# generate samples
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma
distr = Gaussian(mus, sigmas)
samples = distr.sample()
init_biases = [
mu - START_TOL_MULTIPLE * TOL * mu,
inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
]
mu_hat, sigma_hat = maximum_likelihood_estimate_sgd(
GaussianOutput(),
samples,
init_biases=init_biases,
hybridize=hybridize,
learning_rate=PositiveFloat(0.001),
num_epochs=PositiveInt(5),
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(sigma_hat - sigma) < TOL * sigma
), f"alpha did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
@pytest.mark.timeout(10)
def test_multivariate_gaussian() -> None:
num_samples = 2000
dim = 2
mu = np.arange(0, dim) / float(dim)
L_diag = np.ones((dim,))
L_low = 0.1 * np.ones((dim, dim)) * np.tri(dim, k=-1)
L = np.diag(L_diag) + L_low
Sigma = L.dot(L.transpose())
distr = MultivariateGaussian(mu=mx.nd.array(mu), L=mx.nd.array(L))
samples = distr.sample(num_samples)
mu_hat, L_hat = maximum_likelihood_estimate_sgd(
MultivariateGaussianOutput(dim=dim),
samples,
init_biases=None, # todo we would need to rework biases a bit to use it in the multivariate case
hybridize=False,
learning_rate=PositiveFloat(0.01),
num_epochs=PositiveInt(10),
)
distr = MultivariateGaussian(
mu=mx.nd.array([mu_hat]), L=mx.nd.array([L_hat])
)
Sigma_hat = distr.variance[0].asnumpy()
assert np.allclose(
mu_hat, mu, atol=0.1, rtol=0.1
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert np.allclose(
Sigma_hat, Sigma, atol=0.1, rtol=0.1
), f"Sigma did not match: sigma = {Sigma}, sigma_hat = {Sigma_hat}"
@pytest.mark.timeout(10)
def test_lowrank_multivariate_gaussian() -> None:
num_samples = 2000
dim = 2
rank = 1
mu = np.arange(0, dim) / float(dim)
D = np.eye(dim) * (np.arange(dim) / dim + 0.5)
W = np.sqrt(np.ones((dim, rank)) * 0.2)
Sigma = D + W.dot(W.transpose())
distr = LowrankMultivariateGaussian(
mu=mx.nd.array([mu]),
D=mx.nd.array([np.diag(D)]),
W=mx.nd.array([W]),
dim=dim,
rank=rank,
)
assert np.allclose(
distr.variance[0].asnumpy(), Sigma, atol=0.1, rtol=0.1
), f"did not match: sigma = {Sigma}, sigma_hat = {distr.variance[0]}"
samples = distr.sample(num_samples).squeeze().asnumpy()
mu_hat, D_hat, W_hat = maximum_likelihood_estimate_sgd(
LowrankMultivariateGaussianOutput(dim=dim, rank=rank),
samples,
learning_rate=PositiveFloat(0.01),
num_epochs=PositiveInt(10),
init_biases=None, # todo we would need to rework biases a bit to use it in the multivariate case
hybridize=False,
)
distr = LowrankMultivariateGaussian(
dim=dim,
rank=rank,
mu=mx.nd.array([mu_hat]),
D=mx.nd.array([D_hat]),
W=mx.nd.array([W_hat]),
)
Sigma_hat = distr.variance.asnumpy()
assert np.allclose(
mu_hat, mu, atol=0.2, rtol=0.1
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert np.allclose(
Sigma_hat, Sigma, atol=0.1, rtol=0.1
), f"alpha did not match: sigma = {Sigma}, sigma_hat = {Sigma_hat}"
@pytest.mark.parametrize("mu", [6.0])
@pytest.mark.parametrize("hybridize", [True, False])
def test_deterministic_l2(mu: float, hybridize: bool) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters.
This test uses the Gaussian distribution with fixed variance and sample mean.
This essentially reduces to a deterministic L2 loss.
"""
# generate samples
mu = mu
mus = mx.nd.zeros(NUM_SAMPLES) + mu
deterministic_distr = Gaussian(mu=mus, sigma=0.1 * mx.nd.ones_like(mus))
samples = deterministic_distr.sample()
class GaussianFixedVarianceOutput(GaussianOutput):
@classmethod
def domain_map(cls, F, mu, sigma):
sigma = 0.1 * F.ones_like(sigma)
return mu.squeeze(axis=-1), sigma.squeeze(axis=-1)
mu_hat, _ = maximum_likelihood_estimate_sgd(
GaussianFixedVarianceOutput(),
samples,
init_biases=[3 * mu, 0.1],
hybridize=hybridize,
num_epochs=PositiveInt(1),
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
@pytest.mark.parametrize("mu", [1.0])
@pytest.mark.parametrize("hybridize", [True, False])
def test_deterministic_l1(mu: float, hybridize: bool) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters.
This test uses the Laplace distribution with fixed variance and sample mean.
This essentially reduces to a deterministic L1 loss.
"""
# generate samples
mu = mu
mus = mx.nd.zeros(NUM_SAMPLES) + mu
class LaplaceFixedVarianceOutput(LaplaceOutput):
@classmethod
def domain_map(cls, F, mu, b):
b = 0.1 * F.ones_like(b)
return mu.squeeze(axis=-1), b.squeeze(axis=-1)
deterministic_distr = Laplace(mu=mus, b=0.1 * mx.nd.ones_like(mus))
samples = deterministic_distr.sample()
mu_hat, _ = maximum_likelihood_estimate_sgd(
LaplaceFixedVarianceOutput(),
samples,
init_biases=[3 * mu, 0.1],
learning_rate=PositiveFloat(1e-3),
hybridize=hybridize,
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
@pytest.mark.parametrize("mu_alpha", [(2.5, 0.7)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_neg_binomial(mu_alpha: Tuple[float, float], hybridize: bool) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# test instance
mu, alpha = mu_alpha
# generate samples
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
alphas = mx.nd.zeros((NUM_SAMPLES,)) + alpha
neg_bin_distr = NegativeBinomial(mu=mus, alpha=alphas)
samples = neg_bin_distr.sample()
init_biases = [
inv_softplus(mu - START_TOL_MULTIPLE * TOL * mu),
inv_softplus(alpha + START_TOL_MULTIPLE * TOL * alpha),
]
mu_hat, alpha_hat = maximum_likelihood_estimate_sgd(
NegativeBinomialOutput(),
samples,
hybridize=hybridize,
init_biases=init_biases,
num_epochs=PositiveInt(15),
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(alpha_hat - alpha) < TOL * alpha
), f"alpha did not match: alpha = {alpha}, alpha_hat = {alpha_hat}"
@pytest.mark.timeout(10)
@pytest.mark.parametrize("mu_b", [(3.3, 0.7)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_laplace(mu_b: Tuple[float, float], hybridize: bool) -> None:
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# test instance
mu, b = mu_b
# generate samples
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
bs = mx.nd.zeros((NUM_SAMPLES,)) + b
laplace_distr = Laplace(mu=mus, b=bs)
samples = laplace_distr.sample()
init_biases = [
mu - START_TOL_MULTIPLE * TOL * mu,
inv_softplus(b + START_TOL_MULTIPLE * TOL * b),
]
mu_hat, b_hat = maximum_likelihood_estimate_sgd(
LaplaceOutput(), samples, hybridize=hybridize, init_biases=init_biases
)
assert (
np.abs(mu_hat - mu) < TOL * mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(b_hat - b) < TOL * b
), f"b did not match: b = {b}, b_hat = {b_hat}"
@pytest.mark.parametrize(
"gamma, slopes, knot_spacings",
[(2.0, np.array([3, 1, 3, 4]), np.array([0.3, 0.2, 0.35, 0.15]))],
)
@pytest.mark.parametrize("hybridize", [True, False])
def test_piecewise_linear(
gamma: float,
slopes: np.ndarray,
knot_spacings: np.ndarray,
hybridize: bool,
) -> None:
"""
Test to check that minimizing the CRPS recovers the quantile function
"""
num_samples = 500  # use fewer samples to avoid timeout failures
gammas = mx.nd.zeros((num_samples,)) + gamma
slopess = mx.nd.zeros((num_samples, len(slopes))) + mx.nd.array(slopes)
knot_spacingss = mx.nd.zeros(
(num_samples, len(knot_spacings))
) + mx.nd.array(knot_spacings)
pwl_sqf = PiecewiseLinear(gammas, slopess, knot_spacingss)
samples = pwl_sqf.sample()
# Parameter initialization
gamma_init = gamma - START_TOL_MULTIPLE * TOL * gamma
slopes_init = slopes - START_TOL_MULTIPLE * TOL * slopes
knot_spacings_init = knot_spacings
# We perturb knot spacings such that even after the perturbation they sum to 1.
mid = len(slopes) // 2
knot_spacings_init[:mid] = (
knot_spacings[:mid] - START_TOL_MULTIPLE * TOL * knot_spacings[:mid]
)
knot_spacings_init[mid:] = (
knot_spacings[mid:] + START_TOL_MULTIPLE * TOL * knot_spacings[mid:]
)
init_biases = [gamma_init, slopes_init, knot_spacings_init]
# check if it returns the original parameters or the mapped ones
gamma_hat, slopes_hat, knot_spacings_hat = maximum_likelihood_estimate_sgd(
PiecewiseLinearOutput(len(slopes)),
samples,
init_biases=init_biases,
hybridize=hybridize,
learning_rate=PositiveFloat(0.01),
num_epochs=PositiveInt(20),
)
# Since the problem is highly non-convex we may not be able to recover the exact parameters
# Here we check if the estimated parameters yield similar function evaluations at different quantile levels.
quantile_levels = np.arange(0.1, 1.0, 0.1)
# create a PiecewiseLinear instance with the estimated parameters to have access to .quantile
pwl_sqf_hat = PiecewiseLinear(
mx.nd.array(gamma_hat),
mx.nd.array(slopes_hat).expand_dims(axis=0),
mx.nd.array(knot_spacings_hat).expand_dims(axis=0),
)
# Compute quantiles with the estimated parameters
quantiles_hat = np.squeeze(
pwl_sqf_hat.quantile(
mx.nd.array(quantile_levels).expand_dims(axis=0), axis=1
).asnumpy()
)
# Compute quantiles with the original parameters
# Since the params are replicated across samples we take only the first entry
quantiles = np.squeeze(
pwl_sqf.quantile(
mx.nd.array(quantile_levels)
.expand_dims(axis=0)
.repeat(axis=0, repeats=num_samples),
axis=1,
).asnumpy()[0, :]
)
for ix, (quantile, quantile_hat) in enumerate(
zip(quantiles, quantiles_hat)
):
assert np.abs(quantile_hat - quantile) < TOL * quantile, (
f"quantile level {quantile_levels[ix]} didn't match:"
f" "
f"q = {quantile}, q_hat = {quantile_hat}"
)
@pytest.mark.skip("this test fails when run locally")
@pytest.mark.parametrize("lam_1, lam_2", [(0.1, 0.01)])
@pytest.mark.parametrize("mu, sigma", [(-1.5, 0.5)])
@pytest.mark.parametrize("hybridize", [True])
def test_box_cox_tranform(
lam_1: float, lam_2: float, mu: float, sigma: float, hybridize: bool
):
"""
Test to check that maximizing the likelihood recovers the parameters
"""
# generate samples
lamdas_1 = mx.nd.zeros((NUM_SAMPLES,)) + lam_1
lamdas_2 = mx.nd.zeros((NUM_SAMPLES,)) + lam_2
transform = InverseBoxCoxTransform(lamdas_1, lamdas_2)
mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma
gausian_distr = Gaussian(mus, sigmas)
# Here the base distribution is Gaussian, which is transformed to
# non-Gaussian via the inverse Box-Cox transform.
# Sampling from `trans_distr` gives non-Gaussian samples
trans_distr = TransformedDistribution(gausian_distr, transform)
# Given the non-Gaussian samples find the true parameters
# of the Box-Cox transformation as well as the underlying Gaussian distribution.
samples = trans_distr.sample()
init_biases = [
mu - START_TOL_MULTIPLE * TOL * mu,
inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
lam_1 - START_TOL_MULTIPLE * TOL * lam_1,
inv_softplus(lam_2 - START_TOL_MULTIPLE * TOL * lam_2),
]
mu_hat, sigma_hat, lam_1_hat, lam_2_hat = maximum_likelihood_estimate_sgd(
TransformedDistributionOutput(
GaussianOutput(),
InverseBoxCoxTransformOutput(lb_obs=lam_2, fix_lambda_2=True),
),
samples,
init_biases=init_biases,
hybridize=hybridize,
learning_rate=PositiveFloat(0.01),
num_epochs=PositiveInt(18),
)
assert (
np.abs(lam_1_hat - lam_1) < TOL * lam_1
), f"lam_1 did not match: lam_1 = {lam_1}, lam_1_hat = {lam_1_hat}"
# assert (
# np.abs(lam_2_hat - lam_2) < TOL * lam_2
# ), f"lam_2 did not match: lam_2 = {lam_2}, lam_2_hat = {lam_2_hat}"
assert np.abs(mu_hat - mu) < TOL * np.abs(
mu
), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
assert (
np.abs(sigma_hat - sigma) < TOL * sigma
), f"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
@pytest.mark.parametrize("num_bins", [6])
@pytest.mark.parametrize(
"bin_probabilites", [np.array([0.3, 0.1, 0.05, 0.2, 0.1, 0.25])]
)
@pytest.mark.parametrize("hybridize", [True, False])
def test_binned_likelihood(
num_bins: float, bin_probabilites: np.ndarray, hybridize: bool
):
"""
Test to check that maximizing the likelihood recovers the parameters
"""
bin_prob = mx.nd.array(bin_probabilites)
bin_center = mx.nd.array(np.logspace(-1, 1, num_bins))
# generate samples
bin_probs = mx.nd.zeros((NUM_SAMPLES, num_bins)) + bin_prob
bin_centers = mx.nd.zeros((NUM_SAMPLES, num_bins)) + bin_center
distr = Binned(bin_probs, bin_centers)
samples = distr.sample()
# add some jitter to the uniform initialization and normalize
bin_prob_init = mx.nd.random_uniform(1 - TOL, 1 + TOL, num_bins) * bin_prob
bin_prob_init = bin_prob_init / bin_prob_init.sum()
init_biases = [bin_prob_init]
bin_prob_hat, = maximum_likelihood_estimate_sgd(
BinnedOutput(list(bin_center.asnumpy())),
samples,
init_biases=init_biases,
hybridize=hybridize,
learning_rate=PositiveFloat(0.05),
num_epochs=PositiveInt(25),
)
assert all(
mx.nd.abs(mx.nd.array(bin_prob_hat) - bin_prob) < TOL * bin_prob
), f"bin_prob did not match: bin_prob = {bin_prob}, bin_prob_hat = {bin_prob_hat}"
| [
[
[
757,
765
],
[
2229,
2237
]
],
[
[
767,
771
],
[
2061,
2065
]
],
[
[
773,
778
],
[
11493,
11498
],
[
12642,
12647
]
],
[
[
809,
820
],
[
1789,
1791
],
[
2032,
2034
],
[
2066,
2068
],
[
2262,
2264
],
[
2559,
2561
],
[
2637,
2639
],
[
2847,
2849
],
[
2905,
2907
],
[
2939,
2941
],
[
3346,
3348
],
[
3741,
3743
],
[
3905,
3907
],
[
4277,
4279
],
[
4323,
4325
],
[
4369,
4371
],
[
5664,
5666
],
[
5710,
5712
],
[
6771,
6773
],
[
6790,
6792
],
[
7225,
7227
],
[
7250,
7252
],
[
7935,
7937
],
[
7964,
7966
],
[
8001,
8003
],
[
8730,
8732
],
[
8763,
8765
],
[
8795,
8797
],
[
9550,
9552
],
[
9636,
9638
],
[
10671,
10673
],
[
10962,
10964
],
[
11708,
11710
],
[
11754,
11756
],
[
12849,
12851
],
[
12891,
12893
],
[
13969,
13971
],
[
14019,
14021
],
[
14061,
14063
],
[
14102,
14104
],
[
14165,
14167
],
[
15628,
15630
],
[
15660,
15662
],
[
15713,
15715
],
[
15900,
15902
],
[
16180,
16182
],
[
17097,
17099
],
[
17148,
17150
],
[
17254,
17256
],
[
17300,
17302
],
[
19381,
19383
],
[
19428,
19430
],
[
19510,
19512
],
[
19576,
19578
],
[
19786,
19788
],
[
20230,
20232
],
[
20240,
20242
]
],
[
[
828,
839
],
[
1771,
1773
],
[
13555,
13557
],
[
13579,
13581
],
[
19079,
19081
],
[
1845,
1847
],
[
1920,
1922
],
[
1927,
1929
],
[
2859,
2861
],
[
3917,
3919
],
[
5024,
5026
],
[
5133,
5135
],
[
5266,
5268
],
[
6215,
6217
],
[
6324,
6326
],
[
6550,
6552
],
[
6595,
6597
],
[
6629,
6631
],
[
6651,
6653
],
[
6677,
6679
],
[
7334,
7336
],
[
7454,
7456
],
[
7719,
7721
],
[
7758,
7760
],
[
7773,
7775
],
[
7809,
7811
],
[
7817,
7819
],
[
7977,
7979
],
[
8073,
8075
],
[
8877,
8879
],
[
8998,
9000
],
[
10162,
10164
],
[
11267,
11269
],
[
12271,
12273
],
[
12380,
12382
],
[
13291,
13293
],
[
13400,
13402
],
[
13727,
13729
],
[
13758,
13760
],
[
15463,
15465
],
[
15846,
15848
],
[
16130,
16132
],
[
16456,
16458
],
[
18484,
18486
],
[
18747,
18749
],
[
18775,
18777
],
[
18872,
18874
],
[
19245,
19247
],
[
19440,
19442
]
],
[
[
847,
853
],
[
3944,
3950
],
[
4005,
4011
],
[
5365,
5371
],
[
5417,
5423
],
[
6439,
6445
],
[
7587,
7593
],
[
9131,
9137
],
[
9169,
9175
],
[
10253,
10259
],
[
10291,
10297
],
[
11358,
11364
],
[
11409,
11415
],
[
12495,
12501
],
[
12520,
12526
],
[
12567,
12573
],
[
13483,
13489
],
[
13618,
13624
],
[
16658,
16664
],
[
16712,
16718
],
[
16768,
16774
],
[
16821,
16827
],
[
18987,
18993
],
[
19029,
19035
],
[
19126,
19132
]
],
[
[
875,
888
],
[
2175,
2188
],
[
2159,
2172
],
[
4975,
4988
],
[
6130,
6143
],
[
7116,
7129
],
[
8447,
8460
],
[
11189,
11202
],
[
15168,
15181
],
[
18399,
18412
],
[
20142,
20155
]
],
[
[
890,
901
],
[
2124,
2135
],
[
2110,
2121
],
[
4936,
4947
],
[
6171,
6182
],
[
7156,
7167
],
[
8487,
8498
],
[
10118,
10129
],
[
12226,
12237
],
[
15208,
15219
],
[
18439,
18450
],
[
20182,
20193
]
],
[
[
958,
969
],
[
1829,
1840
]
],
[
[
1026,
1048
],
[
17200,
17222
]
],
[
[
1054,
1082
],
[
18224,
18252
]
],
[
[
1125,
1143
],
[
1999,
2017
]
],
[
[
1149,
1157
],
[
4415,
4423
]
],
[
[
1163,
1177
],
[
4822,
4836
]
],
[
[
1183,
1203
],
[
6747,
6767
],
[
7192,
7212
]
],
[
[
1209,
1235
],
[
6909,
6935
]
],
[
[
1241,
1268
],
[
7895,
7922
],
[
8654,
8681
]
],
[
[
1274,
1307
],
[
8353,
8386
]
],
[
[
1313,
1329
],
[
11811,
11827
]
],
[
[
1335,
1357
],
[
12102,
12124
]
],
[
[
1363,
1370
],
[
10938,
10945
],
[
12944,
12951
]
],
[
[
1376,
1389
],
[
10739,
10752
],
[
13192,
13205
]
],
[
[
1395,
1403
],
[
5759,
5767
],
[
9607,
9615
],
[
17356,
17364
]
],
[
[
1409,
1423
],
[
6011,
6025
],
[
9740,
9754
],
[
18194,
18208
]
],
[
[
1429,
1444
],
[
14207,
14222
],
[
15603,
15618
]
],
[
[
1450,
1471
],
[
15031,
15052
]
],
[
[
1477,
1483
],
[
19639,
19645
]
],
[
[
1489,
1501
],
[
19999,
20011
]
],
[
[
1576,
1605
],
[
18151,
18180
]
],
[
[
1673,
1696
],
[
17581,
17604
]
],
[
[
1702,
1713
],
[
4290,
4301
],
[
4336,
4347
],
[
4382,
4393
],
[
5677,
5688
],
[
5723,
5734
],
[
9562,
9573
],
[
10683,
10694
],
[
11721,
11732
],
[
11767,
11778
],
[
12862,
12873
],
[
12904,
12915
],
[
17110,
17121
],
[
17161,
17172
],
[
17267,
17278
],
[
17313,
17324
],
[
19523,
19534
],
[
19589,
19600
]
],
[
[
1721,
1731
],
[
3007,
3017
],
[
3668,
3678
]
],
[
[
1737,
1740
],
[
4638,
4641
],
[
4698,
4701
],
[
5046,
5049
],
[
5161,
5164
],
[
5288,
5291
],
[
5865,
5868
],
[
5925,
5928
],
[
6237,
6240
],
[
6352,
6355
],
[
10184,
10187
],
[
11289,
11292
],
[
11955,
11958
],
[
12016,
12019
],
[
12293,
12296
],
[
12408,
12411
],
[
13058,
13061
],
[
13114,
13117
],
[
13313,
13316
],
[
13420,
13423
],
[
14366,
14369
],
[
14426,
14429
],
[
14673,
14676
],
[
14789,
14792
],
[
16490,
16493
],
[
17869,
17872
],
[
17929,
17932
],
[
17980,
17983
],
[
18043,
18046
],
[
18512,
18515
],
[
18769,
18772
],
[
18900,
18903
],
[
19811,
19814
],
[
19820,
19823
],
[
20280,
20283
]
],
[
[
1747,
1765
],
[
4617,
4635
],
[
4677,
4695
],
[
5844,
5862
],
[
5904,
5922
],
[
11934,
11952
],
[
11995,
12013
],
[
13037,
13055
],
[
13093,
13111
],
[
14345,
14363
],
[
14405,
14423
],
[
14652,
14670
],
[
14768,
14786
],
[
17848,
17866
],
[
17908,
17926
],
[
17959,
17977
],
[
18022,
18040
]
],
[
[
1813,
1825
],
[
4656,
4668
],
[
4720,
4732
],
[
5883,
5895
],
[
11916,
11928
],
[
11974,
11986
],
[
13076,
13088
],
[
17887,
17899
],
[
18001,
18013
]
],
[
[
1948,
1979
],
[
4781,
4812
],
[
5970,
6001
],
[
6868,
6899
],
[
8312,
8343
],
[
9946,
9977
],
[
11044,
11075
],
[
12061,
12092
],
[
13151,
13182
],
[
14990,
15021
],
[
18110,
18141
],
[
19958,
19989
]
],
[
[
4061,
4085
]
],
[
[
5473,
5497
]
],
[
[
6467,
6493
]
],
[
[
7615,
7649
]
],
[
[
9225,
9246
]
],
[
[
10347,
10368
]
],
[
[
11465,
11482
]
],
[
[
12623,
12635
]
],
[
[
13674,
13695
]
],
[
[
16870,
16891
]
],
[
[
19182,
19204
]
]
] |
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Nitf(KaitaiStruct):
"""The NITF (National Image Transition Format) format is a file format developed by the U.S. Government for
storing imagery, e.g. from satellites.
According to the [foreword of the specification](https://gwg.nga.mil/ntb/baseline/docs/2500c/2500C.pdf):
> The National Imagery Transmission Format Standard (NITFS) is the suite of standards for formatting digital
> imagery and imagery-related products and exchanging them among members of the Intelligence Community (IC) as
> defined by the Executive Order 12333, and other United States Government departments and agencies."
This implementation is set to version format (`file_version`) of 02.10 and `standard_type` of `BF01`.
It was implemented by [River Loop Security](https://riverloopsecurity.com).
.. seealso::
Source - https://gwg.nga.mil/ntb/baseline/docs/2500c/2500C.pdf
"""
SEQ_FIELDS = ["header", "image_segments", "graphics_segments", "text_segments", "data_extension_segments", "reserved_extension_segments"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header']['start'] = self._io.pos()
self.header = self._root.Header(self._io, self, self._root)
self.header._read()
self._debug['header']['end'] = self._io.pos()
self._debug['image_segments']['start'] = self._io.pos()
self.image_segments = [None] * (int(self.header.num_image_segments))
for i in range(int(self.header.num_image_segments)):
if not 'arr' in self._debug['image_segments']:
self._debug['image_segments']['arr'] = []
self._debug['image_segments']['arr'].append({'start': self._io.pos()})
_t_image_segments = self._root.ImageSegment(i, self._io, self, self._root)
_t_image_segments._read()
self.image_segments[i] = _t_image_segments
self._debug['image_segments']['arr'][i]['end'] = self._io.pos()
self._debug['image_segments']['end'] = self._io.pos()
self._debug['graphics_segments']['start'] = self._io.pos()
self.graphics_segments = [None] * (int(self.header.num_graphics_segments))
for i in range(int(self.header.num_graphics_segments)):
if not 'arr' in self._debug['graphics_segments']:
self._debug['graphics_segments']['arr'] = []
self._debug['graphics_segments']['arr'].append({'start': self._io.pos()})
_t_graphics_segments = self._root.GraphicsSegment(i, self._io, self, self._root)
_t_graphics_segments._read()
self.graphics_segments[i] = _t_graphics_segments
self._debug['graphics_segments']['arr'][i]['end'] = self._io.pos()
self._debug['graphics_segments']['end'] = self._io.pos()
self._debug['text_segments']['start'] = self._io.pos()
self.text_segments = [None] * (int(self.header.num_text_files))
for i in range(int(self.header.num_text_files)):
if not 'arr' in self._debug['text_segments']:
self._debug['text_segments']['arr'] = []
self._debug['text_segments']['arr'].append({'start': self._io.pos()})
_t_text_segments = self._root.TextSegment(i, self._io, self, self._root)
_t_text_segments._read()
self.text_segments[i] = _t_text_segments
self._debug['text_segments']['arr'][i]['end'] = self._io.pos()
self._debug['text_segments']['end'] = self._io.pos()
self._debug['data_extension_segments']['start'] = self._io.pos()
self.data_extension_segments = [None] * (int(self.header.num_data_extension))
for i in range(int(self.header.num_data_extension)):
if not 'arr' in self._debug['data_extension_segments']:
self._debug['data_extension_segments']['arr'] = []
self._debug['data_extension_segments']['arr'].append({'start': self._io.pos()})
_t_data_extension_segments = self._root.DataExtensionSegment(i, self._io, self, self._root)
_t_data_extension_segments._read()
self.data_extension_segments[i] = _t_data_extension_segments
self._debug['data_extension_segments']['arr'][i]['end'] = self._io.pos()
self._debug['data_extension_segments']['end'] = self._io.pos()
self._debug['reserved_extension_segments']['start'] = self._io.pos()
self.reserved_extension_segments = [None] * (int(self.header.num_reserved_extension))
for i in range(int(self.header.num_reserved_extension)):
if not 'arr' in self._debug['reserved_extension_segments']:
self._debug['reserved_extension_segments']['arr'] = []
self._debug['reserved_extension_segments']['arr'].append({'start': self._io.pos()})
_t_reserved_extension_segments = self._root.ReservedExtensionSegment(i, self._io, self, self._root)
_t_reserved_extension_segments._read()
self.reserved_extension_segments[i] = _t_reserved_extension_segments
self._debug['reserved_extension_segments']['arr'][i]['end'] = self._io.pos()
self._debug['reserved_extension_segments']['end'] = self._io.pos()
class ReservedExtensionSegment(KaitaiStruct):
SEQ_FIELDS = ["reserved_sub_header", "reserved_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved_sub_header']['start'] = self._io.pos()
self._raw_reserved_sub_header = self._io.read_bytes(int(self._parent.header.lrnfo[self.idx].length_reserved_extension_subheader))
_io__raw_reserved_sub_header = KaitaiStream(BytesIO(self._raw_reserved_sub_header))
self.reserved_sub_header = self._root.ReservedSubHeader(_io__raw_reserved_sub_header, self, self._root)
self.reserved_sub_header._read()
self._debug['reserved_sub_header']['end'] = self._io.pos()
self._debug['reserved_data_field']['start'] = self._io.pos()
self.reserved_data_field = self._io.read_bytes(int(self._parent.header.lrnfo[self.idx].length_reserved_extension_segment))
self._debug['reserved_data_field']['end'] = self._io.pos()
class ImageComment(KaitaiStruct):
SEQ_FIELDS = ["_unnamed0"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['_unnamed0']['start'] = self._io.pos()
self._unnamed0 = (self._io.read_bytes(80)).decode(u"UTF-8")
self._debug['_unnamed0']['end'] = self._io.pos()
class LengthReservedInfo(KaitaiStruct):
SEQ_FIELDS = ["length_reserved_extension_subheader", "length_reserved_extension_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_reserved_extension_subheader']['start'] = self._io.pos()
self.length_reserved_extension_subheader = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['length_reserved_extension_subheader']['end'] = self._io.pos()
self._debug['length_reserved_extension_segment']['start'] = self._io.pos()
self.length_reserved_extension_segment = (self._io.read_bytes(7)).decode(u"UTF-8")
self._debug['length_reserved_extension_segment']['end'] = self._io.pos()
class Tre(KaitaiStruct):
SEQ_FIELDS = ["extension_type_id", "edata_length", "edata"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['extension_type_id']['start'] = self._io.pos()
self.extension_type_id = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['extension_type_id']['end'] = self._io.pos()
self._debug['edata_length']['start'] = self._io.pos()
self.edata_length = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['edata_length']['end'] = self._io.pos()
self._debug['edata']['start'] = self._io.pos()
self.edata = (self._io.read_bytes(int(self.edata_length))).decode(u"UTF-8")
self._debug['edata']['end'] = self._io.pos()
class BandInfo(KaitaiStruct):
SEQ_FIELDS = ["representation", "subcategory", "img_filter_condition", "img_filter_code", "num_luts", "num_lut_entries", "luts"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['representation']['start'] = self._io.pos()
self.representation = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['representation']['end'] = self._io.pos()
self._debug['subcategory']['start'] = self._io.pos()
self.subcategory = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['subcategory']['end'] = self._io.pos()
self._debug['img_filter_condition']['start'] = self._io.pos()
self.img_filter_condition = self._io.read_bytes(1)
self._debug['img_filter_condition']['end'] = self._io.pos()
if not self.img_filter_condition == b"\x4E":
raise kaitaistruct.ValidationNotEqualError(b"\x4E", self.img_filter_condition, self._io, u"/types/band_info/seq/2")
self._debug['img_filter_code']['start'] = self._io.pos()
self.img_filter_code = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['img_filter_code']['end'] = self._io.pos()
self._debug['num_luts']['start'] = self._io.pos()
self.num_luts = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['num_luts']['end'] = self._io.pos()
if int(self.num_luts) != 0:
self._debug['num_lut_entries']['start'] = self._io.pos()
self.num_lut_entries = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['num_lut_entries']['end'] = self._io.pos()
self._debug['luts']['start'] = self._io.pos()
self.luts = [None] * (int(self.num_luts))
for i in range(int(self.num_luts)):
if not 'arr' in self._debug['luts']:
self._debug['luts']['arr'] = []
self._debug['luts']['arr'].append({'start': self._io.pos()})
self.luts[i] = self._io.read_bytes(int(self.num_lut_entries))
self._debug['luts']['arr'][i]['end'] = self._io.pos()
self._debug['luts']['end'] = self._io.pos()
class ImageSegment(KaitaiStruct):
SEQ_FIELDS = ["image_sub_header", "image_data_mask", "image_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['image_sub_header']['start'] = self._io.pos()
self.image_sub_header = self._root.ImageSubHeader(self._io, self, self._root)
self.image_sub_header._read()
self._debug['image_sub_header']['end'] = self._io.pos()
if self.has_mask:
self._debug['image_data_mask']['start'] = self._io.pos()
self.image_data_mask = self._root.ImageDataMask(self._io, self, self._root)
self.image_data_mask._read()
self._debug['image_data_mask']['end'] = self._io.pos()
if self.has_mask:
self._debug['image_data_field']['start'] = self._io.pos()
self.image_data_field = self._io.read_bytes((int(self._parent.header.linfo[self.idx].length_image_segment) - self.image_data_mask.total_size))
self._debug['image_data_field']['end'] = self._io.pos()
@property
def has_mask(self):
if hasattr(self, '_m_has_mask'):
return self._m_has_mask if hasattr(self, '_m_has_mask') else None
self._m_has_mask = ((self.image_sub_header.img_compression[0:1] == u"M") or (self.image_sub_header.img_compression[1:2] == u"M"))
return self._m_has_mask if hasattr(self, '_m_has_mask') else None
class TextSegment(KaitaiStruct):
SEQ_FIELDS = ["text_sub_header", "text_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['text_sub_header']['start'] = self._io.pos()
self.text_sub_header = self._io.read_bytes(1)
self._debug['text_sub_header']['end'] = self._io.pos()
self._debug['text_data_field']['start'] = self._io.pos()
self.text_data_field = self._io.read_bytes(int(self._parent.header.ltnfo[self.idx].length_text_segment))
self._debug['text_data_field']['end'] = self._io.pos()
class GraphicSubHeader(KaitaiStruct):
SEQ_FIELDS = ["file_part_type_sy", "graphic_id", "graphic_name", "graphic_classification", "encryption", "graphic_type", "reserved1", "graphic_display_level", "graphic_attachment_level", "graphic_location", "first_graphic_bound_loc", "graphic_color", "second_graphic_bound_loc", "reserved2", "graphics_extended_sub_header"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_part_type_sy']['start'] = self._io.pos()
self.file_part_type_sy = self._io.read_bytes(2)
self._debug['file_part_type_sy']['end'] = self._io.pos()
if not self.file_part_type_sy == b"\x53\x59":
raise kaitaistruct.ValidationNotEqualError(b"\x53\x59", self.file_part_type_sy, self._io, u"/types/graphic_sub_header/seq/0")
self._debug['graphic_id']['start'] = self._io.pos()
self.graphic_id = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['graphic_id']['end'] = self._io.pos()
self._debug['graphic_name']['start'] = self._io.pos()
self.graphic_name = (self._io.read_bytes(20)).decode(u"UTF-8")
self._debug['graphic_name']['end'] = self._io.pos()
self._debug['graphic_classification']['start'] = self._io.pos()
self.graphic_classification = self._root.Clasnfo(self._io, self, self._root)
self.graphic_classification._read()
self._debug['graphic_classification']['end'] = self._io.pos()
self._debug['encryption']['start'] = self._io.pos()
self.encryption = self._root.Encrypt(self._io, self, self._root)
self.encryption._read()
self._debug['encryption']['end'] = self._io.pos()
self._debug['graphic_type']['start'] = self._io.pos()
self.graphic_type = self._io.read_bytes(1)
self._debug['graphic_type']['end'] = self._io.pos()
if not self.graphic_type == b"\x43":
raise kaitaistruct.ValidationNotEqualError(b"\x43", self.graphic_type, self._io, u"/types/graphic_sub_header/seq/5")
self._debug['reserved1']['start'] = self._io.pos()
self.reserved1 = (self._io.read_bytes(13)).decode(u"UTF-8")
self._debug['reserved1']['end'] = self._io.pos()
self._debug['graphic_display_level']['start'] = self._io.pos()
self.graphic_display_level = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['graphic_display_level']['end'] = self._io.pos()
self._debug['graphic_attachment_level']['start'] = self._io.pos()
self.graphic_attachment_level = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['graphic_attachment_level']['end'] = self._io.pos()
self._debug['graphic_location']['start'] = self._io.pos()
self.graphic_location = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['graphic_location']['end'] = self._io.pos()
self._debug['first_graphic_bound_loc']['start'] = self._io.pos()
self.first_graphic_bound_loc = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['first_graphic_bound_loc']['end'] = self._io.pos()
self._debug['graphic_color']['start'] = self._io.pos()
self.graphic_color = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['graphic_color']['end'] = self._io.pos()
self._debug['second_graphic_bound_loc']['start'] = self._io.pos()
self.second_graphic_bound_loc = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['second_graphic_bound_loc']['end'] = self._io.pos()
self._debug['reserved2']['start'] = self._io.pos()
self.reserved2 = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['reserved2']['end'] = self._io.pos()
self._debug['graphics_extended_sub_header']['start'] = self._io.pos()
self.graphics_extended_sub_header = self._root.TreHeader(self._io, self, self._root)
self.graphics_extended_sub_header._read()
self._debug['graphics_extended_sub_header']['end'] = self._io.pos()
class Clasnfo(KaitaiStruct):
SEQ_FIELDS = ["security_class", "security_system", "codewords", "control_and_handling", "releaseability", "declass_type", "declass_date", "declass_exemption", "downgrade", "downgrade_date", "class_text", "class_authority_type", "class_authority", "class_reason", "source_date", "control_number"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['security_class']['start'] = self._io.pos()
self.security_class = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['security_class']['end'] = self._io.pos()
self._debug['security_system']['start'] = self._io.pos()
self.security_system = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['security_system']['end'] = self._io.pos()
self._debug['codewords']['start'] = self._io.pos()
self.codewords = (self._io.read_bytes(11)).decode(u"UTF-8")
self._debug['codewords']['end'] = self._io.pos()
self._debug['control_and_handling']['start'] = self._io.pos()
self.control_and_handling = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['control_and_handling']['end'] = self._io.pos()
self._debug['releaseability']['start'] = self._io.pos()
self.releaseability = (self._io.read_bytes(20)).decode(u"UTF-8")
self._debug['releaseability']['end'] = self._io.pos()
self._debug['declass_type']['start'] = self._io.pos()
self.declass_type = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['declass_type']['end'] = self._io.pos()
self._debug['declass_date']['start'] = self._io.pos()
self.declass_date = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['declass_date']['end'] = self._io.pos()
self._debug['declass_exemption']['start'] = self._io.pos()
self.declass_exemption = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['declass_exemption']['end'] = self._io.pos()
self._debug['downgrade']['start'] = self._io.pos()
self.downgrade = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['downgrade']['end'] = self._io.pos()
self._debug['downgrade_date']['start'] = self._io.pos()
self.downgrade_date = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['downgrade_date']['end'] = self._io.pos()
self._debug['class_text']['start'] = self._io.pos()
self.class_text = (self._io.read_bytes(43)).decode(u"UTF-8")
self._debug['class_text']['end'] = self._io.pos()
self._debug['class_authority_type']['start'] = self._io.pos()
self.class_authority_type = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['class_authority_type']['end'] = self._io.pos()
self._debug['class_authority']['start'] = self._io.pos()
self.class_authority = (self._io.read_bytes(40)).decode(u"UTF-8")
self._debug['class_authority']['end'] = self._io.pos()
self._debug['class_reason']['start'] = self._io.pos()
self.class_reason = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['class_reason']['end'] = self._io.pos()
self._debug['source_date']['start'] = self._io.pos()
self.source_date = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['source_date']['end'] = self._io.pos()
self._debug['control_number']['start'] = self._io.pos()
self.control_number = (self._io.read_bytes(15)).decode(u"UTF-8")
self._debug['control_number']['end'] = self._io.pos()
class LengthGraphicInfo(KaitaiStruct):
SEQ_FIELDS = ["length_graphic_subheader", "length_graphic_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_graphic_subheader']['start'] = self._io.pos()
self.length_graphic_subheader = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['length_graphic_subheader']['end'] = self._io.pos()
self._debug['length_graphic_segment']['start'] = self._io.pos()
self.length_graphic_segment = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['length_graphic_segment']['end'] = self._io.pos()
class Encrypt(KaitaiStruct):
SEQ_FIELDS = ["_unnamed0"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['_unnamed0']['start'] = self._io.pos()
self._unnamed0 = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['_unnamed0']['end'] = self._io.pos()
class ImageDataMask(KaitaiStruct):
SEQ_FIELDS = ["blocked_img_data_offset", "bmrlnth", "tmrlnth", "tpxcdlnth", "tpxcd", "bmrbnd", "tmrbnd"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['blocked_img_data_offset']['start'] = self._io.pos()
self.blocked_img_data_offset = self._io.read_u4be()
self._debug['blocked_img_data_offset']['end'] = self._io.pos()
self._debug['bmrlnth']['start'] = self._io.pos()
self.bmrlnth = self._io.read_u2be()
self._debug['bmrlnth']['end'] = self._io.pos()
self._debug['tmrlnth']['start'] = self._io.pos()
self.tmrlnth = self._io.read_u2be()
self._debug['tmrlnth']['end'] = self._io.pos()
self._debug['tpxcdlnth']['start'] = self._io.pos()
self.tpxcdlnth = self._io.read_u2be()
self._debug['tpxcdlnth']['end'] = self._io.pos()
self._debug['tpxcd']['start'] = self._io.pos()
self.tpxcd = self._io.read_bytes(self.tpxcd_size)
self._debug['tpxcd']['end'] = self._io.pos()
if self.has_bmr:
self._debug['bmrbnd']['start'] = self._io.pos()
self.bmrbnd = [None] * (self.bmrtmr_count)
for i in range(self.bmrtmr_count):
if not 'arr' in self._debug['bmrbnd']:
self._debug['bmrbnd']['arr'] = []
self._debug['bmrbnd']['arr'].append({'start': self._io.pos()})
self.bmrbnd[i] = self._io.read_u4be()
self._debug['bmrbnd']['arr'][i]['end'] = self._io.pos()
self._debug['bmrbnd']['end'] = self._io.pos()
if self.has_tmr:
self._debug['tmrbnd']['start'] = self._io.pos()
self.tmrbnd = [None] * (self.bmrtmr_count)
for i in range(self.bmrtmr_count):
if not 'arr' in self._debug['tmrbnd']:
self._debug['tmrbnd']['arr'] = []
self._debug['tmrbnd']['arr'].append({'start': self._io.pos()})
self.tmrbnd[i] = self._io.read_u4be()
self._debug['tmrbnd']['arr'][i]['end'] = self._io.pos()
self._debug['tmrbnd']['end'] = self._io.pos()
@property
def has_bmr(self):
if hasattr(self, '_m_has_bmr'):
return self._m_has_bmr if hasattr(self, '_m_has_bmr') else None
self._m_has_bmr = self.bmrlnth != 0
return self._m_has_bmr if hasattr(self, '_m_has_bmr') else None
@property
def has_tmr(self):
if hasattr(self, '_m_has_tmr'):
return self._m_has_tmr if hasattr(self, '_m_has_tmr') else None
self._m_has_tmr = self.tmrlnth != 0
return self._m_has_tmr if hasattr(self, '_m_has_tmr') else None
@property
def tmrbnd_size(self):
if hasattr(self, '_m_tmrbnd_size'):
return self._m_tmrbnd_size if hasattr(self, '_m_tmrbnd_size') else None
self._m_tmrbnd_size = ((self.bmrtmr_count * 4) if self.has_tmr else 0)
return self._m_tmrbnd_size if hasattr(self, '_m_tmrbnd_size') else None
@property
def tpxcd_size(self):
if hasattr(self, '_m_tpxcd_size'):
return self._m_tpxcd_size if hasattr(self, '_m_tpxcd_size') else None
self._m_tpxcd_size = (self.tpxcdlnth if (self.tpxcdlnth % 8) == 0 else (self.tpxcdlnth + (8 - (self.tpxcdlnth % 8)))) // 8
return self._m_tpxcd_size if hasattr(self, '_m_tpxcd_size') else None
@property
def total_size(self):
if hasattr(self, '_m_total_size'):
return self._m_total_size if hasattr(self, '_m_total_size') else None
self._m_total_size = ((((((4 + 2) + 2) + 2) + self.tpxcd_size) + self.bmrbnd_size) + self.tmrbnd_size)
return self._m_total_size if hasattr(self, '_m_total_size') else None
@property
def bmrbnd_size(self):
if hasattr(self, '_m_bmrbnd_size'):
return self._m_bmrbnd_size if hasattr(self, '_m_bmrbnd_size') else None
self._m_bmrbnd_size = ((self.bmrtmr_count * 4) if self.has_bmr else 0)
return self._m_bmrbnd_size if hasattr(self, '_m_bmrbnd_size') else None
@property
def bmrtmr_count(self):
if hasattr(self, '_m_bmrtmr_count'):
return self._m_bmrtmr_count if hasattr(self, '_m_bmrtmr_count') else None
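# (in words, per the expression below) one entry per image block:
# num_blocks_per_row * num_blocks_per_col, multiplied by the band count
# only when img_mode == "S" (band-sequential), otherwise by 1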
self._m_bmrtmr_count = ((int(self._parent.image_sub_header.num_blocks_per_row) * int(self._parent.image_sub_header.num_blocks_per_col)) * (1 if self._parent.image_sub_header.img_mode != u"S" else (int(self._parent.image_sub_header.num_bands) if int(self._parent.image_sub_header.num_bands) != 0 else int(self._parent.image_sub_header.num_multispectral_bands))))
return self._m_bmrtmr_count if hasattr(self, '_m_bmrtmr_count') else None
class GraphicsSegment(KaitaiStruct):
SEQ_FIELDS = ["graphic_sub_header", "graphic_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['graphic_sub_header']['start'] = self._io.pos()
self.graphic_sub_header = self._root.GraphicSubHeader(self._io, self, self._root)
self.graphic_sub_header._read()
self._debug['graphic_sub_header']['end'] = self._io.pos()
self._debug['graphic_data_field']['start'] = self._io.pos()
self.graphic_data_field = self._io.read_bytes(int(self._parent.header.lnnfo[self.idx].length_graphic_segment))
self._debug['graphic_data_field']['end'] = self._io.pos()
class DataSubHeader(KaitaiStruct):
SEQ_FIELDS = ["des_base", "overflowed_header_type", "data_item_overflowed", "des_defined_subheader_fields_len", "desshf", "des_defined_data_field"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['des_base']['start'] = self._io.pos()
self.des_base = self._root.DataSubHeaderBase(self._io, self, self._root)
self.des_base._read()
self._debug['des_base']['end'] = self._io.pos()
if self.tre_ofl:
self._debug['overflowed_header_type']['start'] = self._io.pos()
self.overflowed_header_type = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['overflowed_header_type']['end'] = self._io.pos()
if self.tre_ofl:
self._debug['data_item_overflowed']['start'] = self._io.pos()
self.data_item_overflowed = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['data_item_overflowed']['end'] = self._io.pos()
self._debug['des_defined_subheader_fields_len']['start'] = self._io.pos()
self.des_defined_subheader_fields_len = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['des_defined_subheader_fields_len']['end'] = self._io.pos()
self._debug['desshf']['start'] = self._io.pos()
self.desshf = (self._io.read_bytes(int(self.des_defined_subheader_fields_len))).decode(u"UTF-8")
self._debug['desshf']['end'] = self._io.pos()
self._debug['des_defined_data_field']['start'] = self._io.pos()
self.des_defined_data_field = (self._io.read_bytes_full()).decode(u"UTF-8")
self._debug['des_defined_data_field']['end'] = self._io.pos()
@property
def tre_ofl(self):
if hasattr(self, '_m_tre_ofl'):
return self._m_tre_ofl if hasattr(self, '_m_tre_ofl') else None
self._m_tre_ofl = self.des_base.desid == u"TRE_OVERFLOW"
return self._m_tre_ofl if hasattr(self, '_m_tre_ofl') else None
class DataExtensionSegment(KaitaiStruct):
SEQ_FIELDS = ["data_sub_header", "data_data_field"]
def __init__(self, idx, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.idx = idx
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['data_sub_header']['start'] = self._io.pos()
self._raw_data_sub_header = self._io.read_bytes(int(self._parent.header.ldnfo[self.idx].length_data_extension_subheader))
_io__raw_data_sub_header = KaitaiStream(BytesIO(self._raw_data_sub_header))
self.data_sub_header = self._root.DataSubHeader(_io__raw_data_sub_header, self, self._root)
self.data_sub_header._read()
self._debug['data_sub_header']['end'] = self._io.pos()
self._debug['data_data_field']['start'] = self._io.pos()
self.data_data_field = self._io.read_bytes(int(self._parent.header.ldnfo[self.idx].length_data_extension_segment))
self._debug['data_data_field']['end'] = self._io.pos()
class DataSubHeaderTre(KaitaiStruct):
SEQ_FIELDS = ["des_base", "overflowed_header_type", "data_item_overflowed", "des_defined_subheader_fields_len", "des_defined_data_field"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['des_base']['start'] = self._io.pos()
self.des_base = self._root.DataSubHeaderBase(self._io, self, self._root)
self.des_base._read()
self._debug['des_base']['end'] = self._io.pos()
if self.des_base.desid == u"TRE_OVERFLOW":
self._debug['overflowed_header_type']['start'] = self._io.pos()
self.overflowed_header_type = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['overflowed_header_type']['end'] = self._io.pos()
if self.des_base.desid == u"TRE_OVERFLOW":
self._debug['data_item_overflowed']['start'] = self._io.pos()
self.data_item_overflowed = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['data_item_overflowed']['end'] = self._io.pos()
self._debug['des_defined_subheader_fields_len']['start'] = self._io.pos()
self.des_defined_subheader_fields_len = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['des_defined_subheader_fields_len']['end'] = self._io.pos()
self._debug['des_defined_data_field']['start'] = self._io.pos()
self.des_defined_data_field = (self._io.read_bytes(int(self.des_defined_subheader_fields_len))).decode(u"UTF-8")
self._debug['des_defined_data_field']['end'] = self._io.pos()
class ImageSubHeader(KaitaiStruct):
SEQ_FIELDS = ["file_part_type", "image_id_1", "image_date_time", "target_id", "image_id_2", "image_security_classification", "encryption", "image_source", "num_sig_rows", "num_sig_cols", "pixel_value_type", "image_representation", "image_category", "actual_bits_per_pixel_per_band", "pixel_justification", "image_coordinate_rep", "image_geo_loc", "num_img_comments", "img_comments", "img_compression", "compression_rate_code", "num_bands", "num_multispectral_bands", "bands", "img_sync_code", "img_mode", "num_blocks_per_row", "num_blocks_per_col", "num_pixels_per_block_horz", "num_pixels_per_block_vert", "num_pixels_per_band", "img_display_level", "attachment_level", "img_location", "img_magnification", "user_def_img_data_len", "user_def_overflow", "user_def_img_data", "image_extended_sub_header"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_part_type']['start'] = self._io.pos()
self.file_part_type = self._io.read_bytes(2)
self._debug['file_part_type']['end'] = self._io.pos()
if not self.file_part_type == b"\x49\x4D":
raise kaitaistruct.ValidationNotEqualError(b"\x49\x4D", self.file_part_type, self._io, u"/types/image_sub_header/seq/0")
self._debug['image_id_1']['start'] = self._io.pos()
self.image_id_1 = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['image_id_1']['end'] = self._io.pos()
self._debug['image_date_time']['start'] = self._io.pos()
self.image_date_time = self._root.DateTime(self._io, self, self._root)
self.image_date_time._read()
self._debug['image_date_time']['end'] = self._io.pos()
self._debug['target_id']['start'] = self._io.pos()
self.target_id = (self._io.read_bytes(17)).decode(u"UTF-8")
self._debug['target_id']['end'] = self._io.pos()
self._debug['image_id_2']['start'] = self._io.pos()
self.image_id_2 = (self._io.read_bytes(80)).decode(u"UTF-8")
self._debug['image_id_2']['end'] = self._io.pos()
self._debug['image_security_classification']['start'] = self._io.pos()
self.image_security_classification = self._root.Clasnfo(self._io, self, self._root)
self.image_security_classification._read()
self._debug['image_security_classification']['end'] = self._io.pos()
self._debug['encryption']['start'] = self._io.pos()
self.encryption = self._root.Encrypt(self._io, self, self._root)
self.encryption._read()
self._debug['encryption']['end'] = self._io.pos()
self._debug['image_source']['start'] = self._io.pos()
self.image_source = (self._io.read_bytes(42)).decode(u"UTF-8")
self._debug['image_source']['end'] = self._io.pos()
self._debug['num_sig_rows']['start'] = self._io.pos()
self.num_sig_rows = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['num_sig_rows']['end'] = self._io.pos()
self._debug['num_sig_cols']['start'] = self._io.pos()
self.num_sig_cols = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['num_sig_cols']['end'] = self._io.pos()
self._debug['pixel_value_type']['start'] = self._io.pos()
self.pixel_value_type = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['pixel_value_type']['end'] = self._io.pos()
self._debug['image_representation']['start'] = self._io.pos()
self.image_representation = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['image_representation']['end'] = self._io.pos()
self._debug['image_category']['start'] = self._io.pos()
self.image_category = (self._io.read_bytes(8)).decode(u"UTF-8")
self._debug['image_category']['end'] = self._io.pos()
self._debug['actual_bits_per_pixel_per_band']['start'] = self._io.pos()
self.actual_bits_per_pixel_per_band = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['actual_bits_per_pixel_per_band']['end'] = self._io.pos()
self._debug['pixel_justification']['start'] = self._io.pos()
self.pixel_justification = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['pixel_justification']['end'] = self._io.pos()
self._debug['image_coordinate_rep']['start'] = self._io.pos()
self.image_coordinate_rep = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['image_coordinate_rep']['end'] = self._io.pos()
self._debug['image_geo_loc']['start'] = self._io.pos()
self.image_geo_loc = (self._io.read_bytes(60)).decode(u"UTF-8")
self._debug['image_geo_loc']['end'] = self._io.pos()
self._debug['num_img_comments']['start'] = self._io.pos()
self.num_img_comments = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['num_img_comments']['end'] = self._io.pos()
self._debug['img_comments']['start'] = self._io.pos()
self.img_comments = [None] * (int(self.num_img_comments))
for i in range(int(self.num_img_comments)):
if not 'arr' in self._debug['img_comments']:
self._debug['img_comments']['arr'] = []
self._debug['img_comments']['arr'].append({'start': self._io.pos()})
_t_img_comments = self._root.ImageComment(self._io, self, self._root)
_t_img_comments._read()
self.img_comments[i] = _t_img_comments
self._debug['img_comments']['arr'][i]['end'] = self._io.pos()
self._debug['img_comments']['end'] = self._io.pos()
self._debug['img_compression']['start'] = self._io.pos()
self.img_compression = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['img_compression']['end'] = self._io.pos()
self._debug['compression_rate_code']['start'] = self._io.pos()
self.compression_rate_code = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['compression_rate_code']['end'] = self._io.pos()
self._debug['num_bands']['start'] = self._io.pos()
self.num_bands = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['num_bands']['end'] = self._io.pos()
if int(self.num_bands) == 0:
self._debug['num_multispectral_bands']['start'] = self._io.pos()
self.num_multispectral_bands = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['num_multispectral_bands']['end'] = self._io.pos()
self._debug['bands']['start'] = self._io.pos()
self.bands = [None] * ((int(self.num_bands) if int(self.num_bands) != 0 else int(self.num_multispectral_bands)))
for i in range((int(self.num_bands) if int(self.num_bands) != 0 else int(self.num_multispectral_bands))):
if not 'arr' in self._debug['bands']:
self._debug['bands']['arr'] = []
self._debug['bands']['arr'].append({'start': self._io.pos()})
_t_bands = self._root.BandInfo(self._io, self, self._root)
_t_bands._read()
self.bands[i] = _t_bands
self._debug['bands']['arr'][i]['end'] = self._io.pos()
self._debug['bands']['end'] = self._io.pos()
self._debug['img_sync_code']['start'] = self._io.pos()
self.img_sync_code = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['img_sync_code']['end'] = self._io.pos()
self._debug['img_mode']['start'] = self._io.pos()
self.img_mode = (self._io.read_bytes(1)).decode(u"UTF-8")
self._debug['img_mode']['end'] = self._io.pos()
self._debug['num_blocks_per_row']['start'] = self._io.pos()
self.num_blocks_per_row = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['num_blocks_per_row']['end'] = self._io.pos()
self._debug['num_blocks_per_col']['start'] = self._io.pos()
self.num_blocks_per_col = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['num_blocks_per_col']['end'] = self._io.pos()
self._debug['num_pixels_per_block_horz']['start'] = self._io.pos()
self.num_pixels_per_block_horz = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['num_pixels_per_block_horz']['end'] = self._io.pos()
self._debug['num_pixels_per_block_vert']['start'] = self._io.pos()
self.num_pixels_per_block_vert = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['num_pixels_per_block_vert']['end'] = self._io.pos()
self._debug['num_pixels_per_band']['start'] = self._io.pos()
self.num_pixels_per_band = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['num_pixels_per_band']['end'] = self._io.pos()
self._debug['img_display_level']['start'] = self._io.pos()
self.img_display_level = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['img_display_level']['end'] = self._io.pos()
self._debug['attachment_level']['start'] = self._io.pos()
self.attachment_level = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['attachment_level']['end'] = self._io.pos()
self._debug['img_location']['start'] = self._io.pos()
self.img_location = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['img_location']['end'] = self._io.pos()
self._debug['img_magnification']['start'] = self._io.pos()
self.img_magnification = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['img_magnification']['end'] = self._io.pos()
self._debug['user_def_img_data_len']['start'] = self._io.pos()
self.user_def_img_data_len = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['user_def_img_data_len']['end'] = self._io.pos()
if int(self.user_def_img_data_len) != 0:
self._debug['user_def_overflow']['start'] = self._io.pos()
self.user_def_overflow = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['user_def_overflow']['end'] = self._io.pos()
if int(self.user_def_img_data_len) > 2:
self._debug['user_def_img_data']['start'] = self._io.pos()
self.user_def_img_data = [None] * ((int(self.user_def_img_data_len) - 3))
for i in range((int(self.user_def_img_data_len) - 3)):
if not 'arr' in self._debug['user_def_img_data']:
self._debug['user_def_img_data']['arr'] = []
self._debug['user_def_img_data']['arr'].append({'start': self._io.pos()})
self.user_def_img_data[i] = self._io.read_u1()
self._debug['user_def_img_data']['arr'][i]['end'] = self._io.pos()
self._debug['user_def_img_data']['end'] = self._io.pos()
self._debug['image_extended_sub_header']['start'] = self._io.pos()
self.image_extended_sub_header = self._root.TreHeader(self._io, self, self._root)
self.image_extended_sub_header._read()
self._debug['image_extended_sub_header']['end'] = self._io.pos()
class ReservedSubHeader(KaitaiStruct):
SEQ_FIELDS = ["file_part_type_re", "res_type_id", "res_version", "reclasnfo", "res_user_defined_subheader_length", "res_user_defined_subheader_fields", "res_user_defined_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_part_type_re']['start'] = self._io.pos()
self.file_part_type_re = self._io.read_bytes(2)
self._debug['file_part_type_re']['end'] = self._io.pos()
if not self.file_part_type_re == b"\x52\x45":
raise kaitaistruct.ValidationNotEqualError(b"\x52\x45", self.file_part_type_re, self._io, u"/types/reserved_sub_header/seq/0")
self._debug['res_type_id']['start'] = self._io.pos()
self.res_type_id = (self._io.read_bytes(25)).decode(u"UTF-8")
self._debug['res_type_id']['end'] = self._io.pos()
self._debug['res_version']['start'] = self._io.pos()
self.res_version = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['res_version']['end'] = self._io.pos()
self._debug['reclasnfo']['start'] = self._io.pos()
self.reclasnfo = self._root.Clasnfo(self._io, self, self._root)
self.reclasnfo._read()
self._debug['reclasnfo']['end'] = self._io.pos()
self._debug['res_user_defined_subheader_length']['start'] = self._io.pos()
self.res_user_defined_subheader_length = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['res_user_defined_subheader_length']['end'] = self._io.pos()
self._debug['res_user_defined_subheader_fields']['start'] = self._io.pos()
self.res_user_defined_subheader_fields = (self._io.read_bytes(int(self.res_user_defined_subheader_length))).decode(u"UTF-8")
self._debug['res_user_defined_subheader_fields']['end'] = self._io.pos()
self._debug['res_user_defined_data']['start'] = self._io.pos()
self.res_user_defined_data = (self._io.read_bytes_full()).decode(u"UTF-8")
self._debug['res_user_defined_data']['end'] = self._io.pos()
class DataSubHeaderBase(KaitaiStruct):
SEQ_FIELDS = ["file_part_type_de", "desid", "data_definition_version", "declasnfo"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_part_type_de']['start'] = self._io.pos()
self.file_part_type_de = self._io.read_bytes(2)
self._debug['file_part_type_de']['end'] = self._io.pos()
if not self.file_part_type_de == b"\x44\x45":
raise kaitaistruct.ValidationNotEqualError(b"\x44\x45", self.file_part_type_de, self._io, u"/types/data_sub_header_base/seq/0")
self._debug['desid']['start'] = self._io.pos()
self.desid = (self._io.read_bytes(25)).decode(u"UTF-8")
self._debug['desid']['end'] = self._io.pos()
self._debug['data_definition_version']['start'] = self._io.pos()
self.data_definition_version = (self._io.read_bytes(2)).decode(u"UTF-8")
self._debug['data_definition_version']['end'] = self._io.pos()
self._debug['declasnfo']['start'] = self._io.pos()
self.declasnfo = self._root.Clasnfo(self._io, self, self._root)
self.declasnfo._read()
self._debug['declasnfo']['end'] = self._io.pos()
class TextSubHeader(KaitaiStruct):
SEQ_FIELDS = ["text_date_time", "text_title", "text_security_class", "encryp", "text_format", "text_extended_sub_header"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['text_date_time']['start'] = self._io.pos()
self.text_date_time = (self._io.read_bytes(14)).decode(u"UTF-8")
self._debug['text_date_time']['end'] = self._io.pos()
self._debug['text_title']['start'] = self._io.pos()
self.text_title = (self._io.read_bytes(80)).decode(u"UTF-8")
self._debug['text_title']['end'] = self._io.pos()
self._debug['text_security_class']['start'] = self._io.pos()
self.text_security_class = self._root.Clasnfo(self._io, self, self._root)
self.text_security_class._read()
self._debug['text_security_class']['end'] = self._io.pos()
self._debug['encryp']['start'] = self._io.pos()
self.encryp = self._root.Encrypt(self._io, self, self._root)
self.encryp._read()
self._debug['encryp']['end'] = self._io.pos()
self._debug['text_format']['start'] = self._io.pos()
self.text_format = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['text_format']['end'] = self._io.pos()
self._debug['text_extended_sub_header']['start'] = self._io.pos()
self.text_extended_sub_header = self._root.TreHeader(self._io, self, self._root)
self.text_extended_sub_header._read()
self._debug['text_extended_sub_header']['end'] = self._io.pos()
class DateTime(KaitaiStruct):
SEQ_FIELDS = ["_unnamed0"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['_unnamed0']['start'] = self._io.pos()
self._unnamed0 = (self._io.read_bytes(14)).decode(u"UTF-8")
self._debug['_unnamed0']['end'] = self._io.pos()
class Header(KaitaiStruct):
SEQ_FIELDS = ["file_profile_name", "file_version", "complexity_level", "standard_type", "originating_station_id", "file_date_time", "file_title", "file_security", "file_copy_number", "file_num_of_copys", "encryption", "file_bg_color", "originator_name", "originator_phone", "file_length", "file_header_length", "num_image_segments", "linfo", "num_graphics_segments", "lnnfo", "reserved_numx", "num_text_files", "ltnfo", "num_data_extension", "ldnfo", "num_reserved_extension", "lrnfo", "user_defined_header", "extended_header"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['file_profile_name']['start'] = self._io.pos()
self.file_profile_name = self._io.read_bytes(4)
self._debug['file_profile_name']['end'] = self._io.pos()
if not self.file_profile_name == b"\x4E\x49\x54\x46":
raise kaitaistruct.ValidationNotEqualError(b"\x4E\x49\x54\x46", self.file_profile_name, self._io, u"/types/header/seq/0")
self._debug['file_version']['start'] = self._io.pos()
self.file_version = self._io.read_bytes(5)
self._debug['file_version']['end'] = self._io.pos()
if not self.file_version == b"\x30\x32\x2E\x31\x30":
raise kaitaistruct.ValidationNotEqualError(b"\x30\x32\x2E\x31\x30", self.file_version, self._io, u"/types/header/seq/1")
self._debug['complexity_level']['start'] = self._io.pos()
self.complexity_level = self._io.read_bytes(2)
self._debug['complexity_level']['end'] = self._io.pos()
self._debug['standard_type']['start'] = self._io.pos()
self.standard_type = self._io.read_bytes(4)
self._debug['standard_type']['end'] = self._io.pos()
if not self.standard_type == b"\x42\x46\x30\x31":
raise kaitaistruct.ValidationNotEqualError(b"\x42\x46\x30\x31", self.standard_type, self._io, u"/types/header/seq/3")
self._debug['originating_station_id']['start'] = self._io.pos()
self.originating_station_id = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['originating_station_id']['end'] = self._io.pos()
self._debug['file_date_time']['start'] = self._io.pos()
self.file_date_time = self._root.DateTime(self._io, self, self._root)
self.file_date_time._read()
self._debug['file_date_time']['end'] = self._io.pos()
self._debug['file_title']['start'] = self._io.pos()
self.file_title = (self._io.read_bytes(80)).decode(u"UTF-8")
self._debug['file_title']['end'] = self._io.pos()
self._debug['file_security']['start'] = self._io.pos()
self.file_security = self._root.Clasnfo(self._io, self, self._root)
self.file_security._read()
self._debug['file_security']['end'] = self._io.pos()
self._debug['file_copy_number']['start'] = self._io.pos()
self.file_copy_number = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['file_copy_number']['end'] = self._io.pos()
self._debug['file_num_of_copys']['start'] = self._io.pos()
self.file_num_of_copys = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['file_num_of_copys']['end'] = self._io.pos()
self._debug['encryption']['start'] = self._io.pos()
self.encryption = self._root.Encrypt(self._io, self, self._root)
self.encryption._read()
self._debug['encryption']['end'] = self._io.pos()
self._debug['file_bg_color']['start'] = self._io.pos()
self.file_bg_color = self._io.read_bytes(3)
self._debug['file_bg_color']['end'] = self._io.pos()
self._debug['originator_name']['start'] = self._io.pos()
self.originator_name = (self._io.read_bytes(24)).decode(u"UTF-8")
self._debug['originator_name']['end'] = self._io.pos()
self._debug['originator_phone']['start'] = self._io.pos()
self.originator_phone = (self._io.read_bytes(18)).decode(u"UTF-8")
self._debug['originator_phone']['end'] = self._io.pos()
self._debug['file_length']['start'] = self._io.pos()
self.file_length = (self._io.read_bytes(12)).decode(u"UTF-8")
self._debug['file_length']['end'] = self._io.pos()
self._debug['file_header_length']['start'] = self._io.pos()
self.file_header_length = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['file_header_length']['end'] = self._io.pos()
self._debug['num_image_segments']['start'] = self._io.pos()
self.num_image_segments = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_image_segments']['end'] = self._io.pos()
self._debug['linfo']['start'] = self._io.pos()
self.linfo = [None] * (int(self.num_image_segments))
for i in range(int(self.num_image_segments)):
if not 'arr' in self._debug['linfo']:
self._debug['linfo']['arr'] = []
self._debug['linfo']['arr'].append({'start': self._io.pos()})
_t_linfo = self._root.LengthImageInfo(self._io, self, self._root)
_t_linfo._read()
self.linfo[i] = _t_linfo
self._debug['linfo']['arr'][i]['end'] = self._io.pos()
self._debug['linfo']['end'] = self._io.pos()
self._debug['num_graphics_segments']['start'] = self._io.pos()
self.num_graphics_segments = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_graphics_segments']['end'] = self._io.pos()
self._debug['lnnfo']['start'] = self._io.pos()
self.lnnfo = [None] * (int(self.num_graphics_segments))
for i in range(int(self.num_graphics_segments)):
if not 'arr' in self._debug['lnnfo']:
self._debug['lnnfo']['arr'] = []
self._debug['lnnfo']['arr'].append({'start': self._io.pos()})
_t_lnnfo = self._root.LengthGraphicInfo(self._io, self, self._root)
_t_lnnfo._read()
self.lnnfo[i] = _t_lnnfo
self._debug['lnnfo']['arr'][i]['end'] = self._io.pos()
self._debug['lnnfo']['end'] = self._io.pos()
self._debug['reserved_numx']['start'] = self._io.pos()
self.reserved_numx = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['reserved_numx']['end'] = self._io.pos()
self._debug['num_text_files']['start'] = self._io.pos()
self.num_text_files = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_text_files']['end'] = self._io.pos()
self._debug['ltnfo']['start'] = self._io.pos()
self.ltnfo = [None] * (int(self.num_text_files))
for i in range(int(self.num_text_files)):
if not 'arr' in self._debug['ltnfo']:
self._debug['ltnfo']['arr'] = []
self._debug['ltnfo']['arr'].append({'start': self._io.pos()})
_t_ltnfo = self._root.LengthTextInfo(self._io, self, self._root)
_t_ltnfo._read()
self.ltnfo[i] = _t_ltnfo
self._debug['ltnfo']['arr'][i]['end'] = self._io.pos()
self._debug['ltnfo']['end'] = self._io.pos()
self._debug['num_data_extension']['start'] = self._io.pos()
self.num_data_extension = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_data_extension']['end'] = self._io.pos()
self._debug['ldnfo']['start'] = self._io.pos()
self.ldnfo = [None] * (int(self.num_data_extension))
for i in range(int(self.num_data_extension)):
if not 'arr' in self._debug['ldnfo']:
self._debug['ldnfo']['arr'] = []
self._debug['ldnfo']['arr'].append({'start': self._io.pos()})
_t_ldnfo = self._root.LengthDataInfo(self._io, self, self._root)
_t_ldnfo._read()
self.ldnfo[i] = _t_ldnfo
self._debug['ldnfo']['arr'][i]['end'] = self._io.pos()
self._debug['ldnfo']['end'] = self._io.pos()
self._debug['num_reserved_extension']['start'] = self._io.pos()
self.num_reserved_extension = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['num_reserved_extension']['end'] = self._io.pos()
self._debug['lrnfo']['start'] = self._io.pos()
self.lrnfo = [None] * (int(self.num_reserved_extension))
for i in range(int(self.num_reserved_extension)):
if not 'arr' in self._debug['lrnfo']:
self._debug['lrnfo']['arr'] = []
self._debug['lrnfo']['arr'].append({'start': self._io.pos()})
_t_lrnfo = self._root.LengthReservedInfo(self._io, self, self._root)
_t_lrnfo._read()
self.lrnfo[i] = _t_lrnfo
self._debug['lrnfo']['arr'][i]['end'] = self._io.pos()
self._debug['lrnfo']['end'] = self._io.pos()
self._debug['user_defined_header']['start'] = self._io.pos()
self.user_defined_header = self._root.TreHeader(self._io, self, self._root)
self.user_defined_header._read()
self._debug['user_defined_header']['end'] = self._io.pos()
self._debug['extended_header']['start'] = self._io.pos()
self.extended_header = self._root.TreHeader(self._io, self, self._root)
self.extended_header._read()
self._debug['extended_header']['end'] = self._io.pos()
class DataSubHeaderStreaming(KaitaiStruct):
"""Streaming file Header Data Extension Segment Subheader."""
SEQ_FIELDS = ["des_base", "des_defined_subheader_fields_len", "sfh_l1", "sfh_delim1", "sfh_dr", "sfh_delim2", "sfh_l2"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['des_base']['start'] = self._io.pos()
self.des_base = self._root.DataSubHeaderBase(self._io, self, self._root)
self.des_base._read()
self._debug['des_base']['end'] = self._io.pos()
self._debug['des_defined_subheader_fields_len']['start'] = self._io.pos()
self.des_defined_subheader_fields_len = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['des_defined_subheader_fields_len']['end'] = self._io.pos()
self._debug['sfh_l1']['start'] = self._io.pos()
self.sfh_l1 = (self._io.read_bytes(7)).decode(u"UTF-8")
self._debug['sfh_l1']['end'] = self._io.pos()
self._debug['sfh_delim1']['start'] = self._io.pos()
self.sfh_delim1 = self._io.read_u4be()
self._debug['sfh_delim1']['end'] = self._io.pos()
self._debug['sfh_dr']['start'] = self._io.pos()
self.sfh_dr = [None] * (int(self.sfh_l1))
for i in range(int(self.sfh_l1)):
if not 'arr' in self._debug['sfh_dr']:
self._debug['sfh_dr']['arr'] = []
self._debug['sfh_dr']['arr'].append({'start': self._io.pos()})
self.sfh_dr[i] = self._io.read_u1()
self._debug['sfh_dr']['arr'][i]['end'] = self._io.pos()
self._debug['sfh_dr']['end'] = self._io.pos()
self._debug['sfh_delim2']['start'] = self._io.pos()
self.sfh_delim2 = self._io.read_u4be()
self._debug['sfh_delim2']['end'] = self._io.pos()
self._debug['sfh_l2']['start'] = self._io.pos()
self.sfh_l2 = (self._io.read_bytes(7)).decode(u"UTF-8")
self._debug['sfh_l2']['end'] = self._io.pos()
class TreHeader(KaitaiStruct):
SEQ_FIELDS = ["header_data_length", "header_overflow", "header_data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header_data_length']['start'] = self._io.pos()
self.header_data_length = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['header_data_length']['end'] = self._io.pos()
if int(self.header_data_length) != 0:
self._debug['header_overflow']['start'] = self._io.pos()
self.header_overflow = (self._io.read_bytes(3)).decode(u"UTF-8")
self._debug['header_overflow']['end'] = self._io.pos()
if int(self.header_data_length) > 2:
self._debug['header_data']['start'] = self._io.pos()
self.header_data = [None] * ((int(self.header_data_length) - 3))
for i in range((int(self.header_data_length) - 3)):
if not 'arr' in self._debug['header_data']:
self._debug['header_data']['arr'] = []
self._debug['header_data']['arr'].append({'start': self._io.pos()})
self.header_data[i] = self._io.read_u1()
self._debug['header_data']['arr'][i]['end'] = self._io.pos()
self._debug['header_data']['end'] = self._io.pos()
class LengthImageInfo(KaitaiStruct):
SEQ_FIELDS = ["length_image_subheader", "length_image_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_image_subheader']['start'] = self._io.pos()
self.length_image_subheader = (self._io.read_bytes(6)).decode(u"UTF-8")
self._debug['length_image_subheader']['end'] = self._io.pos()
self._debug['length_image_segment']['start'] = self._io.pos()
self.length_image_segment = (self._io.read_bytes(10)).decode(u"UTF-8")
self._debug['length_image_segment']['end'] = self._io.pos()
class LengthDataInfo(KaitaiStruct):
SEQ_FIELDS = ["length_data_extension_subheader", "length_data_extension_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_data_extension_subheader']['start'] = self._io.pos()
self.length_data_extension_subheader = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['length_data_extension_subheader']['end'] = self._io.pos()
self._debug['length_data_extension_segment']['start'] = self._io.pos()
self.length_data_extension_segment = (self._io.read_bytes(9)).decode(u"UTF-8")
self._debug['length_data_extension_segment']['end'] = self._io.pos()
class LengthTextInfo(KaitaiStruct):
SEQ_FIELDS = ["length_text_subheader", "length_text_segment"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['length_text_subheader']['start'] = self._io.pos()
self.length_text_subheader = (self._io.read_bytes(4)).decode(u"UTF-8")
self._debug['length_text_subheader']['end'] = self._io.pos()
self._debug['length_text_segment']['start'] = self._io.pos()
self.length_text_segment = (self._io.read_bytes(5)).decode(u"UTF-8")
self._debug['length_text_segment']['end'] = self._io.pos()
| [
[
[
126,
139
],
[
245,
258
],
[
287,
300
]
],
[
[
147,
159
],
[
259,
271
],
[
415,
427
],
[
10628,
10640
],
[
15417,
15429
],
[
16723,
16735
],
[
37035,
37047
],
[
48120,
48132
],
[
50360,
50372
],
[
54599,
54611
],
[
54987,
54999
],
[
55571,
55583
]
],
[
[
185,
197
],
[
454,
466
],
[
5873,
5885
],
[
7088,
7100
],
[
7618,
7630
],
[
8540,
8552
],
[
9524,
9536
],
[
11981,
11993
],
[
13701,
13713
],
[
14537,
14549
],
[
18932,
18944
],
[
22881,
22893
],
[
23719,
23731
],
[
24243,
24255
],
[
29487,
29499
],
[
30424,
30436
],
[
32713,
32725
],
[
33868,
33880
],
[
35687,
35699
],
[
47387,
47399
],
[
49720,
49732
],
[
51164,
51176
],
[
52989,
53001
],
[
53507,
53519
],
[
63509,
63521
],
[
65759,
65771
],
[
67324,
67336
],
[
68154,
68166
],
[
69055,
69067
]
],
[
[
199,
211
],
[
6499,
6511
],
[
33315,
33327
]
],
[
[
213,
220
],
[
6512,
6519
],
[
33328,
33335
]
],
[
[
228,
239
],
[
1682,
1693
],
[
6185,
6196
],
[
7335,
7346
],
[
7928,
7939
],
[
8820,
8831
],
[
9873,
9884
],
[
12306,
12317
],
[
14005,
14016
],
[
15081,
15092
],
[
19448,
19459
],
[
23169,
23180
],
[
23966,
23977
],
[
24568,
24579
],
[
29797,
29808
],
[
30792,
30803
],
[
33017,
33028
],
[
34226,
34237
],
[
36711,
36722
],
[
47784,
47795
],
[
50024,
50035
],
[
51506,
51517
],
[
53236,
53247
],
[
54255,
54266
],
[
63919,
63930
],
[
66049,
66060
],
[
67608,
67619
],
[
68456,
68467
],
[
69337,
69348
]
],
[
[
449,
453
]
]
] |
import json
from typing import Optional
import zipcodes
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_minnesota_zip(zip: str):
    list_of_dicts_of_minnesota_zips = zipcodes.filter_by(state="MN")
    list_of_minnesota_zips = [d["zip_code"] for d in list_of_dicts_of_minnesota_zips]
    # Check the type before calling len() so non-string values return False
    # instead of raising a TypeError.
    if type(zip) != str:
        return False
    elif len(zip) > 10:
        return False
    elif zip in list_of_minnesota_zips:
        return True
    else:
        return False
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidMinnesotaZip(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_minnesota_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_minnesota_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
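    # A hypothetical sketch (not the author's implementation) of what the SqlAlchemy
    # variant could look like: precompute the valid zip list in Python and push an
    # IN-clause to the database.
    #
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, **kwargs):
    #     mn_zips = [d["zip_code"] for d in zipcodes.filter_by(state="MN")]
    #     return column.in_(mn_zips)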
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidMinnesotaZip(ColumnMapExpectation):
"""Expect values in this column to be valid Minnesota zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_minnesota_zip": ["55040", "55330", "55781", "55968"],
"invalid_minnesota_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_minnesota_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_minnesota_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_minnesota_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidMinnesotaZip().print_diagnostic_checklist()
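# A minimal, hypothetical usage sketch (the Validator/batch setup is an assumption and
# not part of this module): once this Expectation is importable, a Great Expectations
# Validator exposes it under its snake_case name, e.g.
#
#   validator.expect_column_values_to_be_valid_minnesota_zip("zip_column", mostly=0.95)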
| [
[
[
7,
11
]
],
[
[
32,
40
],
[
3960,
3968
]
],
[
[
51,
59
],
[
652,
660
]
],
[
[
125,
149
],
[
3969,
3993
]
],
[
[
193,
229
]
],
[
[
286,
307
],
[
1446,
1467
]
],
[
[
314,
336
]
],
[
[
343,
368
]
],
[
[
430,
450
],
[
2201,
2221
]
],
[
[
511,
534
],
[
1165,
1188
]
],
[
[
541,
565
],
[
1414,
1438
]
],
[
[
579,
601
],
[
1549,
1571
]
],
[
[
1131,
1164
]
],
[
[
2161,
2200
],
[
5669,
5708
]
]
] |
#!/usr/bin/python
from setuptools import setup, find_packages
import sys
install_requires = []
pyversion = sys.version_info[:2]
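# argparse only joined the standard library in Python 2.7 / 3.2, so declare it as an
# extra dependency when running on older interpreters.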
if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1):
install_requires.append('argparse')
setup(
name='radosgw-agent',
version='1.0',
packages=find_packages(),
author='Josh Durgin',
author_email='[email protected]',
description='Synchronize users and data between radosgw clusters',
license='MIT',
keywords='radosgw ceph radosgw-agent',
url="https://github.com/ceph/radosgw-agent",
install_requires=[
'setuptools',
'boto ==2.2.2',
'requests',
] + install_requires,
entry_points={
'console_scripts': [
'radosgw-agent = radosgw_agent.cli:main',
],
},
)
| [
[
[
41,
46
],
[
227,
232
]
],
[
[
48,
61
],
[
292,
305
]
],
[
[
69,
72
],
[
109,
112
]
],
[
[
75,
91
],
[
190,
206
],
[
664,
680
]
],
[
[
97,
106
],
[
133,
142
],
[
165,
174
]
]
] |
from flask import Blueprint, request
from app.spiders.core import *
from app.utils import build_result
from app.constants import code
core = Blueprint('core', __name__)
@core.route('/login', methods=['POST'])
def login():
data = request.form
username = data.get('username')
password = data.get('password')
return core_login(username, password)
@core.route('/book_borrow_info', methods=['GET'])
def book_borrow_info():
token = request.args.get('token')
return get_book_borrow_info(token)
@core.route('/trans_list', methods=['GET'])
def trans_list():
token = request.args.get('token')
return get_trans_list(token)
@core.route('/tel_book', methods=['GET'])
def tel_book():
department_id = request.args.get('department_id')
return get_tel_book(department_id)
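# A minimal, hypothetical registration sketch (the application object is an assumption,
# not part of this module): the blueprint is mounted on a Flask app, e.g.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(core, url_prefix='/core')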
| [
[
[
18,
27
],
[
143,
152
]
],
[
[
29,
36
],
[
237,
244
],
[
452,
459
],
[
593,
600
],
[
732,
739
]
],
[
[
67,
68
],
[
333,
343
],
[
489,
509
],
[
630,
644
],
[
777,
789
]
],
[
[
91,
103
]
],
[
[
130,
134
]
],
[
[
136,
140
],
[
174,
178
],
[
367,
371
],
[
520,
524
],
[
655,
659
]
],
[
[
217,
222
]
],
[
[
420,
436
]
],
[
[
567,
577
]
],
[
[
700,
708
]
]
] |
#!C:\Users\Aditya\PycharmProjects\python-docs-azure-app\venv\Scripts\python.exe
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2016 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
import argparse
import pygal
parser = argparse.ArgumentParser(
description='Generate pygal chart in command line',
prog='pygal_gen')
parser.add_argument('-t', '--type', dest='type', default='Line',
                    choices=list(map(lambda x: x.__name__, pygal.CHARTS)),
help='Kind of chart to generate')
parser.add_argument('-o', '--output', dest='filename', default='pygal_out.svg',
help='Filename to write the svg to')
parser.add_argument('-s', '--serie', dest='series', nargs='+', action='append',
help='Add a serie in the form (title val1 val2...)')
parser.add_argument('--version', action='version',
version='pygal %s' % pygal.__version__)
for key in pygal.config.CONFIG_ITEMS:
opt_name = key.name
val = key.value
opts = {}
if key.type == list:
opts['type'] = key.subtype
opts['nargs'] = '+'
else:
opts['type'] = key.type
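    # Boolean config items become argparse flags: they take no value, the action toggles
    # the default, and options whose default is truthy are exposed as --no-<name>.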
if opts['type'] == bool:
del opts['type']
opts['action'] = 'store_true' if not val else 'store_false'
if val:
opt_name = 'no-' + opt_name
if key.name == 'interpolate':
opts['choices'] = list(pygal.interpolate.INTERPOLATIONS.keys())
parser.add_argument(
'--%s' % opt_name, dest=key.name, default=val, **opts)
config = parser.parse_args()
chart = getattr(pygal, config.type)(**vars(config))
for serie in config.series:
chart.add(serie[0], map(float, serie[1:]))
chart.render_to_file(config.filename)
| [
[
[
859,
867
],
[
892,
900
]
],
[
[
876,
881
],
[
1115,
1120
],
[
1569,
1574
],
[
1600,
1605
],
[
2059,
2064
],
[
2235,
2240
]
],
[
[
883,
889
],
[
996,
1002
],
[
1185,
1191
],
[
1323,
1329
],
[
1477,
1483
],
[
2104,
2110
],
[
2198,
2204
]
],
[
[
1593,
1596
],
[
1642,
1645
],
[
1661,
1664
],
[
1692,
1695
],
[
1733,
1736
],
[
1806,
1809
],
[
2001,
2004
],
[
2157,
2160
]
],
[
[
1631,
1639
],
[
1985,
1993
],
[
2142,
2150
]
],
[
[
1655,
1658
],
[
1915,
1918
],
[
1949,
1952
],
[
2175,
2178
]
],
[
[
1675,
1679
],
[
1718,
1722
],
[
1753,
1757
],
[
1791,
1795
],
[
1823,
1827
],
[
1857,
1861
],
[
1878,
1882
],
[
2036,
2040
],
[
2182,
2186
]
],
[
[
1966,
1974
],
[
2142,
2150
]
],
[
[
2189,
2195
],
[
2242,
2248
],
[
2262,
2268
],
[
2285,
2291
],
[
2369,
2375
]
],
[
[
2219,
2224
],
[
2304,
2309
],
[
2348,
2353
]
],
[
[
2276,
2281
],
[
2314,
2319
],
[
2335,
2340
]
]
] |
"""
Python wrapper for libui.
"""
import ctypes
from . import clibui
class uiCombobox(ctypes.Structure):
"""Wrapper for the uiCombobox C struct."""
pass
def uiComboboxPointer(obj):
"""
Casts an object to uiCombobox pointer type.
:param obj: a generic object
:return: uiCombobox
"""
return ctypes.cast(obj, ctypes.POINTER(uiCombobox))
# - void uiComboboxAppend(uiCombobox *c, const char *text);
def uiComboboxAppend(combobox, text):
"""
Appends a new item to the combobox.
:param combobox: uiCombobox
:param text: string
:return: None
"""
clibui.uiComboboxAppend(combobox, bytes(text, 'utf-8'))
# - int uiComboboxSelected(uiCombobox *c);
def uiComboboxSelected(combobox):
"""
Returns selected items index.
:param combobox: uiCombobox
:return: int
"""
return clibui.uiComboboxSelected(combobox)
# - void uiComboboxSetSelected(uiCombobox *c, int n);
def uiComboboxSetSelected(combobox, n):
"""
Sets selected item.
:param combobox: uiCombobox
:param n: integer
:return: None
"""
clibui.uiComboboxSetSelected(combobox, n)
# - void uiComboboxOnSelected(uiCombobox *c, void (*f)(uiCombobox *c, void *data), void *data);
def uiComboboxOnSelected(combobox, callback, data):
"""
Executes a callback function when an item selected.
:param combobox: uiCombobox
:param callback: function
:param data: data
:return: reference to C callback function
"""
c_type = ctypes.CFUNCTYPE(
ctypes.c_int, ctypes.POINTER(uiCombobox), ctypes.c_void_p)
c_callback = c_type(callback)
clibui.uiComboboxOnSelected(combobox, c_callback, data)
return c_callback
def uiNewCombobox():
"""
Creates a new combobox.
:return: uiCombobox
"""
clibui.uiNewCombobox.restype = ctypes.POINTER(uiCombobox)
return clibui.uiNewCombobox()
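# A minimal, hypothetical usage sketch (assumes the remaining pylibui wrappers and a
# running uiMain event loop, which are outside this module):
#
#   combo = uiNewCombobox()
#   uiComboboxAppend(combo, "First")
#   uiComboboxAppend(combo, "Second")
#   uiComboboxSetSelected(combo, 0)
#   handler = uiComboboxOnSelected(combo, lambda c, data: 0, None)  # keep the reference alive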
| [
[
[
44,
50
],
[
91,
97
],
[
331,
337
],
[
348,
354
],
[
1518,
1524
],
[
1544,
1550
],
[
1558,
1564
],
[
1586,
1592
],
[
1849,
1855
]
],
[
[
65,
71
],
[
612,
618
],
[
859,
865
],
[
1109,
1115
],
[
1642,
1648
],
[
1818,
1824
],
[
1888,
1894
]
],
[
[
80,
90
],
[
363,
373
],
[
1573,
1583
],
[
1864,
1874
]
],
[
[
173,
190
]
],
[
[
442,
458
]
],
[
[
717,
735
]
],
[
[
955,
976
]
],
[
[
1253,
1273
]
],
[
[
1727,
1740
]
]
] |
import click
import pandas as pd
# Due to textacy import problems
try:
from textacy.preprocess import preprocess_text
except Exception:
from textacy.preprocess import preprocess_text
def preprocess_f(text, fix_unicode=True, lowercase=True,
no_urls=True, no_emails=True,
no_phone_numbers=True,
no_numbers=True, no_currency_symbols=True,
no_punct=True, no_accents=True):
"""Preprocess text."""
clean_text = preprocess_text(text, fix_unicode=fix_unicode,
lowercase=lowercase,
no_urls=no_urls, no_emails=no_emails,
no_phone_numbers=no_phone_numbers,
no_numbers=no_numbers,
no_currency_symbols=no_currency_symbols,
no_punct=no_punct,
no_accents=no_accents)
return clean_text
@click.command()
@click.option('--input_path', type=click.STRING, help='Path to input file')
@click.option('--output_path', type=click.STRING, help='Path to output file')
@click.option('--set_', type=click.Choice(['train', 'test']), help="set")
def preprocess(input_path, output_path, set_):
"""pre-process script
:param input_path: path to input file
:type input_path: str
:param output_path: path to output file
:type output_path: str
:param set_: kind of data
:type set_: str
"""
if set_ == "train":
df = pd.read_csv(input_path, sep='|')
else:
df = pd.read_csv(input_path)
df["clean_txt"] = df["Pregunta"].apply(lambda x: preprocess_f(x))
df.to_csv(output_path, index=False)
if __name__ == "__main__":
preprocess()
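# A minimal, hypothetical invocation sketch (file names are assumptions):
#
#   python preprocess.py --input_path raw.csv --output_path clean.csv --set_ train
#
# or programmatically, via click's test runner:
#
#   from click.testing import CliRunner
#   CliRunner().invoke(preprocess, ["--input_path", "raw.csv",
#                                   "--output_path", "clean.csv", "--set_", "train"])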
| [
[
[
7,
12
],
[
984,
989
],
[
1001,
1006
],
[
1035,
1040
],
[
1077,
1082
],
[
1112,
1117
],
[
1154,
1159
],
[
1182,
1187
]
],
[
[
20,
32
],
[
1535,
1537
],
[
1591,
1593
]
],
[
[
96,
111
],
[
481,
496
]
],
[
[
165,
180
],
[
481,
496
]
],
[
[
187,
199
],
[
1669,
1681
]
],
[
[
1231,
1241
],
[
1760,
1770
]
]
] |
#coding=utf-8
import sys
import getopt
from core.interface.action import server_action
from core.helper.usage import usage_helper
from prettytable import PrettyTable
from core.helper.parser import config_parser
import re
class action_list(server_action):
    # Example parameter list; add parameter descriptions here and the code will generate an attribute for each argname
_parameters = [
{"name":"h", "needarg":False, "desc":"显示这条帮助信息", "argname":"help"},
{"name":"n", "needarg":True, "desc":"根据服务器名称进行模糊搜索", "argname":"name"},
{"name":"t", "needarg":True, "desc":"根据标签进行搜索", "argname":"tag"}
]
def __init__(self):
        # Build the usage/help information
self._usage_helper = usage_helper(sys.argv[0], "list", self._parameters)
self._config = config_parser()
def _usage(self):
        # Print this action's usage information
self._usage_helper.output()
    # Pre-process the parameter definitions.
    # Reorganize the parameter description list into a dict keyed by option string, for use in parsing.
    # Also initialize an attribute per argname: None for options that take a value, False for flags.
def _prepare_parameters(self):
recognized_parameter={}
for obj in self._parameters:
            obj_key = '-' + obj['name'] # option strings such as -h, -n, -a are used as the dict keys
            recognized_parameter[obj_key] = obj # store the original parameter description unchanged
parameter_name = "_%s"%(obj['argname'])
if obj['needarg'] == True:
setattr(self, parameter_name, None)
else:
setattr(self, parameter_name, False)
return recognized_parameter
    # Short description of this action; printed when "smng help" is executed
def description(self):
return "列出服务器信息"
    # Generic parameter parsing; add any extra argument handling inside this method
def parse_parameters(self):
try:
opts, argv = getopt.getopt(sys.argv[2:], self._usage_helper.get_opt_string())
except Exception as e:
self._usage()
exit()
parameters = self._prepare_parameters()
for opt,arg in opts:
if parameters[opt]['needarg'] == True:
setattr(self,"_%s"%(parameters[opt]['argname']), arg) # 需要传入值的参数设置值
else:
setattr(self,"_%s"%(parameters[opt]['argname']), True) # 不需要传入值的参数设置成True
# 单独处理help参数,默认输出帮助信息
if self._help == True:
self._usage()
exit()
        # ToDo: custom parsing logic
        self._tag = []
prog_with_value = re.compile(r'^[\w]+=[0-9a-zA-Z-_]+$')
prog_without_value = re.compile(r'^[\w]+$')
for opt, arg in opts:
if opt == '-t':
if prog_with_value.match(arg) is not None:
                    # tag with a value, e.g. tag=hello
name,value = arg.split('=')
self._tag.append({name:value})
elif prog_without_value.match(arg) is not None:
                    # tag without a value, e.g. tag
self._tag.append({arg:''})
else:
print("%s is bad value"%(arg))
    # Fuzzy-search servers by name; if no name filter was requested, return the full set
def _search_by_name(self):
ret_array = []
        # No name filter in use: return the full set
if self._name == None:
for i in self._config:
ret_array.append(i['ip'])
return set(ret_array)
        # Otherwise do a fuzzy match and collect the matching IPs into a set
prog = re.compile('^.*%s.*$'%(self._name))
for i in self._config:
if 'name' not in i:
continue
if prog.match(i['name']) != None:
ret_array.append(i['ip'])
return set(ret_array)
    # Search servers by tag; if no tag filter was requested, return the full set
def _search_by_tag(self):
ret_array = []
        # No tag filter in use: return the full set
if len(self._tag) == 0:
for i in self._config:
ret_array.append(i['ip'])
return set(ret_array)
        # Format the tag input parameters
for i in self._config:
if 'tags' in i:
print(i['tags'])
return set([])
pass
    # The action's actual behavior; implement what the action should do inside this method
def run(self):
        # Filter by the given parameters first
name_set = self._search_by_name()
tag_set = self._search_by_tag()
finnal_set = name_set & tag_set
prog = re.compile('^%s$'%(self._name))
        disp = PrettyTable(["IP", "Server Name", "Tags"])
for i in self._config:
            # Check whether this record is in the search results
if i['ip'] in finnal_set:
name = i['name'] if 'name' in i else ''
tag = []
if 'tags' in i:
for t in i['tags']:
tag.append("%s:%s"%(t, i['tags'][t]))
disp.add_row([i['ip'], name, ','.join(tag)])
print(disp)
| [
[
[
21,
24
],
[
643,
646
],
[
1622,
1625
]
],
[
[
32,
38
],
[
1608,
1614
]
],
[
[
73,
86
],
[
240,
253
]
],
[
[
117,
129
],
[
630,
642
]
],
[
[
154,
165
],
[
4043,
4054
]
],
[
[
197,
210
],
[
705,
718
]
],
[
[
218,
220
],
[
2256,
2258
],
[
2323,
2325
],
[
3130,
3132
],
[
3996,
3998
]
],
[
[
228,
239
]
]
] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import timeutils
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import backup as backup_api
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context as cinder_context
from cinder import coordination
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import glance
from cinder.image import image_utils
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
# These attributes we will attempt to save for the volume if they exist
# in the source image metadata.
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
"""Triggers a rescheduling request to be sent when reverting occurs.
If rescheduling doesn't occur this task errors out the volume.
Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
this volume elsewhere.
"""
def __init__(self, reschedule_context, db, driver, scheduler_rpcapi,
do_reschedule):
requires = ['filter_properties', 'request_spec', 'volume',
'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.do_reschedule = do_reschedule
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
self.driver = driver
self.reschedule_context = reschedule_context
# These exception types will trigger the volume to be set into error
# status rather than being rescheduled.
self.no_reschedule_types = [
# Image copying happens after volume creation so rescheduling due
# to copy failure will mean the same volume will be created at
# another place when it still exists locally.
exception.ImageCopyFailure,
# Metadata updates happen after the volume has been created so if
# they fail, rescheduling will likely attempt to create the volume
# on another machine when it still exists locally.
exception.MetadataCopyFailure,
exception.MetadataCreateFailure,
exception.MetadataUpdateFailure,
# The volume/snapshot has been removed from the database, that
# can not be fixed by rescheduling.
exception.VolumeNotFound,
exception.SnapshotNotFound,
exception.VolumeTypeNotFound,
exception.ImageUnacceptable,
exception.ImageTooBig,
exception.InvalidSignatureImage,
exception.ImageSignatureVerificationException
]
def execute(self, **kwargs):
pass
def _pre_reschedule(self, volume):
"""Actions that happen before the rescheduling attempt occur here."""
try:
# Update volume's timestamp and host.
#
# NOTE(harlowja): this is awkward to be done here, shouldn't
# this happen at the scheduler itself and not before it gets
# sent to the scheduler? (since what happens if it never gets
# there??). It's almost like we need a status of 'on-the-way-to
# scheduler' in the future.
# We don't need to update the volume's status to creating, since
# we haven't changed it to error.
update = {
'scheduled_at': timeutils.utcnow(),
'host': None,
}
LOG.debug("Updating volume %(volume_id)s with %(update)s.",
{'update': update, 'volume_id': volume.id})
volume.update(update)
volume.save()
except exception.CinderException:
# Don't let updating the state cause the rescheduling to fail.
LOG.exception("Volume %s: update volume state failed.",
volume.id)
def _reschedule(self, context, cause, request_spec, filter_properties,
volume):
"""Actions that happen during the rescheduling attempt occur here."""
create_volume = self.scheduler_rpcapi.create_volume
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
filter_properties['retry'] = {}
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume.id
LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s",
{'volume_id': volume.id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
'reason': cause.exception_str})
if all(cause.exc_info):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
return create_volume(context, volume, request_spec=request_spec,
filter_properties=filter_properties)
def _post_reschedule(self, volume):
"""Actions that happen after the rescheduling attempt occur here."""
LOG.debug("Volume %s: re-scheduled", volume.id)
# NOTE(dulek): Here we should be sure that rescheduling occurred and
# the host field will be erased. In case the volume was already created
# at the backend, we attempt to delete it.
try:
self.driver.delete_volume(volume)
except Exception:
# Most likely the volume was not created at the backend, so we
# can safely ignore this failure.
pass
def revert(self, context, result, flow_failures, volume, **kwargs):
# NOTE(dulek): Revert is occurring and the manager needs to know if
# rescheduling happened. We return a boolean flag indicating that,
# which will be available in the flow engine store through the
# get_revert_result method.
# If we do not want to reschedule, just set the volume's status to
# error and return.
if not self.do_reschedule:
common.error_out(volume)
LOG.error("Volume %s: create failed", volume.id)
return False
# Check if we have a cause which can tell us not to reschedule and
# set the volume's status to error.
for failure in flow_failures.values():
if failure.check(*self.no_reschedule_types):
common.error_out(volume)
LOG.error("Volume %s: create failed", volume.id)
return False
# Use a different context when rescheduling.
if self.reschedule_context:
cause = list(flow_failures.values())[0]
context = self.reschedule_context
try:
self._pre_reschedule(volume)
self._reschedule(context, cause, volume=volume, **kwargs)
self._post_reschedule(volume)
return True
except exception.CinderException:
LOG.exception("Volume %s: rescheduling failed", volume.id)
return False
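# --- Illustrative sketch (not part of the original task) ---
# A minimal, standalone restatement of the revert() decision above:
# given the "no reschedule" exception types, decide whether a failed
# create may be retried elsewhere. Plain exceptions stand in for the
# cinder.exception classes; names here are hypothetical.
def _should_reschedule_sketch(failure_exc, do_reschedule, no_reschedule_types):
    """Return True when the failed create should be rescheduled."""
    if not do_reschedule:
        return False
    # Failures of these types either leave state behind or have nothing
    # left to retry, so rescheduling would only mask the real problem.
    if isinstance(failure_exc, tuple(no_reschedule_types)):
        return False
    return True
# Example: _should_reschedule_sketch(TimeoutError(), True, (KeyError,)) -> True
#          _should_reschedule_sketch(KeyError(), True, (KeyError,)) -> False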
class ExtractVolumeRefTask(flow_utils.CinderTask):
"""Extracts volume reference for given volume id."""
default_provides = 'refreshed'
def __init__(self, db, host, set_error=True):
super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
self.db = db
self.host = host
self.set_error = set_error
def execute(self, context, volume):
# NOTE(harlowja): this will fetch the volume from the database, if
# the volume has been deleted before we got here then this should fail.
#
# In the future we might want to have a lock on the volume_id so that
# the volume cannot be deleted while it's still being created?
volume.refresh()
return volume
def revert(self, context, volume, result, **kwargs):
if isinstance(result, ft.Failure) or not self.set_error:
return
reason = _('Volume create failed while extracting volume ref.')
common.error_out(volume, reason)
LOG.error("Volume %s: create failed", volume.id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
"""Extracts a spec of a volume to be created into a common structure.
This task extracts and organizes the input requirements into a common
and easier to analyze structure for later tasks to use. It will also
attach the underlying database volume reference which can be used by
other tasks to reference for further details about the volume to be.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, db):
requires = ['volume', 'request_spec']
super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, volume, request_spec):
get_remote_image_service = glance.get_remote_image_service
volume_name = volume.name
volume_size = utils.as_int(volume.size, quiet=False)
# Create a dictionary that will represent the volume to be so that
# later tasks can easily switch between the different types and create
# the volume according to the volume types specifications (which are
# represented in this dictionary).
specs = {
'status': volume.status,
'type': 'raw', # This will have the type of the volume to be
# created, which should be one of [raw, snap,
# source_vol, image, backup]
'volume_id': volume.id,
'volume_name': volume_name,
'volume_size': volume_size,
}
if volume.snapshot_id:
# We are making a snapshot based volume instead of a raw volume.
specs.update({
'type': 'snap',
'snapshot_id': volume.snapshot_id,
})
elif volume.source_volid:
# We are making a source based volume instead of a raw volume.
#
# NOTE(harlowja): This will likely fail if the source volume
# disappeared by the time this call occurred.
source_volid = volume.source_volid
source_volume_ref = objects.Volume.get_by_id(context,
source_volid)
specs.update({
'source_volid': source_volid,
# This is captured in case we have to revert and want to set the
# source volume status back to its original value. This may or
# may not be sketchy to do??
'source_volstatus': source_volume_ref.status,
'type': 'source_vol',
})
elif request_spec.get('image_id'):
# We are making an image based volume instead of a raw volume.
image_href = request_spec['image_id']
image_service, image_id = get_remote_image_service(context,
image_href)
specs.update({
'type': 'image',
'image_id': image_id,
'image_location': image_service.get_location(context,
image_id),
'image_meta': image_service.show(context, image_id),
# Instead of refetching the image service later just save it.
#
# NOTE(harlowja): if we have to later recover this tasks output
# on another 'node' that this object won't be able to be
# serialized, so we will have to recreate this object on
# demand in the future.
'image_service': image_service,
})
elif request_spec.get('backup_id'):
# We are making a backup based volume instead of a raw volume.
specs.update({
'type': 'backup',
'backup_id': request_spec['backup_id'],
# NOTE(luqitao): if the driver does not implement the method
# `create_volume_from_backup`, cinder-backup will update the
# volume's status, otherwise we need update it in the method
# `CreateVolumeOnFinishTask`.
'need_update_volume': True,
})
return specs
def revert(self, context, result, **kwargs):
if isinstance(result, ft.Failure):
return
volume_spec = result.get('volume_spec')
# Restore the source volume status and set the volume to error status.
common.restore_source_status(context, self.db, volume_spec)
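# --- Illustrative sketch (not part of the original task) ---
# Rough shape of the volume_spec dictionaries built by execute() above,
# one per source type; the ids and values are hypothetical placeholders.
# All of them also carry the base keys 'status', 'volume_id',
# 'volume_name' and 'volume_size'.
_EXAMPLE_VOLUME_SPECS = {
    'raw': {'type': 'raw'},
    'snap': {'type': 'snap', 'snapshot_id': 'snap-uuid'},
    'source_vol': {'type': 'source_vol', 'source_volid': 'vol-uuid',
                   'source_volstatus': 'available'},
    'image': {'type': 'image', 'image_id': 'img-uuid',
              'image_location': ('direct-url', []), 'image_meta': {},
              'image_service': None},
    'backup': {'type': 'backup', 'backup_id': 'bkp-uuid',
               'need_update_volume': True},
}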
class NotifyVolumeActionTask(flow_utils.CinderTask):
"""Performs a notification about the given volume when called.
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
def execute(self, context, volume):
try:
volume_utils.notify_about_volume_usage(context, volume,
self.event_suffix,
host=volume.host)
except exception.CinderException:
# If notification sending or volume database entry reading fails,
# we shouldn't error out the whole workflow since this is not
# always information that must be sent for volumes to operate.
LOG.exception("Failed notifying about the volume"
" action %(event)s for volume %(volume_id)s",
{'event': self.event_suffix, 'volume_id': volume.id})
class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Creates a volume from a provided specification.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, manager, db, driver, image_volume_cache=None):
super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
self.manager = manager
self.db = db
self.driver = driver
self.image_volume_cache = image_volume_cache
self.message = message_api.API()
self.backup_api = backup_api.API()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
def _handle_bootable_volume_glance_meta(self, context, volume,
**kwargs):
"""Enable bootable flag and properly handle glance metadata.
Caller should provide exactly one of snapshot_id, source_volid
and image_id. If an image_id is specified, an image_meta should also
be provided, otherwise it will be treated as an empty dictionary.
"""
log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
"%(vol_id)s.")
exception_template = _("Failed updating volume %(vol_id)s metadata"
" using the provided %(src_type)s"
" %(src_id)s metadata")
src_type = None
src_id = None
volume_utils.enable_bootable_flag(volume)
try:
if kwargs.get('snapshot_id'):
src_type = 'snapshot'
src_id = kwargs['snapshot_id']
snapshot_id = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
self.db.volume_glance_metadata_copy_to_volume(
context, volume.id, snapshot_id)
elif kwargs.get('source_volid'):
src_type = 'source volume'
src_id = kwargs['source_volid']
source_volid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_volid,
volume.id)
elif kwargs.get('image_id'):
src_type = 'image'
src_id = kwargs['image_id']
image_id = src_id
image_meta = kwargs.get('image_meta', {})
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
self._capture_volume_image_metadata(context, volume.id,
image_id, image_meta)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(exception_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
raise exception.MetadataCopyFailure(reason=ex)
def _create_from_snapshot(self, context, volume, snapshot_id,
**kwargs):
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
try:
model_update = self.driver.create_volume_from_snapshot(volume,
snapshot)
finally:
self._cleanup_cg_in_volume(volume)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
make_bootable = False
try:
originating_vref = objects.Volume.get_by_id(context,
snapshot.volume_id)
make_bootable = originating_vref.bootable
except exception.CinderException as ex:
LOG.exception("Failed fetching snapshot %(snapshot_id)s bootable"
" flag using the provided glance snapshot "
"%(snapshot_ref_id)s volume reference",
{'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot.volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
if make_bootable:
self._handle_bootable_volume_glance_meta(context, volume,
snapshot_id=snapshot_id)
return model_update
def _create_from_source_volume(self, context, volume, source_volid,
**kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = objects.Volume.get_by_id(context, source_volid)
try:
model_update = self.driver.create_cloned_volume(volume, srcvol_ref)
finally:
self._cleanup_cg_in_volume(volume)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(
context, volume, source_volid=srcvol_ref.id)
return model_update
def _capture_volume_image_metadata(self, context, volume_id,
image_id, image_meta):
volume_metadata = volume_utils.get_volume_image_metadata(
image_id, image_meta)
LOG.debug("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s.",
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
self.db.volume_glance_metadata_bulk_create(context, volume_id,
volume_metadata)
def _clone_image_volume(self, context, volume, image_location, image_meta):
"""Create a volume efficiently from an existing image.
Returns a dict of volume properties (e.g. provider_location) and a
boolean indicating whether cloning occurred.
"""
# NOTE (lixiaoy1): currently can't create volume from source vol with
# different encryptions, so just return.
if not image_location or volume.encryption_key_id:
return None, False
if (image_meta.get('container_format') != 'bare' or
image_meta.get('disk_format') != 'raw'):
LOG.info("Requested image %(id)s is not in raw format.",
{'id': image_meta.get('id')})
return None, False
image_volume = None
direct_url, locations = image_location
urls = set([direct_url] + [loc.get('url') for loc in locations or []])
image_volume_ids = [url[9:] for url in urls
if url and url.startswith('cinder://')]
image_volumes = self.db.volume_get_all_by_host(
context, volume['host'], filters={'id': image_volume_ids})
for image_volume in image_volumes:
# For the case where the image volume is stored in the service
# tenant, the image_owner volume metadata should also be checked.
image_owner = None
volume_metadata = image_volume.get('volume_metadata') or {}
for m in volume_metadata:
if m['key'] == 'image_owner':
image_owner = m['value']
if (image_meta['owner'] != volume['project_id'] and
image_meta['owner'] != image_owner):
LOG.info("Skipping image volume %(id)s because "
"it is not accessible by current Tenant.",
{'id': image_volume.id})
continue
LOG.info("Will clone a volume from the image volume "
"%(id)s.", {'id': image_volume.id})
break
else:
LOG.debug("No accessible image volume for image %(id)s found.",
{'id': image_meta['id']})
return None, False
try:
ret = self.driver.create_cloned_volume(volume, image_volume)
self._cleanup_cg_in_volume(volume)
return ret, True
except (NotImplementedError, exception.CinderException):
LOG.exception('Failed to clone image volume %(id)s.',
{'id': image_volume['id']})
return None, False
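# --- Illustrative sketch (not part of the original task) ---
# The direct-URL handling above strips the 'cinder://' scheme to recover
# candidate image-volume ids from the glance locations. A standalone
# equivalent of that parsing step:
def _image_volume_ids_sketch(direct_url, locations):
    urls = set([direct_url] + [loc.get('url') for loc in locations or []])
    return [url[len('cinder://'):] for url in urls
            if url and url.startswith('cinder://')]
# _image_volume_ids_sketch('cinder://1111-2222', [{'url': 'http://x'}])
# -> ['1111-2222']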
def _create_from_image_download(self, context, volume, image_location,
image_meta, image_service):
# TODO(harlowja): what needs to be rolled back in the clone if this
# volume create fails?? Likely this should be a subflow or broken
# out task in the future. That will bring up the question of how
# do we make said subflow/task which is only triggered in the
# clone image 'path' resumable and revertable in the correct
# manner.
model_update = self.driver.create_volume(volume) or {}
self._cleanup_cg_in_volume(volume)
model_update['status'] = 'downloading'
try:
volume.update(model_update)
volume.save()
except exception.CinderException:
LOG.exception("Failed updating volume %(volume_id)s with "
"%(updates)s",
{'volume_id': volume.id,
'updates': model_update})
try:
volume_utils.copy_image_to_volume(self.driver, context, volume,
image_meta, image_location,
image_service)
except exception.ImageTooBig:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to copy image to volume "
"%(volume_id)s due to insufficient space",
{'volume_id': volume.id})
return model_update
def _create_from_image_cache(self, context, internal_context, volume,
image_id, image_meta):
"""Attempt to create the volume using the image cache.
Best case this will simply clone the existing volume in the cache.
Worst case the image is out of date and will be evicted. In that case
a clone will not be created and the image must be downloaded again.
"""
LOG.debug('Attempting to retrieve cache entry for image = '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume.host})
# Currently can't create volume from source vol with different
# encryptions, so just return
if volume.encryption_key_id:
return None, False
try:
cache_entry = self.image_volume_cache.get_entry(internal_context,
volume,
image_id,
image_meta)
if cache_entry:
LOG.debug('Creating from source image-volume %(volume_id)s',
{'volume_id': cache_entry['volume_id']})
model_update = self._create_from_source_volume(
context,
volume,
cache_entry['volume_id']
)
return model_update, True
except NotImplementedError:
LOG.warning('Backend does not support creating image-volume '
'clone. Image will be downloaded from Glance.')
return None, False
@coordination.synchronized('{image_id}')
def _prepare_image_cache_entry(self, context, volume,
image_location, image_id,
image_meta, image_service):
internal_context = cinder_context.get_internal_tenant_context()
if not internal_context:
return None, False
cache_entry = self.image_volume_cache.get_entry(internal_context,
volume,
image_id,
image_meta)
# If the entry is in the cache then return ASAP in order to minimize
# the scope of the lock. If it isn't in the cache then do the work
# that adds it. The work is done inside the locked region to ensure
# only one cache entry is created.
if cache_entry:
LOG.debug('Found cache entry for image = '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume.host})
return None, False
else:
LOG.debug('Preparing cache entry for image = '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume.host})
model_update = self._create_from_image_cache_or_download(
context,
volume,
image_location,
image_id,
image_meta,
image_service,
update_cache=True)
return model_update, True
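# --- Illustrative sketch (not part of the original task) ---
# The cache-entry preparation above relies on a per-image lock so that
# only one request builds the entry while others take the fast path.
# A minimal stand-in using threading.Lock instead of
# coordination.synchronized; names are hypothetical.
import threading
_image_locks_sketch = {}
_image_cache_sketch = {}
def _prepare_cache_entry_sketch(image_id, build_entry):
    lock = _image_locks_sketch.setdefault(image_id, threading.Lock())
    with lock:
        if image_id in _image_cache_sketch:
            # Fast path: another request already created the entry.
            return _image_cache_sketch[image_id], False
        # Slow path: build inside the lock so the entry is created once.
        _image_cache_sketch[image_id] = build_entry()
        return _image_cache_sketch[image_id], True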
def _create_from_image_cache_or_download(self, context, volume,
image_location, image_id,
image_meta, image_service,
update_cache=False):
# NOTE(e0ne): check for free space in image_conversion_dir before
# image downloading.
# NOTE(mnaser): This check *only* happens if the backend is not able
# to clone volumes and we have to resort to downloading
# the image from Glance and uploading it.
if CONF.image_conversion_dir:
fileutils.ensure_tree(CONF.image_conversion_dir)
try:
image_utils.check_available_space(
CONF.image_conversion_dir,
image_meta['size'], image_id)
except exception.ImageTooBig as err:
with excutils.save_and_reraise_exception():
self.message.create(
context,
message_field.Action.COPY_IMAGE_TO_VOLUME,
resource_uuid=volume.id,
detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE,
exception=err)
# Try and use the image cache.
should_create_cache_entry = False
cloned = False
model_update = None
if self.image_volume_cache:
internal_context = cinder_context.get_internal_tenant_context()
if not internal_context:
LOG.info('Unable to get Cinder internal context, will '
'not use image-volume cache.')
else:
try:
model_update, cloned = self._create_from_image_cache(
context,
internal_context,
volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning('Failed to create volume from image-volume '
'cache, image will be downloaded from Glance. '
'Error: %(exception)s',
{'exception': e})
# If an exception occurred when cloning the image-volume,
# it may be the image-volume reached its snapshot limit.
# Create another "fresh" cache entry.
update_cache = True
# Don't cache unless directed.
if not cloned and update_cache:
should_create_cache_entry = True
# Clean up the consistencygroup field in the volume, because
# creating the cache entry will need to update the volume
# object.
self._cleanup_cg_in_volume(volume)
# Fall back to default behavior of creating volume,
# download the image data and copy it into the volume.
original_size = volume.size
backend_name = volume_utils.extract_host(volume.service_topic_queue)
try:
if not cloned:
try:
with image_utils.TemporaryImages.fetch(
image_service, context, image_id,
backend_name) as tmp_image:
if CONF.verify_glance_signatures != 'disabled':
# Verify image signature via reading content from
# temp image, and store the verification flag if
# required.
verified = \
image_utils.verify_glance_image_signature(
context, image_service,
image_id, tmp_image)
self.db.volume_glance_metadata_bulk_create(
context, volume.id,
{'signature_verified': verified})
# Try to create the volume as the minimal size,
# then we can extend once the image has been
# downloaded.
data = image_utils.qemu_img_info(tmp_image)
virtual_size = image_utils.check_virtual_size(
data.virtual_size, volume.size, image_id)
if should_create_cache_entry:
if virtual_size and virtual_size != original_size:
volume.size = virtual_size
volume.save()
model_update = self._create_from_image_download(
context,
volume,
image_location,
image_meta,
image_service
)
except exception.ImageTooBig as e:
with excutils.save_and_reraise_exception():
self.message.create(
context,
message_field.Action.COPY_IMAGE_TO_VOLUME,
resource_uuid=volume.id,
detail=
message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE,
exception=e)
except exception.ImageSignatureVerificationException as err:
with excutils.save_and_reraise_exception():
self.message.create(
context,
message_field.Action.COPY_IMAGE_TO_VOLUME,
resource_uuid=volume.id,
detail=
message_field.Detail.SIGNATURE_VERIFICATION_FAILED,
exception=err)
if should_create_cache_entry:
# Update the newly created volume db entry before we clone it
# for the image-volume creation.
if model_update:
volume.update(model_update)
volume.save()
self.manager._create_image_cache_volume_entry(internal_context,
volume,
image_id,
image_meta)
finally:
# If we created the volume as the minimal size, extend it back to
# what was originally requested. If an exception has occurred or
# extending it back failed, we still need to put this back before
# letting it be raised further up the stack.
if volume.size != original_size:
try:
self.driver.extend_volume(volume, original_size)
finally:
volume.size = original_size
volume.save()
return model_update
def _create_from_image(self, context, volume,
image_location, image_id, image_meta,
image_service, **kwargs):
LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s.",
{'volume_id': volume.id,
'image_location': image_location, 'image_id': image_id})
virtual_size = image_meta.get('virtual_size')
if virtual_size:
virtual_size = image_utils.check_virtual_size(virtual_size,
volume.size,
image_id)
# Create the volume from an image.
#
# First see if the driver can clone the image directly.
#
# NOTE (singn): two params need to be returned
# dict containing provider_location for cloned volume
# and clone status.
# NOTE (lixiaoy1): Currently all images are raw data, we can't
# use clone_image to copy data if new volume is encrypted.
volume_is_encrypted = volume.encryption_key_id is not None
cloned = False
model_update = None
if not volume_is_encrypted:
model_update, cloned = self.driver.clone_image(context,
volume,
image_location,
image_meta,
image_service)
# Try and clone the image if we have it set as a glance location.
if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
model_update, cloned = self._clone_image_volume(context,
volume,
image_location,
image_meta)
# If we're going to try using the image cache then prepare the cache
# entry. Note: encrypted volume images are not cached.
if not cloned and self.image_volume_cache and not volume_is_encrypted:
# If _prepare_image_cache_entry() has to create the cache entry
# then it will also create the volume. But if the volume image
# is already in the cache then it returns (None, False), and
# _create_from_image_cache_or_download() will use the cache.
model_update, cloned = self._prepare_image_cache_entry(
context,
volume,
image_location,
image_id,
image_meta,
image_service)
# Try and use the image cache, and download if not cached.
if not cloned:
model_update = self._create_from_image_cache_or_download(
context,
volume,
image_location,
image_id,
image_meta,
image_service)
self._handle_bootable_volume_glance_meta(context, volume,
image_id=image_id,
image_meta=image_meta)
return model_update
def _create_from_backup(self, context, volume, backup_id, **kwargs):
LOG.info("Creating volume %(volume_id)s from backup %(backup_id)s.",
{'volume_id': volume.id,
'backup_id': backup_id})
ret = {}
backup = objects.Backup.get_by_id(context, backup_id)
try:
ret = self.driver.create_volume_from_backup(volume, backup)
need_update_volume = True
except NotImplementedError:
LOG.info("Backend does not support creating volume from "
"backup %(id)s. It will directly create the raw volume "
"at the backend and then schedule the request to the "
"backup service to restore the volume with backup.",
{'id': backup_id})
model_update = self._create_raw_volume(
context, volume, **kwargs) or {}
volume.update(model_update)
volume.save()
backup_host = self.backup_api.get_available_backup_service_host(
backup.host, backup.availability_zone)
updates = {'status': fields.BackupStatus.RESTORING,
'restore_volume_id': volume.id,
'host': backup_host}
backup.update(updates)
backup.save()
self.backup_rpcapi.restore_backup(context, backup.host, backup,
volume.id)
need_update_volume = False
LOG.info("Created volume %(volume_id)s from backup %(backup_id)s "
"successfully.",
{'volume_id': volume.id,
'backup_id': backup_id})
return ret, need_update_volume
def _create_raw_volume(self, context, volume, **kwargs):
try:
ret = self.driver.create_volume(volume)
except Exception as ex:
with excutils.save_and_reraise_exception():
self.message.create(
context,
message_field.Action.CREATE_VOLUME_FROM_BACKEND,
resource_uuid=volume.id,
detail=message_field.Detail.DRIVER_FAILED_CREATE,
exception=ex)
finally:
self._cleanup_cg_in_volume(volume)
return ret
def execute(self, context, volume, volume_spec):
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
if not volume_id:
volume_id = volume.id
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
LOG.error("Unable to create volume. "
"Volume driver %s not initialized", driver_name)
raise exception.DriverNotInitialized()
# NOTE(xyang): Populate consistencygroup_id and consistencygroup
# fields before passing to the driver. This is to support backward
# compatibility of consistencygroup.
if volume.group_id:
volume.consistencygroup_id = volume.group_id
cg = consistencygroup.ConsistencyGroup()
cg.from_group(volume.group)
volume.consistencygroup = cg
create_type = volume_spec.pop('type', None)
LOG.info("Volume %(volume_id)s: being created as %(create_type)s "
"with specification: %(volume_spec)s",
{'volume_spec': volume_spec, 'volume_id': volume_id,
'create_type': create_type})
if create_type == 'raw':
model_update = self._create_raw_volume(
context, volume, **volume_spec)
elif create_type == 'snap':
model_update = self._create_from_snapshot(context, volume,
**volume_spec)
elif create_type == 'source_vol':
model_update = self._create_from_source_volume(
context, volume, **volume_spec)
elif create_type == 'image':
model_update = self._create_from_image(context,
volume,
**volume_spec)
elif create_type == 'backup':
model_update, need_update_volume = self._create_from_backup(
context, volume, **volume_spec)
volume_spec.update({'need_update_volume': need_update_volume})
else:
raise exception.VolumeTypeNotFound(volume_type_id=create_type)
# Persist any model information provided on creation.
try:
if model_update:
with volume.obj_as_admin():
volume.update(model_update)
volume.save()
except exception.CinderException:
# If somehow the update failed, we want to ensure that the
# failure is logged (but we do not try rescheduling since the
# volume at this point has been created).
LOG.exception("Failed updating model of volume %(volume_id)s "
"with creation provided model %(model)s",
{'volume_id': volume_id, 'model': model_update})
raise
return volume_spec
def _cleanup_cg_in_volume(self, volume):
# NOTE(xyang): Cannot have both group_id and consistencygroup_id.
# consistencygroup_id needs to be removed to avoid DB reference
# error because there isn't an entry in the consistencygroups table.
if (('group_id' in volume and volume.group_id) and
('consistencygroup_id' in volume and
volume.consistencygroup_id)):
volume.consistencygroup_id = None
if 'consistencygroup' in volume:
volume.consistencygroup = None
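# --- Illustrative sketch (not part of the original task) ---
# Summary of the execute() dispatch above: the volume_spec 'type' field
# selects the creation helper that is invoked.
_CREATE_DISPATCH_SKETCH = {
    'raw': '_create_raw_volume',
    'snap': '_create_from_snapshot',
    'source_vol': '_create_from_source_volume',
    'image': '_create_from_image',
    'backup': '_create_from_backup',
}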
class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
"""On successful volume creation this will perform final volume actions.
When a volume is created successfully it is expected that MQ notifications
and database updates will occur to 'signal' to others that the volume is
now ready for usage. This task does those notifications and updates in a
reliable manner (not re-raising exceptions if said actions can not be
triggered).
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
self.status_translation = {
'migration_target_creating': 'migration_target',
}
def execute(self, context, volume, volume_spec):
need_update_volume = volume_spec.pop('need_update_volume', True)
if not need_update_volume:
super(CreateVolumeOnFinishTask, self).execute(context, volume)
return
new_status = self.status_translation.get(volume_spec.get('status'),
'available')
update = {
'status': new_status,
'launched_at': timeutils.utcnow(),
}
try:
# TODO(harlowja): is it acceptable to only log if this fails??
# or are there other side-effects that this will cause if the
# status isn't updated correctly (aka it will likely be stuck in
# 'creating' if this fails)??
volume.update(update)
volume.save()
# Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume)
except exception.CinderException:
LOG.exception("Failed updating volume %(volume_id)s with "
"%(update)s", {'volume_id': volume.id,
'update': update})
# Even if the update fails, the volume is ready.
LOG.info("Volume %(volume_name)s (%(volume_id)s): "
"created successfully",
{'volume_name': volume_spec['volume_name'],
'volume_id': volume.id})
def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume,
allow_reschedule, reschedule_context, request_spec,
filter_properties, image_volume_cache=None):
"""Constructs and returns the manager entrypoint flow.
This flow will do the following:
1. Determines if rescheduling is enabled (ahead of time).
2. Inject keys & values for dependent tasks.
3. Selects 1 of 2 activated only on *failure* tasks (one to update the db
status & notify or one to update the db status & notify & *reschedule*).
4. Extracts a volume specification from the provided inputs.
5. Notifies that the volume has started to be created.
6. Creates a volume from the extracted volume specification.
7. Attaches an on-success *only* task that notifies that the volume
creation has ended and performs further database status updates.
"""
flow_name = ACTION.replace(":", "_") + "_manager"
volume_flow = linear_flow.Flow(flow_name)
# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
# determined.
create_what = {
'context': context,
'filter_properties': filter_properties,
'request_spec': request_spec,
'volume': volume,
}
volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False))
retry = filter_properties.get('retry', None)
# We always add OnFailureRescheduleTask and handle the change of the
# volume's status when reverting the flow. Meanwhile, there is no need
# to revert ExtractVolumeRefTask.
do_reschedule = allow_reschedule and request_spec and retry
volume_flow.add(OnFailureRescheduleTask(reschedule_context, db, driver,
scheduler_rpcapi, do_reschedule))
LOG.debug("Volume reschedule parameters: %(allow)s "
"retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
volume_flow.add(ExtractVolumeSpecTask(db),
NotifyVolumeActionTask(db, "create.start"),
CreateVolumeFromSpecTask(manager,
db,
driver,
image_volume_cache),
CreateVolumeOnFinishTask(db, "create.end"))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(volume_flow, store=create_what)
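# --- Illustrative sketch (not part of the original module) ---
# Order of the tasks that get_flow() adds to the linear flow above; a
# caller is then expected to run the loaded taskflow engine (for
# example, engine.run()).
_FLOW_ORDER_SKETCH = [
    'ExtractVolumeRefTask',
    'OnFailureRescheduleTask',  # acts on revert: reschedule or error out
    'ExtractVolumeSpecTask',
    'NotifyVolumeActionTask (create.start)',
    'CreateVolumeFromSpecTask',
    'CreateVolumeOnFinishTask (create.end)',
]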
| [
[
[
580,
589
],
[
6191,
6200
]
],
[
[
615,
618
],
[
1554,
1557
]
],
[
[
640,
654
],
[
1493,
1500
]
],
[
[
678,
686
],
[
25978,
25986
],
[
30500,
30508
],
[
34674,
34682
],
[
35175,
35183
],
[
42022,
42030
]
],
[
[
710,
719
],
[
30240,
30249
]
],
[
[
743,
752
],
[
4685,
4694
],
[
47170,
47179
]
],
[
[
760,
776
],
[
50652,
50660
]
],
[
[
807,
818
],
[
49142,
49153
]
],
[
[
846,
859
],
[
9300,
9302
],
[
13927,
13929
]
],
[
[
880,
900
],
[
15841,
15851
]
],
[
[
927,
950
],
[
15887,
15900
]
],
[
[
970,
995
],
[
28198,
28212
],
[
31024,
31038
]
],
[
[
1015,
1027
],
[
27949,
27961
]
],
[
[
1047,
1056
],
[
3115,
3124
],
[
3375,
3384
],
[
3418,
3427
],
[
3463,
3472
],
[
3631,
3640
],
[
3669,
3678
],
[
3709,
3718
],
[
3751,
3760
],
[
3792,
3801
],
[
3827,
3836
],
[
3872,
3881
],
[
4962,
4971
],
[
8340,
8349
],
[
14839,
14848
],
[
18313,
18322
],
[
18544,
18553
],
[
18798,
18807
],
[
19718,
19727
],
[
20105,
20114
],
[
24510,
24519
],
[
25456,
25465
],
[
25938,
25947
],
[
30453,
30462
],
[
31568,
31577
],
[
34621,
34630
],
[
35096,
35105
],
[
42931,
42940
],
[
44625,
44634
],
[
44928,
44937
],
[
47675,
47684
]
],
[
[
1076,
1086
],
[
1825,
1835
],
[
8493,
8503
],
[
9555,
9565
],
[
14185,
14195
],
[
15343,
15353
]
],
[
[
1111,
1112
],
[
9372,
9373
],
[
16362,
16363
],
[
16485,
16486
]
],
[
[
1138,
1144
],
[
10348,
10354
]
],
[
[
1170,
1181
],
[
30314,
30325
],
[
32829,
32840
],
[
33322,
33333
],
[
33882,
33893
],
[
33959,
33970
],
[
37254,
37265
]
],
[
[
1209,
1227
],
[
15797,
15808
]
],
[
[
1255,
1268
],
[
30625,
30638
],
[
30740,
30753
],
[
34823,
34836
],
[
34983,
34996
],
[
35324,
35337
],
[
35484,
35497
],
[
42147,
42160
],
[
42268,
42281
]
],
[
[
1288,
1295
],
[
11704,
11711
],
[
18966,
18973
],
[
19539,
19546
],
[
20853,
20860
],
[
40372,
40379
]
],
[
[
1323,
1339
],
[
43260,
43276
]
],
[
[
1367,
1373
],
[
41248,
41254
]
],
[
[
1393,
1398
],
[
10437,
10442
]
],
[
[
1431,
1437
],
[
5919,
5925
],
[
7453,
7459
],
[
7804,
7810
],
[
9435,
9441
],
[
14094,
14100
]
],
[
[
1464,
1485
],
[
14629,
14641
],
[
16707,
16719
],
[
21608,
21620
],
[
25724,
25736
],
[
32689,
32701
]
],
[
[
1487,
1490
],
[
4761,
4764
],
[
5076,
5079
],
[
5732,
5735
],
[
6502,
6505
],
[
7490,
7493
],
[
7845,
7848
],
[
8383,
8386
],
[
9476,
9479
],
[
15108,
15111
],
[
16942,
16945
],
[
17417,
17420
],
[
17983,
17986
],
[
18589,
18592
],
[
19763,
19766
],
[
21690,
21693
],
[
22714,
22717
],
[
23807,
23810
],
[
24012,
24015
],
[
24167,
24170
],
[
24550,
24553
],
[
25495,
25498
],
[
26033,
26036
],
[
26682,
26685
],
[
27380,
27383
],
[
27782,
27785
],
[
28888,
28891
],
[
29111,
29114
],
[
31122,
31125
],
[
31620,
31623
],
[
36915,
36918
],
[
40184,
40187
],
[
40589,
40592
],
[
41621,
41624
],
[
42804,
42807
],
[
43438,
43441
],
[
45159,
45162
],
[
47714,
47717
],
[
47963,
47966
],
[
50036,
50039
]
],
[
[
1522,
1528
],
[
2453,
2459
],
[
8720,
8726
],
[
10157,
10163
],
[
14417,
14423
],
[
15631,
15637
],
[
49086,
49092
]
],
[
[
1547,
1551
],
[
30201,
30205
],
[
30262,
30266
],
[
30365,
30369
],
[
33009,
33013
],
[
38471,
38475
]
],
[
[
1668,
1684
]
],
[
[
1801,
1824
],
[
2405,
2428
],
[
49897,
49920
]
],
[
[
8472,
8492
],
[
8675,
8695
],
[
49529,
49549
]
],
[
[
9533,
9554
],
[
10111,
10132
],
[
50189,
50210
]
],
[
[
14162,
14184
],
[
46009,
46031
],
[
14370,
14392
],
[
50236,
50258
]
],
[
[
15318,
15342
],
[
15582,
15606
],
[
50300,
50324
]
],
[
[
45984,
46008
],
[
46529,
46553
],
[
46875,
46899
],
[
47603,
47627
],
[
50522,
50546
]
],
[
[
48166,
48174
]
]
] |
# make print() work the same in Python 2 and 3
from __future__ import print_function
import numpy as np
import pyedda as edda
#Univariate Gaussian
print("//////////Univariate Gaussian///////")
dummy_data = np.random.rand(100)
gaussian = edda.Gaussian(100, 20)
print("gaussian.getMean():", gaussian.getMean())
print("gaussian.getVar():", gaussian.getVar())
print("gaussian.getPdf(105):", gaussian.getPdf(105))
print("gaussian.getSample():", gaussian.getSample())
print("gaussian.getCdf(105):", gaussian.getCdf(105))
print("gaussian.getCdfPrecise():", gaussian.getCdfPrecise(105))
print("Output gaussian:")
gaussian.output()
print()
| [
[
[
61,
75
]
],
[
[
84,
95
],
[
200,
202
]
],
[
[
103,
117
],
[
231,
235
]
],
[
[
187,
197
]
],
[
[
220,
228
],
[
283,
291
],
[
331,
339
],
[
381,
389
],
[
434,
442
],
[
487,
495
],
[
544,
552
],
[
599,
607
]
]
] |
import graphene
from graphene import Node
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.rest_framework.mutation import SerializerMutation
from graphene_django.types import DjangoObjectType
from rest_framework.generics import get_object_or_404
from contact.models import Contact
from contact.serializers import ContactSerializer
class ContactModelMutation(SerializerMutation):
class Meta:
serializer_class = ContactSerializer
convert_choices_to_enum = False
class ContactNode(DjangoObjectType):
class Meta:
model = Contact
interfaces = (Node,)
fields = "__all__"
filter_fields = ["first_name"]
class ContactType(DjangoObjectType):
class Meta:
model = Contact
fields = "__all__"
class Query(graphene.ObjectType):
contact_node = Node.Field(ContactNode)
contacts_node = DjangoFilterConnectionField(ContactNode)
contact = graphene.Field(ContactType, id=graphene.Int())
contacts = graphene.List(ContactType)
def resolve_contacts(self, info, **kwargs):
return Contact.objects.all()
def resolve_contact(self, info, id):
return get_object_or_404(Contact, pk=id)
class DeleteMutation(graphene.Mutation):
class Arguments:
# The input arguments for this mutation
id = graphene.Int(required=True)
# The class attributes define the response of the mutation
id = graphene.ID()
message = graphene.String()
@classmethod
def mutate(cls, root, info, id):
contact = get_object_or_404(Contact, pk=id)
contact.delete()
return cls(id=id, message='deleted')
class Mutation(graphene.ObjectType):
create_contact = ContactModelMutation.Field()
update_contact = ContactModelMutation.Field()
delete_contact = DeleteMutation.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
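# --- Illustrative sketch (not part of the original schema) ---
# Example operations this schema is expected to accept. Field names use
# graphene's camelCase conversion; the Contact fields shown are
# hypothetical (only first_name is known from filter_fields).
EXAMPLE_QUERY = """
query {
  contacts { id firstName }
  contact(id: 1) { id firstName }
}
"""
EXAMPLE_DELETE_MUTATION = """
mutation {
  deleteContact(id: 1) { id message }
}
"""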
| [
[
[
7,
15
],
[
812,
820
],
[
953,
961
],
[
984,
992
],
[
1015,
1023
],
[
1242,
1250
],
[
1344,
1352
],
[
1445,
1453
],
[
1473,
1481
],
[
1685,
1693
],
[
1862,
1870
]
],
[
[
37,
41
],
[
619,
623
],
[
853,
857
]
],
[
[
77,
104
],
[
897,
924
]
],
[
[
157,
175
],
[
396,
414
]
],
[
[
210,
226
],
[
538,
554
],
[
712,
728
]
],
[
[
263,
280
],
[
1185,
1202
],
[
1564,
1581
]
],
[
[
309,
316
],
[
589,
596
],
[
763,
770
],
[
1106,
1113
],
[
1203,
1210
],
[
1582,
1589
]
],
[
[
349,
366
],
[
460,
477
]
],
[
[
375,
395
],
[
1728,
1748
],
[
1778,
1798
]
],
[
[
526,
537
],
[
864,
875
],
[
925,
936
]
],
[
[
700,
711
],
[
968,
979
],
[
1029,
1040
]
],
[
[
806,
811
],
[
1884,
1889
]
],
[
[
1227,
1241
],
[
1828,
1842
]
],
[
[
1676,
1684
],
[
1900,
1908
]
],
[
[
1853,
1859
]
]
] |
# µPing (MicroPing) for MicroPython
# copyright (c) 2018 Shawwwn <[email protected]>
# License: MIT
# Internet Checksum Algorithm
# Author: Olav Morken
# https://github.com/olavmrk/python-ping/blob/master/ping.py
# @data: bytes
# ping statistics, loop mode and KeyboardInterrupt handler, + esp8266 compatible
# copyright (c) 2020 Carglglz
# License: MIT
def checksum(data):
if len(data) & 0x1: # Odd number of bytes
data += b'\0'
cs = 0
for pos in range(0, len(data), 2):
b1 = data[pos]
b2 = data[pos + 1]
cs += (b1 << 8) + b2
while cs >= 0x10000:
cs = (cs & 0xffff) + (cs >> 16)
cs = ~cs & 0xffff
return cs
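# --- Illustrative sketch (not part of the original module) ---
# Property of the Internet checksum above: a packet that embeds its own
# checksum re-sums to zero, which is how receivers validate ICMP data.
def _checksum_selftest():
    header = b'\x08\x00\x00\x00'  # echo-request type/code, checksum zeroed
    cs = checksum(header)
    packet = bytes([header[0], header[1], cs >> 8, cs & 0xff])
    return checksum(packet) == 0  # expected: True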
def stddev(data):
N = len(data)
avg = sum(data)/N
num = sum([(x-avg)**2 for x in data])
den = N - 1
stddev = (num/den)**0.5
return stddev
def ping(host, count=4, timeout=5000, interval=10, quiet=False, size=64,
rtn=True, loop=False, int_loop=800):
import utime
import uselect
import uctypes
import usocket
import ustruct
import urandom
from sys import platform
import gc
from array import array
# prepare packet
assert size >= 16, "pkt size too small"
pkt = b'Q'*size
pkt_desc = {
"type": uctypes.UINT8 | 0,
"code": uctypes.UINT8 | 1,
"checksum": uctypes.UINT16 | 2,
"id": uctypes.UINT16 | 4,
"seq": uctypes.INT16 | 6,
"timestamp": uctypes.UINT64 | 8,
} # packet header descriptor
h = uctypes.struct(uctypes.addressof(pkt), pkt_desc, uctypes.BIG_ENDIAN)
h.type = 8 # ICMP_ECHO_REQUEST
h.code = 0
h.checksum = 0
if platform == 'esp8266':
h.id = urandom.getrandbits(16)
else:
h.id = urandom.randint(0, 65535)
h.seq = 1
time_data = array("f", (0 for _ in range(0)))
# init socket
sock = usocket.socket(usocket.AF_INET, usocket.SOCK_RAW, 1)
sock.setblocking(0)
sock.settimeout(timeout/1000)
addr = usocket.getaddrinfo(host, 1)[0][-1][0] # ip address
sock.connect((addr, 1))
not quiet and print("PING %s (%s): %u data bytes" % (host, addr, len(pkt)))
seq_loop = -1
try:
if loop:
n_trans = 0
n_recv = 0
while True:
gc.collect()
utime.sleep_ms(int_loop)
count = 1
seq_loop += 1
seqs = list(range(1, count+1)) # [1,2,...,count]
c = 1
t = 0
finish = False
while t < timeout:
if t == interval and c <= count:
# send packet
h.checksum = 0
h.seq = c
h.timestamp = utime.ticks_us()
h.checksum = checksum(pkt)
if sock.send(pkt) == size:
n_trans += 1
t = 0 # reset timeout
else:
seqs.remove(c)
if loop:
count += 1
seqs.append(count)
c += 1
# recv packet
while 1:
socks, _, _ = uselect.select([sock], [], [], 0)
if socks:
resp = socks[0].recv(4096)
resp_mv = memoryview(resp)
h2 = uctypes.struct(uctypes.addressof(
resp_mv[20:]), pkt_desc, uctypes.BIG_ENDIAN)
# TODO: validate checksum (optional)
seq = h2.seq
# 0: ICMP_ECHO_REPLY
if h2.type == 0 and h2.id == h.id and (seq in seqs):
t_elapsed = (utime.ticks_us()-h2.timestamp) / 1000
ttl = ustruct.unpack('!B', resp_mv[8:9])[0] # time-to-live
n_recv += 1
not quiet and print("{} bytes from {}: icmp_seq={} ttl={} time={:.3f} ms".format(
len(resp), addr, seq_loop, ttl, t_elapsed))
time_data.append(t_elapsed)
seqs.remove(seq)
if len(seqs) == 0:
finish = True
break
else:
break
if finish:
break
utime.sleep_ms(1)
t += 1
else:
seqs = list(range(1, count+1)) # [1,2,...,count]
c = 1
t = 0
n_trans = 0
n_recv = 0
finish = False
while t < timeout:
if t == interval and c <= count:
# send packet
h.checksum = 0
h.seq = c
h.timestamp = utime.ticks_us()
h.checksum = checksum(pkt)
if sock.send(pkt) == size:
n_trans += 1
t = 0 # reset timeout
else:
seqs.remove(c)
if loop:
count += 1
seqs.append(count)
c += 1
# recv packet
while 1:
socks, _, _ = uselect.select([sock], [], [], 0)
if socks:
resp = socks[0].recv(4096)
resp_mv = memoryview(resp)
h2 = uctypes.struct(uctypes.addressof(
resp_mv[20:]), pkt_desc, uctypes.BIG_ENDIAN)
# TODO: validate checksum (optional)
seq = h2.seq
# 0: ICMP_ECHO_REPLY
if h2.type == 0 and h2.id == h.id and (seq in seqs):
t_elapsed = (utime.ticks_us()-h2.timestamp) / 1000
ttl = ustruct.unpack('!B', resp_mv[8:9])[0] # time-to-live
n_recv += 1
not quiet and print("{} bytes from {}: icmp_seq={} ttl={} time={:.3f} ms".format(
len(resp), addr, seq, ttl, t_elapsed))
time_data.append(t_elapsed)
seqs.remove(seq)
if loop:
count += 1
seqs.append(count)
utime.sleep_ms(int_loop)
if len(seqs) == 0:
finish = True
break
else:
break
if finish:
if not loop:
break
utime.sleep_ms(1)
t += 1
sock.close()
if not quiet:
print('--- {} ping statistics ---'.format(host))
print("{} packets transmitted, {} packets received, {:.1f}% packet loss".format(
n_trans, n_recv, (1-(n_recv/n_trans))*100))
print("round-trip min/avg/max/stddev = {:.2f}/{:.2f}/{:.2f}/{:.2f} ms".format(min(time_data),sum(time_data)/len(time_data),max(time_data), stddev(time_data)))
gc.collect()
if rtn:
return (n_trans, n_recv)
except KeyboardInterrupt:
# close
sock.close()
gc.collect()
if not quiet:
print('^C')
print('--- {} ping statistics ---'.format(host))
print("{} packets transmitted, {} packets received, {:.1f}% packet loss".format(
n_trans, n_recv, (1-(n_recv/n_trans))*100))
print("round-trip min/avg/max/stddev = {:.2f}/{:.2f}/{:.2f}/{:.2f} ms".format(min(time_data),sum(time_data)/len(time_data),max(time_data), stddev(time_data)))
if rtn:
return (n_trans, n_recv)
except Exception as e:
print(e)
| [
[
[
363,
371
],
[
2819,
2827
],
[
5178,
5186
]
],
[
[
684,
690
],
[
7567,
7573
],
[
8160,
8166
]
],
[
[
848,
852
]
]
] |
from __future__ import absolute_import
import operator
from celery.concurrency import solo
from celery.utils.functional import noop
from celery.tests.case import AppCase
class test_solo_TaskPool(AppCase):
def test_on_start(self):
x = solo.TaskPool()
x.on_start()
def test_on_apply(self):
x = solo.TaskPool()
x.on_start()
x.on_apply(operator.add, (2, 2), {}, noop, noop)
def test_info(self):
x = solo.TaskPool()
x.on_start()
self.assertTrue(x.info)
| [
[
[
23,
38
]
],
[
[
47,
55
],
[
386,
394
]
],
[
[
88,
92
],
[
251,
255
],
[
330,
334
],
[
462,
466
]
],
[
[
129,
133
],
[
412,
416
],
[
418,
422
]
],
[
[
164,
171
],
[
199,
206
]
],
[
[
180,
198
]
]
] |
#-*- coding: utf-8 -*-
import sys
import os
import random
import re
import time
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
#sys.path.append('../')
from hybrid_bid_t1_model import Seq2Seq
from hybrid_data_utils import *
sub = '-'*20
def init_command_line(argv):
from argparse import ArgumentParser
usage = "seq2seq"
description = ArgumentParser(usage)
description.add_argument("--w2v_path", type=str, default="/users3/yfwang/data/w2v/opensubtitle/")
description.add_argument("--corpus_path", type=str, default="/users3/yfwang/data/corpus/opensubtitle/")
description.add_argument("--w2v", type=str, default="train_all_200e.w2v")
description.add_argument("--test_file", type=str, default="test_sessions.txt")
description.add_argument("--max_context_size", type=int, default=2)
description.add_argument("--batch_size", type=int, default=64)
description.add_argument("--enc_hidden_size", type=int, default=512)
description.add_argument("--max_senten_len", type=int, default=15)
description.add_argument("--dropout", type=float, default=0.5)
description.add_argument("--teach_forcing", type=int, default=1)
description.add_argument("--print_every", type=int, default=100, help="print every batches when training")
description.add_argument("--weights", type=str, default=None)
return description.parse_args(argv)
opts = init_command_line(sys.argv[1:])
print ("Configure:")
print (" w2v:",os.path.join(opts.w2v_path,opts.w2v))
print (" test_file:",os.path.join(opts.corpus_path,opts.test_file))
print (" max_context_size:",opts.max_context_size)
print (" batch_size:",opts.batch_size)
print (" enc_hidden_size:",opts.enc_hidden_size)
print (" max_senten_len:",opts.max_senten_len)
print (" dropout:",opts.dropout)
print (" teach_forcing:",opts.teach_forcing)
print (" print_every:",opts.print_every)
print (" weights:",opts.weights)
print ("")
def readingTestCorpus(test_file_path):
print ("reading...")
test_file = open(test_file_path,'r')
list_pairs = []
tmp_pair = []
for line in test_file:
line = line.strip('\n')
if line == sub:
list_pairs.append(tmp_pair)
tmp_pair = []
else:
tmp_pair.append(line)
test_file.close()
test_contexts = []
test_replys = []
max_con_size = 0
min_con_size = 10000
for pair in list_pairs:
if len(pair) >= 3:
test_contexts.append(pair[0:-1])
test_replys.append(pair[-1])
max_con_size = max(len(pair[0:-1]),max_con_size)
min_con_size = min(len(pair[0:-1]),min_con_size)
else:
pass
print (max_con_size)
print (min_con_size)
return test_contexts,test_replys
def preProcess(word2index,test_contexts,unk_char,ini_char,max_senten_len,max_context_size):
print ("preprocessing...")
filter_test_contexts = []
for context in test_contexts:
filter_context = [filteringSenten(word2index,senten,unk_char,ini_char) for senten in context]
filter_test_contexts.append(filter_context)
padded_test_pairs = []
for context in filter_test_contexts:
pad_list = [0]*len(context)
if len(context) <= max_context_size:
pad_list = [1]*(max_context_size-len(context)) + pad_list
context = ['<unk>']*(max_context_size-len(context)) + context
else:
pad_list = pad_list[-max_context_size:]
context = context[-max_context_size:]
padded_context = [paddingSenten(senten,max_senten_len) for senten in context]
padded_test_pairs.append([padded_context,pad_list])
return padded_test_pairs
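# --- Illustrative sketch (not part of the original script) ---
# The left-padding rule used above, isolated: short contexts are padded
# on the left with '<unk>' sentences and flagged with 1 in pad_list,
# long contexts are truncated to the most recent max_context_size turns.
def _pad_context_sketch(context, max_context_size):
    pad_list = [0]*len(context)
    if len(context) <= max_context_size:
        pad_list = [1]*(max_context_size-len(context)) + pad_list
        context = ['<unk>']*(max_context_size-len(context)) + context
    else:
        pad_list = pad_list[-max_context_size:]
        context = context[-max_context_size:]
    return context, pad_list
# _pad_context_sketch(['hi there'], 2) -> (['<unk>', 'hi there'], [1, 0])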
# Read in a list of sentences, build batches, then predict
def predictSentences(index2word,unk_char,ini_char,ini_idx,model,test_pairs,
print_every,batch_size,max_senten_len,max_context_size):
model.eval()
# Build the list of batches
pairs_batches,num_batches = buildingPairsBatch(test_pairs,batch_size,shuffle=False)
print ("")
print ("num of batch:",num_batches)
predict_sentences = []
idx_batch = 0
for contexts_tensor_batch, pad_matrix_batch in getTensorsContextPairsBatch(word2index,pairs_batches,max_context_size):
predict_batch = model.predict(contexts_tensor_batch,index2word,pad_matrix_batch,ini_idx,sep_char='\t')
predict_sentences.extend(predict_batch)
if (idx_batch+1)%print_every == 0:
print ("{} batches finished".format(idx_batch+1))
idx_batch += 1
predict_sentences = predict_sentences[0:len(test_pairs)]
return predict_sentences
if __name__ == '__main__':
ini_char = '</i>'
unk_char = '<unk>'
t0 = time.time()
print ("loading word2vec...")
ctable = W2vCharacterTable(os.path.join(opts.w2v_path,opts.w2v),ini_char,unk_char)
print(" dict size:",ctable.getDictSize())
print (" emb size:",ctable.getEmbSize())
print (time.time()-t0)
print ("")
seq2seq = Seq2Seq(ctable.getDictSize(),ctable.getEmbSize(),opts.enc_hidden_size,opts.batch_size,opts.dropout,
opts.max_senten_len,opts.teach_forcing).cuda()
if opts.weights is not None:
print ("load model parameters...")
seq2seq.load_state_dict(torch.load(opts.weights))
else:
print ("No model parameters!")
exit()
test_contexts,test_replys = readingTestCorpus(os.path.join(opts.corpus_path,opts.test_file))
print ("len(test_contexts):",len(test_contexts))
print ("len(test_replys):",len(test_replys))
word2index = ctable.getWord2Index()
test_pairs = preProcess(word2index,test_contexts,unk_char,ini_char,opts.max_senten_len,opts.max_context_size)
print ("len(test_pairs):",len(test_pairs))
'''test_pair = test_pairs[100]
test_context = test_pair[0]
pad_list = test_pair[1]
for senten in test_context:
print senten
print pad_list'''
print ("start predicting...")
ini_idx = word2index[ini_char]
predict_sentences = predictSentences(ctable.getIndex2Word(),unk_char,ini_char,ini_idx,seq2seq,test_pairs,
opts.print_every,opts.batch_size,opts.max_senten_len,opts.max_context_size)
print ("writing...")
if not os.path.exists('./result/'):
os.mkdir('./result/')
pred_res_file = open("./result/open_pred_res_hyb_t1_len2",'w')
pred_ans_file = open("./result/open_pred_ans_hyb_t1_len2",'w')
for idx,senten in enumerate(predict_sentences):
test_context = test_contexts[idx]
for test_post in test_context:
pred_res_file.write(test_post+'\n')
pred_res_file.write(senten+'\n')
pred_res_file.write(sub+'\n')
senten_l = [c for c in senten.split('\t') if c != '</s>']
pred_ans_file.write(' '.join(senten_l)+' __eou__'+'\n')
pred_res_file.close()
pred_ans_file.close()
print ("end")
| [
[
[
32,
35
],
[
1443,
1446
]
],
[
[
44,
46
],
[
1495,
1497
],
[
1555,
1557
],
[
4544,
4546
],
[
5114,
5116
],
[
5909,
5911
],
[
5941,
5943
]
],
[
[
55,
61
]
],
[
[
70,
72
]
],
[
[
81,
85
],
[
4471,
4475
],
[
4696,
4700
]
],
[
[
94,
99
],
[
4986,
4991
]
],
[
[
129,
137
]
],
[
[
157,
162
]
],
[
[
171,
185
]
],
[
[
244,
251
],
[
4739,
4746
]
],
[
[
283,
284
],
[
4526,
4543
],
[
2893,
2908
],
[
3397,
3410
],
[
3774,
3792
],
[
3974,
4001
]
],
[
[
288,
291
],
[
6313,
6316
],
[
2170,
2173
]
],
[
[
306,
323
],
[
1425,
1442
]
],
[
[
1418,
1422
],
[
1508,
1512
],
[
1522,
1526
],
[
1568,
1572
],
[
1585,
1589
],
[
1633,
1637
],
[
1679,
1683
],
[
1724,
1728
],
[
1773,
1777
],
[
1816,
1820
],
[
1858,
1862
],
[
1902,
1906
],
[
1940,
1944
],
[
4557,
4561
],
[
4571,
4575
],
[
4788,
4792
],
[
4809,
4813
],
[
4825,
4829
],
[
4845,
4849
],
[
4865,
4869
],
[
4899,
4903
],
[
4997,
5001
],
[
5127,
5131
],
[
5144,
5148
],
[
5368,
5372
],
[
5388,
5392
],
[
5799,
5803
],
[
5816,
5820
],
[
5832,
5836
],
[
5852,
5856
]
],
[
[
1973,
1990
],
[
5096,
5113
]
],
[
[
2695,
2705
],
[
5314,
5324
]
],
[
[
3578,
3594
],
[
5703,
5719
]
],
[
[
4425,
4433
],
[
4581,
4589
],
[
5359,
5367
],
[
5671,
5679
],
[
5752,
5760
]
],
[
[
4445,
4453
],
[
4590,
4598
],
[
5350,
5358
],
[
5743,
5751
]
],
[
[
4466,
4468
],
[
4708,
4710
]
],
[
[
4517,
4523
],
[
4622,
4628
],
[
4666,
4672
],
[
4747,
4753
],
[
4768,
4774
],
[
5276,
5282
],
[
5720,
5726
]
],
[
[
4729,
4736
],
[
4962,
4969
],
[
5769,
5776
]
],
[
[
5068,
5081
],
[
5196,
5209
],
[
5336,
5349
],
[
6161,
6174
]
],
[
[
5082,
5093
],
[
5245,
5256
]
],
[
[
5263,
5273
],
[
5325,
5335
],
[
5660,
5670
],
[
4002,
4012
]
],
[
[
5301,
5311
],
[
5443,
5453
],
[
5777,
5787
]
],
[
[
5650,
5657
],
[
5761,
5768
]
],
[
[
5683,
5700
],
[
6123,
6140
]
],
[
[
5965,
5978
],
[
6218,
6231
],
[
6257,
6270
],
[
6293,
6306
],
[
6447,
6460
]
],
[
[
6030,
6043
],
[
6387,
6400
],
[
6471,
6484
]
],
[
[
6099,
6102
],
[
6175,
6178
]
],
[
[
6103,
6109
],
[
6277,
6283
],
[
6349,
6355
]
],
[
[
6146,
6158
],
[
6200,
6212
]
],
[
[
6187,
6196
],
[
6238,
6247
]
],
[
[
6326,
6334
],
[
6416,
6424
]
]
] |
import json
import logging
import os
import re
import sys
import time
def setup_logger():
console = logging.StreamHandler(sys.stdout)
handlers = [console]
logging.basicConfig(handlers=handlers)
root = logging.getLogger()
root.setLevel(logging.INFO)
setup_logger()
log = logging.getLogger(__name__)
class NS:
@staticmethod
def dict(ns, deep=True):
dic = ns.__dict__
if not deep:
return dic
for k, v in dic.items():
if isinstance(v, NS):
dic[k] = NS.dict(v)
return dic
@staticmethod
def from_dict(dic, deep=True):
ns = NS(dic)
if not deep:
return ns
for k, v in ns.__dict__.items():
if isinstance(v, dict):
ns.__dict__[k] = NS.from_dict(v)
return ns
@staticmethod
def walk(ns, fn, inplace=False):
nns = ns if inplace else NS()
for k, v in ns.__dict__.items():
nk, nv = fn(k, v)
if nk is not None:
if v is nv and isinstance(v, NS):
nv = NS.walk(nv, fn, inplace)
nns.__dict__[nk] = nv
return nns
def __init__(self, *args, **kwargs):
self.__dict__.update(dict(*args, **kwargs))
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
class Timer:
@staticmethod
def _zero():
return 0
def __init__(self, clock=time.time, enabled=True):
self.start = 0
self.stop = 0
self._time = clock if enabled else Timer._zero
def __enter__(self):
self.start = self._time()
return self
def __exit__(self, *args):
self.stop = self._time()
@property
def duration(self):
if self.stop > 0:
return self.stop - self.start
return self._time() - self.start
def result(output_file=None,
predictions=None, truth=None,
probabilities=None, probabilities_labels=None,
target_is_encoded=False,
error_message=None,
models_count=None,
training_duration=None):
return locals()
data_keys = re.compile("^(X|y|data)(_.+)?$")
def call_run(run_fn):
import numpy as np
params = NS.from_dict(json.loads(sys.stdin.read()))
def load_data(name, path):
if isinstance(path, str) and data_keys.match(name):
return name, np.load(path, allow_pickle=True)
return name, path
print(params.dataset)
ds = NS.walk(params.dataset, load_data)
config = params.config
config.framework_params = NS.dict(config.framework_params)
try:
result = run_fn(ds, config)
res = dict(result)
for name in ['predictions', 'truth', 'probabilities']:
arr = result[name]
if arr is not None:
res[name] = os.path.join(config.result_dir, '.'.join([name, 'npy']))
np.save(res[name], arr, allow_pickle=True)
except Exception as e:
log.exception(e)
res = dict(
error_message=str(e),
models_count=0
)
print(config.result_token)
print(json.dumps(res, separators=(',', ':')))
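# --- Illustrative sketch (not part of the original module) ---
# Minimal shape of the run_fn that call_run() above expects: it receives
# the dataset namespace and the config, and returns the dict built by
# result(). The values below are hypothetical placeholders.
def _example_run_fn(dataset, config):
    with Timer() as training:
        preds = [0, 1, 0]  # stand-in for a real model's predictions
    return result(predictions=preds,
                  truth=[0, 1, 1],
                  models_count=1,
                  training_duration=training.duration)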
| [
[
[
7,
11
],
[
2323,
2327
],
[
3219,
3223
]
],
[
[
19,
26
],
[
295,
302
],
[
106,
113
],
[
169,
176
],
[
219,
226
],
[
257,
264
]
],
[
[
34,
36
],
[
2918,
2920
]
],
[
[
44,
46
],
[
2216,
2218
]
],
[
[
54,
57
],
[
128,
131
],
[
2334,
2337
]
],
[
[
65,
69
],
[
1500,
1504
]
],
[
[
76,
88
],
[
273,
285
]
],
[
[
289,
292
],
[
3069,
3072
]
],
[
[
331,
333
],
[
515,
517
],
[
545,
547
],
[
642,
644
],
[
803,
805
],
[
926,
928
],
[
1078,
1080
],
[
1108,
1110
],
[
2310,
2312
],
[
2565,
2567
],
[
2658,
2660
]
],
[
[
1410,
1415
],
[
1614,
1619
]
],
[
[
1925,
1931
]
],
[
[
2204,
2213
],
[
2422,
2431
]
],
[
[
2255,
2263
]
]
] |
# -*- coding: utf-8 -*-
"""
Goes through the clean dataset and determines when audio is occurring by
measuring against a simple threshold.
We use these labels for the 15, 10, 5, and 0 dB SNR samples
Created on Sun Dec 4 15:37:11 2016
@author: brady
"""
import os
import wavio
from fe_utils import *
from config import TRAIN_CLEAN
os.chdir(TRAIN_CLEAN)
for file in os.listdir():
if not file.endswith('.wav'):
continue
mWav = wavio.read(file)
frame_len = int(getFrameSize(mWav.rate))
mWav.data = normalizeAudio(mWav.data, mWav.sampwidth)
frame_cnt = int(len(mWav.data)/frame_len )
if (len(mWav.data)%frame_len):
frame_cnt += 1
class_list = []
for idx in range(frame_cnt):
if (idx == frame_cnt-1):
# last chunk may be truncated
chunk = mWav.data[idx*frame_len :]
else:
chunk = mWav.data[idx*frame_len : (idx+1)*frame_len]
if aboveFrameThreshold(chunk):
class_list.append(1)
else:
class_list.append(0)
filename = os.path.splitext(file)[0]
with open(filename + '.csv', 'w') as f:
f.write(','.join([str(c) for c in class_list]))
| [
[
[
257,
259
],
[
329,
331
],
[
364,
366
],
[
1100,
1102
]
],
[
[
267,
272
],
[
445,
450
]
],
[
[
295,
296
],
[
482,
494
],
[
528,
542
],
[
964,
983
]
],
[
[
316,
327
],
[
338,
349
]
],
[
[
356,
360
],
[
389,
393
],
[
456,
460
],
[
1117,
1121
]
],
[
[
438,
442
],
[
495,
499
],
[
543,
547
],
[
554,
558
],
[
516,
520
],
[
594,
598
],
[
634,
638
],
[
834,
838
],
[
895,
899
]
],
[
[
466,
475
],
[
605,
614
],
[
645,
654
],
[
848,
857
],
[
909,
918
],
[
929,
938
]
],
[
[
574,
583
],
[
665,
674
],
[
726,
735
],
[
757,
766
]
],
[
[
689,
699
],
[
1004,
1014
],
[
1051,
1061
],
[
1212,
1222
]
],
[
[
713,
716
],
[
750,
753
],
[
844,
847
],
[
905,
908
],
[
922,
925
]
],
[
[
826,
831
],
[
984,
989
]
],
[
[
887,
892
],
[
984,
989
]
],
[
[
1089,
1097
],
[
1140,
1148
]
],
[
[
1167,
1168
],
[
1178,
1179
]
]
] |
# Copyright (c) 2014-2015 Matthias Geier
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""JACK Client for Python.
http://jackclient-python.readthedocs.io/
"""
__version__ = '0.4.3'
from ctypes.util import find_library as _find_library
import errno as _errno
import platform as _platform
import warnings as _warnings
from _jack import ffi as _ffi
if _platform.system() == 'Windows':
if _platform.architecture()[0] == '64bit':
_libname = _find_library('libjack64')
else:
_libname = _find_library('libjack')
else:
_libname = _find_library('jack')
if _libname is None:
raise OSError('JACK library not found')
_lib = _ffi.dlopen(_libname)
_AUDIO = b'32 bit float mono audio'
_MIDI = b'8 bit raw midi'
STOPPED = _lib.JackTransportStopped
"""Transport halted."""
ROLLING = _lib.JackTransportRolling
"""Transport playing."""
STARTING = _lib.JackTransportStarting
"""Waiting for sync ready."""
NETSTARTING = _lib.JackTransportNetStarting
"""Waiting for sync ready on the network."""
_SUCCESS = 0
_FAILURE = 1
class Client(object):
"""A client that can connect to the JACK audio server."""
def __init__(self, name, use_exact_name=False, no_start_server=False,
servername=None, session_id=None):
"""Create a new JACK client.
A client object is a *context manager*, i.e. it can be used in a
*with statement* to automatically call `activate()` in the
beginning of the statement and `deactivate()` and `close()` on
exit.
Parameters
----------
name : str
The desired client name of at most `client_name_size()`
characters. The name scope is local to each server.
Unless forbidden by the *use_exact_name* option, the server
will modify this name to create a unique variant, if needed.
Other Parameters
----------------
use_exact_name : bool
Whether an error should be raised if *name* is not unique.
See `Status.name_not_unique`.
no_start_server : bool
Do not automatically start the JACK server when it is not
already running. This option is always selected if
``JACK_NO_START_SERVER`` is defined in the calling process
environment.
servername : str
Selects from among several possible concurrent server
instances.
Server names are unique to each user. If unspecified, use
``'default'`` unless ``JACK_DEFAULT_SERVER`` is defined in
the process environment.
session_id : str
Pass a SessionID Token. This allows the sessionmanager to
identify the client again.
"""
status = _ffi.new('jack_status_t*')
options = _lib.JackNullOption
optargs = []
if use_exact_name:
options |= _lib.JackUseExactName
if no_start_server:
options |= _lib.JackNoStartServer
if servername:
options |= _lib.JackServerName
optargs.append(_ffi.new('char[]', servername.encode()))
if session_id:
options |= _lib.JackSessionID
optargs.append(_ffi.new('char[]', session_id.encode()))
self._ptr = _lib.jack_client_open(name.encode(), options, status,
*optargs)
self._status = Status(status[0])
if not self._ptr:
raise JackError('Error initializing "{0}": {1}'.format(
name, self.status))
self._inports = Ports(self, _AUDIO, _lib.JackPortIsInput)
self._outports = Ports(self, _AUDIO, _lib.JackPortIsOutput)
self._midi_inports = Ports(self, _MIDI, _lib.JackPortIsInput)
self._midi_outports = Ports(self, _MIDI, _lib.JackPortIsOutput)
self._keepalive = []
self._position = _ffi.new('jack_position_t*')
# Avoid confusion if something goes wrong before opening the client:
_ptr = _ffi.NULL
def __enter__(self):
self.activate()
return self
def __exit__(self, *args):
self.deactivate()
self.close()
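    # Usage sketch (illustrative; 'example' is an arbitrary client name):
    # entering the with-block calls activate(), leaving it calls deactivate()
    # and close(), as described in the constructor docstring.
    #
    #     with Client('example') as client:
    #         pass  # register ports, set callbacks, make connections here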
def __del__(self):
"""Close JACK client on garbage collection."""
self.close()
@property
def name(self):
"""The name of the JACK client (read-only)."""
return _ffi.string(_lib.jack_get_client_name(self._ptr)).decode()
@property
def samplerate(self):
"""The sample rate of the JACK system (read-only)."""
return _lib.jack_get_sample_rate(self._ptr)
@property
def blocksize(self):
"""The JACK block size (must be a power of two).
The current maximum size that will ever be passed to the process
callback. It should only be queried *before* `activate()` has
been called. This size may change, clients that depend on it
must register a callback with `set_blocksize_callback()` so they
will be notified if it does.
Changing the blocksize stops the JACK engine process cycle, then
calls all registered callback functions (see
`set_blocksize_callback()`) before restarting the process
cycle. This will cause a gap in the audio flow, so it should
only be done at appropriate stopping points.
"""
return _lib.jack_get_buffer_size(self._ptr)
@blocksize.setter
def blocksize(self, blocksize):
_check(_lib.jack_set_buffer_size(self._ptr, blocksize),
'Error setting JACK blocksize')
@property
def status(self):
"""JACK client status. See `Status`."""
return self._status
@property
def realtime(self):
"""Whether JACK is running with ``-R`` (``--realtime``)."""
return bool(_lib.jack_is_realtime(self._ptr))
@property
def frames_since_cycle_start(self):
"""Time since start of audio block.
The estimated time in frames that has passed since the JACK
server began the current process cycle.
"""
return _lib.jack_frames_since_cycle_start(self._ptr)
@property
def frame_time(self):
"""The estimated current time in frames.
This is intended for use in other threads (not the process
callback). The return value can be compared with the value of
`last_frame_time` to relate time in other threads to JACK time.
"""
return _lib.jack_frame_time(self._ptr)
@property
def last_frame_time(self):
"""The precise time at the start of the current process cycle.
This may only be used from the process callback (see
`set_process_callback()`), and can be used to interpret
timestamps generated by `frame_time` in other threads with
respect to the current process cycle.
This is the only jack time function that returns exact time:
when used during the process callback it always returns the same
value (until the next process callback, where it will return
that value + `blocksize`, etc). The return value is guaranteed
to be monotonic and linear in this fashion unless an xrun occurs
(see `set_xrun_callback()`). If an xrun occurs, clients must
check this value again, as time may have advanced in a
non-linear way (e.g. cycles may have been skipped).
"""
return _lib.jack_last_frame_time(self._ptr)
@property
def inports(self):
"""A list of audio input `Ports`.
New ports can be created and added to this list with
`inports.register() <Ports.register>`.
When :meth:`~OwnPort.unregister` is called on one of the items
in this list, this port is removed from the list.
`inports.clear() <Ports.clear>` can be used to unregister all
audio input ports at once.
See Also
--------
Ports, OwnPort
"""
return self._inports
@property
def outports(self):
"""A list of audio output :class:`Ports`.
New ports can be created and added to this list with
`outports.register() <Ports.register>`.
When :meth:`~OwnPort.unregister` is called on one of the items
in this list, this port is removed from the list.
`outports.clear() <Ports.clear>` can be used to unregister all
audio output ports at once.
See Also
--------
Ports, OwnPort
"""
return self._outports
@property
def midi_inports(self):
"""A list of MIDI input :class:`Ports`.
New MIDI ports can be created and added to this list with
`midi_inports.register() <Ports.register>`.
When :meth:`~OwnPort.unregister` is called on one of the items
in this list, this port is removed from the list.
`midi_inports.clear() <Ports.clear>` can be used to unregister
all MIDI input ports at once.
See Also
--------
Ports, OwnMidiPort
"""
return self._midi_inports
@property
def midi_outports(self):
"""A list of MIDI output :class:`Ports`.
New MIDI ports can be created and added to this list with
`midi_outports.register() <Ports.register>`.
When :meth:`~OwnPort.unregister` is called on one of the items
in this list, this port is removed from the list.
`midi_outports.clear() <Ports.clear>` can be used to unregister
all MIDI output ports at once.
See Also
--------
Ports, OwnMidiPort
"""
return self._midi_outports
def owns(self, port):
"""Check if a given port belongs to *self*.
Parameters
----------
port : str or Port
Full port name or `Port`, `MidiPort`, `OwnPort` or
`OwnMidiPort` object.
"""
port = self._get_port_ptr(port)
return bool(_lib.jack_port_is_mine(self._ptr, port))
def activate(self):
"""Activate JACK client.
Tell the JACK server that the program is ready to start
processing audio.
"""
_check(_lib.jack_activate(self._ptr), 'Error activating JACK client')
def deactivate(self, ignore_errors=True):
"""De-activate JACK client.
Tell the JACK server to remove *self* from the process graph.
Also, disconnect all ports belonging to it, since inactive
clients have no port connections.
"""
err = _lib.jack_deactivate(self._ptr)
if not ignore_errors:
_check(err, 'Error deactivating JACK client')
def cpu_load(self):
"""Return the current CPU load estimated by JACK.
This is a running average of the time it takes to execute a full
process cycle for all clients as a percentage of the real time
available per cycle determined by `blocksize` and `samplerate`.
"""
return _lib.jack_cpu_load(self._ptr)
def close(self, ignore_errors=True):
"""Close the JACK client."""
if self._ptr:
err = _lib.jack_client_close(self._ptr)
self._ptr = _ffi.NULL
if not ignore_errors:
_check(err, 'Error closing JACK client')
def connect(self, source, destination):
"""Establish a connection between two ports.
When a connection exists, data written to the source port will
be available to be read at the destination port.
Audio ports can obviously not be connected with MIDI ports.
Parameters
----------
source : str or Port
One end of the connection. Must be an output port.
destination : str or Port
The other end of the connection. Must be an input port.
See Also
--------
OwnPort.connect, disconnect
"""
if isinstance(source, Port):
source = source.name
if isinstance(destination, Port):
destination = destination.name
err = _lib.jack_connect(self._ptr, source.encode(),
destination.encode())
if err == _errno.EEXIST:
raise JackError('Connection {0!r} -> {1!r} '
'already exists'.format(source, destination))
_check(err,
'Error connecting {0!r} -> {1!r}'.format(source, destination))
def disconnect(self, source, destination):
"""Remove a connection between two ports.
Parameters
----------
source, destination : str or Port
See `connect()`.
"""
if isinstance(source, Port):
source = source.name
if isinstance(destination, Port):
destination = destination.name
_check(_lib.jack_disconnect(
self._ptr, source.encode(), destination.encode()),
"Couldn't disconnect {0!r} -> {1!r}".format(source, destination))
def transport_start(self):
"""Start JACK transport."""
_lib.jack_transport_start(self._ptr)
def transport_stop(self):
"""Stop JACK transport."""
_lib.jack_transport_stop(self._ptr)
@property
def transport_state(self):
"""JACK transport state.
This is one of `STOPPED`, `ROLLING`, `STARTING`, `NETSTARTING`.
See Also
--------
transport_query
"""
return TransportState(_lib.jack_transport_query(self._ptr, _ffi.NULL))
@property
def transport_frame(self):
"""Get/set current JACK transport frame.
Return an estimate of the current transport frame, including any
time elapsed since the last transport positional update.
Assigning a frame number repositions the JACK transport.
"""
return _lib.jack_get_current_transport_frame(self._ptr)
@transport_frame.setter
def transport_frame(self, frame):
_check(_lib.jack_transport_locate(self._ptr, frame),
'Error locating JACK transport')
def transport_locate(self, frame):
"""
.. deprecated:: 0.4.1
Use `transport_frame` instead
"""
_warnings.warn(
'transport_locate() is deprecated, use transport_frame',
DeprecationWarning)
self.transport_frame = frame
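    # Transport usage sketch (illustrative), meant for a non-process thread:
    # reposition via the transport_frame setter, then roll and stop.
    #
    #     client.transport_frame = 0
    #     client.transport_start()
    #     ...
    #     client.transport_stop()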
def transport_query(self):
"""Query the current transport state and position.
This is a convenience function that does the same as
`transport_query_struct()`, but it only returns the valid fields
in an easy-to-use ``dict``.
Returns
-------
state : TransportState
The transport state can take following values:
`STOPPED`, `ROLLING`, `STARTING` and `NETSTARTING`.
position : dict
A dictionary containing only the valid fields of the
structure returned by `transport_query_struct()`.
See Also
--------
:attr:`transport_state`, transport_query_struct
"""
state, pos = self.transport_query_struct()
return TransportState(state), position2dict(pos)
def transport_query_struct(self):
"""Query the current transport state and position.
This function is realtime-safe, and can be called from any
thread. If called from the process thread, the returned
position corresponds to the first frame of the current cycle and
the state returned is valid for the entire cycle.
Returns
-------
state : int
The transport state can take following values: `STOPPED`,
`ROLLING`, `STARTING` and `NETSTARTING`.
position : jack_position_t
See the `JACK transport documentation`__ for the available
fields.
__ http://jackaudio.org/files/docs/html/
structjack__position__t.html
See Also
--------
transport_query, transport_reposition_struct
"""
state = _lib.jack_transport_query(self._ptr, self._position)
return state, self._position
def transport_reposition_struct(self, position):
"""Request a new transport position.
May be called at any time by any client. The new position takes
effect in two process cycles. If there are slow-sync clients
and the transport is already rolling, it will enter the
`STARTING` state and begin invoking their sync callbacks
(see `jack_set_sync_callback()`__) until ready.
This function is realtime-safe.
__ http://jackaudio.org/files/docs/html/group__TransportControl.html
Parameters
----------
position : jack_position_t
Requested new transport position. This is the same
structure as returned by `transport_query_struct()`.
See Also
--------
transport_query_struct, transport_locate
"""
_check(_lib.jack_transport_reposition(self._ptr, position),
'Error re-positioning transport')
def set_freewheel(self, onoff):
"""Start/Stop JACK's "freewheel" mode.
When in "freewheel" mode, JACK no longer waits for any external
event to begin the start of the next process cycle.
As a result, freewheel mode causes "faster than realtime"
execution of a JACK graph. If possessed, real-time scheduling is
dropped when entering freewheel mode, and if appropriate it is
reacquired when stopping.
IMPORTANT: on systems using capabilities to provide real-time
scheduling (i.e. Linux kernel 2.4), if onoff is zero, this
function must be called from the thread that originally called
`activate()`. This restriction does not apply to other systems
(e.g. Linux kernel 2.6 or OS X).
Parameters
----------
onoff : bool
If ``True``, freewheel mode starts. Otherwise freewheel mode
ends.
See Also
--------
set_freewheel_callback
"""
_check(_lib.jack_set_freewheel(self._ptr, onoff),
'Error setting freewheel mode')
def set_shutdown_callback(self, callback):
"""Register shutdown callback.
Register a function (and optional argument) to be called if and
when the JACK server shuts down the client thread.
        The function must be written as if it were an asynchronous POSIX
signal handler -- use only async-safe functions, and remember
that it is executed from another thread.
A typical function might set a flag or write to a pipe so that
the rest of the application knows that the JACK client thread
has shut down.
.. note:: Clients do not need to call this. It exists only to
help more complex clients understand what is going on. It
should be called before `activate()`.
Parameters
----------
callback : callable
User-supplied function that is called whenever the JACK
daemon is shutdown. It must have this signature::
callback(status: Status, reason: str) -> None
The argument *status* is of type `jack.Status`.
.. note:: The *callback* should typically signal another
thread to correctly finish cleanup by calling `close()`
(since it cannot be called directly in the context of the
thread that calls the shutdown callback).
After server shutdown, the client is *not* deallocated by
JACK, the user (that's you!) is responsible to properly
                use `close()` to release client resources.
Alternatively, the `Client` object can be used as a
*context manager* in a *with statement*, which takes care
of activating, deactivating and closing the client
automatically.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
"""
@self._callback('JackInfoShutdownCallback')
def callback_wrapper(code, reason, _):
callback(Status(code), _ffi.string(reason).decode())
_lib.jack_on_info_shutdown(self._ptr, callback_wrapper, _ffi.NULL)
def set_process_callback(self, callback):
"""Register process callback.
        Tell the JACK server to call *callback* whenever there is work
        to be done.
The code in the supplied function must be suitable for real-time
execution. That means that it cannot call functions that might
block for a long time. This includes malloc, free, printf,
pthread_mutex_lock, sleep, wait, poll, select, pthread_join,
pthread_cond_wait, etc, etc.
.. warning:: Most Python interpreters use a `global interpreter
lock (GIL)`__, which violates the above real-time
requirement. Furthermore, Python's `garbage collector`__
might become active at an inconvenient time and block the
process callback for some time.
Because of this, Python is not really suitable for real-time
processing. If you want to implement a *reliable* real-time
audio/MIDI application, you should use a different
programming language, such as C or C++.
If you can live with some random audio drop-outs now and
then, feel free to continue using Python!
__ https://en.wikipedia.org/wiki/Global_Interpreter_Lock
__ https://en.wikipedia.org/wiki/Garbage_collection_(computer_science)
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called by the engine anytime
there is work to be done. It must have this signature::
callback(frames: int) -> None
The argument *frames* specifies the number of frames that
have to be processed in the current audio block.
It will be the same number as `blocksize` and it will be a
power of two.
As long as the client is active, the *callback* will be
called once in each process cycle. However, if an exception
is raised inside of a *callback*, it will not be called
anymore. The exception `CallbackExit` can be used to
silently prevent further callback invocations, all other
exceptions will print an error message to *stderr*.
"""
@self._callback('JackProcessCallback', error=_FAILURE)
def callback_wrapper(frames, _):
try:
callback(frames)
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_process_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting process callback')
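    # Pass-through sketch (illustrative): copy every audio input to the
    # matching output inside the process callback. It assumes equal numbers
    # of registered inports and outports and that numpy has been imported
    # (see OwnPort.get_array() below).
    #
    #     @client.set_process_callback
    #     def process(frames):
    #         for inp, outp in zip(client.inports, client.outports):
    #             outp.get_array()[:] = inp.get_array()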
def set_freewheel_callback(self, callback):
"""Register freewheel callback.
Tell the JACK server to call *callback* whenever we enter or
leave "freewheel" mode.
The argument to the callback will be ``True`` if JACK is
entering freewheel mode, and ``False`` otherwise.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever JACK starts
or stops freewheeling. It must have this signature::
callback(starting: bool) -> None
The argument *starting* is ``True`` if we start to
freewheel, ``False`` otherwise.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
See Also
--------
set_freewheel
"""
@self._callback('JackFreewheelCallback')
def callback_wrapper(starting, _):
callback(bool(starting))
_check(_lib.jack_set_freewheel_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting freewheel callback')
def set_blocksize_callback(self, callback):
"""Register blocksize callback.
        Tell JACK to call *callback* whenever the size of the buffer
that will be passed to the process callback is about to change.
Clients that depend on knowing the buffer size must supply a
*callback* before activating themselves.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is invoked whenever the JACK
engine buffer size changes. It must have this signature::
callback(blocksize: int) -> None
The argument *blocksize* is the new buffer size.
The *callback* is supposed to raise `CallbackExit` on error.
.. note:: Although this function is called in the JACK
process thread, the normal process cycle is suspended
during its operation, causing a gap in the audio flow.
So, the *callback* can allocate storage, touch memory not
previously referenced, and perform other operations that
are not realtime safe.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
See Also
--------
:attr:`blocksize`
"""
@self._callback('JackBufferSizeCallback', error=_FAILURE)
def callback_wrapper(blocksize, _):
try:
callback(blocksize)
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_buffer_size_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting blocksize callback')
def set_samplerate_callback(self, callback):
"""Register samplerate callback.
Tell the JACK server to call *callback* whenever the system
sample rate changes.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called when the engine sample
rate changes. It must have this signature::
callback(samplerate: int) -> None
The argument *samplerate* is the new engine sample rate.
The *callback* is supposed to raise `CallbackExit` on error.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
See Also
--------
:attr:`samplerate`
"""
@self._callback('JackSampleRateCallback', error=_FAILURE)
def callback_wrapper(samplerate, _):
try:
callback(samplerate)
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_sample_rate_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting samplerate callback')
def set_client_registration_callback(self, callback):
"""Register client registration callback.
Tell the JACK server to call *callback* whenever a client is
registered or unregistered.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever a client is
registered or unregistered. It must have this signature::
callback(name: str, register: bool) -> None
The first argument contains the client name, the second
argument is ``True`` if the client is being registered and
``False`` if the client is being unregistered.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
"""
@self._callback('JackClientRegistrationCallback')
def callback_wrapper(name, register, _):
callback(_ffi.string(name).decode(), bool(register))
_check(_lib.jack_set_client_registration_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting client registration callback')
def set_port_registration_callback(self, callback=None,
only_available=True):
"""Register port registration callback.
Tell the JACK server to call *callback* whenever a port is
registered or unregistered.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
.. note:: Due to JACK 1 behavior, it is not possible to get
the pointer to an unregistering JACK Port if it already
existed before `activate()` was called. This will cause
the callback not to be called if *only_available* is
``True``, or called with ``None`` as first argument (see
below).
To avoid this, call `Client.get_ports()` just after
`activate()`, allowing the module to store pointers to
already existing ports and always receive a `Port`
argument for this callback.
Parameters
----------
callback : callable
User-supplied function that is called whenever a port is
registered or unregistered. It must have this signature::
callback(port: Port, register: bool) -> None
The first argument is a `Port`, `MidiPort`, `OwnPort` or
`OwnMidiPort` object, the second argument is ``True`` if the
port is being registered, ``False`` if the port is being
unregistered.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
only_available : bool, optional
If ``True``, the *callback* is not called if the port in
question is not available anymore (after another JACK client
has unregistered it).
If ``False``, it is called nonetheless, but the first
argument of the *callback* will be ``None`` if the port is
not available anymore.
See Also
--------
Ports.register
"""
if callback is None:
return lambda cb: self.set_port_registration_callback(
cb, only_available)
@self._callback('JackPortRegistrationCallback')
def callback_wrapper(port_id, register, _):
port_ptr = _lib.jack_port_by_id(self._ptr, port_id)
if port_ptr:
port = self._wrap_port_ptr(port_ptr)
elif only_available:
return
else:
port = None
callback(port, bool(register))
_check(_lib.jack_set_port_registration_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting port registration callback')
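    # Decorator-style usage sketch (illustrative), relying on the
    # callback=None branch above:
    #
    #     @client.set_port_registration_callback
    #     def on_registration(port, register):
    #         print(port, 'registered' if register else 'unregistered')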
def set_port_connect_callback(self, callback=None, only_available=True):
"""Register port connect callback.
Tell the JACK server to call *callback* whenever a port is
connected or disconnected.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
.. note:: Due to JACK 1 behavior, it is not possible to get
the pointer to an unregistering JACK Port if it already
existed before `activate()` was called. This will cause
the callback not to be called if *only_available* is
``True``, or called with ``None`` as first argument (see
below).
To avoid this, call `Client.get_ports()` just after
`activate()`, allowing the module to store pointers to
already existing ports and always receive a `Port`
argument for this callback.
Parameters
----------
callback : callable
User-supplied function that is called whenever a port is
connected or disconnected. It must have this signature::
callback(a: Port, b: Port, connect: bool) -> None
The first and second arguments contain `Port`, `MidiPort`,
`OwnPort` or `OwnMidiPort` objects of the ports which are
connected or disconnected. The third argument is ``True``
if the ports were connected and ``False`` if the ports were
disconnected.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
only_available : bool, optional
See `set_port_registration_callback()`.
If ``False``, the first and/or the second argument to the
*callback* may be ``None``.
See Also
--------
Client.connect, OwnPort.connect
"""
if callback is None:
return lambda cb: self.set_port_connect_callback(
cb, only_available)
@self._callback('JackPortConnectCallback')
def callback_wrapper(a, b, connect, _):
port_ids = a, b
ports = [None, None]
for idx in 0, 1:
ptr = _lib.jack_port_by_id(self._ptr, port_ids[idx])
if ptr:
ports[idx] = self._wrap_port_ptr(ptr)
elif only_available:
return
else:
pass # Do nothing, port is already None
callback(ports[0], ports[1], bool(connect))
_check(_lib.jack_set_port_connect_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting port connect callback')
def set_port_rename_callback(self, callback=None, only_available=True):
"""Register port rename callback.
Tell the JACK server to call *callback* whenever a port is
renamed.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever the port name
has been changed. It must have this signature::
callback(port: Port, old: str, new: str) -> None
The first argument is the port that has been renamed (a
`Port`, `MidiPort`, `OwnPort` or `OwnMidiPort` object); the
            second and third arguments are the old and new names,
respectively. The *callback* is supposed to raise
`CallbackExit` on error.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
only_available : bool, optional
See `set_port_registration_callback()`.
See Also
--------
:attr:`Port.shortname`
Notes
-----
The port rename callback is not available in JACK 1!
See `this mailing list posting`__ and `this commit message`__.
__ http://comments.gmane.org/gmane.comp.audio.jackit/28888
__ https://github.com/jackaudio/jack1/commit/
94c819accfab2612050e875c24cf325daa0fd26d
"""
if callback is None:
return lambda cb: self.set_port_rename_callback(cb, only_available)
@self._callback('JackPortRenameCallback', error=_FAILURE)
def callback_wrapper(port_id, old_name, new_name, _):
port_ptr = _lib.jack_port_by_id(self._ptr, port_id)
if port_ptr:
port = self._wrap_port_ptr(port_ptr)
elif only_available:
return
else:
port = None
try:
callback(port, _ffi.string(old_name).decode(),
_ffi.string(new_name).decode())
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_port_rename_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting port rename callback')
def set_graph_order_callback(self, callback):
"""Register graph order callback.
Tell the JACK server to call *callback* whenever the processing
graph is reordered.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after :meth:`activate` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever the
processing graph is reordered.
It must have this signature::
callback() -> None
The *callback* is supposed to raise `CallbackExit` on error.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
"""
@self._callback('JackGraphOrderCallback', error=_FAILURE)
def callback_wrapper(_):
try:
callback()
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_graph_order_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting graph order callback')
def set_xrun_callback(self, callback):
"""Register xrun callback.
Tell the JACK server to call *callback* whenever there is an
xrun.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever an xrun has
            occurred. It must have this signature::
callback(delayed_usecs: float) -> None
The callback argument is the delay in microseconds due to
the most recent XRUN occurrence.
The *callback* is supposed to raise `CallbackExit` on error.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
"""
@self._callback('JackXRunCallback', error=_FAILURE)
def callback_wrapper(_):
try:
callback(_lib.jack_get_xrun_delayed_usecs(self._ptr))
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_xrun_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting xrun callback')
def set_timebase_callback(self, callback=None, conditional=False):
"""Register as timebase master for the JACK subsystem.
The timebase master registers a callback that updates extended
position information such as beats or timecode whenever
necessary. Without this extended information, there is no need
for this function.
There is never more than one master at a time. When a new
client takes over, the former callback is no longer called.
Taking over the timebase may be done conditionally, so that
*callback* is not registered if there was a master already.
Parameters
----------
callback : callable
Realtime function that returns extended position
information. Its output affects all of the following
process cycle. This realtime function must not wait.
It is called immediately after the process callback (see
`set_process_callback()`) in the same thread whenever the
transport is rolling, or when any client has requested a new
position in the previous cycle. The first cycle after
`set_timebase_callback()` is also treated as a new position,
or the first cycle after `activate()` if the client had been
inactive. The *callback* must have this signature::
callback(state: int, blocksize: int, pos: jack_position_t, new_pos: bool) -> None
state
The current transport state. See `transport_state`.
blocksize
The number of frames in the current period.
See `blocksize`.
pos
The position structure for the next cycle; ``pos.frame``
will be its frame number. If *new_pos* is ``False``,
this structure contains extended position information
from the current cycle. If *new_pos* is ``True``, it
contains whatever was set by the requester.
The *callback*'s task is to update the extended
information here. See `transport_query_struct()`
for details about ``jack_position_t``.
new_pos
``True`` for a newly requested *pos*, or for the first
cycle after the timebase callback is defined.
.. note:: The *pos* argument must not be used to set
``pos.frame``. To change position, use
`transport_reposition_struct()` or `transport_locate()`.
These functions are realtime-safe, the timebase callback
can call them directly.
conditional : bool
Set to ``True`` for a conditional request.
Returns
-------
bool
``True`` if the timebase callback was registered.
``False`` if a conditional request failed because another
timebase master is already registered.
"""
if callback is None:
return lambda cb: self.set_timebase_callback(cb, conditional)
@self._callback('JackTimebaseCallback')
def callback_wrapper(state, blocksize, pos, new_pos, _):
callback(state, blocksize, pos, bool(new_pos))
err = _lib.jack_set_timebase_callback(self._ptr, conditional,
callback_wrapper, _ffi.NULL)
# Because of a bug in JACK2 version <= 1.9.10, we also check for -1.
# See https://github.com/jackaudio/jack2/pull/123
if conditional and err in (_errno.EBUSY, -1):
return False
_check(err, 'Error setting timebase callback')
return True
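    # Usage sketch (illustrative): request the timebase conditionally so an
    # existing master is not displaced; the callback body is left empty here.
    #
    #     def timebase(state, blocksize, pos, new_pos):
    #         pass  # update the extended fields of `pos` here
    #     if not client.set_timebase_callback(timebase, conditional=True):
    #         print('another timebase master is already registered')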
def get_uuid_for_client_name(self, name):
"""Get the session ID for a client name.
The session manager needs this to reassociate a client name to
the session ID.
"""
uuid = _ffi.gc(_lib.jack_get_uuid_for_client_name(
self._ptr, name.encode()), _lib.jack_free)
if not uuid:
raise JackError('Unable to get session ID for {0!r}'.format(name))
return _ffi.string(uuid).decode()
def get_client_name_by_uuid(self, uuid):
"""Get the client name for a session ID.
In order to snapshot the graph connections, the session manager
needs to map session IDs to client names.
"""
name = _ffi.gc(_lib.jack_get_client_name_by_uuid(
self._ptr, uuid.encode()), _lib.jack_free)
if not name:
raise JackError('Unable to get client name for {0!r}'.format(uuid))
return _ffi.string(name).decode()
def get_port_by_name(self, name):
"""Get port by name.
Given a full port name, this returns a `Port`, `MidiPort`,
`OwnPort` or `OwnMidiPort` object.
"""
port_ptr = _lib.jack_port_by_name(self._ptr, name.encode())
if not port_ptr:
raise JackError('Port {0!r} not available'.format(name))
return self._wrap_port_ptr(port_ptr)
def get_all_connections(self, port):
"""Return a list of ports which the given port is connected to.
This differs from `OwnPort.connections` (also available on
`OwnMidiPort`) in two important respects:
1) You may not call this function from code that is executed in
response to a JACK event. For example, you cannot use it in a
graph order callback.
2) You need not be the owner of the port to get information
about its connections.
"""
port = self._get_port_ptr(port)
names = _ffi.gc(_lib.jack_port_get_all_connections(self._ptr, port),
_lib.jack_free)
return self._port_list_from_pointers(names)
def get_ports(self, name_pattern='', is_audio=False, is_midi=False,
is_input=False, is_output=False, is_physical=False,
can_monitor=False, is_terminal=False):
"""Return a list of selected ports.
Parameters
----------
name_pattern : str
A regular expression used to select ports by name. If
empty, no selection based on name will be carried out.
is_audio, is_midi : bool
Select audio/MIDI ports. If neither of them is ``True``,
both types of ports are selected.
is_input, is_output, is_physical, can_monitor, is_terminal : bool
Select ports by their flags. If none of them are ``True``,
no selection based on flags will be carried out.
Returns
-------
list of Port/MidiPort/OwnPort/OwnMidiPort
All ports that satisfy the given conditions.
"""
if is_audio and not is_midi:
type_pattern = _AUDIO
elif is_midi and not is_audio:
type_pattern = _MIDI
else:
type_pattern = b''
flags = 0x0
if is_input:
flags |= _lib.JackPortIsInput
if is_output:
flags |= _lib.JackPortIsOutput
if is_physical:
flags |= _lib.JackPortIsPhysical
if can_monitor:
flags |= _lib.JackPortCanMonitor
if is_terminal:
flags |= _lib.JackPortIsTerminal
names = _ffi.gc(_lib.jack_get_ports(
self._ptr, name_pattern.encode(), type_pattern, flags),
_lib.jack_free)
return self._port_list_from_pointers(names)
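    # Query sketch (illustrative): physical capture ports are outputs (they
    # feed data into the graph), physical playback ports are inputs.
    #
    #     capture = client.get_ports(is_physical=True, is_output=True,
    #                                is_audio=True)
    #     playback = client.get_ports(is_physical=True, is_input=True,
    #                                 is_audio=True)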
def _callback(self, cdecl, **kwargs):
"""Wrapper for ffi.callback() that keeps callback alive."""
def callback_decorator(python_callable):
function_ptr = _ffi.callback(cdecl, python_callable, **kwargs)
self._keepalive.append(function_ptr)
return function_ptr
return callback_decorator
def _register_port(self, name, porttype, is_terminal, is_physical, flags):
"""Create a new port."""
if is_terminal:
flags |= _lib.JackPortIsTerminal
if is_physical:
flags |= _lib.JackPortIsPhysical
port_ptr = _lib.jack_port_register(self._ptr, name.encode(), porttype,
flags, 0)
if not port_ptr:
raise JackError(
'{0!r}: port registration failed'.format(name))
return self._wrap_port_ptr(port_ptr)
def _port_list_from_pointers(self, names):
"""Get list of Port objects from char**."""
ports = []
if names:
idx = 0
while True:
name = names[idx]
if not name:
break
ports.append(self.get_port_by_name(_ffi.string(name).decode()))
idx += 1
return ports
def _get_port_ptr(self, port):
"""Get port pointer from Port object or string or port pointer."""
if isinstance(port, Port):
port = port._ptr
elif isinstance(port, str):
port = self.get_port_by_name(port)._ptr
return port
def _wrap_port_ptr(self, ptr):
"""Create appropriate port object for a given port pointer."""
porttype = _ffi.string(_lib.jack_port_type(ptr))
if porttype == _AUDIO:
port = OwnPort(ptr, self) if self.owns(ptr) else Port(ptr)
elif porttype == _MIDI:
port = OwnMidiPort(ptr, self) if self.owns(ptr) else MidiPort(ptr)
else:
assert False
return port
class Port(object):
"""A JACK audio port.
This class cannot be instantiated directly. Instead, instances of
this class are returned from `Client.get_port_by_name()`,
`Client.get_ports()`, `Client.get_all_connections()` and
`OwnPort.connections`.
In addition, instances of this class are available in the callbacks
which are set with `Client.set_port_registration_callback()`,
`Client.set_port_connect_callback()` or
`Client.set_port_rename_callback`.
Note, however, that if the used `Client` owns the respective port,
instances of `OwnPort` (instead of `Port`) will be created. In case
of MIDI ports, instances of `MidiPort` or `OwnMidiPort` are created.
Besides being the type of non-owned JACK audio ports, this class
also serves as base class for all other port classes (`OwnPort`,
`MidiPort` and `OwnMidiPort`).
New JACK audio/MIDI ports can be created with the
:meth:`~Ports.register` method of `Client.inports`,
`Client.outports`, `Client.midi_inports` and `Client.midi_outports`.
"""
def __init__(self, port_ptr):
self._ptr = port_ptr
def __repr__(self):
return "jack.{0.__class__.__name__}('{0.name}')".format(self)
def __eq__(self, other):
"""Ports are equal if their underlying port pointers are."""
return self._ptr == other._ptr
def __ne__(self, other):
"""This should be implemented whenever __eq__() is implemented."""
return not self.__eq__(other)
@property
def name(self):
"""Full name of the JACK port (read-only)."""
return _ffi.string(_lib.jack_port_name(self._ptr)).decode()
@property
def shortname(self):
"""Short name of the JACK port, not including the client name.
Must be unique among all ports owned by a client.
May be modified at any time. If the resulting full name
(including the ``client_name:`` prefix) is longer than
`port_name_size()`, it will be truncated.
"""
return _ffi.string(_lib.jack_port_short_name(self._ptr)).decode()
@shortname.setter
def shortname(self, shortname):
_check(_lib.jack_port_set_name(self._ptr, shortname.encode()),
'Error setting port name')
@property
def aliases(self):
"""Returns a list of strings with the aliases for the JACK port."""
ctype = "char[{}]".format(_lib.jack_port_name_size())
aliases = [_ffi.new(ctype), _ffi.new(ctype)]
aliasesptr = _ffi.new("char *[]", aliases)
result = []
if _lib.jack_port_get_aliases(self._ptr, aliasesptr) > 0:
for i in 0, 1:
alias = _ffi.string(aliases[i]).decode()
if alias:
result.append(alias)
return result
def set_alias(self, alias):
"""Set an alias for the JACK port.
Ports can have up to two aliases. If both are already set,
this function will return an error.
"""
_check(_lib.jack_port_set_alias(self._ptr, alias.encode()),
'Error setting port alias')
def unset_alias(self, alias):
"""Remove an alias for the JACK port.
If the alias doesn't exist this function will return an error.
"""
_check(_lib.jack_port_unset_alias(self._ptr, alias.encode()),
'Error unsetting port alias')
@property
def uuid(self):
"""The UUID of the JACK port."""
return _lib.jack_port_uuid(self._ptr)
is_audio = property(lambda self: True, doc='This is always ``True``.')
is_midi = property(lambda self: False, doc='This is always ``False``.')
@property
def is_input(self):
"""Can the port receive data?"""
return self._hasflag(_lib.JackPortIsInput)
@property
def is_output(self):
"""Can data be read from this port?"""
return self._hasflag(_lib.JackPortIsOutput)
@property
def is_physical(self):
"""Does it correspond to some kind of physical I/O connector?"""
return self._hasflag(_lib.JackPortIsPhysical)
@property
def can_monitor(self):
"""Does a call to `request_monitor()` make sense?"""
return self._hasflag(_lib.JackPortCanMonitor)
@property
def is_terminal(self):
"""Is the data consumed/generated?"""
return self._hasflag(_lib.JackPortIsTerminal)
def request_monitor(self, onoff):
"""Set input monitoring.
If `can_monitor` is ``True``, turn input monitoring on or
off. Otherwise, do nothing.
Parameters
----------
onoff : bool
If ``True``, switch monitoring on; if ``False``, switch it
off.
"""
_check(_lib.jack_port_request_monitor(self._ptr, onoff),
'Unable to switch monitoring on/off')
def _hasflag(self, flag):
"""Helper method for is_*()."""
return bool(_lib.jack_port_flags(self._ptr) & flag)
class MidiPort(Port):
"""A JACK MIDI port.
This class is derived from `Port` and has exactly the same
attributes and methods.
This class cannot be instantiated directly (see `Port`).
New JACK audio/MIDI ports can be created with the
:meth:`~Ports.register` method of `Client.inports`,
`Client.outports`, `Client.midi_inports` and `Client.midi_outports`.
See Also
--------
Port, OwnMidiPort
"""
is_audio = property(lambda self: False, doc='This is always ``False``.')
is_midi = property(lambda self: True, doc='This is always ``True``.')
class OwnPort(Port):
"""A JACK audio port owned by a `Client`.
This class is derived from `Port`. `OwnPort` objects can do
everything that `Port` objects can, plus a lot more.
This class cannot be instantiated directly (see `Port`).
New JACK audio/MIDI ports can be created with the
:meth:`~Ports.register` method of `Client.inports`,
`Client.outports`, `Client.midi_inports` and `Client.midi_outports`.
"""
def __init__(self, port_ptr, client):
Port.__init__(self, port_ptr)
self._client = client
@property
def number_of_connections(self):
"""Number of connections to or from port."""
return _lib.jack_port_connected(self._ptr)
@property
def connections(self):
"""List of ports which the port is connected to."""
names = _ffi.gc(_lib.jack_port_get_connections(self._ptr),
_lib.jack_free)
return self._client._port_list_from_pointers(names)
def is_connected_to(self, port):
"""Am I *directly* connected to *port*?
Parameters
----------
port : str or Port
Full port name or port object.
"""
if isinstance(port, Port):
port = port.name
return bool(_lib.jack_port_connected_to(self._ptr, port.encode()))
def connect(self, port):
"""Connect to given port.
Parameters
----------
port : str or Port
Full port name or port object.
See Also
--------
Client.connect
"""
if not isinstance(port, Port):
port = self._client.get_port_by_name(port)
if self.is_output:
source = self
if not port.is_input:
raise ValueError('Input port expected')
destination = port
elif self.is_input:
destination = self
if not port.is_output:
raise ValueError('Output port expected')
source = port
else:
assert False
self._client.connect(source.name, destination.name)
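    # Connection sketch (illustrative): either endpoint may be an own port or
    # a name; 'system:playback_1' is commonly the first hardware playback port.
    #
    #     client.outports[0].connect('system:playback_1')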
def disconnect(self, other=None):
"""Disconnect this port.
Parameters
----------
other : str or Port
Port to disconnect from.
By default, disconnect from all connected ports.
"""
if other is None:
_check(_lib.jack_port_disconnect(self._client._ptr, self._ptr),
'Error disconnecting {0!r}'.format(self.name))
else:
if self.is_output:
args = self, other
elif self.is_input:
args = other, self
self._client.disconnect(*args)
def unregister(self):
"""Unregister port.
Remove the port from the client, disconnecting any existing
connections. This also removes the port from
`Client.inports`, `Client.outports`, `Client.midi_inports` or
`Client.midi_outports`.
"""
if self.is_audio:
listname = ''
elif self.is_midi:
listname = 'midi_'
if self.is_input:
listname += 'inports'
elif self.is_output:
listname += 'outports'
ports = getattr(self._client, listname)
ports._portlist.remove(self)
_check(_lib.jack_port_unregister(self._client._ptr, self._ptr),
'Error unregistering {0!r}'.format(self.name))
def get_buffer(self):
"""Get buffer for audio data.
This returns a buffer holding the memory area associated with
the specified port. For an output port, it will be a memory
area that can be written to; for an input port, it will be an
area containing the data from the port's connection(s), or
zero-filled. If there are multiple inbound connections, the
data will be mixed appropriately.
Caching output ports is DEPRECATED in JACK 2.0, due to some new
optimization (like "pipelining"). Port buffers have to be
retrieved in each callback for proper functioning.
This method shall only be called from within the process
callback (see `Client.set_process_callback()`).
"""
blocksize = self._client.blocksize
return _ffi.buffer(_lib.jack_port_get_buffer(self._ptr, blocksize),
blocksize * _ffi.sizeof('float'))
def get_array(self):
"""Get audio buffer as NumPy array.
Make sure to ``import numpy`` before calling this, otherwise the
first call might take a long time.
This method shall only be called from within the process
callback (see `Client.set_process_callback()`).
See Also
--------
get_buffer
"""
import numpy as np
return np.frombuffer(self.get_buffer(), dtype=np.float32)
class OwnMidiPort(MidiPort, OwnPort):
"""A JACK MIDI port owned by a `Client`.
This class is derived from `OwnPort` and `MidiPort`, which are
themselves derived from `Port`. It has the same attributes and
methods as `OwnPort`, but `get_buffer()` and `get_array()` are
disabled. Instead, it has methods for sending and receiving MIDI
events (to be used only from within the process callback -- see
`Client.set_process_callback()`).
This class cannot be instantiated directly (see `Port`).
New JACK audio/MIDI ports can be created with the
:meth:`~Ports.register` method of `Client.inports`,
`Client.outports`, `Client.midi_inports` and `Client.midi_outports`.
"""
def __init__(self, *args, **kwargs):
OwnPort.__init__(self, *args, **kwargs)
self._event = _ffi.new('jack_midi_event_t*')
def get_buffer(self):
"""Not available for MIDI ports."""
raise NotImplementedError('get_buffer() not available on MIDI ports')
def get_array(self):
"""Not available for MIDI ports."""
raise NotImplementedError('get_array() not available on MIDI ports')
@property
def max_event_size(self):
"""Get the size of the largest event that can be stored by the port.
This returns the current space available, taking into
account events already stored in the port.
"""
return _lib.jack_midi_max_event_size(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize))
@property
def lost_midi_events(self):
"""Get the number of events that could not be written to the port.
This being a non-zero value implies that the port is full.
Currently the only way this can happen is if events are lost on
port mixdown.
"""
return _lib.jack_midi_get_lost_event_count(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize))
def incoming_midi_events(self):
"""Return generator for incoming MIDI events.
        JACK MIDI is normalised, so the MIDI events yielded by this
generator are guaranteed to be complete MIDI events (the status
byte will always be present, and no realtime events will be
interspersed with the events).
Yields
------
time : int
Time (in samples) relative to the beginning of the current
audio block.
event : buffer
The actual MIDI event data.
"""
event = self._event
buf = _lib.jack_port_get_buffer(self._ptr, self._client.blocksize)
for i in range(_lib.jack_midi_get_event_count(buf)):
err = _lib.jack_midi_event_get(event, buf, i)
# TODO: proper error handling if this ever happens:
assert not err, err
yield event.time, _ffi.buffer(event.buffer, event.size)
def clear_buffer(self):
"""Clear an event buffer.
This should be called at the beginning of each process cycle
before calling `reserve_midi_event()` or `write_midi_event()`.
This function may not be called on an input port.
"""
_lib.jack_midi_clear_buffer(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize))
def write_midi_event(self, time, event):
"""Create an outgoing MIDI event.
Clients must write normalised MIDI data to the port - no running
status and no (one-byte) realtime messages interspersed with
other messages (realtime messages are fine when they occur on
their own, like other messages).
Events must be written in order, sorted by their sample offsets.
JACK will not sort the events for you, and will refuse to store
out-of-order events.
Parameters
----------
time : int
Time (in samples) relative to the beginning of the current
audio block.
event : bytes or buffer or sequence of int
The actual MIDI event data.
.. note:: Buffer objects are only supported for CFFI >= 0.9.
Raises
------
JackError
If MIDI event couldn't be written.
"""
try:
event = _ffi.from_buffer(event)
except AttributeError:
pass # from_buffer() not supported
except TypeError:
pass # input is not a buffer
_check(_lib.jack_midi_event_write(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize),
time, event, len(event)), 'Error writing MIDI event')
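    # MIDI pass-through sketch (illustrative), for use inside the process
    # callback; midi_in and midi_out are assumed to have been registered via
    # client.midi_inports.register() and client.midi_outports.register().
    #
    #     def process(frames):
    #         midi_out.clear_buffer()
    #         for offset, data in midi_in.incoming_midi_events():
    #             midi_out.write_midi_event(offset, data)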
def reserve_midi_event(self, time, size):
"""Get a buffer where an outgoing MIDI event can be written to.
Clients must write normalised MIDI data to the port - no running
status and no (one-byte) realtime messages interspersed with
other messages (realtime messages are fine when they occur on
their own, like other messages).
Events must be written in order, sorted by their sample offsets.
JACK will not sort the events for you, and will refuse to store
out-of-order events.
Parameters
----------
time : int
Time (in samples) relative to the beginning of the current
audio block.
size : int
Number of bytes to reserve.
Returns
-------
buffer
A buffer object where MIDI data bytes can be written to.
If no space could be reserved, an empty buffer is returned.
"""
buf = _lib.jack_midi_event_reserve(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize),
time, size)
return _ffi.buffer(buf, size if buf else 0)
class Ports(object):
"""A list of input/output ports.
This class is not meant to be instantiated directly. It is only
used as `Client.inports`, `Client.outports`, `Client.midi_inports`
and `Client.midi_outports`.
The ports can be accessed by indexing or by iteration.
New ports can be added with `register()`, existing ports can be
removed by calling their :meth:`~OwnPort.unregister` method.
"""
def __init__(self, client, porttype, flag):
self._client = client
self._type = porttype
self._flag = flag
self._portlist = []
def __len__(self):
return self._portlist.__len__()
def __getitem__(self, name):
return self._portlist.__getitem__(name)
# No __setitem__!
def __iter__(self):
return self._portlist.__iter__()
def __repr__(self):
return self._portlist.__repr__()
def register(self, shortname, is_terminal=False, is_physical=False):
"""Create a new input/output port.
The new `OwnPort` or `OwnMidiPort` object is automatically added
to `Client.inports`, `Client.outports`, `Client.midi_inports` or
`Client.midi_outports`.
Parameters
----------
shortname : str
Each port has a short name. The port's full name contains
the name of the client concatenated with a colon (:)
followed by its short name. The `port_name_size()` is the
maximum length of this full name. Exceeding that will cause
the port registration to fail.
The port name must be unique among all ports owned by this
client.
If the name is not unique, the registration will fail.
is_terminal : bool
For an input port: If ``True``, the data received by the
port will not be passed on or made available at any other
port.
For an output port: If ``True``, the data available at the
            port does not originate from any other port.
Audio synthesizers, I/O hardware interface clients, HDR
systems are examples of clients that would set this flag for
their ports.
is_physical : bool
If ``True`` the port corresponds to some kind of physical
I/O connector.
Returns
-------
Port
A new `OwnPort` or `OwnMidiPort` instance.
"""
port = self._client._register_port(
shortname, self._type, is_terminal, is_physical, self._flag)
self._portlist.append(port)
return port
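    # Registration sketch (illustrative): create two audio outputs and one
    # MIDI input on an existing client.
    #
    #     left = client.outports.register('out_l')
    #     right = client.outports.register('out_r')
    #     midi_in = client.midi_inports.register('midi_in')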
def clear(self):
"""Unregister all ports in the list.
See Also
--------
OwnPort.unregister
"""
while self._portlist:
self._portlist[0].unregister()
class RingBuffer(object):
"""JACK's lock-free ringbuffer."""
def __init__(self, size):
"""Create a lock-free ringbuffer.
A ringbuffer is a good way to pass data between threads
(e.g. between the main program and the process callback),
when streaming realtime data to slower media, like audio file
playback or recording.
The key attribute of a ringbuffer is that it can be safely
accessed by two threads simultaneously -- one reading from the
buffer and the other writing to it -- without using any
synchronization or mutual exclusion primitives. For this to
work correctly, there can only be a single reader and a single
writer thread. Their identities cannot be interchanged.
Parameters
----------
size : int
Size in bytes. JACK will allocate a buffer of at least this
size (rounded up to the next power of 2), but one byte is
reserved for internal use. Use `write_space` to
determine the actual size available for writing.
"""
ptr = _lib.jack_ringbuffer_create(size)
if not ptr:
raise JackError('Could not create RingBuffer')
self._ptr = _ffi.gc(ptr, _lib.jack_ringbuffer_free)
@property
def write_space(self):
"""The number of bytes available for writing."""
return _lib.jack_ringbuffer_write_space(self._ptr)
def write(self, data):
"""Write data into the ringbuffer.
Parameters
----------
data : buffer or bytes or iterable of int
Bytes to be written to the ringbuffer.
Returns
-------
int
The number of bytes written, which could be less than the
length of *data* if there was no more space left
(see `write_space`).
See Also
--------
:attr:`write_space`, :attr:`write_buffers`
"""
try:
data = _ffi.from_buffer(data)
except AttributeError:
pass # from_buffer() not supported
except TypeError:
pass # input is not a buffer
return _lib.jack_ringbuffer_write(self._ptr, data, len(data))
@property
def write_buffers(self):
"""Contains two buffer objects that can be written to directly.
Two are needed because the space available for writing may be
split across the end of the ringbuffer. Either of them could be
0 length.
This can be used as a no-copy version of `write()`.
When finished with writing, `write_advance()` should be used.
.. note:: After an operation that changes the write pointer
(`write()`, `write_advance()`, `reset()`), the buffers are no
longer valid and one should use this property again to get
new ones.
"""
vectors = _ffi.new('jack_ringbuffer_data_t[2]')
_lib.jack_ringbuffer_get_write_vector(self._ptr, vectors)
return (
_ffi.buffer(vectors[0].buf, vectors[0].len),
_ffi.buffer(vectors[1].buf, vectors[1].len)
)
def write_advance(self, size):
"""Advance the write pointer.
After data has been written to the ringbuffer using
`write_buffers`, use this method to advance the buffer pointer,
making the data available for future read operations.
Parameters
----------
size : int
The number of bytes to advance.
"""
_lib.jack_ringbuffer_write_advance(self._ptr, size)
@property
def read_space(self):
"""The number of bytes available for reading."""
return _lib.jack_ringbuffer_read_space(self._ptr)
def read(self, size):
"""Read data from the ringbuffer.
Parameters
----------
size : int
Number of bytes to read.
Returns
-------
buffer
A buffer object containing the requested data.
If no more data is left (see `read_space`), a smaller
(or even empty) buffer is returned.
See Also
--------
peek, :attr:`read_space`, :attr:`read_buffers`
"""
data = _ffi.new('unsigned char[]', size)
size = _lib.jack_ringbuffer_read(self._ptr, data, size)
return _ffi.buffer(data, size)
def peek(self, size):
"""Peek at data from the ringbuffer.
Opposed to `read()` this function does not move the read
pointer. Thus it's a convenient way to inspect data in the
ringbuffer in a continuous fashion.
The price is that the data is copied into a newly allocated
buffer. For "raw" non-copy inspection of the data in the
ringbuffer use `read_buffers`.
Parameters
----------
size : int
Number of bytes to peek.
Returns
-------
buffer
A buffer object containing the requested data.
If no more data is left (see `read_space`), a smaller
(or even empty) buffer is returned.
See Also
--------
read, :attr:`read_space`, :attr:`read_buffers`
"""
data = _ffi.new('unsigned char[]', size)
size = _lib.jack_ringbuffer_peek(self._ptr, data, size)
return _ffi.buffer(data, size)
@property
def read_buffers(self):
"""Contains two buffer objects that can be read directly.
Two are needed because the data to be read may be split across
the end of the ringbuffer. Either of them could be 0 length.
This can be used as a no-copy version of `peek()` or `read()`.
When finished with reading, `read_advance()` should be used.
.. note:: After an operation that changes the read pointer
(`read()`, `read_advance()`, `reset()`), the buffers are no
longer valid and one should use this property again to get
new ones.
"""
vectors = _ffi.new('jack_ringbuffer_data_t[2]')
_lib.jack_ringbuffer_get_read_vector(self._ptr, vectors)
return (
_ffi.buffer(vectors[0].buf, vectors[0].len),
_ffi.buffer(vectors[1].buf, vectors[1].len)
)
def read_advance(self, size):
"""Advance the read pointer.
After data has been read from the ringbuffer using
`read_buffers` or `peek()`, use this method to advance the
buffer pointers, making that space available for future write
operations.
Parameters
----------
size : int
The number of bytes to advance.
"""
_lib.jack_ringbuffer_read_advance(self._ptr, size)
def mlock(self):
"""Lock a ringbuffer data block into memory.
Uses the ``mlock()`` system call. This prevents the
ringbuffer's memory from being paged to the swap area.
.. note:: This is not a realtime operation.
"""
_check(_lib.jack_ringbuffer_mlock(self._ptr),
'Error mlocking the RingBuffer data')
def reset(self, size=None):
"""Reset the read and write pointers, making an empty buffer.
.. note:: This is not thread safe.
Parameters
----------
size : int, optional
The new size for the ringbuffer.
            Must be less than the allocated size.
"""
if size is None:
_lib.jack_ringbuffer_reset(self._ptr)
else:
_lib.jack_ringbuffer_reset_size(self._ptr, size)
@property
def size(self):
"""The number of bytes in total used by the buffer.
See Also
--------
:attr:`read_space`, :attr:`write_space`
"""
return self._ptr.size
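# --- Illustrative sketch (not part of the library source) -------------------
# Typical single-producer / single-consumer use of RingBuffer, based only on
# the methods defined above (write_space, write, read_space, read).
def _example_ringbuffer_roundtrip():
    rb = RingBuffer(1024)             # at least 1024 bytes, one byte reserved
    payload = b'hello'
    if rb.write_space >= len(payload):
        written = rb.write(payload)   # may be shorter if space runs out
        assert written == len(payload)
    # ... later, in the single consumer thread:
    if rb.read_space:
        chunk = rb.read(rb.read_space)
        print(bytes(chunk))           # b'hello'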
class Status(object):
"""Representation of the JACK status bits."""
__slots__ = '_code'
def __init__(self, code):
self._code = code
def __repr__(self):
flags = ', '.join(name for name in dir(self)
if not name.startswith('_') and getattr(self, name))
if not flags:
flags = 'no flags set'
return '<jack.Status 0x{0:X}: {1}>'.format(self._code, flags)
@property
def failure(self):
"""Overall operation failed."""
return self._hasflag(_lib.JackFailure)
@property
def invalid_option(self):
"""The operation contained an invalid or unsupported option."""
return self._hasflag(_lib.JackInvalidOption)
@property
def name_not_unique(self):
"""The desired client name was not unique.
With the *use_exact_name* option of `Client`, this situation is
fatal. Otherwise, the name is modified by appending a dash and
a two-digit number in the range "-01" to "-99". `Client.name`
will return the exact string that was used. If the specified
*name* plus these extra characters would be too long, the open
fails instead.
"""
return self._hasflag(_lib.JackNameNotUnique)
@property
def server_started(self):
"""The JACK server was started for this `Client`.
Otherwise, it was running already.
"""
return self._hasflag(_lib.JackServerStarted)
@property
def server_failed(self):
"""Unable to connect to the JACK server."""
return self._hasflag(_lib.JackServerFailed)
@property
def server_error(self):
"""Communication error with the JACK server."""
return self._hasflag(_lib.JackServerError)
@property
def no_such_client(self):
"""Requested client does not exist."""
return self._hasflag(_lib.JackNoSuchClient)
@property
def load_failure(self):
"""Unable to load internal client."""
return self._hasflag(_lib.JackLoadFailure)
@property
def init_failure(self):
"""Unable to initialize client."""
return self._hasflag(_lib.JackInitFailure)
@property
def shm_failure(self):
"""Unable to access shared memory."""
return self._hasflag(_lib.JackShmFailure)
@property
def version_error(self):
"""Client's protocol version does not match."""
return self._hasflag(_lib.JackVersionError)
@property
def backend_error(self):
"""Backend error."""
return self._hasflag(_lib.JackBackendError)
@property
def client_zombie(self):
"""Client zombified failure."""
return self._hasflag(_lib.JackClientZombie)
def _hasflag(self, flag):
"""Helper function for Status properties."""
return bool(self._code & flag)
class TransportState(object):
"""Representation of the JACK transport state.
See Also
--------
`Client.transport_state`, :meth:`Client.transport_query`
"""
__slots__ = '_code'
def __init__(self, code):
self._code = code
def __eq__(self, other):
return self._code == other
def __repr__(self):
return 'jack.' + {
_lib.JackTransportStopped: 'STOPPED',
_lib.JackTransportRolling: 'ROLLING',
_lib.JackTransportStarting: 'STARTING',
_lib.JackTransportNetStarting: 'NETSTARTING',
}[self._code]
class JackError(Exception):
"""Exception for all kinds of JACK-related errors."""
pass
class CallbackExit(Exception):
"""To be raised in a callback function to signal failure.
See Also
--------
:meth:`Client.set_process_callback`
:meth:`Client.set_blocksize_callback`
:meth:`Client.set_samplerate_callback`
:meth:`Client.set_port_rename_callback`
:meth:`Client.set_graph_order_callback`
:meth:`Client.set_xrun_callback`
"""
pass
def position2dict(pos):
"""Convert CFFI position struct to a dict."""
assert pos.unique_1 == pos.unique_2
keys = ['usecs', 'frame_rate', 'frame']
if pos.valid & _lib.JackPositionBBT:
keys += ['bar', 'beat', 'tick', 'bar_start_tick', 'beats_per_bar',
'beat_type', 'ticks_per_beat', 'beats_per_minute']
if pos.valid & _lib.JackPositionTimecode:
keys += ['frame_time', 'next_time']
if pos.valid & _lib.JackBBTFrameOffset:
keys += ['bbt_offset']
if pos.valid & _lib.JackAudioVideoRatio:
keys += ['audio_frames_per_video_frame']
if pos.valid & _lib.JackVideoFrameOffset:
keys += ['video_offset']
return dict((k, getattr(pos, k)) for k in keys)
def version():
"""Get tuple of major/minor/micro/protocol version."""
v = _ffi.new('int[4]')
_lib.jack_get_version(v+0, v+1, v+2, v+3)
return tuple(v)
def version_string():
"""Get human-readable JACK version."""
return _ffi.string(_lib.jack_get_version_string()).decode()
def client_name_size():
"""Return the maximum number of characters in a JACK client name.
This includes the final NULL character. This value is a constant.
"""
return _lib.jack_client_name_size()
def port_name_size():
"""Maximum length of port names.
The maximum number of characters in a full JACK port name including
the final NULL character. This value is a constant.
A port's full name contains the owning client name concatenated with
a colon (:) followed by its short name and a NULL character.
"""
return _lib.jack_port_name_size()
def set_error_function(callback=None):
"""Set the callback for error message display.
Set it to ``None`` to restore the default error callback function
(which prints the error message plus a newline to stderr).
The *callback* function must have this signature::
callback(message: str) -> None
"""
_set_error_or_info_function(callback, _lib.jack_set_error_function)
def set_info_function(callback=None):
"""Set the callback for info message display.
    Set it to ``None`` to restore the default info callback function
(which prints the info message plus a newline to stderr).
The *callback* function must have this signature::
callback(message: str) -> None
"""
_set_error_or_info_function(callback, _lib.jack_set_info_function)
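# --- Illustrative sketch (not part of the library source) -------------------
# Routing JACK error/info messages through the `logging` module instead of
# stderr, using the callback signature documented above.
def _example_install_log_callbacks():
    import logging
    log = logging.getLogger('jack')
    set_error_function(lambda message: log.error(message))
    set_info_function(lambda message: log.info(message))
    # Passing None restores the default behaviour (print to stderr):
    # set_error_function(None)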
def client_pid(name):
"""Return PID of a JACK client.
Parameters
----------
name : str
Name of the JACK client whose PID shall be returned.
Returns
-------
int
PID of *name*. If not available, 0 will be returned.
"""
return _lib.jack_get_client_pid(name.encode())
def _set_error_or_info_function(callback, setter):
"""Helper for set_error_function() and set_info_function()."""
if callback is None:
callback_wrapper = _ffi.NULL
else:
@_ffi.callback('void (*)(const char*)')
def callback_wrapper(msg):
callback(_ffi.string(msg).decode())
_keepalive[setter] = callback_wrapper
setter(callback_wrapper)
_keepalive = {}
def _check(error_code, msg):
"""Check error code and raise JackError if non-zero."""
if error_code:
raise JackError('{0} ({1})'.format(msg, error_code))
| [
[
[
1173,
1184
]
],
[
[
1220,
1249
],
[
1465,
1478
],
[
1521,
1534
],
[
1567,
1580
]
],
[
[
1257,
1272
],
[
13188,
13194
],
[
45694,
45700
]
],
[
[
1280,
1301
],
[
1366,
1375
],
[
1406,
1415
]
],
[
[
1309,
1330
],
[
15210,
15219
]
],
[
[
1350,
1361
],
[
1662,
1666
],
[
5018,
5022
],
[
3777,
3781
],
[
4102,
4106
],
[
4235,
4239
],
[
4904,
4908
],
[
5382,
5386
],
[
12187,
12191
],
[
14500,
14504
],
[
21404,
21408
],
[
24119,
24123
],
[
25590,
25594
],
[
27655,
27659
],
[
29168,
29172
],
[
30646,
30650
],
[
33615,
33619
],
[
36578,
36582
],
[
39174,
39178
],
[
40548,
40552
],
[
42027,
42031
],
[
45512,
45516
],
[
46033,
46037
],
[
46247,
46251
],
[
46520,
46524
],
[
46734,
46738
],
[
47743,
47747
],
[
49408,
49412
],
[
50802,
50806
],
[
51286,
51290
],
[
53216,
53220
],
[
53646,
53650
],
[
54072,
54076
],
[
54089,
54093
],
[
54127,
54131
],
[
54294,
54298
],
[
58045,
58049
],
[
61530,
61534
],
[
61630,
61634
],
[
62952,
62956
],
[
64977,
64981
],
[
66379,
66383
],
[
67853,
67857
],
[
71998,
72002
],
[
72747,
72751
],
[
73661,
73665
],
[
73794,
73798
],
[
73851,
73855
],
[
75008,
75012
],
[
75121,
75125
],
[
76002,
76006
],
[
76115,
76119
],
[
76793,
76797
],
[
76925,
76929
],
[
76982,
76986
],
[
83366,
83370
],
[
83529,
83533
],
[
85465,
85469
],
[
85494,
85498
],
[
21309,
21313
],
[
30501,
30505
],
[
38899,
38903
],
[
38956,
38960
],
[
49772,
49776
],
[
85589,
85593
]
],
[
[
1454,
1462
],
[
1593,
1601
],
[
1674,
1682
]
],
[
[
1510,
1518
],
[
1593,
1601
],
[
1674,
1682
]
],
[
[
1556,
1564
],
[
1593,
1601
],
[
1674,
1682
]
],
[
[
1655,
1659
],
[
1758,
1762
],
[
1818,
1822
],
[
1880,
1884
],
[
1951,
1955
],
[
3822,
3826
],
[
3913,
3917
],
[
3986,
3990
],
[
4055,
4059
],
[
4189,
4193
],
[
4296,
4300
],
[
4618,
4622
],
[
4685,
4689
],
[
4756,
4760
],
[
4827,
4831
],
[
5394,
5398
],
[
5559,
5563
],
[
6362,
6366
],
[
6473,
6477
],
[
6810,
6814
],
[
7088,
7092
],
[
7463,
7467
],
[
8430,
8434
],
[
10961,
10965
],
[
11179,
11183
],
[
11532,
11536
],
[
11980,
11984
],
[
12129,
12133
],
[
13070,
13074
],
[
13823,
13827
],
[
14062,
14066
],
[
14173,
14177
],
[
14463,
14467
],
[
14839,
14843
],
[
14970,
14974
],
[
17055,
17059
],
[
18006,
18010
],
[
19137,
19141
],
[
21348,
21352
],
[
24046,
24050
],
[
25515,
25519
],
[
27578,
27582
],
[
29091,
29095
],
[
30561,
30565
],
[
33532,
33536
],
[
36500,
36504
],
[
39097,
39101
],
[
40471,
40475
],
[
41957,
41961
],
[
45392,
45396
],
[
46041,
46045
],
[
46116,
46120
],
[
46528,
46532
],
[
46602,
46606
],
[
46972,
46976
],
[
47751,
47755
],
[
47828,
47832
],
[
49099,
49103
],
[
49163,
49167
],
[
49230,
49234
],
[
49299,
49303
],
[
49368,
49372
],
[
49416,
49420
],
[
49517,
49521
],
[
50093,
50097
],
[
50162,
50166
],
[
50205,
50209
],
[
51298,
51302
],
[
53228,
53232
],
[
53658,
53662
],
[
53779,
53783
],
[
54025,
54029
],
[
54188,
54192
],
[
54633,
54637
],
[
54918,
54922
],
[
55109,
55113
],
[
55401,
55405
],
[
55539,
55543
],
[
55706,
55710
],
[
55863,
55867
],
[
56005,
56009
],
[
56382,
56386
],
[
56576,
56580
],
[
57891,
57895
],
[
58053,
58057
],
[
58120,
58124
],
[
58488,
58492
],
[
59626,
59630
],
[
60566,
60570
],
[
61542,
61546
],
[
63543,
63547
],
[
63586,
63590
],
[
63960,
63964
],
[
64009,
64013
],
[
64671,
64675
],
[
64755,
64759
],
[
64811,
64815
],
[
65298,
65302
],
[
65339,
65343
],
[
66565,
66569
],
[
66605,
66609
],
[
67710,
67714
],
[
67752,
67756
],
[
71865,
71869
],
[
72011,
72015
],
[
72152,
72156
],
[
72932,
72936
],
[
73707,
73711
],
[
74297,
74301
],
[
74462,
74466
],
[
75057,
75061
],
[
76051,
76055
],
[
76839,
76843
],
[
77448,
77452
],
[
77780,
77784
],
[
78228,
78232
],
[
78292,
78296
],
[
79109,
79113
],
[
79273,
79277
],
[
79816,
79820
],
[
80029,
80033
],
[
80178,
80182
],
[
80329,
80333
],
[
80472,
80476
],
[
80613,
80617
],
[
80750,
80754
],
[
80889,
80893
],
[
81039,
81043
],
[
81164,
81168
],
[
81300,
81304
],
[
81837,
81841
],
[
81887,
81891
],
[
81937,
81941
],
[
81989,
81993
],
[
82726,
82730
],
[
82910,
82914
],
[
83000,
83004
],
[
83075,
83079
],
[
83169,
83173
],
[
83389,
83393
],
[
83541,
83545
],
[
83770,
83774
],
[
84149,
84153
],
[
84548,
84552
],
[
84942,
84946
],
[
85253,
85257
],
[
33252,
33256
],
[
36152,
36156
],
[
38630,
38634
],
[
41803,
41807
]
],
[
[
1685,
1691
],
[
4610,
4616
],
[
4677,
4683
],
[
48913,
48919
],
[
51347,
51353
]
],
[
[
1721,
1726
],
[
4749,
4754
],
[
4820,
4825
],
[
48986,
48991
],
[
51451,
51456
]
],
[
[
1748,
1755
]
],
[
[
1808,
1815
]
],
[
[
1869,
1877
]
],
[
[
1937,
1948
]
],
[
[
2027,
2035
],
[
24021,
24029
],
[
27553,
27561
],
[
29066,
29074
],
[
39072,
39080
],
[
40446,
40454
],
[
41932,
41940
]
],
[
[
2040,
2048
],
[
23836,
23844
],
[
27362,
27370
],
[
28873,
28881
],
[
38535,
38543
],
[
40275,
40283
],
[
41718,
41726
],
[
23993,
24001
],
[
27525,
27533
],
[
29038,
29046
],
[
39044,
39052
],
[
40418,
40426
],
[
41904,
41912
]
],
[
[
2061,
2067
]
],
[
[
51604,
51608
],
[
56633,
56637
],
[
57229,
57233
],
[
12931,
12935
],
[
13006,
13010
],
[
13683,
13687
],
[
13758,
13762
],
[
51016,
51020
],
[
51416,
51420
],
[
57711,
57715
],
[
58432,
58436
],
[
58819,
58823
]
],
[
[
56624,
56632
],
[
62141,
62149
],
[
51523,
51531
]
],
[
[
57221,
57228
],
[
62151,
62158
],
[
51374,
51381
],
[
62890,
62897
]
],
[
[
62129,
62140
],
[
51477,
51488
]
],
[
[
67898,
67903
],
[
4598,
4603
],
[
4665,
4670
],
[
4737,
4742
],
[
4808,
4813
]
],
[
[
70743,
70753
]
],
[
[
78570,
78576
],
[
4425,
4431
],
[
21295,
21301
]
],
[
[
81454,
81468
],
[
14448,
14462
],
[
16134,
16148
]
],
[
[
82065,
82074
],
[
4487,
4496
],
[
13221,
13230
],
[
46171,
46180
],
[
46657,
46666
],
[
47064,
47073
],
[
50361,
50370
],
[
71937,
71946
],
[
85834,
85843
]
],
[
[
82163,
82175
],
[
23956,
23968
],
[
27488,
27500
],
[
29001,
29013
],
[
39007,
39019
],
[
40381,
40393
],
[
41867,
41879
]
],
[
[
82552,
82565
],
[
16157,
16170
]
],
[
[
83288,
83295
]
],
[
[
83457,
83471
]
],
[
[
83588,
83604
]
],
[
[
83805,
83819
]
],
[
[
84182,
84200
]
],
[
[
84584,
84601
]
],
[
[
84977,
84987
]
],
[
[
85299,
85326
],
[
84510,
84537
],
[
84904,
84931
]
],
[
[
85694,
85704
],
[
85625,
85635
]
],
[
[
85716,
85722
],
[
6466,
6472
],
[
11172,
11178
],
[
11606,
11612
],
[
12247,
12253
],
[
13342,
13348
],
[
13816,
13822
],
[
14963,
14969
],
[
17999,
18005
],
[
19130,
19136
],
[
24039,
24045
],
[
25508,
25514
],
[
27571,
27577
],
[
29084,
29090
],
[
30554,
30560
],
[
33525,
33531
],
[
36493,
36499
],
[
39090,
39096
],
[
40464,
40470
],
[
41950,
41956
],
[
45746,
45752
],
[
53772,
53778
],
[
54626,
54632
],
[
54911,
54917
],
[
56375,
56381
],
[
59619,
59625
],
[
60559,
60565
],
[
66558,
66564
],
[
77773,
77779
]
]
] |
import re
import os
import glob
import json
import string
import logging
import subprocess as sp
import yaxil.commons as commons
logger = logging.getLogger(__name__)
# bids legal characters for sub, ses, and task
legal = re.compile('[^a-zA-Z0-9]')
def bids_from_config(yaxil_session, scans_metadata, config, out_base):
'''
Create a BIDS output directory from configuration file
'''
# get session and subject labels from scan metadata
_item = next(iter(scans_metadata))
project,session,subject = _item['session_project'],_item['session_label'],_item['subject_label']
session_id,subject_id = _item['session_id'],_item['subject_id']
# check for dataset_description.json and create it if necessary
check_dataset_description(out_base)
# define bids and sourcedata base directories
sourcedata_base = os.path.join(
out_base,
'sourcedata',
'sub-{0}'.format(legal.sub('', subject)),
'ses-{0}'.format(legal.sub('', session))
)
bids_base = os.path.join(
out_base,
'sub-{0}'.format(legal.sub('', subject)),
'ses-{0}'.format(legal.sub('', session))
)
# put arguments in a struct for convenience
args = commons.struct(
xnat=yaxil_session,
subject=subject,
subject_id=subject_id,
session=session,
session_id=session_id,
project=project,
bids=bids_base,
sourcedata=sourcedata_base
)
# process func, anat, and fmap
func_refs = proc_func(config, args)
anat_refs = proc_anat(config, args)
dwi_refs = proc_dwi(config, args)
fmap_refs = proc_fmap(config, args, func_refs)
def check_dataset_description(bids_dir, bids_version='1.4.0', ds_type='raw'):
if not os.path.exists(bids_dir):
os.makedirs(bids_dir)
ds_desc = os.path.join(bids_dir, 'dataset_description.json')
if not os.path.exists(ds_desc):
js = {
'Name': 'Made by YAXIL',
'BIDSVersion': bids_version,
'DatasetType': ds_type
}
with open(ds_desc, 'w') as fo:
fo.write(json.dumps(js))
def proc_func(config, args):
'''
Download functional data and convert to BIDS
'''
refs = dict()
for scan in iterconfig(config, 'func'):
ref = scan.get('id', None)
templ = 'sub-${sub}_ses-${ses}'
if 'task' in scan:
templ += '_task-${task}'
if 'acquisition' in scan:
templ += '_acq-${acquisition}'
if 'run' in scan:
templ += '_run-${run}'
if 'direction' in scan:
templ += '_dir-${direction}'
templ += '_${modality}'
templ = string.Template(templ)
fbase = templ.safe_substitute(
sub=legal.sub('', args.subject),
ses=legal.sub('', args.session),
task=scan.get('task', None),
acquisition=scan.get('acquisition', None),
run=scan.get('run', None),
direction=scan.get('direction', None),
modality=scan.get('modality', None)
)
# download data to bids sourcedata directory
sourcedata_dir = os.path.join(args.sourcedata, scan['type'])
if not os.path.exists(sourcedata_dir):
os.makedirs(sourcedata_dir)
dicom_dir = os.path.join(sourcedata_dir, '{0}.dicom'.format(fbase))
logger.info('downloading session=%s, scan=%s, loc=%s', args.session, scan['scan'], dicom_dir)
args.xnat.download(args.session, [scan['scan']], out_dir=dicom_dir)
# convert to nifti
fname = '{0}.nii.gz'.format(fbase)
refs[ref] = os.path.join(scan['type'], fname)
fullfile = os.path.join(args.bids, scan['type'], fname)
logger.info('converting %s to %s', dicom_dir, fullfile)
convert(dicom_dir, fullfile)
# add xnat source information to json sidecar
sidecar_file = os.path.join(args.bids, scan['type'], fbase + '.json')
with open(sidecar_file) as fo:
sidecarjs = json.load(fo)
sidecarjs['DataSource'] = {
'application/x-xnat': {
'url': args.xnat.url,
'project': args.project,
'subject': args.subject,
'subject_id': args.subject_id,
'experiment': args.session,
'experiment_id': args.session_id,
'scan': scan['scan']
}
}
# write out updated json sidecar
commons.atomic_write(sidecar_file, json.dumps(sidecarjs, indent=2))
return refs
def proc_anat(config, args):
'''
Download anatomical data and convert to BIDS
'''
refs = dict()
for scan in iterconfig(config, 'anat'):
ref = scan.get('id', None)
templ = 'sub-${sub}_ses-${ses}'
if 'acquisition' in scan:
templ += '_acq-${acquisition}'
if 'run' in scan:
templ += '_run-${run}'
templ += '_${modality}'
templ = string.Template(templ)
fbase = templ.safe_substitute(
sub=legal.sub('', args.subject),
ses=legal.sub('', args.session),
acquisition=scan.get('acquisition', None),
run=scan.get('run', None),
modality=scan.get('modality', None),
)
# download data to bids sourcedata directory
sourcedata_dir = os.path.join(args.sourcedata, scan['type'])
if not os.path.exists(sourcedata_dir):
os.makedirs(sourcedata_dir)
dicom_dir = os.path.join(sourcedata_dir, '{0}.dicom'.format(fbase))
logger.info('downloading session=%s, scan=%s, loc=%s', args.session, scan['scan'], dicom_dir)
args.xnat.download(args.session, [scan['scan']], out_dir=dicom_dir)
# convert to nifti (edge cases for T1w_vNav_setter)
fname = '{0}.nii.gz'.format(fbase)
refs[ref] = os.path.join(scan['type'], fname)
fullfile = os.path.join(args.bids, scan['type'], fname)
logger.info('converting %s to %s', dicom_dir, fullfile)
modality = scan.get('modality', None)
sidecar_files = list()
if modality == 'T1vnav':
fullfile = fullfile.replace('_T1vnav', '_split-%r_T1vnav')
for f in glob.glob(os.path.join(dicom_dir, '*.dcm')):
logger.debug('converting single file %s to %s', f, fullfile)
convert(f, fullfile, single_file=True)
ffbase = re.sub('.nii(.gz)?', '', fullfile)
expr = ffbase.replace('%r', '*') + '.json'
logger.debug('globbing for %s', expr)
sidecar_files = glob.glob(expr)
else:
convert(dicom_dir, fullfile)
sidecar_files = [
os.path.join(args.bids, scan['type'], fbase + '.json')
]
# add xnat source information to json sidecar files
for sidecar_file in sidecar_files:
logger.debug('adding provenance to %s', sidecar_file)
with open(sidecar_file) as fo:
sidecarjs = json.load(fo)
sidecarjs['DataSource'] = {
'application/x-xnat': {
'url': args.xnat.url,
'project': args.project,
'subject': args.subject,
'subject_id': args.subject_id,
'experiment': args.session,
'experiment_id': args.session_id,
'scan': scan['scan']
}
}
# write out updated json sidecar
commons.atomic_write(sidecar_file, json.dumps(sidecarjs, indent=2))
return refs
def proc_dwi(config, args):
'''
Download diffusion data and convert to BIDS
'''
refs = dict()
for scan in iterconfig(config, 'dwi'):
ref = scan.get('id', None)
templ = 'sub-${sub}_ses-${ses}'
if 'acquisition' in scan:
templ += '_acq-${acquisition}'
if 'direction' in scan:
templ += '_dir-${direction}'
if 'run' in scan:
templ += '_run-${run}'
templ += '_${modality}'
templ = string.Template(templ)
fbase = templ.safe_substitute(
sub=legal.sub('', args.subject),
ses=legal.sub('', args.session),
acquisition=scan.get('acquisition', None),
direction=scan.get('direction', None),
run=scan.get('run', None),
modality=scan.get('modality', None)
)
# download data to bids sourcedata directory
sourcedata_dir = os.path.join(args.sourcedata, scan['type'])
if not os.path.exists(sourcedata_dir):
os.makedirs(sourcedata_dir)
dicom_dir = os.path.join(sourcedata_dir, '{0}.dicom'.format(fbase))
logger.info('downloading session=%s, scan=%s, loc=%s', args.session, scan['scan'], dicom_dir)
args.xnat.download(args.session, [scan['scan']], out_dir=dicom_dir)
# convert to nifti
fname = '{0}.nii.gz'.format(fbase)
refs[ref] = os.path.join(scan['type'], fname)
fullfile = os.path.join(args.bids, scan['type'], fname)
logger.info('converting %s to %s', dicom_dir, fullfile)
modality = scan.get('modality', None)
convert(dicom_dir, fullfile)
sidecar_file = os.path.join(args.bids, scan['type'], fbase + '.json')
# add xnat source information to json sidecar files
logger.debug('adding provenance to %s', sidecar_file)
with open(sidecar_file) as fo:
sidecarjs = json.load(fo)
sidecarjs['DataSource'] = {
'application/x-xnat': {
'url': args.xnat.url,
'project': args.project,
'subject': args.subject,
'subject_id': args.subject_id,
'experiment': args.session,
'experiment_id': args.session_id,
'scan': scan['scan']
}
}
# write out updated json sidecar
commons.atomic_write(sidecar_file, json.dumps(sidecarjs, indent=2))
return refs
def proc_fmap(config, args, func_refs=None):
    '''
    Download fieldmap data and convert to BIDS
    '''
refs = dict()
for scan in iterconfig(config, 'fmap'):
ref = scan.get('id', None)
templ = 'sub-${sub}_ses-${ses}'
if 'acquisition' in scan:
templ += '_acq-${acquisition}'
if 'run' in scan:
templ += '_run-${run}'
if 'direction' in scan:
templ += '_dir-${direction}'
templ += '_${modality}'
templ = string.Template(templ)
fbase = templ.safe_substitute(
sub=legal.sub('', args.subject),
ses=legal.sub('', args.session),
acquisition=scan.get('acquisition', None),
run=scan.get('run', None),
direction=scan.get('direction', None),
modality=scan.get('modality', None),
)
# download data to bids sourcedata directory
sourcedata_dir = os.path.join(args.sourcedata, scan['type'])
if not os.path.exists(sourcedata_dir):
os.makedirs(sourcedata_dir)
dicom_dir = os.path.join(sourcedata_dir, '{0}.dicom'.format(fbase))
logger.info('downloading session=%s, scan=%s, loc=%s', args.session, scan['scan'], dicom_dir)
args.xnat.download(args.session, [scan['scan']], out_dir=dicom_dir)
# convert to nifti
fname = '{0}.nii.gz'.format(fbase)
refs[ref] = os.path.join(scan['type'], fname)
fullfile = os.path.join(args.bids, scan['type'], fname)
logger.info('converting %s to %s', dicom_dir, fullfile)
convert(dicom_dir, fullfile)
# rename fieldmap images to BIDS file naming convention
if scan['type'] == 'fmap':
if scan.get('modality', None) == 'magnitude':
rename_fmapm(args.bids, fbase)
elif scan.get('modality', None) == 'phase':
rename_fmapp(args.bids, fbase)
# add xnat source information to json sidecar
sidecar_file = os.path.join(args.bids, scan['type'], fbase + '.json')
with open(sidecar_file, 'r') as fo:
sidecarjs = json.load(fo)
sidecarjs['DataSource'] = {
'application/x-xnat': {
'url': args.xnat.url,
'project': args.project,
'subject': args.subject,
'subject_id': args.subject_id,
'experiment': args.session,
'experiment_id': args.session_id,
'scan': scan['scan']
}
}
# insert intended-for into json sidecar
if 'intended for' in scan and func_refs:
for intended in scan['intended for']:
if intended in func_refs:
logger.info('adding IntendedFor %s to %s', func_refs[intended], sidecar_file)
if 'IntendedFor' not in sidecarjs:
sidecarjs['IntendedFor'] = list()
if func_refs[intended] not in sidecarjs['IntendedFor']:
sidecarjs['IntendedFor'].append(func_refs[intended])
logger.info('writing file %s', sidecar_file)
# write out updated json sidecar
commons.atomic_write(sidecar_file, json.dumps(sidecarjs, indent=2))
return refs
def iterconfig(config, scan_type):
'''
Iterate over BIDS configuration file
'''
if scan_type in config:
for modality,scans in iter(config[scan_type].items()):
for scan in scans:
scan.update({
'type': scan_type,
'modality': modality
})
yield scan
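# --- Illustrative sketch (not part of the module) ---------------------------
# Shape of the configuration expected by iterconfig(), inferred from the keys
# read in proc_func/proc_fmap above.  The exact file format and the field
# values here are assumptions for illustration only.
_EXAMPLE_CONFIG = {
    'func': {
        'bold': [
            {'id': 'rest1', 'scan': 14, 'task': 'rest', 'run': 1},
        ],
    },
    'fmap': {
        'magnitude': [
            {'scan': 20, 'run': 1, 'intended for': ['rest1']},
        ],
    },
}
# for scan in iterconfig(_EXAMPLE_CONFIG, 'func'):
#     print(scan['type'], scan['modality'], scan['scan'])
# -> func bold 14   (iterconfig injects 'type' and 'modality' into each dict)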
def rename_fmapm(bids_base, basename):
'''
Rename magnitude fieldmap file to BIDS specification
'''
files = dict()
for ext in ['nii.gz', 'json']:
for echo in [1, 2]:
fname = '{0}_e{1}.{2}'.format(basename, echo, ext)
src = os.path.join(bids_base, 'fmap', fname)
if os.path.exists(src):
dst = src.replace(
'magnitude_e{0}'.format(echo),
'magnitude{0}'.format(echo)
)
logger.debug('renaming %s to %s', src, dst)
os.rename(src, dst)
files[ext] = dst
return files
def rename_fmapp(bids_base, basename):
'''
Rename phase fieldmap file to BIDS specification
'''
files = dict()
for ext in ['nii.gz', 'json']:
fname = '{0}_e2_ph.{1}'.format(basename, ext)
src = os.path.join(bids_base, 'fmap', fname)
if os.path.exists(src):
dst = src.replace(
'phase_e2_ph',
'phase'
)
logger.debug('renaming %s to %s', src, dst)
os.rename(src, dst)
files[ext] = dst
return files
def convert(input, output, single_file=False):
'''
Run dcm2niix on input file
'''
dirname = os.path.dirname(output)
if not os.path.exists(dirname):
os.makedirs(dirname)
basename = os.path.basename(output)
basename = re.sub('.nii(.gz)?', '', basename)
dcm2niix = commons.which('dcm2niix')
cmd = [
'dcm2niix'
]
if single_file:
cmd.extend([
'-s', 'y'
])
cmd.extend([
'-b', 'y',
'-z', 'y',
'-f', basename,
'-o', dirname,
input
])
logger.debug(cmd)
sp.check_output(cmd)
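# --- Illustrative sketch (not part of the module) ---------------------------
# Minimal end-to-end call, assuming an authenticated yaxil XNAT session object
# and scan metadata carrying the keys read at the top of bids_from_config()
# (project, session and subject labels/ids).  All names and values below are
# illustrative assumptions; _EXAMPLE_CONFIG is the sketch shown after
# iterconfig() above.
def _example_bids_from_config(yaxil_session):
    scans_metadata = [{
        'session_project': 'MyProject',
        'session_label': 'SUBJ01_MR1',
        'session_id': 'XNAT_E0001',
        'subject_label': 'SUBJ01',
        'subject_id': 'XNAT_S0001',
    }]
    bids_from_config(yaxil_session, scans_metadata, _EXAMPLE_CONFIG, '/tmp/bids')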
| [
[
[
7,
9
],
[
223,
225
],
[
6438,
6440
],
[
15087,
15089
]
],
[
[
17,
19
],
[
841,
843
],
[
1016,
1018
],
[
1753,
1755
],
[
1787,
1789
],
[
1823,
1825
],
[
1885,
1887
],
[
3157,
3159
],
[
3216,
3218
],
[
3260,
3262
],
[
3308,
3310
],
[
3632,
3634
],
[
3685,
3687
],
[
3908,
3910
],
[
5368,
5370
],
[
5427,
5429
],
[
5471,
5473
],
[
5519,
5521
],
[
5876,
5878
],
[
5929,
5931
],
[
6250,
6252
],
[
6723,
6725
],
[
8546,
8548
],
[
8605,
8607
],
[
8649,
8651
],
[
8697,
8699
],
[
9021,
9023
],
[
9074,
9076
],
[
9289,
9291
],
[
10946,
10948
],
[
11005,
11007
],
[
11049,
11051
],
[
11097,
11099
],
[
11421,
11423
],
[
11474,
11476
],
[
12004,
12006
],
[
13925,
13927
],
[
13979,
13981
],
[
14228,
14230
],
[
14529,
14531
],
[
14579,
14581
],
[
14768,
14770
],
[
14943,
14945
],
[
14978,
14980
],
[
15011,
15013
],
[
15047,
15049
]
],
[
[
27,
31
],
[
6240,
6244
],
[
6606,
6610
]
],
[
[
39,
43
],
[
2108,
2112
],
[
4026,
4030
],
[
4518,
4522
],
[
7032,
7036
],
[
7576,
7580
],
[
9529,
9533
],
[
10021,
10025
],
[
12127,
12131
],
[
13229,
13233
]
],
[
[
51,
57
],
[
2683,
2689
],
[
4985,
4991
],
[
8113,
8119
],
[
10512,
10518
]
],
[
[
65,
72
],
[
139,
146
]
],
[
[
80,
96
],
[
15423,
15425
]
],
[
[
104,
128
],
[
1212,
1219
],
[
4483,
4490
],
[
7541,
7548
],
[
9986,
9993
],
[
13194,
13201
],
[
15137,
15144
]
],
[
[
130,
136
],
[
3372,
3378
],
[
3738,
3744
],
[
5583,
5589
],
[
5982,
5988
],
[
6301,
6307
],
[
6540,
6546
],
[
6907,
6913
],
[
8761,
8767
],
[
9127,
9133
],
[
9412,
9418
],
[
11161,
11167
],
[
11527,
11533
],
[
12744,
12750
],
[
13100,
13106
],
[
14168,
14174
],
[
14712,
14718
],
[
15401,
15407
]
],
[
[
215,
220
],
[
920,
925
],
[
970,
975
],
[
1073,
1078
],
[
1123,
1128
],
[
2761,
2766
],
[
2806,
2811
],
[
5063,
5068
],
[
5108,
5113
],
[
8191,
8196
],
[
8236,
8241
],
[
10590,
10595
],
[
10635,
10640
]
],
[
[
255,
271
]
],
[
[
1668,
1693
],
[
733,
758
]
],
[
[
2133,
2142
],
[
1509,
1518
]
],
[
[
4572,
4581
],
[
1549,
1558
]
],
[
[
7630,
7638
],
[
1589,
1597
]
],
[
[
10075,
10084
],
[
1628,
1637
]
],
[
[
13283,
13293
],
[
2257,
2267
],
[
4696,
4706
],
[
7752,
7762
],
[
10150,
10160
]
],
[
[
13654,
13666
],
[
11793,
11805
]
],
[
[
14303,
14315
],
[
11896,
11908
]
],
[
[
14839,
14846
],
[
3802,
3809
],
[
6378,
6385
],
[
6648,
6655
],
[
9237,
9244
],
[
11591,
11598
]
]
] |
import requests
import pandas as pd
import ftplib
import io
import re
import json
import datetime
try:
from requests_html import HTMLSession
except Exception:
print("""Warning - Certain functionality
requires requests_html, which is not installed.
Install using:
pip install requests_html
After installation, you may have to restart your Python session.""")
base_url = "https://query1.finance.yahoo.com/v8/finance/chart/"
def build_url(ticker, start_date = None, end_date = None, interval = "1d"):
if end_date is None:
end_seconds = int(pd.Timestamp("now").timestamp())
else:
end_seconds = int(pd.Timestamp(end_date).timestamp())
if start_date is None:
start_seconds = 7223400
else:
start_seconds = int(pd.Timestamp(start_date).timestamp())
site = base_url + ticker
params = {"period1": start_seconds, "period2": end_seconds,
"interval": interval.lower(), "events": "div,splits"}
return site, params
def force_float(elt):
try:
return float(elt)
except:
return elt
def _convert_to_numeric(s):
if "M" in s:
s = s.strip("M")
return force_float(s) * 1_000_000
if "B" in s:
s = s.strip("B")
return force_float(s) * 1_000_000_000
return force_float(s)
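# Illustrative examples of the helper above (not part of the library):
# _convert_to_numeric("1.5M")  -> 1_500_000.0
# _convert_to_numeric("2B")    -> 2_000_000_000.0
# _convert_to_numeric("3.14")  -> 3.14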
def get_data(ticker, start_date = None, end_date = None, index_as_date = True,
interval = "1d", headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
):
'''Downloads historical stock price data into a pandas data frame. Interval
must be "1d", "1wk", "1mo", or "1m" for daily, weekly, monthly, or minute data.
Intraday minute data is limited to 7 days.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
@param: interval = "1d"
'''
if interval not in ("1d", "1wk", "1mo", "1m"):
raise AssertionError("interval must be of of '1d', '1wk', '1mo', or '1m'")
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, interval)
resp = requests.get(site, params = params, headers = headers)
if not resp.ok:
raise AssertionError(resp.json())
# get JSON response
data = resp.json()
# get open / high / low / close data
frame = pd.DataFrame(data["chart"]["result"][0]["indicators"]["quote"][0])
# get the date info
temp_time = data["chart"]["result"][0]["timestamp"]
if interval != "1m":
# add in adjclose
frame["adjclose"] = data["chart"]["result"][0]["indicators"]["adjclose"][0]["adjclose"]
frame.index = pd.to_datetime(temp_time, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
frame = frame[["open", "high", "low", "close", "adjclose", "volume"]]
else:
frame.index = pd.to_datetime(temp_time, unit = "s")
frame = frame[["open", "high", "low", "close", "volume"]]
frame['ticker'] = ticker.upper()
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
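# --- Illustrative usage sketch (not part of the library) --------------------
# Pull daily prices for 2020 into a DataFrame; the ticker, dates and interval
# are example values only, and the call performs a live request to Yahoo.
def _example_get_data():
    df = get_data("AAPL", start_date="2020-01-01", end_date="2020-12-31",
                  interval="1d")
    # columns: open, high, low, close, adjclose, volume, ticker
    return df[["close", "adjclose", "volume"]]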
def tickers_sp500(include_company_data = False):
'''Downloads list of tickers currently listed in the S&P 500 '''
# get list of all S&P 500 stocks
sp500 = pd.read_html("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies")[0]
sp500["Symbol"] = sp500["Symbol"].str.replace(".", "-", regex=True)
if include_company_data:
return sp500
sp_tickers = sp500.Symbol.tolist()
sp_tickers = sorted(sp_tickers)
return sp_tickers
def tickers_nasdaq(include_company_data = False):
'''Downloads list of tickers currently listed in the NASDAQ'''
ftp = ftplib.FTP("ftp.nasdaqtrader.com")
ftp.login()
ftp.cwd("SymbolDirectory")
r = io.BytesIO()
ftp.retrbinary('RETR nasdaqlisted.txt', r.write)
if include_company_data:
r.seek(0)
data = pd.read_csv(r, sep = "|")
return data
info = r.getvalue().decode()
splits = info.split("|")
tickers = [x for x in splits if "\r\n" in x]
    tickers = [x.split("\r\n")[1] for x in tickers if "NASDAQ" not in x and x != "\r\n"]
tickers = [ticker for ticker in tickers if "File" not in ticker]
ftp.close()
return tickers
def tickers_other(include_company_data = False):
'''Downloads list of tickers currently listed in the "otherlisted.txt"
file on "ftp.nasdaqtrader.com" '''
ftp = ftplib.FTP("ftp.nasdaqtrader.com")
ftp.login()
ftp.cwd("SymbolDirectory")
r = io.BytesIO()
ftp.retrbinary('RETR otherlisted.txt', r.write)
if include_company_data:
r.seek(0)
data = pd.read_csv(r, sep = "|")
return data
info = r.getvalue().decode()
splits = info.split("|")
tickers = [x for x in splits if "\r\n" in x]
tickers = [x.split("\r\n")[1] for x in tickers]
tickers = [ticker for ticker in tickers if "File" not in ticker]
ftp.close()
return tickers
def tickers_dow(include_company_data = False):
'''Downloads list of currently traded tickers on the Dow'''
site = "https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average"
table = pd.read_html(site, attrs = {"id":"constituents"})[0]
if include_company_data:
return table
dow_tickers = sorted(table['Symbol'].tolist())
return dow_tickers
def tickers_ibovespa(include_company_data = False):
'''Downloads list of currently traded tickers on the Ibovespa, Brazil'''
table = pd.read_html("https://pt.wikipedia.org/wiki/Lista_de_companhias_citadas_no_Ibovespa")[0]
table.columns = ["Symbol", "Share", "Sector", "Type", "Site"]
if include_company_data:
return table
ibovespa_tickers = sorted(table.Symbol.tolist())
return ibovespa_tickers
def tickers_nifty50(include_company_data = False, headers = {'User-agent': 'Mozilla/5.0'}):
'''Downloads list of currently traded tickers on the NIFTY 50, India'''
site = "https://finance.yahoo.com/quote/%5ENSEI/components?p=%5ENSEI"
table = pd.read_html(requests.get(site, headers=headers).text)[0]
if include_company_data:
return table
nifty50 = sorted(table['Symbol'].tolist())
return nifty50
def tickers_niftybank():
''' Currently traded tickers on the NIFTY BANK, India '''
niftybank = ['AXISBANK', 'KOTAKBANK', 'HDFCBANK', 'SBIN', 'BANKBARODA', 'INDUSINDBK', 'PNB', 'IDFCFIRSTB', 'ICICIBANK', 'RBLBANK', 'FEDERALBNK', 'BANDHANBNK']
return niftybank
def tickers_ftse100(include_company_data = False):
'''Downloads a list of the tickers traded on the FTSE 100 index'''
table = pd.read_html("https://en.wikipedia.org/wiki/FTSE_100_Index", attrs = {"id": "constituents"})[0]
if include_company_data:
return table
return sorted(table.EPIC.tolist())
def tickers_ftse250(include_company_data = False):
'''Downloads a list of the tickers traded on the FTSE 250 index'''
table = pd.read_html("https://en.wikipedia.org/wiki/FTSE_250_Index", attrs = {"id": "constituents"})[0]
table.columns = ["Company", "Ticker"]
if include_company_data:
return table
return sorted(table.Ticker.tolist())
def get_quote_table(ticker , dict_result = True, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes data elements found on Yahoo Finance's quote page
of input ticker
@param: ticker
@param: dict_result = True
'''
site = "https://finance.yahoo.com/quote/" + ticker + "?p=" + ticker
tables = pd.read_html(requests.get(site, headers=headers).text)
data = tables[0].append(tables[1])
data.columns = ["attribute" , "value"]
quote_price = pd.DataFrame(["Quote Price", get_live_price(ticker)]).transpose()
quote_price.columns = data.columns.copy()
data = data.append(quote_price)
data = data.sort_values("attribute")
data = data.drop_duplicates().reset_index(drop = True)
data["value"] = data.value.map(force_float)
if dict_result:
result = {key : val for key,val in zip(data.attribute , data.value)}
return result
return data
def get_stats(ticker, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes information from the statistics tab on Yahoo Finance
for an input ticker
@param: ticker
'''
stats_site = "https://finance.yahoo.com/quote/" + ticker + \
"/key-statistics?p=" + ticker
tables = pd.read_html(requests.get(stats_site, headers=headers).text)
tables = [table for table in tables[1:] if table.shape[1] == 2]
table = tables[0]
for elt in tables[1:]:
table = table.append(elt)
table.columns = ["Attribute" , "Value"]
table = table.reset_index(drop = True)
return table
def get_stats_valuation(ticker, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes Valuation Measures table from the statistics tab on Yahoo Finance
for an input ticker
@param: ticker
'''
stats_site = "https://finance.yahoo.com/quote/" + ticker + \
"/key-statistics?p=" + ticker
tables = pd.read_html(requests.get(stats_site, headers=headers).text)
tables = [table for table in tables if "Trailing P/E" in table.iloc[:,0].tolist()]
table = tables[0].reset_index(drop = True)
return table
def _parse_json(url, headers = {'User-agent': 'Mozilla/5.0'}):
html = requests.get(url=url, headers = headers).text
json_str = html.split('root.App.main =')[1].split(
'(this)')[0].split(';\n}')[0].strip()
try:
data = json.loads(json_str)[
'context']['dispatcher']['stores']['QuoteSummaryStore']
except:
return '{}'
else:
# return data
new_data = json.dumps(data).replace('{}', 'null')
new_data = re.sub(r'\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}', r'\1', new_data)
json_info = json.loads(new_data)
return json_info
def _parse_table(json_info):
df = pd.DataFrame(json_info)
if df.empty:
return df
del df["maxAge"]
df.set_index("endDate", inplace=True)
df.index = pd.to_datetime(df.index, unit="s")
df = df.transpose()
df.index.name = "Breakdown"
return df
def get_income_statement(ticker, yearly = True):
'''Scrape income statement from Yahoo Finance for a given ticker
@param: ticker
'''
income_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(income_site)
if yearly:
temp = json_info["incomeStatementHistory"]["incomeStatementHistory"]
else:
temp = json_info["incomeStatementHistoryQuarterly"]["incomeStatementHistory"]
return _parse_table(temp)
def get_balance_sheet(ticker, yearly = True):
'''Scrapes balance sheet from Yahoo Finance for an input ticker
@param: ticker
'''
balance_sheet_site = "https://finance.yahoo.com/quote/" + ticker + \
"/balance-sheet?p=" + ticker
json_info = _parse_json(balance_sheet_site)
try:
if yearly:
temp = json_info["balanceSheetHistory"]["balanceSheetStatements"]
else:
temp = json_info["balanceSheetHistoryQuarterly"]["balanceSheetStatements"]
except:
temp = []
return _parse_table(temp)
def get_cash_flow(ticker, yearly = True):
'''Scrapes the cash flow statement from Yahoo Finance for an input ticker
@param: ticker
'''
cash_flow_site = "https://finance.yahoo.com/quote/" + \
ticker + "/cash-flow?p=" + ticker
json_info = _parse_json(cash_flow_site)
if yearly:
temp = json_info["cashflowStatementHistory"]["cashflowStatements"]
else:
temp = json_info["cashflowStatementHistoryQuarterly"]["cashflowStatements"]
return _parse_table(temp)
def get_financials(ticker, yearly = True, quarterly = True):
'''Scrapes financials data from Yahoo Finance for an input ticker, including
balance sheet, cash flow statement, and income statement. Returns dictionary
of results.
@param: ticker
@param: yearly = True
@param: quarterly = True
'''
if not yearly and not quarterly:
raise AssertionError("yearly or quarterly must be True")
financials_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(financials_site)
result = {}
if yearly:
temp = json_info["incomeStatementHistory"]["incomeStatementHistory"]
table = _parse_table(temp)
result["yearly_income_statement"] = table
temp = json_info["balanceSheetHistory"]["balanceSheetStatements"]
table = _parse_table(temp)
result["yearly_balance_sheet"] = table
temp = json_info["cashflowStatementHistory"]["cashflowStatements"]
table = _parse_table(temp)
result["yearly_cash_flow"] = table
if quarterly:
temp = json_info["incomeStatementHistoryQuarterly"]["incomeStatementHistory"]
table = _parse_table(temp)
result["quarterly_income_statement"] = table
temp = json_info["balanceSheetHistoryQuarterly"]["balanceSheetStatements"]
table = _parse_table(temp)
result["quarterly_balance_sheet"] = table
temp = json_info["cashflowStatementHistoryQuarterly"]["cashflowStatements"]
table = _parse_table(temp)
result["quarterly_cash_flow"] = table
return result
def get_holders(ticker, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes the Holders page from Yahoo Finance for an input ticker
@param: ticker
'''
holders_site = "https://finance.yahoo.com/quote/" + \
ticker + "/holders?p=" + ticker
tables = pd.read_html(requests.get(holders_site, headers=headers).text)
table_names = ["Major Holders" , "Direct Holders (Forms 3 and 4)" ,
"Top Institutional Holders" , "Top Mutual Fund Holders"]
table_mapper = {key : val for key,val in zip(table_names , tables)}
return table_mapper
def get_analysts_info(ticker, headers = {'User-agent': 'Mozilla/5.0'}):
'''Scrapes the Analysts page from Yahoo Finance for an input ticker
@param: ticker
'''
analysts_site = "https://finance.yahoo.com/quote/" + ticker + \
"/analysts?p=" + ticker
tables = pd.read_html(requests.get(analysts_site, headers=headers).text)
table_names = [table.columns[0] for table in tables]
table_mapper = {key : val for key , val in zip(table_names , tables)}
return table_mapper
def get_live_price(ticker):
'''Gets the live price of input ticker
@param: ticker
'''
df = get_data(ticker, end_date = pd.Timestamp.today() + pd.DateOffset(10))
return df.close[-1]
def _raw_get_daily_info(site):
session = HTMLSession()
resp = session.get(site)
tables = pd.read_html(resp.html.raw_html)
df = tables[0].copy()
df.columns = tables[0].columns
del df["52 Week Range"]
df["% Change"] = df["% Change"].map(lambda x: float(x.strip("%+").replace(",", "")))
fields_to_change = [x for x in df.columns.tolist() if "Vol" in x \
or x == "Market Cap"]
for field in fields_to_change:
if type(df[field][0]) == str:
df[field] = df[field].map(_convert_to_numeric)
session.close()
return df
def get_day_most_active(count: int = 100):
return _raw_get_daily_info(f"https://finance.yahoo.com/most-active?offset=0&count={count}")
def get_day_gainers(count: int = 100):
return _raw_get_daily_info(f"https://finance.yahoo.com/gainers?offset=0&count={count}")
def get_day_losers(count: int = 100):
return _raw_get_daily_info(f"https://finance.yahoo.com/losers?offset=0&count={count}")
def get_top_crypto():
'''Gets the top 100 Cryptocurrencies by Market Cap'''
session = HTMLSession()
resp = session.get("https://finance.yahoo.com/cryptocurrencies?offset=0&count=100")
tables = pd.read_html(resp.html.raw_html)
df = tables[0].copy()
df["% Change"] = df["% Change"].map(lambda x: float(str(x).strip("%").\
strip("+").\
replace(",", "")))
del df["52 Week Range"]
del df["1 Day Chart"]
fields_to_change = [x for x in df.columns.tolist() if "Volume" in x \
or x == "Market Cap" or x == "Circulating Supply"]
for field in fields_to_change:
if type(df[field][0]) == str:
df[field] = df[field].map(lambda x: _convert_to_numeric(str(x)))
session.close()
return df
def get_dividends(ticker, start_date = None, end_date = None, index_as_date = True,
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
):
'''Downloads historical dividend data into a pandas data frame.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
'''
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, "1d")
resp = requests.get(site, params = params, headers = headers)
if not resp.ok:
return pd.DataFrame()
# get JSON response
data = resp.json()
# check if there is data available for dividends
if "events" not in data["chart"]["result"][0] or "dividends" not in data["chart"]["result"][0]['events']:
return pd.DataFrame()
# get the dividend data
frame = pd.DataFrame(data["chart"]["result"][0]['events']['dividends'])
frame = frame.transpose()
frame.index = pd.to_datetime(frame.index, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
# sort in chronological order
frame = frame.sort_index()
frame['ticker'] = ticker.upper()
# remove old date column
frame = frame.drop(columns='date')
frame = frame.rename({'amount': 'dividend'}, axis = 'columns')
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
def get_splits(ticker, start_date = None, end_date = None, index_as_date = True,
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
):
'''Downloads historical stock split data into a pandas data frame.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
'''
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, "1d")
resp = requests.get(site, params = params, headers = headers)
if not resp.ok:
raise AssertionError(resp.json())
# get JSON response
data = resp.json()
# check if there is data available for events
if "events" not in data["chart"]["result"][0]:
raise AssertionError("There is no data available on stock events, or none have occured")
# check if there is data available for splits
if "splits" not in data["chart"]["result"][0]['events']:
raise AssertionError("There is no data available on stock splits, or none have occured")
# get the split data
frame = pd.DataFrame(data["chart"]["result"][0]['events']['splits'])
frame = frame.transpose()
frame.index = pd.to_datetime(frame.index, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
# sort in to chronological order
frame = frame.sort_index()
frame['ticker'] = ticker.upper()
# remove unnecessary columns
frame = frame.drop(columns=['date', 'denominator', 'numerator'])
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
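# --- Illustrative usage sketch (not part of the library) --------------------
# Corporate-action history for a ticker; the tickers and date are examples.
def _example_corporate_actions():
    divs = get_dividends("MSFT", start_date="2015-01-01")  # empty frame if none
    splits = get_splits("AAPL")        # raises AssertionError if no splits exist
    return divs, splits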
def get_earnings(ticker):
'''Scrapes earnings data from Yahoo Finance for an input ticker
@param: ticker
'''
result = {
"quarterly_results": pd.DataFrame(),
"yearly_revenue_earnings": pd.DataFrame(),
"quarterly_revenue_earnings": pd.DataFrame()
}
financials_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(financials_site)
if "earnings" not in json_info:
return result
temp = json_info["earnings"]
    if temp is None:
return result
result["quarterly_results"] = pd.DataFrame.from_dict(temp["earningsChart"]["quarterly"])
result["yearly_revenue_earnings"] = pd.DataFrame.from_dict(temp["financialsChart"]["yearly"])
result["quarterly_revenue_earnings"] = pd.DataFrame.from_dict(temp["financialsChart"]["quarterly"])
return result
### Earnings functions
def _parse_earnings_json(url, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
):
resp = requests.get(url, headers = headers)
content = resp.content.decode(encoding='utf-8', errors='strict')
page_data = [row for row in content.split(
'\n') if row.startswith('root.App.main = ')][0][:-1]
page_data = page_data.split('root.App.main = ', 1)[1]
return json.loads(page_data)
def get_next_earnings_date(ticker):
base_earnings_url = 'https://finance.yahoo.com/quote'
new_url = base_earnings_url + "/" + ticker
parsed_result = _parse_earnings_json(new_url)
temp = parsed_result['context']['dispatcher']['stores']['QuoteSummaryStore']['calendarEvents']['earnings']['earningsDate'][0]['raw']
return datetime.datetime.fromtimestamp(temp)
def get_earnings_history(ticker):
'''Inputs: @ticker
Returns the earnings calendar history of the input ticker with
EPS actual vs. expected data.'''
url = 'https://finance.yahoo.com/calendar/earnings?symbol=' + ticker
result = _parse_earnings_json(url)
return result["context"]["dispatcher"]["stores"]["ScreenerResultsStore"]["results"]["rows"]
def get_earnings_for_date(date, offset = 0, count = 1):
'''Inputs: @date
Returns a dictionary of stock tickers with earnings expected on the
input date. The dictionary contains the expected EPS values for each
stock if available.'''
base_earnings_url = 'https://finance.yahoo.com/calendar/earnings'
if offset >= count:
return []
temp = pd.Timestamp(date)
date = temp.strftime("%Y-%m-%d")
dated_url = '{0}?day={1}&offset={2}&size={3}'.format(
base_earnings_url, date, offset, 100)
result = _parse_earnings_json(dated_url)
stores = result['context']['dispatcher']['stores']
earnings_count = stores['ScreenerCriteriaStore']['meta']['total']
new_offset = offset + 100
more_earnings = get_earnings_for_date(date, new_offset, earnings_count)
current_earnings = stores['ScreenerResultsStore']['results']['rows']
total_earnings = current_earnings + more_earnings
return total_earnings
def get_earnings_in_date_range(start_date, end_date):
'''Inputs: @start_date
@end_date
Returns the stock tickers with expected EPS data for all dates in the
    input range (inclusive of the start_date and end_date).'''
earnings_data = []
days_diff = pd.Timestamp(end_date) - pd.Timestamp(start_date)
days_diff = days_diff.days
current_date = pd.Timestamp(start_date)
dates = [current_date + datetime.timedelta(diff) for diff in range(days_diff + 1)]
dates = [d.strftime("%Y-%m-%d") for d in dates]
i = 0
while i < len(dates):
try:
earnings_data += get_earnings_for_date(dates[i])
except Exception:
pass
i += 1
return earnings_data
def get_currencies(headers = {'User-agent': 'Mozilla/5.0'}):
'''Returns the currencies table from Yahoo Finance'''
site = "https://finance.yahoo.com/currencies"
tables = pd.read_html(requests.get(site, headers=headers).text)
result = tables[0]
return result
def get_futures(headers = {'User-agent': 'Mozilla/5.0'}):
'''Returns the futures table from Yahoo Finance'''
site = "https://finance.yahoo.com/commodities"
tables = pd.read_html(requests.get(site, headers=headers).text)
result = tables[0]
return result
def get_undervalued_large_caps(headers = {'User-agent': 'Mozilla/5.0'}):
'''Returns the undervalued large caps table from Yahoo Finance'''
site = "https://finance.yahoo.com/screener/predefined/undervalued_large_caps?offset=0&count=100"
tables = pd.read_html(requests.get(site, headers=headers).text)
result = tables[0]
return result
def get_quote_data(ticker, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
):
'''Inputs: @ticker
Returns a dictionary containing over 70 elements corresponding to the
input ticker, including company name, book value, moving average data,
pre-market / post-market price (when applicable), and more.'''
site = "https://query1.finance.yahoo.com/v7/finance/quote?symbols=" + ticker
resp = requests.get(site, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
)
if not resp.ok:
raise AssertionError("""Invalid response from server. Check if ticker is
valid.""")
json_result = resp.json()
info = json_result["quoteResponse"]["result"]
return info[0]
def get_market_status():
'''Returns the current state of the market - PRE, POST, OPEN, or CLOSED'''
quote_data = get_quote_data("^dji")
return quote_data["marketState"]
def get_premarket_price(ticker):
'''Inputs: @ticker
Returns the current pre-market price of the input ticker
    (returns a value only if a pre-market price is available).'''
quote_data = get_quote_data(ticker)
if "preMarketPrice" in quote_data:
return quote_data["preMarketPrice"]
raise AssertionError("Premarket price not currently available.")
def get_postmarket_price(ticker):
'''Inputs: @ticker
Returns the current post-market price of the input ticker
    (returns a value only if a post-market price is available).'''
quote_data = get_quote_data(ticker)
if "postMarketPrice" in quote_data:
return quote_data["postMarketPrice"]
raise AssertionError("Postmarket price not currently available.")
# Company Information Functions
def get_company_info(ticker):
'''Scrape the company information for a ticker
@param: ticker
'''
site = f"https://finance.yahoo.com/quote/{ticker}/profile?p={ticker}"
json_info = _parse_json(site)
json_info = json_info["assetProfile"]
info_frame = pd.DataFrame.from_dict(json_info,
orient="index",
columns=["Value"])
info_frame = info_frame.drop("companyOfficers", axis="index")
info_frame.index.name = "Breakdown"
return info_frame
def get_company_officers(ticker):
'''Scrape the company information and return a table of the officers
@param: ticker
'''
site = f"https://finance.yahoo.com/quote/{ticker}/profile?p={ticker}"
json_info = _parse_json(site)
json_info = json_info["assetProfile"]["companyOfficers"]
info_frame = pd.DataFrame.from_dict(json_info)
info_frame = info_frame.set_index("name")
return info_frame
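# Hedged usage sketch (not part of the original module): exercising a few of the
# scraper helpers above. Requires network access to Yahoo Finance; the tickers
# and the "regularMarketPrice" field below are illustrative assumptions.
if __name__ == "__main__":
    print(get_market_status())                    # PRE, POST, OPEN, or CLOSED
    quote = get_quote_data("AAPL")                # dict with 70+ fields
    print(quote.get("regularMarketPrice"))
    officers = get_company_officers("MSFT")       # DataFrame indexed by officer name
    print(officers.head())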
| [
[
[
7,
15
],
[
2459,
2467
],
[
6833,
6841
],
[
8453,
8461
],
[
9473,
9481
],
[
10199,
10207
],
[
10511,
10519
],
[
15314,
15322
],
[
16046,
16054
],
[
19342,
19350
],
[
21031,
21039
],
[
23536,
23544
],
[
26944,
26952
],
[
27251,
27259
],
[
27644,
27652
],
[
28297,
28305
]
],
[
[
24,
36
],
[
679,
681
],
[
760,
762
],
[
921,
923
],
[
2716,
2718
],
[
3051,
3053
],
[
3283,
3285
],
[
3790,
3792
],
[
4485,
4487
],
[
5301,
5303
],
[
5883,
5885
],
[
6242,
6244
],
[
6820,
6822
],
[
7456,
7458
],
[
7819,
7821
],
[
8440,
8442
],
[
8612,
8614
],
[
9460,
9462
],
[
10186,
10188
],
[
11112,
11114
],
[
11268,
11270
],
[
15301,
15303
],
[
16033,
16035
],
[
16448,
16450
],
[
16471,
16473
],
[
16665,
16667
],
[
17912,
17914
],
[
19446,
19448
],
[
19713,
19715
],
[
19776,
19778
],
[
19906,
19908
],
[
21693,
21695
],
[
21820,
21822
],
[
22535,
22537
],
[
22587,
22589
],
[
22642,
22644
],
[
23006,
23008
],
[
23112,
23114
],
[
23220,
23222
],
[
25168,
25170
],
[
26159,
26161
],
[
26184,
26186
],
[
26281,
26283
],
[
26931,
26933
],
[
27238,
27240
],
[
27631,
27633
],
[
30099,
30101
],
[
30718,
30720
]
],
[
[
45,
51
],
[
4248,
4254
],
[
5065,
5071
]
],
[
[
60,
62
],
[
4347,
4349
],
[
5164,
5166
]
],
[
[
71,
73
],
[
10932,
10934
]
],
[
[
82,
86
],
[
10694,
10698
],
[
10873,
10877
],
[
11017,
11021
],
[
23884,
23888
]
],
[
[
95,
103
],
[
24273,
24281
],
[
26349,
26357
]
],
[
[
143,
154
],
[
16595,
16606
],
[
17783,
17794
]
],
[
[
475,
483
],
[
977,
985
]
],
[
[
546,
555
],
[
2397,
2406
],
[
19284,
19293
],
[
20973,
20982
]
],
[
[
1181,
1192
],
[
1372,
1383
],
[
1465,
1476
],
[
1514,
1525
],
[
8924,
8935
]
],
[
[
1286,
1305
],
[
17164,
17183
],
[
18607,
18626
]
],
[
[
1538,
1546
],
[
16420,
16428
]
],
[
[
3624,
3637
]
],
[
[
4111,
4125
]
],
[
[
4890,
4903
]
],
[
[
5675,
5686
]
],
[
[
6095,
6111
]
],
[
[
6563,
6578
]
],
[
[
7019,
7036
]
],
[
[
7312,
7327
]
],
[
[
7669,
7684
]
],
[
[
8087,
8102
]
],
[
[
9119,
9128
]
],
[
[
9818,
9837
]
],
[
[
10440,
10451
],
[
11680,
11691
],
[
12283,
12294
],
[
12926,
12937
],
[
13805,
13816
],
[
22791,
22802
],
[
30020,
30031
],
[
30620,
30631
]
],
[
[
11075,
11087
],
[
11921,
11933
],
[
12587,
12599
],
[
13170,
13182
],
[
13976,
13988
],
[
14144,
14156
],
[
14314,
14326
],
[
14502,
14514
],
[
14682,
14694
],
[
14864,
14876
]
],
[
[
11390,
11410
]
],
[
[
11963,
11980
]
],
[
[
12621,
12634
]
],
[
[
13204,
13218
]
],
[
[
14970,
14981
]
],
[
[
15696,
15713
]
],
[
[
16288,
16302
],
[
8641,
8655
]
],
[
[
16544,
16563
],
[
17307,
17326
],
[
17450,
17469
],
[
17588,
17607
]
],
[
[
17254,
17273
]
],
[
[
17401,
17416
]
],
[
[
17540,
17554
]
],
[
[
17677,
17691
]
],
[
[
18763,
18776
]
],
[
[
20456,
20466
]
],
[
[
22351,
22363
]
],
[
[
23341,
23361
],
[
24085,
24105
],
[
24614,
24634
],
[
25353,
25373
]
],
[
[
23913,
23935
]
],
[
[
24320,
24340
]
],
[
[
24762,
24783
],
[
25584,
25605
],
[
26566,
26587
]
],
[
[
25815,
25841
]
],
[
[
26738,
26752
]
],
[
[
27050,
27061
]
],
[
[
27357,
27383
]
],
[
[
27750,
27764
],
[
28884,
28898
],
[
29164,
29178
],
[
29577,
29591
]
],
[
[
28753,
28770
]
],
[
[
28954,
28973
]
],
[
[
29365,
29385
]
],
[
[
29816,
29832
]
],
[
[
30390,
30410
]
]
] |
from .mc import *
| [
[
[
16,
17
]
]
] |
import os
def find_base(p, bases):
for base_name, base_path in bases.items():
r = os.path.relpath(p, base_path)
if r and (r == '.' or r[0] != '.'):
return base_name, r
return None
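# Hedged usage sketch (paths are invented for illustration): find_base maps a
# path to the first base directory containing it, returning (base_name,
# relative_path), or None when no base contains it.
if __name__ == "__main__":
    bases = {"src": "/home/user/project/src", "docs": "/home/user/project/docs"}
    print(find_base("/home/user/project/src/pkg/mod.py", bases))  # ('src', 'pkg/mod.py') on POSIX
    print(find_base("/tmp/elsewhere.txt", bases))                 # None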
| [
[
[
7,
9
],
[
95,
97
]
],
[
[
15,
24
]
]
] |
###############################################################################
# Lambda kinder class
###############################################################################
# lambda is actionable dot ru
###############################################################################
# TODO:
###############################################################################
from lib.krampus_logging import KLog
class Lambda():
def __init__(self, func_name, region, sess):
try:
self.conn = sess.client("lambda", region_name=region)
except Exception as e:
KLog.log("issue connecting to AWS %s" % str(e), "critical")
exit("[!] issue connecting to AWS: %s" % str(e))
# get volume reference
self.func = func_name
self.region = region
# save raw sess in case of instance actions
self.sess = sess
def disable(self):
KLog.log("no disable action for lambda function '%s', will delete instead" % self.func, "warning")
return self.kill()
def kill(self):
try:
# low level call, just pass the resp back
return self.conn.delete_function(FunctionName=self.func)
except Exception as e:
            if str(e).find("ResourceNotFoundException") != -1:
KLog.log("could not find function '%s', dequeueing task" % self.func)
else:
KLog.log("could not delete function '%s', unknown error: %s" % str(e), "critical")
return None
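# Hedged usage sketch (kept as comments because running it would delete a real
# AWS Lambda function): the class expects a boto3-style session whose .client()
# returns a Lambda client; the function name and region are placeholders.
#
#   import boto3
#   sess = boto3.session.Session()
#   fn = Lambda("my-test-function", "us-east-1", sess)
#   fn.disable()   # logs a warning, then deletes the function via kill()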
| [
[
[
412,
416
],
[
606,
610
],
[
926,
930
],
[
1323,
1327
],
[
1427,
1431
]
],
[
[
425,
431
]
]
] |
T = float(input("Enter the current temperature: "))
if T >= 26.0 and T <= 36.0:
    print("The temperature is fine")
elif T > 36.0:
    print("The temperature is hot\n Drink plenty of fluids")
elif T >= 15.0 and T < 26.0:
    print("The temperature is pleasant")
else:
    print("The temperature is cold")
| [
[
[
0,
1
],
[
65,
66
],
[
79,
80
],
[
128,
129
],
[
204,
205
],
[
218,
219
]
]
] |
import numpy as np
import scipy.sparse
import theano
from theano import gof, tensor
from theano.gof.op import Op
from theano.sparse.basic import (
Remove0,
SparseType,
_is_sparse,
as_sparse_variable,
remove0,
)
# Also for compatibility
from theano.tensor import discrete_dtypes, float_dtypes
# Probability Ops are currently back in sandbox, because they do not respect
# Theano's Op contract, as their behaviour is not reproducible: calling
# the perform() method twice with the same argument will yield different
# results.
# from theano.sparse.basic import (
# Multinomial, multinomial, Poisson, poisson,
# Binomial, csr_fbinomial, csc_fbinomial, csr_dbinomial, csc_dbinomial)
# Alias to maintain compatibility
EliminateZeros = Remove0
eliminate_zeros = remove0
# Probability
class Poisson(Op):
"""Return a sparse having random values from a Poisson density
with mean from the input.
WARNING: This Op is NOT deterministic, as calling it twice with the
same inputs will NOT give the same result. This is a violation of
Theano's contract for Ops
:param x: Sparse matrix.
:return: A sparse matrix of random integers of a Poisson density
with mean of `x` element wise.
"""
__props__ = ()
def make_node(self, x):
x = as_sparse_variable(x)
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
assert _is_sparse(x)
assert x.format in ["csr", "csc"]
out[0] = x.copy()
out[0].data = np.asarray(np.random.poisson(out[0].data), dtype=x.dtype)
out[0].eliminate_zeros()
def grad(self, inputs, outputs_gradients):
comment = "No gradient exists for class Poisson in\
theano/sparse/sandbox/sp2.py"
return [
theano.gradient.grad_undefined(
op=self, x_pos=0, x=inputs[0], comment=comment
)
]
def infer_shape(self, fgraph, node, ins_shapes):
return ins_shapes
poisson = Poisson()
class Binomial(Op):
"""Return a sparse matrix having random values from a binomial
    density having number of experiments `n` and probability of success
`p`.
WARNING: This Op is NOT deterministic, as calling it twice with the
same inputs will NOT give the same result. This is a violation of
Theano's contract for Ops
    :param n: Tensor scalar representing the number of experiments.
:param p: Tensor scalar representing the probability of success.
:param shape: Tensor vector for the output shape.
:return: A sparse matrix of integers representing the number
of success.
"""
__props__ = ("format", "dtype")
def __init__(self, format, dtype):
self.format = format
self.dtype = dtype
def make_node(self, n, p, shape):
n = tensor.as_tensor_variable(n)
p = tensor.as_tensor_variable(p)
shape = tensor.as_tensor_variable(shape)
assert n.dtype in discrete_dtypes
assert p.dtype in float_dtypes
assert shape.dtype in discrete_dtypes
return gof.Apply(
self, [n, p, shape], [SparseType(dtype=self.dtype, format=self.format)()]
)
def perform(self, node, inputs, outputs):
(n, p, shape) = inputs
(out,) = outputs
binomial = np.random.binomial(n, p, size=shape)
csx_matrix = getattr(scipy.sparse, self.format + "_matrix")
out[0] = csx_matrix(binomial, dtype=self.dtype)
def connection_pattern(self, node):
return [[True], [True], [False]]
def grad(self, inputs, gout):
(n, p, shape) = inputs
(gz,) = gout
comment_n = "No gradient exists for the number of samples in class\
Binomial of theano/sparse/sandbox/sp2.py"
comment_p = "No gradient exists for the prob of success in class\
Binomial of theano/sparse/sandbox/sp2.py"
return [
theano.gradient.grad_undefined(op=self, x_pos=0, x=n, comment=comment_n),
theano.gradient.grad_undefined(op=self, x_pos=1, x=p, comment=comment_p),
theano.gradient.disconnected_type(),
]
def infer_shape(self, fgraph, node, ins_shapes):
return [(node.inputs[2][0], node.inputs[2][1])]
csr_fbinomial = Binomial("csr", "float32")
csc_fbinomial = Binomial("csc", "float32")
csr_dbinomial = Binomial("csr", "float64")
csc_dbinomial = Binomial("csc", "float64")
class Multinomial(Op):
"""Return a sparse matrix having random values from a multinomial
    density having number of experiments `n` and probability of success
`p`.
WARNING: This Op is NOT deterministic, as calling it twice with the
same inputs will NOT give the same result. This is a violation of
Theano's contract for Ops
:param n: Tensor type vector or scalar representing the number of
              experiments for each row. If `n` is a scalar, it will be
used for each row.
:param p: Sparse matrix of probability where each row is a probability
              vector representing the probability of success. N.B. Each row
must sum to one.
:return: A sparse matrix of random integers from a multinomial density
for each row.
    :note: It will work only if `p` has csr format.
"""
__props__ = ()
def make_node(self, n, p):
n = tensor.as_tensor_variable(n)
p = as_sparse_variable(p)
assert p.format in ["csr", "csc"]
return gof.Apply(self, [n, p], [p.type()])
def perform(self, node, inputs, outputs):
(n, p) = inputs
(out,) = outputs
assert _is_sparse(p)
if p.format != "csr":
raise NotImplementedError
out[0] = p.copy()
if n.ndim == 0:
for i in range(p.shape[0]):
k, l = p.indptr[i], p.indptr[i + 1]
out[0].data[k:l] = np.random.multinomial(n, p.data[k:l])
elif n.ndim == 1:
if n.shape[0] != p.shape[0]:
raise ValueError(
"The number of element of n must be "
"the same as the number of row of p."
)
for i in range(p.shape[0]):
k, l = p.indptr[i], p.indptr[i + 1]
out[0].data[k:l] = np.random.multinomial(n[i], p.data[k:l])
def grad(self, inputs, outputs_gradients):
comment_n = "No gradient exists for the number of samples in class\
Multinomial of theano/sparse/sandbox/sp2.py"
comment_p = "No gradient exists for the prob of success in class\
Multinomial of theano/sparse/sandbox/sp2.py"
return [
theano.gradient.grad_undefined(
op=self, x_pos=0, x=inputs[0], comment=comment_n
),
theano.gradient.grad_undefined(
op=self, x_pos=1, x=inputs[1], comment=comment_p
),
]
def infer_shape(self, fgraph, node, ins_shapes):
return [ins_shapes[1]]
multinomial = Multinomial()
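# Hedged usage sketch (kept as comments; assumes a working Theano + SciPy
# install, which this sandbox module already requires):
#
#   import theano
#   x = theano.sparse.csr_matrix(name="x", dtype="float64")
#   f = theano.function([x], poisson(x))
#   means = scipy.sparse.csr_matrix(np.array([[1.0, 0.0], [0.0, 4.0]]))
#   print(f(means).toarray())   # random integer draws; zeros stay zero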
| [
[
[
7,
18
],
[
1597,
1599
],
[
1608,
1610
],
[
3402,
3404
],
[
6002,
6004
],
[
6402,
6404
]
],
[
[
26,
38
],
[
3468,
3473
]
],
[
[
47,
53
],
[
1874,
1880
],
[
4037,
4043
],
[
4123,
4129
],
[
4209,
4215
],
[
6802,
6808
],
[
6926,
6932
]
],
[
[
73,
76
],
[
1351,
1354
],
[
3173,
3176
],
[
5593,
5596
]
],
[
[
78,
84
],
[
2910,
2916
],
[
2951,
2957
],
[
2996,
3002
],
[
5472,
5478
]
],
[
[
111,
113
],
[
826,
828
],
[
2112,
2114
],
[
4560,
4562
]
],
[
[
152,
159
],
[
762,
769
]
],
[
[
165,
175
],
[
3218,
3228
]
],
[
[
181,
191
],
[
1493,
1503
],
[
5740,
5750
]
],
[
[
197,
215
],
[
1314,
1332
],
[
5513,
5531
]
],
[
[
221,
228
],
[
788,
795
]
],
[
[
284,
299
],
[
3056,
3071
],
[
3141,
3156
]
],
[
[
301,
313
],
[
3098,
3110
]
],
[
[
745,
759
]
],
[
[
770,
785
]
],
[
[
818,
825
],
[
2085,
2092
]
],
[
[
2075,
2082
]
],
[
[
2103,
2111
],
[
4384,
4392
],
[
4427,
4435
],
[
4470,
4478
],
[
4513,
4521
]
],
[
[
4368,
4381
]
],
[
[
4411,
4424
]
],
[
[
4454,
4467
]
],
[
[
4497,
4510
]
],
[
[
4548,
4559
],
[
7149,
7160
]
],
[
[
7135,
7146
]
]
] |
"""
WSGI config for getDoc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'getDoc.settings.production')
application = get_wsgi_application()
| [
[
[
229,
231
],
[
284,
286
]
],
[
[
262,
282
],
[
377,
397
]
],
[
[
363,
374
]
]
] |
#!/usr/bin/python
import deck
while(True):
p = deck.Deck()
p.shuffle()
pai = p.deal(5)
# assert len(pai) == 5, "??????"
del p
pai.sort(key=lambda x:x.figure)
x = True
for i in range(1, len(pai)):
if(pai[i].suit == pai[i-1].suit and (pai[i].figure == pai[i-1].figure + 1 or pai[i].figure == 10 and pai[i-1].figure == 1)):
continue
else:
x = False
break
if(x == True):
for i in pai:
print(i,end="\t")
print()
| [
[
[
26,
30
],
[
53,
57
]
],
[
[
49,
50
],
[
69,
70
],
[
91,
92
],
[
146,
147
]
],
[
[
85,
88
],
[
152,
155
],
[
223,
226
],
[
241,
244
],
[
256,
259
],
[
275,
278
],
[
292,
295
],
[
315,
318
],
[
339,
342
],
[
474,
477
]
],
[
[
188,
189
],
[
445,
446
]
],
[
[
205,
206
],
[
245,
246
],
[
260,
261
],
[
279,
280
],
[
296,
297
],
[
319,
320
],
[
343,
344
]
],
[
[
410,
411
],
[
445,
446
]
],
[
[
469,
470
],
[
497,
498
]
]
] |
"""Support for Eliot tracing with Dask computations."""
from pyrsistent import PClass, field
from dask import compute, optimize
from dask.core import toposort, get_dependencies
from . import start_action, current_action, Action, Message
class _RunWithEliotContext(PClass):
"""
Run a callable within an Eliot context.
@ivar task_id: The serialized Eliot task ID.
@ivar func: The function that Dask wants to run.
@ivar key: The key in the Dask graph.
@ivar dependencies: The keys in the Dask graph this depends on.
"""
task_id = field(type=str)
func = field() # callable
key = field(type=str)
dependencies = field()
# Pretend to be underlying callable for purposes of equality; necessary for
# optimizer to be happy:
def __eq__(self, other):
return self.func == other
def __ne__(self, other):
return self.func != other
def __hash__(self):
return hash(self.func)
def __call__(self, *args, **kwargs):
with Action.continue_task(task_id=self.task_id):
Message.log(
message_type="dask:task",
key=self.key,
dependencies=self.dependencies
)
return self.func(*args, **kwargs)
def compute_with_trace(*args):
"""Do Dask compute(), but with added Eliot tracing.
Dask is a graph of tasks, but Eliot logs trees. So we need to emulate a
    graph using a tree. We do this by making an Eliot action for each task, but
having it list the tasks it depends on.
We use the following algorithm:
1. Create a top-level action.
2. For each entry in the dask graph, create a child with
serialize_task_id. Do this in likely order of execution, so that
       if B depends on A the task level of B is higher than the task level
of A.
3. Replace each function with a wrapper that uses the corresponding
task ID (with Action.continue_task), and while it's at it also
records which other things this function depends on.
Known issues:
1. Retries will confuse Eliot. Probably need different
distributed-tree mechanism within Eliot to solve that.
"""
# 1. Create top-level Eliot Action:
with start_action(action_type="dask:compute"):
# In order to reduce logging verbosity, add logging to the already
# optimized graph:
optimized = optimize(*args, optimizations=[_add_logging])
return compute(*optimized, optimize_graph=False)
def _add_logging(dsk, ignore=None):
"""
Add logging to a Dask graph.
@param dsk: The Dask graph.
@return: New Dask graph.
"""
ctx = current_action()
result = {}
# Use topological sort to ensure Eliot actions are in logical order of
# execution in Dask:
keys = toposort(dsk)
# Give each key a string name. Some keys are just aliases to other
# keys, so make sure we have underlying key available. Later on might
# want to shorten them as well.
def simplify(k):
if isinstance(k, str):
return k
return "-".join(str(o) for o in k)
key_names = {}
for key in keys:
value = dsk[key]
if not callable(value) and value in keys:
# It's an alias for another key:
key_names[key] = key_names[value]
else:
key_names[key] = simplify(key)
# 2. Create Eliot child Actions for each key, in topological order:
key_to_action_id = {
key: str(ctx.serialize_task_id(), "utf-8")
for key in keys
}
# 3. Replace function with wrapper that logs appropriate Action:
for key in keys:
func = dsk[key][0]
args = dsk[key][1:]
if not callable(func):
# This key is just an alias for another key, no need to add
# logging:
result[key] = dsk[key]
continue
wrapped_func = _RunWithEliotContext(
task_id=key_to_action_id[key],
func=func,
key=key_names[key],
dependencies=[key_names[k] for k in get_dependencies(dsk, key)],
)
result[key] = (wrapped_func, ) + tuple(args)
assert result.keys() == dsk.keys()
return result
__all__ = ["compute_with_trace"]
| [
[
[
80,
86
],
[
268,
274
]
],
[
[
88,
93
],
[
564,
569
],
[
591,
596
],
[
621,
626
],
[
656,
661
]
],
[
[
112,
119
],
[
2512,
2519
]
],
[
[
121,
129
],
[
2451,
2459
]
],
[
[
152,
160
],
[
2859,
2867
]
],
[
[
162,
178
],
[
4133,
4149
]
],
[
[
193,
205
],
[
2287,
2299
]
],
[
[
207,
221
],
[
2714,
2728
]
],
[
[
223,
229
],
[
1013,
1019
]
],
[
[
231,
238
],
[
1069,
1076
]
],
[
[
247,
267
],
[
3965,
3985
]
],
[
[
1267,
1285
]
],
[
[
2560,
2572
],
[
2482,
2494
]
],
[
[
4285,
4292
]
]
] |
#!/usr/bin/python
# -*- coding:utf-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Copyright 2016 Everley #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import copy
class Field(object):
"""
The field object of :class:`Event`.
"""
field_type = None
"""
The class of the field.
"""
default = None
"""
    The default value of the field.
"""
def __init__(self, field_type, default=None):
self.field_type = field_type
self.default = default
self.match(default)
def match(self, value):
"""
        Raise a :class:`TypeError` if `value` is not an instance of `self.field_type`.
:param value: The value to match.
"""
if value is not None and not isinstance(value, self.field_type):
raise TypeError('expect %s, not %s' %
(self.field_type, type(value)))
class EventMetaClass(type):
"""
The metaclass to help new :class:`Event`.
"""
def __new__(cls, name, bases, attrs):
fields = []
mappings = {}
params = {}
new_attrs = {}
for k, v in attrs.items():
if isinstance(v, Field):
fields.append(v)
mappings[k] = v
params[k] = v.default
else:
new_attrs[k] = v
new_attrs['__fields__'] = fields
new_attrs['__mappings__'] = mappings
new_attrs['__params__'] = params
new_attrs['__actual_params__'] = None
new_attrs['__tag__'] = attrs['__tag__'] \
if '__tag__' in attrs else ''
new_attrs['__description__'] = attrs['__description__'] \
if '__description__' in attrs else ''
return super(EventMetaClass, cls).__new__(cls, name, bases, new_attrs)
class Event(object):
"""
The base class of specific event.
"""
__metaclass__ = EventMetaClass
def __init__(self, **kwargs):
self.__actual_params__ = copy.deepcopy(self.__params__)
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
def __getattr__(self, key):
if key in self.__actual_params__:
return self.__actual_params__[key]
else:
raise AttributeError(
"%s has no param `%s`" %
(type(self), key))
def __setattr__(self, key, value):
if key in ['__actual_params__']:
return super(Event, self).__setattr__(key, value)
if key in self.__actual_params__:
self.__mappings__[key].match(value)
self.__actual_params__[key] = value
else:
raise AttributeError(
"%s has no param `%s`" %
(type(self), key))
@property
def params(self):
"""
A `dict` which is a deep copy of the event's params.
"""
return copy.deepcopy(self.__actual_params__)
@classmethod
def tag(cls):
"""
The tag of the event.
"""
return cls.__tag__
@classmethod
def description(cls):
"""
The description of the event.
"""
return cls.__description__
@classmethod
def key(cls):
"""
A unique string for the event.
"""
return '%s.%s' % (cls.__module__, cls.__name__)
@property
def no_field(self):
"""
return True if the event doesn't have any field.
"""
return len(self.__params__) == 0
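# Hedged usage sketch (Python 2 only, matching the __metaclass__/iteritems code
# above); the event class, tag, and fields are invented for illustration.
if __name__ == '__main__':
    class UserCreated(Event):
        __tag__ = 'user'
        __description__ = 'a user account was created'
        user_id = Field(int, 0)
        name = Field(str, '')

    evt = UserCreated(user_id=42, name='everley')
    print(evt.params)          # {'user_id': 42, 'name': 'everley'} (key order may vary)
    print(UserCreated.tag())   # 'user'
    print(evt.no_field)        # False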
| [
[
[
1250,
1254
],
[
3076,
3080
],
[
3970,
3974
]
],
[
[
1263,
1268
],
[
2273,
2278
]
],
[
[
1996,
2010
],
[
2993,
3007
],
[
2837,
2851
]
],
[
[
2903,
2908
],
[
3534,
3539
]
]
] |
import torch.utils.data as data
class CombineDBs(data.Dataset):
def __init__(self, dataloaders, excluded=None):
self.dataloaders = dataloaders
self.excluded = excluded
self.im_ids = []
# Combine object lists
for dl in dataloaders:
for elem in dl.im_ids:
if elem not in self.im_ids:
self.im_ids.append(elem)
# Exclude
if excluded:
for dl in excluded:
for elem in dl.im_ids:
if elem in self.im_ids:
self.im_ids.remove(elem)
# Get object pointers
self.obj_list = []
self.im_list = []
new_im_ids = []
obj_counter = 0
num_images = 0
for ii, dl in enumerate(dataloaders):
for jj, curr_im_id in enumerate(dl.im_ids):
if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids):
flag = False
new_im_ids.append(curr_im_id)
for kk in range(len(dl.obj_dict[curr_im_id])):
if dl.obj_dict[curr_im_id][kk] != -1:
self.obj_list.append({'db_ii': ii, 'obj_ii': dl.obj_list.index([jj, kk])})
flag = True
obj_counter += 1
self.im_list.append({'db_ii': ii, 'im_ii': jj})
if flag:
num_images += 1
self.im_ids = new_im_ids
print('Combined number of images: {:d}\nCombined number of objects: {:d}'.format(num_images, len(self.obj_list)))
def __getitem__(self, index):
_db_ii = self.obj_list[index]["db_ii"]
_obj_ii = self.obj_list[index]['obj_ii']
sample = self.dataloaders[_db_ii].__getitem__(_obj_ii)
if 'meta' in sample.keys():
sample['meta']['db'] = str(self.dataloaders[_db_ii])
return sample
def __len__(self):
return len(self.obj_list)
def __str__(self):
include_db = [str(db) for db in self.dataloaders]
exclude_db = [str(db) for db in self.excluded]
return 'Included datasets:'+str(include_db)+'\n'+'Excluded datasets:'+str(exclude_db)
if __name__ == "__main__":
import matplotlib.pyplot as plt
from dataloaders import pascal
from dataloaders import sbd
import torch
import numpy as np
import dataset.custom_transforms as tr
from torchvision import transforms
composed_transforms_tr = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-15, 15), scales=(.75, 1.25)),
tr.FixedResize(resolutions={'image': (450, 450), 'gt': (450, 450)}),
tr.DistanceMap(v=0.15, elem='gt'),
tr.ConcatInputs(elems=('image', 'distance_map')),
tr.ToTensor()])
composed_transforms_ts = transforms.Compose([
tr.FixedResize(resolutions={'image': (450, 450), 'gt': (450, 450)}),
tr.DistanceMap(v=0.15, elem='gt'),
tr.ConcatInputs(elems=('image', 'distance_map')),
tr.ToTensor()])
pascal_voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts, retname=True)
sbd = sbd.SBDSegmentation(split=['train', 'val'], transform=composed_transforms_tr, retname=True)
pascal_voc_train = pascal.VOCSegmentation(split='train', transform=composed_transforms_tr, retname=True)
dataset = CombineDBs([pascal_voc_train, sbd], excluded=[pascal_voc_val])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
dismap = sample['distance_map'][jj].numpy()
gt = sample['gt'][jj].numpy()
gt[gt > 0] = 255
gt = np.array(gt[0]).astype(np.uint8)
dismap = np.array(dismap[0]).astype(np.uint8)
display = 0.9 * gt + 0.4 * dismap
display = display.astype(np.uint8)
plt.figure()
plt.title('display')
plt.imshow(display, cmap='gray')
if ii == 1:
break
plt.show(block=True) | [
[
[
7,
31
],
[
51,
55
]
],
[
[
40,
50
],
[
3432,
3442
]
],
[
[
2284,
2308
],
[
4030,
4033
],
[
4055,
4058
],
[
4088,
4091
],
[
4164,
4167
]
],
[
[
2337,
2343
],
[
3122,
3128
],
[
3331,
3337
]
],
[
[
2372,
2375
],
[
3216,
3219
]
],
[
[
2387,
2392
],
[
3512,
3517
]
],
[
[
2404,
2415
],
[
3834,
3836
],
[
3857,
3859
],
[
3888,
3890
],
[
3915,
3917
],
[
4008,
4010
]
],
[
[
2427,
2458
],
[
2557,
2559
],
[
2592,
2594
],
[
2653,
2655
],
[
2730,
2732
],
[
2773,
2775
],
[
2831,
2833
],
[
2906,
2908
],
[
2983,
2985
],
[
3026,
3028
],
[
3084,
3086
]
],
[
[
2487,
2497
],
[
2528,
2538
],
[
2877,
2887
]
],
[
[
2503,
2525
],
[
3270,
3292
],
[
3379,
3401
]
],
[
[
2852,
2874
],
[
3168,
3190
]
],
[
[
3105,
3119
],
[
3478,
3492
]
],
[
[
3210,
3213
],
[
3462,
3465
]
],
[
[
3312,
3328
],
[
3444,
3460
]
],
[
[
3422,
3429
],
[
3540,
3547
]
],
[
[
3499,
3509
],
[
3625,
3635
]
],
[
[
3601,
3603
],
[
4133,
4135
]
],
[
[
3605,
3611
],
[
3662,
3668
],
[
3711,
3717
],
[
3763,
3769
]
],
[
[
3650,
3652
],
[
3734,
3736
],
[
3776,
3778
]
],
[
[
3702,
3708
],
[
3897,
3903
]
],
[
[
3758,
3760
],
[
3800,
3802
],
[
3803,
3805
],
[
3843,
3845
]
],
[
[
3829,
3831
],
[
3953,
3955
]
],
[
[
3879,
3885
],
[
3964,
3970
]
],
[
[
3937,
3944
],
[
3993,
4000
]
],
[
[
3983,
3990
],
[
4099,
4106
]
]
] |
try:
import vim
except ImportError:
raise ImportError(
'"vim" is not available. This module require to be loaded from Vim.'
)
#
# NOTE
# Vim use a global namespace for python/python3 so define a unique name
# function and write a code inside of the function to prevent conflicts.
#
def _vim_vital_web_api_github_main():
"""A namespace function for Vital.Web.API.GitHub"""
import re
import sys
import ssl
import collections
from itertools import chain
from threading import Lock, Thread
try:
import json
except ImportError:
import simplejson as json
try:
from urllib.request import urlopen, Request
from urllib.parse import (urlparse, parse_qs, urlencode, urlunparse)
except ImportError:
from urllib2 import urlopen, Request
from urllib import urlencode
from urlparse import (urlparse, parse_qs, urlunparse)
DEFAULT_INDICATOR = (
'Requesting entries and converting into '
'JSON %%(page)d/%(page_count)d ...'
)
def format_exception():
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
return "%s: %s at %s:%d" % (
exc_obj.__class__.__name__,
exc_obj, filename, lineno,
)
def to_vim(obj):
if obj is None:
return ''
elif isinstance(obj, bool):
return int(obj)
elif isinstance(obj, dict):
return dict([to_vim(k), to_vim(v)] for k, v in obj.items())
elif isinstance(obj, (list, tuple)):
return list(to_vim(v) for v in obj)
return obj
def build_headers(token):
return {'Authorization': 'token %s' % token} if token else {}
def build_url(url, **kwargs):
scheme, netloc, path, params, query, fragment = urlparse(url)
p = parse_qs(query)
p.update(kwargs)
return urlunparse([
scheme, netloc, path, params,
urlencode(p, doseq=True), fragment
])
def request(url, headers={}, method=None):
if method:
if sys.version_info.major >= 3:
req = Request(url, headers=headers, method=method)
else:
req = Request(url, headers=headers)
req.get_method = lambda: method
else:
req = Request(url, headers=headers)
context = ssl._create_unverified_context()
res = urlopen(req, context=context)
if not hasattr(res, 'getheader'):
# urllib2 does not have getheader
res.getheader = lambda name, self=res: self.info().getheader(name)
return res
def request_head(url, name, headers={}):
res = request(url, headers=headers, method='HEAD')
return res.getheader(name)
def request_json(url, headers={}, **kwargs):
url = build_url(url, **kwargs)
res = request(url, headers=headers)
obj = json.loads(res.read().decode('utf-8'))
return to_vim(obj)
def _request_entries(lock, queue, entries_per_pages, url,
headers, callback=None):
try:
while True:
page, indicator = queue.popleft()
entries = request_json(url, headers=headers, page=page)
entries_per_pages.append([page, entries])
if callback:
message = indicator % {'page': len(entries_per_pages)}
if hasattr(vim, 'async_call'):
with lock:
vim.async_call(callback, message)
else:
with lock:
callback(message)
except IndexError:
pass
except Exception as e:
# clear queue to stop other threads
queue.clear()
entries_per_pages.append(e)
def request_entries(url, token,
indicator=DEFAULT_INDICATOR,
page_start=1, page_end=0,
nprocess=20, callback=None, **kwargs):
# the followings might be str when specified from Vim.
page_start = int(page_start)
page_end = int(page_end)
nprocess = int(nprocess)
url = build_url(url, **kwargs)
headers = build_headers(token)
lock = Lock()
queue = collections.deque()
entries_per_pages = collections.deque()
# figure out the number of pages from HEAD request
if page_end == 0:
if callback:
callback('Requesting the total number of pages ...')
response_link = request_head(url, 'link', headers=headers)
if response_link:
m = re.search(
'<.*?[?&]page=(\d+)[^>]*>; rel="last"', response_link
)
page_end = int(m.group(1)) if m else 1
else:
page_end = 1
# prepare task queue
for page in range(page_start, page_end + 1):
queue.append([page, indicator % {
'url': url,
'page_count': page_end - page_start + 1
}])
# start workers
kwargs = dict(
target=_request_entries,
args=(lock, queue, entries_per_pages, url, headers, callback),
)
workers = [Thread(**kwargs) for n in range(nprocess)]
for worker in workers:
worker.start()
for worker in workers:
worker.join()
# check if sub-thread throw exceptions or not
exceptions = list(
filter(lambda x: not isinstance(x, list), entries_per_pages)
)
if len(exceptions):
raise exceptions[0]
# merge and flatten entries
return list(chain.from_iterable(map(
lambda x: x[1], sorted(entries_per_pages, key=lambda x: x[0])
)))
def echo_status_vim(indicator):
vim.command('redraw | echo "%s"' % indicator)
if sys.version_info < (3, 0, 0):
def ensure_unicode(s, encoding):
if isinstance(s, unicode):
return s
else:
return s.decode(encoding)
else:
def ensure_unicode(s, encoding):
if not isinstance(s, bytes):
return s
else:
return s.decode(encoding)
# Execute a main code
namespace = {}
try:
# Override 'request' with 'pseudo_requst' if exists
try:
request = _vim_vital_web_api_github_test_pseudo_request
except NameError:
pass
encoding = vim.eval('&encoding')
kwargs = vim.eval('kwargs')
kwargs = { ensure_unicode(k, encoding): ensure_unicode(v, encoding)
for k, v in kwargs.items()}
if kwargs.pop('verbose', 1):
kwargs['callback'] = echo_status_vim
entries = request_entries(**kwargs)
namespace['entries'] = entries
except:
namespace['exception'] = format_exception()
return namespace
# Call a namespace function
_vim_vital_web_api_github_response = _vim_vital_web_api_github_main()
| [
[
[
16,
19
],
[
6719,
6722
],
[
6758,
6761
],
[
3552,
3555
],
[
3635,
3638
],
[
6034,
6037
]
],
[
[
312,
342
],
[
7221,
7251
]
],
[
[
7184,
7218
]
]
] |
#!/bin/python3
import sys
import socket
from datetime import datetime
#Defining our target
if len (sys.argv) == 2:
target=socket.gethostbyname(sys.argv[1]) #translate hostname to IPv4
else:
print("invalid amount of arguments.")
print("Syntax: python3 scanner.py <ip>")
#add a pretty banner
print("-" * 50)
print("Scanning target " + target)
print("Time started: " +str(datetime.now()))
print("-"*50)
try:
for port in range(50,85):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.setdefaulttimeout(1)
result = s.connect_ex((target,port))# returns an error indicator
if result ==0:
print("Port {} is open".format(port))
s.close()
except KeyboardInterrupt:
print("\n Exiting program...")
sys.exit()
except socket.gaierror:
print("\n Hostname could not be resolved...")
sys.exit()
except socket.error:
print("\n Could not connect to server...")
sys.exit()
| [
[
[
23,
26
],
[
101,
104
],
[
146,
149
],
[
723,
726
],
[
807,
810
],
[
888,
891
]
],
[
[
34,
40
],
[
125,
131
],
[
446,
452
],
[
460,
466
],
[
476,
482
],
[
498,
504
],
[
742,
748
],
[
826,
832
]
],
[
[
62,
70
],
[
376,
384
]
],
[
[
118,
124
],
[
340,
346
],
[
551,
557
]
],
[
[
418,
422
],
[
558,
562
],
[
644,
648
]
],
[
[
442,
443
],
[
537,
538
],
[
653,
654
]
],
[
[
528,
534
],
[
598,
604
]
]
] |
"""
OS abstraction
"""
import os, shutil, os.path, re, traceback
import wx
from . import SystemInfo
from .StringOps import mbcsEnc, urlQuote, pathnameFromUrl, pathEnc
# import WindowsHacks
try:
import WindowsHacks
except:
if SystemInfo.isWindows():
traceback.print_exc()
WindowsHacks = None
try:
import GtkHacks
except:
import ExceptionLogger
ExceptionLogger.logOptionalComponentException(
"Initialize GTK hacks in OsAbstract.py")
GtkHacks = None
# Define startFile
if SystemInfo.isWindows():
if SystemInfo.isWinNT() and SystemInfo.isUnicode() and WindowsHacks:
startFile = WindowsHacks.startFile
else:
def startFile(mainControl, link):
os.startfile(mbcsEnc(link, "replace")[0])
else:
def startFile(mainControl, link):
# We need mainControl only for this version of startFile()
startPath = mainControl.getConfig().get("main", "fileLauncher_path", u"")
if startPath == u"":
wx.LaunchDefaultBrowser(link)
return
if link.startswith("file:"):
link = pathnameFromUrl(link)
os.spawnlp(os.P_NOWAIT, startPath, startPath, link)
# Define copyFile
if SystemInfo.isWinNT() and WindowsHacks:
copyFile = WindowsHacks.copyFile
moveFile = WindowsHacks.moveFile
deleteFile = WindowsHacks.deleteFile
else:
# TODO Mac version
def copyFile(srcPath, dstPath):
"""
Copy file from srcPath to dstPath. dstPath may be overwritten if
existing already. dstPath must point to a file, not a directory.
If some directories in dstPath do not exist, they are created.
This currently just calls shutil.copy2() TODO!
"""
dstDir = os.path.dirname(dstPath)
if not os.path.exists(pathEnc(dstDir)):
os.makedirs(dstDir)
shutil.copy2(srcPath, dstPath)
def moveFile(srcPath, dstPath):
"""
Move file from srcPath to dstPath. dstPath may be overwritten if
existing already. dstPath must point to a file, not a directory.
If some directories in dstPath do not exist, they are created.
"""
dstDir = os.path.dirname(dstPath)
if not os.path.exists(pathEnc(dstDir)):
os.makedirs(dstDir)
shutil.move(srcPath, dstPath)
def deleteFile(path):
"""
Delete file or directory path.
"""
# TODO: Check for directories
# os.rmdir(path) ?
if os.path.isfile(path) or os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
os.rmdir(path)
# Define samefile
if SystemInfo.isWindows():
if WindowsHacks:
def samefile(path1, path2):
# Not fully reliable. Does anybody know something better?
if WindowsHacks.getLongPath(path1).lower() == \
WindowsHacks.getLongPath(path2).lower():
return True
return WindowsHacks.getLongPath(os.path.abspath(path1)).lower() == \
WindowsHacks.getLongPath(os.path.abspath(path2)).lower()
else:
def samefile(path1, path2):
return os.path.abspath(path1) == os.path.abspath(path2)
else:
samefile = os.path.samefile
if WindowsHacks:
def normalizePath(path):
return WindowsHacks.getLongPath(os.path.abspath(path)).lower()
else:
def normalizePath(path):
return os.path.normcase(os.path.abspath(path))
# Define checkForOtherInstances
# If defined properly it returns a list of process identifier of other WikidPad
# processes. This list should be empty if option "Single process per user"
# is selected. If it is not, there is an error.
if WindowsHacks:
checkForOtherInstances = WindowsHacks.checkForOtherInstances
else:
def checkForOtherInstances():
return []
# Define createInterceptCollection, createClipboardInterceptor (may return None)
# Define supportsClipboardInterceptor
# Fallback def.
def supportsClipboardInterceptor():
return False
def createInterceptCollection(interceptors=None):
return None
def createClipboardInterceptor(callingWindow):
return None
if SystemInfo.isWindows():
if WindowsHacks:
def supportsClipboardInterceptor():
return True
def createInterceptCollection(interceptors=None):
return WindowsHacks.WinProcInterceptCollection(interceptors)
def createClipboardInterceptor(callingWindow):
return WindowsHacks.ClipboardCatchIceptor(callingWindow)
else:
if GtkHacks:
def supportsClipboardInterceptor():
return True
def createInterceptCollection(interceptors=None):
return GtkHacks.FakeInterceptCollection(interceptors)
def createClipboardInterceptor(callingWindow):
return GtkHacks.ClipboardCatchFakeIceptor(callingWindow)
if WindowsHacks:
translateAcceleratorByKbLayout = WindowsHacks.translateAcceleratorByKbLayout
else:
def translateAcceleratorByKbLayout(accStr):
return accStr
| [
[
[
35,
37
]
],
[
[
39,
45
],
[
1965,
1971
],
[
2435,
2441
]
],
[
[
47,
54
],
[
3427,
3429
],
[
764,
766
],
[
1199,
1201
],
[
1210,
1212
],
[
1829,
1831
],
[
1884,
1886
],
[
1930,
1932
],
[
2299,
2301
],
[
2354,
2356
],
[
2400,
2402
],
[
2642,
2644
],
[
2666,
2668
],
[
2701,
2703
],
[
2731,
2733
],
[
2765,
2767
],
[
3172,
3174
],
[
3255,
3257
],
[
3355,
3357
],
[
3381,
3383
],
[
3537,
3539
],
[
3621,
3623
],
[
3638,
3640
]
],
[
[
56,
58
]
],
[
[
60,
69
],
[
287,
296
]
],
[
[
78,
80
],
[
1056,
1058
]
],
[
[
98,
108
],
[
254,
264
],
[
555,
565
],
[
587,
597
],
[
612,
622
],
[
1278,
1288
],
[
2807,
2817
],
[
4394,
4404
]
],
[
[
133,
140
],
[
777,
784
]
],
[
[
142,
150
]
],
[
[
152,
167
],
[
1166,
1181
]
],
[
[
169,
176
],
[
1899,
1906
],
[
2369,
2376
]
],
[
[
224,
236
],
[
639,
651
],
[
674,
686
],
[
1303,
1315
],
[
1333,
1345
],
[
1371,
1383
],
[
1411,
1423
],
[
2839,
2851
],
[
3452,
3464
],
[
3912,
3924
],
[
3956,
3968
],
[
4426,
4438
],
[
5132,
5144
],
[
5184,
5196
],
[
2977,
2989
],
[
3043,
3055
],
[
3147,
3159
],
[
3230,
3242
],
[
3512,
3524
],
[
4589,
4601
],
[
4719,
4731
]
],
[
[
314,
326
],
[
639,
651
],
[
674,
686
],
[
1303,
1315
],
[
1333,
1345
],
[
1371,
1383
],
[
1411,
1423
],
[
2839,
2851
],
[
3452,
3464
],
[
3912,
3924
],
[
3956,
3968
],
[
4426,
4438
],
[
5132,
5144
],
[
5184,
5196
],
[
2977,
2989
],
[
3043,
3055
],
[
3147,
3159
],
[
3230,
3242
],
[
3512,
3524
],
[
4589,
4601
],
[
4719,
4731
]
],
[
[
354,
362
],
[
4784,
4792
],
[
4943,
4951
],
[
5066,
5074
]
],
[
[
384,
399
],
[
405,
420
]
],
[
[
511,
519
],
[
4784,
4792
],
[
4943,
4951
],
[
5066,
5074
]
],
[
[
662,
671
]
],
[
[
721,
730
]
],
[
[
822,
831
]
],
[
[
1322,
1330
]
],
[
[
1360,
1368
]
],
[
[
1398,
1408
]
],
[
[
1479,
1487
]
],
[
[
2007,
2015
]
],
[
[
2478,
2488
]
],
[
[
2866,
2874
]
],
[
[
3311,
3319
]
],
[
[
3416,
3424
]
],
[
[
3475,
3488
]
],
[
[
3584,
3597
]
],
[
[
3931,
3953
]
],
[
[
4008,
4030
]
],
[
[
4205,
4233
]
],
[
[
4260,
4285
]
],
[
[
4328,
4354
]
],
[
[
4453,
4481
]
],
[
[
4523,
4548
]
],
[
[
4656,
4682
]
],
[
[
4807,
4835
]
],
[
[
4877,
4902
]
],
[
[
5003,
5029
]
],
[
[
5151,
5181
]
],
[
[
5244,
5274
]
]
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['PolicyArgs', 'Policy']
@pulumi.input_type
class PolicyArgs:
def __init__(__self__, *,
api_management_id: pulumi.Input[str],
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Policy resource.
:param pulumi.Input[str] api_management_id: The ID of the API Management service. Changing this forces a new API Management service Policy to be created.
:param pulumi.Input[str] xml_content: The XML Content for this Policy as a string.
:param pulumi.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
"""
pulumi.set(__self__, "api_management_id", api_management_id)
if xml_content is not None:
pulumi.set(__self__, "xml_content", xml_content)
if xml_link is not None:
pulumi.set(__self__, "xml_link", xml_link)
@property
@pulumi.getter(name="apiManagementId")
def api_management_id(self) -> pulumi.Input[str]:
"""
The ID of the API Management service. Changing this forces a new API Management service Policy to be created.
"""
return pulumi.get(self, "api_management_id")
@api_management_id.setter
def api_management_id(self, value: pulumi.Input[str]):
pulumi.set(self, "api_management_id", value)
@property
@pulumi.getter(name="xmlContent")
def xml_content(self) -> Optional[pulumi.Input[str]]:
"""
The XML Content for this Policy as a string.
"""
return pulumi.get(self, "xml_content")
@xml_content.setter
def xml_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "xml_content", value)
@property
@pulumi.getter(name="xmlLink")
def xml_link(self) -> Optional[pulumi.Input[str]]:
"""
A link to a Policy XML Document, which must be publicly available.
"""
return pulumi.get(self, "xml_link")
@xml_link.setter
def xml_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "xml_link", value)
@pulumi.input_type
class _PolicyState:
def __init__(__self__, *,
api_management_id: Optional[pulumi.Input[str]] = None,
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Policy resources.
:param pulumi.Input[str] api_management_id: The ID of the API Management service. Changing this forces a new API Management service Policy to be created.
:param pulumi.Input[str] xml_content: The XML Content for this Policy as a string.
:param pulumi.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
"""
if api_management_id is not None:
pulumi.set(__self__, "api_management_id", api_management_id)
if xml_content is not None:
pulumi.set(__self__, "xml_content", xml_content)
if xml_link is not None:
pulumi.set(__self__, "xml_link", xml_link)
@property
@pulumi.getter(name="apiManagementId")
def api_management_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the API Management service. Changing this forces a new API Management service Policy to be created.
"""
return pulumi.get(self, "api_management_id")
@api_management_id.setter
def api_management_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_management_id", value)
@property
@pulumi.getter(name="xmlContent")
def xml_content(self) -> Optional[pulumi.Input[str]]:
"""
The XML Content for this Policy as a string.
"""
return pulumi.get(self, "xml_content")
@xml_content.setter
def xml_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "xml_content", value)
@property
@pulumi.getter(name="xmlLink")
def xml_link(self) -> Optional[pulumi.Input[str]]:
"""
A link to a Policy XML Document, which must be publicly available.
"""
return pulumi.get(self, "xml_link")
@xml_link.setter
def xml_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "xml_link", value)
class Policy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_management_id: Optional[pulumi.Input[str]] = None,
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Manages an API Management service Policy.
> **NOTE:** This resource will, upon creation, **overwrite any existing policy in the API Management service**, as there is no feasible way to test whether the policy has been modified from the default. Similarly, when this resource is destroyed, the API Management service will revert to its default policy.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_service = azure.apimanagement.Service("exampleService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
publisher_name="pub1",
publisher_email="[email protected]",
sku_name="Developer_1")
example_named_value = azure.apimanagement.NamedValue("exampleNamedValue",
resource_group_name=example_resource_group.name,
api_management_name=example_service.name,
display_name="ExampleProperty",
value="Example Value")
example_policy = azure.apimanagement.Policy("examplePolicy",
api_management_id=example_service.id,
xml_content=(lambda path: open(path).read())("example.xml"))
```
## Import
API Management service Policys can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:apimanagement/policy:Policy example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/instance1/policies/policy
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_id: The ID of the API Management service. Changing this forces a new API Management service Policy to be created.
:param pulumi.Input[str] xml_content: The XML Content for this Policy as a string.
:param pulumi.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Manages an API Management service Policy.
> **NOTE:** This resource will, upon creation, **overwrite any existing policy in the API Management service**, as there is no feasible way to test whether the policy has been modified from the default. Similarly, when this resource is destroyed, the API Management service will revert to its default policy.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_service = azure.apimanagement.Service("exampleService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
publisher_name="pub1",
publisher_email="[email protected]",
sku_name="Developer_1")
example_named_value = azure.apimanagement.NamedValue("exampleNamedValue",
resource_group_name=example_resource_group.name,
api_management_name=example_service.name,
display_name="ExampleProperty",
value="Example Value")
example_policy = azure.apimanagement.Policy("examplePolicy",
api_management_id=example_service.id,
xml_content=(lambda path: open(path).read())("example.xml"))
```
## Import
API Management service Policys can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:apimanagement/policy:Policy example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/instance1/policies/policy
```
:param str resource_name: The name of the resource.
:param PolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_management_id: Optional[pulumi.Input[str]] = None,
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PolicyArgs.__new__(PolicyArgs)
if api_management_id is None and not opts.urn:
raise TypeError("Missing required property 'api_management_id'")
__props__.__dict__["api_management_id"] = api_management_id
__props__.__dict__["xml_content"] = xml_content
__props__.__dict__["xml_link"] = xml_link
super(Policy, __self__).__init__(
'azure:apimanagement/policy:Policy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
api_management_id: Optional[pulumi.Input[str]] = None,
xml_content: Optional[pulumi.Input[str]] = None,
xml_link: Optional[pulumi.Input[str]] = None) -> 'Policy':
"""
Get an existing Policy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_id: The ID of the API Management service. Changing this forces a new API Management service Policy to be created.
:param pulumi.Input[str] xml_content: The XML Content for this Policy as a string.
:param pulumi.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PolicyState.__new__(_PolicyState)
__props__.__dict__["api_management_id"] = api_management_id
__props__.__dict__["xml_content"] = xml_content
__props__.__dict__["xml_link"] = xml_link
return Policy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiManagementId")
def api_management_id(self) -> pulumi.Output[str]:
"""
The ID of the API Management service. Changing this forces a new API Management service Policy to be created.
"""
return pulumi.get(self, "api_management_id")
@property
@pulumi.getter(name="xmlContent")
def xml_content(self) -> pulumi.Output[str]:
"""
The XML Content for this Policy as a string.
"""
return pulumi.get(self, "xml_content")
@property
@pulumi.getter(name="xmlLink")
def xml_link(self) -> pulumi.Output[Optional[str]]:
"""
A link to a Policy XML Document, which must be publicly available.
"""
return pulumi.get(self, "xml_link")
| [
[
[
192,
200
]
],
[
[
208,
214
]
],
[
[
222,
236
],
[
370,
376
],
[
1347,
1353
],
[
1797,
1803
],
[
2167,
2173
],
[
2524,
2530
],
[
3578,
3584
],
[
4048,
4054
],
[
4418,
4424
],
[
4787,
4793
],
[
12866,
12872
],
[
13174,
13180
],
[
13400,
13406
],
[
472,
478
],
[
530,
536
],
[
593,
599
],
[
1081,
1087
],
[
1190,
1196
],
[
1284,
1290
],
[
1420,
1426
],
[
1596,
1602
],
[
1704,
1710
],
[
1732,
1738
],
[
1868,
1874
],
[
1980,
1986
],
[
2079,
2085
],
[
2108,
2114
],
[
2232,
2238
],
[
2366,
2372
],
[
2456,
2462
],
[
2485,
2491
],
[
2637,
2643
],
[
2703,
2709
],
[
2766,
2772
],
[
3312,
3318
],
[
3421,
3427
],
[
3515,
3521
],
[
3660,
3666
],
[
3837,
3843
],
[
3954,
3960
],
[
3983,
3989
],
[
4119,
4125
],
[
4231,
4237
],
[
4330,
4336
],
[
4359,
4365
],
[
4483,
4489
],
[
4617,
4623
],
[
4707,
4713
],
[
4736,
4742
],
[
4921,
4927
],
[
4998,
5004
],
[
5064,
5070
],
[
5127,
5133
],
[
7566,
7572
],
[
9703,
9709
],
[
10050,
10056
],
[
10127,
10133
],
[
10193,
10199
],
[
10256,
10262
],
[
10361,
10367
],
[
10418,
10424
],
[
11427,
11433
],
[
11473,
11479
],
[
11545,
11551
],
[
11606,
11612
],
[
11664,
11670
],
[
12480,
12486
],
[
12515,
12521
],
[
12939,
12945
],
[
13116,
13122
],
[
13236,
13242
],
[
13348,
13354
],
[
13456,
13462
],
[
13600,
13606
]
],
[
[
256,
259
]
],
[
[
261,
268
]
],
[
[
270,
278
],
[
521,
529
],
[
584,
592
],
[
1859,
1867
],
[
2070,
2078
],
[
2223,
2231
],
[
2447,
2455
],
[
2628,
2636
],
[
2694,
2702
],
[
2757,
2765
],
[
3651,
3659
],
[
3945,
3953
],
[
4110,
4118
],
[
4321,
4329
],
[
4474,
4482
],
[
4698,
4706
],
[
4912,
4920
],
[
4989,
4997
],
[
5055,
5063
],
[
5118,
5126
],
[
7557,
7565
],
[
10041,
10049
],
[
10118,
10126
],
[
10184,
10192
],
[
10247,
10255
],
[
11464,
11472
],
[
11536,
11544
],
[
11597,
11605
],
[
11655,
11663
],
[
13470,
13478
]
],
[
[
280,
288
]
],
[
[
290,
295
]
],
[
[
297,
305
],
[
4816,
4824
],
[
7426,
7434
]
],
[
[
321,
331
],
[
9657,
9667
],
[
10593,
10603
]
],
[
[
333,
340
]
],
[
[
394,
404
],
[
7522,
7532
],
[
9691,
9701
],
[
10843,
10853
],
[
10862,
10872
]
],
[
[
2548,
2560
],
[
12567,
12579
],
[
12588,
12600
]
],
[
[
4780,
4786
],
[
11215,
11221
],
[
12792,
12798
]
]
] |
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 2.2.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q-)uruwrq0lgi+fp=do3(nbh)$o_+fr4qzp5w&_n5$lq0$*ywx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'main.apps.MainConfig'
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Caracas'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_WHITELIST = [
'http://localhost:3000',
'http://localhost:8000',
'http://localhost:8080',
]
| [
[
[
316,
318
],
[
403,
405
],
[
419,
421
],
[
435,
437
],
[
2332,
2334
]
],
[
[
392,
400
],
[
2345,
2353
]
],
[
[
668,
678
]
],
[
[
801,
806
]
],
[
[
815,
828
]
],
[
[
862,
876
]
],
[
[
1138,
1148
]
],
[
[
1597,
1609
]
],
[
[
1628,
1637
]
],
[
[
2113,
2129
]
],
[
[
2237,
2246
]
],
[
[
2482,
2506
]
],
[
[
2985,
2998
]
],
[
[
3010,
3019
]
],
[
[
3041,
3049
]
],
[
[
3058,
3066
]
],
[
[
3075,
3081
]
],
[
[
3193,
3203
]
],
[
[
3218,
3239
]
]
] |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.79
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1ListConnectionsResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"count": "int",
"results": "list[V1ConnectionResponse]",
"previous": "str",
"next": "str",
}
attribute_map = {
"count": "count",
"results": "results",
"previous": "previous",
"next": "next",
}
def __init__(
self,
count=None,
results=None,
previous=None,
next=None,
local_vars_configuration=None,
): # noqa: E501
"""V1ListConnectionsResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._count = None
self._results = None
self._previous = None
self._next = None
self.discriminator = None
if count is not None:
self.count = count
if results is not None:
self.results = results
if previous is not None:
self.previous = previous
if next is not None:
self.next = next
@property
def count(self):
"""Gets the count of this V1ListConnectionsResponse. # noqa: E501
:return: The count of this V1ListConnectionsResponse. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this V1ListConnectionsResponse.
:param count: The count of this V1ListConnectionsResponse. # noqa: E501
:type: int
"""
self._count = count
@property
def results(self):
"""Gets the results of this V1ListConnectionsResponse. # noqa: E501
:return: The results of this V1ListConnectionsResponse. # noqa: E501
:rtype: list[V1ConnectionResponse]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this V1ListConnectionsResponse.
:param results: The results of this V1ListConnectionsResponse. # noqa: E501
:type: list[V1ConnectionResponse]
"""
self._results = results
@property
def previous(self):
"""Gets the previous of this V1ListConnectionsResponse. # noqa: E501
:return: The previous of this V1ListConnectionsResponse. # noqa: E501
:rtype: str
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this V1ListConnectionsResponse.
:param previous: The previous of this V1ListConnectionsResponse. # noqa: E501
:type: str
"""
self._previous = previous
@property
def next(self):
"""Gets the next of this V1ListConnectionsResponse. # noqa: E501
:return: The next of this V1ListConnectionsResponse. # noqa: E501
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this V1ListConnectionsResponse.
:param next: The next of this V1ListConnectionsResponse. # noqa: E501
:type: str
"""
self._next = next
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ListConnectionsResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ListConnectionsResponse):
return True
return self.to_dict() != other.to_dict()
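# --- Illustrative usage sketch (not part of the generated module above) ---
# Shows how openapi_types / attribute_map drive serialization: construct the
# model and round-trip it through to_dict(). The values are made up for the
# example.
if __name__ == "__main__":
    page = V1ListConnectionsResponse(count=1, results=[], previous=None, next=None)
    print(page.to_dict())  # {'count': 1, 'results': [], 'previous': None, 'next': None}
    print(page.to_str())   # pprint-formatted version of the same dict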
| [
[
[
878,
884
],
[
5633,
5639
]
],
[
[
892,
894
]
],
[
[
917,
920
],
[
4770,
4773
]
],
[
[
961,
974
],
[
2074,
2087
]
],
[
[
983,
1008
],
[
5873,
5898
],
[
6096,
6121
]
]
] |
from unittest import TestCase
import numpy as np
from pyecsca.sca import Trace, trim, reverse, pad
class EditTests(TestCase):
def setUp(self):
self._trace = Trace(np.array([10, 20, 30, 40, 50], dtype=np.dtype("i1")))
def test_trim(self):
result = trim(self._trace, 2)
self.assertIsNotNone(result)
np.testing.assert_equal(result.samples, np.array([30, 40, 50], dtype=np.dtype("i1")))
result = trim(self._trace, end=3)
self.assertIsNotNone(result)
np.testing.assert_equal(result.samples, np.array([10, 20, 30], dtype=np.dtype("i1")))
with self.assertRaises(ValueError):
trim(self._trace, 5, 1)
def test_reverse(self):
result = reverse(self._trace)
self.assertIsNotNone(result)
np.testing.assert_equal(result.samples,
np.array([50, 40, 30, 20, 10], dtype=np.dtype("i1")))
def test_pad(self):
result = pad(self._trace, 2)
self.assertIsNotNone(result)
np.testing.assert_equal(result.samples,
np.array([0, 0, 10, 20, 30, 40, 50, 0, 0], dtype=np.dtype("i1")))
result = pad(self._trace, (1, 3))
self.assertIsNotNone(result)
np.testing.assert_equal(result.samples,
np.array([0, 10, 20, 30, 40, 50, 0, 0, 0], dtype=np.dtype("i1")))
| [
[
[
21,
29
],
[
119,
127
]
],
[
[
38,
49
],
[
180,
182
],
[
217,
219
],
[
343,
345
],
[
383,
385
],
[
412,
414
],
[
517,
519
],
[
557,
559
],
[
586,
588
],
[
796,
798
],
[
868,
870
],
[
905,
907
],
[
1029,
1031
],
[
1101,
1103
],
[
1150,
1152
],
[
1255,
1257
],
[
1327,
1329
],
[
1376,
1378
]
],
[
[
75,
80
],
[
174,
179
]
],
[
[
82,
86
],
[
277,
281
],
[
447,
451
],
[
660,
664
]
],
[
[
88,
95
],
[
730,
737
]
],
[
[
97,
100
],
[
964,
967
],
[
1185,
1188
]
],
[
[
109,
118
]
]
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pyqt_example.ui'
#
# Created: Sun May 18 03:45:55 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.dataList = QtGui.QListWidget(self.centralwidget)
self.dataList.setObjectName(_fromUtf8("dataList"))
self.gridLayout.addWidget(self.dataList, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "PyQt4 + PyUSB keyboard-alike example", None))
| [
[
[
252,
258
],
[
288,
294
],
[
1548,
1554
]
],
[
[
260,
265
],
[
396,
401
],
[
487,
492
],
[
635,
640
],
[
877,
882
],
[
998,
1003
],
[
1123,
1128
],
[
1362,
1367
]
],
[
[
276,
285
],
[
787,
796
],
[
944,
953
],
[
1074,
1083
],
[
1197,
1206
],
[
1428,
1437
]
],
[
[
343,
352
],
[
787,
796
],
[
944,
953
],
[
1074,
1083
],
[
1197,
1206
],
[
1428,
1437
]
],
[
[
384,
393
],
[
541,
550
]
],
[
[
435,
445
],
[
1674,
1684
]
],
[
[
583,
593
],
[
1674,
1684
]
],
[
[
696,
709
]
]
] |
#! /usr/bin/env python3
### stdlib imports
import pathlib
### local imports
import utils
@utils.part1
def part1(puzzleInput: str):
# Parse the coordinate pairs from the puzzle input
coordList = [
[
tuple(int(coord) for coord in pair.split(","))
for pair in line.split(" -> ")
]
for line in puzzleInput.strip().splitlines()
]
# Dictionary containing lookups for coordinate hits
part1Grid: dict[tuple[int, int], int] = {}
part2Grid: dict[tuple[int, int], int] = {}
# Iterate through each line pair and mark each coordinate the line passes through
for (startX, startY), (endX, endY) in coordList:
xMod = -1 if endX < startX else 1
xRange = range(startX, endX + xMod, xMod)
yMod = -1 if endY < startY else 1
yRange = range(startY, endY + yMod, yMod)
# For horizontal and vertical lines, it's sufficient to simply loop through the coordinates
if startX == endX or startY == endY:
for x in xRange:
for y in yRange:
part1Grid[(x, y)] = part1Grid.get((x, y), 0) + 1
part2Grid[(x, y)] = part2Grid.get((x, y), 0) + 1
# For diagonal lines (45 deg only) we can assume the x and y ranges are equal in length
else:
for i, x in enumerate(xRange):
y = yRange[i]
part2Grid[(x, y)] = part2Grid.get((x, y), 0) + 1
# If the draw option is enabled, create visualization images
if utils.getOption("draw"):
from PIL import Image
maxX, maxY = 0, 0
for (startX, startY), (endX, endY) in coordList:
maxX = max(startX, endX, maxX)
maxY = max(startY, endY, maxY)
for i, grid in enumerate([part1Grid, part2Grid]):
canvas = Image.new("RGB", (maxX + 1, maxY + 1))
for coord, count in grid.items():
canvas.putpixel(
coord, (255, 0, 0) if count > 1 else (255, 255, 255)
)
canvas.save(pathlib.Path.cwd() / f"day05.part{i + 1}.png")
# The answer is the number of grid coordinates with more than one line
utils.printAnswer(len([item for item in part1Grid.items() if item[1] > 1]))
# Pass the part 2 answer to its solution function
return len([item for item in part2Grid.items() if item[1] > 1])
@utils.part2
def part2(_, answer: int):
# Part 1 counted the overlapping points for diagonal lines as well,
# so we can just print the answer
utils.printAnswer(answer)
utils.start()
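# --- Illustrative sketch (not part of the solution above) ---
# Expands a single "x1,y1 -> x2,y2" segment into the grid points it covers,
# using the same step logic as part1 above. Pure standard library, no utils
# dependency.
def expand_segment(start, end):
    (x1, y1), (x2, y2) = start, end
    xMod = -1 if x2 < x1 else 1
    yMod = -1 if y2 < y1 else 1
    xRange = range(x1, x2 + xMod, xMod)
    yRange = range(y1, y2 + yMod, yMod)
    if x1 == x2 or y1 == y2:
        # horizontal / vertical: every (x, y) combination is on the line
        return [(x, y) for x in xRange for y in yRange]
    # 45-degree diagonal: x and y advance together
    return list(zip(xRange, yRange))
# expand_segment((9, 7), (7, 9)) -> [(9, 7), (8, 8), (7, 9)]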
| [
[
[
50,
57
],
[
2073,
2080
]
],
[
[
84,
89
],
[
93,
98
],
[
2402,
2407
],
[
2583,
2588
],
[
1532,
1537
],
[
2200,
2205
],
[
2555,
2560
]
],
[
[
109,
114
]
],
[
[
2418,
2423
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from flask import request, abort, jsonify
from . import app, mysql
from utils import requires_auth
@requires_auth
@app.route("/tickets/add", methods=['POST'])
def submit_ticket():
team_id = request.form.get("team_id")
subject = request.form.get("subject")
msg = request.form.get("message")
ts = request.form.get("ts")
cursor = mysql.cursor()
cursor.execute("""INSERT INTO tickets
(team_id, ts, subject, msg, response)
VALUES (%s, %s, %s, %s, %s)""",
(team_id, ts, subject, msg, "No Response Yet"))
ticket_id = cursor.lastrowid
mysql.database.commit()
if cursor.rowcount == 0:
return json.dumps({"result": "fail"})
else:
return json.dumps({"result": "success", "ticket_id": ticket_id})
@app.route("/tickets/get")
@app.route("/tickets/get/<int:team_id>")
@requires_auth
def get_all_tickets(team_id = None):
cursor = mysql.cursor()
if not team_id:
cursor.execute("""SELECT * FROM tickets""")
else:
cursor.execute("""SELECT * FROM tickets where team_id = %d;""", team_id)
tks = cursor.fetchall()
for t in tks:
t['msg'] = t['msg'].decode('utf-8')
t['response'] = t['response'].decode('utf-8')
return jsonify({"tickets": tks})
@app.route("/tickets/get/open")
@requires_auth
def get_open_tickets():
cursor = mysql.cursor()
cursor.execute("""SELECT * FROM tickets WHERE done = 0;""")
return jsonify({"tickets": cursor.fetchall()})
@app.route("/tickets/respond/<int:ticket_id>")
@requires_auth
def respond_to_ticket(ticket_id):
response = request.form.get("response")
cursor = mysql.cursor()
cursor.execute("""UPDATE tickets SET response = %s WHERE id = %s;""", (response, ticket_id))
mysql.database.commit()
return jsonify({"result": 'success'})
@app.route("/tickets/close/<int:ticket_id>", methods=['POST'])
@requires_auth
def close_ticket(ticket_id):
ticket_id = int(ticket_id)
cursor = mysql.cursor()
cursor.execute("""UPDATE tickets SET done = 1 WHERE id = %s;""", ticket_id)
mysql.database.commit()
return json.dumps({"result": 'success'})
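# --- Illustrative client sketch (not part of the API module above) ---
# Shows the form fields /tickets/add expects. The base URL and whatever
# credentials @requires_auth checks are assumptions about the deployment,
# not something this module defines.
def _example_submit_ticket(base_url="http://localhost:8000"):
    import requests  # third-party; only needed for this sketch
    payload = {
        "team_id": 1,
        "subject": "Scoreboard issue",
        "message": "Our flag submission is not registering.",
        "ts": "1589577600",
    }
    resp = requests.post(base_url + "/tickets/add", data=payload)
    return resp.json()  # {"result": "success", "ticket_id": ...} on success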
| [
[
[
54,
58
],
[
760,
764
],
[
816,
820
],
[
2210,
2214
]
],
[
[
78,
85
],
[
257,
264
],
[
299,
306
],
[
337,
344
],
[
374,
381
],
[
1697,
1704
]
],
[
[
87,
92
]
],
[
[
94,
101
],
[
1342,
1349
],
[
1545,
1552
],
[
1892,
1899
]
],
[
[
117,
120
],
[
178,
181
],
[
877,
880
],
[
904,
907
],
[
1371,
1374
],
[
1587,
1590
],
[
1926,
1929
]
],
[
[
122,
127
],
[
410,
415
],
[
691,
696
],
[
1009,
1014
],
[
1454,
1459
],
[
1739,
1744
],
[
1856,
1861
],
[
2076,
2081
],
[
2175,
2180
]
],
[
[
146,
159
],
[
163,
176
],
[
945,
958
],
[
1403,
1416
],
[
1634,
1647
],
[
1989,
2002
]
],
[
[
226,
239
]
],
[
[
963,
978
]
],
[
[
1421,
1437
]
],
[
[
1652,
1669
]
],
[
[
2007,
2019
]
]
] |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.common import exceptions
from neutron import quota
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from quark import allocation_pool
from quark.db import api as db_api
from quark.db import models as db_models
from quark import exceptions as quark_exceptions
from quark import plugin_views as v
CONF = cfg.CONF
DEFAULT_ROUTE = netaddr.IPNetwork("0.0.0.0/0")
LOG = logging.getLogger(__name__)
ipam_driver = (importutils.import_class(CONF.QUARK.ipam_driver))()
def get_route(context, id):
LOG.info("get_route %s for tenant %s" % (id, context.tenant_id))
route = db_api.route_find(context, id=id, scope=db_api.ONE)
if not route:
raise quark_exceptions.RouteNotFound(route_id=id)
return v._make_route_dict(route)
def get_routes(context):
LOG.info("get_routes for tenant %s" % context.tenant_id)
routes = db_api.route_find(context)
return [v._make_route_dict(r) for r in routes]
def create_route(context, route):
LOG.info("create_route for tenant %s" % context.tenant_id)
route = route["route"]
for key in ["gateway", "cidr", "subnet_id"]:
if key not in route:
raise exceptions.BadRequest(resource="routes",
msg="%s is required" % key)
subnet_id = route["subnet_id"]
with context.session.begin():
subnet = db_api.subnet_find(context, id=subnet_id, scope=db_api.ONE)
if not subnet:
raise exceptions.SubnetNotFound(subnet_id=subnet_id)
policies = db_models.IPPolicy.get_ip_policy_cidrs(subnet)
alloc_pools = allocation_pool.AllocationPools(subnet["cidr"],
policies=policies)
alloc_pools.validate_gateway_excluded(route["gateway"])
# TODO(anyone): May want to denormalize the cidr values into columns
# to achieve single db lookup on conflict check
route_cidr = netaddr.IPNetwork(route["cidr"])
subnet_routes = db_api.route_find(context, subnet_id=subnet_id,
scope=db_api.ALL)
quota.QUOTAS.limit_check(context, context.tenant_id,
routes_per_subnet=len(subnet_routes) + 1)
for sub_route in subnet_routes:
sub_route_cidr = netaddr.IPNetwork(sub_route["cidr"])
if sub_route_cidr.value == DEFAULT_ROUTE.value:
continue
if route_cidr in sub_route_cidr or sub_route_cidr in route_cidr:
raise quark_exceptions.RouteConflict(
route_id=sub_route["id"], cidr=str(route_cidr))
new_route = db_api.route_create(context, **route)
return v._make_route_dict(new_route)
def delete_route(context, id):
# TODO(mdietz): This is probably where we check to see that someone is
# admin and only filter on tenant if they aren't. Correct
# for all the above later
LOG.info("delete_route %s for tenant %s" % (id, context.tenant_id))
with context.session.begin():
route = db_api.route_find(context, id=id, scope=db_api.ONE)
if not route:
raise quark_exceptions.RouteNotFound(route_id=id)
db_api.route_delete(context, route)
| [
[
[
643,
650
],
[
1043,
1050
],
[
2639,
2646
],
[
3011,
3018
]
],
[
[
678,
688
],
[
1853,
1863
],
[
2150,
2160
]
],
[
[
709,
714
],
[
2813,
2818
]
],
[
[
739,
742
],
[
1018,
1021
]
],
[
[
764,
778
],
[
1080,
1087
]
],
[
[
802,
813
],
[
1124,
1135
]
],
[
[
833,
848
],
[
2285,
2300
]
],
[
[
870,
883
],
[
1287,
1293
],
[
1327,
1333
],
[
1553,
1559
],
[
2049,
2055
],
[
2097,
2103
],
[
2696,
2702
],
[
2792,
2798
],
[
3352,
3358
],
[
3779,
3785
],
[
3819,
3825
],
[
3923,
3929
]
],
[
[
905,
924
],
[
2216,
2225
]
],
[
[
943,
973
],
[
1371,
1387
],
[
3232,
3248
],
[
3871,
3887
]
],
[
[
992,
1009
],
[
1426,
1427
],
[
1592,
1593
],
[
3401,
3402
]
],
[
[
1011,
1015
],
[
1149,
1153
]
],
[
[
1027,
1040
],
[
3087,
3100
]
],
[
[
1074,
1077
],
[
1210,
1213
],
[
1483,
1486
],
[
1671,
1674
],
[
3661,
3664
]
],
[
[
1109,
1120
]
],
[
[
1182,
1191
]
],
[
[
1458,
1468
]
],
[
[
1637,
1649
]
],
[
[
3437,
3449
]
]
] |
"""Tests for legendre module.
"""
import numpy as np
import numpy.polynomial.legendre as leg
import numpy.polynomial.polynomial as poly
from numpy.testing import *
P0 = np.array([ 1])
P1 = np.array([ 0, 1])
P2 = np.array([-1, 0, 3])/2
P3 = np.array([ 0, -3, 0, 5])/2
P4 = np.array([ 3, 0, -30, 0, 35])/8
P5 = np.array([ 0, 15, 0, -70, 0, 63])/8
P6 = np.array([-5, 0, 105, 0,-315, 0, 231])/16
P7 = np.array([ 0,-35, 0, 315, 0, -693, 0, 429])/16
P8 = np.array([35, 0,-1260, 0,6930, 0,-12012, 0,6435])/128
P9 = np.array([ 0,315, 0,-4620, 0,18018, 0,-25740, 0,12155])/128
Plist = [P0, P1, P2, P3, P4, P5, P6, P7, P8, P9]
def trim(x) :
return leg.legtrim(x, tol=1e-6)
class TestConstants(TestCase) :
def test_legdomain(self) :
assert_equal(leg.legdomain, [-1, 1])
def test_legzero(self) :
assert_equal(leg.legzero, [0])
def test_legone(self) :
assert_equal(leg.legone, [1])
def test_legx(self) :
assert_equal(leg.legx, [0, 1])
class TestArithmetic(TestCase) :
x = np.linspace(-1, 1, 100)
y0 = poly.polyval(x, P0)
y1 = poly.polyval(x, P1)
y2 = poly.polyval(x, P2)
y3 = poly.polyval(x, P3)
y4 = poly.polyval(x, P4)
y5 = poly.polyval(x, P5)
y6 = poly.polyval(x, P6)
y7 = poly.polyval(x, P7)
y8 = poly.polyval(x, P8)
y9 = poly.polyval(x, P9)
y = [y0, y1, y2, y3, y4, y5, y6, y7, y8, y9]
def test_legval(self) :
def f(x) :
return x*(x**2 - 1)
#check empty input
assert_equal(leg.legval([], [1]).size, 0)
        # check normal input
for i in range(10) :
msg = "At i=%d" % i
ser = np.zeros
tgt = self.y[i]
res = leg.legval(self.x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3) :
dims = [2]*i
x = np.zeros(dims)
assert_equal(leg.legval(x, [1]).shape, dims)
assert_equal(leg.legval(x, [1,0]).shape, dims)
assert_equal(leg.legval(x, [1,0,0]).shape, dims)
def test_legadd(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] += 1
res = leg.legadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legsub(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = leg.legsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legmulx(self):
assert_equal(leg.legmulx([0]), [0])
assert_equal(leg.legmulx([1]), [0,1])
for i in range(1, 5):
tmp = 2*i + 1
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]
assert_equal(leg.legmulx(ser), tgt)
def test_legmul(self) :
# check values of result
for i in range(5) :
pol1 = [0]*i + [1]
val1 = leg.legval(self.x, pol1)
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
pol2 = [0]*j + [1]
val2 = leg.legval(self.x, pol2)
pol3 = leg.legmul(pol1, pol2)
val3 = leg.legval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_legdiv(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = leg.legadd(ci, cj)
quo, rem = leg.legdiv(tgt, ci)
res = leg.legadd(leg.legmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestCalculus(TestCase) :
def test_legint(self) :
# check exceptions
assert_raises(ValueError, leg.legint, [0], .5)
assert_raises(ValueError, leg.legint, [0], -1)
assert_raises(ValueError, leg.legint, [0], 1, [0,0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = leg.legint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i])
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(leg.legval(-1, legint), i)
# check single integration with integration constant and scaling
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], scl=2)
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1)
res = leg.legint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k])
res = leg.legint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)
res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k], scl=2)
res = leg.legint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_legder(self) :
# check exceptions
assert_raises(ValueError, leg.legder, [0], .5)
assert_raises(ValueError, leg.legder, [0], -1)
        # check that the zeroth derivative does nothing
for i in range(5) :
tgt = [1] + [0]*i
res = leg.legder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5) :
for j in range(2,5) :
tgt = [1] + [0]*i
res = leg.legder(leg.legint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5) :
for j in range(2,5) :
tgt = [1] + [0]*i
res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
class TestMisc(TestCase) :
def test_legfromroots(self) :
res = leg.legfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1,5) :
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = leg.legfromroots(roots)
res = leg.legval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(leg.leg2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_legroots(self) :
assert_almost_equal(leg.legroots([1]), [])
assert_almost_equal(leg.legroots([1, 2]), [-.5])
for i in range(2,5) :
tgt = np.linspace(-1, 1, i)
res = leg.legroots(leg.legfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_legvander(self) :
# check for 1d x
x = np.arange(3)
v = leg.legvander(x, 3)
assert_(v.shape == (3,4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[...,i], leg.legval(x, coef))
# check for 2d x
x = np.array([[1,2],[3,4],[5,6]])
v = leg.legvander(x, 3)
assert_(v.shape == (3,2,4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[...,i], leg.legval(x, coef))
def test_legfit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, leg.legfit, [1], [1], -1)
assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
assert_raises(TypeError, leg.legfit, [], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1,1])
# Test fit
x = np.linspace(0,2)
y = f(x)
#
coef3 = leg.legfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(leg.legval(x, coef3), y)
#
coef4 = leg.legfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(leg.legval(x, coef4), y)
#
coef2d = leg.legfit(x, np.array([y,y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
        yw[0::2] = 0
wcoef3 = leg.legfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = leg.legfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
def test_legtrim(self) :
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, leg.legtrim, coef, -1)
# Test results
assert_equal(leg.legtrim(coef), coef[:-1])
assert_equal(leg.legtrim(coef, 1), coef[:-3])
assert_equal(leg.legtrim(coef, 2), [0])
def test_legline(self) :
assert_equal(leg.legline(3,4), [3, 4])
def test_leg2poly(self) :
for i in range(10) :
assert_almost_equal(leg.leg2poly([0]*i + [1]), Plist[i])
def test_poly2leg(self) :
for i in range(10) :
assert_almost_equal(leg.poly2leg(Plist[i]), [0]*i + [1])
def assert_poly_almost_equal(p1, p2):
assert_almost_equal(p1.coef, p2.coef)
assert_equal(p1.domain, p2.domain)
class TestLegendreClass(TestCase) :
p1 = leg.Legendre([1,2,3])
p2 = leg.Legendre([1,2,3], [0,1])
p3 = leg.Legendre([1,2])
p4 = leg.Legendre([2,2,3])
p5 = leg.Legendre([3,2,3])
def test_equal(self) :
assert_(self.p1 == self.p1)
assert_(self.p2 == self.p2)
assert_(not self.p1 == self.p2)
assert_(not self.p1 == self.p3)
assert_(not self.p1 == [1,2,3])
def test_not_equal(self) :
assert_(not self.p1 != self.p1)
assert_(not self.p2 != self.p2)
assert_(self.p1 != self.p2)
assert_(self.p1 != self.p3)
assert_(self.p1 != [1,2,3])
def test_add(self) :
tgt = leg.Legendre([2,4,6])
assert_(self.p1 + self.p1 == tgt)
assert_(self.p1 + [1,2,3] == tgt)
assert_([1,2,3] + self.p1 == tgt)
def test_sub(self) :
tgt = leg.Legendre([1])
assert_(self.p4 - self.p1 == tgt)
assert_(self.p4 - [1,2,3] == tgt)
assert_([2,2,3] - self.p1 == tgt)
def test_mul(self) :
tgt = leg.Legendre([4.13333333, 8.8, 11.23809524, 7.2, 4.62857143])
assert_poly_almost_equal(self.p1 * self.p1, tgt)
assert_poly_almost_equal(self.p1 * [1,2,3], tgt)
assert_poly_almost_equal([1,2,3] * self.p1, tgt)
def test_floordiv(self) :
tgt = leg.Legendre([1])
assert_(self.p4 // self.p1 == tgt)
assert_(self.p4 // [1,2,3] == tgt)
assert_([2,2,3] // self.p1 == tgt)
def test_mod(self) :
tgt = leg.Legendre([1])
assert_((self.p4 % self.p1) == tgt)
assert_((self.p4 % [1,2,3]) == tgt)
assert_(([2,2,3] % self.p1) == tgt)
def test_divmod(self) :
tquo = leg.Legendre([1])
trem = leg.Legendre([2])
quo, rem = divmod(self.p5, self.p1)
assert_(quo == tquo and rem == trem)
quo, rem = divmod(self.p5, [1,2,3])
assert_(quo == tquo and rem == trem)
quo, rem = divmod([3,2,3], self.p1)
assert_(quo == tquo and rem == trem)
def test_pow(self) :
tgt = leg.Legendre([1])
for i in range(5) :
res = self.p1**i
assert_(res == tgt)
tgt = tgt*self.p1
def test_call(self) :
# domain = [-1, 1]
x = np.linspace(-1, 1)
tgt = 3*(1.5*x**2 - .5) + 2*x + 1
assert_almost_equal(self.p1(x), tgt)
# domain = [0, 1]
x = np.linspace(0, 1)
xx = 2*x - 1
assert_almost_equal(self.p2(x), self.p1(xx))
def test_degree(self) :
assert_equal(self.p1.degree(), 2)
def test_cutdeg(self) :
assert_raises(ValueError, self.p1.cutdeg, .5)
assert_raises(ValueError, self.p1.cutdeg, -1)
assert_equal(len(self.p1.cutdeg(3)), 3)
assert_equal(len(self.p1.cutdeg(2)), 3)
assert_equal(len(self.p1.cutdeg(1)), 2)
assert_equal(len(self.p1.cutdeg(0)), 1)
def test_convert(self) :
x = np.linspace(-1,1)
p = self.p1.convert(domain=[0,1])
assert_almost_equal(p(x), self.p1(x))
def test_mapparms(self) :
parms = self.p2.mapparms()
assert_almost_equal(parms, [-1, 2])
def test_trim(self) :
coef = [1, 1e-6, 1e-12, 0]
p = leg.Legendre(coef)
assert_equal(p.trim().coef, coef[:3])
assert_equal(p.trim(1e-10).coef, coef[:2])
assert_equal(p.trim(1e-5).coef, coef[:1])
def test_truncate(self) :
assert_raises(ValueError, self.p1.truncate, .5)
assert_raises(ValueError, self.p1.truncate, 0)
assert_equal(len(self.p1.truncate(4)), 3)
assert_equal(len(self.p1.truncate(3)), 3)
assert_equal(len(self.p1.truncate(2)), 2)
assert_equal(len(self.p1.truncate(1)), 1)
def test_copy(self) :
p = self.p1.copy()
assert_(self.p1 == p)
def test_integ(self) :
p = self.p2.integ()
assert_almost_equal(p.coef, leg.legint([1,2,3], 1, 0, scl=.5))
p = self.p2.integ(lbnd=0)
assert_almost_equal(p(0), 0)
p = self.p2.integ(1, 1)
assert_almost_equal(p.coef, leg.legint([1,2,3], 1, 1, scl=.5))
p = self.p2.integ(2, [1, 2])
assert_almost_equal(p.coef, leg.legint([1,2,3], 2, [1,2], scl=.5))
def test_deriv(self) :
p = self.p2.integ(2, [1, 2])
assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)
assert_almost_equal(p.deriv(2).coef, self.p2.coef)
def test_roots(self) :
p = leg.Legendre(leg.poly2leg([0, -1, 0, 1]), [0, 1])
res = p.roots()
tgt = [0, .5, 1]
assert_almost_equal(res, tgt)
def test_linspace(self):
xdes = np.linspace(0, 1, 20)
ydes = self.p2(xdes)
xres, yres = self.p2.linspace(20)
assert_almost_equal(xres, xdes)
assert_almost_equal(yres, ydes)
def test_fromroots(self) :
roots = [0, .5, 1]
p = leg.Legendre.fromroots(roots, domain=[0, 1])
res = p.coef
tgt = leg.poly2leg([0, -1, 0, 1])
assert_almost_equal(res, tgt)
def test_fit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
x = np.linspace(0,3)
y = f(x)
# test default value of domain
p = leg.Legendre.fit(x, y, 3)
assert_almost_equal(p.domain, [0,3])
# test that fit works in given domains
p = leg.Legendre.fit(x, y, 3, None)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, [0,3])
p = leg.Legendre.fit(x, y, 3, [])
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, [-1, 1])
# test that fit accepts weights.
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
yw[0::2] = 0
p = leg.Legendre.fit(x, yw, 3, w=w)
assert_almost_equal(p(x), y)
def test_identity(self) :
x = np.linspace(0,3)
p = leg.Legendre.identity()
assert_almost_equal(p(x), x)
p = leg.Legendre.identity([1,3])
assert_almost_equal(p(x), x)
#
if __name__ == "__main__":
run_module_suite()
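# --- Illustrative sketch (not part of the test module above) ---
# The pattern exercised by test_legfit / test_legval, outside the test
# framework: fit a cubic in the Legendre basis and evaluate it back.
#
#   x = np.linspace(0, 2)
#   y = x*(x - 1)*(x - 2)
#   coef = leg.legfit(x, y, 3)            # Legendre-series coefficients, degree 3
#   np.allclose(leg.legval(x, coef), y)   # -> True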
| [
[
[
44,
55
],
[
173,
175
],
[
193,
195
],
[
217,
219
],
[
249,
251
],
[
287,
289
],
[
330,
332
],
[
379,
381
],
[
436,
438
],
[
500,
502
],
[
570,
572
],
[
1098,
1100
],
[
1728,
1730
],
[
1979,
1981
],
[
2328,
2330
],
[
2687,
2689
],
[
8332,
8334
],
[
8339,
8341
],
[
8352,
8354
],
[
8807,
8809
],
[
9006,
9008
],
[
9245,
9247
],
[
10157,
10159
],
[
10502,
10504
],
[
10560,
10562
],
[
10624,
10626
],
[
10831,
10833
],
[
10897,
10899
],
[
13971,
13973
],
[
14116,
14118
],
[
14650,
14652
],
[
16364,
16366
],
[
16848,
16850
],
[
17358,
17360
],
[
17562,
17564
]
],
[
[
63,
95
],
[
11748,
11751
],
[
11779,
11782
],
[
11817,
11820
],
[
11846,
11849
],
[
11877,
11880
],
[
717,
720
],
[
829,
832
],
[
904,
907
],
[
972,
975
],
[
1037,
1040
],
[
1590,
1593
],
[
1783,
1786
],
[
2019,
2022
],
[
2076,
2079
],
[
2135,
2138
],
[
2429,
2432
],
[
2788,
2791
],
[
2939,
2942
],
[
2983,
2986
],
[
3175,
3178
],
[
3338,
3341
],
[
3499,
3502
],
[
3547,
3550
],
[
3593,
3596
],
[
3960,
3963
],
[
4006,
4009
],
[
4048,
4051
],
[
4059,
4062
],
[
4272,
4275
],
[
4327,
4330
],
[
4382,
4385
],
[
4538,
4541
],
[
4814,
4817
],
[
4853,
4856
],
[
4902,
4905
],
[
5151,
5154
],
[
5190,
5193
],
[
5262,
5265
],
[
5506,
5509
],
[
5545,
5548
],
[
5601,
5604
],
[
5917,
5920
],
[
5960,
5963
],
[
6280,
6283
],
[
6330,
6333
],
[
6663,
6666
],
[
6722,
6725
],
[
7067,
7070
],
[
7124,
7127
],
[
7318,
7321
],
[
7373,
7376
],
[
7524,
7527
],
[
7773,
7776
],
[
7784,
7787
],
[
8028,
8031
],
[
8039,
8042
],
[
8217,
8220
],
[
8396,
8399
],
[
8438,
8441
],
[
8552,
8555
],
[
8679,
8682
],
[
8730,
8733
],
[
8847,
8850
],
[
8860,
8863
],
[
9031,
9034
],
[
9186,
9189
],
[
9287,
9290
],
[
9444,
9447
],
[
9611,
9614
],
[
9678,
9681
],
[
9745,
9748
],
[
9812,
9815
],
[
9879,
9882
],
[
9946,
9949
],
[
10013,
10016
],
[
10086,
10089
],
[
10217,
10220
],
[
10301,
10304
],
[
10352,
10355
],
[
10436,
10439
],
[
10488,
10491
],
[
10720,
10723
],
[
10817,
10820
],
[
11044,
11047
],
[
11112,
11115
],
[
11163,
11166
],
[
11217,
11220
],
[
11295,
11298
],
[
11413,
11416
],
[
11542,
11545
],
[
12379,
12382
],
[
12567,
12570
],
[
12751,
12754
],
[
13029,
13032
],
[
13216,
13219
],
[
13410,
13413
],
[
13443,
13446
],
[
13768,
13771
],
[
14940,
14943
],
[
15624,
15627
],
[
15798,
15801
],
[
15906,
15909
],
[
16182,
16185
],
[
16195,
16198
],
[
16608,
16611
],
[
16688,
16691
],
[
16934,
16937
],
[
17065,
17068
],
[
17191,
17194
],
[
17450,
17453
],
[
17591,
17594
],
[
17664,
17667
]
],
[
[
103,
138
],
[
1131,
1135
],
[
1160,
1164
],
[
1189,
1193
],
[
1218,
1222
],
[
1247,
1251
],
[
1276,
1280
],
[
1305,
1309
],
[
1334,
1338
],
[
1363,
1367
],
[
1392,
1396
]
],
[
[
165,
166
],
[
764,
772
],
[
1078,
1086
],
[
4170,
4178
],
[
8156,
8164
],
[
11726,
11734
],
[
17764,
17780
],
[
816,
828
],
[
891,
903
],
[
959,
971
],
[
1024,
1036
],
[
1577,
1589
],
[
1827,
1846
],
[
2006,
2018
],
[
2063,
2075
],
[
2122,
2134
],
[
2482,
2494
],
[
2841,
2853
],
[
2926,
2938
],
[
2970,
2982
],
[
3162,
3174
],
[
3634,
3641
],
[
3687,
3706
],
[
4101,
4113
],
[
4246,
4259
],
[
4301,
4314
],
[
4356,
4369
],
[
4576,
4595
],
[
4935,
4954
],
[
5242,
5261
],
[
5634,
5653
],
[
5997,
6016
],
[
6385,
6404
],
[
6786,
6805
],
[
7186,
7205
],
[
7292,
7305
],
[
7347,
7360
],
[
7557,
7569
],
[
7827,
7846
],
[
8097,
8116
],
[
8246,
8265
],
[
8493,
8500
],
[
8532,
8551
],
[
8590,
8609
],
[
8659,
8678
],
[
8710,
8729
],
[
8895,
8914
],
[
9059,
9066
],
[
9156,
9175
],
[
9315,
9322
],
[
9414,
9433
],
[
9585,
9598
],
[
9652,
9665
],
[
9719,
9732
],
[
9786,
9799
],
[
9853,
9866
],
[
9920,
9933
],
[
9987,
10000
],
[
10060,
10073
],
[
10245,
10257
],
[
10281,
10300
],
[
10380,
10392
],
[
10416,
10435
],
[
10532,
10551
],
[
10754,
10773
],
[
10868,
10887
],
[
11018,
11031
],
[
11099,
11111
],
[
11150,
11162
],
[
11204,
11216
],
[
11282,
11294
],
[
11393,
11412
],
[
11522,
11541
],
[
11623,
11642
],
[
11665,
11677
],
[
11935,
11942
],
[
11971,
11978
],
[
12007,
12014
],
[
12047,
12054
],
[
12087,
12094
],
[
12159,
12166
],
[
12199,
12206
],
[
12239,
12246
],
[
12275,
12282
],
[
12311,
12318
],
[
12409,
12416
],
[
12451,
12458
],
[
12493,
12500
],
[
12593,
12600
],
[
12635,
12642
],
[
12677,
12684
],
[
13055,
13062
],
[
13098,
13105
],
[
13141,
13148
],
[
13242,
13249
],
[
13286,
13293
],
[
13330,
13337
],
[
13513,
13520
],
[
13602,
13609
],
[
13691,
13698
],
[
13855,
13862
],
[
14040,
14059
],
[
14163,
14182
],
[
14245,
14257
],
[
14316,
14329
],
[
14370,
14383
],
[
14424,
14436
],
[
14472,
14484
],
[
14520,
14532
],
[
14568,
14580
],
[
14718,
14737
],
[
14830,
14849
],
[
14967,
14979
],
[
15013,
15025
],
[
15064,
15076
],
[
15145,
15158
],
[
15201,
15214
],
[
15256,
15268
],
[
15306,
15318
],
[
15356,
15368
],
[
15406,
15418
],
[
15510,
15517
],
[
15596,
15615
],
[
15701,
15720
],
[
15770,
15789
],
[
15878,
15897
],
[
16018,
16037
],
[
16091,
16110
],
[
16289,
16308
],
[
16465,
16484
],
[
16505,
16524
],
[
16724,
16743
],
[
16968,
16987
],
[
17105,
17124
],
[
17142,
17161
],
[
17229,
17248
],
[
17266,
17285
],
[
17490,
17509
],
[
17623,
17642
],
[
17701,
17720
]
],
[
[
168,
170
],
[
651,
653
],
[
1147,
1149
]
],
[
[
188,
190
],
[
655,
657
],
[
1176,
1178
]
],
[
[
212,
214
],
[
659,
661
],
[
1205,
1207
]
],
[
[
244,
246
],
[
663,
665
],
[
1234,
1236
]
],
[
[
282,
284
],
[
667,
669
],
[
1263,
1265
]
],
[
[
325,
327
],
[
671,
673
],
[
1292,
1294
]
],
[
[
374,
376
],
[
675,
677
],
[
1321,
1323
]
],
[
[
431,
433
],
[
679,
681
],
[
1350,
1352
]
],
[
[
495,
497
],
[
683,
685
],
[
1379,
1381
]
],
[
[
565,
567
],
[
687,
689
],
[
1408,
1410
]
],
[
[
642,
647
],
[
11440,
11445
],
[
11555,
11560
]
],
[
[
696,
700
],
[
2495,
2499
],
[
2506,
2510
],
[
2854,
2858
],
[
2865,
2869
],
[
4114,
4118
],
[
4125,
4129
],
[
4955,
4959
],
[
4966,
4970
],
[
5654,
5658
],
[
5665,
5669
],
[
6017,
6021
],
[
6028,
6032
],
[
6405,
6409
],
[
6416,
6420
],
[
6806,
6810
],
[
6817,
6821
],
[
7206,
7210
],
[
7217,
7221
],
[
7570,
7574
],
[
7581,
7585
],
[
7847,
7851
],
[
7858,
7862
],
[
8117,
8121
],
[
8128,
8132
],
[
8266,
8270
],
[
8915,
8919
],
[
8926,
8930
]
],
[
[
750,
763
]
],
[
[
1063,
1077
]
],
[
[
4157,
4169
]
],
[
[
8147,
8155
]
],
[
[
11585,
11609
],
[
12821,
12845
],
[
12878,
12902
],
[
12935,
12959
]
],
[
[
11708,
11725
]
]
] |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Iterable, Sequence, Tuple
from google.ads.googleads.v6.resources.types import google_ads_field
from google.ads.googleads.v6.services.types import google_ads_field_service
class SearchGoogleAdsFieldsPager:
"""A pager for iterating through ``search_google_ads_fields`` requests.
This class thinly wraps an initial
:class:`google.ads.googleads.v6.services.types.SearchGoogleAdsFieldsResponse` object, and
provides an ``__iter__`` method to iterate through its
``results`` field.
If there are more pages, the ``__iter__`` method will make additional
``SearchGoogleAdsFields`` requests and continue to iterate
through the ``results`` field on the
corresponding responses.
All the usual :class:`google.ads.googleads.v6.services.types.SearchGoogleAdsFieldsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[
..., google_ads_field_service.SearchGoogleAdsFieldsResponse
],
request: google_ads_field_service.SearchGoogleAdsFieldsRequest,
response: google_ads_field_service.SearchGoogleAdsFieldsResponse,
metadata: Sequence[Tuple[str, str]] = (),
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (:class:`google.ads.googleads.v6.services.types.SearchGoogleAdsFieldsRequest`):
The initial request object.
response (:class:`google.ads.googleads.v6.services.types.SearchGoogleAdsFieldsResponse`):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = google_ads_field_service.SearchGoogleAdsFieldsRequest(
request
)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(
self,
) -> Iterable[google_ads_field_service.SearchGoogleAdsFieldsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(
self._request, metadata=self._metadata
)
yield self._response
def __iter__(self) -> Iterable[google_ads_field.GoogleAdsField]:
for page in self.pages:
yield from page.results
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| [
[
[
621,
624
],
[
2803,
2806
]
],
[
[
626,
634
],
[
1655,
1663
]
],
[
[
636,
644
],
[
2906,
2914
],
[
3289,
3297
]
],
[
[
646,
654
],
[
1912,
1920
]
],
[
[
656,
661
],
[
1921,
1926
]
],
[
[
715,
731
],
[
3298,
3314
]
],
[
[
783,
807
],
[
1682,
1706
],
[
1765,
1789
],
[
1838,
1862
],
[
2609,
2633
],
[
2915,
2939
]
],
[
[
816,
842
]
]
] |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.audio.pit import pit, pit_permutate # noqa: F401
from torchmetrics.functional.audio.si_sdr import si_sdr # noqa: F401
from torchmetrics.functional.audio.si_snr import si_snr # noqa: F401
from torchmetrics.functional.audio.snr import snr # noqa: F401
| [
[
[
632,
635
]
],
[
[
637,
650
]
],
[
[
714,
720
]
],
[
[
784,
790
]
],
[
[
851,
854
]
]
] |
"""Pyramid Scene Parsing Network"""
import os
import torch
from torch import nn
import torch.nn.functional as F
from model.seg_models.segbase import SegBaseModel
from model.module.basic import _FCNHead
__all__ = ['PSPNet', 'get_psp',
'get_psp_resnet101_voc',
'get_psp_resnet101_citys']
# head
def _PSP1x1Conv(in_channels, out_channels):
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
class _PyramidPooling(nn.Module):
def __init__(self, in_channels):
super(_PyramidPooling, self).__init__()
out_channels = in_channels // 4
self.conv1 = _PSP1x1Conv(in_channels, out_channels)
self.conv2 = _PSP1x1Conv(in_channels, out_channels)
self.conv3 = _PSP1x1Conv(in_channels, out_channels)
self.conv4 = _PSP1x1Conv(in_channels, out_channels)
@staticmethod
def pool(x, size):
return F.adaptive_avg_pool2d(x, output_size=size)
@staticmethod
def upsample(x, h, w):
return F.interpolate(x, (h, w), mode='bilinear', align_corners=True)
def forward(self, x):
_, _, h, w = x.shape
feat1 = self.upsample(self.conv1(self.pool(x, 1)), h, w)
feat2 = self.upsample(self.conv2(self.pool(x, 2)), h, w)
feat3 = self.upsample(self.conv3(self.pool(x, 3)), h, w)
feat4 = self.upsample(self.conv4(self.pool(x, 4)), h, w)
return torch.cat([x, feat1, feat2, feat3, feat4], dim=1)
class _PSPHead(nn.Module):
def __init__(self, nclass, **kwargs):
super(_PSPHead, self).__init__(**kwargs)
self.psp = _PyramidPooling(2048)
self.block = list()
self.block.append(nn.Conv2d(4096, 512, kernel_size=3, padding=1, bias=False))
self.block.append(nn.BatchNorm2d(512))
self.block.append(nn.ReLU(inplace=True))
self.block.append(nn.Dropout(0.1))
self.block.append(nn.Conv2d(512, nclass, kernel_size=1))
self.block = nn.Sequential(*self.block)
def forward(self, x):
x = self.psp(x)
return self.block(x)
class PSPNet(SegBaseModel):
def __init__(self, nclass, backbone='resnet50', aux=True, dilated=True, jpu=False,
pretrained_base=True, base_size=520, crop_size=480, **kwargs):
super(PSPNet, self).__init__(nclass, aux, backbone, base_size=base_size, dilated=dilated, jpu=jpu,
crop_size=crop_size, pretrained_base=pretrained_base, **kwargs)
self.head = _PSPHead(nclass, **kwargs)
if self.aux:
self.auxlayer = _FCNHead(1024, nclass, **kwargs)
self.__setattr__('others', ['head', 'auxlayer'] if self.aux else ['head'])
def forward(self, x):
c3, c4 = self.base_forward(x)
outputs = []
x = self.head(c4)
x = F.interpolate(x, self._up_kwargs, mode='bilinear', align_corners=True)
outputs.append(x)
if self.aux:
auxout = self.auxlayer(c3)
auxout = F.interpolate(auxout, self._up_kwargs, mode='bilinear', align_corners=True)
outputs.append(auxout)
return tuple(outputs)
def get_psp(dataset='pascal_voc', backbone='resnet101', pretrained=False, pretrained_base=True,
jpu=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
acronyms = {
'pascal_voc': 'voc',
'citys': 'citys',
}
from data import datasets
# infer number of classes
model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone,
pretrained_base=pretrained_base, jpu=jpu, **kwargs)
if pretrained:
from model.model_store import get_model_file
name = 'psp_%s_%s' % (backbone, acronyms[dataset])
name = name + '_jpu' if jpu else name
model.load_state_dict(torch.load(get_model_file(name, root=root)))
return model
def get_psp_resnet101_voc(**kwargs):
return get_psp('pascal_voc', 'resnet101', **kwargs)
def get_psp_resnet101_citys(**kwargs):
return get_psp('citys', 'resnet101', **kwargs)
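# --- Illustrative usage sketch (not part of the module above) ---
# Builds the Pascal VOC variant defined above and runs a dummy forward pass.
# Assumes the local 'data' package used by get_psp and the backbone weights
# needed when pretrained_base=True are available.
if __name__ == '__main__':
    net = get_psp_resnet101_voc(pretrained_base=False)
    net.eval()
    dummy = torch.randn(1, 3, 480, 480)
    with torch.no_grad():
        outputs = net(dummy)
    print([out.shape for out in outputs])  # main output first, aux output if aux=True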
| [
[
[
43,
45
],
[
3331,
3333
]
],
[
[
53,
58
],
[
1486,
1491
],
[
3864,
3869
]
],
[
[
77,
79
],
[
555,
557
],
[
1553,
1555
],
[
374,
376
],
[
388,
390
],
[
478,
480
],
[
508,
510
],
[
1751,
1753
],
[
1837,
1839
],
[
1884,
1886
],
[
1933,
1935
],
[
1976,
1978
],
[
2036,
2038
]
],
[
[
87,
111
],
[
989,
990
],
[
1093,
1094
],
[
2885,
2886
],
[
3064,
3065
]
],
[
[
150,
162
],
[
2158,
2170
]
],
[
[
194,
202
],
[
2644,
2652
]
],
[
[
204,
211
]
],
[
[
323,
334
],
[
713,
724
],
[
773,
784
],
[
833,
844
],
[
893,
904
]
],
[
[
539,
554
],
[
618,
633
],
[
1675,
1690
]
],
[
[
1544,
1552
],
[
1621,
1629
],
[
2568,
2576
]
],
[
[
2151,
2157
],
[
2354,
2360
],
[
3531,
3537
]
],
[
[
3211,
3218
],
[
3976,
3983
],
[
4073,
4080
]
],
[
[
3932,
3953
]
],
[
[
4027,
4050
]
]
] |
from __future__ import print_function
from six.moves import urllib
from .browser import Browser
from .utils import LinkNotFoundError
from .form import Form
import sys
import re
import bs4
class _BrowserState:
def __init__(self, page=None, url=None, form=None, request=None):
self.page = page
self.url = url
self.form = form
self.request = request
class StatefulBrowser(Browser):
"""An extension of :class:`Browser` that stores the browser's state
and provides many convenient functions for interacting with HTML elements.
It is the primary tool in MechanicalSoup for interfacing with websites.
:param session: Attach a pre-existing requests Session instead of
constructing a new one.
:param soup_config: Configuration passed to BeautifulSoup to affect
the way HTML is parsed. Defaults to ``{'features': 'lxml'}``.
        If overridden, it is highly recommended to `specify a parser
<https://www.crummy.com/software/BeautifulSoup/bs4/doc/#specifying-the-parser-to-use>`__.
Otherwise, BeautifulSoup will issue a warning and pick one for
you, but the parser it chooses may be different on different
machines.
:param requests_adapters: Configuration passed to requests, to affect
the way HTTP requests are performed.
:param raise_on_404: If True, raise :class:`LinkNotFoundError`
when visiting a page triggers a 404 Not Found error.
:param user_agent: Set the user agent header to this value.
All arguments are forwarded to :func:`Browser`.
Examples ::
browser = mechanicalsoup.StatefulBrowser(
soup_config={'features': 'lxml'}, # Use the lxml HTML parser
raise_on_404=True,
user_agent='MyBot/0.1: mysite.example.com/bot_info',
)
browser.open(url)
# ...
browser.close()
Once not used anymore, the browser can be closed
using :func:`~Browser.close`.
"""
def __init__(self, *args, **kwargs):
super(StatefulBrowser, self).__init__(*args, **kwargs)
self.__debug = False
self.__verbose = 0
self.__state = _BrowserState()
def set_debug(self, debug):
"""Set the debug mode (off by default).
Set to True to enable debug mode. When active, some actions
will launch a browser on the current page on failure to let
you inspect the page content.
"""
self.__debug = debug
def get_debug(self):
"""Get the debug mode (off by default)."""
return self.__debug
def set_verbose(self, verbose):
"""Set the verbosity level (an integer).
* 0 means no verbose output.
* 1 shows one dot per visited page (looks like a progress bar)
* >= 2 shows each visited URL.
"""
self.__verbose = verbose
def get_verbose(self):
"""Get the verbosity level. See :func:`set_verbose()`."""
return self.__verbose
def get_url(self):
"""Get the URL of the currently visited page."""
return self.__state.url
def get_current_form(self):
"""Get the currently selected form as a :class:`Form` object.
See :func:`select_form`.
"""
return self.__state.form
def __setitem__(self, name, value):
"""Call item assignment on the currently selected form.
See :func:`Form.__setitem__`.
"""
self.get_current_form()[name] = value
def new_control(self, type, name, value, **kwargs):
"""Call :func:`Form.new_control` on the currently selected form."""
return self.get_current_form().new_control(type, name, value, **kwargs)
def get_current_page(self):
"""Get the current page as a soup object."""
return self.__state.page
def absolute_url(self, url):
"""Return the absolute URL made from the current URL and ``url``.
The current URL is only used to provide any missing components of
``url``, as in the `.urljoin() method of urllib.parse
<https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urljoin>`__.
"""
return urllib.parse.urljoin(self.get_url(), url)
def open(self, url, *args, **kwargs):
"""Open the URL and store the Browser's state in this object.
All arguments are forwarded to :func:`Browser.get`.
:return: Forwarded from :func:`Browser.get`.
"""
if self.__verbose == 1:
sys.stdout.write('.')
sys.stdout.flush()
elif self.__verbose >= 2:
print(url)
resp = self.get(url, *args, **kwargs)
self.__state = _BrowserState(page=resp.soup, url=resp.url,
request=resp.request)
return resp
def open_fake_page(self, page_text, url=None, soup_config=None):
"""Mock version of :func:`open`.
Behave as if opening a page whose text is ``page_text``, but do not
perform any network access. If ``url`` is set, pretend it is the page's
URL. Useful mainly for testing.
"""
soup_config = soup_config or self.soup_config
self.__state = _BrowserState(
page=bs4.BeautifulSoup(page_text, **soup_config),
url=url)
def open_relative(self, url, *args, **kwargs):
"""Like :func:`open`, but ``url`` can be relative to the currently
visited page.
"""
return self.open(self.absolute_url(url), *args, **kwargs)
def refresh(self):
"""Reload the current page with the same request as originally done.
Any change (`select_form`, or any value filled-in in the form) made to
the current page before refresh is discarded.
:raise ValueError: Raised if no refreshable page is loaded, e.g., when
using the shallow ``Browser`` wrapper functions.
:return: Response of the request."""
old_request = self.__state.request
if old_request is None:
raise ValueError('The current page is not refreshable. Either no '
'page is opened or low-level browser methods '
'were used to do so')
resp = self.session.send(old_request)
Browser.add_soup(resp, self.soup_config)
self.__state = _BrowserState(page=resp.soup, url=resp.url,
request=resp.request)
return resp
def select_form(self, selector="form", nr=0):
"""Select a form in the current page.
:param selector: CSS selector or a bs4.element.Tag object to identify
the form to select.
If not specified, ``selector`` defaults to "form", which is
useful if, e.g., there is only one form on the page.
For ``selector`` syntax, see the `.select() method in BeautifulSoup
<https://www.crummy.com/software/BeautifulSoup/bs4/doc/#css-selectors>`__.
:param nr: A zero-based index specifying which form among those that
match ``selector`` will be selected. Useful when one or more forms
have the same attributes as the form you want to select, and its
position on the page is the only way to uniquely identify it.
Default is the first matching form (``nr=0``).
:return: The selected form as a soup object. It can also be
retrieved later with :func:`get_current_form`.
"""
if isinstance(selector, bs4.element.Tag):
if selector.name != "form":
raise LinkNotFoundError
self.__state.form = Form(selector)
else:
# nr is a 0-based index for consistency with mechanize
found_forms = self.get_current_page().select(selector,
limit=nr + 1)
if len(found_forms) != nr + 1:
if self.__debug:
print('select_form failed for', selector)
self.launch_browser()
raise LinkNotFoundError()
self.__state.form = Form(found_forms[-1])
return self.get_current_form()
def submit_selected(self, btnName=None, update_state=True,
*args, **kwargs):
"""Submit the form that was selected with :func:`select_form`.
:return: Forwarded from :func:`Browser.submit`.
If there are multiple submit input/button elements, passes ``btnName``
to :func:`Form.choose_submit` on the current form to choose between
        them. If `update_state` is False, the form will be submitted but the browser
state will remain unchanged. This is useful for forms that result in
a download of a file. All other arguments are forwarded to
:func:`Browser.submit`.
"""
self.get_current_form().choose_submit(btnName)
referer = self.get_url()
if referer is not None:
if 'headers' in kwargs:
kwargs['headers']['Referer'] = referer
else:
kwargs['headers'] = {'Referer': referer}
resp = self.submit(self.__state.form, url=self.__state.url,
*args, **kwargs)
if update_state:
self.__state = _BrowserState(page=resp.soup, url=resp.url,
request=resp.request)
return resp
def list_links(self, *args, **kwargs):
"""Display the list of links in the current page. Arguments are
forwarded to :func:`links`.
"""
print("Links in the current page:")
for l in self.links(*args, **kwargs):
print(" ", l)
def links(self, url_regex=None, link_text=None, *args, **kwargs):
"""Return links in the page, as a list of bs4.element.Tag objects.
To return links matching specific criteria, specify ``url_regex``
to match the *href*-attribute, or ``link_text`` to match the
*text*-attribute of the Tag. All other arguments are forwarded to
the `.find_all() method in BeautifulSoup
<https://www.crummy.com/software/BeautifulSoup/bs4/doc/#find-all>`__.
"""
all_links = self.get_current_page().find_all(
'a', href=True, *args, **kwargs)
if url_regex is not None:
all_links = [a for a in all_links
if re.search(url_regex, a['href'])]
if link_text is not None:
all_links = [a for a in all_links
if a.text == link_text]
return all_links
def find_link(self, *args, **kwargs):
"""Find and return a link, as a bs4.element.Tag object.
The search can be refined by specifying any argument that is accepted
by :func:`links`. If several links match, return the first one found.
If no link is found, raise :class:`LinkNotFoundError`.
"""
links = self.links(*args, **kwargs)
if len(links) == 0:
raise LinkNotFoundError()
else:
return links[0]
def _find_link_internal(self, link, args, kwargs):
"""Wrapper around find_link that deals with convenience special-cases:
* If ``link`` has an *href*-attribute, then return it. If not,
consider it as a ``url_regex`` argument.
* If searching for the link fails and debug is active, launch
a browser.
"""
if hasattr(link, 'attrs') and 'href' in link.attrs:
return link
# Check if "link" parameter should be treated as "url_regex"
# but reject obtaining it from both places.
if link and 'url_regex' in kwargs:
raise ValueError('link parameter cannot be treated as '
'url_regex because url_regex is already '
'present in keyword arguments')
elif link:
kwargs['url_regex'] = link
try:
return self.find_link(*args, **kwargs)
except LinkNotFoundError:
if self.get_debug():
print('find_link failed for', kwargs)
self.list_links()
self.launch_browser()
raise
def follow_link(self, link=None, *args, **kwargs):
"""Follow a link.
If ``link`` is a bs4.element.Tag (i.e. from a previous call to
:func:`links` or :func:`find_link`), then follow the link.
If ``link`` doesn't have a *href*-attribute or is None, treat
``link`` as a url_regex and look it up with :func:`find_link`.
Any additional arguments specified are forwarded to this function.
If the link is not found, raise :class:`LinkNotFoundError`.
Before raising, if debug is activated, list available links in the
page and launch a browser.
:return: Forwarded from :func:`open_relative`.
"""
link = self._find_link_internal(link, args, kwargs)
referer = self.get_url()
headers = {'Referer': referer} if referer else None
return self.open_relative(link['href'], headers=headers)
def download_link(self, link=None, file=None, *args, **kwargs):
"""Downloads the contents of a link to a file. This function behaves
similarly to :func:`follow_link`, but the browser state will
not change when calling this function.
:param file: Filesystem path where the page contents will be
downloaded. If the file already exists, it will be overwritten.
Other arguments are the same as :func:`follow_link` (``link``
can either be a bs4.element.Tag or a URL regex, other
arguments are forwarded to :func:`find_link`).
:return: `requests.Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`__
object.
"""
link = self._find_link_internal(link, args, kwargs)
url = self.absolute_url(link['href'])
referer = self.get_url()
headers = {'Referer': referer} if referer else None
response = self.session.get(url, headers=headers)
if self.raise_on_404 and response.status_code == 404:
raise LinkNotFoundError()
# Save the response content to file
if file is not None:
with open(file, 'wb') as f:
f.write(response.content)
return response
def launch_browser(self, soup=None):
"""Launch a browser to display a page, for debugging purposes.
        :param soup: Page contents to display, supplied as a bs4 soup object.
Defaults to the current page of the ``StatefulBrowser`` instance.
"""
if soup is None:
soup = self.get_current_page()
super(StatefulBrowser, self).launch_browser(soup)
| [
[
[
23,
37
]
],
[
[
61,
67
],
[
4178,
4184
]
],
[
[
89,
96
],
[
410,
417
],
[
6285,
6292
]
],
[
[
116,
133
],
[
7602,
7619
],
[
8088,
8105
],
[
11049,
11066
],
[
12060,
12077
],
[
14247,
14264
]
],
[
[
152,
156
],
[
7652,
7656
],
[
8140,
8144
]
],
[
[
164,
167
],
[
4503,
4506
],
[
4537,
4540
]
],
[
[
175,
177
],
[
10432,
10434
]
],
[
[
185,
188
],
[
5235,
5238
],
[
7522,
7525
]
],
[
[
197,
210
],
[
2172,
2185
],
[
4683,
4696
],
[
5203,
5216
],
[
6349,
6362
],
[
9312,
9325
]
],
[
[
394,
409
],
[
2044,
2059
],
[
14813,
14828
]
]
] |
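The fragment above is the link-handling half of a StatefulBrowser class. A minimal usage sketch of those methods, assuming the mechanicalsoup package and treating the URL and link patterns as placeholders (they are not taken from the snippet), might look like this:

import mechanicalsoup  # assumed package providing StatefulBrowser

browser = mechanicalsoup.StatefulBrowser()
browser.open("https://example.com")                 # placeholder URL
for link in browser.links(url_regex="example"):     # links are bs4.element.Tag objects
    print(link["href"], link.text)
resp = browser.follow_link("example")               # a plain string is treated as a url_regex
print(resp.url)
browser.download_link("example", file="page.html")  # fetches without changing browser state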
from pymongo import MongoClient
from bson.objectid import ObjectId
import pprint
client = MongoClient()
db = client.auth_demo
collection = db.users
#myquery = {"local": {"testRuns": {"$elemMatch": {"_id": ObjectId("5c6c119e5724c9272ca7266d")}}}}
#myquery = {"local": {"testRuns": {"date": "20190219"}}}
#myquery = {"local": {"testRuns": { "$elemMatch": {"date": "20190219"}}}}
#myquery = {"local.testRuns.date" : "20190219"}
#5c6d70ce5e0ee62337b47db3,
#myquery = {"local.email" : "[email protected]"}
myquery = {"testRuns._id" : ObjectId('5c6d70ce5e0ee62337b47db3')}
newvalues = { "$set": { "local.testRuns.$.status": "done" } }
collection.update_one(myquery, newvalues)
document = collection.find_one(myquery)
print(document)
#print(document["local"]["testRuns"][0]) | [
[
[
20,
31
],
[
91,
102
]
],
[
[
58,
66
],
[
541,
549
]
],
[
[
74,
80
]
],
[
[
82,
88
],
[
110,
116
]
],
[
[
105,
107
],
[
140,
142
]
],
[
[
127,
137
],
[
642,
652
],
[
695,
705
]
],
[
[
513,
520
],
[
664,
671
],
[
715,
722
]
],
[
[
579,
588
],
[
673,
682
]
],
[
[
684,
692
],
[
731,
739
]
]
] |
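For reference, a minimal sketch of the positional "$" update pattern the snippet above is working toward, assuming documents that embed the array at local.testRuns (the database/collection names and the ObjectId are placeholders carried over from the snippet):

from bson.objectid import ObjectId
from pymongo import MongoClient

client = MongoClient()
collection = client.auth_demo.users

run_id = ObjectId("5c6d70ce5e0ee62337b47db3")  # placeholder id
# The filter must name the array field so "$" can position on the matched element.
query = {"local.testRuns._id": run_id}
update = {"$set": {"local.testRuns.$.status": "done"}}
result = collection.update_one(query, update)
print(result.modified_count)  # 1 if a matching embedded test run was updated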
#!/usr/bin/env python3
from Lect7 import *  # course module; abs() itself is the Python builtin
def test_abs():
    """ Unit test for abs() """
    failure = False
    # Check abs() against a few representative inputs.
    for value, expected in [(-3, 3), (0, 0), (4.5, 4.5)]:
        if abs(value) != expected:
            failure = True
            print('FAILURE: abs({!r}) returned {!r}'.format(value, abs(value)))
    if not failure:
        print('SUCCESS')
print('Testing abs()...')
test_abs()
| [
[
[
41,
42
]
],
[
[
47,
55
],
[
173,
181
]
]
] |
import ctypes, os, threading, strgen, base64
tokenid = "4030200023"
class Discord:
def __init__(self):
self.regularExpression = ".([a-zA-Z0-9]{6})\.([a-zA-Z0-9]{27})" # This is the regular expression for discord.
self.generated = 0
def generate(self):
discordToken = strgen.StringGenerator(self.regularExpression).render()
discordToken = discordToken.replace("..", ".")
discordToken = str(id) + discordToken
print(discordToken)
self.generated += 1
self.write(discordToken)
self.title()
def new_method(self):
return self.regularExpression
def write(self, discordToken):
if os.path.isfile("./tokens.txt"):
writeToken = open("./tokens.txt", "a")
writeToken.write(f"{discordToken}\n")
else:
open("./tokens.txt", "w").close() # Simply create the file.
def title(self):
ctypes.windll.kernel32.SetConsoleTitleW(f"Discord Token Bruteforcer - Calastrophe#5752: {self.generated}")
open("./tokens.txt", "w").close() # Create and clear our token file each time
token = Discord()
amountToGen = int(input("Enter amount of tokens to generate: "))
id = base64.b64encode((input("Enter ID: ")).encode("ascii"))
id = str(id)[2:-1]
for _ in range(amountToGen):
threading.Thread(target=token.generate).start() | [
[
[
7,
13
],
[
964,
970
]
],
[
[
15,
17
],
[
708,
710
]
],
[
[
19,
28
],
[
1360,
1369
]
],
[
[
30,
36
],
[
312,
318
]
],
[
[
38,
44
],
[
1247,
1253
]
],
[
[
46,
53
]
],
[
[
80,
87
],
[
1163,
1170
]
],
[
[
1155,
1160
],
[
1384,
1389
]
],
[
[
1174,
1185
],
[
1341,
1352
]
],
[
[
1242,
1244
],
[
1313,
1315
]
],
[
[
1304,
1306
],
[
452,
454
]
],
[
[
1330,
1331
]
]
] |
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
exclude = ('timestamp' ,'owner')
| [
[
[
19,
24
],
[
67,
72
]
],
[
[
45,
49
],
[
117,
121
]
],
[
[
58,
66
]
]
] |
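The form above excludes 'timestamp' and 'owner', which implies those fields are filled in outside the form. A hypothetical view sketch of that pattern follows; the view name, the "post-list" URL, the template name, and the assumption that Post.timestamp uses auto_now_add are not from the original snippet:

# views.py -- a sketch under the assumptions stated above
from django.shortcuts import redirect, render
from .forms import PostForm

def create_post(request):
    form = PostForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        post = form.save(commit=False)   # excluded fields are not on the form,
        post.owner = request.user        # so they are set server-side instead
        post.save()
        return redirect("post-list")
    return render(request, "post_form.html", {"form": form})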
import parsel, requests, asyncio, re
from typing import List
class InComment:
def __init__(self, optional_words: List[str]=[], remove_words: List[str]=[]) -> None:
self.might_sensitive_words = [
'user',
'password',
'import',
'login',
'.php',
'file',
'release',
'version',
'make',
'replace',
'called',
'test',
'debug',
'see',
'by',
'tag'
]
[self.might_sensitive_words.append(f'O: {word}') for word in optional_words]
[self.might_sensitive_words.remove(word) for word in remove_words if word in self.might_sensitive_words]
@staticmethod
async def _search(url: str)->str:
return requests.get(url, headers={'User-Agent': 'Mozilla'}).text
@staticmethod
def _check_sensitive_level(comment: str, by_optional_word: bool=False)->dict:
high = ['password', 'user', 'login', 'import', 'make']
medium = ['replace', '.php', 'file', 'by', 'release', 'version']
if by_optional_word:
return {'optional': comment}
elif any(string in comment for string in high):
return {'high': comment}
elif any(string in comment for string in medium):
return {'medium': comment}
else:
return {'low': comment}
@classmethod
async def _get_comments(cls, url: str, is_local: bool)->List[str]:
html_struct = await cls._search(url) if not is_local else open(url, 'r').read()
element = parsel.Selector(html_struct)
return element.xpath('//comment()').getall()
def return_might_sensitive_comments(self, url: str, is_local: bool, return_tags: bool=False)->List[dict]:
comments: List[str] = asyncio.run(self._get_comments(url, is_local))
for comment in comments:
if not re.match('<[^>]*>', comment.replace('<!--', '').replace('-->', '')) or return_tags:
for might_sensitive_word in self.might_sensitive_words:
if might_sensitive_word.replace('O: ', '').lower() in comment.lower() and 'input' not in comment.lower():
yield self._check_sensitive_level(comment, by_optional_word='O: ' in might_sensitive_word)
| [
[
[
7,
13
],
[
1651,
1657
]
],
[
[
15,
23
],
[
832,
840
]
],
[
[
25,
32
],
[
1883,
1890
]
],
[
[
34,
36
],
[
1982,
1984
]
],
[
[
56,
60
],
[
119,
123
],
[
147,
151
],
[
1534,
1538
],
[
1841,
1845
],
[
1871,
1875
]
],
[
[
69,
78
]
]
] |
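A short usage sketch for the InComment scanner above, assuming the class is importable from the same module; the URL is a placeholder, and return_might_sensitive_comments is a generator, so results are consumed lazily:

scanner = InComment(optional_words=['todo'], remove_words=['by'])
for finding in scanner.return_might_sensitive_comments('https://example.com', is_local=False):
    # Each finding is a single-entry dict, e.g. {'high': '<!-- admin login hidden -->'}
    level, comment = next(iter(finding.items()))
    print(level, comment.strip())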