code (string, lengths 22-1.05M) | apis (list, lengths 1-3.31k) | extract_api (string, lengths 75-3.25M)
---|---|---
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/ui_ShowResultDialog.ui'
#
# Created: Sat May 16 17:05:43 2015
# by: PyQt5 UI code generator 5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(400, 300)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.lb_image = ImageLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_image.sizePolicy().hasHeightForWidth())
self.lb_image.setSizePolicy(sizePolicy)
self.lb_image.setMinimumSize(QtCore.QSize(100, 100))
self.lb_image.setAlignment(QtCore.Qt.AlignCenter)
self.lb_image.setObjectName("lb_image")
self.verticalLayout.addWidget(self.lb_image)
self.hLayout = QtWidgets.QHBoxLayout()
self.hLayout.setObjectName("hLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.hLayout.addItem(spacerItem)
self.btn_save = QtWidgets.QPushButton(Dialog)
self.btn_save.setObjectName("btn_save")
self.hLayout.addWidget(self.btn_save)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.hLayout.addItem(spacerItem1)
self.verticalLayout.addLayout(self.hLayout)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.lb_image.setText(_translate("Dialog", "Image Label"))
self.btn_save.setText(_translate("Dialog", "Save it"))
from widgets.ImageLabel import ImageLabel
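# Usage sketch (illustrative, not part of the generated file): a pyuic-style
# form class is applied to a plain QDialog roughly as follows.
#     app = QtWidgets.QApplication([])
#     dialog = QtWidgets.QDialog()
#     ui = Ui_Dialog()
#     ui.setupUi(dialog)
#     dialog.show()
#     app.exec_()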
| [
"widgets.ImageLabel.ImageLabel",
"PyQt5.QtWidgets.QSizePolicy",
"PyQt5.QtWidgets.QSpacerItem",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtCore.QSize"
]
| [((443, 472), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['Dialog'], {}), '(Dialog)\n', (464, 472), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((557, 575), 'widgets.ImageLabel.ImageLabel', 'ImageLabel', (['Dialog'], {}), '(Dialog)\n', (567, 575), False, 'from widgets.ImageLabel import ImageLabel\n'), ((597, 689), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Expanding'], {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Expanding)\n', (618, 689), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1145, 1168), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1166, 1168), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1236, 1334), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (1257, 1334), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1395, 1424), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Dialog'], {}), '(Dialog)\n', (1416, 1424), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1541, 1639), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (1562, 1639), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1773, 1818), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Dialog'], {}), '(Dialog)\n', (1810, 1818), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((939, 961), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(100)', '(100)'], {}), '(100, 100)\n', (951, 961), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
"""Extension loader for filetype handlers.
The extension objects provided by MIMEExtensionLoader objects have four
attributes: parse, embed, add_options, and update_settings. The first two
are used as handlers for supporting the MIME type as primary and embedded
resources. The last two are (currently) only used for printing.
"""
__version__ = '$Revision: 2.4 $'
from . import extloader
import string
class MIMEExtensionLoader(extloader.ExtensionLoader):
def find(self, name):
new_name = string.replace(name, "-", "_")
major, minor = tuple(string.split(new_name, "/"))
if minor:
modname = "%s_%s" % (major, minor)
else:
modname = major
mod = self.find_module(modname)
ext = None
if not mod and modname != major:
ext = self.get(major + "/")
elif mod:
ext = MIMETypeExtension(name, mod, modname)
return ext
class MIMETypeExtension:
def __init__(self, type, mod, modname):
self.type = type
self.__load_attr(mod, "parse_" + modname, "parse")
self.__load_attr(mod, "embed_" + modname, "embed")
self.__load_attr(mod, "add_options")
self.__load_attr(mod, "update_settings")
def __repr__(self):
classname = self.__class__.__name__
modulename = self.__class__.__module__
if self.parse and self.embed:
flags = " [displayable, embeddable]"
elif self.embed:
flags = " [embeddable]"
elif self.parse:
flags = " [displayable]"
else:
# not very useful, now is it?
flags = ""
return "<%s.%s for %s%s>" % (modulename, classname, self.type, flags)
def __load_attr(self, mod, name, load_as=None):
load_as = load_as or name
if hasattr(mod, name):
v = getattr(mod, name)
else:
v = None
setattr(self, load_as, v)
| [
"string.split",
"string.replace"
]
| [((506, 536), 'string.replace', 'string.replace', (['name', '"""-"""', '"""_"""'], {}), "(name, '-', '_')\n", (520, 536), False, 'import string\n'), ((566, 593), 'string.split', 'string.split', (['new_name', '"""/"""'], {}), "(new_name, '/')\n", (578, 593), False, 'import string\n')] |
#!/usr/bin/env python3
# Project: VUT FIT SUI Project - Dice Wars
# Authors:
# - <NAME> <<EMAIL>>
# - <NAME> <<EMAIL>>
# - <NAME> <<EMAIL>>
# - <NAME> <<EMAIL>>
# Year: 2020
# Description: Generates game configurations.
import random
import sys
from argparse import ArgumentParser
import time
from signal import signal, SIGCHLD
from utils import run_ai_only_game, BoardDefinition
parser = ArgumentParser(prog='Dice_Wars')
parser.add_argument('-p', '--port', help="Server port", type=int, default=5005)
parser.add_argument('-a', '--address', help="Server address", default='127.0.0.1')
procs = []
def signal_handler(signum, frame):
""" Handler for SIGCHLD signal that terminates server and clients. """
for p in procs:
try:
p.kill()
except ProcessLookupError:
pass
PLAYING_AIs = [
'xkolar71_orig',
'xkolar71_2',
'xkolar71_3',
'xkolar71_4',
]
def board_definitions():
while True:
random.seed(int(time.time()))
yield BoardDefinition(random.randint(1, 10 ** 10), random.randint(1, 10 ** 10), random.randint(1, 10 ** 10))
def main():
args = parser.parse_args()
signal(SIGCHLD, signal_handler)
boards_played = 0
try:
for board_definition in board_definitions():
boards_played += 1
run_ai_only_game(
args.port, args.address, procs, PLAYING_AIs,
board_definition,
fixed=random.randint(1, 10 ** 10),
client_seed=random.randint(1, 10 ** 10),
debug=True, logdir='logs',
)
print(f'Played {boards_played} games.', file=sys.stderr)
except (Exception, KeyboardInterrupt) as e:
sys.stderr.write("Breaking the tournament because of {}\n".format(repr(e)))
for p in procs:
p.kill()
raise
if __name__ == '__main__':
main()
| [
"signal.signal",
"time.time",
"random.randint",
"argparse.ArgumentParser"
]
| [((414, 446), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'prog': '"""Dice_Wars"""'}), "(prog='Dice_Wars')\n", (428, 446), False, 'from argparse import ArgumentParser\n'), ((1170, 1201), 'signal.signal', 'signal', (['SIGCHLD', 'signal_handler'], {}), '(SIGCHLD, signal_handler)\n', (1176, 1201), False, 'from signal import signal, SIGCHLD\n'), ((989, 1000), 'time.time', 'time.time', ([], {}), '()\n', (998, 1000), False, 'import time\n'), ((1033, 1060), 'random.randint', 'random.randint', (['(1)', '(10 ** 10)'], {}), '(1, 10 ** 10)\n', (1047, 1060), False, 'import random\n'), ((1062, 1089), 'random.randint', 'random.randint', (['(1)', '(10 ** 10)'], {}), '(1, 10 ** 10)\n', (1076, 1089), False, 'import random\n'), ((1091, 1118), 'random.randint', 'random.randint', (['(1)', '(10 ** 10)'], {}), '(1, 10 ** 10)\n', (1105, 1118), False, 'import random\n'), ((1466, 1493), 'random.randint', 'random.randint', (['(1)', '(10 ** 10)'], {}), '(1, 10 ** 10)\n', (1480, 1493), False, 'import random\n'), ((1523, 1550), 'random.randint', 'random.randint', (['(1)', '(10 ** 10)'], {}), '(1, 10 ** 10)\n', (1537, 1550), False, 'import random\n')] |
import functools
import sys
from contextlib import contextmanager
import pytest
_orig_trace = None
def pytest_configure():
global _orig_trace
_orig_trace = sys.gettrace()
@pytest.fixture(scope="session", autouse=True)
def term():
"""Configure TERM for predictable output from Pygments."""
from _pytest.monkeypatch import MonkeyPatch
m = MonkeyPatch()
m.setenv("TERM", "xterm-256color")
yield m
m.undo()
# if _orig_trace and not hasattr(sys, "pypy_version_info"):
# Fails with PyPy2 (https://travis-ci.org/antocuni/pdb/jobs/509624590)?!
@pytest.fixture(autouse=True)
def restore_settrace(monkeypatch):
"""(Re)store sys.gettrace after test run.
This is required to re-enable coverage tracking.
"""
assert sys.gettrace() is _orig_trace
orig_settrace = sys.settrace
# Wrap sys.settrace to restore original tracing function (coverage)
# with `sys.settrace(None)`.
def settrace(func):
if func is None:
orig_settrace(_orig_trace)
else:
orig_settrace(func)
monkeypatch.setattr("sys.settrace", settrace)
yield
newtrace = sys.gettrace()
if newtrace is not _orig_trace:
sys.settrace(_orig_trace)
assert newtrace is None
@pytest.fixture(scope="session")
def _tmphome_path(tmpdir_factory):
return tmpdir_factory.mktemp("tmphome")
@pytest.fixture(autouse=sys.version_info < (3, 6))
def tmphome(request, monkeypatch):
"""Set up HOME in a temporary directory.
This ignores any real ~/.pdbrc.py, and also seems to be
required with linecache on py27, where it would otherwise read contents
from ~/.pdbrc.
"""
# Use tmpdir from testdir, if it is used.
if "testdir" in request.fixturenames:
tmpdir = request.getfixturevalue("testdir").tmpdir
else:
tmpdir = request.getfixturevalue("_tmphome_path")
monkeypatch.setenv("HOME", str(tmpdir))
monkeypatch.setenv("USERPROFILE", str(tmpdir))
with tmpdir.as_cwd():
yield tmpdir
@pytest.fixture(params=("pyrepl", "readline"), scope="session")
def readline_param(request):
from _pytest.monkeypatch import MonkeyPatch
m = MonkeyPatch()
if request.param == "pyrepl":
try:
import pyrepl.readline # noqa: F401
except ImportError as exc:
pytest.skip(msg="pyrepl not available: {}".format(exc))
m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", True)
else:
m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", False)
return request.param
@pytest.fixture
def monkeypatch_readline(request, monkeypatch, readline_param):
"""Patch readline to return given results."""
def inner(line, begidx, endidx):
if readline_param == "pyrepl":
readline = "pyrepl.readline"
else:
assert readline_param == "readline"
readline = "readline"
monkeypatch.setattr("%s.get_line_buffer" % readline, lambda: line)
monkeypatch.setattr("%s.get_begidx" % readline, lambda: begidx)
monkeypatch.setattr("%s.get_endidx" % readline, lambda: endidx)
return inner
@pytest.fixture
def monkeypatch_pdb_methods(monkeypatch):
def mock(method, *args, **kwargs):
print("=== %s(%s, %s)" % (method, args, kwargs))
for mock_method in ("set_trace", "set_continue"):
monkeypatch.setattr(
"pdb.pdb.Pdb.%s" % mock_method, functools.partial(mock, mock_method)
)
@pytest.fixture
def monkeypatch_importerror(monkeypatch):
@contextmanager
def cm(mocked_imports):
orig_import = __import__
def import_mock(name, *args):
if name in mocked_imports:
raise ImportError
return orig_import(name, *args)
with monkeypatch.context() as m:
if sys.version_info >= (3,):
m.setattr('builtins.__import__', import_mock)
else:
m.setattr('__builtin__.__import__', import_mock)
yield m
return cm
| [
"_pytest.monkeypatch.MonkeyPatch",
"functools.partial",
"sys.gettrace",
"pytest.fixture",
"sys.settrace"
]
| [((186, 231), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (200, 231), False, 'import pytest\n'), ((578, 606), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (592, 606), False, 'import pytest\n'), ((1262, 1293), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1276, 1293), False, 'import pytest\n'), ((1376, 1425), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(sys.version_info < (3, 6))'}), '(autouse=sys.version_info < (3, 6))\n', (1390, 1425), False, 'import pytest\n'), ((2029, 2091), 'pytest.fixture', 'pytest.fixture', ([], {'params': "('pyrepl', 'readline')", 'scope': '"""session"""'}), "(params=('pyrepl', 'readline'), scope='session')\n", (2043, 2091), False, 'import pytest\n'), ((168, 182), 'sys.gettrace', 'sys.gettrace', ([], {}), '()\n', (180, 182), False, 'import sys\n'), ((364, 377), '_pytest.monkeypatch.MonkeyPatch', 'MonkeyPatch', ([], {}), '()\n', (375, 377), False, 'from _pytest.monkeypatch import MonkeyPatch\n'), ((1142, 1156), 'sys.gettrace', 'sys.gettrace', ([], {}), '()\n', (1154, 1156), False, 'import sys\n'), ((2178, 2191), '_pytest.monkeypatch.MonkeyPatch', 'MonkeyPatch', ([], {}), '()\n', (2189, 2191), False, 'from _pytest.monkeypatch import MonkeyPatch\n'), ((761, 775), 'sys.gettrace', 'sys.gettrace', ([], {}), '()\n', (773, 775), False, 'import sys\n'), ((1201, 1226), 'sys.settrace', 'sys.settrace', (['_orig_trace'], {}), '(_orig_trace)\n', (1213, 1226), False, 'import sys\n'), ((3435, 3471), 'functools.partial', 'functools.partial', (['mock', 'mock_method'], {}), '(mock, mock_method)\n', (3452, 3471), False, 'import functools\n')] |
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from mock import patch
from auto_nag.people import People
from auto_nag.round_robin import BadFallback, RoundRobin
class TestRoundRobin(unittest.TestCase):
config = {
'doc': 'The triagers need to have a \'Fallback\' entry.',
'triagers': {
'A B': {'bzmail': '<EMAIL>'},
'C D': {'bzmail': '<EMAIL>'},
'E F': {'bzmail': '<EMAIL>'},
'Fallback': {'bzmail': '<EMAIL>'},
},
'components': {'P1::C1': 'default', 'P2::C2': 'default', 'P3::C3': 'special'},
'default': {
'doc': 'All the dates are the duty end dates.',
'2019-02-21': 'A B',
'2019-02-28': 'C D',
'2019-03-07': 'E F',
},
'special': {
'doc': 'All the dates are the duty end dates.',
'2019-02-21': 'E F',
'2019-02-28': 'A B',
'2019-03-07': 'C D',
},
}
people = People(
[
{
'mail': '<EMAIL>',
'cn': 'G H',
'ismanager': 'FALSE',
'title': 'nothing',
}
]
)
def mk_bug(self, pc):
p, c = pc.split('::')
return {
'product': p,
'component': c,
'triage_owner': '<EMAIL>',
'triage_owner_detail': {'nick': 'ij'},
}
@staticmethod
def _get_nick(x, bzmail):
return bzmail.split('@')[0]
def test_get(self):
with patch.object(RoundRobin, 'get_nick', new=TestRoundRobin._get_nick):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-17') == (
'<EMAIL>',
'ab',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-17') == (
'<EMAIL>',
'ab',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-17') == (
'<EMAIL>',
'ef',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-24') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-24') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-24') == (
'<EMAIL>',
'ab',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-28') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-28') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-28') == (
'<EMAIL>',
'ab',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-03-05') == (
'<EMAIL>',
'ef',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-03-05') == (
'<EMAIL>',
'ef',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-03-05') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-03-08') == (
'<EMAIL>',
'gh',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-03-08') == (
'<EMAIL>',
'gh',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-03-08') == (
'<EMAIL>',
'gh',
)
assert rr.get(self.mk_bug('Foo::Bar'), '2019-03-01') == (
'<EMAIL>',
'ij',
)
def test_get_who_to_nag(self):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
assert rr.get_who_to_nag('2019-02-25') == {}
assert rr.get_who_to_nag('2019-02-28') == {'<EMAIL>': ['']}
assert rr.get_who_to_nag('2019-03-05') == {'<EMAIL>': ['']}
assert rr.get_who_to_nag('2019-03-07') == {'<EMAIL>': ['']}
assert rr.get_who_to_nag('2019-03-10') == {'<EMAIL>': ['']}
with patch.object(RoundRobin, 'is_mozilla', return_value=False):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
self.assertRaises(BadFallback, rr.get_who_to_nag, '2019-03-01')
| [
"mock.patch.object",
"auto_nag.round_robin.RoundRobin",
"auto_nag.people.People"
]
| [((1169, 1257), 'auto_nag.people.People', 'People', (["[{'mail': '<EMAIL>', 'cn': 'G H', 'ismanager': 'FALSE', 'title': 'nothing'}]"], {}), "([{'mail': '<EMAIL>', 'cn': 'G H', 'ismanager': 'FALSE', 'title':\n 'nothing'}])\n", (1175, 1257), False, 'from auto_nag.people import People\n'), ((4065, 4141), 'auto_nag.round_robin.RoundRobin', 'RoundRobin', ([], {'rr': "{'team': TestRoundRobin.config}", 'people': 'TestRoundRobin.people'}), "(rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people)\n", (4075, 4141), False, 'from auto_nag.round_robin import BadFallback, RoundRobin\n'), ((1720, 1786), 'mock.patch.object', 'patch.object', (['RoundRobin', '"""get_nick"""'], {'new': 'TestRoundRobin._get_nick'}), "(RoundRobin, 'get_nick', new=TestRoundRobin._get_nick)\n", (1732, 1786), False, 'from mock import patch\n'), ((1805, 1881), 'auto_nag.round_robin.RoundRobin', 'RoundRobin', ([], {'rr': "{'team': TestRoundRobin.config}", 'people': 'TestRoundRobin.people'}), "(rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people)\n", (1815, 1881), False, 'from auto_nag.round_robin import BadFallback, RoundRobin\n'), ((4504, 4562), 'mock.patch.object', 'patch.object', (['RoundRobin', '"""is_mozilla"""'], {'return_value': '(False)'}), "(RoundRobin, 'is_mozilla', return_value=False)\n", (4516, 4562), False, 'from mock import patch\n'), ((4581, 4657), 'auto_nag.round_robin.RoundRobin', 'RoundRobin', ([], {'rr': "{'team': TestRoundRobin.config}", 'people': 'TestRoundRobin.people'}), "(rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people)\n", (4591, 4657), False, 'from auto_nag.round_robin import BadFallback, RoundRobin\n')] |
# should re-write compiled functions to take a local and global dict
# as input.
from __future__ import absolute_import, print_function
import sys
import os
from . import ext_tools
from . import catalog
from . import common_info
from numpy.core.multiarray import _get_ndarray_c_version
ndarray_api_version = '/* NDARRAY API VERSION %x */' % (_get_ndarray_c_version(),)
# not an easy way for the user_path_list to come in here.
# the PYTHONCOMPILED environment variable offers the most hope.
function_catalog = catalog.catalog()
class inline_ext_function(ext_tools.ext_function):
# Some specialization is needed for inline extension functions
def function_declaration_code(self):
code = 'static PyObject* %s(PyObject*self, PyObject* args)\n{\n'
return code % self.name
def template_declaration_code(self):
code = 'template<class T>\n' \
'static PyObject* %s(PyObject*self, PyObject* args)\n{\n'
return code % self.name
def parse_tuple_code(self):
""" Create code block for PyArg_ParseTuple. Variable declarations
for all PyObjects are done also.
This code got a lot uglier when I added local_dict...
"""
declare_return = 'py::object return_val;\n' \
'int exception_occurred = 0;\n' \
'PyObject *py__locals = NULL;\n' \
'PyObject *py__globals = NULL;\n'
py_objects = ', '.join(self.arg_specs.py_pointers())
if py_objects:
declare_py_objects = 'PyObject ' + py_objects + ';\n'
else:
declare_py_objects = ''
py_vars = ' = '.join(self.arg_specs.py_variables())
if py_vars:
init_values = py_vars + ' = NULL;\n\n'
else:
init_values = ''
parse_tuple = 'if(!PyArg_ParseTuple(args,"OO:compiled_func",'\
'&py__locals,'\
'&py__globals))\n'\
' return NULL;\n'
return declare_return + declare_py_objects + \
init_values + parse_tuple
def arg_declaration_code(self):
"""Return the declaration code as a string."""
arg_strings = [arg.declaration_code(inline=1)
for arg in self.arg_specs]
return "".join(arg_strings)
def arg_cleanup_code(self):
"""Return the cleanup code as a string."""
arg_strings = [arg.cleanup_code() for arg in self.arg_specs]
return "".join(arg_strings)
def arg_local_dict_code(self):
"""Return the code to create the local dict as a string."""
arg_strings = [arg.local_dict_code() for arg in self.arg_specs]
return "".join(arg_strings)
def function_code(self):
from .ext_tools import indent
decl_code = indent(self.arg_declaration_code(),4)
cleanup_code = indent(self.arg_cleanup_code(),4)
function_code = indent(self.code_block,4)
# local_dict_code = indent(self.arg_local_dict_code(),4)
try_code = \
' try \n' \
' { \n' \
'#if defined(__GNUC__) || defined(__ICC)\n' \
' PyObject* raw_locals __attribute__ ((unused));\n' \
' PyObject* raw_globals __attribute__ ((unused));\n' \
'#else\n' \
' PyObject* raw_locals;\n' \
' PyObject* raw_globals;\n' \
'#endif\n' \
' raw_locals = py_to_raw_dict(py__locals,"_locals");\n' \
' raw_globals = py_to_raw_dict(py__globals,"_globals");\n' \
' /* argument conversion code */ \n' \
+ decl_code + \
' /* inline code */ \n' \
+ function_code + \
' /*I would like to fill in changed locals and globals here...*/ \n' \
' }\n'
catch_code = "catch(...) \n" \
"{ \n" + \
" return_val = py::object(); \n" \
" exception_occurred = 1; \n" \
"} \n"
return_code = " /* cleanup code */ \n" + \
cleanup_code + \
" if(!(PyObject*)return_val && !exception_occurred)\n" \
" {\n \n" \
" return_val = Py_None; \n" \
" }\n \n" \
" return return_val.disown(); \n" \
"} \n"
all_code = self.function_declaration_code() + \
indent(self.parse_tuple_code(),4) + \
try_code + \
indent(catch_code,4) + \
return_code
return all_code
def python_function_definition_code(self):
args = (self.name, self.name)
function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS},\n' % args
return function_decls
class inline_ext_module(ext_tools.ext_module):
def __init__(self,name,compiler=''):
ext_tools.ext_module.__init__(self,name,compiler)
self._build_information.append(common_info.inline_info())
function_cache = {}
def inline(code,arg_names=[],local_dict=None, global_dict=None,
force=0,
compiler='',
verbose=0,
support_code=None,
headers=[],
customize=None,
type_converters=None,
auto_downcast=1,
newarr_converter=0,
**kw):
"""
Inline C/C++ code within Python scripts.
``inline()`` compiles and executes C/C++ code on the fly. Variables
in the local and global Python scope are also available in the
C/C++ code. Values are passed to the C/C++ code by assignment
much like variables passed are passed into a standard Python
function. Values are returned from the C/C++ code through a
special argument called return_val. Also, the contents of
mutable objects can be changed within the C/C++ code and the
changes remain after the C code exits and returns to Python.
inline has quite a few options as listed below. Also, the keyword
arguments for distutils extension modules are accepted to
specify extra information needed for compiling.
Parameters
----------
code : string
A string of valid C++ code. It should not specify a return
statement. Instead it should assign results that need to be
returned to Python in the `return_val`.
arg_names : [str], optional
A list of Python variable names that should be transferred from
Python into the C/C++ code. It defaults to an empty list.
local_dict : dict, optional
If specified, it is a dictionary of values that should be used as
the local scope for the C/C++ code. If local_dict is not
specified the local dictionary of the calling function is used.
global_dict : dict, optional
If specified, it is a dictionary of values that should be used as
the global scope for the C/C++ code. If `global_dict` is not
specified, the global dictionary of the calling function is used.
force : {0, 1}, optional
If 1, the C++ code is compiled every time inline is called. This
is really only useful for debugging, and probably only useful if
you're editing `support_code` a lot.
compiler : str, optional
The name of compiler to use when compiling. On windows, it
understands 'msvc' and 'gcc' as well as all the compiler names
understood by distutils. On Unix, it'll only understand the
values understood by distutils. (I should add 'gcc' though to
this).
On windows, the compiler defaults to the Microsoft C++ compiler.
If this isn't available, it looks for mingw32 (the gcc compiler).
On Unix, it'll probably use the same compiler that was used when
compiling Python. Cygwin's behavior should be similar.
verbose : {0,1,2}, optional
Specifies how much information is printed during the compile
phase of inlining code. 0 is silent (except on windows with msvc
where it still prints some garbage). 1 informs you when compiling
starts, finishes, and how long it took. 2 prints out the command
lines for the compilation process and can be useful if you're having
problems getting code to work. It's handy for finding the name of
the .cpp file if you need to examine it. verbose has no effect if
the compilation isn't necessary.
support_code : str, optional
A string of valid C++ code declaring extra code that might be
needed by your compiled function. This could be declarations of
functions, classes, or structures.
headers : [str], optional
A list of strings specifying header files to use when compiling
the code. The list might look like ``["<vector>","'my_header'"]``.
Note that the header strings need to be in a form than can be
pasted at the end of a ``#include`` statement in the C++ code.
customize : base_info.custom_info, optional
An alternative way to specify `support_code`, `headers`, etc. needed
by the function. See :mod:`scipy.weave.base_info` for more
details. (not sure this'll be used much).
type_converters : [type converters], optional
These guys are what convert Python data types to C/C++ data types.
If you'd like to use a different set of type conversions than the
default, specify them here. Look in the type conversions section
of the main documentation for examples.
auto_downcast : {1,0}, optional
This only affects functions that have numpy arrays as input
variables. Setting this to 1 will cause all floating point values
to be cast as float instead of double if all the Numeric arrays
are of type float. If even one of the arrays has type double or
double complex, all variables maintain their standard
types.
newarr_converter : int, optional
Unused.
Other Parameters
----------------
Relevant :mod:`distutils` keywords. These are duplicated from <NAME>'s
:class:`distutils.extension.Extension` class for convenience:
sources : [string]
List of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
.. note:: The `module_path` file is always prepended to this list.
include_dirs : [string]
List of directories to search for C/C++ header files (in Unix
form for portability).
define_macros : [(name : string, value : string|None)]
List of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line).
undef_macros : [string]
List of macros to undefine explicitly.
library_dirs : [string]
List of directories to search for C/C++ libraries at link time.
libraries : [string]
List of library names (not filenames or paths) to link against.
runtime_library_dirs : [string]
List of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded).
extra_objects : [string]
List of extra files to link with (e.g. object files not implied
by 'sources', static libraries that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
Any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
Any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
List of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
Any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
List of files that the extension depends on.
language : string
Extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
See Also
--------
distutils.extension.Extension : Describes additional parameters.
"""
# this grabs the local variables from the *previous* call
# frame -- that is the locals from the function that called
# inline.
global function_catalog
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
if force:
module_dir = global_dict.get('__file__',None)
func = compile_function(code,arg_names,local_dict,
global_dict,module_dir,
compiler=compiler,
verbose=verbose,
support_code=support_code,
headers=headers,
customize=customize,
type_converters=type_converters,
auto_downcast=auto_downcast,
**kw)
function_catalog.add_function(code,func,module_dir)
results = attempt_function_call(code,local_dict,global_dict)
else:
# 1. try local cache
try:
results = apply(function_cache[code],(local_dict,global_dict))
return results
except TypeError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
except KeyError:
pass
# 2. try function catalog
try:
results = attempt_function_call(code,local_dict,global_dict)
# 3. build the function
except ValueError:
# compile the library
module_dir = global_dict.get('__file__',None)
func = compile_function(code,arg_names,local_dict,
global_dict,module_dir,
compiler=compiler,
verbose=verbose,
support_code=support_code,
headers=headers,
customize=customize,
type_converters=type_converters,
auto_downcast=auto_downcast,
**kw)
function_catalog.add_function(code,func,module_dir)
results = attempt_function_call(code,local_dict,global_dict)
return results
def attempt_function_call(code,local_dict,global_dict):
# we try 3 levels here -- a local cache first, then the
# catalog cache, and then persistent catalog.
#
global function_catalog
# 1. try local cache
try:
results = apply(function_cache[code],(local_dict,global_dict))
return results
except TypeError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
except KeyError:
pass
# 2. try catalog cache.
function_list = function_catalog.get_functions_fast(code)
for func in function_list:
try:
results = apply(func,(local_dict,global_dict))
function_catalog.fast_cache(code,func)
function_cache[code] = func
return results
except TypeError as msg: # should specify argument types here.
# This should really have its own error type, instead of
# checking the beginning of the message, but I don't know
# how to define that yet.
msg = str(msg)
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
# 3. try persistent catalog
module_dir = global_dict.get('__file__',None)
function_list = function_catalog.get_functions(code,module_dir)
for func in function_list:
try:
results = apply(func,(local_dict,global_dict))
function_catalog.fast_cache(code,func)
function_cache[code] = func
return results
except: # should specify argument types here.
pass
# if we get here, the function wasn't found
raise ValueError('function with correct signature not found')
def inline_function_code(code,arg_names,local_dict=None,
global_dict=None,auto_downcast=1,
type_converters=None,compiler=''):
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
ext_func = inline_ext_function('compiled_func',code,arg_names,
local_dict,global_dict,auto_downcast,
type_converters=type_converters)
from . import build_tools
compiler = build_tools.choose_compiler(compiler)
ext_func.set_compiler(compiler)
return ext_func.function_code()
def compile_function(code,arg_names,local_dict,global_dict,
module_dir,
compiler='',
verbose=1,
support_code=None,
headers=[],
customize=None,
type_converters=None,
auto_downcast=1,
**kw):
# figure out where to store and what to name the extension module
# that will contain the function.
# storage_dir = catalog.intermediate_dir()
code = ndarray_api_version + '\n' + code
module_path = function_catalog.unique_module_name(code, module_dir)
storage_dir, module_name = os.path.split(module_path)
mod = inline_ext_module(module_name,compiler)
# create the function. This relies on the auto_downcast and
# type factories setting
ext_func = inline_ext_function('compiled_func',code,arg_names,
local_dict,global_dict,auto_downcast,
type_converters=type_converters)
mod.add_function(ext_func)
# if customize (a custom_info object), then set the module customization.
if customize:
mod.customize = customize
# add the extra "support code" needed by the function to the module.
if support_code:
mod.customize.add_support_code(support_code)
# add the extra headers needed by the function to the module.
for header in headers:
mod.customize.add_header(header)
# it's nice to let the users know when anything gets compiled, as the
# slowdown is very noticeable.
if verbose > 0:
print('<weave: compiling>')
# compile code in correct location, with the given compiler and verbosity
# setting. All input keywords are passed through to distutils
mod.compile(location=storage_dir,compiler=compiler,
verbose=verbose, **kw)
# import the module and return the function. Make sure
# the directory where it lives is in the python path.
try:
sys.path.insert(0,storage_dir)
exec('import ' + module_name)
func = eval(module_name+'.compiled_func')
finally:
del sys.path[0]
return func
| [
"numpy.core.multiarray._get_ndarray_c_version",
"sys.path.insert",
"sys._getframe",
"os.path.split"
]
| [((19930, 19956), 'os.path.split', 'os.path.split', (['module_path'], {}), '(module_path)\n', (19943, 19956), False, 'import os\n'), ((344, 368), 'numpy.core.multiarray._get_ndarray_c_version', '_get_ndarray_c_version', ([], {}), '()\n', (366, 368), False, 'from numpy.core.multiarray import _get_ndarray_c_version\n'), ((13885, 13900), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (13898, 13900), False, 'import sys\n'), ((18722, 18737), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (18735, 18737), False, 'import sys\n'), ((21298, 21329), 'sys.path.insert', 'sys.path.insert', (['(0)', 'storage_dir'], {}), '(0, storage_dir)\n', (21313, 21329), False, 'import sys\n')] |
import pathlib
import yaml
documentations = {"Our Platform": "QuantConnect-Platform-2.0.0.yaml",
"Alpha Streams": "QuantConnect-Alpha-0.8.yaml"}
def RequestTable(api_call, params):
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{api_call}</code> Method</th>\n</tr>\n</thead>'
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n{\n'
for item in params:
example_ = "/"
description_ = "Optional. " if "required" not in item or not item["required"] else ""
description_ += item["description"]
if description_[-1] != ".":
description_ += "."
if "type" in item["schema"]:
type_ = item["schema"]["type"]
else:
type_ = item["schema"]["$ref"].split("/")[-1]
if "minimum" in item["schema"]:
description_ += f' Minimum: {item["schema"]["minimum"]}'
example_ = item["schema"]["minimum"]
elif "maximum" in item["schema"]:
description_ += f' Maximum: {item["schema"]["maximum"]}'
example_ = item["schema"]["maximum"]
elif "default" in item["schema"]:
description_ += f' Default: {item["schema"]["default"]}'
example_ = item["schema"]["default"]
if type_ == "array":
array_obj = item["schema"]["items"]
if "$ref" in array_obj:
type_ = array_obj["$ref"].split("/")[-1] + " Array"
ref = array_obj["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
request_object_ = doc
for path in ref:
request_object_ = request_object_[path]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
example_, __, __ = ExampleWriting(request_object_properties_, [], 1)
if "type" in array_obj:
type_ = array_obj["type"] + " Array"
if "enum" in array_obj:
type_ = type_ + " Enum"
description_ += f' Options: {str(array_obj["enum"])}'
example_ = f'"{array_obj["enum"][0]}"'
if "Enum" not in type_:
if "string" in type_:
example_ = '"string"'
elif "number" in type_ or "integer" in type_:
example_ = '0'
elif "boolean" in type_:
example_ = 'true'
writeUp += f'\n<tr>\n<td width="20%">{item["name"]}</td> <td> <code>{type_}</code><br/>{description_}</td>\n</tr>'
example += f' "{item["name"]}": {example_},\n'
return writeUp + example + "\b}</pre>\n</div>\n</td>\n</tr>\n</table>"
def ResponseTable(requestBody):
writeUp = ""
array = False
order = 0
if "content" in requestBody:
component = requestBody["content"]["application/json"]["schema"]
if "$ref" in component:
component = component["$ref"].split("/")[1:]
elif "items" in component and "$ref" in component["items"]:
component = component["items"]["$ref"].split("/")[1:]
array = True
order += 1
else:
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2">{requestBody["description"]}</th>\n'
writeUp += '</tr>\n</thead>\n'
writeUp += f'<tr>\n<td width="20%">value</td> <td> <code>{component["items"]["type"]}</code> <br/>/</td>\n</tr>\n'
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += f'[\n "{component["items"]["example"]}"\n]'
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
return writeUp
else:
component = requestBody["$ref"].split("/")[1:]
item_list = [component]
i = 0
while i < len(item_list):
request_object = doc
for item in item_list[i]:
request_object = request_object[item]
if "items" in request_object and "oneOf" in request_object["items"]:
prop = request_object["items"]["oneOf"]
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n[\n ['
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{item}</code> Model - {request_object["description"]}</th>\n'
writeUp += '</tr>\n</thead>'
for y in prop:
path = y["$ref"].split("/")[1:]
name = path[-1]
enum = ""
item_list.append(path)
request_object = doc
for item in path:
request_object = request_object[item]
if "enum" in request_object:
enum = " Options: " + str(request_object["enum"])
description_ = request_object["description"]
if description_[-1] != ".":
description_ += "."
writeUp += f'\n<tr>\n<td width="20%">{name}</td> <td> <code>{request_object["type"]}</code> <br/> {description_ + enum}</td>\n</tr>\n'
if "example" in request_object:
text = request_object["example"]
elif "enum" in request_object:
text = '"' + request_object["enum"][0] + '"'
example += f'\n {text},'
example += '\b\n ]\n]'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
continue
elif "oneOf" in request_object:
for y in request_object["oneOf"]:
item_list.append(y["$ref"].split("/")[1:])
i += 1
continue
elif "properties" in request_object:
request_object_properties = request_object["properties"]
elif "content" in request_object:
item_list.append(request_object["content"]["application/json"]["schema"]["$ref"].split("/")[1:])
i += 1
continue
elif "type" in request_object and "properties" not in request_object:
request_object_properties = {item: request_object}
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
if "description" in request_object:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model - {request_object["description"]}</th>\n'
else:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model</th>\n'
writeUp += '</tr>\n</thead>\n'
example, html_property, item_list = ExampleWriting(request_object_properties, item_list, array, order)
if array:
array = False
order -= 1
for line in html_property:
writeUp += line
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
return writeUp
def ExampleWriting(request_object_properties, item_list, array=False, order=0):
tab = " " * order
if array:
example = "[\n {\n"
else:
example = "{\n"
line = []
for name, properties in request_object_properties.items():
type_ = properties["type"] if "type" in properties else "object"
description_ = properties["description"] if "description" in properties else "/"
if (example != "{\n" and not array) or (example != "[\n {\n" and array):
example += ",\n"
example_ = tab + f' "{name}": '
if type_ == "array":
example_ += '[\n'
if "type" in properties["items"]:
type_ = properties["items"]["type"] + " Array"
example_ += tab + f' "{properties["items"]["type"]}"'
elif "$ref" in properties["items"]:
ref = properties["items"]["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+2)
example_ += tab + " " * 2 + write_up
elif type_ == "object":
if "additionalProperties" in properties:
add_prop = properties["additionalProperties"]
if "type" in add_prop:
prop_type = add_prop["type"]
if "format" in prop_type:
type_ = prop_type + f'$({prop_type["format"]})' + " object"
if prop_type["format"] == "date-time":
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += "0"
else:
type_ = prop_type + " object"
example_ += f'"{prop_type}"'
elif "$ref" in add_prop:
ref = add_prop["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "$ref" in properties:
ref = properties["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "type" in request_object_:
properties = request_object_properties_ = request_object_
type_ = request_object_["type"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
elif type_ == "integer" or type_ == "number":
example_ += "0"
elif type_ == "boolean":
example_ += "true"
elif type_ == "string":
if "format" in properties:
type_ += f'(${properties["format"]})'
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += '"string"'
if description_[-1] != ".":
description_ += "."
if "enum" in properties:
type_ += " Enum"
description_ += f' Options : {properties["enum"]}'
if "string" in type_:
example_ = tab + f' "{name}": "{properties["enum"][0]}"'
else:
example_ = tab + f' "{name}": {properties["enum"][0]}'
if "example" in properties:
eg = properties["example"]
type_ += f'<br/><i><sub>example: {eg}</sub></i>'
if isinstance(eg, str):
eg = '"' + eg + '"'
example_ = tab + f' "{name}": {eg}'
if "Array" in type_:
example_ += "\n" + tab + " ]"
if order == 0 or array:
line.append(f'<tr>\n<td width="20%">{name}</td> <td> <code>{type_}</code> <br/> {description_}</td>\n</tr>\n')
example += example_
if not array:
return example + "\n" + tab + "}", line, item_list
return example + "\n" + tab + "}\n" + " " * (order-1) + "]", line, item_list
for section, source in documentations.items():
yaml_file = open(source)
doc = yaml.load(yaml_file, Loader=yaml.Loader)
paths = doc["paths"]
for api_call, result in paths.items():
j = 1
content = result["post"] if "post" in result else result["get"]
# Create the destination path if it does not exist
destination_folder = pathlib.Path("/".join(content["tags"]))
destination_folder.mkdir(parents=True, exist_ok=True)
# Create Introduction part
with open(destination_folder / f'{j:02} Introduction.html', "w") as html_file:
html_file.write("<p>\n")
html_file.write(f"{content['summary']}\n")
html_file.write("</p>\n")
j += 1
# Create the Description part if there is one
if "description" in content:
with open(destination_folder / f'{j:02} Description.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'{content["description"]}\n')
html_file.write('</p>\n')
j += 1
# Create Request part
with open(destination_folder / f'{j:02} Request.html', "w") as html_file:
description_ = ""
if "parameters" in content:
writeUp = RequestTable(api_call, content["parameters"])
elif "requestBody" in content:
if "description" in content["requestBody"]:
description_ = str(content["requestBody"]["description"])
if description_[-1] != ".":
description_ += "."
description_ += " "
writeUp = ResponseTable(content["requestBody"])
else:
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="1"><code>{api_call}</code> Method</th>\n</tr>\n</thead>\n'
writeUp += f'</tr>\n<td><code>{api_call}</code> method takes no parameters.</td>\n</tr>\n</table>'
description_ += f'The <code>{api_call}</code> API accepts requests in the following format:\n'
html_file.write("<p>\n" + description_ + "</p>\n")
html_file.write(writeUp)
j += 1
# Create Response part
with open(destination_folder / f'{j:02} Responses.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'The <code>{api_call}</code> API provides a response in the following format:\n')
html_file.write('</p>\n')
request_body = content["responses"]
for code, properties in request_body.items():
if code == "200":
html_file.write('<h4>200 Success</h4>\n')
elif code == "401":
html_file.write('<h4>401 Authentication Error</h4>\n<table class="table qc-table">\n<thead>\n<tr>\n')
html_file.write('<th colspan="2"><code>UnauthorizedError</code> Model - Unauthorized response from the API. Key is missing, invalid, or timestamp is too old for hash.</th>\n')
html_file.write('</tr>\n</thead>\n<tr>\n<td width="20%">www_authenticate</td> <td> <code>string</code> <br/> Header</td>\n</tr>\n</table>\n')
continue
elif code == "404":
html_file.write('<h4>404 Not Found Error</h4>\n')
html_file.write('<p>The requested item, index, page was not found.</p>\n')
continue
elif code == "default":
html_file.write('<h4>Default Generic Error</h4>\n')
writeUp = ResponseTable(properties)
html_file.write(writeUp)
print(f"Documentation of {section} is generated and inplace!") | [
"yaml.load"
]
| [((13986, 14026), 'yaml.load', 'yaml.load', (['yaml_file'], {'Loader': 'yaml.Loader'}), '(yaml_file, Loader=yaml.Loader)\n', (13995, 14026), False, 'import yaml\n')] |
import glob
import logging
import os
import warnings
import pytest
from _pytest.outcomes import Failed
from _pytest.reports import TestReport
from .broker_pact import BrokerPact, BrokerPacts, PactBrokerConfig
from .result import PytestResult, log
def pytest_addoption(parser):
group = parser.getgroup("pact specific options (pactman)")
group.addoption(
"--pact-files", default=None, help="pact JSON files to verify (wildcards allowed)"
)
group.addoption("--pact-broker-url", default="", help="pact broker URL")
group.addoption("--pact-broker-token", default="", help="pact broker bearer token")
group.addoption(
"--pact-provider-name", default=None, help="pact name of provider being verified"
)
group.addoption(
"--pact-consumer-name",
default=None,
help="consumer name to limit pact verification to - "
"DEPRECATED, use --pact-verify-consumer instead",
)
group.addoption(
"--pact-verify-consumer", default=None, help="consumer name to limit pact verification to"
)
group.addoption(
"--pact-verify-consumer-tag",
metavar="TAG",
action="append",
help="limit broker pacts verified to those matching the tag. May be "
"specified multiple times in which case pacts matching any of these "
"tags will be verified.",
)
group.addoption(
"--pact-publish-results",
action="store_true",
default=False,
help="report pact verification results to pact broker",
)
group.addoption(
"--pact-provider-version",
default=None,
help="provider version to use when reporting pact results to pact broker",
)
group.addoption(
"--pact-allow-fail",
default=False,
action="store_true",
help="do not fail the pytest run if any pacts fail verification",
)
# Future options to be implemented. Listing them here so naming consistency can be a thing.
# group.addoption("--pact-publish-pacts", action="store_true", default=False,
# help="publish pacts to pact broker")
# group.addoption("--pact-consumer-version", default=None,
# help="consumer version to use when publishing pacts to the broker")
# group.addoption("--pact-consumer-version-source", default=None,
# help="generate consumer version from source 'git-tag' or 'git-hash'")
# group.addoption("--pact-consumer-version-tag", metavar='TAG', action="append",
# help="tag(s) that should be applied to the consumer version when pacts "
# "are uploaded to the broker; multiple tags may be supplied")
def get_broker_url(config):
return config.getoption("pact_broker_url") or os.environ.get("PACT_BROKER_URL")
def get_provider_name(config):
return config.getoption("pact_provider_name") or os.environ.get("PACT_PROVIDER_NAME")
# add the pact broker URL to the pytest output if running verbose
def pytest_report_header(config):
if config.getoption("verbose") > 0:
location = get_broker_url(config) or config.getoption("pact_files")
return [f"Loading pacts from {location}"]
def pytest_configure(config):
logging.getLogger("pactman").handlers = []
logging.basicConfig(format="%(message)s")
verbosity = config.getoption("verbose")
if verbosity > 0:
log.setLevel(logging.DEBUG)
class PytestPactVerifier:
def __init__(self, publish_results, provider_version, interaction, consumer):
self.publish_results = publish_results
self.provider_version = provider_version
self.interaction = interaction
self.consumer = consumer
def verify(self, provider_url, provider_setup, extra_provider_headers={}):
try:
self.interaction.verify_with_callable_setup(provider_url, provider_setup, extra_provider_headers)
except (Failed, AssertionError) as e:
raise Failed(str(e)) from None
def finish(self):
if self.consumer and self.publish_results and self.provider_version:
self.consumer.publish_result(self.provider_version)
def flatten_pacts(pacts):
for consumer in pacts:
last = consumer.interactions[-1]
for interaction in consumer.interactions:
if interaction is last:
yield (interaction, consumer)
else:
yield (interaction, None)
def load_pact_files(file_location):
for filename in glob.glob(file_location, recursive=True):
yield BrokerPact.load_file(filename, result_factory=PytestResult)
def test_id(identifier):
interaction, _ = identifier
return str(interaction)
def pytest_generate_tests(metafunc):
if "pact_verifier" in metafunc.fixturenames:
broker_url = get_broker_url(metafunc.config)
if not broker_url:
pact_files_location = metafunc.config.getoption("pact_files")
if not pact_files_location:
raise ValueError("need a --pact-broker-url or --pact-files option")
pact_files = load_pact_files(pact_files_location)
metafunc.parametrize(
"pact_verifier", flatten_pacts(pact_files), ids=test_id, indirect=True
)
else:
provider_name = get_provider_name(metafunc.config)
if not provider_name:
raise ValueError("--pact-broker-url requires the --pact-provider-name option")
broker = PactBrokerConfig(
broker_url,
metafunc.config.getoption("pact_broker_token"),
metafunc.config.getoption("pact_verify_consumer_tag", []),
)
broker_pacts = BrokerPacts(
provider_name, pact_broker=broker, result_factory=PytestResult
)
pacts = broker_pacts.consumers()
filter_consumer_name = metafunc.config.getoption("pact_verify_consumer")
if not filter_consumer_name:
filter_consumer_name = metafunc.config.getoption("pact_consumer_name")
if filter_consumer_name:
warnings.warn(
"The --pact-consumer-name command-line option is deprecated "
"and will be removed in the 3.0.0 release.",
DeprecationWarning,
)
if filter_consumer_name:
pacts = [pact for pact in pacts if pact.consumer == filter_consumer_name]
metafunc.parametrize("pact_verifier", flatten_pacts(pacts), ids=test_id, indirect=True)
class PactTestReport(TestReport):
"""Custom TestReport that allows us to attach an interaction to the result, and
then display the interaction's verification result ouput as well as the traceback
of the failure.
"""
@classmethod
def from_item_and_call(cls, item, call, interaction):
report = super().from_item_and_call(item, call)
report.pact_interaction = interaction
# the toterminal() call can't reasonably get at this config, so we store it here
report.verbosity = item.config.option.verbose
return report
def toterminal(self, out):
out.line("Pact failure details:", bold=True)
for text, kw in self.pact_interaction.result.results_for_terminal():
out.line(text, **kw)
if self.verbosity > 0:
out.line("Traceback:", bold=True)
return super().toterminal(out)
else:
out.line("Traceback not shown, use pytest -v to show it")
def pytest_runtest_makereport(item, call):
if call.when != "call" or "pact_verifier" not in getattr(item, "fixturenames", []):
return
# use our custom TestReport subclass if we're reporting on a pact verification call
interaction = item.funcargs["pact_verifier"].interaction
report = PactTestReport.from_item_and_call(item, call, interaction)
if report.failed and item.config.getoption("pact_allow_fail"):
# convert the fail into an "expected" fail, which allows the run to pass
report.wasxfail = True
report.outcome = "passed"
return report
def pytest_report_teststatus(report, config):
if not hasattr(report, "pact_interaction"):
return
if hasattr(report, "wasxfail"):
# wasxfail usually displays an "X" but since it's not *expected* to fail an "f" is a little clearer
return "ignore fail", "f", "IGNORE_FAIL"
@pytest.fixture()
def pact_verifier(pytestconfig, request):
interaction, consumer = request.param
p = PytestPactVerifier(
pytestconfig.getoption("pact_publish_results"),
pytestconfig.getoption("pact_provider_version"),
interaction,
consumer,
)
yield p
p.finish()
| [
"logging.basicConfig",
"logging.getLogger",
"os.environ.get",
"pytest.fixture",
"warnings.warn",
"glob.glob"
]
| [((8527, 8543), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (8541, 8543), False, 'import pytest\n'), ((3312, 3353), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""'}), "(format='%(message)s')\n", (3331, 3353), False, 'import logging\n'), ((4536, 4576), 'glob.glob', 'glob.glob', (['file_location'], {'recursive': '(True)'}), '(file_location, recursive=True)\n', (4545, 4576), False, 'import glob\n'), ((2804, 2837), 'os.environ.get', 'os.environ.get', (['"""PACT_BROKER_URL"""'], {}), "('PACT_BROKER_URL')\n", (2818, 2837), False, 'import os\n'), ((2924, 2960), 'os.environ.get', 'os.environ.get', (['"""PACT_PROVIDER_NAME"""'], {}), "('PACT_PROVIDER_NAME')\n", (2938, 2960), False, 'import os\n'), ((3265, 3293), 'logging.getLogger', 'logging.getLogger', (['"""pactman"""'], {}), "('pactman')\n", (3282, 3293), False, 'import logging\n'), ((6181, 6328), 'warnings.warn', 'warnings.warn', (['"""The --pact-consumer-name command-line option is deprecated and will be removed in the 3.0.0 release."""', 'DeprecationWarning'], {}), "(\n 'The --pact-consumer-name command-line option is deprecated and will be removed in the 3.0.0 release.'\n , DeprecationWarning)\n", (6194, 6328), False, 'import warnings\n')] |
from flask_restx import Api
from app.apis.hello import api as hello
api = Api(
title='api',
version='1.0',
description='',
prefix='/api',
doc='/api'
)
api.add_namespace(hello)
| [
"flask_restx.Api"
]
| [((76, 150), 'flask_restx.Api', 'Api', ([], {'title': '"""api"""', 'version': '"""1.0"""', 'description': '""""""', 'prefix': '"""/api"""', 'doc': '"""/api"""'}), "(title='api', version='1.0', description='', prefix='/api', doc='/api')\n", (79, 150), False, 'from flask_restx import Api\n')] |
from fastapi import FastAPI, Request, Response
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from utils import get_page_data, process_initial
import uvicorn
app = FastAPI()
templates = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/", response_class=HTMLResponse)
async def home(request: Request):
# Expect requests with cookies
return process_initial(request)
@app.get("/page", response_class=HTMLResponse)
async def page(request: Request):
# Expect requests with cookies
return get_page_data(request)
if __name__ == "__main__":
uvicorn.run("main:app", host="127.0.0.1", port=8050, log_level="info")
| [
"fastapi.FastAPI",
"uvicorn.run",
"fastapi.templating.Jinja2Templates",
"utils.get_page_data",
"fastapi.staticfiles.StaticFiles",
"utils.process_initial"
]
| [((252, 261), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (259, 261), False, 'from fastapi import FastAPI, Request, Response\n'), ((274, 312), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""templates"""'}), "(directory='templates')\n", (289, 312), False, 'from fastapi.templating import Jinja2Templates\n'), ((334, 365), 'fastapi.staticfiles.StaticFiles', 'StaticFiles', ([], {'directory': '"""static"""'}), "(directory='static')\n", (345, 365), False, 'from fastapi.staticfiles import StaticFiles\n'), ((507, 531), 'utils.process_initial', 'process_initial', (['request'], {}), '(request)\n', (522, 531), False, 'from utils import get_page_data, process_initial\n'), ((661, 683), 'utils.get_page_data', 'get_page_data', (['request'], {}), '(request)\n', (674, 683), False, 'from utils import get_page_data, process_initial\n'), ((717, 787), 'uvicorn.run', 'uvicorn.run', (['"""main:app"""'], {'host': '"""127.0.0.1"""', 'port': '(8050)', 'log_level': '"""info"""'}), "('main:app', host='127.0.0.1', port=8050, log_level='info')\n", (728, 787), False, 'import uvicorn\n')] |
from json import JSONEncoder
from time import time
class Jsonable:
"""Abstract class to standardize the toJson method to be implemented by any class that wants to be
serialized to JSON"""
def toJson(self):
"""Abstract method"""
raise NotImplementedError('You should implement this method in your classes.')
class CommonMessage(Jsonable):
def __init__(self):
self.client = Client()
self.emitter = Emitter()
self.type = ""
self.body = ""
self.tags = ["music", "culture", "food"]
def toJson(self):
return dict(client=self.client, emitter=self.emitter, type=self.type, body=self.body, tags=self.tags)
class Client(Jsonable):
def __init__(self):
self.id = ""
self.name = ""
self.time = int(round(time() * 1000))
def toJson(self):
return dict(id=self.id, name=self.name, time=self.time)
class Emitter(Jsonable):
def __init__(self):
self.id = ""
def toJson(self):
return dict(id=self.id)
class ComplexJsonEncoder(JSONEncoder):
"""Basic JSON encoder for 'complex (nested)' Python objects."""
def default(self, o):
if hasattr(o, 'toJson'):
return o.toJson()
else:
return JSONEncoder.default(self, o)
| [
"time.time",
"json.JSONEncoder.default"
]
| [((1268, 1296), 'json.JSONEncoder.default', 'JSONEncoder.default', (['self', 'o'], {}), '(self, o)\n', (1287, 1296), False, 'from json import JSONEncoder\n'), ((808, 814), 'time.time', 'time', ([], {}), '()\n', (812, 814), False, 'from time import time\n')] |
#!/usr/bin/python3
import time
from brownie import (
DataTypes,
TransparentUpgradeableProxy,
ProxyAdmin,
config,
network,
Contract,
)
from scripts.helpful_scripts import get_account, encode_function_data
def main():
account = get_account()
print(config["networks"][network.show_active()])
print(f"Deploying to {network.show_active()}")
data_types = DataTypes.deploy(
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
# Optional, deploy the ProxyAdmin and use that as the admin contract
proxy_admin = ProxyAdmin.deploy(
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
# If we want an intializer function we can add
# `initializer=box.store, 1`
# to simulate the initializer being the `store` function
# with a `newValue` of 1
# data_types_encoded_initializer_function = encode_function_data(data_types.setDataTypes)
data_types_encoded_initializer_function = encode_function_data(
data_types.setDataTypes, 10
)
proxy = TransparentUpgradeableProxy.deploy(
data_types.address,
proxy_admin.address,
data_types_encoded_initializer_function,
        # gas limit removed for an issue that is not very clear
# {"from": account, "gas_limit": 100000000000},
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
print(f"Proxy deployed to {proxy} ! You can now upgrade it to dataTypesV2!")
proxy_data_types = Contract.from_abi("DataTypes", proxy.address, DataTypes.abi)
| [
"brownie.network.show_active",
"scripts.helpful_scripts.encode_function_data",
"brownie.Contract.from_abi",
"scripts.helpful_scripts.get_account"
]
| [((256, 269), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (267, 269), False, 'from scripts.helpful_scripts import get_account, encode_function_data\n'), ((1052, 1101), 'scripts.helpful_scripts.encode_function_data', 'encode_function_data', (['data_types.setDataTypes', '(10)'], {}), '(data_types.setDataTypes, 10)\n', (1072, 1101), False, 'from scripts.helpful_scripts import get_account, encode_function_data\n'), ((1596, 1656), 'brownie.Contract.from_abi', 'Contract.from_abi', (['"""DataTypes"""', 'proxy.address', 'DataTypes.abi'], {}), "('DataTypes', proxy.address, DataTypes.abi)\n", (1613, 1656), False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((299, 320), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (318, 320), False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((349, 370), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (368, 370), False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((478, 499), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (497, 499), False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((697, 718), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (716, 718), False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((1452, 1473), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (1471, 1473), False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n')] |
# written by <NAME>
# version 0.1
# ================== IMPORT CUSTOM LEARNING LIBRARIES ===================== #
from customs.train import train, test
from customs.dataset import load_dataset
from customs.model import load_model
# ================== TRAINING SETTINGS ================== #
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--train_method', default='supervised', type=str, help='type of training: supervised(default), unsupervised, reinforce')
parser.add_argument('--task', default='classification', type=str, help='task of training: classification(default), regression')
parser.add_argument('--dataset', default='mnist', type=str, help='dataset to use')
parser.add_argument('--model', default='CNN', type=str, help='model to use')
parser.add_argument('--seed', default=42, type=int, help='random seed (default: 42)')
parser.add_argument('--num_worker', default=1, type=int, help='number of dataloader worker')
parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--gpu', default=0, type=str, help='GPU-id for GPU to use')
parser.add_argument('--multi_gpu', default=0, type=str, help='GPU-ids for multi-GPU usage')
parser.add_argument('--pin_memory', default=True, type=bool, help='pin memory option selector')
parser.add_argument('--save_model', action='store_true', default=False, help='For Saving the current Model')
parser.add_argument('--save_path', default=os.getcwd()+'/weights', type=str, help='Where to save weights')
parser.add_argument('--log_path', default=os.getcwd()+'/Logs', type=str, help='Where to save Logs')
# data setting
parser.add_argument('--val_rate', default=0.2, type=float, help='split rate for the validation data')
parser.add_argument('--transform', default='default', type=str, help='choose the data transform type')
# training parameter setting
parser.add_argument('--n_epoch', default=10, type=int, help='number of total training iteration')
parser.add_argument('--batch_size', default=32, type=int, help='size of minibatch')
parser.add_argument('--test_batch_size', default=32, type=int, help='size of test-minibatch')
# optimizer & scheduler setting
parser.add_argument('--lr', default=0.03, type=float, help='training learning rate')
parser.add_argument('--optimizer', default='adam', type=str, help='optimizer select')
parser.add_argument('--scheduler', default='steplr', type=str, help='scheduler select')
opt = parser.parse_args()
# ===================== IMPORT PYTORCH LIBRARIES ================== #
import torch
from torch.utils.data import DataLoader
torch.manual_seed(opt.seed)
# ================== GPU SETTINGS ================== #
def gpu_setup(opt):
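    # configure CUDA visibility for single- or multi-GPU runs, attach the
    # selected torch device to opt, and report whether CUDA is usable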
use_cuda = not opt.no_cuda and torch.cuda.is_available()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
if opt.multi_gpu != 0:
print()
print('Activating multi-gpu training mode')
print(opt.multi_gpu)
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.multi_gpu)
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
print()
print('Activating single-gpu training mode')
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using gpu number ' + str(opt.gpu))
return use_cuda
# ======================= MAIN SCRIPT ============================= #
def main(opt):
use_cuda = gpu_setup(opt)
dataset_train, dataset_validation = load_dataset(opt, train=True)
print('training data size: {}'.format(len(dataset_train)))
print('validation data size: {}'.format(len(dataset_validation)))
dataset_test = load_dataset(opt, train=False)
print('test data size: {}'.format(len(dataset_test)))
print()
kwargs = {'num_workers': opt.num_worker, 'pin_memory': opt.pin_memory} if use_cuda else {}
train_dataloader = DataLoader(dataset_train, batch_size=opt.batch_size, shuffle=True, **kwargs)
validation_dataloader = DataLoader(dataset_validation, batch_size=opt.batch_size, shuffle=True, **kwargs)
test_dataloader = DataLoader(dataset_test, batch_size=opt.test_batch_size, shuffle=True, **kwargs)
model = load_model(opt)
if opt.multi_gpu != 0:
model = torch.nn.DataParallel(model)
model.to(opt.device)
train(opt, model, train_dataloader, validation_dataloader)
test(opt, model, test_dataloader)
if __name__ == '__main__':
main(opt)
| [
"torch.manual_seed",
"customs.model.load_model",
"customs.train.train",
"argparse.ArgumentParser",
"customs.dataset.load_dataset",
"torch.nn.DataParallel",
"os.getcwd",
"customs.train.test",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
]
| [((338, 363), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (361, 363), False, 'import argparse\n'), ((3220, 3247), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (3237, 3247), False, 'import torch\n'), ((4177, 4206), 'customs.dataset.load_dataset', 'load_dataset', (['opt'], {'train': '(True)'}), '(opt, train=True)\n', (4189, 4206), False, 'from customs.dataset import load_dataset\n'), ((4364, 4394), 'customs.dataset.load_dataset', 'load_dataset', (['opt'], {'train': '(False)'}), '(opt, train=False)\n', (4376, 4394), False, 'from customs.dataset import load_dataset\n'), ((4593, 4669), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'batch_size': 'opt.batch_size', 'shuffle': '(True)'}), '(dataset_train, batch_size=opt.batch_size, shuffle=True, **kwargs)\n', (4603, 4669), False, 'from torch.utils.data import DataLoader\n'), ((4699, 4785), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_validation'], {'batch_size': 'opt.batch_size', 'shuffle': '(True)'}), '(dataset_validation, batch_size=opt.batch_size, shuffle=True, **\n kwargs)\n', (4709, 4785), False, 'from torch.utils.data import DataLoader\n'), ((4804, 4889), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_test'], {'batch_size': 'opt.test_batch_size', 'shuffle': '(True)'}), '(dataset_test, batch_size=opt.test_batch_size, shuffle=True, **kwargs\n )\n', (4814, 4889), False, 'from torch.utils.data import DataLoader\n'), ((4900, 4915), 'customs.model.load_model', 'load_model', (['opt'], {}), '(opt)\n', (4910, 4915), False, 'from customs.model import load_model\n'), ((5023, 5081), 'customs.train.train', 'train', (['opt', 'model', 'train_dataloader', 'validation_dataloader'], {}), '(opt, model, train_dataloader, validation_dataloader)\n', (5028, 5081), False, 'from customs.train import train, test\n'), ((5087, 5120), 'customs.train.test', 'test', (['opt', 'model', 'test_dataloader'], {}), '(opt, model, test_dataloader)\n', (5091, 5120), False, 'from customs.train import train, test\n'), ((3363, 3388), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3386, 3388), False, 'import torch\n'), ((4961, 4989), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (4982, 4989), False, 'import torch\n'), ((1809, 1820), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1818, 1820), False, 'import os\n'), ((1928, 1939), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1937, 1939), False, 'import os\n'), ((3682, 3707), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3705, 3707), False, 'import torch\n'), ((3906, 3931), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3929, 3931), False, 'import torch\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""A collection of backend information formatted to generate drawing data.
This instance will be provided to generator functions. The module provides an abstract
class :py:class:``DrawerBackendInfo`` with necessary methods to generate drawing objects.
Because the data structure of the backend class may depend on providers, this abstract class
has an abstract factory method `create_from_backend`. Each subclass should provide
a factory method that conforms to the associated provider. By default we provide the
:py:class:``OpenPulseBackendInfo`` class, whose factory method takes backends
satisfying the OpenPulse specification [1].
This class can also be initialized without the factory method by manually specifying the
required information. This may be convenient for visualizing a pulse program for a simulator
backend that only has device Hamiltonian information. This requires two mapping objects
for channel/qubit and channel/frequency along with the system cycle time.
If this information is not provided, this class will be initialized with a set of
empty data and the drawer illustrates a pulse program without any specific information.
Reference:
- [1] Qiskit Backend Specifications for OpenQASM and OpenPulse Experiments,
https://arxiv.org/abs/1809.03452
"""
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Dict, List, Union, Optional
from qiskit import pulse
from qiskit.providers import BaseBackend, BackendConfigurationError
class DrawerBackendInfo(ABC):
"""Backend information to be used for the drawing data generation."""
def __init__(self,
name: Optional[str] = None,
dt: Optional[float] = None,
channel_frequency_map: Optional[Dict[pulse.channels.Channel, float]] = None,
qubit_channel_map: Optional[Dict[int, List[pulse.channels.Channel]]] = None):
"""Create new backend information.
Args:
name: Name of the backend.
dt: System cycle time.
channel_frequency_map: Mapping of channel and associated frequency.
qubit_channel_map: Mapping of qubit and associated channels.
"""
self.backend_name = name or 'no-backend'
self._dt = dt
self._chan_freq_map = channel_frequency_map or dict()
self._qubit_channel_map = qubit_channel_map or dict()
@classmethod
@abstractmethod
def create_from_backend(cls, backend: BaseBackend):
"""Initialize a class with backend information provided by provider.
Args:
backend: Backend object.
"""
raise NotImplementedError
@property
def dt(self):
"""Return cycle time."""
return self._dt
def get_qubit_index(self, chan: pulse.channels.Channel) -> Union[int, None]:
"""Get associated qubit index of given channel object."""
for qind, chans in self._qubit_channel_map.items():
if chan in chans:
return qind
return chan.index
def get_channel_frequency(self, chan: pulse.channels.Channel) -> Union[float, None]:
"""Get frequency of given channel object."""
return self._chan_freq_map.get(chan, None)
class OpenPulseBackendInfo(DrawerBackendInfo):
"""Drawing information of backend that conforms to OpenPulse specification."""
@classmethod
def create_from_backend(cls, backend: BaseBackend):
"""Initialize a class with backend information provided by provider.
Args:
backend: Backend object.
Returns:
OpenPulseBackendInfo: New configured instance.
"""
configuration = backend.configuration()
defaults = backend.defaults()
# load name
name = backend.name()
# load cycle time
dt = configuration.dt
# load frequencies
chan_freqs = dict()
chan_freqs.update({pulse.DriveChannel(qind): freq
for qind, freq in enumerate(defaults.qubit_freq_est)})
chan_freqs.update({pulse.MeasureChannel(qind): freq
for qind, freq in enumerate(defaults.meas_freq_est)})
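        # control channel frequencies are linear combinations of the qubit LO
        # frequencies, weighted by the complex scales in u_channel_lo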
for qind, u_lo_mappers in enumerate(configuration.u_channel_lo):
temp_val = .0 + .0j
for u_lo_mapper in u_lo_mappers:
temp_val += defaults.qubit_freq_est[u_lo_mapper.q] * complex(*u_lo_mapper.scale)
chan_freqs[pulse.ControlChannel(qind)] = temp_val.real
# load qubit channel mapping
qubit_channel_map = defaultdict(list)
for qind in range(configuration.n_qubits):
qubit_channel_map[qind].append(configuration.drive(qubit=qind))
qubit_channel_map[qind].append(configuration.measure(qubit=qind))
for tind in range(configuration.n_qubits):
try:
qubit_channel_map[qind].extend(configuration.control(qubits=(qind, tind)))
except BackendConfigurationError:
pass
return OpenPulseBackendInfo(name=name,
dt=dt,
channel_frequency_map=chan_freqs,
qubit_channel_map=qubit_channel_map)
| [
"qiskit.pulse.MeasureChannel",
"qiskit.pulse.ControlChannel",
"collections.defaultdict",
"qiskit.pulse.DriveChannel"
]
| [((5084, 5101), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5095, 5101), False, 'from collections import defaultdict\n'), ((4450, 4474), 'qiskit.pulse.DriveChannel', 'pulse.DriveChannel', (['qind'], {}), '(qind)\n', (4468, 4474), False, 'from qiskit import pulse\n'), ((4590, 4616), 'qiskit.pulse.MeasureChannel', 'pulse.MeasureChannel', (['qind'], {}), '(qind)\n', (4610, 4616), False, 'from qiskit import pulse\n'), ((4974, 5000), 'qiskit.pulse.ControlChannel', 'pulse.ControlChannel', (['qind'], {}), '(qind)\n', (4994, 5000), False, 'from qiskit import pulse\n')] |
# Create your views here.
from .models import Mfund
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.tools import make_subplots
from django.db.models import Q
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.list import ListView
from django.views import View
from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min
from django.db.models.functions import Trim, Lower, Round
import pandas as pd
import csv, io
import openpyxl
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django_gotolong.lastrefd.models import Lastrefd, lastrefd_update
from django_gotolong.broker.icidir.imf.models import BrokerIcidirMf
def Mfund_url():
return "unused-mfund-refresh-url"
class MfundListView(ListView):
model = Mfund
# if pagination is desired
# paginate_by = 300
# filter_backends = [filters.OrderingFilter,]
# ordering_fields = ['sno', 'nse_symbol']
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id)
return queryset
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MfundListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Amount(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id).order_by('-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_AMC(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_amc', 'mf_category', 'mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_AMC_Amount(ListView):
model = Mfund
def get_queryset(self):
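        # total current NAV per AMC for this user, excluding zero totals,
        # largest holdings first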
self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
values('mf_amc').annotate(scheme_sum=Sum('mf_nav_value')). \
exclude(scheme_sum=0.0).order_by('-scheme_sum')
print('hi ', self.queryset)
return self.queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
labels = []
values = []
labels_values_dict = {}
sum_total = 0
for q_row in self.queryset:
sum_total += q_row['scheme_sum']
labels_values_dict[q_row['mf_amc']] = q_row['scheme_sum']
context['sum_total'] = int(sum_total)
print('labels values dict', labels_values_dict)
for k, v in sorted(labels_values_dict.items(), key=lambda item: item[1]):
labels.append(k)
values.append(v)
print('labels ', labels)
print('values ', values)
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
fig.update_traces(textposition='inside', textinfo='percent+label')
# fig.show()
plot_div_1 = plot(fig, output_type='div', include_plotlyjs=False)
context['plot_div_1'] = plot_div_1
return context
class MfundListView_Category(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_category', 'mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Subcat(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Reco(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_research_reco', '-mf_rating')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_SubcatAmount(ListView):
model = Mfund
def get_queryset(self):
self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
values('mf_subcat').annotate(scheme_sum=Sum('mf_nav_value')). \
exclude(scheme_sum=0.0).order_by('-scheme_sum')
return self.queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
labels = []
values = []
labels_values_dict = {}
sum_total = 0
for q_row in self.queryset:
sum_total += q_row['scheme_sum']
labels_values_dict[q_row['mf_subcat']] = q_row['scheme_sum']
context['sum_total'] = int(sum_total)
print('labels values dict', labels_values_dict)
for k, v in sorted(labels_values_dict.items(), key=lambda item: item[1]):
labels.append(k)
values.append(v)
print('labels ', labels)
print('values ', values)
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
fig.update_traces(textposition='inside', textinfo='percent+label')
# fig.show()
plot_div_1 = plot(fig, output_type='div', include_plotlyjs=False)
context['plot_div_1'] = plot_div_1
return context
class MfundRefreshView(View):
debug_level = 1
def get(self, request):
self.mfund_refresh(request)
return HttpResponseRedirect(reverse("mfund-list"))
def __init__(self):
super(MfundRefreshView, self).__init__()
def mfund_refresh(self, request):
debug_level = 1
        # rebuild this user's Mfund rows from the imported broker holdings
# first delete all existing mfund objects
Mfund.objects.all().filter(mf_user_id=request.user.id).delete()
max_id_instances = Mfund.objects.aggregate(max_id=Max('mf_id'))
max_mf_id = max_id_instances['max_id']
print('DS: found max id ', max_mf_id)
if max_mf_id is None:
max_mf_id = 0
print('max_mf_id ', max_mf_id)
unique_id = max_mf_id
for brec in BrokerIcidirMf.objects.all().filter(bim_user_id=request.user.id):
unique_id += 1
print(brec.bim_amc, brec.bim_name, brec.bim_category, brec.bim_subcat)
print(brec.bim_rating, brec.bim_units, brec.bim_cost_value, brec.bim_nav_value)
print(brec.bim_research_reco)
# skip 0 units
if int(float(brec.bim_units)) != 0:
_, created = Mfund.objects.update_or_create(
mf_id=unique_id,
mf_user_id=request.user.id,
mf_broker='icidir',
mf_amc=brec.bim_amc,
mf_name=brec.bim_name,
mf_category=brec.bim_category,
mf_subcat=brec.bim_subcat,
mf_rating=brec.bim_rating,
mf_cost_value=brec.bim_cost_value,
mf_nav_value=brec.bim_nav_value,
mf_research_reco=brec.bim_research_reco
)
# breakpoint()
# import pdb
# pdb.set_trace()
# Updated Gfundareco objects
lastrefd_update("mfund")
| [
"django.db.models.Sum",
"plotly.graph_objects.Pie",
"plotly.offline.plot",
"django.utils.decorators.method_decorator",
"django_gotolong.broker.icidir.imf.models.BrokerIcidirMf.objects.all",
"django.urls.reverse",
"django.db.models.Max",
"django_gotolong.lastrefd.models.lastrefd_update"
]
| [((1284, 1316), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (1300, 1316), False, 'from django.utils.decorators import method_decorator\n'), ((3719, 3771), 'plotly.offline.plot', 'plot', (['fig'], {'output_type': '"""div"""', 'include_plotlyjs': '(False)'}), "(fig, output_type='div', include_plotlyjs=False)\n", (3723, 3771), False, 'from plotly.offline import plot\n'), ((6372, 6424), 'plotly.offline.plot', 'plot', (['fig'], {'output_type': '"""div"""', 'include_plotlyjs': '(False)'}), "(fig, output_type='div', include_plotlyjs=False)\n", (6376, 6424), False, 'from plotly.offline import plot\n'), ((8376, 8400), 'django_gotolong.lastrefd.models.lastrefd_update', 'lastrefd_update', (['"""mfund"""'], {}), "('mfund')\n", (8391, 8400), False, 'from django_gotolong.lastrefd.models import Lastrefd, lastrefd_update\n'), ((6645, 6666), 'django.urls.reverse', 'reverse', (['"""mfund-list"""'], {}), "('mfund-list')\n", (6652, 6666), False, 'from django.urls import reverse\n'), ((7015, 7027), 'django.db.models.Max', 'Max', (['"""mf_id"""'], {}), "('mf_id')\n", (7018, 7027), False, 'from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min\n'), ((7272, 7300), 'django_gotolong.broker.icidir.imf.models.BrokerIcidirMf.objects.all', 'BrokerIcidirMf.objects.all', ([], {}), '()\n', (7298, 7300), False, 'from django_gotolong.broker.icidir.imf.models import BrokerIcidirMf\n'), ((3562, 3598), 'plotly.graph_objects.Pie', 'go.Pie', ([], {'labels': 'labels', 'values': 'values'}), '(labels=labels, values=values)\n', (3568, 3598), True, 'import plotly.graph_objects as go\n'), ((6215, 6251), 'plotly.graph_objects.Pie', 'go.Pie', ([], {'labels': 'labels', 'values': 'values'}), '(labels=labels, values=values)\n', (6221, 6251), True, 'import plotly.graph_objects as go\n'), ((2730, 2749), 'django.db.models.Sum', 'Sum', (['"""mf_nav_value"""'], {}), "('mf_nav_value')\n", (2733, 2749), False, 'from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min\n'), ((5415, 5434), 'django.db.models.Sum', 'Sum', (['"""mf_nav_value"""'], {}), "('mf_nav_value')\n", (5418, 5434), False, 'from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min\n')] |
#coding: utf-8
from gevent import monkey
monkey.patch_all()
from gevent.pool import Pool
import gevent
import requests
import urllib
import os
import time
import re
import ssl
class Downloader:
def __init__(self, pool_size, retry=3):
self.pool = Pool(pool_size)
self.session = self._get_http_session(pool_size, pool_size, retry)
self.retry = retry
self.dir = ''
self.succed = {}
self.failed = []
self.ts_total = 0
def _get_http_session(self, pool_connections, pool_maxsize, max_retries):
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=pool_connections, pool_maxsize=pool_maxsize, max_retries=max_retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
def run(self, m3u8_url, dir='',moreTs=False):
self.dir = dir
if self.dir and not os.path.isdir(self.dir):
os.makedirs(self.dir)
r = self.session.get(m3u8_url, timeout=10)
if r.ok:
body = r.content
if body:
ssl._create_default_https_context = ssl._create_unverified_context
ts_list = [urllib.parse.urljoin(m3u8_url, n.strip()) for n in str(body, encoding = "utf8").split('\n') if n and not n.startswith("#")]
if moreTs:
ts_list = self.getMoreTsList(ts_list)
ts_list = list(zip(ts_list, [n for n in range(len(list(ts_list)))]))
if ts_list:
self.ts_total = len(ts_list)
print(self.ts_total)
g1 = gevent.spawn(self._join_file)
self._download(ts_list)
g1.join()
else:
print( r.status_code)
def _download(self, ts_list):
self.pool.map(self._worker, ts_list)
if self.failed:
ts_list = self.failed
self.failed = []
self._download(ts_list)
def _worker(self, ts_tuple):
url = ts_tuple[0]
index = ts_tuple[1]
retry = self.retry
while retry:
try:
r = self.session.get(url, timeout=20)
if r.ok:
file_name = url.split('/')[-1].split('?')[0]
print( file_name)
with open(os.path.join(self.dir, file_name), 'wb') as f:
f.write(r.content)
self.succed[index] = file_name
return
except:
retry -= 1
print ('[FAIL]%s' % url)
self.failed.append((url, index))
def _join_file(self):
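        # stitch the downloaded segments together in playlist order, waiting for
        # pieces that are still downloading and deleting each piece once merged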
index = 0
outfile = ''
while index < self.ts_total:
file_name = self.succed.get(index, '')
if file_name:
infile = open(os.path.join(self.dir, file_name), 'rb')
if not outfile:
outfile = open(os.path.join(self.dir, file_name.split('.')[0]+'_all.'+file_name.split('.')[-1]), 'wb')
outfile.write(infile.read())
infile.close()
os.remove(os.path.join(self.dir, file_name))
index += 1
else:
time.sleep(1)
if outfile:
outfile.close()
def getMoreTsList(self,ts_list):
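        # guess .ts segments that are missing from the playlist: bump the numeric
        # index in the last segment name, widen the upper bound while probe
        # requests succeed, bisect down on failures, then append every index up
        # to the highest one found to exist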
headers = {'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'upgrade-insecure-requests':1,
'scheme':'https'
}
retry = self.retry
isOk = False
lastTs = ts_list[-1]
pattern = re.compile(r'(\d+\.?\d)\.ts')
tsNum = '{:0>3}'.format(int(pattern.findall(lastTs)[0]) + 1 )
nextTs = re.sub(pattern,str(tsNum),lastTs,1) + ".ts"
req = urllib.request.Request(url=nextTs,headers=headers,method='GET')
l = r = int(tsNum)
maxTs = 0
while retry or isOk:
try:
isOk = urllib.request.urlopen(req).status==200
if isOk:
retry = 3
l = r + 1
r = l + 100 if maxTs < r else maxTs - int((maxTs-l)/2)
nextTs = re.sub(pattern,'{:0>3}'.format(r),lastTs,1) + ".ts"
req = urllib.request.Request(url=nextTs,headers=headers,method='GET')
else:
r = r - int((r-l)/2)
except :
if int((r-l)/2) == 0:
for i in range(int(tsNum) , r):
ts_list.append(re.sub(pattern,'{:0>3}'.format(i),lastTs,1) + ".ts")
return ts_list
maxTs = r
r = r - int((r-l)/2)
nextTs = re.sub(pattern,'{:0>3}'.format(r),lastTs,1) + ".ts"
req = urllib.request.Request(url=nextTs,headers=headers,method='GET')
retry -= 1
isOk = False
return ts_list
if __name__ == '__main__':
downloader = Downloader(5)
downloader.run('https://www.xiaodianying.com/filets/2069/dp.m3u8', './video',True)
| [
"requests.Session",
"os.makedirs",
"re.compile",
"gevent.monkey.patch_all",
"urllib.request.Request",
"requests.adapters.HTTPAdapter",
"os.path.join",
"time.sleep",
"urllib.request.urlopen",
"gevent.pool.Pool",
"os.path.isdir",
"gevent.spawn"
]
| [((42, 60), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (58, 60), False, 'from gevent import monkey\n'), ((260, 275), 'gevent.pool.Pool', 'Pool', (['pool_size'], {}), '(pool_size)\n', (264, 275), False, 'from gevent.pool import Pool\n'), ((577, 595), 'requests.Session', 'requests.Session', ([], {}), '()\n', (593, 595), False, 'import requests\n'), ((618, 738), 'requests.adapters.HTTPAdapter', 'requests.adapters.HTTPAdapter', ([], {'pool_connections': 'pool_connections', 'pool_maxsize': 'pool_maxsize', 'max_retries': 'max_retries'}), '(pool_connections=pool_connections,\n pool_maxsize=pool_maxsize, max_retries=max_retries)\n', (647, 738), False, 'import requests\n'), ((3963, 3995), 're.compile', 're.compile', (['"""(\\\\d+\\\\.?\\\\d)\\\\.ts"""'], {}), "('(\\\\d+\\\\.?\\\\d)\\\\.ts')\n", (3973, 3995), False, 'import re\n'), ((4138, 4203), 'urllib.request.Request', 'urllib.request.Request', ([], {'url': 'nextTs', 'headers': 'headers', 'method': '"""GET"""'}), "(url=nextTs, headers=headers, method='GET')\n", (4160, 4203), False, 'import urllib\n'), ((994, 1015), 'os.makedirs', 'os.makedirs', (['self.dir'], {}), '(self.dir)\n', (1005, 1015), False, 'import os\n'), ((957, 980), 'os.path.isdir', 'os.path.isdir', (['self.dir'], {}), '(self.dir)\n', (970, 980), False, 'import os\n'), ((3294, 3307), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3304, 3307), False, 'import time\n'), ((1681, 1710), 'gevent.spawn', 'gevent.spawn', (['self._join_file'], {}), '(self._join_file)\n', (1693, 1710), False, 'import gevent\n'), ((2900, 2933), 'os.path.join', 'os.path.join', (['self.dir', 'file_name'], {}), '(self.dir, file_name)\n', (2912, 2933), False, 'import os\n'), ((3198, 3231), 'os.path.join', 'os.path.join', (['self.dir', 'file_name'], {}), '(self.dir, file_name)\n', (3210, 3231), False, 'import os\n'), ((4627, 4692), 'urllib.request.Request', 'urllib.request.Request', ([], {'url': 'nextTs', 'headers': 'headers', 'method': '"""GET"""'}), "(url=nextTs, headers=headers, method='GET')\n", (4649, 4692), False, 'import urllib\n'), ((5155, 5220), 'urllib.request.Request', 'urllib.request.Request', ([], {'url': 'nextTs', 'headers': 'headers', 'method': '"""GET"""'}), "(url=nextTs, headers=headers, method='GET')\n", (5177, 5220), False, 'import urllib\n'), ((4317, 4344), 'urllib.request.urlopen', 'urllib.request.urlopen', (['req'], {}), '(req)\n', (4339, 4344), False, 'import urllib\n'), ((2401, 2434), 'os.path.join', 'os.path.join', (['self.dir', 'file_name'], {}), '(self.dir, file_name)\n', (2413, 2434), False, 'import os\n')] |
import logging
import os
import re
import uuid
from pathlib import Path
from ludwig.constants import CHECKSUM, META, TEST, TRAINING, VALIDATION
from ludwig.data.cache.util import calculate_checksum
from ludwig.utils import data_utils
from ludwig.utils.fs_utils import delete, path_exists
logger = logging.getLogger(__name__)
def alphanum(v):
"""Filters a string to only its alphanumeric characters."""
return re.sub(r"\W+", "", v)
class DatasetCache:
def __init__(self, config, checksum, cache_map, dataset_manager):
self.config = config
self.checksum = checksum
self.cache_map = cache_map
self.dataset_manager = dataset_manager
def get(self):
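        # return None when no cached metadata exists; otherwise return a validity
        # flag (checksum match plus a cached training set) and the cached splits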
training_set_metadata_fp = self.cache_map[META]
if not path_exists(training_set_metadata_fp):
return None
cache_training_set_metadata = data_utils.load_json(training_set_metadata_fp)
cached_training_set = self.cache_map[TRAINING] if path_exists(self.cache_map[TRAINING]) else None
cached_test_set = self.cache_map[TEST] if path_exists(self.cache_map[TEST]) else None
cached_validation_set = self.cache_map[VALIDATION] if path_exists(self.cache_map[VALIDATION]) else None
valid = self.checksum == cache_training_set_metadata.get(CHECKSUM) and cached_training_set is not None
return valid, cache_training_set_metadata, cached_training_set, cached_test_set, cached_validation_set
def put(self, training_set, test_set, validation_set, training_set_metadata):
logger.info("Writing preprocessed training set cache")
training_set = self.dataset_manager.save(
self.cache_map[TRAINING],
training_set,
self.config,
training_set_metadata,
TRAINING,
)
if test_set is not None:
logger.info("Writing preprocessed test set cache")
test_set = self.dataset_manager.save(
self.cache_map[TEST],
test_set,
self.config,
training_set_metadata,
TEST,
)
if validation_set is not None:
logger.info("Writing preprocessed validation set cache")
validation_set = self.dataset_manager.save(
self.cache_map[VALIDATION],
validation_set,
self.config,
training_set_metadata,
VALIDATION,
)
logger.info("Writing train set metadata")
data_utils.save_json(self.cache_map[META], training_set_metadata)
return training_set, test_set, validation_set, training_set_metadata
def delete(self):
for fname in self.cache_map.values():
if path_exists(fname):
delete(fname)
class CacheManager:
def __init__(self, dataset_manager, cache_dir=None):
self._dataset_manager = dataset_manager
self._cache_dir = cache_dir
def get_dataset_cache(self, config, dataset=None, training_set=None, test_set=None, validation_set=None):
if dataset is not None:
key = self.get_cache_key(dataset, config)
cache_map = {
META: self.get_cache_path(dataset, key, META, "json"),
TRAINING: self.get_cache_path(dataset, key, TRAINING),
TEST: self.get_cache_path(dataset, key, TEST),
VALIDATION: self.get_cache_path(dataset, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
else:
key = self.get_cache_key(training_set, config)
cache_map = {
META: self.get_cache_path(training_set, key, META, "json"),
TRAINING: self.get_cache_path(training_set, key, TRAINING),
TEST: self.get_cache_path(test_set, key, TEST),
VALIDATION: self.get_cache_path(validation_set, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
def get_cache_key(self, dataset, config):
if not isinstance(dataset, str):
# TODO(travis): could try hashing the in-memory dataset, but this is tricky for Dask
return str(uuid.uuid1())
return calculate_checksum(dataset, config)
def get_cache_path(self, dataset, key, tag, ext=None):
if not isinstance(dataset, str):
dataset = None
if self._cache_dir is None and dataset is not None:
# Use the input dataset filename (minus the extension) as the cache path
stem = Path(dataset).stem
else:
# To avoid collisions across different directories, we use the unique checksum
# as the cache path
stem = alphanum(key)
ext = ext or self.data_format
cache_fname = f"{stem}.{tag}.{ext}"
return os.path.join(self.get_cache_directory(dataset), cache_fname)
def get_cache_directory(self, input_fname):
if self._cache_dir is None:
if input_fname is not None:
return os.path.dirname(input_fname)
return "."
return self._cache_dir
def can_cache(self, skip_save_processed_input):
return self._dataset_manager.can_cache(skip_save_processed_input)
@property
def data_format(self):
return self._dataset_manager.data_format
| [
"logging.getLogger",
"ludwig.utils.data_utils.save_json",
"pathlib.Path",
"ludwig.utils.data_utils.load_json",
"ludwig.utils.fs_utils.path_exists",
"uuid.uuid1",
"os.path.dirname",
"ludwig.data.cache.util.calculate_checksum",
"re.sub",
"ludwig.utils.fs_utils.delete"
]
| [((299, 326), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (316, 326), False, 'import logging\n'), ((421, 442), 're.sub', 're.sub', (['"""\\\\W+"""', '""""""', 'v'], {}), "('\\\\W+', '', v)\n", (427, 442), False, 'import re\n'), ((872, 918), 'ludwig.utils.data_utils.load_json', 'data_utils.load_json', (['training_set_metadata_fp'], {}), '(training_set_metadata_fp)\n', (892, 918), False, 'from ludwig.utils import data_utils\n'), ((2535, 2600), 'ludwig.utils.data_utils.save_json', 'data_utils.save_json', (['self.cache_map[META]', 'training_set_metadata'], {}), '(self.cache_map[META], training_set_metadata)\n', (2555, 2600), False, 'from ludwig.utils import data_utils\n'), ((4299, 4334), 'ludwig.data.cache.util.calculate_checksum', 'calculate_checksum', (['dataset', 'config'], {}), '(dataset, config)\n', (4317, 4334), False, 'from ludwig.data.cache.util import calculate_checksum\n'), ((770, 807), 'ludwig.utils.fs_utils.path_exists', 'path_exists', (['training_set_metadata_fp'], {}), '(training_set_metadata_fp)\n', (781, 807), False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((978, 1015), 'ludwig.utils.fs_utils.path_exists', 'path_exists', (['self.cache_map[TRAINING]'], {}), '(self.cache_map[TRAINING])\n', (989, 1015), False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((1077, 1110), 'ludwig.utils.fs_utils.path_exists', 'path_exists', (['self.cache_map[TEST]'], {}), '(self.cache_map[TEST])\n', (1088, 1110), False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((1184, 1223), 'ludwig.utils.fs_utils.path_exists', 'path_exists', (['self.cache_map[VALIDATION]'], {}), '(self.cache_map[VALIDATION])\n', (1195, 1223), False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((2763, 2781), 'ludwig.utils.fs_utils.path_exists', 'path_exists', (['fname'], {}), '(fname)\n', (2774, 2781), False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((2799, 2812), 'ludwig.utils.fs_utils.delete', 'delete', (['fname'], {}), '(fname)\n', (2805, 2812), False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((4270, 4282), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (4280, 4282), False, 'import uuid\n'), ((4628, 4641), 'pathlib.Path', 'Path', (['dataset'], {}), '(dataset)\n', (4632, 4641), False, 'from pathlib import Path\n'), ((5124, 5152), 'os.path.dirname', 'os.path.dirname', (['input_fname'], {}), '(input_fname)\n', (5139, 5152), False, 'import os\n')] |
import pprint
from FactorioCalcBase.data.binary import sorted_recipe_list, production_machine_category_list_dict
from FactorioCalcBase.recipe import Recipe
from FactorioCalcBase.calculator_base import CalculatorBase
from FactorioCalcBase.dependency_dict_common_function import dict_add_number
import time
def test_change_machine(test_obj: CalculatorBase, target_recipe, failed_dict):
recipe_obj = Recipe(recipe_name=target_recipe)
cat = recipe_obj.get_category()
available_machine_list = production_machine_category_list_dict.get(cat)
failed_dict['method_failed']['change_machine_failed'] = {}
if len(available_machine_list) > 1:
for machine in available_machine_list:
test_obj.change_machine_to_specific_block(recipe_name=target_recipe,
machine_name=machine)
if test_obj.block_obj_dict['recipe']['machine_name'] != machine:
                raise RuntimeError('MachineNotChanged')
def test_calculator_base_methods(test_obj: CalculatorBase, failed_dict: dict):
recipe_list = list(test_obj.block_obj_dict['recipe'].keys())
for recipe in recipe_list:
try:
test_change_machine(test_obj, recipe, failed_dict)
except:
dict_add_number(failed_dict['method_failed']['change_machine_failed'], recipe, 1)
def test_calculator_base(failed_dict):
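    # parameter grids swept below: mining research modifiers (mrms), preferred
    # machine lists (pm), use_kovarex flags (uk) and recipe amounts (am)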
mrms = [0, 0.3]
pm = [None, ["assembling-machine-2", "stone-furnace", "burner-mining-drill"]]
uk = [True, False]
am = [1, 101.5]
failed_dict['init_failed'] = {}
failed_dict['method_failed'] = {
'change_machine_failed': {
}
}
for recipe in sorted_recipe_list:
for mining_research_modifier in mrms:
for preferred_machines in pm:
for use_kovarex in uk:
for amount in am:
try:
test_obj = CalculatorBase(recipe_name=recipe, amount=amount,
preferred_machine_list=preferred_machines,
use_kovarex=use_kovarex,
mining_research_modifier=mining_research_modifier)
                        except:
                            dict_add_number(failed_dict['init_failed'], key=recipe, val=1)
                        else:
                            test_calculator_base_methods(test_obj, failed_dict)
pprint.pp(failed_dict)
return failed_dict
def run_test():
start_time = time.time()
test_calculator_base({})
print(f'finished in {time.time()-start_time}')
| [
"FactorioCalcBase.data.binary.production_machine_category_list_dict.get",
"pprint.pp",
"FactorioCalcBase.dependency_dict_common_function.dict_add_number",
"FactorioCalcBase.recipe.Recipe",
"FactorioCalcBase.calculator_base.CalculatorBase",
"time.time"
]
| [((403, 436), 'FactorioCalcBase.recipe.Recipe', 'Recipe', ([], {'recipe_name': 'target_recipe'}), '(recipe_name=target_recipe)\n', (409, 436), False, 'from FactorioCalcBase.recipe import Recipe\n'), ((502, 548), 'FactorioCalcBase.data.binary.production_machine_category_list_dict.get', 'production_machine_category_list_dict.get', (['cat'], {}), '(cat)\n', (543, 548), False, 'from FactorioCalcBase.data.binary import sorted_recipe_list, production_machine_category_list_dict\n'), ((2456, 2478), 'pprint.pp', 'pprint.pp', (['failed_dict'], {}), '(failed_dict)\n', (2465, 2478), False, 'import pprint\n'), ((2537, 2548), 'time.time', 'time.time', ([], {}), '()\n', (2546, 2548), False, 'import time\n'), ((1257, 1342), 'FactorioCalcBase.dependency_dict_common_function.dict_add_number', 'dict_add_number', (["failed_dict['method_failed']['change_machine_failed']", 'recipe', '(1)'], {}), "(failed_dict['method_failed']['change_machine_failed'],\n recipe, 1)\n", (1272, 1342), False, 'from FactorioCalcBase.dependency_dict_common_function import dict_add_number\n'), ((2603, 2614), 'time.time', 'time.time', ([], {}), '()\n', (2612, 2614), False, 'import time\n'), ((1922, 2100), 'FactorioCalcBase.calculator_base.CalculatorBase', 'CalculatorBase', ([], {'recipe_name': 'recipe', 'amount': 'amount', 'preferred_machine_list': 'preferred_machines', 'use_kovarex': 'use_kovarex', 'mining_research_modifier': 'mining_research_modifier'}), '(recipe_name=recipe, amount=amount, preferred_machine_list=\n preferred_machines, use_kovarex=use_kovarex, mining_research_modifier=\n mining_research_modifier)\n', (1936, 2100), False, 'from FactorioCalcBase.calculator_base import CalculatorBase\n'), ((2313, 2375), 'FactorioCalcBase.dependency_dict_common_function.dict_add_number', 'dict_add_number', (["failed_dict['init_failed']"], {'key': 'recipe', 'val': '(1)'}), "(failed_dict['init_failed'], key=recipe, val=1)\n", (2328, 2375), False, 'from FactorioCalcBase.dependency_dict_common_function import dict_add_number\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 22:18:58 2020
@author: https://stackoverflow.com/questions/293431/python-object-deleting-itself
@editor: thirschbuechler
this is probably overkill to alternatively exit a with-context, rather than by exception,
but hey, maybe it will be needed, or related to getting rid of the visa-handle within thvisa
# for some reason, __enter__ does not work in the with-context
"""
# NOTE: This is Python 3 code, it should work with python 2, but I haven't tested it.
import weakref #https://docs.python.org/3/library/weakref.html
class InsaneClass(object):
_alive = []
def __new__(cls): # there is a difference btw. cls and self, but i don't understand
self = super().__new__(cls)
InsaneClass._alive.append(self)
return weakref.proxy(self)
def commit_suicide(self):
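        # drop the only strong reference (kept in _alive) so the instance can be
        # garbage collected; the weak proxy returned by __new__ then goes dead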
self._alive.remove(self)
def __enter__(self):
print("enter says hello")
return self
def __init__(self):
pass
    def __exit__(self, exc_type, exc_value, tb):  # "with" context exit
print("bye")
if __name__ == '__main__': # test if called as executable, not as library
instance = InsaneClass()
instance.__enter__()
instance.commit_suicide()
#print(instance)
print(InsaneClass) # pointer
print(InsaneClass().__enter__()) # an object
print("now, something completely different!")
with InsaneClass() as i:
i.commit_suicide()
print(i) | [
"weakref.proxy"
]
| [((821, 840), 'weakref.proxy', 'weakref.proxy', (['self'], {}), '(self)\n', (834, 840), False, 'import weakref\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A module containing an algorithm for hand gesture recognition"""
import numpy as np
import cv2
from typing import Tuple
__author__ = "<NAME>"
__license__ = "GNU GPL 3.0 or later"
def recognize(img_gray):
"""Recognizes hand gesture in a single-channel depth image
This method estimates the number of extended fingers based on
a single-channel depth image showing a hand and arm region.
:param img_gray: single-channel depth image
:returns: (num_fingers, img_draw) The estimated number of
extended fingers and an annotated RGB image
"""
# segment arm region
segment = segment_arm(img_gray)
# find the hull of the segmented area, and based on that find the
# convexity defects
(contour, defects) = find_hull_defects(segment)
# detect the number of fingers depending on the contours and convexity
# defects, then draw defects that belong to fingers green, others red
img_draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)
(num_fingers, img_draw) = detect_num_fingers(contour,
defects, img_draw)
return (num_fingers, img_draw)
def segment_arm(frame: np.ndarray, abs_depth_dev: int = 14) -> np.ndarray:
"""Segments arm region
This method accepts a single-channel depth image of an arm and
hand region and extracts the segmented arm region.
It is assumed that the hand is placed in the center of the image.
:param frame: single-channel depth image
:returns: binary image (mask) of segmented arm region, where
arm=255, else=0
"""
height, width = frame.shape
# find center (21x21 pixel) region of imageheight frame
center_half = 10 # half-width of 21 is 21/2-1
center = frame[height // 2 - center_half:height // 2 + center_half,
width // 2 - center_half:width // 2 + center_half]
# find median depth value of center region
med_val = np.median(center)
# try this instead:
frame = np.where(abs(frame - med_val) <= abs_depth_dev,
128, 0).astype(np.uint8)
# morphological
kernel = np.ones((3, 3), np.uint8)
frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernel)
# connected component
small_kernel = 3
frame[height // 2 - small_kernel:height // 2 + small_kernel,
width // 2 - small_kernel:width // 2 + small_kernel] = 128
mask = np.zeros((height + 2, width + 2), np.uint8)
flood = frame.copy()
cv2.floodFill(flood, mask, (width // 2, height // 2), 255,
flags=4 | (255 << 8))
ret, flooded = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)
return flooded
def find_hull_defects(segment: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Find hull defects
This method finds all defects in the hull of a segmented arm
region.
:param segment: a binary image (mask) of a segmented arm region,
where arm=255, else=0
:returns: (max_contour, defects) the largest contour in the image
and all corresponding defects
"""
contours, hierarchy = cv2.findContours(segment, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
# find largest area contour
max_contour = max(contours, key=cv2.contourArea)
epsilon = 0.01 * cv2.arcLength(max_contour, True)
max_contour = cv2.approxPolyDP(max_contour, epsilon, True)
# find convexity hull and defects
hull = cv2.convexHull(max_contour, returnPoints=False)
defects = cv2.convexityDefects(max_contour, hull)
return max_contour, defects
def detect_num_fingers(contour: np.ndarray, defects: np.ndarray,
img_draw: np.ndarray, thresh_deg: float = 80.0) -> Tuple[int, np.ndarray]:
"""Detects the number of extended fingers
This method determines the number of extended fingers based on a
contour and convexity defects.
It will annotate an RGB color image of the segmented arm region
with all relevant defect points and the hull.
:param contours: a list of contours
:param defects: a list of convexity defects
:param img_draw: an RGB color image to be annotated
:returns: (num_fingers, img_draw) the estimated number of extended
fingers and an annotated RGB color image
"""
# if there are no convexity defects, possibly no hull found or no
# fingers extended
if defects is None:
return [0, img_draw]
# we assume the wrist will generate two convexity defects (one on each
# side), so if there are no additional defect points, there are no
# fingers extended
if len(defects) <= 2:
return [0, img_draw]
# if there is a sufficient amount of convexity defects, we will find a
# defect point between two fingers so to get the number of fingers,
# start counting at 1
num_fingers = 1
# Defects are of shape (num_defects,1,4)
for defect in defects[:, 0, :]:
# Each defect is an array of four integers.
# First three indexes of start, end and the furthest
# points respectively
# contour is of shape (num_points,1,2) - 2 for point coordinates
start, end, far = [contour[i][0] for i in defect[:3]]
# draw the hull
cv2.line(img_draw, tuple(start), tuple(end), (0, 255, 0), 2)
# if angle is below a threshold, defect point belongs to two
# extended fingers
if angle_rad(start - far, end - far) < deg2rad(thresh_deg):
# increment number of fingers
num_fingers += 1
# draw point as green
cv2.circle(img_draw, tuple(far), 5, (0, 255, 0), -1)
else:
# draw point as red
cv2.circle(img_draw, tuple(far), 5, (0, 0, 255), -1)
# make sure we cap the number of fingers
return min(5, num_fingers), img_draw
def angle_rad(v1, v2):
"""Angle in radians between two vectors
This method returns the angle (in radians) between two array-like
vectors using the cross-product method, which is more accurate for
small angles than the dot-product-acos method.
"""
return np.arctan2(np.linalg.norm(np.cross(v1, v2)), np.dot(v1, v2))
def deg2rad(angle_deg):
"""Convert degrees to radians
This method converts an angle in radians e[0,2*np.pi) into degrees
e[0,360)
"""
return angle_deg / 180.0 * np.pi
| [
"cv2.convexHull",
"numpy.median",
"numpy.ones",
"numpy.cross",
"cv2.threshold",
"cv2.arcLength",
"cv2.floodFill",
"cv2.convexityDefects",
"cv2.morphologyEx",
"numpy.zeros",
"numpy.dot",
"cv2.approxPolyDP",
"cv2.cvtColor",
"cv2.findContours"
]
| [((1022, 1063), 'cv2.cvtColor', 'cv2.cvtColor', (['segment', 'cv2.COLOR_GRAY2RGB'], {}), '(segment, cv2.COLOR_GRAY2RGB)\n', (1034, 1063), False, 'import cv2\n'), ((2042, 2059), 'numpy.median', 'np.median', (['center'], {}), '(center)\n', (2051, 2059), True, 'import numpy as np\n'), ((2225, 2250), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (2232, 2250), True, 'import numpy as np\n'), ((2263, 2311), 'cv2.morphologyEx', 'cv2.morphologyEx', (['frame', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(frame, cv2.MORPH_CLOSE, kernel)\n', (2279, 2311), False, 'import cv2\n'), ((2506, 2549), 'numpy.zeros', 'np.zeros', (['(height + 2, width + 2)', 'np.uint8'], {}), '((height + 2, width + 2), np.uint8)\n', (2514, 2549), True, 'import numpy as np\n'), ((2579, 2657), 'cv2.floodFill', 'cv2.floodFill', (['flood', 'mask', '(width // 2, height // 2)', '(255)'], {'flags': '(4 | 255 << 8)'}), '(flood, mask, (width // 2, height // 2), 255, flags=4 | 255 << 8)\n', (2592, 2657), False, 'import cv2\n'), ((2698, 2747), 'cv2.threshold', 'cv2.threshold', (['flood', '(129)', '(255)', 'cv2.THRESH_BINARY'], {}), '(flood, 129, 255, cv2.THRESH_BINARY)\n', (2711, 2747), False, 'import cv2\n'), ((3232, 3297), 'cv2.findContours', 'cv2.findContours', (['segment', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(segment, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3248, 3297), False, 'import cv2\n'), ((3499, 3543), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['max_contour', 'epsilon', '(True)'], {}), '(max_contour, epsilon, True)\n', (3515, 3543), False, 'import cv2\n'), ((3594, 3641), 'cv2.convexHull', 'cv2.convexHull', (['max_contour'], {'returnPoints': '(False)'}), '(max_contour, returnPoints=False)\n', (3608, 3641), False, 'import cv2\n'), ((3656, 3695), 'cv2.convexityDefects', 'cv2.convexityDefects', (['max_contour', 'hull'], {}), '(max_contour, hull)\n', (3676, 3695), False, 'import cv2\n'), ((3448, 3480), 'cv2.arcLength', 'cv2.arcLength', (['max_contour', '(True)'], {}), '(max_contour, True)\n', (3461, 3480), False, 'import cv2\n'), ((6368, 6382), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (6374, 6382), True, 'import numpy as np\n'), ((6349, 6365), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (6357, 6365), True, 'import numpy as np\n')] |
import sys
import os
from . import filesys
MAIN_USAGE_MESSAGE = """
usage: xlab command ...
Options:
positional arguments:
command
project
"""
def project(args):
if len(args) != 1:
print("error: Invalid arguments.")
exit()
if args[0] == 'init':
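        # use the current working directory as the new project root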
root = os.getcwd()
dirs = filesys.Directories()
dirs.set_root(root)
def main():
if len(sys.argv) <= 1:
print(MAIN_USAGE_MESSAGE)
exit()
command = sys.argv[1]
args = sys.argv[2:]
if command == 'project':
exe = project
else:
print("error: No command 'xlab {}'.".format(command))
exit()
exe(args) | [
"os.getcwd"
]
| [((300, 311), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (309, 311), False, 'import os\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .optimizer import Optimizer
from .adam import Adam
from ..fluid import core
from ..fluid import framework
from ..fluid.framework import Variable
from ..fluid.dygraph import base as imperative_base
from collections import Callable
import paddle
_C_ops = core.ops
__all__ = []
class AdamW(Adam):
r"""
The AdamW optimizer is implemented based on the AdamW Optimization
    in the paper `DECOUPLED WEIGHT DECAY REGULARIZATION <https://arxiv.org/pdf/1711.05101.pdf>`_.
    It can resolve the problem of L2 regularization failure in the Adam optimizer.
.. math::
t & = t + 1
moment\_1\_out & = {\beta}_1 * moment\_1 + (1 - {\beta}_1) * grad
        moment\_2\_out & = {\beta}_2 * moment\_2 + (1 - {\beta}_2) * grad * grad
        learning\_rate & = learning\_rate *
            \frac{\sqrt{1 - {\beta}_2^t}}{1 - {\beta}_1^t}
param\_out & = param - learning\_rate * (\frac{moment\_1}{\sqrt{moment\_2} + \epsilon} + \lambda * param)
Args:
learning_rate (float|LRScheduler, optional): The learning rate used to update ``Parameter``.
It can be a float value or a LRScheduler. The default value is 0.001.
parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. And you can specify different options for \
different parameter groups such as the learning rate, weight decay, etc, \
            then the parameters are a list of dicts. Note that the learning_rate in parameter groups \
represents the scale of base learning_rate. \
The default value is None in static mode, at this time all parameters will be updated.
beta1 (float|Tensor, optional): The exponential decay rate for the 1st moment estimates.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 0.9.
beta2 (float|Tensor, optional): The exponential decay rate for the 2nd moment estimates.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 0.999.
epsilon (float, optional): A small float value for numerical stability.
The default value is 1e-08.
weight_decay (float|Tensor, optional): The weight decay coefficient, it can be float or Tensor. The default value is 0.01.
lr_ratio (function|None, optional): If it is not None,
the learning rate will be updated with layerwise learning rate ratio.
Otherwise, the learning rate is the original.
Default: None.
apply_decay_param_fun (function|None, optional): If it is not None,
only tensors that makes apply_decay_param_fun(Tensor.name)==True
will be updated with weight decay. It only works when we want to specify tensors.
Default: None.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
:ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
The accumulators are updated at every step. Every element of the two moving-average
is updated in both dense mode and sparse mode. If the size of parameter is very large,
then the update may be very slow. The lazy mode only update the element that has
            gradient in current mini-batch, so it will be much faster. But this mode has
            different semantics from the original Adam algorithm and may lead to different results.
The default value is False.
multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is false.
name (str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
The default value is None.
**Notes**:
**Currently, AdamW doesn't support sparse parameter optimization.**
Examples:
.. code-block:: python
import paddle
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand([10,10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.AdamW(learning_rate=0.1,
parameters=linear.parameters(),
beta1=beta1,
beta2=beta2,
weight_decay=0.01)
out.backward()
adam.step()
adam.clear_grad()
#Note that the learning_rate of linear_2 is 0.01.
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear_1(inp)
out = linear_2(out)
loss = paddle.mean(out)
adam = paddle.optimizer.AdamW(
learning_rate=0.1,
parameters=[{
'params': linear_1.parameters()
}, {
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1,
'beta1': 0.8
}],
weight_decay=0.01,
beta1=0.9)
out.backward()
adam.step()
adam.clear_grad()
"""
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
parameters=None,
weight_decay=0.01,
lr_ratio=None,
apply_decay_param_fun=None,
grad_clip=None,
lazy_mode=False,
multi_precision=False,
name=None):
assert learning_rate is not None
assert beta1 is not None
assert beta2 is not None
assert epsilon is not None
if not 0 <= beta1 < 1:
            raise ValueError("Invalid value of beta1, expect beta1 in [0,1).")
        if not 0 <= beta2 < 1:
            raise ValueError("Invalid value of beta2, expect beta2 in [0,1).")
        if not 0 <= epsilon:
            raise ValueError("Invalid value of epsilon, expect epsilon >= 0.")
coeff = weight_decay
if not isinstance(coeff, float) and \
not isinstance(coeff, framework.Variable):
raise TypeError("coeff should be float or Tensor.")
self._params_name = set()
self._apply_decay_param_fun = apply_decay_param_fun
self._coeff = coeff
self._lr_to_coeff = dict()
if lr_ratio is not None:
assert isinstance(lr_ratio, Callable)
if core.is_compiled_with_xpu() or core.is_compiled_with_npu():
raise NotImplementedError(
"'lr_ratio' is unimplemented in XPU and NPU")
self._lr_ratio = lr_ratio
super(AdamW, self).__init__(
learning_rate=learning_rate,
parameters=parameters,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
grad_clip=grad_clip,
name=name,
lazy_mode=lazy_mode,
multi_precision=multi_precision)
self._default_dict = {'coeff': coeff}
self.type = "adamw"
if core.is_compiled_with_xpu():
self.type = "adam"
# Use _auxiliary_vars together with _set_auxiliary_var/_get_auxiliary_var to achieve that.
self._auxiliary_vars = dict()
def _set_auxiliary_var(self, key, val):
self._auxiliary_vars[key] = val
def _get_auxiliary_var(self, key):
if key in self._auxiliary_vars:
return self._auxiliary_vars[key]
else:
return None
def _append_decoupled_weight_decay(self, block, param_and_grad):
"""
Add decoupled weight decay op.
parameter = parameter - parameter * coeff * lr
Args:
block: block in which variable is to be created
param_and_grad: (parameters, gradients) pairs,
the parameters need to decay.
Raises:
Exception: The type of coeff and parameter is not consistent.
"""
if isinstance(param_and_grad, dict):
param_and_grad = self._update_param_group(param_and_grad)
param, grad = param_and_grad
if self._apply_decay_param_fun is not None \
and not self._apply_decay_param_fun(param.name):
return
if isinstance(self._learning_rate, float):
learning_rate = self._learning_rate
else:
# NOTE. We add this function to the _append_optimize_op(),
# for we must make sure _create_param_lr() be called after
# optimizer._create_global_learning_rate().
learning_rate = self._create_param_lr(param_and_grad)
with block.program._optimized_guard(
[param, grad]), framework.name_scope('weight decay'):
self._params_name.add(param.name)
# If it has been calculated, the result will be reused.
# NOTE(wangxi): In dygraph mode, apply_gradient will be executed
# every step, so need clear _lr_to_coeff every step,
# we do this in _create_optimization_pass
decay_coeff = self._lr_to_coeff.get(learning_rate, None)
if decay_coeff is None:
# NOTE(wangxi): for pipeline to set device:all
with paddle.static.device_guard(None):
decay_coeff = 1.0 - learning_rate * self._coeff
self._lr_to_coeff[learning_rate] = decay_coeff
find_master = (self._multi_precision and
param.dtype == core.VarDesc.VarType.FP16)
if find_master:
master_weight = self._master_weights[param.name]
scaled_param = master_weight * decay_coeff
paddle.fluid.layers.assign(
input=scaled_param, output=master_weight)
else:
scaled_param = param * decay_coeff
paddle.fluid.layers.assign(input=scaled_param, output=param)
def _append_optimize_op(self, block, param_and_grad):
if paddle.is_compiled_with_xpu():
self._append_decoupled_weight_decay(block, param_and_grad)
return super(AdamW, self)._append_optimize_op(block, param_and_grad)
assert isinstance(block, framework.Block)
if isinstance(param_and_grad, dict):
param_and_grad = self._update_param_group(param_and_grad)
param, grad = param_and_grad
# Whether we should do weight decay for the parameter.
with_decay = True
if self._apply_decay_param_fun is not None \
and not self._apply_decay_param_fun(param.name):
with_decay = False
moment1 = self._get_accumulator(self._moment1_acc_str,
param_and_grad[0])
moment2 = self._get_accumulator(self._moment2_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param_and_grad[0])
find_master = self._multi_precision and param_and_grad[
0].dtype == core.VarDesc.VarType.FP16
master_weight = (self._master_weights[param_and_grad[0].name]
if find_master else None)
lr = self._create_param_lr(param_and_grad)
# create the adamw optimize op
if framework.in_dygraph_mode():
lr_ratio_ = 1. if self._lr_ratio is None else self._lr_ratio(
param_and_grad[0])
_beta1 = self._beta1 if not isinstance(
self._beta1, Variable) else self._beta1.numpy().item(0)
_beta2 = self._beta2 if not isinstance(
self._beta2, Variable) else self._beta2.numpy().item(0)
_, _, _, _, _ = _C_ops.adamw(
param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
moment2, beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,
'lazy_mode', self._lazy_mode, 'min_row_size_to_use_multithread',
1000, 'beta1', _beta1, 'beta2', _beta2, 'coeff', self._coeff,
"lr_ratio", lr_ratio_)
return None
inputs = {
"Param": [param_and_grad[0]],
"Grad": [param_and_grad[1]],
"LearningRate": [lr],
"Moment1": [moment1],
"Moment2": [moment2],
"Beta1Pow": [beta1_pow_acc],
"Beta2Pow": [beta2_pow_acc],
}
# Pass found_inf to adamw, to skip update for not only param, but also momentum and beta_pow
found_inf = self._get_auxiliary_var('found_inf')
if found_inf:
inputs['SkipUpdate'] = found_inf
outputs = {
"ParamOut": [param_and_grad[0]],
"Moment1Out": [moment1],
"Moment2Out": [moment2],
"Beta1PowOut": [beta1_pow_acc],
"Beta2PowOut": [beta2_pow_acc],
}
attrs = {
"lazy_mode": self._lazy_mode,
"min_row_size_to_use_multithread": 1000,
"multi_precision": find_master,
"with_decay": with_decay,
"coeff": self._coeff,
"lr_ratio": 1.
if self._lr_ratio is None else self._lr_ratio(param_and_grad[0])
}
if isinstance(self._beta1, Variable):
inputs['Beta1Tensor'] = self._beta1
else:
attrs['beta1'] = self._beta1
if isinstance(self._beta2, Variable):
inputs['Beta2Tensor'] = self._beta2
else:
attrs['beta2'] = self._beta2
if isinstance(self._epsilon, Variable):
inputs['EpsilonTensor'] = self._epsilon
else:
attrs['epsilon'] = self._epsilon
if find_master:
inputs["MasterParam"] = master_weight
outputs["MasterParamOut"] = master_weight
adamw_op = block.append_op(
type=self.type,
inputs=inputs,
outputs=outputs,
attrs=attrs,
stop_gradient=True)
return adamw_op
def _create_optimization_pass(self, parameters_and_grads):
optimize_ops = super(
AdamW, self)._create_optimization_pass(parameters_and_grads)
# In dygraph mode, clear _lr_to_coeff after applied gradient
self._lr_to_coeff = dict()
return optimize_ops
def __str__(self):
return " ".join(["Weight Decay, params:", ",".join(self._params_name)])
def _update_param_group(self, parameters):
self._coeff = parameters.get('coeff', self._default_dict['coeff'])
parameters = parameters.get('params')
return parameters
| [
"paddle.is_compiled_with_xpu",
"paddle.static.device_guard",
"paddle.fluid.layers.assign"
]
| [((11439, 11468), 'paddle.is_compiled_with_xpu', 'paddle.is_compiled_with_xpu', ([], {}), '()\n', (11466, 11468), False, 'import paddle\n'), ((11133, 11201), 'paddle.fluid.layers.assign', 'paddle.fluid.layers.assign', ([], {'input': 'scaled_param', 'output': 'master_weight'}), '(input=scaled_param, output=master_weight)\n', (11159, 11201), False, 'import paddle\n'), ((11308, 11368), 'paddle.fluid.layers.assign', 'paddle.fluid.layers.assign', ([], {'input': 'scaled_param', 'output': 'param'}), '(input=scaled_param, output=param)\n', (11334, 11368), False, 'import paddle\n'), ((10677, 10709), 'paddle.static.device_guard', 'paddle.static.device_guard', (['None'], {}), '(None)\n', (10703, 10709), False, 'import paddle\n')] |
# install BeautifulSoup4 before running
#
# prints out historical data in csv format:
#
# [date, open, high, low, close, volume]
#
import re, csv, sys, urllib2
from bs4 import BeautifulSoup
# If the start date and end date are the same, only one value will be returned;
# otherwise multiple values are returned, which can be used for calculations
#
# ticker (company symbol)
# interval (d (daily), m (monthly), q (quarterly), y (yearly))
# start_date (YYYYMMDD)
# end_date (YYYYMMDD)
def get_historical_data(ticker, interval, start_date, end_date):
#pathToCSV = '/Users/Michal/Downloads/dialogflow-java-client-master2/samples/clients/VirtualTradingAssistant/src/main/java/ai/api/examples/fileStore/file.csv'
#pathToCSV = 'C:\\Users\\ojwoo\\Documents\\Warwick\\CS261\\Coursework\\dialogflow-java-client-master\\samples\\clients\\VirtualTradingAssistant\\src\\main\\java\\ai\\api\\examples\\fileStore\\file.csv'
#pathToCSV = '/Users/Michal/Desktop/apache-tomcat-8.5.28/bin/misc/file.csv'
pathToCSV = 'C:\\apache-tomcat-8.5.28\\bin\\misc\\file.csv'
url_builder = []
url_builder.append('https://stooq.com/q/d/?s=')
url_builder.append(ticker)
url_builder.append('&c=0&d1=')
url_builder.append(start_date)
url_builder.append('&d2=')
url_builder.append(end_date)
url_builder.append('&i=')
url_builder.append(interval)
url = ''.join(url_builder)
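    # fetch the Stooq daily-data page and locate the CSV export link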
page = urllib2.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
link = soup.findAll('a', href=re.compile('^q/d/l/'))
link = re.search('"(.*)"', str(link))
try:
link = link.group(1)
except AttributeError:
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerow('')
exit()
link = link.replace('amp;', '')
arr = []
arr.append('https://stooq.com/')
arr.append(link)
link = ''.join(arr)
response = urllib2.urlopen(link)
cr = csv.reader(response)
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerows(cr)
def main():
args = sys.argv
get_historical_data(args[1], args[2], args[3], args[4])
if __name__ == '__main__':
main()
| [
"urllib2.urlopen",
"re.compile",
"csv.writer",
"bs4.BeautifulSoup",
"csv.reader"
]
| [((1398, 1418), 'urllib2.urlopen', 'urllib2.urlopen', (['url'], {}), '(url)\n', (1413, 1418), False, 'import re, csv, sys, urllib2\n'), ((1431, 1465), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page', '"""html.parser"""'], {}), "(page, 'html.parser')\n", (1444, 1465), False, 'from bs4 import BeautifulSoup\n'), ((1945, 1966), 'urllib2.urlopen', 'urllib2.urlopen', (['link'], {}), '(link)\n', (1960, 1966), False, 'import re, csv, sys, urllib2\n'), ((1977, 1997), 'csv.reader', 'csv.reader', (['response'], {}), '(response)\n', (1987, 1997), False, 'import re, csv, sys, urllib2\n'), ((2053, 2102), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '"""@"""', 'quotechar': '"""#"""'}), "(csvfile, delimiter='@', quotechar='#')\n", (2063, 2102), False, 'import re, csv, sys, urllib2\n'), ((1501, 1522), 're.compile', 're.compile', (['"""^q/d/l/"""'], {}), "('^q/d/l/')\n", (1511, 1522), False, 'import re, csv, sys, urllib2\n'), ((1696, 1745), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '"""@"""', 'quotechar': '"""#"""'}), "(csvfile, delimiter='@', quotechar='#')\n", (1706, 1745), False, 'import re, csv, sys, urllib2\n')] |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that a node receiving many (potentially out of order) blocks exits
initial block download (IBD; this occurs once it has passed minimumchainwork)
and continues to sync without seizing.
"""
import random
from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import (CBlockHeader,
network_thread_start,
P2PInterface,
msg_block,
msg_headers)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, p2p_port
NUM_IBD_BLOCKS = 50
class BaseNode(P2PInterface):
def send_header(self, block):
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
self.send_message(msg)
def send_block(self, block):
self.send_message(msg_block(block))
class SyncChainTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
# Setting minimumchainwork makes sure we test IBD as well as post-IBD
self.extra_args = [
["-minimumchainwork={:#x}".format(202 + 2 * NUM_IBD_BLOCKS)]]
def run_test(self):
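        # Connect a P2P test node to node0 and wait for the version handshake.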
node0conn = BaseNode()
node0conn.peer_connect('127.0.0.1', p2p_port(0))
network_thread_start()
node0conn.wait_for_verack()
node0 = self.nodes[0]
tip = int(node0.getbestblockhash(), 16)
height = node0.getblockcount() + 1
time = node0.getblock(node0.getbestblockhash())['time'] + 1
blocks = []
for i in range(NUM_IBD_BLOCKS * 2):
block = create_block(tip, create_coinbase(height), time)
block.solve()
blocks.append(block)
tip = block.sha256
height += 1
time += 1
# Headers need to be sent in-order
for b in blocks:
node0conn.send_header(b)
# Send blocks in some random order
for b in random.sample(blocks, len(blocks)):
node0conn.send_block(b)
# The node should eventually, completely sync without getting stuck
def node_synced():
return node0.getbestblockhash() == blocks[-1].hash
wait_until(node_synced)
if __name__ == '__main__':
SyncChainTest().main()
| [
"test_framework.mininode.CBlockHeader",
"test_framework.mininode.msg_headers",
"test_framework.util.p2p_port",
"test_framework.util.wait_until",
"test_framework.mininode.network_thread_start",
"test_framework.blocktools.create_coinbase",
"test_framework.mininode.msg_block"
]
| [((963, 976), 'test_framework.mininode.msg_headers', 'msg_headers', ([], {}), '()\n', (974, 976), False, 'from test_framework.mininode import CBlockHeader, network_thread_start, P2PInterface, msg_block, msg_headers\n'), ((1535, 1557), 'test_framework.mininode.network_thread_start', 'network_thread_start', ([], {}), '()\n', (1555, 1557), False, 'from test_framework.mininode import CBlockHeader, network_thread_start, P2PInterface, msg_block, msg_headers\n'), ((2469, 2492), 'test_framework.util.wait_until', 'wait_until', (['node_synced'], {}), '(node_synced)\n', (2479, 2492), False, 'from test_framework.util import wait_until, p2p_port\n'), ((1000, 1019), 'test_framework.mininode.CBlockHeader', 'CBlockHeader', (['block'], {}), '(block)\n', (1012, 1019), False, 'from test_framework.mininode import CBlockHeader, network_thread_start, P2PInterface, msg_block, msg_headers\n'), ((1112, 1128), 'test_framework.mininode.msg_block', 'msg_block', (['block'], {}), '(block)\n', (1121, 1128), False, 'from test_framework.mininode import CBlockHeader, network_thread_start, P2PInterface, msg_block, msg_headers\n'), ((1513, 1524), 'test_framework.util.p2p_port', 'p2p_port', (['(0)'], {}), '(0)\n', (1521, 1524), False, 'from test_framework.util import wait_until, p2p_port\n'), ((1888, 1911), 'test_framework.blocktools.create_coinbase', 'create_coinbase', (['height'], {}), '(height)\n', (1903, 1911), False, 'from test_framework.blocktools import create_block, create_coinbase\n')] |
from guillotina.contrib.workflows.interfaces import IWorkflowChangedEvent
from guillotina.events import ObjectEvent
from zope.interface import implementer
@implementer(IWorkflowChangedEvent)
class WorkflowChangedEvent(ObjectEvent):
    """The workflow state of an object has changed"""
def __init__(self, object, workflow, action, comments):
ObjectEvent.__init__(self, object)
self.object = object
self.workflow = workflow
self.action = action
self.comments = comments
| [
"zope.interface.implementer",
"guillotina.events.ObjectEvent.__init__"
]
| [((158, 192), 'zope.interface.implementer', 'implementer', (['IWorkflowChangedEvent'], {}), '(IWorkflowChangedEvent)\n', (169, 192), False, 'from zope.interface import implementer\n'), ((338, 372), 'guillotina.events.ObjectEvent.__init__', 'ObjectEvent.__init__', (['self', 'object'], {}), '(self, object)\n', (358, 372), False, 'from guillotina.events import ObjectEvent\n')] |
"""
Suppress COVID EHR vaccine concepts.
Original Issues: DC-1692
"""
# Python imports
import logging
# Project imports
from cdr_cleaner.cleaning_rules.deid.concept_suppression import AbstractBqLookupTableConceptSuppression
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, CDM_TABLES
from utils import pipeline_logging
# Third party imports
from google.cloud.exceptions import GoogleCloudError
LOGGER = logging.getLogger(__name__)
SUPPRESSION_RULE_CONCEPT_TABLE = 'covid_vaccine_concepts'
COVID_VACCINE_CONCEPT_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS
with covid_vacc as (
SELECT *
FROM `{{project_id}}.{{dataset_id}}.concept`
WHERE (
-- done by name and vocab --
REGEXP_CONTAINS(concept_name, r'(?i)(COVID)') AND
REGEXP_CONTAINS(concept_name, r'(?i)(VAC)') AND
vocabulary_id not in ('PPI')
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(207)|(208)|(210)|(211)|(212)')
and vocabulary_id = 'CVX'
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(91300)|(91301)|(91302)|(91303)|(91304)')
and vocabulary_id = 'CPT4'
)
),
concepts_via_cr as (
select distinct c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_relationship`
on c.concept_id = concept_id_1
where concept_id_2 in (select concept_id from covid_vacc)
# and concept_id_1 not in (select concept_id from covid_vacc)
and (
relationship_id not in ('Subsumes', 'RxNorm dose form of', 'Dose form group of', 'RxNorm - SPL') OR
(relationship_id = 'RxNorm - SPL' and REGEXP_CONTAINS(concept_name, r'(?i)(COVID)'))
)
),
concepts_via_ca as (
select c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca
on c.concept_id = ca.descendant_concept_id
where ca.ancestor_concept_id in (select concept_id from covid_vacc)
)
select distinct * from covid_vacc
union distinct
select distinct * from concepts_via_ca
union distinct
select distinct * from concepts_via_cr
""")
class CovidEHRVaccineConceptSuppression(AbstractBqLookupTableConceptSuppression
):
def __init__(self,
project_id,
dataset_id,
sandbox_dataset_id,
table_namer=None):
"""
Initialize the class with proper information.
Set the issue numbers, description and affected datasets. As other tickets may affect
this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = "Suppress COVID EHR vaccine concepts."
super().__init__(
issue_numbers=['DC1692'],
description=desc,
affected_datasets=[cdr_consts.REGISTERED_TIER_DEID],
affected_tables=CDM_TABLES,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
concept_suppression_lookup_table=SUPPRESSION_RULE_CONCEPT_TABLE,
table_namer=table_namer)
def create_suppression_lookup_table(self, client):
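        # Render the suppression query for this project/dataset and run it to build the lookup table in the sandbox.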
concept_suppression_lookup_query = COVID_VACCINE_CONCEPT_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
sandbox_id=self.sandbox_dataset_id,
concept_suppression_lookup_table=self.
concept_suppression_lookup_table)
query_job = client.query(concept_suppression_lookup_query)
result = query_job.result()
if hasattr(result, 'errors') and result.errors:
LOGGER.error(f"Error running job {result.job_id}: {result.errors}")
raise GoogleCloudError(
f"Error running job {result.job_id}: {result.errors}")
def validate_rule(self, client, *args, **keyword_args):
"""
Validates the cleaning rule which deletes or updates the data from the tables
Method to run validation on cleaning rules that will be updating the values.
For example:
        if your class updates all the datetime fields you should be implementing the
        validation that checks that the datetime values that need to be updated no
        longer exist in the table.
        if your class deletes a subset of rows in the tables you should be implementing
        the validation that checks that the final row counts plus the deleted rows
        equal the initial row counts of the affected tables.
Raises RunTimeError if the validation fails.
"""
raise NotImplementedError("Please fix me.")
def setup_validation(self, client, *args, **keyword_args):
"""
Run required steps for validation setup
Method to run to setup validation on cleaning rules that will be updating or deleting the values.
For example:
if your class updates all the datetime fields you should be implementing the
logic to get the initial list of values which adhere to a condition we are looking for.
if your class deletes a subset of rows in the tables you should be implementing
the logic to get the row counts of the tables prior to applying cleaning rule
"""
raise NotImplementedError("Please fix me.")
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
| [
"logging.getLogger",
"cdr_cleaner.args_parser.parse_args",
"common.JINJA_ENV.from_string",
"cdr_cleaner.clean_cdr_engine.get_query_list",
"utils.pipeline_logging.configure",
"cdr_cleaner.clean_cdr_engine.clean_dataset",
"cdr_cleaner.clean_cdr_engine.add_console_logging",
"google.cloud.exceptions.GoogleCloudError"
]
| [((447, 474), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (464, 474), False, 'import logging\n'), ((565, 2264), 'common.JINJA_ENV.from_string', 'JINJA_ENV.from_string', (['"""\nCREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS\nwith covid_vacc as (\n SELECT *\n FROM `{{project_id}}.{{dataset_id}}.concept` \n WHERE (\n -- done by name and vocab --\n REGEXP_CONTAINS(concept_name, r\'(?i)(COVID)\') AND\n REGEXP_CONTAINS(concept_name, r\'(?i)(VAC)\') AND \n vocabulary_id not in (\'PPI\')\n ) OR (\n -- done by code and vocab --\n REGEXP_CONTAINS(concept_code, r\'(207)|(208)|(210)|(211)|(212)\')\n and vocabulary_id = \'CVX\'\n ) OR (\n -- done by code and vocab --\n REGEXP_CONTAINS(concept_code, r\'(91300)|(91301)|(91302)|(91303)|(91304)\')\n and vocabulary_id = \'CPT4\'\n )\n),\nconcepts_via_cr as (\nselect distinct c.*\nfrom `{{project_id}}.{{dataset_id}}.concept`as c\nleft join `{{project_id}}.{{dataset_id}}.concept_relationship`\non c.concept_id = concept_id_1\nwhere concept_id_2 in (select concept_id from covid_vacc)\n# and concept_id_1 not in (select concept_id from covid_vacc)\nand (\n relationship_id not in (\'Subsumes\', \'RxNorm dose form of\', \'Dose form group of\', \'RxNorm - SPL\') OR \n (relationship_id = \'RxNorm - SPL\' and REGEXP_CONTAINS(concept_name, r\'(?i)(COVID)\'))\n )\n),\nconcepts_via_ca as (\n select c.*\n from `{{project_id}}.{{dataset_id}}.concept`as c\n left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca\n on c.concept_id = ca.descendant_concept_id\n where ca.ancestor_concept_id in (select concept_id from covid_vacc) \n)\nselect distinct * from covid_vacc \nunion distinct\nselect distinct * from concepts_via_ca \nunion distinct \nselect distinct * from concepts_via_cr\n"""'], {}), '(\n """\nCREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS\nwith covid_vacc as (\n SELECT *\n FROM `{{project_id}}.{{dataset_id}}.concept` \n WHERE (\n -- done by name and vocab --\n REGEXP_CONTAINS(concept_name, r\'(?i)(COVID)\') AND\n REGEXP_CONTAINS(concept_name, r\'(?i)(VAC)\') AND \n vocabulary_id not in (\'PPI\')\n ) OR (\n -- done by code and vocab --\n REGEXP_CONTAINS(concept_code, r\'(207)|(208)|(210)|(211)|(212)\')\n and vocabulary_id = \'CVX\'\n ) OR (\n -- done by code and vocab --\n REGEXP_CONTAINS(concept_code, r\'(91300)|(91301)|(91302)|(91303)|(91304)\')\n and vocabulary_id = \'CPT4\'\n )\n),\nconcepts_via_cr as (\nselect distinct c.*\nfrom `{{project_id}}.{{dataset_id}}.concept`as c\nleft join `{{project_id}}.{{dataset_id}}.concept_relationship`\non c.concept_id = concept_id_1\nwhere concept_id_2 in (select concept_id from covid_vacc)\n# and concept_id_1 not in (select concept_id from covid_vacc)\nand (\n relationship_id not in (\'Subsumes\', \'RxNorm dose form of\', \'Dose form group of\', \'RxNorm - SPL\') OR \n (relationship_id = \'RxNorm - SPL\' and REGEXP_CONTAINS(concept_name, r\'(?i)(COVID)\'))\n )\n),\nconcepts_via_ca as (\n select c.*\n from `{{project_id}}.{{dataset_id}}.concept`as c\n left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca\n on c.concept_id = ca.descendant_concept_id\n where ca.ancestor_concept_id in (select concept_id from covid_vacc) \n)\nselect distinct * from covid_vacc \nunion distinct\nselect distinct * from concepts_via_ca \nunion distinct \nselect distinct * from concepts_via_cr\n"""\n )\n', (586, 2264), False, 'from common import JINJA_ENV, CDM_TABLES\n'), ((5661, 5680), 
'cdr_cleaner.args_parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (5678, 5680), True, 'import cdr_cleaner.args_parser as parser\n'), ((5685, 5758), 'utils.pipeline_logging.configure', 'pipeline_logging.configure', ([], {'level': 'logging.DEBUG', 'add_console_handler': '(True)'}), '(level=logging.DEBUG, add_console_handler=True)\n', (5711, 5758), False, 'from utils import pipeline_logging\n'), ((5794, 5828), 'cdr_cleaner.clean_cdr_engine.add_console_logging', 'clean_engine.add_console_logging', ([], {}), '()\n', (5826, 5828), True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((5850, 5981), 'cdr_cleaner.clean_cdr_engine.get_query_list', 'clean_engine.get_query_list', (['ARGS.project_id', 'ARGS.dataset_id', 'ARGS.sandbox_dataset_id', '[(CovidEHRVaccineConceptSuppression,)]'], {}), '(ARGS.project_id, ARGS.dataset_id, ARGS.\n sandbox_dataset_id, [(CovidEHRVaccineConceptSuppression,)])\n', (5877, 5981), True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((6085, 6135), 'cdr_cleaner.clean_cdr_engine.add_console_logging', 'clean_engine.add_console_logging', (['ARGS.console_log'], {}), '(ARGS.console_log)\n', (6117, 6135), True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((6144, 6274), 'cdr_cleaner.clean_cdr_engine.clean_dataset', 'clean_engine.clean_dataset', (['ARGS.project_id', 'ARGS.dataset_id', 'ARGS.sandbox_dataset_id', '[(CovidEHRVaccineConceptSuppression,)]'], {}), '(ARGS.project_id, ARGS.dataset_id, ARGS.\n sandbox_dataset_id, [(CovidEHRVaccineConceptSuppression,)])\n', (6170, 6274), True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((3922, 3993), 'google.cloud.exceptions.GoogleCloudError', 'GoogleCloudError', (['f"""Error running job {result.job_id}: {result.errors}"""'], {}), "(f'Error running job {result.job_id}: {result.errors}')\n", (3938, 3993), False, 'from google.cloud.exceptions import GoogleCloudError\n')] |
import pydbhub
from typing import Any, Dict, List, Tuple
from json.decoder import JSONDecodeError
import requests
import io
def send_request_json(query_url: str, data: Dict[str, Any]) -> Tuple[List[Any], str]:
"""
send_request_json sends a request to DBHub.io, formatting the returned result as JSON
Parameters
----------
query_url : str
url of the API endpoint
data : Dict[str, Any]
data to be processed to the server.
Returns
-------
Tuple[List[Any], str]
The returned data is
- a list of JSON object.
- a string describe error if occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
response = requests.post(query_url, data=data, headers=headers)
response.raise_for_status()
return response.json(), None
except JSONDecodeError as e:
return None, e.args[0]
except TypeError as e:
return None, e.args[0]
except requests.exceptions.HTTPError as e:
try:
return response.json(), e.args[0]
except JSONDecodeError:
return None, e.args[0]
except requests.exceptions.RequestException as e:
cause = e.args(0)
return None, str(cause.args[0])
def send_request(query_url: str, data: Dict[str, Any]) -> Tuple[List[bytes], str]:
"""
send_request sends a request to DBHub.io.
Parameters
    ----------
    query_url : str
        url of the API endpoint
    data : Dict[str, Any]
        data to be processed to the server.
Returns
-------
List[bytes]
database file is returned as a list of bytes
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
response = requests.post(query_url, data=data, headers=headers)
response.raise_for_status()
return response.content, None
except requests.exceptions.HTTPError as e:
return None, e.args[0]
except requests.exceptions.RequestException as e:
cause = e.args(0)
return None, str(cause.args[0])
def send_upload(query_url: str, data: Dict[str, Any], db_bytes: io.BufferedReader) -> Tuple[List[Any], str]:
"""
send_upload uploads a database to DBHub.io.
Parameters
----------
query_url : str
url of the API endpoint.
data : Dict[str, Any]
data to be processed to the server.
db_bytes : io.BufferedReader
A buffered binary stream of the database file.
Returns
-------
Tuple[List[Any], str]
The returned data is
- a list of JSON object.
- a string describe error if occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
files = {"file": db_bytes}
response = requests.post(query_url, data=data, headers=headers, files=files)
response.raise_for_status()
if response.status_code != 201:
# The returned status code indicates something went wrong
try:
return response.json(), str(response.status_code)
except JSONDecodeError:
return None, str(response.status_code)
return response.json(), None
except requests.exceptions.HTTPError as e:
try:
return response.json(), e.args[0]
except JSONDecodeError:
return None, e.args[0]
except requests.exceptions.RequestException as e:
cause = e.args(0)
return None, str(cause.args[0])
| [
"requests.post"
]
| [((721, 773), 'requests.post', 'requests.post', (['query_url'], {'data': 'data', 'headers': 'headers'}), '(query_url, data=data, headers=headers)\n', (734, 773), False, 'import requests\n'), ((1752, 1804), 'requests.post', 'requests.post', (['query_url'], {'data': 'data', 'headers': 'headers'}), '(query_url, data=data, headers=headers)\n', (1765, 1804), False, 'import requests\n'), ((2778, 2843), 'requests.post', 'requests.post', (['query_url'], {'data': 'data', 'headers': 'headers', 'files': 'files'}), '(query_url, data=data, headers=headers, files=files)\n', (2791, 2843), False, 'import requests\n')] |
#This script imports game data from ESPN and odds from the ODDS-API, then inserts them into a MySQL table; example in Workbench here https://puu.sh/HOKCj/ce199eec8e.png
import mysql.connector
import requests
import json
import datetime
import time
#Connection to the MYSQL Server.
mydb = mysql.connector.connect(
host="",
user="",
password="",
database="basketbet_data"
)
mycursor = mydb.cursor()
#Games List.
allGames=[]
#Gets the game Data from ESPN API given the link.
def newGetter(gameDay):
    #JSON response for the requested game day.
response = requests.get(gameDay).json()
gameData = response["events"]
#Loop through to collect GameDay data.
a=0
while a < len(gameData):
game = str(gameData[a]['name'])
game_ID = str(gameData[a]['id'])
game_Date = str(gameData[a]['date'][:-7])
game_Time = str(gameData[a]['date'][11:-1])
game_Period = str(gameData[a]['status']['period'])
game_Status = str(gameData[a]['status']['type']['description'])
home_Score = str(gameData[a]['competitions'][0]['competitors'][0]['score'])
away_Score = str(gameData[a]['competitions'][0]['competitors'][1]['score'])
#Quick fix to change Clippers Name from LA Clippers to Los Angeles Clippers.
if str(gameData[a]['competitions'][0]['competitors'][0]['team']['displayName']) == 'LA Clippers':
home_Team = 'Los Angeles Clippers'
else:
home_Team = str(gameData[a]['competitions'][0]['competitors'][0]['team']['displayName'])
if str(gameData[a]['competitions'][0]['competitors'][1]['team']['displayName']) == 'LA Clippers':
away_Team = 'Los Angeles Clippers'
else:
away_Team = str(gameData[a]['competitions'][0]['competitors'][1]['team']['displayName'])
#Appends the Game Data to the list.
allGames.append((game_ID, game, home_Team, home_Score, away_Team, away_Score, game_Date, game_Time, game_Period, game_Status))
a+=1
#Gets the Odds from the ODDS-API.
def oddsGetter():
#Parameters for Odds Api.
parameters = {
"sport" : "basketball_nba",
"region" : "uk",
"mkt" : "h2h",
"apiKey" : "",
}
#JSON Response.
response = requests.get("https://api.the-odds-api.com/v3/odds/", params=parameters)
data = response.json()['data']
team0OddsInfo=[]
team1OddsInfo=[]
team0_odds = ''
team1_odds = ''
#Appends the odds info to a list as strings.
for game in data:
for site in game['sites']:
if site['site_key'] == "paddypower":
team0_odds = str(site['odds']['h2h'][0])
team1_odds = str(site['odds']['h2h'][1])
if team0_odds == '':
team0_odds = 0
if team1_odds == '':
team1_odds = 0
team0 = str(game['teams'][0])
team1 = str(game['teams'][1])
startTime = game['commence_time']
gameDate = str(datetime.datetime.utcfromtimestamp(startTime).strftime('%Y-%m-%d %H:%M:%S'))[:-9]
team0OddsInfo.append((team0, team0_odds, gameDate))
team1OddsInfo.append((team1, team1_odds, gameDate))
a=0
#as both lists are the same length, it loops through one and Updates the tables where needed.
while a < len(team0OddsInfo):
query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_Date = %s'
gameDate = (str(team0OddsInfo[a][2]),)
mycursor.execute(query_string, gameDate)
matchedGames = mycursor.fetchall()
b=0
while b < len(matchedGames):
if matchedGames[b][2] == team0OddsInfo[a][0]:
query_list = [team0OddsInfo[a][1], team1OddsInfo[a][1], matchedGames[b][0]]
query_string = 'UPDATE all_games SET Home_Odds = %s, Away_Odds = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
elif matchedGames[b][5] == team0OddsInfo[a][0]:
query_list = [team0OddsInfo[a][1], team1OddsInfo[a][1], matchedGames[b][0]]
query_string = 'UPDATE all_games SET Away_Odds = %s, Home_Odds = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
b+=1
a+=1
#For the console to show when odds were updated.
mydb.commit()
time = datetime.datetime.utcnow()
print('\n' + 'ODDS UPDATE AT: ' + str(time))
print('--------------------------------')
print('--------------------------------')
print(len(team0OddsInfo), "GAME ODDS inserted.")
print('REMAINING REQUESTS:', response.headers['x-requests-remaining'])
print('USED REQUESTS:', response.headers['x-requests-used'])
print('--------------------------------')
print('--------------------------------')
#Keep the script running: sleep about 300 seconds per loop, with the counter starting at 72 so games update every 5 min and odds every 6 hr.
counter=72
startTime = time.time()
while True:
#Today, Yesterday and Tomorrow.
today = datetime.date.today()
yesterday = today + datetime.timedelta(days=-1)
tomorrow = today + datetime.timedelta(days=1)
#Removing the - from the dates for the URLs, then making the URLs.
todayShort = str(today).replace('-', '')
yesterdayShort = str(yesterday).replace('-', '')
tomorrowShort = str(tomorrow).replace('-', '')
yesterdayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + yesterdayShort + '-' + yesterdayShort
todayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + todayShort + '-' + todayShort
tomorrowUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + tomorrowShort + '-' + tomorrowShort
newGetter(yesterdayUrl)
newGetter(todayUrl)
newGetter(tomorrowUrl)
#Inserting or updating the table in MYSQL with the games.
c=0
updateCount=0
newGameCount=0
while c < len(allGames):
query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_ID = %s'
gameID = (str(allGames[c][0]),)
mycursor.execute(query_string, gameID)
if mycursor.fetchone():
updateCount+=1
query_list = [allGames[c][1], allGames[c][2], allGames[c][4], allGames[c][5], allGames[c][3], allGames[c][6], allGames[c][7], allGames[c][8], allGames[c][9], allGames[c][0]]
query_string = 'UPDATE all_games SET Game_Name = %s, Home_Team = %s, Away_Team = %s, Away_Score = %s, Home_Score = %s, Game_Date = %s, Game_Time = %s, Game_Period = %s, Game_Status = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
mydb.commit()
else:
newGameCount+=1
query_string = "INSERT INTO basketbet_data.all_games (Game_ID, Game_Name, Home_Team, Home_Odds, Home_Score, Away_Team, Away_Odds, Away_Score, Game_Date, Game_Time, Game_Period, Game_Status) VALUES (%s, %s, %s, 0, %s, %s, 0, %s, %s, %s, %s, %s)"
mycursor.execute(query_string, allGames[c])
mydb.commit()
c+=1
#Prints to console what games were updated and what new games were inserted.
print('----------------------------------------')
print(str(updateCount) + ' GAMES UPDATED, and ' + str(newGameCount) + ' NEW GAMES inserted.')
print('----------------------------------------')
allGames=[]
#Counter for the Odds script.
if counter==72:
oddsGetter()
counter=0
else:
counter+=1
print('\n')
time.sleep(300 - ((time.time() - startTime) % 300)) | [
"datetime.datetime.utcfromtimestamp",
"datetime.datetime.utcnow",
"datetime.date.today",
"requests.get",
"datetime.timedelta",
"time.time"
]
| [((5112, 5123), 'time.time', 'time.time', ([], {}), '()\n', (5121, 5123), False, 'import time\n'), ((2357, 2429), 'requests.get', 'requests.get', (['"""https://api.the-odds-api.com/v3/odds/"""'], {'params': 'parameters'}), "('https://api.the-odds-api.com/v3/odds/', params=parameters)\n", (2369, 2429), False, 'import requests\n'), ((4501, 4527), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4525, 4527), False, 'import datetime\n'), ((5193, 5214), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (5212, 5214), False, 'import datetime\n'), ((5240, 5267), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (5258, 5267), False, 'import datetime\n'), ((5292, 5318), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5310, 5318), False, 'import datetime\n'), ((587, 608), 'requests.get', 'requests.get', (['gameDay'], {}), '(gameDay)\n', (599, 608), False, 'import requests\n'), ((7819, 7830), 'time.time', 'time.time', ([], {}), '()\n', (7828, 7830), False, 'import time\n'), ((3100, 3145), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['startTime'], {}), '(startTime)\n', (3134, 3145), False, 'import datetime\n')] |
from typing import Callable
import numpy as np
from hmc.integrators.states.leapfrog_state import LeapfrogState
from hmc.integrators.fields import riemannian
from hmc.linalg import solve_psd
class RiemannianLeapfrogState(LeapfrogState):
"""The Riemannian leapfrog state uses the Fisher information matrix to provide
a position-dependent Riemannian metric. As such, computing the gradients of
the Hamiltonian requires higher derivatives of the metric, which vanish in
the Euclidean case.
"""
def __init__(self,
position: np.ndarray,
momentum: np.ndarray):
super().__init__(position, momentum)
self._jac_metric: np.ndarray
self._grad_logdet_metric: np.ndarray
@property
def requires_update(self) -> bool:
o = self.log_posterior is None or \
self.grad_log_posterior is None or \
self.metric is None or \
self.inv_metric is None or \
self.jac_metric is None or \
self.grad_logdet_metric is None
return o
@property
def jac_metric(self):
return self._jac_metric
@jac_metric.setter
def jac_metric(self, value):
self._jac_metric = value
@jac_metric.deleter
def jac_metric(self):
del self._jac_metric
@property
def grad_logdet_metric(self):
return self._grad_logdet_metric
@grad_logdet_metric.setter
def grad_logdet_metric(self, value):
self._grad_logdet_metric = value
@grad_logdet_metric.deleter
def grad_logdet_metric(self):
del self._grad_logdet_metric
def update(self, auxiliaries: Callable):
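        # Evaluate the log-posterior, its gradient, the metric, and the metric Jacobian at the current position.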
num_dims = len(self.position)
log_posterior, grad_log_posterior, metric, jac_metric = auxiliaries(self.position)
jac_metric = np.swapaxes(jac_metric, 0, -1)
inv_metric, sqrtm_metric = solve_psd(metric, return_chol=True)
grad_logdet_metric = riemannian.grad_logdet(inv_metric, jac_metric, num_dims)
self.log_posterior = log_posterior
self.grad_log_posterior = grad_log_posterior
self.metric = metric
self.sqrtm_metric = sqrtm_metric
self.inv_metric = inv_metric
self.jac_metric = jac_metric
self.grad_logdet_metric = grad_logdet_metric
self.velocity = riemannian.velocity(inv_metric, self.momentum)
self.force = riemannian.force(self.velocity, grad_log_posterior, jac_metric, grad_logdet_metric)
def clear(self):
super().clear()
del self.jac_metric
del self.grad_logdet_metric
del self.metric
del self.inv_metric
del self.logdet_metric
del self.sqrtm_metric
| [
"hmc.integrators.fields.riemannian.velocity",
"hmc.integrators.fields.riemannian.grad_logdet",
"hmc.integrators.fields.riemannian.force",
"numpy.swapaxes",
"hmc.linalg.solve_psd"
]
| [((1817, 1847), 'numpy.swapaxes', 'np.swapaxes', (['jac_metric', '(0)', '(-1)'], {}), '(jac_metric, 0, -1)\n', (1828, 1847), True, 'import numpy as np\n'), ((1883, 1918), 'hmc.linalg.solve_psd', 'solve_psd', (['metric'], {'return_chol': '(True)'}), '(metric, return_chol=True)\n', (1892, 1918), False, 'from hmc.linalg import solve_psd\n'), ((1948, 2004), 'hmc.integrators.fields.riemannian.grad_logdet', 'riemannian.grad_logdet', (['inv_metric', 'jac_metric', 'num_dims'], {}), '(inv_metric, jac_metric, num_dims)\n', (1970, 2004), False, 'from hmc.integrators.fields import riemannian\n'), ((2322, 2368), 'hmc.integrators.fields.riemannian.velocity', 'riemannian.velocity', (['inv_metric', 'self.momentum'], {}), '(inv_metric, self.momentum)\n', (2341, 2368), False, 'from hmc.integrators.fields import riemannian\n'), ((2390, 2477), 'hmc.integrators.fields.riemannian.force', 'riemannian.force', (['self.velocity', 'grad_log_posterior', 'jac_metric', 'grad_logdet_metric'], {}), '(self.velocity, grad_log_posterior, jac_metric,\n grad_logdet_metric)\n', (2406, 2477), False, 'from hmc.integrators.fields import riemannian\n')] |
from django.utils.translation import ugettext_lazy as _
USER_TYPE_STAFF = 'STAFF'
USER_TYPE_ADMIN = 'ADMIN'
USER_TYPE_BARBER = 'BARBER'
USER_TYPE_CHOICES = (
(USER_TYPE_STAFF, _('Dev')),
(USER_TYPE_ADMIN, _('Admin')),
(USER_TYPE_BARBER, _('Barber')),
) | [
"django.utils.translation.ugettext_lazy"
]
| [((182, 190), 'django.utils.translation.ugettext_lazy', '_', (['"""Dev"""'], {}), "('Dev')\n", (183, 190), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((215, 225), 'django.utils.translation.ugettext_lazy', '_', (['"""Admin"""'], {}), "('Admin')\n", (216, 225), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((251, 262), 'django.utils.translation.ugettext_lazy', '_', (['"""Barber"""'], {}), "('Barber')\n", (252, 262), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
#!/usr/bin/env python
# Copyright (C) 2018 rerobots, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line interface
"""
import argparse
import json
import logging
import logging.handlers
import os
import os.path
import subprocess
import sys
import uuid
import yaml
from aiohttp.client_exceptions import ClientConnectorError as ConnectionError
from .core import WorkspaceInstance
from .mgmt import get_local_config, add_key, add_ssh_path, list_local_keys
from .mgmt import find_wd, modify_local, rm_wd
from .api import HSAPIClient
from .err import Error as HSError
from .addons import camera_main, stop_cameras
from .addons import add_cmdsh, rm_cmdsh, add_vnc, rm_vnc, add_mistyproxy, rm_mistyproxy
def get_config_with_index(id_prefix=None):
try:
config = get_local_config()
except:
print('error loading configuration data. does it exist?')
return None, None, 1
if len(config['wdeployments']) == 0:
print(('ERROR: no workspace deployment in local configuration.'))
return config, None, 1
if isinstance(id_prefix, list):
if len(id_prefix) == 0:
if len(config['wdeployments']) > 1:
print('ERROR: ambiguous command: more than 1 workspace deployment defined.')
return config, None, 1
index = [0]
else:
indices = []
for idp in id_prefix:
index = find_wd(config, idp)
if index is None:
print('ERROR: given prefix does not match precisely 1 workspace deployment')
return config, None, 1
indices.append(index)
index = indices
elif id_prefix:
index = find_wd(config, id_prefix)
if index is None:
print('ERROR: given prefix does not match precisely 1 workspace deployment')
return config, None, 1
else:
if len(config['wdeployments']) > 1:
print('ERROR: ambiguous command: more than 1 workspace deployment defined.')
return config, None, 1
index = 0
return config, index, 0
def main(argv=None):
pkglogger = logging.getLogger('hardshare')
pkglogger.setLevel(logging.WARNING)
loghandler = logging.handlers.WatchedFileHandler(filename='hardshare_client.log', mode='a', delay=True)
loghandler.setLevel(logging.DEBUG)
loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});'
' %(asctime)s ; %(message)s'
.format(os.getpid())))
pkglogger.addHandler(loghandler)
if argv is None:
argv = sys.argv[1:]
argparser = argparse.ArgumentParser(description=('Command-line interface'
' for the hardshare client'), add_help=False)
argparser.add_argument('-h', '--help', dest='print_help',
action='store_true', default=False,
help='print this help message and exit')
argparser.add_argument('-V', '--version', action='store_true', default=False,
help='print version of hardshare (this) package.',
dest='print_version')
argparser.add_argument('-v', '--verbose', action='store_true', default=False,
help='print verbose messages about actions by the hardshare client',
dest='verbose')
argparser.add_argument('--format', metavar='FORMAT',
default=None, type=str,
help=('special output formatting (default is no special formatting); '
'options: YAML , JSON'),
dest='output_format')
subparsers = argparser.add_subparsers(dest='command')
subparsers.add_parser('version', help='print version number and exit.')
help_parser = subparsers.add_parser('help', help='print this help message and exit')
help_parser.add_argument('help_target_command', metavar='COMMAND', type=str, nargs='?')
config_commanddesc = 'manage local and remote configuration'
config_parser = subparsers.add_parser('config',
description=config_commanddesc,
help=config_commanddesc)
config_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment for configuration changes'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
config_parser.add_argument('-c', '--create', action='store_true', default=False,
dest='create_config',
help='if no local configuration is found, then create one')
config_parser.add_argument('--add-terminate-prog', metavar='PATH',
dest='add_terminate_prog', default=None,
help='add program to list of commands to execute')
config_parser.add_argument('--rm-terminate-prog', metavar='PATH',
dest='rm_terminate_prog', default=None,
help=('remove program from list of commands to execute; '
'for example, '
'copy-and-paste value shown in `hardshare config -l` here'))
config_parser.add_argument('--add-key', metavar='FILE',
dest='new_api_token',
help='add new account key')
config_parser.add_argument('--add-ssh-path', metavar='PATH',
dest='new_ssh_path',
help='add path to SSH key pair (does NOT copy the key)')
config_parser.add_argument('--add-raw-device', metavar='PATH', type=str,
dest='raw_device_path', default=None,
help='add device file to present in container')
config_parser.add_argument('--cprovider', metavar='CPROVIDER', type=str,
dest='cprovider', default=None,
help='select a container provider: docker, podman, proxy')
config_parser.add_argument('--assign-image', metavar='IMG', type=str,
dest='cprovider_img', default=None,
help='assign image for cprovider to use (advanced option)')
config_parser.add_argument('--rm-raw-device', metavar='PATH', type=str,
dest='remove_raw_device_path', default=None,
help='remove device previously marked for inclusion in container')
config_parser.add_argument('--add-init-inside', metavar='CMD', type=str,
dest='add_init_inside', default=None,
help='add command to be executed inside container')
config_parser.add_argument('--rm-init-inside', action='store_true', default=False,
dest='rm_init_inside',
help='remove (empty) list of commands for inside initialization')
config_parser.add_argument('-p', '--prune', action='store_true', default=False,
dest='prune_err_keys',
help=('delete files in local key directory that'
' are not valid; to get list of'
' files with errors, try `--list`'))
config_parser.add_argument('-l', '--list', action='store_true', default=False,
dest='list_config',
help='list configuration')
config_parser.add_argument('--local', action='store_true', default=False,
dest='only_local_config',
help='only show local configuration data')
config_parser.add_argument('--include-dissolved', action='store_true', default=False,
dest='include_dissolved',
help='include configuration data of dissolved workspace deployments')
config_parser.add_argument('--declare', metavar='ID',
dest='declared_wdeployment_id', default=None,
help=('declare that workspace deployment is'
' hosted here. (this only works if it'
' has been previously registered under'
' the same user account.)'))
rules_commanddesc = 'modify access rules (also known as capabilities or permissions)'
rules_parser = subparsers.add_parser('rules',
description=rules_commanddesc,
help=rules_commanddesc)
rules_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
rules_parser.add_argument('-l', '--list', action='store_true', default=False,
dest='list_rules',
help='list all rules')
rules_parser.add_argument('--permit-me', action='store_true', default=False,
dest='add_rule_permit_me',
help='permit instantiations by you (the owner)')
rules_parser.add_argument('--drop-all', action='store_true', default=False,
dest='drop_all_rules',
help=('remove all access rules; '
'note that access is denied by default, '
'including to you (the owner)'))
rules_parser.add_argument('--permit-all', action='store_true', default=False,
dest='add_rule_permit_all',
help='permit instantiations by anyone')
register_commanddesc = 'register new workspace deployment'
register_parser = subparsers.add_parser('register',
description=register_commanddesc,
help=register_commanddesc)
register_parser.add_argument('--permit-more', action='store_false', default=True,
dest='register_at_most_one',
help=('permit registration of more than 1 wdeployment; '
'default is to fail if local configuration already '
'has wdeployment declared'))
check_commanddesc = 'check registration of this workspace deployment'
check_parser = subparsers.add_parser('check',
description=check_commanddesc,
help=check_commanddesc)
check_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment to check'
' (can be unique prefix)'))
dissolve_commanddesc = ('dissolve this workspace deployment, making it'
' unavailable for any future use'
' (THIS CANNOT BE UNDONE)')
dissolve_parser = subparsers.add_parser('dissolve',
description=dissolve_commanddesc,
help=dissolve_commanddesc)
dissolve_parser.add_argument('wdid', metavar='ID', nargs='?', default=None,
help='id of workspace deployment to dissolve')
status_commanddesc = 'get status of local instances and daemon'
status_parser = subparsers.add_parser('status',
description=status_commanddesc,
help=status_commanddesc)
status_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix)'))
advertise_commanddesc = 'advertise availability, accept new instances'
advertise_parser = subparsers.add_parser('ad',
description=advertise_commanddesc,
help=advertise_commanddesc)
advertise_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment to advertise'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
advertise_parser.add_argument('-d', '--daemon', action='store_true', default=False,
help='detach from invoking terminal (i.e., run as daemon)',
dest='become_daemon')
attach_camera_commanddesc = 'attach camera stream to workspace deployments'
attach_camera_parser = subparsers.add_parser('attach-camera',
description=attach_camera_commanddesc,
help=attach_camera_commanddesc)
attach_camera_parser.add_argument('camera', default=0,
type=int,
help=('on Linux, 0 typically implies /dev/video0; '
'if you only have one camera, then try 0'))
attach_camera_parser.add_argument('id_prefix', metavar='ID', nargs='*', default=None,
help=('id of workspace deployment on which to attach'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
attach_camera_parser.add_argument('--width-height', metavar='W,H', type=str,
dest='attach_camera_res', default=None,
help=('width and height of captured images; '
'default depends on the supporting drivers'))
attach_camera_parser.add_argument('--crop', metavar='CROPCONFIG', type=str,
dest='attach_camera_crop_config', default=None,
help=('image crop configuration; '
'default: all wdeployments get full images'))
attach_camera_parser.add_argument('-d', '--daemon', action='store_true', default=False,
help='detach from invoking terminal (i.e., run as daemon)',
dest='become_daemon')
stop_cameras_commanddesc = 'stop camera streams previously started by attach-camera'
stop_cameras_parser = subparsers.add_parser('stop-cameras',
description=stop_cameras_commanddesc,
help=stop_cameras_commanddesc)
stop_cameras_parser.add_argument('-a', '--all', action='store_true', default=False,
help=('stop all attached cameras associated with this '
'user account, whether or not started on this host'),
dest='all_cameras')
addon_cmdsh_commanddesc = 'manage add-on cmdsh for your workspace deployments'
addon_cmdsh_parser = subparsers.add_parser('addon-cmdsh',
description=addon_cmdsh_commanddesc,
help=addon_cmdsh_commanddesc)
addon_cmdsh_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_cmdsh_parser.add_argument('--add', action='store_true', default=False,
help='add add-on cmdsh to enable terminal access via WebSockets',
dest='add_addon_cmdsh')
addon_cmdsh_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on cmdsh',
dest='rm_addon_cmdsh')
addon_vnc_commanddesc = 'manage add-on vnc for your workspace deployments'
addon_vnc_parser = subparsers.add_parser('addon-vnc',
description=addon_vnc_commanddesc,
help=addon_vnc_commanddesc)
addon_vnc_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_vnc_parser.add_argument('--add', action='store_true', default=False,
help='add add-on vnc to enable VNC via rerobots.net',
dest='add_addon_vnc')
addon_vnc_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on vnc',
dest='rm_addon_vnc')
addon_mistyproxy_commanddesc = 'manage add-on mistyproxy for your workspace deployments'
addon_mistyproxy_parser = subparsers.add_parser('addon-mistyproxy',
description=addon_mistyproxy_commanddesc,
help=addon_mistyproxy_commanddesc)
addon_mistyproxy_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_mistyproxy_parser.add_argument('--add', action='store_true', default=False,
help='add add-on mistyproxy to allow HTTP proxy to Misty robots',
dest='add_addon_mistyproxy')
addon_mistyproxy_parser.add_argument('--ip', metavar='ADDRESS', default=None,
help='IP address of the Misty robot',
dest='targetaddr')
addon_mistyproxy_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on mistyproxy',
dest='rm_addon_mistyproxy')
terminate_commanddesc = 'mark as unavailable; optionally wait for current instance to finish'
terminate_parser = subparsers.add_parser('stop-ad',
description=terminate_commanddesc,
help=terminate_commanddesc)
terminate_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix)'))
terminate_parser.add_argument('-f', '--force', action='store_true', default=False,
help=('if there is an active instance, then'
' stop it without waiting'),
dest='force_terminate')
help_message_purge = ('if the server indicates that an instance is active,'
' but there is not one or it is otherwise in a'
' non-recoverable state, then mark it remotely as'
' terminated and attempt local clean-up; this'
' command is a last resort. First, try `hardshare'
                          ' stop-ad` without --purge.')
terminate_parser.add_argument('--purge', action='store_true', default=False,
help=help_message_purge,
dest='purge_supposed_instance')
argv_parsed = argparser.parse_args(argv)
if argv_parsed.print_version or argv_parsed.command == 'version':
from . import __version__ as hardshare_pkg_version
print(hardshare_pkg_version)
return 0
elif argv_parsed.command is None or argv_parsed.command == 'help':
if hasattr(argv_parsed, 'help_target_command') and argv_parsed.help_target_command is not None:
if argv_parsed.help_target_command == 'config':
config_parser.print_help()
elif argv_parsed.help_target_command == 'rules':
rules_parser.print_help()
elif argv_parsed.help_target_command == 'register':
register_parser.print_help()
elif argv_parsed.help_target_command == 'check':
check_parser.print_help()
elif argv_parsed.help_target_command == 'dissolve':
dissolve_parser.print_help()
elif argv_parsed.help_target_command == 'status':
status_parser.print_help()
elif argv_parsed.help_target_command == 'attach-camera':
attach_camera_parser.print_help()
elif argv_parsed.help_target_command == 'stop-cameras':
stop_cameras_parser.print_help()
elif argv_parsed.help_target_command == 'addon-cmdsh':
addon_cmdsh_parser.print_help()
elif argv_parsed.help_target_command == 'addon-vnc':
addon_vnc_parser.print_help()
elif argv_parsed.help_target_command == 'addon-mistyproxy':
addon_mistyproxy_parser.print_help()
elif argv_parsed.help_target_command == 'ad':
advertise_parser.print_help()
elif argv_parsed.help_target_command == 'stop-ad':
terminate_parser.print_help()
else:
argparser.print_help()
else:
argparser.print_help()
return 0
if argv_parsed.verbose:
pkglogger.setLevel(logging.DEBUG)
if argv_parsed.output_format is not None:
output_format = argv_parsed.output_format.lower()
if output_format not in ['yaml', 'json']:
print('output format unrecognized: {}'.format(argv_parsed.output_format))
return 1
else:
output_format = None
try:
ac = HSAPIClient()
except:
ac = None
if argv_parsed.command == 'status':
try:
config = get_local_config()
except:
print('error loading configuration data. does it exist?')
return 1
if argv_parsed.id_prefix is None:
if len(config['wdeployments']) == 0:
findings = [WorkspaceInstance.inspect_instance()]
else:
findings = []
for wd in config['wdeployments']:
findings.append(WorkspaceInstance.inspect_instance(wdeployment=wd))
else:
findings = []
for m in find_wd(config, argv_parsed.id_prefix, one_or_none=False):
findings.append(WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][m]))
if output_format == 'json':
print(json.dumps(findings))
else: # output_format == 'yaml'
print(yaml.dump(findings, default_flow_style=False))
elif argv_parsed.command == 'attach-camera':
config, indices, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployments = [config['wdeployments'][jj]['id'] for jj in indices]
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
if argv_parsed.attach_camera_res:
width, height = [int(x) for x in argv_parsed.attach_camera_res.split(',')]
if width < 1 or height < 1:
print('Width, height must be positive')
return 1
else:
width, height = None, None
if argv_parsed.attach_camera_crop_config:
crop = json.loads(argv_parsed.attach_camera_crop_config)
else:
crop = None
if argv_parsed.become_daemon:
if os.fork() != 0:
return 0
os.close(0)
os.close(1)
os.close(2)
try:
camera_main(wdeployments, tok=tok, dev=argv_parsed.camera, width=width, height=height, crop=crop)
except ConnectionError:
if not argv_parsed.become_daemon:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'stop-cameras':
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
stop_cameras(tok, allcam=argv_parsed.all_cameras)
except ConnectionError:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'addon-cmdsh':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_cmdsh:
add_cmdsh(wdeployment_id, tok)
elif argv_parsed.rm_addon_cmdsh:
rm_cmdsh(wdeployment_id, tok)
else:
print('Use `hardshare addon-cmdsh` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-cmdsh')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'addon-vnc':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_vnc:
add_vnc(wdeployment_id, tok)
elif argv_parsed.rm_addon_vnc:
rm_vnc(wdeployment_id, tok)
else:
print('Use `hardshare addon-vnc` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-vnc')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'addon-mistyproxy':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_mistyproxy:
if argv_parsed.targetaddr is None:
print('--ip is required with --add')
return 1
add_mistyproxy(wdeployment_id, tok, argv_parsed.targetaddr)
elif argv_parsed.rm_addon_mistyproxy:
rm_mistyproxy(wdeployment_id, tok)
else:
print('Use `hardshare addon-mistyproxy` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-mistyproxy')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'ad':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
if 'ssh_key' not in config or config['ssh_key'] is None:
print('WARNING: local configuration does not declare SSH key.\n'
'Instances with connection type sshtun cannot launch.')
pkglogger.removeHandler(loghandler)
if argv_parsed.become_daemon:
if os.fork() != 0:
return 0
os.close(0)
os.close(1)
os.close(2)
else:
pkglogger.addHandler(logging.StreamHandler())
logfname = 'hardshare_client.{}.log'.format(config['wdeployments'][index]['id'])
loghandler = logging.FileHandler(filename=logfname, mode='a', delay=True)
loghandler.setLevel(logging.DEBUG)
loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});'
' %(asctime)s ; %(message)s'
.format(os.getpid())))
pkglogger.addHandler(loghandler)
return ac.run_sync(config['wdeployments'][index]['id'])
elif argv_parsed.command == 'stop-ad':
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
if argv_parsed.purge_supposed_instance:
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--purge not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
findings = WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][index])
if 'container' in findings:
try:
subprocess.check_call([cprovider, 'rm', '-f',
findings['container']['name']],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except:
print('failed to stop container `{}`'.format(findings['container']['name']))
return 1
return 0
else:
print('failed to detect local instance')
return 1
else:
if ac is None:
print('cannot terminate without valid API client')
return 1
try:
ac.terminate(config['wdeployments'][index]['id'])
except FileNotFoundError:
print('ERROR: cannot reach daemon. Does it exist? (Try `hardshare status`)')
return 1
return 0
elif argv_parsed.command == 'register':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
try:
print(ac.register_new(at_most_one=argv_parsed.register_at_most_one))
except HSError as err:
print('ERROR: {}'.format(err))
return 1
except ConnectionError:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'rules':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
if argv_parsed.id_prefix is None:
wdid = None
else:
try:
wdid = str(uuid.UUID(argv_parsed.id_prefix))
except:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
print('The given ID does not appear to be valid.')
return 1
wdid = config['wdeployments'][index]['id']
if argv_parsed.list_rules:
try:
res = ac.get_access_rules(wdid)
except Exception as err:
print('{}'.format(err))
return 1
if 'err' in res:
if res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
res['comments'] = [
'Access is denied unless a rule explicitly permits it.',
]
if output_format == 'json':
print(json.dumps(res))
else: # output_format == 'yaml'
print(yaml.dump(res, default_flow_style=False))
elif argv_parsed.drop_all_rules or argv_parsed.add_rule_permit_me:
try:
if argv_parsed.drop_all_rules:
ac.drop_access_rules(wdid)
elif argv_parsed.add_rule_permit_me:
ac.add_access_rule(wdid)
except Exception as err:
print('{}'.format(err))
return 1
elif argv_parsed.add_rule_permit_all:
ui_input = None
while ui_input not in ('y', 'yes'):
print('Do you want to permit access by anyone? [y/N] ', end='')
ui_input = input().lower()
if ui_input in ('n', 'no', ''):
return 1
try:
ac.add_access_rule(wdid, to_user='*')
except Exception as err:
print('{}'.format(err))
return 1
else:
print('Use `hardshare rules` with a switch. For example, `hardshare rules -l`')
print('or to get a help message, enter\n\n hardshare help rules')
return 1
elif argv_parsed.command == 'check':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
try:
res = ac.check_registration(argv_parsed.id_prefix)
except:
print('Error occurred while contacting remote server '
'at {}'.format(ac.base_uri))
return 1
if 'err' in res:
if res['err'] == 'not found':
print('not found: workspace deployment with id prefix {}'
.format(res['id_prefix']))
elif res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
else:
print('summary of workspace deployment {}'.format(res['id']))
print('\tcreated: {}'.format(res['date_created']))
print('\torigin (address) of registration: {}'.format(res['origin']))
if 'date_dissolved' in res:
print('\tdissolved: {}'.format(res['date_dissolved']))
elif argv_parsed.command == 'dissolve':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
try:
wdid = str(uuid.UUID(argv_parsed.wdid))
except:
print('The given ID does not appear to be valid.')
return 1
ui_input = None
while ui_input not in ('y', 'yes'):
print(('Do you want to dissolve {}? This action cannot be undone. '
'[y/N] ').format(wdid), end='')
ui_input = input().lower()
if ui_input in ('n', 'no', ''):
return 1
try:
res = ac.dissolve_registration(wdid)
except:
print('Error occurred while contacting remote server '
'at {}'.format(ac.base_uri))
return 1
if 'err' in res:
if res['err'] == 'not found':
print('not found: workspace deployment with id prefix {}'
.format(res['id_prefix']))
elif res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
# Remove from local configuration, if present
rm_wd(get_local_config(), wdid, save=True)
elif argv_parsed.command == 'config':
if argv_parsed.list_config:
try:
config = get_local_config(create_if_empty=argv_parsed.create_config,
collect_errors=True)
except:
print('error loading configuration data.'
' does it exist? is it broken?')
return 1
if not argv_parsed.only_local_config:
# Try to get remote config, given possibly new local config
try:
assert ac is not None
remote_config = ac.get_remote_config(include_dissolved=argv_parsed.include_dissolved)
except HSError as err:
print('Error: {}'.format(err))
return 1
except:
print('Error occurred while contacting rerobots servers')
print('Try config -l --local to only get local information')
return 1
config = {
'local': config,
'remote': remote_config,
}
if 'local' in config:
ref = config['local']['wdeployments']
else:
ref = config['wdeployments']
for jj, wdeployment in enumerate(ref):
ref[jj]['url'] = 'https://rerobots.net/workspace/{}'.format(wdeployment['id'])
if output_format == 'json':
print(json.dumps(config))
elif output_format == 'yaml':
print(yaml.dump(config, default_flow_style=False))
else:
if 'local' not in config:
config = {
'local': config,
'remote': None,
}
print('workspace deployments defined in local configuration:')
if len(config['local']['wdeployments']) == 0:
print('\t(none)')
else:
for wdeployment in config['local']['wdeployments']:
print('{}\n\turl: {}\n\towner: {}\n\tcprovider: {}\n\tcargs: {}'.format(
wdeployment['id'],
wdeployment['url'],
wdeployment['owner'],
wdeployment['cprovider'],
wdeployment['cargs'],
))
if wdeployment['cprovider'] in ['docker', 'podman']:
print('\timg: {}'.format(wdeployment['image']))
if wdeployment['terminate']:
print('\tterminate:')
for terminate_p in wdeployment['terminate']:
print('\t\t{}'.format(terminate_p))
print('\nfound keys:')
if len(config['local']['keys']) == 0:
print('\t(none)')
else:
print('\t' + '\n\t'.join(config['local']['keys']))
if 'err_keys' in config['local'] and len(config['local']['err_keys']) > 0:
print('found possible keys with errors:')
for err_key_path, err in config['local']['err_keys'].items():
print('\t {}: {}'.format(err, err_key_path))
if config['remote']:
if 'err' in config['remote']:
print('Error occurred while contacting remote server.')
if config['remote']['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(config['remote']['err'])
return 1
if len(config['remote']['deployments']) == 0:
print('\nno registered workspace deployments with this user account')
else:
print('\nregistered workspace deployments with this user account:')
for wd in config['remote']['deployments']:
print('{}'.format(wd['id']))
print('\tcreated: {}'.format(wd['date_created']))
if wd['desc'] is not None:
print('\tdesc: {}'.format(wd['desc']))
print('\torigin (address) of registration: {}'
.format(wd['origin']))
if wd['dissolved']:
print('\tdissolved: {}'.format(wd['dissolved']))
elif argv_parsed.prune_err_keys:
_, errored_keys = list_local_keys(collect_errors=True)
for err_key_path, err in errored_keys.items():
print('deleting {}...'.format(err_key_path))
os.unlink(err_key_path)
elif argv_parsed.new_api_token:
try:
add_key(argv_parsed.new_api_token)
except:
print('failed to add key')
return 1
elif argv_parsed.new_ssh_path:
try:
add_ssh_path(argv_parsed.new_ssh_path)
except:
print('ERROR: {} or {} does not exist or '
'has the wrong permissions.'.format(
argv_parsed.new_ssh_path,
argv_parsed.new_ssh_path + '.pub'
))
return 1
elif argv_parsed.create_config:
get_local_config(create_if_empty=True)
elif argv_parsed.declared_wdeployment_id is not None:
assert ac is not None
ac.declare_existing(argv_parsed.declared_wdeployment_id)
ac.sync_config()
elif argv_parsed.raw_device_path is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--add-raw-device not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
if not os.path.exists(argv_parsed.raw_device_path):
print('ERROR: given device file does not exist')
return 1
carg = '--device={D}:{D}'.format(D=argv_parsed.raw_device_path)
config['wdeployments'][index]['cargs'].append(carg)
modify_local(config)
elif argv_parsed.remove_raw_device_path is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
carg = '--device={D}:{D}'.format(D=argv_parsed.remove_raw_device_path)
config['wdeployments'][index]['cargs'].remove(carg)
modify_local(config)
elif argv_parsed.add_init_inside is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--add-init-inside not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
config['wdeployments'][index]['init_inside'].append(argv_parsed.add_init_inside)
modify_local(config)
elif argv_parsed.rm_init_inside:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--rm-init-inside not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
config['wdeployments'][index]['init_inside'] = []
modify_local(config)
elif argv_parsed.cprovider is not None:
selected_cprovider = argv_parsed.cprovider.lower()
if selected_cprovider not in ['docker', 'podman', 'proxy']:
print('ERROR: cprovider must be one of the following: docker, podman, proxy')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
config['wdeployments'][index]['cprovider'] = selected_cprovider
if selected_cprovider == 'proxy':
config['wdeployments'][index]['image'] = None
else: # selected_cprovider \in {docker, podman}
if config['wdeployments'][index]['image'] is None:
config['wdeployments'][index]['image'] = 'rerobots/hs-generic'
modify_local(config)
elif argv_parsed.cprovider_img is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider not in ['docker', 'podman', 'proxy']:
print('unknown cprovider: {}'.format(cprovider))
return 1
if cprovider == 'podman':
cp_images = subprocess.run([cprovider, 'image', 'exists', argv_parsed.cprovider_img])
if cp_images.returncode != 0:
print('ERROR: given image name is not recognized by cprovider')
return 1
elif cprovider == 'docker':
cp_images = subprocess.run([cprovider, 'image', 'inspect', argv_parsed.cprovider_img],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if cp_images.returncode != 0:
print('ERROR: given image name is not recognized by cprovider')
return 1
else: # cprovider == 'proxy'
print('ERROR: --assign-image not supported for cprovider `proxy`')
return 1
config['wdeployments'][index]['image'] = argv_parsed.cprovider_img
modify_local(config)
elif argv_parsed.add_terminate_prog is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
normalized_path = os.path.abspath(argv_parsed.add_terminate_prog)
if not os.path.exists(normalized_path):
print('ERROR: given path does not exist')
return 1
config['wdeployments'][index]['terminate'].append(normalized_path)
modify_local(config)
elif argv_parsed.rm_terminate_prog is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
config['wdeployments'][index]['terminate'].remove(argv_parsed.rm_terminate_prog)
modify_local(config)
else:
print('Use `hardshare config` with a switch. For example, `hardshare config -l`')
print('or to get a help message, enter\n\n hardshare help config')
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
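# Typical invocations of the CLI defined above (a usage sketch, not taken from
# this file's output; it assumes the package installs a `hardshare` console
# script that calls main()):
#
#   hardshare config --create        # create the local configuration
#   hardshare config --add-key FILE  # add an account API token
#   hardshare register               # register a new workspace deployment
#   hardshare rules --permit-me      # permit instantiations by the owner
#   hardshare ad -d                  # advertise availability, as a daemon
#   hardshare stop-ad                # mark as unavailable again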
| [
"logging.getLogger",
"logging.handlers.WatchedFileHandler",
"json.loads",
"logging.StreamHandler",
"uuid.UUID",
"os.path.exists",
"argparse.ArgumentParser",
"yaml.dump",
"os.close",
"subprocess.check_call",
"json.dumps",
"subprocess.run",
"logging.FileHandler",
"os.unlink",
"os.getpid",
"os.fork",
"os.path.abspath"
]
| [((2664, 2694), 'logging.getLogger', 'logging.getLogger', (['"""hardshare"""'], {}), "('hardshare')\n", (2681, 2694), False, 'import logging\n'), ((2752, 2847), 'logging.handlers.WatchedFileHandler', 'logging.handlers.WatchedFileHandler', ([], {'filename': '"""hardshare_client.log"""', 'mode': '"""a"""', 'delay': '(True)'}), "(filename='hardshare_client.log', mode=\n 'a', delay=True)\n", (2787, 2847), False, 'import logging\n'), ((3226, 3333), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command-line interface for the hardshare client"""', 'add_help': '(False)'}), "(description=\n 'Command-line interface for the hardshare client', add_help=False)\n", (3249, 3333), False, 'import argparse\n'), ((3108, 3119), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3117, 3119), False, 'import os\n'), ((25002, 25022), 'json.dumps', 'json.dumps', (['findings'], {}), '(findings)\n', (25012, 25022), False, 'import json\n'), ((25083, 25128), 'yaml.dump', 'yaml.dump', (['findings'], {'default_flow_style': '(False)'}), '(findings, default_flow_style=False)\n', (25092, 25128), False, 'import yaml\n'), ((25998, 26047), 'json.loads', 'json.loads', (['argv_parsed.attach_camera_crop_config'], {}), '(argv_parsed.attach_camera_crop_config)\n', (26008, 26047), False, 'import json\n'), ((26193, 26204), 'os.close', 'os.close', (['(0)'], {}), '(0)\n', (26201, 26204), False, 'import os\n'), ((26217, 26228), 'os.close', 'os.close', (['(1)'], {}), '(1)\n', (26225, 26228), False, 'import os\n'), ((26241, 26252), 'os.close', 'os.close', (['(2)'], {}), '(2)\n', (26249, 26252), False, 'import os\n'), ((26140, 26149), 'os.fork', 'os.fork', ([], {}), '()\n', (26147, 26149), False, 'import os\n'), ((31626, 31686), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': 'logfname', 'mode': '"""a"""', 'delay': '(True)'}), "(filename=logfname, mode='a', delay=True)\n", (31645, 31686), False, 'import logging\n'), ((31384, 31395), 'os.close', 'os.close', (['(0)'], {}), '(0)\n', (31392, 31395), False, 'import os\n'), ((31408, 31419), 'os.close', 'os.close', (['(1)'], {}), '(1)\n', (31416, 31419), False, 'import os\n'), ((31432, 31443), 'os.close', 'os.close', (['(2)'], {}), '(2)\n', (31440, 31443), False, 'import os\n'), ((31331, 31340), 'os.fork', 'os.fork', ([], {}), '()\n', (31338, 31340), False, 'import os\n'), ((31491, 31514), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (31512, 31514), False, 'import logging\n'), ((31968, 31979), 'os.getpid', 'os.getpid', ([], {}), '()\n', (31977, 31979), False, 'import os\n'), ((32821, 32957), 'subprocess.check_call', 'subprocess.check_call', (["[cprovider, 'rm', '-f', findings['container']['name']]"], {'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), "([cprovider, 'rm', '-f', findings['container']['name']\n ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n", (32842, 32957), False, 'import subprocess\n'), ((34586, 34618), 'uuid.UUID', 'uuid.UUID', (['argv_parsed.id_prefix'], {}), '(argv_parsed.id_prefix)\n', (34595, 34618), False, 'import uuid\n'), ((35531, 35546), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (35541, 35546), False, 'import json\n'), ((35615, 35655), 'yaml.dump', 'yaml.dump', (['res'], {'default_flow_style': '(False)'}), '(res, default_flow_style=False)\n', (35624, 35655), False, 'import yaml\n'), ((38072, 38099), 'uuid.UUID', 'uuid.UUID', (['argv_parsed.wdid'], {}), '(argv_parsed.wdid)\n', (38081, 38099), False, 'import uuid\n'), ((40728, 40746), 'json.dumps', 'json.dumps', 
(['config'], {}), '(config)\n', (40738, 40746), False, 'import json\n'), ((44179, 44202), 'os.unlink', 'os.unlink', (['err_key_path'], {}), '(err_key_path)\n', (44188, 44202), False, 'import os\n'), ((40813, 40856), 'yaml.dump', 'yaml.dump', (['config'], {'default_flow_style': '(False)'}), '(config, default_flow_style=False)\n', (40822, 40856), False, 'import yaml\n'), ((45653, 45696), 'os.path.exists', 'os.path.exists', (['argv_parsed.raw_device_path'], {}), '(argv_parsed.raw_device_path)\n', (45667, 45696), False, 'import os\n'), ((48936, 49009), 'subprocess.run', 'subprocess.run', (["[cprovider, 'image', 'exists', argv_parsed.cprovider_img]"], {}), "([cprovider, 'image', 'exists', argv_parsed.cprovider_img])\n", (48950, 49009), False, 'import subprocess\n'), ((50090, 50137), 'os.path.abspath', 'os.path.abspath', (['argv_parsed.add_terminate_prog'], {}), '(argv_parsed.add_terminate_prog)\n', (50105, 50137), False, 'import os\n'), ((49237, 49369), 'subprocess.run', 'subprocess.run', (["[cprovider, 'image', 'inspect', argv_parsed.cprovider_img]"], {'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), "([cprovider, 'image', 'inspect', argv_parsed.cprovider_img],\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n", (49251, 49369), False, 'import subprocess\n'), ((50158, 50189), 'os.path.exists', 'os.path.exists', (['normalized_path'], {}), '(normalized_path)\n', (50172, 50189), False, 'import os\n')] |
"""Realty Info"""
import os
import requests
from dotenv import load_dotenv
from fastapi import APIRouter, Depends
import sqlalchemy
from pydantic import BaseModel, SecretStr
from app import config
from app.walk_score import *
load_dotenv()
router = APIRouter()
headers = {'x-rapidapi-key': os.getenv('api_key'),
'x-rapidapi-host': os.getenv('host') }
@router.get('/streamlined_rent_list')
async def streamlined_rent_list(api_key = config.settings.api_key,
city: str = "New York City",
state: str= "NY",
prop_type: str = "condo",
limit: int = 4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
response = response_for_rent.json()['properties']
rental_list = []
for i in range(limit):
line = response[i]['address']['line']
city = response[i]['address']['city']
state = response[i]['address']['state']
lat = response[i]['address']['lat']
lon = response[i]['address']['lon']
photos = response[i]['photos']
address = line +" "+ city + " "+ state
walk_score = just_walk_score(address, lat, lon)
element = {'address': address,
'lat': lat,
'lon': lon,
'city':city,
'state':state,
'photos': photos,
'walk_score': walk_score}
rental_list.append(element)
return rental_list
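# Shape of one element returned by /streamlined_rent_list (hypothetical values;
# the keys mirror the `element` dict built above):
#
#   {"address": "123 Main St New York NY", "lat": 40.75, "lon": -73.99,
#    "city": "New York", "state": "NY", "photos": [...], "walk_score": 98}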
@router.get('/for_rent_list')
async def for_rent_list(api_key = config.settings.api_key,
city: str = "New York City",
state: str= "NY",
prop_type: str = "condo",
limit: int = 4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
return response_for_rent.json()['properties']
@router.get('/for_rent_list/{property_id}')
async def property_detail(property_id: str = "O3599084026"):
"""
Parameters:
property_id
Returns:
detailed information about the property
"""
url = os.getenv('url_property_detail')
querystring = {"property_id":property_id}
response_prop_detail = requests.request("GET", url, headers=headers, params=querystring)
return response_prop_detail.json()['properties']
@router.get('/for_sale_list')
async def for_sale_list(api_key = config.settings.api_key,
city = "New York City",
state= "NY",
limit = 4):
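    """
    Parameters:
        api_key
        city: str
        state: str
        limit: int number of results to populate
    Returns:
        information about properties for sale
    """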
url = os.getenv('url_list_for_sale')
querystring = {"city": city ,"limit": limit,"offset":"0","state_code": state,"sort":"relevance"}
response_for_sale = requests.request("GET", url, headers=headers, params=querystring)
return response_for_sale.json()['properties']
| [
"requests.request",
"fastapi.APIRouter",
"os.getenv",
"dotenv.load_dotenv"
]
| [((230, 243), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (241, 243), False, 'from dotenv import load_dotenv\n'), ((254, 265), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (263, 265), False, 'from fastapi import APIRouter, Depends\n'), ((296, 316), 'os.getenv', 'os.getenv', (['"""api_key"""'], {}), "('api_key')\n", (305, 316), False, 'import os\n'), ((348, 365), 'os.getenv', 'os.getenv', (['"""host"""'], {}), "('host')\n", (357, 365), False, 'import os\n'), ((893, 923), 'os.getenv', 'os.getenv', (['"""url_list_for_rent"""'], {}), "('url_list_for_rent')\n", (902, 923), False, 'import os\n'), ((1180, 1245), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'params': 'querystring', 'headers': 'headers'}), "('GET', url, params=querystring, headers=headers)\n", (1196, 1245), False, 'import requests\n'), ((2515, 2545), 'os.getenv', 'os.getenv', (['"""url_list_for_rent"""'], {}), "('url_list_for_rent')\n", (2524, 2545), False, 'import os\n'), ((2802, 2867), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'params': 'querystring', 'headers': 'headers'}), "('GET', url, params=querystring, headers=headers)\n", (2818, 2867), False, 'import requests\n'), ((3154, 3186), 'os.getenv', 'os.getenv', (['"""url_property_detail"""'], {}), "('url_property_detail')\n", (3163, 3186), False, 'import os\n'), ((3265, 3330), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'headers': 'headers', 'params': 'querystring'}), "('GET', url, headers=headers, params=querystring)\n", (3281, 3330), False, 'import requests\n'), ((3594, 3624), 'os.getenv', 'os.getenv', (['"""url_list_for_sale"""'], {}), "('url_list_for_sale')\n", (3603, 3624), False, 'import os\n'), ((3751, 3816), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'headers': 'headers', 'params': 'querystring'}), "('GET', url, headers=headers, params=querystring)\n", (3767, 3816), False, 'import requests\n')] |
#!/bin/python2
import collections
import re
import subprocess
import sys
PUC = "../pamu2fcfg/pamu2fcfg"
resident = ["", "-r"]
presence = ["", "-P"]
pin = ["", "-N"]
verification = ["", "-V"]
Credential = collections.namedtuple("Credential", "keyhandle pubkey attributes oldformat")
sshformat = 0
def print_test_case(filename, sshformat, credentials):
start = """
cfg.auth_file = "{authfile}";
cfg.sshformat = {ssh};
rc = get_devices_from_authfile(&cfg, username, dev, &n_devs);
assert(rc == 1);
assert(n_devs == {devices});
"""
checks = """
assert(strcmp(dev[{i}].coseType, "es256") == 0);
assert(strcmp(dev[{i}].keyHandle, "{kh}") == 0);
assert(strcmp(dev[{i}].publicKey, "{pk}") == 0);
assert(strcmp(dev[{i}].attributes, "{attr}") == 0);
assert(dev[{i}].old_format == {old});
"""
free = """
free(dev[{i}].coseType);
free(dev[{i}].attributes);
free(dev[{i}].keyHandle);
free(dev[{i}].publicKey);
"""
end = """
memset(dev, 0, sizeof(dev_t) * {devices});
"""
code = ""
free_block = ""
code += start.format(authfile = filename, ssh = sshformat, devices = len(credentials))
for c, v in enumerate(credentials):
code += checks.format(i = c, kh = v.keyhandle, pk = v.pubkey, attr = v.attributes, old = v.oldformat)
free_block += free.format(i = c)
code += free_block + end.format(devices = len(credentials))
print(code)
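# For a single credential, print_test_case() emits a C snippet roughly of the
# form below (key handle, public key and attributes elided -- they are filled
# in from the pamu2fcfg output parsed further down):
#
#   cfg.auth_file = "<filename>";
#   cfg.sshformat = 0;
#   rc = get_devices_from_authfile(&cfg, username, dev, &n_devs);
#   assert(rc == 1);
#   assert(n_devs == 1);
#   assert(strcmp(dev[0].coseType, "es256") == 0);
#   ...
#   free(dev[0].coseType);
#   memset(dev, 0, sizeof(dev_t) * 1);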
# Single credentials
print >> sys.stderr, "Generating single credentials"
for r in resident:
for p in presence:
for n in pin:
for v in verification:
filename = "credentials/new_" + r + p + v + n
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
# Double credentials
print >> sys.stderr, "Generating double credentials"
for r in resident:
for p in presence:
for n in pin:
for v in verification:
filename = "credentials/new_double_" + r + p + v + n
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
line = subprocess.check_output([PUC, "-n", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "a") as outfile:
outfile.write(line)
credentials += [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
# Mixed credentials
print >> sys.stderr, "Mixed double credentials"
options = [("", ""), ("", "-P"), ("-P", ""), ("-P", "-P")]
for p1, p2 in options:
filename = "credentials/new_mixed_" + p1 +"1" + p2 + "2"
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", p1])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
line = subprocess.check_output([PUC, "-n", p2])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "a") as outfile:
outfile.write(line)
credentials += [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
| [
"subprocess.check_output",
"collections.namedtuple",
"re.match"
]
| [((211, 288), 'collections.namedtuple', 'collections.namedtuple', (['"""Credential"""', '"""keyhandle pubkey attributes oldformat"""'], {}), "('Credential', 'keyhandle pubkey attributes oldformat')\n", (233, 288), False, 'import collections\n'), ((4082, 4132), 'subprocess.check_output', 'subprocess.check_output', (["[PUC, '-u@USERNAME@', p1]"], {}), "([PUC, '-u@USERNAME@', p1])\n", (4105, 4132), False, 'import subprocess\n'), ((4148, 4199), 're.match', 're.match', (['"""^.*?:(.*?),(.*?),es256,(.*)"""', 'line', 're.M'], {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)\n", (4156, 4199), False, 'import re\n'), ((4517, 4557), 'subprocess.check_output', 'subprocess.check_output', (["[PUC, '-n', p2]"], {}), "([PUC, '-n', p2])\n", (4540, 4557), False, 'import subprocess\n'), ((4573, 4624), 're.match', 're.match', (['"""^.*?:(.*?),(.*?),es256,(.*)"""', 'line', 're.M'], {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)\n", (4581, 4624), False, 'import re\n'), ((1750, 1808), 'subprocess.check_output', 'subprocess.check_output', (["[PUC, '-u@USERNAME@', r, p, v, n]"], {}), "([PUC, '-u@USERNAME@', r, p, v, n])\n", (1773, 1808), False, 'import subprocess\n'), ((1836, 1887), 're.match', 're.match', (['"""^.*?:(.*?),(.*?),es256,(.*)"""', 'line', 're.M'], {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)\n", (1844, 1887), False, 'import re\n'), ((2680, 2738), 'subprocess.check_output', 'subprocess.check_output', (["[PUC, '-u@USERNAME@', r, p, v, n]"], {}), "([PUC, '-u@USERNAME@', r, p, v, n])\n", (2703, 2738), False, 'import subprocess\n'), ((2766, 2817), 're.match', 're.match', (['"""^.*?:(.*?),(.*?),es256,(.*)"""', 'line', 're.M'], {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)\n", (2774, 2817), False, 'import re\n'), ((3216, 3264), 'subprocess.check_output', 'subprocess.check_output', (["[PUC, '-n', r, p, v, n]"], {}), "([PUC, '-n', r, p, v, n])\n", (3239, 3264), False, 'import subprocess\n'), ((3292, 3343), 're.match', 're.match', (['"""^.*?:(.*?),(.*?),es256,(.*)"""', 'line', 're.M'], {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)\n", (3300, 3343), False, 'import re\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Scalar Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class AffineScalarBijectorTest(test.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
with self.test_session():
mu = -1.
# scale corresponds to 1.
bijector = AffineScalar(shift=mu)
self.assertEqual("affine_scalar", bijector.name)
def testNoBatchScalar(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = AffineScalar(shift=mu, scale=2.)
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose([-np.log(2.)] * 3,
run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaIdentityIn64BitUserProvidesShiftOnly(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = np.float64([1.])
# One batch, scalar.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
x = np.float64([1.]) # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.], run(bijector.inverse, x))
self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaIdentityIn64BitUserProvidesScaleOnly(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
multiplier = np.float64([2.])
# One batch, scalar.
# Corresponds to scale = 2, shift = 0.
bijector = AffineScalar(scale=multiplier)
x = np.float64([1.]) # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.5], run(bijector.inverse, x))
self.assertAllClose([np.log(0.5)],
run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalarIdentityViaIdentity(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose([0., 0.], run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalarIdentityViaScale(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu, scale=[2., 1])
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
[-np.log(2), 0.], run(bijector.inverse_log_det_jacobian, x))
def testScalarCongruency(self):
with self.test_session():
bijector = AffineScalar(shift=3.6, scale=0.42)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.)
if __name__ == "__main__":
test.main()
| [
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.distributions.bijector_test_util.assert_scalar_congruency",
"numpy.float64",
"tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar",
"numpy.log",
"numpy.array",
"tensorflow.python.platform.test.main"
]
| [((5795, 5806), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (5804, 5806), False, 'from tensorflow.python.platform import test\n'), ((1410, 1432), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', ([], {'shift': 'mu'}), '(shift=mu)\n', (1422, 1432), False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((5663, 5698), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', ([], {'shift': '(3.6)', 'scale': '(0.42)'}), '(shift=3.6, scale=0.42)\n', (5675, 5698), False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((5705, 5766), 'tensorflow.python.ops.distributions.bijector_test_util.assert_scalar_congruency', 'assert_scalar_congruency', (['bijector'], {'lower_x': '(-2.0)', 'upper_x': '(2.0)'}), '(bijector, lower_x=-2.0, upper_x=2.0)\n', (5729, 5766), False, 'from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency\n'), ((1674, 1691), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (1682, 1691), True, 'import numpy as np\n'), ((1704, 1751), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {'name': '"""x"""'}), "(dtypes.float32, name='x')\n", (1725, 1751), False, 'from tensorflow.python.ops import array_ops\n'), ((1924, 1957), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', ([], {'shift': 'mu', 'scale': '(2.0)'}), '(shift=mu, scale=2.0)\n', (1936, 1957), False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((2546, 2593), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float64'], {'name': '"""x"""'}), "(dtypes.float64, name='x')\n", (2567, 2593), False, 'from tensorflow.python.ops import array_ops\n'), ((2708, 2725), 'numpy.float64', 'np.float64', (['[1.0]'], {}), '([1.0])\n', (2718, 2725), True, 'import numpy as np\n'), ((2809, 2831), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', ([], {'shift': 'mu'}), '(shift=mu)\n', (2821, 2831), False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((2844, 2861), 'numpy.float64', 'np.float64', (['[1.0]'], {}), '([1.0])\n', (2854, 2861), True, 'import numpy as np\n'), ((3365, 3412), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float64'], {'name': '"""x"""'}), "(dtypes.float64, name='x')\n", (3386, 3412), False, 'from tensorflow.python.ops import array_ops\n'), ((3535, 3552), 'numpy.float64', 'np.float64', (['[2.0]'], {}), '([2.0])\n', (3545, 3552), True, 'import numpy as np\n'), ((3647, 3677), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', ([], {'scale': 'multiplier'}), '(scale=multiplier)\n', (3659, 3677), False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((3690, 3707), 'numpy.float64', 'np.float64', (['[1.0]'], {}), '([1.0])\n', (3700, 3707), True, 'import numpy as np\n'), ((4180, 4197), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (4188, 4197), True, 'import numpy as np\n'), ((4210, 4257), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {'name': '"""x"""'}), "(dtypes.float32, name='x')\n", (4231, 
4257), False, 'from tensorflow.python.ops import array_ops\n'), ((4471, 4493), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', ([], {'shift': 'mu'}), '(shift=mu)\n', (4483, 4493), False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((4964, 4981), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (4972, 4981), True, 'import numpy as np\n'), ((4994, 5041), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {'name': '"""x"""'}), "(dtypes.float32, name='x')\n", (5015, 5041), False, 'from tensorflow.python.ops import array_ops\n'), ((5255, 5293), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', ([], {'shift': 'mu', 'scale': '[2.0, 1]'}), '(shift=mu, scale=[2.0, 1])\n', (5267, 5293), False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((2497, 2514), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (2505, 2514), True, 'import numpy as np\n'), ((3316, 3333), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (3324, 3333), True, 'import numpy as np\n'), ((3889, 3900), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (3895, 3900), True, 'import numpy as np\n'), ((5522, 5531), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (5528, 5531), True, 'import numpy as np\n'), ((2183, 2194), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (2189, 2194), True, 'import numpy as np\n')] |
"""
The ``ui.ScrollPanel`` class implements a panel that scrolls its contents.
If you want the scroll bars to be always visible, call
``setAlwaysShowScrollBars(True)``. You can also change the current scrolling
position programmatically by calling ``setScrollPosition(vPos)`` and
``setScrollHorizontalPosition(hPos)`` to change the horizontal and vertical
scrolling position, respectively.
It is in the nature of a ScrollPanel that it will not work if you give it a relative size.
This makes it tricky to use where you want it to fill a parent widget of unknown size.
To avoid this problem you will have to wrap its content in a SimplePanel and
then use css/overflow to control its behaviour, as shown in the second example:
"container" represents the parent widget that could be any absolute or relative size and
the superscrollpanel will fill it out and apply vertical scrollbars if needed.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
class ScrollPanelDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
vert = VerticalPanel()
vert.setSpacing("10px")
self.add(vert)
panel = ScrollPanel(Size=("300px", "100px"))
contents = HTML("<b>Tao Te Ching, Chapter One</b><p>" +
"The Way that can be told of is not an unvarying " +
"way;<p>The names that can be named are not " +
"unvarying names.<p>It was from the Nameless that " +
"Heaven and Earth sprang;<p>The named is but the " +
"mother that rears the ten thousand creatures, " +
"each after its kind.")
panel.add(contents)
vert.add(panel)
container = SimplePanel(Width="400px", Height="200px")
contents2 = HTML(50*"Dont forget to grab the css for SuperScrollPanel in Showcase.css! ")
panel2 = SuperScrollPanel(contents2)
container.add(panel2)
vert.add(container)
class SuperScrollPanel(ScrollPanel):
def __init__(self, panel):
ScrollPanel.__init__(self)
self.setHeight("100%")
self.setStyleName("SuperScrollPanelOuter")
self.inner = SimplePanel(Height="100%")
self.add(self.inner)
self.inner.setStyleName("SuperScrollPanelInner")
self.inner.add(panel)
| [
"pyjamas.ui.SimplePanel.SimplePanel",
"pyjamas.ui.VerticalPanel.VerticalPanel",
"pyjamas.ui.ScrollPanel.ScrollPanel.__init__",
"pyjamas.ui.ScrollPanel.ScrollPanel",
"pyjamas.ui.HTML.HTML",
"pyjamas.ui.SimplePanel.SimplePanel.__init__"
]
| [((1154, 1180), 'pyjamas.ui.SimplePanel.SimplePanel.__init__', 'SimplePanel.__init__', (['self'], {}), '(self)\n', (1174, 1180), False, 'from pyjamas.ui.SimplePanel import SimplePanel\n'), ((1196, 1211), 'pyjamas.ui.VerticalPanel.VerticalPanel', 'VerticalPanel', ([], {}), '()\n', (1209, 1211), False, 'from pyjamas.ui.VerticalPanel import VerticalPanel\n'), ((1292, 1328), 'pyjamas.ui.ScrollPanel.ScrollPanel', 'ScrollPanel', ([], {'Size': "('300px', '100px')"}), "(Size=('300px', '100px'))\n", (1303, 1328), False, 'from pyjamas.ui.ScrollPanel import ScrollPanel\n'), ((1349, 1696), 'pyjamas.ui.HTML.HTML', 'HTML', (["('<b>Tao Te Ching, Chapter One</b><p>' +\n 'The Way that can be told of is not an unvarying ' +\n 'way;<p>The names that can be named are not ' +\n 'unvarying names.<p>It was from the Nameless that ' +\n 'Heaven and Earth sprang;<p>The named is but the ' +\n 'mother that rears the ten thousand creatures, ' + 'each after its kind.')"], {}), "('<b>Tao Te Ching, Chapter One</b><p>' +\n 'The Way that can be told of is not an unvarying ' +\n 'way;<p>The names that can be named are not ' +\n 'unvarying names.<p>It was from the Nameless that ' +\n 'Heaven and Earth sprang;<p>The named is but the ' +\n 'mother that rears the ten thousand creatures, ' + 'each after its kind.')\n", (1353, 1696), False, 'from pyjamas.ui.HTML import HTML\n'), ((1894, 1936), 'pyjamas.ui.SimplePanel.SimplePanel', 'SimplePanel', ([], {'Width': '"""400px"""', 'Height': '"""200px"""'}), "(Width='400px', Height='200px')\n", (1905, 1936), False, 'from pyjamas.ui.SimplePanel import SimplePanel\n'), ((1957, 2036), 'pyjamas.ui.HTML.HTML', 'HTML', (["(50 * 'Dont forget to grab the css for SuperScrollPanel in Showcase.css! ')"], {}), "(50 * 'Dont forget to grab the css for SuperScrollPanel in Showcase.css! ')\n", (1961, 2036), False, 'from pyjamas.ui.HTML import HTML\n'), ((2215, 2241), 'pyjamas.ui.ScrollPanel.ScrollPanel.__init__', 'ScrollPanel.__init__', (['self'], {}), '(self)\n', (2235, 2241), False, 'from pyjamas.ui.ScrollPanel import ScrollPanel\n'), ((2346, 2372), 'pyjamas.ui.SimplePanel.SimplePanel', 'SimplePanel', ([], {'Height': '"""100%"""'}), "(Height='100%')\n", (2357, 2372), False, 'from pyjamas.ui.SimplePanel import SimplePanel\n')] |
'''
* @file ElevatorTestCaseList.py
* @author <NAME>
* @date 30 July 2020
* @version 0.1
* @brief Implements a class to hold all the test cases during the program life cycle.
'''
#!/usr/bin/env python3
import sys
import ctypes
import ElevatorConfig as cfg
import ElevatorMsgProtocol as msgProto
class ElevatorTestCaseList:
'''
This class builds a test case list out of the configuration
and holds it during the runtime
'''
def __init__(self, config):
self.config = config
self.CallGoTCList = []
def create_testcase_list(self):
'''
Creates a test case list out of the configuration
'''
# ############################################################
# Construct 'call' test cases
for k in self.config.test_case['call'].keys():
msgHdr = msgProto.MsgHeader(tx_node_addr = self.config.test_case['call'][k][0],
rx_node_addr = self.config.test_case['call'][k][1],
msg_id = self.config.test_case['call'][k][2],
msg_class = self.config.test_case['call'][k][3],
hdr_len = self.config.network['packet_header_len'],
payload_len = self.config.network['packet_payload_req_len'])
self.CallGoTCList.append(msgProto.EncodeReqPacket(msg_header = msgHdr,
time_tag = self.config.test_case['call'][k][4],
req_typ = self.config.usr_request['call'],
floor_num = self.config.test_case['call'][k][5],
direction = self.config.test_case['call'][k][6],
go_msg_id = self.config.test_case['call'][k][7],
state = msgProto.CallGoState.READY2GO))
# ############################################################
# Construct 'go' test cases
for k in self.config.test_case['go'].keys():
msgHdr = msgProto.MsgHeader(tx_node_addr = self.config.test_case['go'][k][0],
rx_node_addr = self.config.test_case['go'][k][1],
msg_id = self.config.test_case['go'][k][2],
msg_class = self.config.test_case['go'][k][3],
hdr_len = self.config.network['packet_header_len'],
payload_len = self.config.network['packet_payload_req_len'])
self.CallGoTCList.append(msgProto.EncodeReqPacket(msg_header = msgHdr,
time_tag = self.config.test_case['go'][k][4],
req_typ = self.config.usr_request['go'],
floor_num = self.config.test_case['go'][k][5],
direction = 0,
go_msg_id = 0,
state = msgProto.CallGoState.RESET))
| [
"ElevatorMsgProtocol.MsgHeader",
"ElevatorMsgProtocol.EncodeReqPacket"
]
| [((815, 1151), 'ElevatorMsgProtocol.MsgHeader', 'msgProto.MsgHeader', ([], {'tx_node_addr': "self.config.test_case['call'][k][0]", 'rx_node_addr': "self.config.test_case['call'][k][1]", 'msg_id': "self.config.test_case['call'][k][2]", 'msg_class': "self.config.test_case['call'][k][3]", 'hdr_len': "self.config.network['packet_header_len']", 'payload_len': "self.config.network['packet_payload_req_len']"}), "(tx_node_addr=self.config.test_case['call'][k][0],\n rx_node_addr=self.config.test_case['call'][k][1], msg_id=self.config.\n test_case['call'][k][2], msg_class=self.config.test_case['call'][k][3],\n hdr_len=self.config.network['packet_header_len'], payload_len=self.\n config.network['packet_payload_req_len'])\n", (833, 1151), True, 'import ElevatorMsgProtocol as msgProto\n'), ((2185, 2513), 'ElevatorMsgProtocol.MsgHeader', 'msgProto.MsgHeader', ([], {'tx_node_addr': "self.config.test_case['go'][k][0]", 'rx_node_addr': "self.config.test_case['go'][k][1]", 'msg_id': "self.config.test_case['go'][k][2]", 'msg_class': "self.config.test_case['go'][k][3]", 'hdr_len': "self.config.network['packet_header_len']", 'payload_len': "self.config.network['packet_payload_req_len']"}), "(tx_node_addr=self.config.test_case['go'][k][0],\n rx_node_addr=self.config.test_case['go'][k][1], msg_id=self.config.\n test_case['go'][k][2], msg_class=self.config.test_case['go'][k][3],\n hdr_len=self.config.network['packet_header_len'], payload_len=self.\n config.network['packet_payload_req_len'])\n", (2203, 2513), True, 'import ElevatorMsgProtocol as msgProto\n'), ((1350, 1678), 'ElevatorMsgProtocol.EncodeReqPacket', 'msgProto.EncodeReqPacket', ([], {'msg_header': 'msgHdr', 'time_tag': "self.config.test_case['call'][k][4]", 'req_typ': "self.config.usr_request['call']", 'floor_num': "self.config.test_case['call'][k][5]", 'direction': "self.config.test_case['call'][k][6]", 'go_msg_id': "self.config.test_case['call'][k][7]", 'state': 'msgProto.CallGoState.READY2GO'}), "(msg_header=msgHdr, time_tag=self.config.test_case[\n 'call'][k][4], req_typ=self.config.usr_request['call'], floor_num=self.\n config.test_case['call'][k][5], direction=self.config.test_case['call']\n [k][6], go_msg_id=self.config.test_case['call'][k][7], state=msgProto.\n CallGoState.READY2GO)\n", (1374, 1678), True, 'import ElevatorMsgProtocol as msgProto\n'), ((2712, 2958), 'ElevatorMsgProtocol.EncodeReqPacket', 'msgProto.EncodeReqPacket', ([], {'msg_header': 'msgHdr', 'time_tag': "self.config.test_case['go'][k][4]", 'req_typ': "self.config.usr_request['go']", 'floor_num': "self.config.test_case['go'][k][5]", 'direction': '(0)', 'go_msg_id': '(0)', 'state': 'msgProto.CallGoState.RESET'}), "(msg_header=msgHdr, time_tag=self.config.test_case[\n 'go'][k][4], req_typ=self.config.usr_request['go'], floor_num=self.\n config.test_case['go'][k][5], direction=0, go_msg_id=0, state=msgProto.\n CallGoState.RESET)\n", (2736, 2958), True, 'import ElevatorMsgProtocol as msgProto\n')] |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
def read(fname):
"""Open files relative to package."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='ipyfilechooser',
version='0.3.1',
author='<NAME> (@crahan)',
author_email='<EMAIL>',
description=(
'Python file chooser widget for use in '
'Jupyter/IPython in conjunction with ipywidgets'
),
long_description=read('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/crahan/ipyfilechooser',
license='MIT',
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
],
install_requires=[
'ipywidgets'
]
)
| [
"os.path.dirname",
"setuptools.find_packages"
]
| [((630, 645), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (643, 645), False, 'from setuptools import setup, find_packages\n'), ((166, 191), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (181, 191), False, 'import os\n')] |
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import tensorflow as _tf
from distutils.version import StrictVersion
is_tf2 = StrictVersion(_tf.__version__.split('-')[0]) >= StrictVersion('2.0.0')
def normalize_tensor_shape(tensor_shape):
if is_tf2:
return [d for d in tensor_shape]
else:
return [d.value for d in tensor_shape]
def dump_graph_into_tensorboard(tf_graph):
# type: (_tf.Graph) -> None
_tb_log_dir = os.environ.get('TB_LOG_DIR')
if _tb_log_dir:
if is_tf2:
from tensorflow.python.ops.summary_ops_v2 import graph as write_graph
pb_visual_writer = _tf.summary.create_file_writer(_tb_log_dir)
with pb_visual_writer.as_default():
write_graph(tf_graph)
else:
from tensorflow.python.summary import summary
pb_visual_writer = summary.FileWriter(_tb_log_dir)
pb_visual_writer.add_graph(tf_graph)
if is_tf2:
tensorflow = _tf.compat.v1
def is_subclassed(layer):
"""Returns True if the object is a subclassed layer or subclassed model."""
return (layer.__module__.find('keras.engine') == -1 and
layer.__module__.find('keras.layers') == -1)
else:
tensorflow = _tf
def is_subclassed(layer):
return False
| [
"tensorflow.__version__.split",
"distutils.version.StrictVersion",
"os.environ.get",
"tensorflow.summary.create_file_writer",
"tensorflow.python.summary.summary.FileWriter",
"tensorflow.python.ops.summary_ops_v2.graph"
]
| [((455, 477), 'distutils.version.StrictVersion', 'StrictVersion', (['"""2.0.0"""'], {}), "('2.0.0')\n", (468, 477), False, 'from distutils.version import StrictVersion\n'), ((730, 758), 'os.environ.get', 'os.environ.get', (['"""TB_LOG_DIR"""'], {}), "('TB_LOG_DIR')\n", (744, 758), False, 'import os\n'), ((421, 447), 'tensorflow.__version__.split', '_tf.__version__.split', (['"""-"""'], {}), "('-')\n", (442, 447), True, 'import tensorflow as _tf\n'), ((911, 954), 'tensorflow.summary.create_file_writer', '_tf.summary.create_file_writer', (['_tb_log_dir'], {}), '(_tb_log_dir)\n', (941, 954), True, 'import tensorflow as _tf\n'), ((1144, 1175), 'tensorflow.python.summary.summary.FileWriter', 'summary.FileWriter', (['_tb_log_dir'], {}), '(_tb_log_dir)\n', (1162, 1175), False, 'from tensorflow.python.summary import summary\n'), ((1019, 1040), 'tensorflow.python.ops.summary_ops_v2.graph', 'write_graph', (['tf_graph'], {}), '(tf_graph)\n', (1030, 1040), True, 'from tensorflow.python.ops.summary_ops_v2 import graph as write_graph\n')] |
#!/usr/bin/env python
# Converts a PoD XML file to a GeoJSON file.
#
# With the --javascript parameter, the generated file is a javascript
# file defining a variable 'basePodSpec'.
#
# Get the PoD XML file from http://dev.24-timmars.nu/PoD/xmlapi_app.php.
import xml.etree.ElementTree as etree
import argparse
import re
import json
import io
import sys
import os.path
import datetime
if sys.version < '3':
import codecs
# points number 9000 and above are not real points; they are used to mark
# area borders
MAXPOINT=8999
def run():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--infile", help="input file")
parser.add_argument("-o", "--outfile", help="output file")
parser.add_argument("--id", help="id of terrain")
parser.add_argument("--javascript", action="store_true")
args = parser.parse_args()
tree = etree.parse(args.infile)
all_points, start_points, turning_points = get_points(tree)
inshore_legs, offshore_legs = get_legs(tree, all_points)
output_pod(args.outfile, args.javascript, args.id,
[('startPoints', start_points),
('turningPoints', turning_points),
('inshoreLegs', inshore_legs),
('offshoreLegs', offshore_legs)])
def output_pod(fname, javascript, id, features):
if sys.version < '3':
fd = codecs.open(fname, "w", encoding="utf-8")
else:
fd = io.open(fname, "w", encoding="utf-8")
if javascript:
fd.write(u'/* eslint-disable */\n')
fd.write(u'export var basePodSpec = ')
fd.write(u'{"id": %s, ' % id)
flen = len(features)
i = 1
for (name, obj) in features:
fd.write(u'"%s": {"type": "FeatureCollection",'
'"crs": { "type": "name",'
'"properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },'
'"features":' % name)
fd.write(json.dumps(obj, ensure_ascii=False))
if i == flen:
fd.write(u'}')
else:
i = i + 1
fd.write(u'},\n')
if javascript:
fd.write(u'};\n')
else:
fd.write(u'}\n')
def get_points(tree):
doc = tree.getroot()
startnumbers = {}
all_points = {}
start_points = []
turning_points = []
for n in doc.findall("kretsar/krets/startpoints/number"):
startnumbers[n.text] = True
for p in doc.findall("points/point"):
number = p.find("number").text
if int(number) > MAXPOINT:
continue
name = p.find("name").text
descr = p.find("descr").text
lat = p.find("lat").text
lng = p.find("long").text
footnote = None
footnoteelem = p.find("footnote")
if footnoteelem is not None:
footnote = footnoteelem.text
properties = {"number": number,
"name": name,
"descr": descr}
if footnote != None:
properties["footnote"] = footnote
coordinates = [float(lng), float(lat)]
geometry = {"type": "Point",
"coordinates": coordinates}
point = {"type": "Feature",
"properties": properties,
"geometry": geometry},
if number in startnumbers:
start_points.extend(point)
else:
turning_points.extend(point)
all_points[number] = coordinates
return all_points, start_points, turning_points
def get_legs(tree, all_points):
doc = tree.getroot()
coast = []
offshore = []
for p in doc.findall("legs/leg"):
src = p.find("from").text
dst = p.find("to").text
if int(src) > MAXPOINT or int(dst) > MAXPOINT:
continue
if int(src) < int(dst):
# since all legs are present twice (in both directions),
# skip one direction
continue
dist = p.find("dist").text
sea = p.find("sea").text
addtime = p.find("addtime").text
if dist is None:
print("** error: no distance: src: %s dst: %s" % (src, dst))
properties = {"src": src,
"dst": dst,
"dist": float(dist)}
if properties["dist"] == 0 and addtime == "1":
properties["addtime"] = True;
src_coords = all_points[src]
dst_coords = all_points[dst]
geometry = {"type": "LineString",
"coordinates": [src_coords, dst_coords]}
leg = {"type": "Feature",
"properties": properties,
"geometry": geometry},
if sea == "0":
coast.extend(leg)
else:
offshore.extend(leg)
return coast, offshore
if __name__ == '__main__':
run()
| [
"xml.etree.ElementTree.parse",
"argparse.ArgumentParser",
"json.dumps",
"io.open",
"codecs.open"
]
| [((555, 580), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (578, 580), False, 'import argparse\n'), ((862, 886), 'xml.etree.ElementTree.parse', 'etree.parse', (['args.infile'], {}), '(args.infile)\n', (873, 886), True, 'import xml.etree.ElementTree as etree\n'), ((1353, 1394), 'codecs.open', 'codecs.open', (['fname', '"""w"""'], {'encoding': '"""utf-8"""'}), "(fname, 'w', encoding='utf-8')\n", (1364, 1394), False, 'import codecs\n'), ((1418, 1455), 'io.open', 'io.open', (['fname', '"""w"""'], {'encoding': '"""utf-8"""'}), "(fname, 'w', encoding='utf-8')\n", (1425, 1455), False, 'import io\n'), ((1904, 1939), 'json.dumps', 'json.dumps', (['obj'], {'ensure_ascii': '(False)'}), '(obj, ensure_ascii=False)\n', (1914, 1939), False, 'import json\n')] |
import os
import json
import importlib
from pluginbase import PluginBase
import rastervision as rv
from rastervision.protos.plugin_pb2 import PluginConfig as PluginConfigMsg
from rastervision.utils.files import download_if_needed
class PluginError(Exception):
pass
def load_conf_list(s):
"""Loads a list of items from the config.
Lists should be comma separated.
This takes into account that previous versions of Raster Vision
allowed for a `[ "module" ]` like syntax, even though that didn't
work for multi-value lists.
"""
try:
# A comma separated list of values will be transformed to
# having a list-like string, with ' instead of ". Replacing
# single quotes with double quotes lets us parse it as a JSON list.
return json.loads(s.replace("'", '"'))
except json.JSONDecodeError:
return list(map(lambda x: x.strip(), s.split(',')))
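# Illustrative examples (plugin names are hypothetical); both config styles
# yield the same list:
#   load_conf_list("['foo.plugin', 'bar.plugin']")  ->  ['foo.plugin', 'bar.plugin']
#   load_conf_list("foo.plugin, bar.plugin")        ->  ['foo.plugin', 'bar.plugin']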
class PluginRegistry:
@staticmethod
def get_instance():
return rv._registry._get_plugin_registry()
def __init__(self, plugin_config, rv_home):
"""Initializes this plugin registry.
A plugin registry is passed to plugins in a call
to their "register_plugin" method.
Args:
plugin_config - the everett ConfigManager for the plugin
section of the application configuration.
"""
self.plugin_root_dir = os.path.join(rv_home, 'plugins')
self.config_builders = {}
self.command_config_builders = {}
self.commands = []
self.aux_command_classes = {}
self.default_raster_sources = []
self.default_vector_sources = []
self.default_label_sources = []
self.default_label_stores = []
self.default_evaluators = []
self.experiment_runners = {}
self.filesystems = []
plugin_files = load_conf_list(plugin_config('files', default='[]'))
self._load_from_files(plugin_files)
self.plugin_files = plugin_files
plugin_modules = load_conf_list(plugin_config('modules', default='[]'))
self._load_from_modules(plugin_modules)
self.plugin_modules = plugin_modules
def _load_plugin(self, plugin, identifier):
# Check the plugin is valid
if not hasattr(plugin, 'register_plugin'):
raise PluginError('Plugin at {} does not have '
'"register_plugin" method.'.format(identifier))
register_method = getattr(plugin, 'register_plugin')
if not callable(register_method):
raise PluginError('Plugin at {} has a '
'"register_plugin" attribute, '
'but it is not callable'.format(identifier))
# TODO: Log loading plugin.
register_method(self)
def _load_from_files(self, plugin_paths):
if not plugin_paths:
return
self.plugin_sources = []
plugin_base = PluginBase(package='rastervision.plugins')
for uri in plugin_paths:
plugin_name = os.path.splitext(os.path.basename(uri))[0]
plugin_path = os.path.join(self.plugin_root_dir, plugin_name)
fs = rv._registry.get_file_system(uri, search_plugins=False)
local_path = download_if_needed(uri, plugin_path, fs=fs)
local_dir = os.path.dirname(local_path)
plugin_source = plugin_base.make_plugin_source(
searchpath=[local_dir])
# We're required to hang onto the source
# to keep it from getting GC'd.
self.plugin_sources.append(plugin_source)
self._load_plugin(plugin_source.load_plugin(plugin_name), uri)
def _load_from_modules(self, plugin_modules):
if not plugin_modules:
return
for module in plugin_modules:
plugin = importlib.import_module(module)
self._load_plugin(plugin, module)
def add_plugins_from_proto(self, plugin_msg):
new_plugin_files = list(
set(plugin_msg.plugin_uris) - set(self.plugin_files))
self._load_from_files(new_plugin_files)
self.plugin_files.extend(new_plugin_files)
new_plugin_modules = list(
set(plugin_msg.plugin_modules) - set(self.plugin_modules))
self._load_from_modules(new_plugin_modules)
self.plugin_modules.extend(new_plugin_modules)
def to_proto(self):
"""Returns a protobuf message that records the
plugin sources for plugins that are currently loaded
in the registry.
"""
return PluginConfigMsg(
plugin_uris=self.plugin_files, plugin_modules=self.plugin_modules)
def register_config_builder(self, group, key, builder_class):
"""Registers a ConfigBuilder as a plugin.
Args:
group - The Config group, e.g. rv.BACKEND, rv.TASK.
key - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
builder_class - The subclass of ConfigBuilder that builds
the Config for this plugin.
"""
if (group, key) in self.config_builders:
raise PluginError('ConfigBuilder already registered for group '
'{} and key {}'.format(group, key))
self.config_builders[(group, key)] = builder_class
def register_command_config_builder(self, command_type, builder_class):
"""Registers a ConfigBuilder as a plugin.
Args:
command_type - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
builder_class - The subclass of CommandConfigBuilder that builds
the CommandConfig for this plugin.
"""
if command_type in self.command_config_builders:
raise PluginError(
'CommandConfigBuilder already registered for command'
'with type {}'.format(command_type))
self.command_config_builders[command_type] = builder_class
self.commands.append(command_type)
def register_aux_command(self, command_type, command_class):
"""Registers a custom AuxCommand as a plugin.
Args:
command_type - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
            command_class - The AuxCommand subclass to register.
"""
if command_type in self.command_config_builders:
raise PluginError(
'CommandConfigBuilder is already registered for command'
'with type {}'.format(command_type))
if command_type in self.aux_command_classes:
raise PluginError('AuxCommand is already registered for command'
'with type {}'.format(command_type))
self.aux_command_classes[command_type] = command_class
if command_class.options.include_by_default:
self.commands.append(command_type)
def register_default_raster_source(self, provider_class):
"""Registers a RasterSourceDefaultProvider for use as a plugin."""
self.default_raster_sources.append(provider_class)
def register_default_vector_source(self, provider_class):
"""Registers a VectorSourceDefaultProvider for use as a plugin."""
self.default_vector_sources.append(provider_class)
def register_default_label_source(self, provider_class):
"""Registers a LabelSourceDefaultProvider for use as a plugin."""
self.default_label_sources.append(provider_class)
def register_default_label_store(self, provider_class):
"""Registers a LabelStoreDefaultProvider for use as a plugin."""
self.default_label_stores.append(provider_class)
def register_default_evaluator(self, provider_class):
"""Registers an EvaluatorDefaultProvider for use as a plugin."""
self.default_evaluators.append(provider_class)
def register_experiment_runner(self, runner_key, runner_class):
"""Registers an ExperimentRunner as a plugin.
Args:
runner_key - The key used to reference this plugin runner.
This is a string that will match the command line
argument used to reference this runner; e.g. if the
key is "FOO_RUNNER", then users can use the runner
by issuing a "rastervision run foo_runner ..." command.
runner_class - The class of the ExperimentRunner plugin.
"""
if runner_key in self.experiment_runners:
raise PluginError('ExperimentRunner already registered for '
'key {}'.format(runner_key))
self.experiment_runners[runner_key] = runner_class
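        # Illustrative plugin module (runner class name is hypothetical):
        #   def register_plugin(plugin_registry):
        #       plugin_registry.register_experiment_runner('FOO_RUNNER', FooRunner)
        # which, per the docstring above, lets users invoke the runner with
        # "rastervision run foo_runner ...".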
def register_filesystem(self, filesystem_class):
"""Registers a FileSystem as a plugin."""
self.filesystems.append(filesystem_class)
| [
"importlib.import_module",
"rastervision.utils.files.download_if_needed",
"rastervision.protos.plugin_pb2.PluginConfig",
"rastervision._registry._get_plugin_registry",
"os.path.join",
"pluginbase.PluginBase",
"os.path.dirname",
"rastervision._registry.get_file_system",
"os.path.basename"
]
| [((1001, 1036), 'rastervision._registry._get_plugin_registry', 'rv._registry._get_plugin_registry', ([], {}), '()\n', (1034, 1036), True, 'import rastervision as rv\n'), ((1427, 1459), 'os.path.join', 'os.path.join', (['rv_home', '"""plugins"""'], {}), "(rv_home, 'plugins')\n", (1439, 1459), False, 'import os\n'), ((2988, 3030), 'pluginbase.PluginBase', 'PluginBase', ([], {'package': '"""rastervision.plugins"""'}), "(package='rastervision.plugins')\n", (2998, 3030), False, 'from pluginbase import PluginBase\n'), ((4625, 4712), 'rastervision.protos.plugin_pb2.PluginConfig', 'PluginConfigMsg', ([], {'plugin_uris': 'self.plugin_files', 'plugin_modules': 'self.plugin_modules'}), '(plugin_uris=self.plugin_files, plugin_modules=self.\n plugin_modules)\n', (4640, 4712), True, 'from rastervision.protos.plugin_pb2 import PluginConfig as PluginConfigMsg\n'), ((3159, 3206), 'os.path.join', 'os.path.join', (['self.plugin_root_dir', 'plugin_name'], {}), '(self.plugin_root_dir, plugin_name)\n', (3171, 3206), False, 'import os\n'), ((3224, 3279), 'rastervision._registry.get_file_system', 'rv._registry.get_file_system', (['uri'], {'search_plugins': '(False)'}), '(uri, search_plugins=False)\n', (3252, 3279), True, 'import rastervision as rv\n'), ((3305, 3348), 'rastervision.utils.files.download_if_needed', 'download_if_needed', (['uri', 'plugin_path'], {'fs': 'fs'}), '(uri, plugin_path, fs=fs)\n', (3323, 3348), False, 'from rastervision.utils.files import download_if_needed\n'), ((3373, 3400), 'os.path.dirname', 'os.path.dirname', (['local_path'], {}), '(local_path)\n', (3388, 3400), False, 'import os\n'), ((3891, 3922), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (3914, 3922), False, 'import importlib\n'), ((3107, 3128), 'os.path.basename', 'os.path.basename', (['uri'], {}), '(uri)\n', (3123, 3128), False, 'import os\n')] |
"""
Implements a non-interactive controller to control non-interactive visualizers.
(i.e. those that are used for converting TPP source code into another format)
"""
from tpp.FileParser import FileParser
from tpp.controller.TPPController import TPPController
class ConversionController(TPPController):
"""
    Implements a non-interactive controller to run non-interactive visualizers.
(i.e. those that are used for converting TPP source code into another format)
"""
def __init__(self, input_file, output, visualizer_class):
"""
Todo: ApiDoc.
:rtype:
        :param input_file:
:param output:
:param visualizer_class:
"""
super(ConversionController, self).__init__()
parser = FileParser(input_file)
self.pages = parser.get_pages()
self.vis = visualizer_class(output)
def run(self):
"""
Todo: ApiDoc.
:return:
"""
for page in self.pages:
while True:
eop = page.is_eop()
self.vis.visualize(page.next_line(), eop)
if eop:
break
def close(self):
"""
Todo: ApiDoc.
:return:
"""
self.vis.close()
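# Illustrative usage (file names and the visualizer class are hypothetical):
#   controller = ConversionController('talk.tpp', 'talk.tex', SomeTextVisualizer)
#   controller.run()
#   controller.close()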
| [
"tpp.FileParser.FileParser"
]
| [((757, 779), 'tpp.FileParser.FileParser', 'FileParser', (['input_file'], {}), '(input_file)\n', (767, 779), False, 'from tpp.FileParser import FileParser\n')] |
from bs4 import BeautifulSoup
from datetime import date
from lxml import html
import requests
import re
import json
class CovidScraper:
def __init__(self):
self.api_url = 'http://127.0.0.1:5000/covidgr'
self.api_sum_url = 'http://127.0.0.1:5000/summary/covidgr'
self.api_test_url = 'http://127.0.0.1:5000/covidgr/tests'
self.scrape_url = 'https://www.worldometers.info/coronavirus/country/greece/'
self.scrape_tests_url = 'https://github.com/owid/covid-19-data/blob/master/public/data/testing/covid-testing-latest-data-source-details.csv'
self.today = ''
self.covid_data = []
self.summary_data= []
def scrape_data(self):
data = []
self.today = str(date.today())
soup = self.scrape_page_content()
soup_test_page = self.scrape_page_content_contains_tests()
if soup:
self.get_daily_data(soup)
self.get_summary_data(soup)
if self.summary_data and self.covid_data:
post_daily_and_sum_covid_data = self.call_api_put_data(
self.today, self.covid_data, self.summary_data)
data.append(post_daily_and_sum_covid_data)
if soup_test_page:
tests_data = self.get_tests_per_day(soup_test_page)
            if tests_data[0] and tests_data[1]:
post_daily_tests_covid_data = self.call_api_post_tested_covid_data(
tests_data[0], tests_data[1])
data.append(post_daily_tests_covid_data)
return data
def scrape_page_content(self):
page = requests.get(self.scrape_url)
soup = BeautifulSoup(page.content, 'html.parser')
return soup
def scrape_page_content_contains_tests(self):
page = requests.get(self.scrape_tests_url)
soup = BeautifulSoup(page.content, 'html.parser')
return soup
def get_daily_data(self, soup):
covid_data = []
daily_covidgr_html_content = soup.find('li', class_='news_li')
get_daily_covidgr_text = daily_covidgr_html_content.text
for elem in get_daily_covidgr_text.split():
            regex = r'\d*(.|)\d+'
match = re.findall(regex, elem)
if match:
covid_data.append(elem)
self.covid_data = covid_data
def get_summary_data(self, soup):
summary_data = []
all_cases_covidgr_html_content = soup.find_all(
'div', class_='maincounter-number')
for item in range(len(all_cases_covidgr_html_content)):
regex = r'(\n)|\s'
all_cases_data = re.sub(
regex, '', all_cases_covidgr_html_content[item].text)
summary_data.append(all_cases_data)
self.summary_data = summary_data
    def get_tests_per_day(self, tree):
        html_content = tree.find('tr', id='LC34').find_all('td')
        country_code = html_content[1]
        date_test = html_content[3].text
        # Default to None so the return below cannot raise a NameError
        # when the row is not the Greek ('GRC') one.
        today_tests = None
        if country_code.text == 'GRC':
            today_tests = html_content[10].text
            total_tests = html_content[8].text
        return [date_test, today_tests]
def call_api_post_tested_covid_data(self, today, tests):
headers = {
'Content-type': 'application/json',
}
data = json.dumps({"date": today, "daily_test": tests})
response_tests = requests.post(
self.api_test_url, headers=headers, data=data)
return response_tests.json()
def call_api_put_data(self, today, covid_data, summary_data):
headers = {
'Content-type': 'application/json',
}
data = json.dumps(
{"date": today, "cases": covid_data[0], "deaths": covid_data[1]})
sum_data = json.dumps(
{"sum_cases": summary_data[0], "sum_deaths": summary_data[1], "sum_recovered": summary_data[2]})
response = requests.post(self.api_url, headers=headers, data=data)
response_sum = requests.put(
self.api_sum_url, headers=headers, data=sum_data)
return [response.json(), response_sum.json()]
if __name__ == '__main__':
cs = CovidScraper()
results = cs.scrape_data()
print(results)
| [
"requests.post",
"json.dumps",
"requests.get",
"bs4.BeautifulSoup",
"re.findall",
"requests.put",
"re.sub",
"datetime.date.today"
]
| [((1607, 1636), 'requests.get', 'requests.get', (['self.scrape_url'], {}), '(self.scrape_url)\n', (1619, 1636), False, 'import requests\n'), ((1652, 1694), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (1665, 1694), False, 'from bs4 import BeautifulSoup\n'), ((1786, 1821), 'requests.get', 'requests.get', (['self.scrape_tests_url'], {}), '(self.scrape_tests_url)\n', (1798, 1821), False, 'import requests\n'), ((1837, 1879), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (1850, 1879), False, 'from bs4 import BeautifulSoup\n'), ((3370, 3418), 'json.dumps', 'json.dumps', (["{'date': today, 'daily_test': tests}"], {}), "({'date': today, 'daily_test': tests})\n", (3380, 3418), False, 'import json\n'), ((3445, 3505), 'requests.post', 'requests.post', (['self.api_test_url'], {'headers': 'headers', 'data': 'data'}), '(self.api_test_url, headers=headers, data=data)\n', (3458, 3505), False, 'import requests\n'), ((3718, 3794), 'json.dumps', 'json.dumps', (["{'date': today, 'cases': covid_data[0], 'deaths': covid_data[1]}"], {}), "({'date': today, 'cases': covid_data[0], 'deaths': covid_data[1]})\n", (3728, 3794), False, 'import json\n'), ((3828, 3939), 'json.dumps', 'json.dumps', (["{'sum_cases': summary_data[0], 'sum_deaths': summary_data[1],\n 'sum_recovered': summary_data[2]}"], {}), "({'sum_cases': summary_data[0], 'sum_deaths': summary_data[1],\n 'sum_recovered': summary_data[2]})\n", (3838, 3939), False, 'import json\n'), ((3969, 4024), 'requests.post', 'requests.post', (['self.api_url'], {'headers': 'headers', 'data': 'data'}), '(self.api_url, headers=headers, data=data)\n', (3982, 4024), False, 'import requests\n'), ((4049, 4111), 'requests.put', 'requests.put', (['self.api_sum_url'], {'headers': 'headers', 'data': 'sum_data'}), '(self.api_sum_url, headers=headers, data=sum_data)\n', (4061, 4111), False, 'import requests\n'), ((739, 751), 'datetime.date.today', 'date.today', ([], {}), '()\n', (749, 751), False, 'from datetime import date\n'), ((2221, 2244), 're.findall', 're.findall', (['regex', 'elem'], {}), '(regex, elem)\n', (2231, 2244), False, 'import re\n'), ((2660, 2720), 're.sub', 're.sub', (['regex', '""""""', 'all_cases_covidgr_html_content[item].text'], {}), "(regex, '', all_cases_covidgr_html_content[item].text)\n", (2666, 2720), False, 'import re\n')] |
import discord
from discord.ext import commands
class WowCog:
"""Custom Cog that had commands for WoW Memes"""
def __init__(self, bot):
self.bot = bot
async def _play(self, url, ctx):
"""Helper for aliasing Play in the Audio module"""
audio = self.bot.get_cog('Audio')
if not audio:
await self.bot.say("Audio module required. Load with: {}load audio".format(ctx.prefix))
return
await ctx.invoke(audio.play, url_or_search_terms=url)
@commands.command(pass_context=True, no_pm=True)
async def flamewreath(self, ctx):
"""I will not move when Flame Wreath is cast!"""
await self._play("https://www.youtube.com/watch?v=gcA6y7sxKcA", ctx)
def setup(bot):
bot.add_cog(WowCog(bot))
| [
"discord.ext.commands.command"
]
| [((518, 565), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)', 'no_pm': '(True)'}), '(pass_context=True, no_pm=True)\n', (534, 565), False, 'from discord.ext import commands\n')] |
import pytest
from privacy_evaluator.attacks.sample_attack import Sample_Attack
"""
This test only checks that no error is thrown when calling the function; it can be removed in the future.
"""
def test_sample_attack():
test = Sample_Attack(0, 0, 0)
test.perform_attack()
| [
"privacy_evaluator.attacks.sample_attack.Sample_Attack"
]
| [((225, 247), 'privacy_evaluator.attacks.sample_attack.Sample_Attack', 'Sample_Attack', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (238, 247), False, 'from privacy_evaluator.attacks.sample_attack import Sample_Attack\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import re
from setuptools import setup
import textwrap
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('prestodb/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='presto-python-client',
author='<NAME>',
author_email='<EMAIL>',
version=version,
url='https://github.com/prestodb/presto-python-client',
packages=['prestodb'],
package_data={'': ['LICENSE', 'README.md']},
description='Client for the Presto distributed SQL Engine',
long_description=textwrap.dedent("""
Client for Presto (https://prestodb.io), a distributed SQL engine for
interactive and batch big data processing. Provides a low-level client and
a DBAPI 2.0 implementation.
"""),
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Database :: Front-Ends',
],
install_requires=[
'click',
'future',
'ipaddress',
'requests',
'requests_kerberos',
'six',
'typing',
],
extras_require={'tests':[
'httpretty',
'pytest',
'pytest-runner',
]}
)
| [
"textwrap.dedent",
"re.compile"
]
| [((627, 665), 're.compile', 're.compile', (['"""__version__\\\\s+=\\\\s+(.*)"""'], {}), "('__version__\\\\s+=\\\\s+(.*)')\n", (637, 665), False, 'import re\n'), ((1146, 1369), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Client for Presto (https://prestodb.io), a distributed SQL engine for\n interactive and batch big data processing. Provides a low-level client and\n a DBAPI 2.0 implementation.\n """'], {}), '(\n """\n Client for Presto (https://prestodb.io), a distributed SQL engine for\n interactive and batch big data processing. Provides a low-level client and\n a DBAPI 2.0 implementation.\n """\n )\n', (1161, 1369), False, 'import textwrap\n')] |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotnine as gg
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
def f(x):
return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
X = np.linspace(-1.2, 2, n_samples)
y = f(X) + np.random.normal(0, noise, n_samples)
train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 / 3))
df_train = pd.DataFrame({"x": train_X.squeeze(), "y": train_y, "type": "Train"})
df_test = pd.DataFrame({"x": test_X.squeeze(), "y": test_y, "type": "test"})
x_stat = np.linspace(-1.4, 2, 100)
df_stat = pd.DataFrame({"x": x_stat, "y": f(x_stat), "type": "Model"})
df = pd.concat([df_test, df_train])
title = f"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})"
p = gg.ggplot() + \
gg.geom_point(df, gg.aes("x", "y", color="type")) + \
gg.geom_line(df_stat, gg.aes("x", "y")) + \
gg.theme_bw() + \
gg.ggtitle(title)
# print(p)
gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
# Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
train_err = []
validation_err = []
for k in range(11):
pf = PolynomialFitting(k)
train_score, validation_score = cross_validate(pf, train_X.to_numpy(), train_y.to_numpy(), mean_square_error)
train_err.append(train_score)
validation_err.append(validation_score)
df1 = pd.DataFrame({"k": range(11), "avg error": train_err, "type": "train error"})
df2 = pd.DataFrame({"k": range(11), "avg error": validation_err, "type": "validation error"})
df = pd.concat([df1, df2])
title = f" Cross Validation for Polynomial Fitting Over Different Degrees k"
p = gg.ggplot(df, gg.aes("k", "avg error", color="type")) + \
gg.geom_point() + \
gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \
gg.labs(y="Average training and validation errors",
title=f"{title} \nWith Noise: {noise}, Num of samples: {n_samples}")
gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False)
# Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
best_k = np.argmin(np.array(validation_err))
pf = PolynomialFitting(int(best_k))
pf.fit(train_X.to_numpy(), train_y.to_numpy())
y_pred = pf.predict(test_X.to_numpy())
print("best k =", best_k)
print("Test = ", round(mean_square_error(test_y.to_numpy(), y_pred), 2))
print("Validation = ", round(validation_err[best_k], 2))
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
"""
Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
values for Ridge and Lasso regressions
Parameters
----------
n_samples: int, default=50
Number of samples to generate
n_evaluations: int, default = 500
Number of regularization parameter values to evaluate for each of the algorithms
"""
# Question 6 - Load diabetes dataset and split into training and testing portions
X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)
train_X, train_y, test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ], y[50:]
# Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
for name, learner, ran in [("Ridge", RidgeRegression, np.linspace(0.001, 0.05, 500)),
("Lasso", Lasso, np.linspace(0.001, 0.5, 500))]:
train_err = []
validation_err = []
for lam in ran:
rg = learner(lam)
train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(),
mean_square_error)
train_err.append(train_score)
validation_err.append(validation_score)
df1 = pd.DataFrame({"lambda": ran, "avg error": train_err, "type": "train error"})
df2 = pd.DataFrame({"lambda": ran, "avg error": validation_err, "type": "validation error"})
df = pd.concat([df1, df2])
title = f"{name} Regularization Cross Validate Over Different Lambda"
p = gg.ggplot(df, gg.aes("lambda", "avg error", color="type")) + \
gg.geom_line() + \
gg.theme_bw() + gg.labs(y="Average training and validation errors", title=title)
gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
# Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
best_lam = np.argmin(np.array(validation_err))
rg = learner(ran[best_lam])
rg.fit(train_X.to_numpy(), train_y.to_numpy())
y_pred = rg.predict(test_X.to_numpy())
print(f"best lambda {name} = {round(ran[best_lam], 3)}")
print(f"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}")
lr = LinearRegression()
lr.fit(train_X.to_numpy(), train_y.to_numpy())
print("Linear Regression Loss = ", lr.loss(test_X.to_numpy(), test_y.to_numpy()))
if __name__ == '__main__':
np.random.seed(0)
select_polynomial_degree()
select_polynomial_degree(noise=0)
select_polynomial_degree(n_samples=1500, noise=10)
select_regularization_parameter()
| [
"numpy.random.normal",
"pandas.Series",
"plotnine.ggtitle",
"plotnine.ggsave",
"plotnine.ggplot",
"plotnine.theme_bw",
"plotnine.geom_line",
"plotnine.aes",
"IMLearn.learners.regressors.LinearRegression",
"numpy.array",
"numpy.linspace",
"sklearn.datasets.load_diabetes",
"numpy.random.seed",
"plotnine.geom_point",
"pandas.DataFrame",
"plotnine.labs",
"pandas.concat",
"IMLearn.learners.regressors.PolynomialFitting"
]
| [((1030, 1061), 'numpy.linspace', 'np.linspace', (['(-1.2)', '(2)', 'n_samples'], {}), '(-1.2, 2, n_samples)\n', (1041, 1061), True, 'import numpy as np\n'), ((1408, 1433), 'numpy.linspace', 'np.linspace', (['(-1.4)', '(2)', '(100)'], {}), '(-1.4, 2, 100)\n', (1419, 1433), True, 'import numpy as np\n'), ((1518, 1548), 'pandas.concat', 'pd.concat', (['[df_test, df_train]'], {}), '([df_test, df_train])\n', (1527, 1548), True, 'import pandas as pd\n'), ((1839, 1916), 'plotnine.ggsave', 'gg.ggsave', ([], {'filename': 'f"""../../IML/ex5/plots/{title}.png"""', 'plot': 'p', 'verbose': '(False)'}), "(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)\n", (1848, 1916), True, 'import plotnine as gg\n'), ((2496, 2517), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {}), '([df1, df2])\n', (2505, 2517), True, 'import pandas as pd\n'), ((2910, 3011), 'plotnine.ggsave', 'gg.ggsave', ([], {'filename': 'f"""../../IML/ex5/plots/{title} {noise} {n_samples}.png"""', 'plot': 'p', 'verbose': '(False)'}), "(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png',\n plot=p, verbose=False)\n", (2919, 3011), True, 'import plotnine as gg\n'), ((4036, 4090), 'sklearn.datasets.load_diabetes', 'datasets.load_diabetes', ([], {'return_X_y': '(True)', 'as_frame': '(True)'}), '(return_X_y=True, as_frame=True)\n', (4058, 4090), False, 'from sklearn import datasets\n'), ((5883, 5901), 'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5899, 5901), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\n'), ((6072, 6089), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6086, 6089), True, 'import numpy as np\n'), ((1077, 1114), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise', 'n_samples'], {}), '(0, noise, n_samples)\n', (1093, 1114), True, 'import numpy as np\n'), ((1171, 1186), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (1183, 1186), True, 'import pandas as pd\n'), ((1188, 1200), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (1197, 1200), True, 'import pandas as pd\n'), ((1802, 1819), 'plotnine.ggtitle', 'gg.ggtitle', (['title'], {}), '(title)\n', (1812, 1819), True, 'import plotnine as gg\n'), ((2075, 2095), 'IMLearn.learners.regressors.PolynomialFitting', 'PolynomialFitting', (['k'], {}), '(k)\n', (2092, 2095), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\n'), ((2769, 2897), 'plotnine.labs', 'gg.labs', ([], {'y': '"""Average training and validation errors"""', 'title': 'f"""{title} \nWith Noise: {noise}, Num of samples: {n_samples}"""'}), '(y=\'Average training and validation errors\', title=\n f"""{title} \nWith Noise: {noise}, Num of samples: {n_samples}""")\n', (2776, 2897), True, 'import plotnine as gg\n'), ((3128, 3152), 'numpy.array', 'np.array', (['validation_err'], {}), '(validation_err)\n', (3136, 3152), True, 'import numpy as np\n'), ((4856, 4932), 'pandas.DataFrame', 'pd.DataFrame', (["{'lambda': ran, 'avg error': train_err, 'type': 'train error'}"], {}), "({'lambda': ran, 'avg error': train_err, 'type': 'train error'})\n", (4868, 4932), True, 'import pandas as pd\n'), ((4947, 5037), 'pandas.DataFrame', 'pd.DataFrame', (["{'lambda': ran, 'avg error': validation_err, 'type': 'validation error'}"], {}), "({'lambda': ran, 'avg error': validation_err, 'type':\n 'validation error'})\n", (4959, 5037), True, 'import pandas as pd\n'), ((5047, 5068), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {}), '([df1, 
df2])\n', (5056, 5068), True, 'import pandas as pd\n'), ((5354, 5431), 'plotnine.ggsave', 'gg.ggsave', ([], {'filename': 'f"""../../IML/ex5/plots/{title}.png"""', 'plot': 'p', 'verbose': '(False)'}), "(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)\n", (5363, 5431), True, 'import plotnine as gg\n'), ((1776, 1789), 'plotnine.theme_bw', 'gg.theme_bw', ([], {}), '()\n', (1787, 1789), True, 'import plotnine as gg\n'), ((4350, 4379), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.05)', '(500)'], {}), '(0.001, 0.05, 500)\n', (4361, 4379), True, 'import numpy as np\n'), ((4430, 4458), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.5)', '(500)'], {}), '(0.001, 0.5, 500)\n', (4441, 4458), True, 'import numpy as np\n'), ((5281, 5345), 'plotnine.labs', 'gg.labs', ([], {'y': '"""Average training and validation errors"""', 'title': 'title'}), "(y='Average training and validation errors', title=title)\n", (5288, 5345), True, 'import plotnine as gg\n'), ((5552, 5576), 'numpy.array', 'np.array', (['validation_err'], {}), '(validation_err)\n', (5560, 5576), True, 'import numpy as np\n'), ((2701, 2714), 'plotnine.theme_bw', 'gg.theme_bw', ([], {}), '()\n', (2712, 2714), True, 'import plotnine as gg\n'), ((5265, 5278), 'plotnine.theme_bw', 'gg.theme_bw', ([], {}), '()\n', (5276, 5278), True, 'import plotnine as gg\n'), ((1638, 1649), 'plotnine.ggplot', 'gg.ggplot', ([], {}), '()\n', (1647, 1649), True, 'import plotnine as gg\n'), ((1746, 1762), 'plotnine.aes', 'gg.aes', (['"""x"""', '"""y"""'], {}), "('x', 'y')\n", (1752, 1762), True, 'import plotnine as gg\n'), ((2673, 2688), 'plotnine.geom_point', 'gg.geom_point', ([], {}), '()\n', (2686, 2688), True, 'import plotnine as gg\n'), ((5234, 5248), 'plotnine.geom_line', 'gg.geom_line', ([], {}), '()\n', (5246, 5248), True, 'import plotnine as gg\n'), ((1680, 1710), 'plotnine.aes', 'gg.aes', (['"""x"""', '"""y"""'], {'color': '"""type"""'}), "('x', 'y', color='type')\n", (1686, 1710), True, 'import plotnine as gg\n'), ((2621, 2659), 'plotnine.aes', 'gg.aes', (['"""k"""', '"""avg error"""'], {'color': '"""type"""'}), "('k', 'avg error', color='type')\n", (2627, 2659), True, 'import plotnine as gg\n'), ((5173, 5216), 'plotnine.aes', 'gg.aes', (['"""lambda"""', '"""avg error"""'], {'color': '"""type"""'}), "('lambda', 'avg error', color='type')\n", (5179, 5216), True, 'import plotnine as gg\n')] |
import re
import copy
def parse_media(media, content_version, project_chapters):
"""
Converts a media object into formats usable in the catalog
:param media: the media object
:type media: dict
:param content_version: the current version of the source content
:type content_version: string
:param project_chapters: a dictionary of project chapters
:type project_chapters: dict
:return: resource_formats, project_formats a list of resource formats and dictionary of project formats
"""
resource_formats = []
project_formats = {}
if 'resource' in media:
resource_formats = _parse_resource(media['resource'], content_version)
if 'projects' in media:
for project in media['projects']:
project_id = project['identifier']
chapters = []
if project_id == 'obs':
# TRICKY: obs projects always have 50 chapters
# This allows empty projects to still publish media.
for x in range(1, 51): # chapters 1..50
chapters.append(str(x).zfill(2))
if project_id in project_chapters:
chapters = project_chapters[project_id]
project_formats[project_id] = _parse_project(project, content_version, chapters)
return resource_formats, project_formats
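# Illustrative call (the input data below is hypothetical):
#   resource_formats, project_formats = parse_media(
#       media={'resource': {...}, 'projects': [...]},
#       content_version='12',
#       project_chapters={'gen': ['01', '02']})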
def _parse_resource(resource, content_version):
"""
Converts a resource media object into formats usable in the catalog
:param resource: the media object
:type resource: dict
:param content_version: the current version of the source content
:type content_version: string
:return: a list of formats
"""
source_version = _expand_keys(resource['version'], {'latest': content_version})
formats = []
if 'media' in resource:
for media in resource['media']:
media_version = _expand_keys(media['version'], {'latest': content_version})
expansion_vars = _make_expansion_variables(media, content_version)
if 'quality' in media and len(media['quality']) > 0:
# build format for each quality
for quality in media['quality']:
expansion_vars['quality'] = quality
format = _make_format(source_version=source_version,
media_version=media_version,
quality=quality,
media=media,
expansion_vars=expansion_vars)
formats.append(format)
else:
# build a single format
format = _make_format(source_version=source_version,
media_version=media_version,
quality=None,
media=media,
expansion_vars=expansion_vars)
formats.append(format)
return formats
def _make_format(source_version, media_version, quality, media, expansion_vars):
format = {
'format': '',
'modified': '',
'size': 0,
'source_version': '{}'.format(source_version),
'version': '{}'.format(media_version),
'contributor': media['contributor'],
'url': _expand_keys(media['url'], expansion_vars),
'signature': '',
'build_rules': [
'signing.sign_given_url'
]
}
if quality:
format['quality'] = quality
return format
def _parse_project(project, content_version, chapters_ids):
"""
Converts a project media object into formats usable in the catalog
:param project: the media object
:type project: dict
:param content_version: the current version of the source content
:type content_version: string
:param chapters_ids: a list of chapter identifiers in the project
:type chapters_ids: list
:return: a list of formats
"""
source_version = _expand_keys(project['version'], {'latest': content_version})
formats = []
if 'media' in project:
for media in project['media']:
media_version = _expand_keys(media['version'], {'latest': content_version})
expansion_vars = _make_expansion_variables(media, content_version)
if 'quality' in media and len(media['quality']) > 0:
# build format for each quality
for quality in media['quality']:
expansion_vars['quality'] = quality
format = _make_format(source_version=source_version,
media_version=media_version,
quality=quality,
media=media,
expansion_vars=expansion_vars)
chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
if chapters:
format['chapters'] = chapters
formats.append(format)
else:
# build single format
format = _make_format(source_version=source_version,
media_version=media_version,
quality=None,
media=media,
expansion_vars=expansion_vars)
chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
if chapters:
format['chapters'] = chapters
formats.append(format)
return formats
def _prepare_chapter_formats(media, chapters, expansion_vars):
"""
This is a wrapper around the method `_parse_project_chapter`.
    Since chapters are routinely prepared conditionally in multiple places,
    this handles that logic in one place.
:param media: the media object to inspect
:param chapters: a list of chapter ids
:param expansion_vars: a dictionary of variables that may be expanded in the chapter url
:return:
"""
if 'chapter_url' in media:
chapter_url = _expand_keys(media['chapter_url'], expansion_vars)
chapters = _parse_project_chapter(chapter_url, chapters)
if chapters:
return chapters
return None
def _parse_project_chapter(chapter_url, chapters):
"""
Generates chapter formats for use in the catalog
:param chapter_url: the url template that will be used in the formats
:param chapters: a list of chapter ids
:type chapters: list
:return:
"""
# TODO: this requires that we give a well formatted list of chapter ids and check if the Rc is a book
# only book RCs can have chapter formats
formats = []
for chapter_id in chapters:
format = {
'size': 0,
'length': 0,
'modified': '',
'identifier': chapter_id,
'url': _expand_keys(chapter_url, {'chapter': chapter_id}),
'signature': '',
'build_rules': [
'signing.sign_given_url'
]
}
formats.append(format)
return formats
def _make_expansion_variables(media_block, content_version):
"""
Creates a dictionary of expansion variables for media items.
:param self:
:param media_block:
:param content_version:
:return:
"""
vars = copy.copy(media_block)
# strip black listed keys
black_list = ['url', 'chapter_url']
for key in black_list:
if key in vars:
del vars[key]
# TRICKY: using `latest` as an expansion variable in urls is not explicitly stated in the spec,
# but it's a common misunderstanding so we allow it.
vars['latest'] = '{}'.format(content_version)
return vars
def _expand_keys(target, replacements):
"""
Replaces all the dict keys found in the string with the dict values.
Keys in the string must be delimited by brackets {}
:param target:
:param replacements:
:return:
"""
if isinstance(target, basestring) or isinstance(target, str):
result = target
if not isinstance(replacements, dict):
raise Exception('Expected dictionary of replacements but received {}'.format(type(replacements)))
for key in replacements:
if not isinstance(replacements[key], list):
                result = re.sub(r'{\s*' + key + r'\s*}', '{}'.format(replacements[key]), result)
return result
elif isinstance(target, int):
return target
else:
raise Exception('Invalid replacement target "{}". Expected string but received {}'.format(target, type(target)))
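# Illustrative example: keys wrapped in braces are substituted, e.g.
#   _expand_keys('{latest}/obs.zip', {'latest': '4'})  ->  '4/obs.zip'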
| [
"copy.copy"
]
| [((7719, 7741), 'copy.copy', 'copy.copy', (['media_block'], {}), '(media_block)\n', (7728, 7741), False, 'import copy\n')] |
from django.db.models.fields.files import (FieldFile, ImageField,
ImageFileDescriptor)
from django.utils.translation import ugettext as _
from .backends import get_backend_class
from .files import VideoFile
class VideoFileDescriptor(ImageFileDescriptor):
pass
class VideoFieldFile(VideoFile, FieldFile):
def delete(self, save=True):
# Clear the video info cache
if hasattr(self, '_info_cache'):
del self._info_cache
super(VideoFieldFile, self).delete(save=save)
class VideoField(ImageField):
attr_class = VideoFieldFile
descriptor_class = VideoFileDescriptor
description = _("Video")
def __init__(self, verbose_name=None, name=None, duration_field=None,
**kwargs):
self.duration_field = duration_field
super(VideoField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_backend())
return errors
def _check_backend(self):
backend = get_backend_class()
return backend.check()
def to_python(self, data):
# use FileField method
return super(ImageField, self).to_python(data)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
_file = getattr(instance, self.attname)
# we need a real file
if not _file._committed:
return
# write `width` and `height`
super(VideoField, self).update_dimension_fields(instance, force,
*args, **kwargs)
if not self.duration_field:
return
# Nothing to update if we have no file and not being forced to update.
if not _file and not force:
return
if getattr(instance, self.duration_field) and not force:
return
# get duration if file is defined
duration = _file.duration if _file else None
# update duration
setattr(instance, self.duration_field, duration)
def formfield(self, **kwargs):
# use normal FileFieldWidget for now
return super(ImageField, self).formfield(**kwargs)
| [
"django.utils.translation.ugettext"
]
| [((679, 689), 'django.utils.translation.ugettext', '_', (['"""Video"""'], {}), "('Video')\n", (680, 689), True, 'from django.utils.translation import ugettext as _\n')] |
from model.group import Group
def test_modify_group_name(app):
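    # precondition: at least one group must exist before it can be modified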
if app.group.count() == 0:
app.group.create(Group(name="test"))
old_groups = app.group.get_group_list()
app.group.modify_first_group(Group(name="New group"))
new_groups = app.group.get_group_list()
assert len(old_groups) == len(new_groups)
def test_modify_group_header(app):
if app.group.count() == 0:
app.group.create(Group(header="test"))
old_groups = app.group.get_group_list()
app.group.modify_first_group(Group(header="New header"))
new_groups = app.group.get_group_list()
assert len(old_groups) == len(new_groups)
| [
"model.group.Group"
]
| [((218, 241), 'model.group.Group', 'Group', ([], {'name': '"""New group"""'}), "(name='New group')\n", (223, 241), False, 'from model.group import Group\n'), ((526, 552), 'model.group.Group', 'Group', ([], {'header': '"""New header"""'}), "(header='New header')\n", (531, 552), False, 'from model.group import Group\n'), ((121, 139), 'model.group.Group', 'Group', ([], {'name': '"""test"""'}), "(name='test')\n", (126, 139), False, 'from model.group import Group\n'), ((427, 447), 'model.group.Group', 'Group', ([], {'header': '"""test"""'}), "(header='test')\n", (432, 447), False, 'from model.group import Group\n')] |
import elasticsearch
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import time, json, datetime, os
class elalog:
def __init__(self, date):
es_host = os.getenv("ES_PORT_9200_TCP_ADDR") or '<%ELASTICIP%>'
es_port = os.getenv("ES_PORT_9200_TCP_PORT") or '9200'
self.lastDate = date
self.es = Elasticsearch([{'host': es_host, 'port': es_port}])
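        # every index below is dropped and recreated with its mapping if it already exists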
# BLOCKS INDEX
self.blocks_index_name = "blocks-" + date
self.block_mapping = {
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
},
"mappings": {
"blocks-" + date: {
"properties": {
"@dtime": {
"type": "date",
"format": "epoch_second"
},
"hash": {
"type": "text"
},
"signatures": {
"type": "text"
},
"tcount": {
"type": "long"
},
"validator": {
"type": "text",
"fielddata": True
},
"bheight": {
"type": "long"
}
}
}
}
}
if self.es.indices.exists(self.blocks_index_name):
try:
self.es.indices.delete(index=self.blocks_index_name)
self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on create Indicies:", es1)
else:
self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping)
# TRANSACTIONS INDEX
self.transactions_index_name = "transactions-" + date
self.transactions_mapping = {
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
},
"mappings": {
"transactions-" + date: {
"properties": {
"@dtime": {
"type": "date",
"format": "epoch_second"
},
"sender": {
"type": "text",
"fielddata": True
},
"receiver": {
"type": "text",
"fielddata": True
},
"token_count": {
"type": "float"
},
"token_type": {
"type": "text",
"fielddata": True
},
"hash": {
"type": "text"
},
"block": {
"type": "long"
}
}
}
}
}
if self.es.indices.exists(self.transactions_index_name):
try:
self.es.indices.delete(index=self.transactions_index_name)
self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on create Indicies:", es1)
else:
self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping)
# BALANCE HISTORY
self.balance_index_name = "balance"
self.balance_mapping = {
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
},
"mappings": {
"balance": {
"properties": {
"@dtime": {
"type": "date",
"format": "epoch_second"
},
"user": {
"type": "text",
"fielddata": True
},
"balance": {
"type": "float"
}
}
}
}
}
if self.es.indices.exists(self.balance_index_name):
try:
self.es.indices.delete(index=self.balance_index_name)
self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on create Indicies:", es1)
else:
self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping)
# VALIDATOR STATISTIC
self.clients_index_name = "clients"
self.clients_mapping = {
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
},
"mappings": {
"clients": {
"properties": {
"@dtime": {
"type": "date",
"format": "epoch_second"
},
"ip": {
"type": "ip"
},
"geoip": {
"properties": {
"city_name": {
"type": "text"
},
"continent_name": {
"type": "text"
},
"country_iso_code": {
"type": "text"
},
"location": {
"type": "geo_point"
},
"region_name": {
"type": "text"
}
}
},
"public_key": {
"type": "text",
"fielddata": True
},
"client_type": {
"type": "text",
"fielddata": True
}
}
}
}
}
if self.es.indices.exists(self.clients_index_name):
try:
self.es.indices.delete(index=self.clients_index_name)
self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on create Indicies:", es1)
else:
self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping)
def elasticClients(self, jsons:list):
try:
helpers.bulk(self.es, jsons)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on save Validators:", es1)
print("Save Validators in elastic!")
def elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list, hash:str, bheight:int):
index = 'blocks-' + self.lastDate
estype = 'blocks-' + self.lastDate
eljson = json.dumps({"@dtime": int(timestamp), "validator": validator, "tcount": tcount, "signatures": list(signatures), "hash": hash, "bheight": bheight}, separators=(',', ':'))
try:
self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on send Block:", es1)
def elasticTransaction(self, jsons:list):
try:
helpers.bulk(self.es, jsons)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on save bulk Transactions:", es1)
def elasticBalanceHistory(self, balance:dict):
users = balance.keys()
jsonMas = []
print("USER LEN:", len(users))
for user in users:
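            # the user id doubles as the document _id, so each bulk run overwrites the previous balance document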
eljson = {"_index": "balance", "_type": "balance", "_id": user,
"_source": {"@dtime": int(time.time()), "user": user,
"balance": balance.get(user)}}
jsonMas.append(eljson)
try:
helpers.bulk(self.es, jsonMas)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on save balance:", es1)
def getLastEBlock(self):
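        # query the highest block height indexed for the current date (returns 0 while the index is empty)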
query = {"aggs" : {
"max_blnum":{"max":{"field":"bheight"}}
},"size": 0
}
try:
answer = self.es.search(index="blocks-" + self.lastDate, doc_type="blocks-" + self.lastDate, body=query)
if not answer["aggregations"]["max_blnum"]["value"] == None:
return int(answer["aggregations"]["max_blnum"]["value"])
else:
return 0
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on search last block index:", es1)
| [
"elasticsearch.helpers.bulk",
"elasticsearch.Elasticsearch",
"time.time",
"os.getenv"
]
| [((356, 407), 'elasticsearch.Elasticsearch', 'Elasticsearch', (["[{'host': es_host, 'port': es_port}]"], {}), "([{'host': es_host, 'port': es_port}])\n", (369, 407), False, 'from elasticsearch import Elasticsearch\n'), ((192, 226), 'os.getenv', 'os.getenv', (['"""ES_PORT_9200_TCP_ADDR"""'], {}), "('ES_PORT_9200_TCP_ADDR')\n", (201, 226), False, 'import time, json, datetime, os\n'), ((264, 298), 'os.getenv', 'os.getenv', (['"""ES_PORT_9200_TCP_PORT"""'], {}), "('ES_PORT_9200_TCP_PORT')\n", (273, 298), False, 'import time, json, datetime, os\n'), ((8388, 8416), 'elasticsearch.helpers.bulk', 'helpers.bulk', (['self.es', 'jsons'], {}), '(self.es, jsons)\n', (8400, 8416), False, 'from elasticsearch import helpers\n'), ((9266, 9294), 'elasticsearch.helpers.bulk', 'helpers.bulk', (['self.es', 'jsons'], {}), '(self.es, jsons)\n', (9278, 9294), False, 'from elasticsearch import helpers\n'), ((9877, 9907), 'elasticsearch.helpers.bulk', 'helpers.bulk', (['self.es', 'jsonMas'], {}), '(self.es, jsonMas)\n', (9889, 9907), False, 'from elasticsearch import helpers\n'), ((9723, 9734), 'time.time', 'time.time', ([], {}), '()\n', (9732, 9734), False, 'import time, json, datetime, os\n')] |
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import sys
from deepchem.models import KerasModel
from deepchem.models.layers import AtomicConvolution
from deepchem.models.losses import L2Loss
from tensorflow.keras.layers import Input, Layer
import numpy as np
import tensorflow as tf
import itertools
def initializeWeightsBiases(prev_layer_size,
size,
weights=None,
biases=None,
name=None):
"""Initializes weights and biases to be used in a fully-connected layer.
Parameters
----------
prev_layer_size: int
Number of features in previous layer.
size: int
Number of nodes in this layer.
weights: tf.Tensor, optional (Default None)
Weight tensor.
biases: tf.Tensor, optional (Default None)
Bias tensor.
name: str
Name for this op, optional (Defaults to 'fully_connected' if None)
Returns
-------
weights: tf.Variable
Initialized weights.
biases: tf.Variable
Initialized biases.
"""
if weights is None:
weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01)
if biases is None:
biases = tf.zeros([size])
w = tf.Variable(weights, name='w')
b = tf.Variable(biases, name='b')
return w, b
class AtomicConvScore(Layer):
"""The scoring function used by the atomic convolution models."""
def __init__(self, atom_types, layer_sizes, **kwargs):
super(AtomicConvScore, self).__init__(**kwargs)
self.atom_types = atom_types
self.layer_sizes = layer_sizes
def build(self, input_shape):
self.type_weights = []
self.type_biases = []
self.output_weights = []
self.output_biases = []
n_features = int(input_shape[0][-1])
layer_sizes = self.layer_sizes
num_layers = len(layer_sizes)
weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]
bias_init_consts = [0.0] * num_layers
for ind, atomtype in enumerate(self.atom_types):
prev_layer_size = n_features
self.type_weights.append([])
self.type_biases.append([])
self.output_weights.append([])
self.output_biases.append([])
for i in range(num_layers):
weight, bias = initializeWeightsBiases(
prev_layer_size=prev_layer_size,
size=layer_sizes[i],
weights=tf.random.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
biases=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]]))
self.type_weights[ind].append(weight)
self.type_biases[ind].append(bias)
prev_layer_size = layer_sizes[i]
weight, bias = initializeWeightsBiases(prev_layer_size, 1)
self.output_weights[ind].append(weight)
self.output_biases[ind].append(bias)
def call(self, inputs):
frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs
atom_types = self.atom_types
num_layers = len(self.layer_sizes)
def atomnet(current_input, atomtype):
prev_layer = current_input
for i in range(num_layers):
layer = tf.nn.bias_add(
tf.matmul(prev_layer, self.type_weights[atomtype][i]),
self.type_biases[atomtype][i])
layer = tf.nn.relu(layer)
prev_layer = layer
output_layer = tf.squeeze(
tf.nn.bias_add(
tf.matmul(prev_layer, self.output_weights[atomtype][0]),
self.output_biases[atomtype][0]))
return output_layer
frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32)
frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32)
complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32)
frag1_atomtype_energy = []
frag2_atomtype_energy = []
complex_atomtype_energy = []
for ind, atomtype in enumerate(atom_types):
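      # score every atom with this atom type's network, then zero out atoms of a different type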
frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)
frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)
complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer)
cond = tf.equal(frag1_z, atomtype)
frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros))
cond = tf.equal(frag2_z, atomtype)
frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))
cond = tf.equal(complex_z, atomtype)
complex_atomtype_energy.append(
tf.where(cond, complex_outputs, complex_zeros))
frag1_outputs = tf.add_n(frag1_atomtype_energy)
frag2_outputs = tf.add_n(frag2_atomtype_energy)
complex_outputs = tf.add_n(complex_atomtype_energy)
frag1_energy = tf.reduce_sum(frag1_outputs, 1)
frag2_energy = tf.reduce_sum(frag2_outputs, 1)
complex_energy = tf.reduce_sum(complex_outputs, 1)
binding_energy = complex_energy - (frag1_energy + frag2_energy)
return tf.expand_dims(binding_energy, axis=1)
class AtomicConvModel(KerasModel):
"""Implements an Atomic Convolution Model.
Implements the atomic convolutional networks as introduced in
<NAME> al. "Atomic convolutional networks for predicting protein-ligand binding affinity." arXiv preprint arXiv:1703.10603 (2017).
The atomic convolutional networks function as a variant of
graph convolutions. The difference is that the "graph" here is
the nearest neighbors graph in 3D space. The AtomicConvModel
leverages these connections in 3D space to train models that
learn to predict energetic state starting from the spatial
geometry of the model.
"""
def __init__(self,
frag1_num_atoms=70,
frag2_num_atoms=634,
complex_num_atoms=701,
max_num_neighbors=12,
batch_size=24,
atom_types=[
6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35.,
53., -1.
],
radial=[[
1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0,
7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
], [0.0, 4.0, 8.0], [0.4]],
layer_sizes=[32, 32, 16],
learning_rate=0.001,
**kwargs):
"""
Parameters
----------
frag1_num_atoms: int
Number of atoms in first fragment
frag2_num_atoms: int
      Number of atoms in second fragment
max_num_neighbors: int
Maximum number of neighbors possible for an atom. Recall neighbors
are spatial neighbors.
atom_types: list
List of atoms recognized by model. Atoms are indicated by their
nuclear numbers.
radial: list
TODO: add description
layer_sizes: list
TODO: add description
learning_rate: float
Learning rate for the model.
"""
# TODO: Turning off queue for now. Safe to re-activate?
self.complex_num_atoms = complex_num_atoms
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.max_num_neighbors = max_num_neighbors
self.batch_size = batch_size
self.atom_types = atom_types
rp = [x for x in itertools.product(*radial)]
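    # every combination drawn from the three radial parameter lists defines one filter of the atomic convolution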
frag1_X = Input(shape=(frag1_num_atoms, 3))
frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_z = Input(shape=(frag1_num_atoms,))
frag2_X = Input(shape=(frag2_num_atoms, 3))
frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_z = Input(shape=(frag2_num_atoms,))
complex_X = Input(shape=(complex_num_atoms, 3))
complex_nbrs = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_z = Input(shape=(complex_num_atoms,))
self._frag1_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag1_X, frag1_nbrs, frag1_nbrs_z])
self._frag2_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z])
self._complex_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z])
score = AtomicConvScore(self.atom_types, layer_sizes)([
self._frag1_conv, self._frag2_conv, self._complex_conv, frag1_z,
frag2_z, complex_z
])
model = tf.keras.Model(
inputs=[
frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs,
frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z,
complex_z
],
outputs=score)
super(AtomicConvModel, self).__init__(
model, L2Loss(), batch_size=batch_size, **kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
batch_size = self.batch_size
def replace_atom_types(z):
def place_holder(i):
if i in self.atom_types:
return i
return -1
return np.array([place_holder(x) for x in z])
for epoch in range(epochs):
for ind, (F_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(
batch_size, deterministic=True, pad_batches=pad_batches)):
N = self.complex_num_atoms
N_1 = self.frag1_num_atoms
N_2 = self.frag2_num_atoms
M = self.max_num_neighbors
batch_size = F_b.shape[0]
num_features = F_b[0][0].shape[1]
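        # F_b[i] packs (frag1 X, frag1 nbrs, frag1 Z, frag2 X, frag2 nbrs, frag2 Z, complex X, complex nbrs, complex Z)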
frag1_X_b = np.zeros((batch_size, N_1, num_features))
for i in range(batch_size):
frag1_X_b[i] = F_b[i][0]
frag2_X_b = np.zeros((batch_size, N_2, num_features))
for i in range(batch_size):
frag2_X_b[i] = F_b[i][3]
complex_X_b = np.zeros((batch_size, N, num_features))
for i in range(batch_size):
complex_X_b[i] = F_b[i][6]
frag1_Nbrs = np.zeros((batch_size, N_1, M))
frag1_Z_b = np.zeros((batch_size, N_1))
for i in range(batch_size):
z = replace_atom_types(F_b[i][2])
frag1_Z_b[i] = z
frag1_Nbrs_Z = np.zeros((batch_size, N_1, M))
for atom in range(N_1):
for i in range(batch_size):
atom_nbrs = F_b[i][1].get(atom, "")
frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j]
frag2_Nbrs = np.zeros((batch_size, N_2, M))
frag2_Z_b = np.zeros((batch_size, N_2))
for i in range(batch_size):
z = replace_atom_types(F_b[i][5])
frag2_Z_b[i] = z
frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))
for atom in range(N_2):
for i in range(batch_size):
atom_nbrs = F_b[i][4].get(atom, "")
frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j]
complex_Nbrs = np.zeros((batch_size, N, M))
complex_Z_b = np.zeros((batch_size, N))
for i in range(batch_size):
z = replace_atom_types(F_b[i][8])
complex_Z_b[i] = z
complex_Nbrs_Z = np.zeros((batch_size, N, M))
for atom in range(N):
for i in range(batch_size):
atom_nbrs = F_b[i][7].get(atom, "")
complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j]
inputs = [
frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b,
frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b, complex_Nbrs,
complex_Nbrs_Z, complex_Z_b
]
y_b = np.reshape(y_b, newshape=(batch_size, 1))
yield (inputs, [y_b], [w_b])
| [
"tensorflow.equal",
"numpy.sqrt",
"tensorflow.reduce_sum",
"numpy.array",
"tensorflow.random.truncated_normal",
"tensorflow.keras.layers.Input",
"numpy.reshape",
"deepchem.models.losses.L2Loss",
"itertools.product",
"tensorflow.matmul",
"tensorflow.zeros_like",
"tensorflow.zeros",
"tensorflow.Variable",
"tensorflow.where",
"tensorflow.expand_dims",
"deepchem.models.layers.AtomicConvolution",
"tensorflow.nn.relu",
"numpy.zeros",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.keras.Model"
]
| [((1263, 1293), 'tensorflow.Variable', 'tf.Variable', (['weights'], {'name': '"""w"""'}), "(weights, name='w')\n", (1274, 1293), True, 'import tensorflow as tf\n'), ((1300, 1329), 'tensorflow.Variable', 'tf.Variable', (['biases'], {'name': '"""b"""'}), "(biases, name='b')\n", (1311, 1329), True, 'import tensorflow as tf\n'), ((1140, 1204), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[prev_layer_size, size]'], {'stddev': '(0.01)'}), '([prev_layer_size, size], stddev=0.01)\n', (1166, 1204), True, 'import tensorflow as tf\n'), ((1239, 1255), 'tensorflow.zeros', 'tf.zeros', (['[size]'], {}), '([size])\n', (1247, 1255), True, 'import tensorflow as tf\n'), ((3626, 3666), 'tensorflow.zeros_like', 'tf.zeros_like', (['frag1_z'], {'dtype': 'tf.float32'}), '(frag1_z, dtype=tf.float32)\n', (3639, 3666), True, 'import tensorflow as tf\n'), ((3685, 3725), 'tensorflow.zeros_like', 'tf.zeros_like', (['frag2_z'], {'dtype': 'tf.float32'}), '(frag2_z, dtype=tf.float32)\n', (3698, 3725), True, 'import tensorflow as tf\n'), ((3746, 3788), 'tensorflow.zeros_like', 'tf.zeros_like', (['complex_z'], {'dtype': 'tf.float32'}), '(complex_z, dtype=tf.float32)\n', (3759, 3788), True, 'import tensorflow as tf\n'), ((4555, 4586), 'tensorflow.add_n', 'tf.add_n', (['frag1_atomtype_energy'], {}), '(frag1_atomtype_energy)\n', (4563, 4586), True, 'import tensorflow as tf\n'), ((4607, 4638), 'tensorflow.add_n', 'tf.add_n', (['frag2_atomtype_energy'], {}), '(frag2_atomtype_energy)\n', (4615, 4638), True, 'import tensorflow as tf\n'), ((4661, 4694), 'tensorflow.add_n', 'tf.add_n', (['complex_atomtype_energy'], {}), '(complex_atomtype_energy)\n', (4669, 4694), True, 'import tensorflow as tf\n'), ((4715, 4746), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['frag1_outputs', '(1)'], {}), '(frag1_outputs, 1)\n', (4728, 4746), True, 'import tensorflow as tf\n'), ((4766, 4797), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['frag2_outputs', '(1)'], {}), '(frag2_outputs, 1)\n', (4779, 4797), True, 'import tensorflow as tf\n'), ((4819, 4852), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['complex_outputs', '(1)'], {}), '(complex_outputs, 1)\n', (4832, 4852), True, 'import tensorflow as tf\n'), ((4932, 4970), 'tensorflow.expand_dims', 'tf.expand_dims', (['binding_energy'], {'axis': '(1)'}), '(binding_energy, axis=1)\n', (4946, 4970), True, 'import tensorflow as tf\n'), ((7204, 7237), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(frag1_num_atoms, 3)'}), '(shape=(frag1_num_atoms, 3))\n', (7209, 7237), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7255, 7304), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(frag1_num_atoms, max_num_neighbors)'}), '(shape=(frag1_num_atoms, max_num_neighbors))\n', (7260, 7304), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7324, 7373), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(frag1_num_atoms, max_num_neighbors)'}), '(shape=(frag1_num_atoms, max_num_neighbors))\n', (7329, 7373), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7388, 7419), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(frag1_num_atoms,)'}), '(shape=(frag1_num_atoms,))\n', (7393, 7419), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7435, 7468), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(frag2_num_atoms, 3)'}), '(shape=(frag2_num_atoms, 3))\n', (7440, 7468), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7486, 7535), 'tensorflow.keras.layers.Input', 'Input', ([], 
{'shape': '(frag2_num_atoms, max_num_neighbors)'}), '(shape=(frag2_num_atoms, max_num_neighbors))\n', (7491, 7535), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7555, 7604), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(frag2_num_atoms, max_num_neighbors)'}), '(shape=(frag2_num_atoms, max_num_neighbors))\n', (7560, 7604), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7619, 7650), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(frag2_num_atoms,)'}), '(shape=(frag2_num_atoms,))\n', (7624, 7650), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7668, 7703), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(complex_num_atoms, 3)'}), '(shape=(complex_num_atoms, 3))\n', (7673, 7703), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7723, 7774), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(complex_num_atoms, max_num_neighbors)'}), '(shape=(complex_num_atoms, max_num_neighbors))\n', (7728, 7774), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7796, 7847), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(complex_num_atoms, max_num_neighbors)'}), '(shape=(complex_num_atoms, max_num_neighbors))\n', (7801, 7847), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((7864, 7897), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(complex_num_atoms,)'}), '(shape=(complex_num_atoms,))\n', (7869, 7897), False, 'from tensorflow.keras.layers import Input, Layer\n'), ((8555, 8741), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs,\n frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z, complex_z]', 'outputs': 'score'}), '(inputs=[frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X,\n frag2_nbrs, frag2_nbrs_z, frag2_z, complex_X, complex_nbrs,\n complex_nbrs_z, complex_z], outputs=score)\n', (8569, 8741), True, 'import tensorflow as tf\n'), ((4168, 4195), 'tensorflow.equal', 'tf.equal', (['frag1_z', 'atomtype'], {}), '(frag1_z, atomtype)\n', (4176, 4195), True, 'import tensorflow as tf\n'), ((4288, 4315), 'tensorflow.equal', 'tf.equal', (['frag2_z', 'atomtype'], {}), '(frag2_z, atomtype)\n', (4296, 4315), True, 'import tensorflow as tf\n'), ((4408, 4437), 'tensorflow.equal', 'tf.equal', (['complex_z', 'atomtype'], {}), '(complex_z, atomtype)\n', (4416, 4437), True, 'import tensorflow as tf\n'), ((7922, 7999), 'deepchem.models.layers.AtomicConvolution', 'AtomicConvolution', ([], {'atom_types': 'self.atom_types', 'radial_params': 'rp', 'boxsize': 'None'}), '(atom_types=self.atom_types, radial_params=rp, boxsize=None)\n', (7939, 7999), False, 'from deepchem.models.layers import AtomicConvolution\n'), ((8078, 8155), 'deepchem.models.layers.AtomicConvolution', 'AtomicConvolution', ([], {'atom_types': 'self.atom_types', 'radial_params': 'rp', 'boxsize': 'None'}), '(atom_types=self.atom_types, radial_params=rp, boxsize=None)\n', (8095, 8155), False, 'from deepchem.models.layers import AtomicConvolution\n'), ((8236, 8313), 'deepchem.models.layers.AtomicConvolution', 'AtomicConvolution', ([], {'atom_types': 'self.atom_types', 'radial_params': 'rp', 'boxsize': 'None'}), '(atom_types=self.atom_types, radial_params=rp, boxsize=None)\n', (8253, 8313), False, 'from deepchem.models.layers import AtomicConvolution\n'), ((8855, 8863), 'deepchem.models.losses.L2Loss', 'L2Loss', ([], {}), '()\n', (8861, 8863), False, 'from deepchem.models.losses import L2Loss\n'), ((1906, 1916), 
'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (1913, 1916), True, 'import numpy as np\n'), ((3357, 3374), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (3367, 3374), True, 'import tensorflow as tf\n'), ((4231, 4273), 'tensorflow.where', 'tf.where', (['cond', 'frag1_outputs', 'frag1_zeros'], {}), '(cond, frag1_outputs, frag1_zeros)\n', (4239, 4273), True, 'import tensorflow as tf\n'), ((4351, 4393), 'tensorflow.where', 'tf.where', (['cond', 'frag2_outputs', 'frag2_zeros'], {}), '(cond, frag2_outputs, frag2_zeros)\n', (4359, 4393), True, 'import tensorflow as tf\n'), ((4486, 4532), 'tensorflow.where', 'tf.where', (['cond', 'complex_outputs', 'complex_zeros'], {}), '(cond, complex_outputs, complex_zeros)\n', (4494, 4532), True, 'import tensorflow as tf\n'), ((7162, 7188), 'itertools.product', 'itertools.product', (['*radial'], {}), '(*radial)\n', (7179, 7188), False, 'import itertools\n'), ((9761, 9802), 'numpy.zeros', 'np.zeros', (['(batch_size, N_1, num_features)'], {}), '((batch_size, N_1, num_features))\n', (9769, 9802), True, 'import numpy as np\n'), ((9895, 9936), 'numpy.zeros', 'np.zeros', (['(batch_size, N_2, num_features)'], {}), '((batch_size, N_2, num_features))\n', (9903, 9936), True, 'import numpy as np\n'), ((10031, 10070), 'numpy.zeros', 'np.zeros', (['(batch_size, N, num_features)'], {}), '((batch_size, N, num_features))\n', (10039, 10070), True, 'import numpy as np\n'), ((10166, 10196), 'numpy.zeros', 'np.zeros', (['(batch_size, N_1, M)'], {}), '((batch_size, N_1, M))\n', (10174, 10196), True, 'import numpy as np\n'), ((10217, 10244), 'numpy.zeros', 'np.zeros', (['(batch_size, N_1)'], {}), '((batch_size, N_1))\n', (10225, 10244), True, 'import numpy as np\n'), ((10375, 10405), 'numpy.zeros', 'np.zeros', (['(batch_size, N_1, M)'], {}), '((batch_size, N_1, M))\n', (10383, 10405), True, 'import numpy as np\n'), ((10730, 10760), 'numpy.zeros', 'np.zeros', (['(batch_size, N_2, M)'], {}), '((batch_size, N_2, M))\n', (10738, 10760), True, 'import numpy as np\n'), ((10781, 10808), 'numpy.zeros', 'np.zeros', (['(batch_size, N_2)'], {}), '((batch_size, N_2))\n', (10789, 10808), True, 'import numpy as np\n'), ((10939, 10969), 'numpy.zeros', 'np.zeros', (['(batch_size, N_2, M)'], {}), '((batch_size, N_2, M))\n', (10947, 10969), True, 'import numpy as np\n'), ((11296, 11324), 'numpy.zeros', 'np.zeros', (['(batch_size, N, M)'], {}), '((batch_size, N, M))\n', (11304, 11324), True, 'import numpy as np\n'), ((11347, 11372), 'numpy.zeros', 'np.zeros', (['(batch_size, N)'], {}), '((batch_size, N))\n', (11355, 11372), True, 'import numpy as np\n'), ((11507, 11535), 'numpy.zeros', 'np.zeros', (['(batch_size, N, M)'], {}), '((batch_size, N, M))\n', (11515, 11535), True, 'import numpy as np\n'), ((12073, 12114), 'numpy.reshape', 'np.reshape', (['y_b'], {'newshape': '(batch_size, 1)'}), '(y_b, newshape=(batch_size, 1))\n', (12083, 12114), True, 'import numpy as np\n'), ((3243, 3296), 'tensorflow.matmul', 'tf.matmul', (['prev_layer', 'self.type_weights[atomtype][i]'], {}), '(prev_layer, self.type_weights[atomtype][i])\n', (3252, 3296), True, 'import tensorflow as tf\n'), ((3476, 3531), 'tensorflow.matmul', 'tf.matmul', (['prev_layer', 'self.output_weights[atomtype][0]'], {}), '(prev_layer, self.output_weights[atomtype][0])\n', (3485, 3531), True, 'import tensorflow as tf\n'), ((2391, 2494), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', ([], {'shape': '[prev_layer_size, layer_sizes[i]]', 'stddev': 'weight_init_stddevs[i]'}), '(shape=[prev_layer_size, 
layer_sizes[i]], stddev=\n weight_init_stddevs[i])\n', (2417, 2494), True, 'import tensorflow as tf\n'), ((2543, 2605), 'tensorflow.constant', 'tf.constant', ([], {'value': 'bias_init_consts[i]', 'shape': '[layer_sizes[i]]'}), '(value=bias_init_consts[i], shape=[layer_sizes[i]])\n', (2554, 2605), True, 'import tensorflow as tf\n'), ((10575, 10594), 'numpy.array', 'np.array', (['atom_nbrs'], {}), '(atom_nbrs)\n', (10583, 10594), True, 'import numpy as np\n'), ((11139, 11158), 'numpy.array', 'np.array', (['atom_nbrs'], {}), '(atom_nbrs)\n', (11147, 11158), True, 'import numpy as np\n'), ((11705, 11724), 'numpy.array', 'np.array', (['atom_nbrs'], {}), '(atom_nbrs)\n', (11713, 11724), True, 'import numpy as np\n')] |
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.brain.oob import BrainOOBConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class BrainOOBConfigurationTests(unittest.TestCase):
def test_oob_with_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
oobs:
default:
classname: programy.oob.defaults.default.DefaultOutOfBandProcessor
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
oobs_config = yaml.get_section("oobs", brain_config)
self.assertIsNotNone(oobs_config)
oob_config = BrainOOBConfiguration("default")
oob_config.load_config_section(yaml, oobs_config, ".")
self.assertEqual("programy.oob.defaults.default.DefaultOutOfBandProcessor", oob_config.classname)
def test_default_without_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
oobs:
default:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
oobs_config = yaml.get_section("oobs", brain_config)
self.assertIsNotNone(oobs_config)
oob_config = BrainOOBConfiguration("default")
oob_config.load_config_section(yaml, oobs_config, ".")
self.assertIsNone(oob_config.classname)
| [
"programy.config.file.yaml_file.YamlConfigurationFile",
"programy.config.brain.oob.BrainOOBConfiguration",
"programy.clients.events.console.config.ConsoleConfiguration"
]
| [((1390, 1413), 'programy.config.file.yaml_file.YamlConfigurationFile', 'YamlConfigurationFile', ([], {}), '()\n', (1411, 1413), False, 'from programy.config.file.yaml_file import YamlConfigurationFile\n'), ((1880, 1912), 'programy.config.brain.oob.BrainOOBConfiguration', 'BrainOOBConfiguration', (['"""default"""'], {}), "('default')\n", (1901, 1912), False, 'from programy.config.brain.oob import BrainOOBConfiguration\n'), ((2140, 2163), 'programy.config.file.yaml_file.YamlConfigurationFile', 'YamlConfigurationFile', ([], {}), '()\n', (2161, 2163), False, 'from programy.config.file.yaml_file import YamlConfigurationFile\n'), ((2549, 2581), 'programy.config.brain.oob.BrainOOBConfiguration', 'BrainOOBConfiguration', (['"""default"""'], {}), "('default')\n", (2570, 2581), False, 'from programy.config.brain.oob import BrainOOBConfiguration\n'), ((1633, 1655), 'programy.clients.events.console.config.ConsoleConfiguration', 'ConsoleConfiguration', ([], {}), '()\n', (1653, 1655), False, 'from programy.clients.events.console.config import ConsoleConfiguration\n'), ((2302, 2324), 'programy.clients.events.console.config.ConsoleConfiguration', 'ConsoleConfiguration', ([], {}), '()\n', (2322, 2324), False, 'from programy.clients.events.console.config import ConsoleConfiguration\n')] |
import math
import os
from copy import deepcopy
from ast import literal_eval
import pandas as pd
from math import factorial
import random
from collections import Counter, defaultdict
import sys
from nltk import word_tokenize
from tqdm import tqdm, trange
import argparse
import numpy as np
import re
import csv
from sklearn.model_selection import train_test_split
from swda.swda import CorpusReader, Transcript, Utterance
act2word = {1:"inform",2:"question", 3:"directive", 4:"commissive"}
def permute(sents, sent_DAs, amount):
""" return a list of different! permuted sentences and their respective dialog acts """
""" if amount is greater than the possible amount of permutations, only the uniquely possible ones are returned """
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
if amount == 0:
return []
permutations = [list(range(len(sents)))]
amount = min(amount, factorial(len(sents))-1)
for i in range(amount):
permutation = np.random.permutation(len(sents))
while permutation.tolist() in permutations:
permutation = np.random.permutation(len(sents))
permutations.append(permutation.tolist())
    return permutations[1:]  # drop index 0: the original order, included above only so it can never be re-generated
def draw_rand_sent(act_utt_df, sent_len, amount):
""" df is supposed to be a pandas dataframe with colums 'act' and 'utt' (utterance),
with act being a number from 1 to 4 and utt being a sentence """
permutations = []
for _ in range(amount):
(utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df)
sent_insert_ix = random.randint(0, sent_len-1)
permutations.append((utt, da, name, ix, sent_insert_ix))
return permutations
def draw_rand_sent_from_df(df):
ix = random.randint(0, len(df['utt'])-1)
return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix]
def half_perturb(sents, sent_DAs, amount):
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
permutations = [list(range(len(sents)))]
for _ in range(amount):
while True:
speaker = random.randint(0,1) # choose one of the speakers
speaker_ix = list(filter(lambda x: (x-speaker) % 2 == 0, range(len(sents))))
permuted_speaker_ix = np.random.permutation(speaker_ix)
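            # shuffle only the chosen speaker's utterance positions; the other speaker's turns stay in place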
new_sents = list(range(len(sents)))
for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix):
new_sents[i_to] = i_from
if (not new_sents == permutations[0]) and (
not new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))):
permutations.append(new_sents)
break
return permutations[1:]
def utterance_insertions(length, amount):
possible_permutations = []
original = list(range(length))
for ix in original:
for y in range(length):
if ix == y: continue
ix_removed = original[0:ix] + ([] if ix == length-1 else original[ix+1:])
ix_removed.insert(y, ix)
possible_permutations.append(deepcopy(ix_removed))
permutations = []
for _ in range(amount):
i = random.randint(0, len(possible_permutations)-1)
permutations.append(possible_permutations[i])
return permutations
class DailyDialogConverter:
def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True):
self.data_dir = data_dir
self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')
self.tokenizer = tokenizer
self.word2id = word2id
self.output_file = None
self.task = task
self.ranking_dataset = ranking_dataset
self.perturbation_statistics = 0
self.setname = os.path.split(data_dir)[1]
assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', "wrong data dir name"
def create_act_utt(self):
dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
        output_file = os.path.join(self.data_dir, 'act_utt_name.txt')
df = open(dial_file, 'r')
af = open(act_file, 'r')
of = open(output_file, 'w')
csv_writer = csv.writer(of, delimiter='|')
for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
seqs = dial.split('__eou__')
seqs = seqs[:-1]
if len(seqs) < 5:
continue
tok_seqs = [self.tokenizer(seq) for seq in seqs]
tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
tok_seqs = [self.word2id(seq) for seq in tok_seqs]
acts = act.split(' ')
acts = acts[:-1]
acts = [int(act) for act in acts]
for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):
dialog_name = "{}_{}".format(self.setname, line_count)
row = (act, utt, dialog_name,utt_i)
csv_writer.writerow(row)
def convert_dset(self, amounts):
# data_dir is supposed to be the dir with the respective train/test/val-dataset files
print("Creating {} perturbations for task {}".format(amounts, self.task))
dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task))
root_data_dir = os.path.split(self.data_dir)[0]
shuffled_path = os.path.join(root_data_dir, "shuffled_{}".format(self.task))
if not os.path.isdir(shuffled_path):
os.mkdir(shuffled_path)
assert os.path.isfile(dial_file) and os.path.isfile(act_file), "could not find input files"
assert os.path.isfile(self.act_utt_file), "missing act_utt.txt in data_dir"
with open(self.act_utt_file, 'r') as f:
act_utt_df = pd.read_csv(f, sep='|', names=['act','utt','dialogue','ix'])
rand_generator = lambda: draw_rand_sent_from_df(act_utt_df)
df = open(dial_file, 'r')
af = open(act_file, 'r')
of = open(self.output_file, 'w')
discarded = 0
for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
seqs = dial.split('__eou__')
seqs = seqs[:-1]
if len(seqs) < 5:
discarded += 1
continue
tok_seqs = [self.tokenizer(seq) for seq in seqs]
tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
tok_seqs = [self.word2id(seq) for seq in tok_seqs]
acts = act.split(' ')
acts = acts[:-1]
acts = [int(act) for act in acts]
if self.task == 'up':
permuted_ixs = permute(tok_seqs, acts, amounts)
elif self.task == 'us':
permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts)
elif self.task == 'hup':
permuted_ixs = half_perturb(tok_seqs, acts, amounts)
elif self.task == 'ui':
permuted_ixs = utterance_insertions(len(tok_seqs), amounts)
shuffle_file = os.path.join(shuffled_path, "{}_{}.csv".format(self.setname, line_count))
with open(shuffle_file, "w") as f:
csv_writer = csv.writer(f)
for perm in permuted_ixs:
if self.task == 'us':
(utt, da, name, ix, insert_ix) = perm
row = [name, ix,insert_ix]
csv_writer.writerow(row)
else:
csv_writer.writerow(perm)
self.perturbation_statistics += len(permuted_ixs)
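            # each row: label|acts|utterances|permuted acts|permuted utterances, plus the mirrored row with the label flipped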
if self.task == 'us':
for p in permuted_ixs:
(insert_sent, insert_da, name, ix, insert_ix) = p
a = " ".join([str(a) for a in acts])
u = str(tok_seqs)
p_a = deepcopy(acts)
p_a[insert_ix] = insert_da
pa = " ".join([str(a) for a in p_a])
p_u = deepcopy(tok_seqs)
p_u[insert_ix] = self.word2id(insert_sent)
of.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
of.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
else:
for p in permuted_ixs:
a = " ".join([str(a) for a in acts])
u = str(tok_seqs)
pa = [acts[i] for i in p]
p_a = " ".join([str(a) for a in pa])
pu = [tok_seqs[i] for i in p]
p_u = str(pu)
of.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
of.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
        print("Discarded {} dialogues shorter than 5 utterances".format(discarded))
class SwitchboardConverter:
def __init__(self, data_dir, tokenizer, word2id, task='', seed=42):
self.corpus = CorpusReader(data_dir)
self.data_dir = data_dir
self.tokenizer = tokenizer
self.word2id = word2id
self.task = task
self.utt_num = 0
for utt in self.corpus.iter_utterances():
self.utt_num += 1
self.trans_num = 0
for trans in self.corpus.iter_transcripts():
self.trans_num += 1
self.da2num = switchboard_da_mapping()
# CAUTION: make sure that for each task the seed is the same s.t. the splits will be the same!
train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed)
val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed)
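        # 80% of transcripts for training; the held-out 20% is split evenly into validation and test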
self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs
self.utt_da_pairs = []
prev_da = "%"
for i, utt in enumerate(self.corpus.iter_utterances()):
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.word2id(self.tokenizer(sentence))
act = utt.damsl_act_tag()
if act == None: act = "%"
if act == "+": act = prev_da
_, swda_name = os.path.split(utt.swda_filename)
swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name
ix = utt.utterance_index
self.utt_da_pairs.append((sentence, act, swda_name, ix))
def draw_rand_sent(self):
r = random.randint(0, len(self.utt_da_pairs)-1)
return self.utt_da_pairs[r]
def create_vocab(self):
print("Creating Vocab file for Switchboard")
cnt = Counter()
for utt in self.corpus.iter_utterances():
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.tokenizer(sentence)
for w in sentence:
cnt[w] += 1
itos_file = os.path.join(self.data_dir, "itos.txt")
itosf = open(itos_file, "w")
for (word, _) in cnt.most_common(25000):
itosf.write("{}\n".format(word))
#getKeysByValue
def swda_permute(self, sents, amount, speaker_ixs):
if amount == 0:
return []
permutations = [list(range(len(sents)))]
segment_permutations = []
amount = min(amount, factorial(len(sents))-1)
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
for i in range(amount):
while True:
permutation = []
segm_perm = np.random.permutation(len(segments))
segment_permutations.append(segm_perm)
for segm_ix in segm_perm:
utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
permutation = permutation + utt_ixs
if permutation not in permutations:
break
permutations.append(permutation)
        return permutations[1:], segment_permutations  # drop index 0: the original order, kept only as a sentinel
def speaker_segment_ixs(self, speaker_ixs):
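        # map every utterance index to the index of its contiguous same-speaker segment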
i = 0
segment_indices = dict()
prev_speaker = speaker_ixs[0]
for j,speaker in enumerate(speaker_ixs):
if speaker != prev_speaker:
prev_speaker = speaker
i += 1
segment_indices[j] = i
return segment_indices
def swda_half_perturb(self, amount, speaker_ixs):
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
segment_permutations = []
permutations = [list(segm_ixs.keys())]
for _ in range(amount):
speaker = random.randint(0,1) # choose one of the speakers
speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments))
speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments))
#TODO: rename either speaker_ix or speaker_ixs, they are something different, but the names are too close
if len(speaker_to_perm) < 2:
return []
while True:
permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist()
new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix))
if speaker == 0 :
new_segments[::2] = permuted_speaker_ix
new_segments[1::2] = speaker_orig
else:
new_segments[1::2] = permuted_speaker_ix
new_segments[::2] = speaker_orig
segment_permutations.append(new_segments)
permutation = []
for segm_ix in new_segments:
utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
permutation = permutation + utt_ixs
if not permutation in permutations:
permutations.append(permutation)
break
return permutations[1:], segment_permutations
def swda_utterance_insertion(self, speaker_ixs, amounts):
segment_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segment_ixs.values()))
segment_permutations = []
permutations = []
i = 0
for _ in range(amounts):
while True: # actually: do ... while permutation not in permutations
i_from = random.randint(0, len(segments)-1)
i_to = random.randint(0, len(segments)-2)
segm_perm = deepcopy(segments)
rem_elem = segments[i_from]
segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:]
segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]
permutation = []
for segm_ix in segm_perm:
utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix))
permutation = permutation + utt_ixs
if permutation not in permutations:
permutations.append(permutation)
segment_permutations.append(segm_perm)
break
return permutations, segment_permutations
def swda_utterance_sampling(self, speaker_ixs, amount):
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
permutations = []
for i in range(amount):
(sentence, act, swda_name, ix) = self.draw_rand_sent()
insert_ix = random.choice(segments)
permutations.append((sentence, act, swda_name, ix, insert_ix))
return permutations
def convert_dset(self, amounts):
# create distinct train/validation/test files. they'll correspond to the created
# splits from the constructor
train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task))
val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task))
test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task))
if not os.path.exists(os.path.join(self.data_dir, 'train')):
os.makedirs(os.path.join(self.data_dir, 'train'))
if not os.path.exists(os.path.join(self.data_dir, 'validation')):
os.makedirs(os.path.join(self.data_dir, 'validation'))
if not os.path.exists(os.path.join(self.data_dir, 'test')):
os.makedirs(os.path.join(self.data_dir, 'test'))
trainfile = open(train_output_file, 'w')
valfile = open(val_output_file, 'w')
testfile = open(test_output_file, 'w')
shuffled_path = os.path.join(self.data_dir, "shuffled_{}".format(self.task))
if not os.path.isdir(shuffled_path):
os.mkdir(shuffled_path)
for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)):
utterances = []
acts = []
speaker_ixs = []
prev_act = "%"
for utt in trans.utterances:
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.word2id(self.tokenizer(sentence))
utterances.append(sentence)
act = utt.damsl_act_tag()
if act == None: act = "%"
if act == "+": act = prev_act
acts.append(self.da2num[act])
prev_act = act
if "A" in utt.caller:
speaker_ixs.append(0)
else:
speaker_ixs.append(1)
if self.task == 'up':
permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)
elif self.task == 'us':
permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts)
elif self.task == 'hup':
permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs)
elif self.task == 'ui':
permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts)
swda_fname = os.path.split(trans.swda_filename)[1]
shuffle_file = os.path.join(shuffled_path, swda_fname) # [:-4]
with open(shuffle_file, "w") as f:
csv_writer = csv.writer(f)
if self.task == 'us':
for perm in permuted_ixs:
(utt, da, name, ix, insert_ix) = perm
row = [name, ix,insert_ix]
csv_writer.writerow(row)
else:
for perm in segment_perms:
csv_writer.writerow(perm)
if self.task == 'us':
for p in permuted_ixs:
a = " ".join([str(x) for x in acts])
u = str(utterances)
insert_sent, insert_da, name, ix, insert_ix = p
insert_da = self.da2num[insert_da]
p_a = deepcopy(acts)
p_a[insert_ix] = insert_da
pa = " ".join([str(x) for x in p_a])
p_u = deepcopy(utterances)
p_u[insert_ix] = insert_sent
if i in self.train_ixs:
trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
trainfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
if i in self.val_ixs:
valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
valfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
if i in self.test_ixs:
testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
testfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
else:
for p in permuted_ixs:
a = " ".join([str(x) for x in acts])
u = str(utterances)
pa = [acts[i] for i in p]
p_a = " ".join([str(x) for x in pa])
pu = [utterances[i] for i in p]
p_u = str(pu)
if i in self.train_ixs:
trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
trainfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
if i in self.val_ixs:
valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
valfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
if i in self.test_ixs:
testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
testfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--datadir",
required=True,
type=str,
help="""The input directory where the files of the corpus
are located. """)
parser.add_argument("--corpus",
required=True,
type=str,
help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """)
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--amount',
type=int,
default=20,
help="random seed for initialization")
parser.add_argument('--word2id',
action='store_true',
help= "convert the words to ids")
parser.add_argument('--task',
required=True,
type=str,
default="up",
help="""for which task the dataset should be created.
alternatives: up (utterance permutation)
us (utterance sampling)
                                      hup (half utterance perturbation)
ui (utterance insertion, nothing directly added!)""")
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
if args.word2id:
f = open(os.path.join(args.datadir, "itos.txt"), "r")
word2id_dict = dict()
for i, word in enumerate(f):
word2id_dict[word[:-1].lower()] = i
word2id = lambda x: [word2id_dict[y] for y in x] # don't convert words to ids (yet). It gets done in the glove wrapper of mtl_coherence.py
else:
word2id = lambda x: x
tokenizer = word_tokenize
if args.corpus == 'DailyDialog':
converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)
converter.create_act_utt()
elif args.corpus == 'Switchboard':
converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)
converter.create_vocab()
converter.convert_dset(amounts=args.amount)
def getKeysByValue(dictOfElements, valueToFind):
listOfKeys = list()
for item in dictOfElements.items():
if item[1] == valueToFind:
listOfKeys.append(item[0])
return listOfKeys
def switchboard_da_mapping():
mapping_dict = dict({
"sd": 1,
"b": 2,
"sv": 3,
"aa": 4,
"%-": 5,
"ba": 6,
"qy": 7,
"x": 8,
"ny": 9,
"fc": 10,
"%": 11,
"qw": 12,
"nn": 13,
"bk": 14,
"h": 15,
"qy^d": 16,
"o": 17,
"bh": 18,
"^q": 19,
"bf": 20,
"na": 21,
"ny^e": 22,
"ad": 23,
"^2": 24,
"b^m": 25,
"qo": 26,
"qh": 27,
"^h": 28,
"ar": 29,
"ng": 30,
"nn^e": 31,
"br": 32,
"no": 33,
"fp": 34,
"qrr": 35,
"arp": 36,
"nd": 37,
"t3": 38,
"oo": 39,
"co": 40,
"cc": 41,
"t1": 42,
"bd": 43,
"aap": 44,
"am": 45,
"^g": 46,
"qw^d": 47,
"fa": 48,
"ft":49
})
d = defaultdict(lambda: 11)
for (k, v) in mapping_dict.items():
d[k] = v
return d
if __name__ == "__main__":
main()
| [
"pandas.read_csv",
"copy.deepcopy",
"argparse.ArgumentParser",
"os.path.split",
"os.path.isdir",
"numpy.random.seed",
"os.mkdir",
"random.randint",
"numpy.random.permutation",
"random.choice",
"sklearn.model_selection.train_test_split",
"csv.writer",
"ast.literal_eval",
"os.path.isfile",
"re.sub",
"os.path.join",
"random.seed",
"collections.Counter",
"collections.defaultdict",
"swda.swda.CorpusReader"
]
| [((21335, 21360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (21358, 21360), False, 'import argparse\n'), ((22879, 22901), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (22890, 22901), False, 'import random\n'), ((22906, 22931), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (22920, 22931), True, 'import numpy as np\n'), ((25293, 25317), 'collections.defaultdict', 'defaultdict', (['(lambda : 11)'], {}), '(lambda : 11)\n', (25304, 25317), False, 'from collections import Counter, defaultdict\n'), ((1684, 1715), 'random.randint', 'random.randint', (['(0)', '(sent_len - 1)'], {}), '(0, sent_len - 1)\n', (1698, 1715), False, 'import random\n'), ((1892, 1919), 'ast.literal_eval', 'literal_eval', (["df['utt'][ix]"], {}), "(df['utt'][ix])\n", (1904, 1919), False, 'from ast import literal_eval\n'), ((3613, 3655), 'os.path.join', 'os.path.join', (['data_dir', '"""act_utt_name.txt"""'], {}), "(data_dir, 'act_utt_name.txt')\n", (3625, 3655), False, 'import os\n'), ((4464, 4493), 'csv.writer', 'csv.writer', (['of'], {'delimiter': '"""|"""'}), "(of, delimiter='|')\n", (4474, 4493), False, 'import csv\n'), ((6075, 6108), 'os.path.isfile', 'os.path.isfile', (['self.act_utt_file'], {}), '(self.act_utt_file)\n', (6089, 6108), False, 'import os\n'), ((9308, 9330), 'swda.swda.CorpusReader', 'CorpusReader', (['data_dir'], {}), '(data_dir)\n', (9320, 9330), False, 'from swda.swda import CorpusReader, Transcript, Utterance\n'), ((9980, 10054), 'sklearn.model_selection.train_test_split', 'train_test_split', (['val_ixs'], {'shuffle': '(True)', 'train_size': '(0.5)', 'random_state': 'seed'}), '(val_ixs, shuffle=True, train_size=0.5, random_state=seed)\n', (9996, 10054), False, 'from sklearn.model_selection import train_test_split\n'), ((11003, 11012), 'collections.Counter', 'Counter', ([], {}), '()\n', (11010, 11012), False, 'from collections import Counter, defaultdict\n'), ((11285, 11324), 'os.path.join', 'os.path.join', (['self.data_dir', '"""itos.txt"""'], {}), "(self.data_dir, 'itos.txt')\n", (11297, 11324), False, 'import os\n'), ((2231, 2251), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (2245, 2251), False, 'import random\n'), ((2404, 2437), 'numpy.random.permutation', 'np.random.permutation', (['speaker_ix'], {}), '(speaker_ix)\n', (2425, 2437), True, 'import numpy as np\n'), ((3892, 3915), 'os.path.split', 'os.path.split', (['data_dir'], {}), '(data_dir)\n', (3905, 3915), False, 'import os\n'), ((5761, 5789), 'os.path.split', 'os.path.split', (['self.data_dir'], {}), '(self.data_dir)\n', (5774, 5789), False, 'import os\n'), ((5893, 5921), 'os.path.isdir', 'os.path.isdir', (['shuffled_path'], {}), '(shuffled_path)\n', (5906, 5921), False, 'import os\n'), ((5935, 5958), 'os.mkdir', 'os.mkdir', (['shuffled_path'], {}), '(shuffled_path)\n', (5943, 5958), False, 'import os\n'), ((5975, 6000), 'os.path.isfile', 'os.path.isfile', (['dial_file'], {}), '(dial_file)\n', (5989, 6000), False, 'import os\n'), ((6005, 6029), 'os.path.isfile', 'os.path.isfile', (['act_file'], {}), '(act_file)\n', (6019, 6029), False, 'import os\n'), ((6218, 6281), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '"""|"""', 'names': "['act', 'utt', 'dialogue', 'ix']"}), "(f, sep='|', names=['act', 'utt', 'dialogue', 'ix'])\n", (6229, 6281), True, 'import pandas as pd\n'), ((10279, 10325), 're.sub', 're.sub', (['"""([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)"""', '""""""', 'utt.text'], {}), "('([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)', '', 
utt.text)\n", (10285, 10325), False, 'import re\n'), ((10558, 10590), 'os.path.split', 'os.path.split', (['utt.swda_filename'], {}), '(utt.swda_filename)\n', (10571, 10590), False, 'import os\n'), ((11086, 11132), 're.sub', 're.sub', (['"""([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)"""', '""""""', 'utt.text'], {}), "('([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)', '', utt.text)\n", (11092, 11132), False, 'import re\n'), ((13105, 13125), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (13119, 13125), False, 'import random\n'), ((15925, 15948), 'random.choice', 'random.choice', (['segments'], {}), '(segments)\n', (15938, 15948), False, 'import random\n'), ((17187, 17215), 'os.path.isdir', 'os.path.isdir', (['shuffled_path'], {}), '(shuffled_path)\n', (17200, 17215), False, 'import os\n'), ((17229, 17252), 'os.mkdir', 'os.mkdir', (['shuffled_path'], {}), '(shuffled_path)\n', (17237, 17252), False, 'import os\n'), ((18676, 18715), 'os.path.join', 'os.path.join', (['shuffled_path', 'swda_fname'], {}), '(shuffled_path, swda_fname)\n', (18688, 18715), False, 'import os\n'), ((22971, 23009), 'os.path.join', 'os.path.join', (['args.datadir', '"""itos.txt"""'], {}), "(args.datadir, 'itos.txt')\n", (22983, 23009), False, 'import os\n'), ((3224, 3244), 'copy.deepcopy', 'deepcopy', (['ix_removed'], {}), '(ix_removed)\n', (3232, 3244), False, 'from copy import deepcopy\n'), ((7638, 7651), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (7648, 7651), False, 'import csv\n'), ((14947, 14965), 'copy.deepcopy', 'deepcopy', (['segments'], {}), '(segments)\n', (14955, 14965), False, 'from copy import deepcopy\n'), ((16573, 16609), 'os.path.join', 'os.path.join', (['self.data_dir', '"""train"""'], {}), "(self.data_dir, 'train')\n", (16585, 16609), False, 'import os\n'), ((16636, 16672), 'os.path.join', 'os.path.join', (['self.data_dir', '"""train"""'], {}), "(self.data_dir, 'train')\n", (16648, 16672), False, 'import os\n'), ((16704, 16745), 'os.path.join', 'os.path.join', (['self.data_dir', '"""validation"""'], {}), "(self.data_dir, 'validation')\n", (16716, 16745), False, 'import os\n'), ((16772, 16813), 'os.path.join', 'os.path.join', (['self.data_dir', '"""validation"""'], {}), "(self.data_dir, 'validation')\n", (16784, 16813), False, 'import os\n'), ((16845, 16880), 'os.path.join', 'os.path.join', (['self.data_dir', '"""test"""'], {}), "(self.data_dir, 'test')\n", (16857, 16880), False, 'import os\n'), ((16907, 16942), 'os.path.join', 'os.path.join', (['self.data_dir', '"""test"""'], {}), "(self.data_dir, 'test')\n", (16919, 16942), False, 'import os\n'), ((17534, 17580), 're.sub', 're.sub', (['"""([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)"""', '""""""', 'utt.text'], {}), "('([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)', '', utt.text)\n", (17540, 17580), False, 'import re\n'), ((18611, 18645), 'os.path.split', 'os.path.split', (['trans.swda_filename'], {}), '(trans.swda_filename)\n', (18624, 18645), False, 'import os\n'), ((18800, 18813), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (18810, 18813), False, 'import csv\n'), ((8302, 8316), 'copy.deepcopy', 'deepcopy', (['acts'], {}), '(acts)\n', (8310, 8316), False, 'from copy import deepcopy\n'), ((8447, 8465), 'copy.deepcopy', 'deepcopy', (['tok_seqs'], {}), '(tok_seqs)\n', (8455, 8465), False, 'from copy import deepcopy\n'), ((19499, 19513), 'copy.deepcopy', 'deepcopy', (['acts'], {}), '(acts)\n', (19507, 19513), False, 'from copy import deepcopy\n'), ((19644, 19664), 'copy.deepcopy', 'deepcopy', (['utterances'], {}), '(utterances)\n', (19652, 19664), False, 'from copy 
import deepcopy\n'), ((13569, 13607), 'numpy.random.permutation', 'np.random.permutation', (['speaker_to_perm'], {}), '(speaker_to_perm)\n', (13590, 13607), True, 'import numpy as np\n')] |
import pytest
from janitor.utils import _clean_accounting_column
@pytest.mark.utils
def test_clean_accounting_column():
test_str = "(1,000)"
assert _clean_accounting_column(test_str) == float(-1000)
@pytest.mark.utils
def test_clean_accounting_column_zeroes():
test_str = "()"
assert _clean_accounting_column(test_str) == 0.00
| [
"janitor.utils._clean_accounting_column"
]
| [((159, 193), 'janitor.utils._clean_accounting_column', '_clean_accounting_column', (['test_str'], {}), '(test_str)\n', (183, 193), False, 'from janitor.utils import _clean_accounting_column\n'), ((305, 339), 'janitor.utils._clean_accounting_column', '_clean_accounting_column', (['test_str'], {}), '(test_str)\n', (329, 339), False, 'from janitor.utils import _clean_accounting_column\n')] |
# coding=utf-8
import sys, getopt
import urllib
import requests
import requests_cache
import re
import time
from bs4 import BeautifulSoup
from requests import Session
sys.path.append("/home/taejoon1kim/BERT/my_bert")
from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath
from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON
def preprocessor(text):
if "감독" in text:
return text[0:text.find("감독")]
elif "등장인물" in text:
return text[0:text.find("등장인물")]
elif "누구야" in text:
return text[0:text.find("누구야")]
elif "알려줘" in text:
return text[0:text.find("알려줘")]
elif "보여줘" in text:
return text[0:text.find("보여줘")]
elif "찾아줘" in text:
return text[0:text.find("찾아줘")]
elif "언제야" in text:
return text[0:text.find("언제")]
elif "어디" in text:
return text[0:text.find("어디")]
elif "뭐야" in text:
return text[0:text.find("뭐야")]
else :
return text
def checkQType(text):
global Q_TYPE
if "감독" in text or "어디서" in text or "언제" in text or "뭐야" in text:
Q_TYPE = 2
elif "누구야" in text:
Q_TYPE = 1
else:
Q_TYPE = 3
SEARCH_RESULT['Q_TYPE'] = Q_TYPE
print("QUESTION TYPE : ", Q_TYPE)
WIKI_URL = "wikipedia.org"
YOUTUBE_URL = "youtube.com/channel"
NO_RESULT = "no_result"
SEARCH_RESULT = {
"WIKI" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"FIRST" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"YOUTUBE" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"test_input.json" : f"{NO_RESULT}",
"search_result.json" : f"{NO_RESULT}",
"Q_TYPE" : f"{NO_RESULT}"
}
def downloadURL(URL):
# desktop user-agent
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0"
# mobile user-agent
MOBILE_USER_AGENT = "Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36"
headers = {"user-agent" : USER_AGENT}
#headers = {"user-agent" : USER_AGENT, "cache-contorl" : "public,max-age=3600"}
#headers = {"user-agent" : USER_AGENT, "cache-contorl" : "no-cache"}
#s = Session()
#s.headers.update(headers)
resp = requests.get(URL, headers=headers)
#resp = s.get(URL)
results = [{"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"}]
print(resp.status_code)
if resp.status_code == 200:
soup = BeautifulSoup(resp.content, "lxml")
results = []
for g in soup.find_all('div', class_='r'):
anchors = g.find_all('a')
if anchors:
link = anchors[0]['href']
title = g.find('h3').text
item = {
"title": title,
"link": link
}
results.append(item)
#print(link)
global SEARCH_RESULT
if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT:
SEARCH_RESULT['WIKI']['title'] = title
SEARCH_RESULT['WIKI']['link'] = link
elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT:
SEARCH_RESULT['YOUTUBE']['title'] = title
SEARCH_RESULT['YOUTUBE']['link'] = link
if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT:
break
        # guard against an empty result set (avoids an IndexError when parsing finds nothing)
        if results:
            SEARCH_RESULT['FIRST']['title'] = results[0].get('title')
            SEARCH_RESULT['FIRST']['link'] = results[0].get('link')
else:
SEARCH_RESULT['FIRST']['title'] = f"resp.status_code {resp.status_code}"
return results
def download(text):
global cache
cache = getDownloadCachePath(text)
global start, Q_TYPE
init_start = time.time()
start = time.time()
requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache')
#if cacheExist(cache) == False:
if True:
checkQType(text)
query_text = preprocessor(text)
## 1st SEARCH
query = query_text
query = query.replace(' ', '+')
if Q_TYPE <= 2:
URL = f"https://google.com/search?q={query} site:wikipedia.org"
else :
URL = f"https://google.com/search?q={query}"
print(URL)
downloadURL(URL)
printTime("1st Search Time")
pWithoutTag = f"{NO_RESULT}"
imgTag = f"{NO_RESULT}"
## 2nd SEARCH
if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE > 2:
URL = f"https://google.com/search?q={query} site:wikipedia.org"
downloadURL(URL)
if SEARCH_RESULT['WIKI']['title'] == NO_RESULT:
pWithoutTag = "위키피디아가 없네요. 링크를 열어보세요"
else:
resp = requests.get(SEARCH_RESULT['WIKI']['link'])
if resp.status_code == 200:
soup = BeautifulSoup(resp.content, "lxml")
p = soup.find('p')
pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip()
pWithoutTag = re.sub('"', '', str(pWithoutTag), 0).strip()
pWithoutTag = re.sub('\n', ' ', str(pWithoutTag), 0).strip()
imgTag = "http:" + soup.find('a', {'class':'image'}).find('img')['src']
## GENERATE BERT INPUT
JSON_1 = "{\"version\":\"mytest_dev\",\"data\":[{\"paragraphs\":[{\"qas\":[{\"answers\":[{\"text\":\"테스트\",\"answer_start\":0}],\"id\":\"1-1\",\"question\":\"테스트\"}],\"context\":\""
JSON_2 = "\"}],\"title\":\"테스트\"}]}"
FULL_JSON = JSON_1 + pWithoutTag + JSON_2
writeJson(FULL_JSON, BERT_INPUT_JSON)
printTime("2nd Search Time")
SEARCH_RESULT['test_input.json'] = FULL_JSON
## GENERATE SEARCH RESULT
FULL_JSON = "{\"google\":[{\"title\":\"" + SEARCH_RESULT['FIRST']['title'] + "\",\"link\":\"" + SEARCH_RESULT['FIRST']['link'] + "\"}],\"wiki\":[{\"title\":\"" + SEARCH_RESULT['WIKI']['title'] + "\",\"link\":\"" + SEARCH_RESULT['WIKI']['link'] + "\"}],\"youtube\":[{\"title\":\"" + SEARCH_RESULT['YOUTUBE']['title'] + "\",\"link\":\"" + SEARCH_RESULT['YOUTUBE']['link'] + "\"}],\"Q_TYPE\":\"" + str(Q_TYPE) + "\",\"IMG_SRC\":\"" + str(imgTag) + "\"}"
writeJson(FULL_JSON, BERT_SEARCH_JSON)
SEARCH_RESULT['search_result.json'] = FULL_JSON
writeCache(cache, SEARCH_RESULT)
else:
CACHE_RESULT = readCache(cache)
writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON)
writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON)
Q_TYPE = CACHE_RESULT['Q_TYPE']
print(f"[SEARCH] Total time : {format(time.time() - init_start, '0.5f')}")
return Q_TYPE
def writeJson(json, filePath):
f = open(filePath, 'w')
f.write(json)
f.close()
def printTime(text):
global start
print(f"[SEARCH] {text} : {format(time.time() - start, '0.5f')}")
start = time.time()
def main(argv):
download(argv[1])
if __name__ == "__main__":
main(sys.argv)
| [
"requests_cache.install_cache",
"requests.get",
"bs4.BeautifulSoup",
"utils.cacheUtils.writeCache",
"time.time",
"utils.cacheUtils.readCache",
"sys.path.append",
"utils.cacheUtils.getDownloadCachePath"
]
| [((169, 218), 'sys.path.append', 'sys.path.append', (['"""/home/taejoon1kim/BERT/my_bert"""'], {}), "('/home/taejoon1kim/BERT/my_bert')\n", (184, 218), False, 'import sys, getopt\n'), ((2320, 2354), 'requests.get', 'requests.get', (['URL'], {'headers': 'headers'}), '(URL, headers=headers)\n', (2332, 2354), False, 'import requests\n'), ((3874, 3900), 'utils.cacheUtils.getDownloadCachePath', 'getDownloadCachePath', (['text'], {}), '(text)\n', (3894, 3900), False, 'from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath\n'), ((3943, 3954), 'time.time', 'time.time', ([], {}), '()\n', (3952, 3954), False, 'import time\n'), ((3967, 3978), 'time.time', 'time.time', ([], {}), '()\n', (3976, 3978), False, 'import time\n'), ((3983, 4060), 'requests_cache.install_cache', 'requests_cache.install_cache', (['"""/home/taejoon1kim/BERT/my_bert/download_cache"""'], {}), "('/home/taejoon1kim/BERT/my_bert/download_cache')\n", (4011, 4060), False, 'import requests_cache\n'), ((7066, 7077), 'time.time', 'time.time', ([], {}), '()\n', (7075, 7077), False, 'import time\n'), ((2522, 2557), 'bs4.BeautifulSoup', 'BeautifulSoup', (['resp.content', '"""lxml"""'], {}), "(resp.content, 'lxml')\n", (2535, 2557), False, 'from bs4 import BeautifulSoup\n'), ((6490, 6522), 'utils.cacheUtils.writeCache', 'writeCache', (['cache', 'SEARCH_RESULT'], {}), '(cache, SEARCH_RESULT)\n', (6500, 6522), False, 'from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath\n'), ((6556, 6572), 'utils.cacheUtils.readCache', 'readCache', (['cache'], {}), '(cache)\n', (6565, 6572), False, 'from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath\n'), ((4933, 4976), 'requests.get', 'requests.get', (["SEARCH_RESULT['WIKI']['link']"], {}), "(SEARCH_RESULT['WIKI']['link'])\n", (4945, 4976), False, 'import requests\n'), ((5040, 5075), 'bs4.BeautifulSoup', 'BeautifulSoup', (['resp.content', '"""lxml"""'], {}), "(resp.content, 'lxml')\n", (5053, 5075), False, 'from bs4 import BeautifulSoup\n'), ((6796, 6807), 'time.time', 'time.time', ([], {}), '()\n', (6805, 6807), False, 'import time\n'), ((7022, 7033), 'time.time', 'time.time', ([], {}), '()\n', (7031, 7033), False, 'import time\n')] |
from xml.dom.minidom import Document, parse
class InfoBatch:
def __init__(self, title, pre_node_titles):
self.title = title
self.pre_node_titles = pre_node_titles
def save_data_xml(course_list, file_path):
doc = Document()
courses = doc.createElement('course_list')
doc.appendChild(courses)
for course in course_list:
single_course = doc.createElement('course')
courses.appendChild(single_course)
single_course_name = doc.createElement('course_name')
course_name = doc.createTextNode(course.name)
single_course.appendChild(single_course_name)
single_course_name.appendChild(course_name)
pre_course = doc.createElement('pre_course')
pre_course_name = ','.join(course.pre_course)
course_name = doc.createTextNode(pre_course_name)
single_course.appendChild(pre_course)
pre_course.appendChild(course_name)
after_course = doc.createElement('after_course')
after_course_name = ','.join(course.after_course)
course_name = doc.createTextNode(after_course_name)
single_course.appendChild(after_course)
after_course.appendChild(course_name)
with open(file_path, 'wb+') as f:
f.write(doc.toprettyxml(indent='\t', encoding='utf-8'))
def load_data_xml(file_path):
info_list = []
doc = parse(file_path)
courses = doc.getElementsByTagName("course")
for course in courses:
title = course.getElementsByTagName("course_name")[0].childNodes[0].data
try:
pre_node_titles = course.getElementsByTagName("pre_node_titles")[0].childNodes[0].data
pre_node_titles = pre_node_titles.split(',')
info_list.append(InfoBatch(title, pre_node_titles))
except IndexError:
info_list.append(InfoBatch(title, []))
return info_list
'''
course_list = []
course_list.append(Course('Advance Math'))
course_list.append(Course('Linear Algebra'))
course_list.append(Course('Procedure Oriented Programming'))
course_list.append(Course('Object Oriented Programming'))
course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])
course_list.append(Course('College Physics'))
course_list[-1].add_pre_course(course_list, ['Advance Math'])
course_list.append(Course('Digital Logic'))
course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])
course_list.append(Course('Computer Organization'))
course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic'])
course_list.append(Course('Computer Architecture'))
course_list[-1].add_pre_course(course_list,
['Advance Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization'])
save_data_xml(course_list, 'resource/data/data.xml')
'''
| [
"xml.dom.minidom.Document",
"xml.dom.minidom.parse"
]
| [((240, 250), 'xml.dom.minidom.Document', 'Document', ([], {}), '()\n', (248, 250), False, 'from xml.dom.minidom import Document, parse\n'), ((1367, 1383), 'xml.dom.minidom.parse', 'parse', (['file_path'], {}), '(file_path)\n', (1372, 1383), False, 'from xml.dom.minidom import Document, parse\n')] |
import pytest
from theheck.rules.git_rm_local_modifications import match, get_new_command
from theheck.types import Command
@pytest.fixture
def output(target):
return ('error: the following file has local modifications:\n {}\n(use '
'--cached to keep the file, or -f to force removal)').format(target)
@pytest.mark.parametrize('script, target', [
('git rm foo', 'foo'),
('git rm foo bar', 'bar')])
def test_match(output, script, target):
assert match(Command(script, output))
@pytest.mark.parametrize('script', ['git rm foo', 'git rm foo bar', 'git rm'])
def test_not_match(script):
assert not match(Command(script, ''))
@pytest.mark.parametrize('script, target, new_command', [
('git rm foo', 'foo', ['git rm --cached foo', 'git rm -f foo']),
('git rm foo bar', 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])])
def test_get_new_command(output, script, target, new_command):
assert get_new_command(Command(script, output)) == new_command
| [
"pytest.mark.parametrize",
"theheck.types.Command"
]
| [((326, 424), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""script, target"""', "[('git rm foo', 'foo'), ('git rm foo bar', 'bar')]"], {}), "('script, target', [('git rm foo', 'foo'), (\n 'git rm foo bar', 'bar')])\n", (349, 424), False, 'import pytest\n'), ((514, 591), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""script"""', "['git rm foo', 'git rm foo bar', 'git rm']"], {}), "('script', ['git rm foo', 'git rm foo bar', 'git rm'])\n", (537, 591), False, 'import pytest\n'), ((665, 871), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""script, target, new_command"""', "[('git rm foo', 'foo', ['git rm --cached foo', 'git rm -f foo']), (\n 'git rm foo bar', 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])]"], {}), "('script, target, new_command', [('git rm foo',\n 'foo', ['git rm --cached foo', 'git rm -f foo']), ('git rm foo bar',\n 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])])\n", (688, 871), False, 'import pytest\n'), ((486, 509), 'theheck.types.Command', 'Command', (['script', 'output'], {}), '(script, output)\n', (493, 509), False, 'from theheck.types import Command\n'), ((641, 660), 'theheck.types.Command', 'Command', (['script', '""""""'], {}), "(script, '')\n", (648, 660), False, 'from theheck.types import Command\n'), ((963, 986), 'theheck.types.Command', 'Command', (['script', 'output'], {}), '(script, output)\n', (970, 986), False, 'from theheck.types import Command\n')] |
from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
import pyvista as pv
from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .ddrtree import DDRTree, cal_ncenter
from .slice import euclidean_distance, three_d_slice
####################################
# Changes along a vector direction #
####################################
def changes_along_line(
model: Union[PolyData, UnstructuredGrid],
key: Union[str, list] = None,
n_points: int = 100,
vec: Union[tuple, list] = (1, 0, 0),
center: Union[tuple, list] = None,
) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]:
slices, line_points, line = three_d_slice(
model=model, method="line", n_slices=n_points, vec=vec, center=center
)
x, y = [], []
x_length = 0
for slice, (point_i, point) in zip(slices, enumerate(line_points)):
change_value = np.asarray(slice[key]).sum()
y.append(change_value)
if point_i == 0:
x.append(0)
else:
point1 = line_points[point_i - 1].points.flatten()
point2 = line_points[point_i].points.flatten()
ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3)
x_length += ed
x.append(x_length)
return np.asarray(x), np.asarray(y), slices, line
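# Hedged usage sketch (illustrative only; ``mesh_model`` and "gene_x" are
# hypothetical names, not part of this module):
#
#   x, y, slices, line = changes_along_line(
#       model=mesh_model, key="gene_x", n_points=100, vec=(1, 0, 0)
#   )
#   # x: cumulative euclidean distance between consecutive slice anchor points
#   # y: sum of ``key`` over the points of each slice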
#################################
# Changes along the model shape #
#################################
def changes_along_shape(
model: Union[PolyData, UnstructuredGrid],
spatial_key: Optional[str] = None,
key_added: Optional[str] = "rd_spatial",
dim: int = 2,
inplace: bool = False,
**kwargs,
):
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
DDRTree_kwargs = {
"maxIter": 10,
"sigma": 0.001,
"gamma": 10,
"eps": 0,
"dim": dim,
"Lambda": 5 * X.shape[1],
"ncenter": cal_ncenter(X.shape[1]),
}
DDRTree_kwargs.update(kwargs)
Z, Y, stree, R, W, Q, C, objs = DDRTree(X, **DDRTree_kwargs)
# Obtain the real part of the complex argument
model[key_added] = np.real(W).astype(np.float64)
return model if not inplace else None
##############################
# Changes along the branches #
##############################
def ElPiGraph_tree(
X: np.ndarray,
NumNodes: int = 50,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a principal elastic tree.
Reference: Albergante et al. (2020), Robust and Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph.
Args:
X: DxN, data matrix list.
        NumNodes: The number of nodes of the principal graph. Use a range of 10 to 100 for the ElPiGraph approach.
**kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. For details, please see:
https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py
Returns:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
"""
try:
import elpigraph
except ImportError:
raise ImportError(
"You need to install the package `elpigraph-python`."
"\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`."
)
ElPiGraph_kwargs = {
"alpha": 0.01,
"FinalEnergy": "Penalized",
"StoreGraphEvolution": True,
"GPU": False,
}
ElPiGraph_kwargs.update(kwargs)
if ElPiGraph_kwargs["GPU"] is True:
try:
import cupy
except ImportError:
raise ImportError(
"You need to install the package `cupy`."
"\nInstall cupy via `pip install cupy-cuda113`."
)
elpi_tree = elpigraph.computeElasticPrincipalTree(
X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs
)
nodes = elpi_tree[0]["NodePositions"] # ['AllNodePositions'][k]
matrix_edges_weights = elpi_tree[0]["ElasticMatrix"] # ['AllElasticMatrices'][k]
matrix_edges_weights = np.triu(matrix_edges_weights, 1)
edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose()
return nodes, edges
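# Hedged example (assumes ``elpigraph-python`` is installed; ``X_points`` is a
# hypothetical (N, 3) coordinate array):
#
#   nodes, edges = ElPiGraph_tree(X=X_points, NumNodes=30)
#   # nodes: (NumNodes, 3) node positions; edges: (E, 2) integer node-index pairs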
def SimplePPT_tree(
X: np.ndarray,
NumNodes: int = 50,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a simple principal tree.
Reference: Mao et al. (2015), SimplePPT: A simple principal tree algorithm, SIAM International Conference on Data Mining.
Args:
X: DxN, data matrix list.
        NumNodes: The number of nodes of the principal graph. Use a range of 100 to 2000 for the PPT approach.
**kwargs: Other parameters used in simpleppt.ppt. For details, please see:
https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py
Returns:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
"""
try:
import igraph
import simpleppt
except ImportError:
raise ImportError(
"You need to install the package `simpleppt` and `igraph`."
"\nInstall simpleppt via `pip install -U simpleppt`."
"\nInstall igraph via `pip install -U igraph`"
)
SimplePPT_kwargs = {
"seed": 1,
"lam": 10,
}
SimplePPT_kwargs.update(kwargs)
X = np.asarray(X)
ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs)
R = ppt_tree.R
nodes = (np.dot(X.T, R) / R.sum(axis=0)).T
B = ppt_tree.B
edges = np.array(
igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected").get_edgelist()
)
return nodes, edges
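# Note (added for clarity): SimplePPT_tree returns the same (nodes, edges) pair
# as ElPiGraph_tree above, so the two back-ends are interchangeable; see the
# ``rd_method`` switch in ``changes_along_branch`` below.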
def map_points_to_branch(
model: Union[PolyData, UnstructuredGrid],
nodes: np.ndarray,
spatial_key: Optional[str] = None,
key_added: Optional[str] = "nodes",
inplace: bool = False,
**kwargs,
):
"""
Find the closest principal tree node to any point in the model through KDTree.
Args:
model: A reconstruct model.
nodes: The nodes in the principal tree.
spatial_key: The key that corresponds to the coordinates of the point in the model. If spatial_key is None,
the coordinates are model.points.
key_added: The key under which to add the nodes labels.
inplace: Updates model in-place.
kwargs: Other parameters used in scipy.spatial.KDTree.
Returns:
A model, which contains the following properties:
`model.point_data[key_added]`, the nodes labels array.
"""
from scipy.spatial import KDTree
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
nodes_kdtree = KDTree(np.asarray(nodes), **kwargs)
_, ii = nodes_kdtree.query(np.asarray(X), k=1)
model.point_data[key_added] = ii
return model if not inplace else None
def map_gene_to_branch(
model: Union[PolyData, UnstructuredGrid],
tree: PolyData,
key: Union[str, list],
nodes_key: Optional[str] = "nodes",
inplace: bool = False,
):
"""
    Map the gene expression values of the model onto the corresponding nodes of the principal tree.
Args:
model: A reconstruct model contains the gene expression label.
tree: A three-dims principal tree model contains the nodes label.
key: The key that corresponds to the gene expression.
nodes_key: The key that corresponds to the coordinates of the nodes in the tree.
inplace: Updates tree model in-place.
Returns:
A tree, which contains the following properties:
`tree.point_data[key]`, the gene expression array.
"""
model = model.copy()
model_data = pd.DataFrame(model[nodes_key], columns=["nodes_id"])
key = [key] if isinstance(key, str) else key
for sub_key in key:
model_data[sub_key] = np.asarray(model[sub_key])
model_data = model_data.groupby(by="nodes_id").sum()
model_data["nodes_id"] = model_data.index
model_data.index = range(len(model_data.index))
tree = tree.copy() if not inplace else tree
tree_data = pd.DataFrame(tree[nodes_key], columns=["nodes_id"])
tree_data = pd.merge(tree_data, model_data, how="outer", on="nodes_id")
tree_data.fillna(value=0, inplace=True)
for sub_key in key:
tree.point_data[sub_key] = tree_data[sub_key].values
return tree if not inplace else None
def construct_tree_model(
nodes: np.ndarray,
edges: np.ndarray,
key_added: Optional[str] = "nodes",
) -> PolyData:
"""
Construct a principal tree model.
Args:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
key_added: The key under which to add the nodes labels.
Returns:
A three-dims principal tree model, which contains the following properties:
`tree_model.point_data[key_added]`, the nodes labels array.
"""
padding = np.empty(edges.shape[0], int) * 2
padding[:] = 2
edges_w_padding = np.vstack((padding, edges.T)).T
tree_model = pv.PolyData(nodes, edges_w_padding)
tree_model.point_data[key_added] = np.arange(0, len(nodes), 1)
return tree_model
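# Hedged end-to-end sketch (added for illustration; the helper name and its
# arguments are hypothetical, not part of the original API): build a principal
# tree for a model and label every model point with its nearest tree node.
def _example_branch_pipeline(model, spatial_key=None, num_nodes=50):
    X = model.points if spatial_key is None else model[spatial_key]
    nodes, edges = ElPiGraph_tree(X=X, NumNodes=num_nodes)
    # label each point of ``model`` with the index of its closest node
    map_points_to_branch(model=model, nodes=nodes, spatial_key=spatial_key,
                         key_added="nodes", inplace=True)
    # a PolyData of the tree itself, with node indices stored under "nodes"
    return construct_tree_model(nodes=nodes, edges=edges, key_added="nodes")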
def changes_along_branch(
model: Union[PolyData, UnstructuredGrid],
spatial_key: Optional[str] = None,
map_key: Union[str, list] = None,
key_added: Optional[str] = "nodes",
rd_method: Literal["ElPiGraph", "SimplePPT"] = "ElPiGraph",
NumNodes: int = 50,
inplace: bool = False,
**kwargs,
) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]:
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
if rd_method == "ElPiGraph":
nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs)
elif rd_method == "SimplePPT":
nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs)
else:
raise ValueError(
"`rd_method` value is wrong."
"\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`."
)
map_points_to_branch(
model=model,
nodes=nodes,
spatial_key=spatial_key,
key_added=key_added,
inplace=True,
)
tree_model = construct_tree_model(nodes=nodes, edges=edges)
if not (map_key is None):
map_gene_to_branch(
model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True
)
return model if not inplace else None, tree_model
| [
"simpleppt.ppt",
"pyvista.PolyData",
"pandas.merge",
"numpy.asarray",
"numpy.real",
"numpy.dot",
"numpy.empty",
"numpy.vstack",
"numpy.nonzero",
"pandas.DataFrame",
"numpy.triu"
]
| [((4270, 4302), 'numpy.triu', 'np.triu', (['matrix_edges_weights', '(1)'], {}), '(matrix_edges_weights, 1)\n', (4277, 4302), True, 'import numpy as np\n'), ((5574, 5587), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (5584, 5587), True, 'import numpy as np\n'), ((5603, 5657), 'simpleppt.ppt', 'simpleppt.ppt', ([], {'X': 'X', 'Nodes': 'NumNodes'}), '(X=X, Nodes=NumNodes, **SimplePPT_kwargs)\n', (5616, 5657), False, 'import simpleppt\n'), ((7935, 7987), 'pandas.DataFrame', 'pd.DataFrame', (['model[nodes_key]'], {'columns': "['nodes_id']"}), "(model[nodes_key], columns=['nodes_id'])\n", (7947, 7987), True, 'import pandas as pd\n'), ((8339, 8390), 'pandas.DataFrame', 'pd.DataFrame', (['tree[nodes_key]'], {'columns': "['nodes_id']"}), "(tree[nodes_key], columns=['nodes_id'])\n", (8351, 8390), True, 'import pandas as pd\n'), ((8407, 8466), 'pandas.merge', 'pd.merge', (['tree_data', 'model_data'], {'how': '"""outer"""', 'on': '"""nodes_id"""'}), "(tree_data, model_data, how='outer', on='nodes_id')\n", (8415, 8466), True, 'import pandas as pd\n'), ((9316, 9351), 'pyvista.PolyData', 'pv.PolyData', (['nodes', 'edges_w_padding'], {}), '(nodes, edges_w_padding)\n', (9327, 9351), True, 'import pyvista as pv\n'), ((1414, 1427), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1424, 1427), True, 'import numpy as np\n'), ((1429, 1442), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1439, 1442), True, 'import numpy as np\n'), ((6957, 6974), 'numpy.asarray', 'np.asarray', (['nodes'], {}), '(nodes)\n', (6967, 6974), True, 'import numpy as np\n'), ((7017, 7030), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (7027, 7030), True, 'import numpy as np\n'), ((8091, 8117), 'numpy.asarray', 'np.asarray', (['model[sub_key]'], {}), '(model[sub_key])\n', (8101, 8117), True, 'import numpy as np\n'), ((9192, 9221), 'numpy.empty', 'np.empty', (['edges.shape[0]', 'int'], {}), '(edges.shape[0], int)\n', (9200, 9221), True, 'import numpy as np\n'), ((9267, 9296), 'numpy.vstack', 'np.vstack', (['(padding, edges.T)'], {}), '((padding, edges.T))\n', (9276, 9296), True, 'import numpy as np\n'), ((2287, 2297), 'numpy.real', 'np.real', (['W'], {}), '(W)\n', (2294, 2297), True, 'import numpy as np\n'), ((4028, 4041), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (4038, 4041), True, 'import numpy as np\n'), ((5691, 5705), 'numpy.dot', 'np.dot', (['X.T', 'R'], {}), '(X.T, R)\n', (5697, 5705), True, 'import numpy as np\n'), ((1011, 1033), 'numpy.asarray', 'np.asarray', (['slice[key]'], {}), '(slice[key])\n', (1021, 1033), True, 'import numpy as np\n'), ((4324, 4356), 'numpy.nonzero', 'np.nonzero', (['matrix_edges_weights'], {}), '(matrix_edges_weights)\n', (4334, 4356), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
from base64 import b64decode
from binascii import hexlify, unhexlify
from struct import pack
import six
from django.db import models
from django.utils.encoding import force_text
from django_otp.models import Device
from django_otp.util import hex_validator, random_hex
from yubiotp.client import YubiClient10, YubiClient11, YubiClient20
from yubiotp.modhex import modhex
from yubiotp.otp import decode_otp
def default_id():
return force_text(random_hex(6))
def id_validator(value):
return hex_validator(6)(value)
def default_key():
return force_text(random_hex(16))
def key_validator(value):
return hex_validator(16)(value)
class YubikeyDevice(Device):
"""
Represents a locally-verified YubiKey OTP
:class:`~django_otp.models.Device`.
.. attribute:: private_id
*CharField*: The 6-byte private ID (hex-encoded).
.. attribute:: key
*CharField*: The 16-byte AES key shared with this YubiKey
(hex-encoded).
.. attribute:: session
*PositiveIntegerField*: The non-volatile session counter most recently
used by this device.
.. attribute:: counter
*PositiveIntegerField*: The volatile session usage counter most
recently used by this device.
"""
private_id = models.CharField(
max_length=12,
validators=[id_validator],
default=default_id,
verbose_name="Private ID",
help_text="The 6-byte private ID (hex-encoded)."
)
key = models.CharField(
max_length=32,
validators=[key_validator],
default=default_key,
help_text="The 16-byte AES key shared with this YubiKey (hex-encoded)."
)
session = models.PositiveIntegerField(
default=0,
help_text="The non-volatile session counter most recently used by this device."
)
counter = models.PositiveIntegerField(
default=0,
help_text="The volatile session usage counter most recently used by this device."
)
class Meta(Device.Meta):
verbose_name = "Local YubiKey device"
def public_id(self):
"""
The public ID of this device is the four-byte, big-endian,
modhex-encoded primary key.
"""
return modhex(pack('>I', self.id))
public_id.short_description = 'Public Identity'
public_id.admin_order_field = 'id'
@property
def bin_key(self):
return unhexlify(self.key.encode())
def verify_token(self, token):
if isinstance(token, six.text_type):
token = token.encode('utf-8')
try:
public_id, otp = decode_otp(token, self.bin_key)
except Exception:
return False
if public_id != self.public_id():
return False
if hexlify(otp.uid) != self.private_id.encode():
return False
if otp.session < self.session:
return False
if (otp.session == self.session) and (otp.counter <= self.counter):
return False
# All tests pass. Update the counters and return the good news.
self.session = otp.session
self.counter = otp.counter
self.save()
return True
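# Hedged usage sketch (illustrative only; ``device`` and ``token`` are
# hypothetical names): the trailing 32 modhex characters of a YubiKey OTP are
# the AES-encrypted part; ``verify_token`` decrypts them with ``self.key`` and
# accepts the token only if the session/usage counters have advanced beyond
# the stored ones (replay protection).
#
#   if device.verify_token(token):
#       ...  # accepted; the updated counters were persisted via device.save()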
class ValidationService(models.Model):
"""
Represents a YubiKey validation web service. By default, this will point to
Yubico's official hosted service, which you can customize. You can also
create instances to point at any other service implementing the same
protocol.
.. attribute:: name
*CharField*: The name of this validation service.
.. attribute:: api_id
        *IntegerField*: Your API ID. The server needs this to sign responses.
(Default: 1)
.. attribute:: api_key
*CharField*: Your base64-encoded API key, used to sign requests. This
is optional but strongly recommended. (Default: ``''``)
.. attribute:: base_url
*URLField*: The base URL of the verification service. Defaults to
Yubico's hosted API.
.. attribute:: api_version
*CharField*: The version of the validation API to use: '1.0', '1.1', or
'2.0'. (Default: '2.0')
.. attribute:: use_ssl
*BooleanField*: If ``True``, we'll use the HTTPS versions of the
default URLs. Because :mod:`urllib2` does not verify certificates, this
provides little benefit. (Default: ``False``).
.. attribute:: param_sl
*CharField*: The level of syncing required. See
:class:`~yubiotp.client.YubiClient20`.
.. attribute:: param_timeout
*CharField*: The time to allow for syncing. See
:class:`~yubiotp.client.YubiClient20`.
"""
API_VERSIONS = ['1.0', '1.1', '2.0']
name = models.CharField(
max_length=32,
help_text="The name of this validation service."
)
api_id = models.IntegerField(
default=1,
verbose_name="API ID",
help_text="Your API ID."
)
api_key = models.CharField(
max_length=64,
blank=True,
default='',
verbose_name="API key",
help_text="Your base64-encoded API key."
)
base_url = models.URLField(
blank=True,
default='',
verbose_name="Base URL",
help_text="The base URL of the verification service. Defaults to Yubico's hosted API."
)
api_version = models.CharField(
max_length=8,
choices=list(zip(API_VERSIONS, API_VERSIONS)),
default='2.0',
help_text="The version of the validation api to use."
)
use_ssl = models.BooleanField(
default=False,
verbose_name="Use SSL",
help_text="Use HTTPS API URLs by default?"
)
param_sl = models.CharField(
max_length=16,
blank=True,
default=None,
verbose_name="SL",
help_text="The level of syncing required."
)
param_timeout = models.CharField(
max_length=16,
blank=True,
default=None,
verbose_name="Timeout",
help_text="The time to allow for syncing."
)
class Meta(object):
verbose_name = "YubiKey validation service"
def __unicode__(self):
return self.name
def get_client(self):
api_key = b64decode(self.api_key.encode()) or None
if self.api_version == '2.0':
client = YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or None, self.param_timeout or None)
elif self.api_version == '1.1':
client = YubiClient11(self.api_id, api_key, self.use_ssl)
else:
client = YubiClient10(self.api_id, api_key, self.use_ssl)
if self.base_url:
client.base_url = self.base_url
return client
class RemoteYubikeyDevice(Device):
"""
Represents a YubiKey device that is to be verified with a remote validation
    service. In order to create these devices, you must have at least one
:class:`~otp_yubikey.models.ValidationService` in the database.
.. attribute:: service
*ForeignKey*: The validation service to use for this device.
.. attribute:: public_id
*CharField*: The public identity of the YubiKey (modhex-encoded).
"""
service = models.ForeignKey(ValidationService, on_delete=models.CASCADE)
public_id = models.CharField(max_length=32, verbose_name="Public ID", help_text="The public identity of the YubiKey (modhex-encoded).")
class Meta(Device.Meta):
verbose_name = "Remote YubiKey device"
def verify_token(self, token):
verified = False
if token[:-32] == self.public_id:
client = self.service.get_client()
response = client.verify(token)
verified = response.is_ok()
return verified
| [
"django_otp.util.hex_validator",
"django_otp.util.random_hex",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"binascii.hexlify",
"struct.pack",
"django.db.models.BooleanField",
"yubiotp.otp.decode_otp",
"yubiotp.client.YubiClient20",
"yubiotp.client.YubiClient11",
"django.db.models.PositiveIntegerField",
"django.db.models.URLField",
"yubiotp.client.YubiClient10",
"django.db.models.CharField"
]
| [((1365, 1530), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(12)', 'validators': '[id_validator]', 'default': 'default_id', 'verbose_name': '"""Private ID"""', 'help_text': '"""The 6-byte private ID (hex-encoded)."""'}), "(max_length=12, validators=[id_validator], default=\n default_id, verbose_name='Private ID', help_text=\n 'The 6-byte private ID (hex-encoded).')\n", (1381, 1530), False, 'from django.db import models\n'), ((1578, 1741), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'validators': '[key_validator]', 'default': 'default_key', 'help_text': '"""The 16-byte AES key shared with this YubiKey (hex-encoded)."""'}), "(max_length=32, validators=[key_validator], default=\n default_key, help_text=\n 'The 16-byte AES key shared with this YubiKey (hex-encoded).')\n", (1594, 1741), False, 'from django.db import models\n'), ((1785, 1909), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)', 'help_text': '"""The non-volatile session counter most recently used by this device."""'}), "(default=0, help_text=\n 'The non-volatile session counter most recently used by this device.')\n", (1812, 1909), False, 'from django.db import models\n'), ((1942, 2068), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)', 'help_text': '"""The volatile session usage counter most recently used by this device."""'}), "(default=0, help_text=\n 'The volatile session usage counter most recently used by this device.')\n", (1969, 2068), False, 'from django.db import models\n'), ((4804, 4890), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'help_text': '"""The name of this validation service."""'}), "(max_length=32, help_text=\n 'The name of this validation service.')\n", (4820, 4890), False, 'from django.db import models\n'), ((4922, 5001), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""API ID"""', 'help_text': '"""Your API ID."""'}), "(default=1, verbose_name='API ID', help_text='Your API ID.')\n", (4941, 5001), False, 'from django.db import models\n'), ((5047, 5173), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""API key"""', 'help_text': '"""Your base64-encoded API key."""'}), "(max_length=64, blank=True, default='', verbose_name=\n 'API key', help_text='Your base64-encoded API key.')\n", (5063, 5173), False, 'from django.db import models\n'), ((5231, 5393), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'default': '""""""', 'verbose_name': '"""Base URL"""', 'help_text': '"""The base URL of the verification service. Defaults to Yubico\'s hosted API."""'}), '(blank=True, default=\'\', verbose_name=\'Base URL\', help_text=\n "The base URL of the verification service. 
Defaults to Yubico\'s hosted API."\n )\n', (5246, 5393), False, 'from django.db import models\n'), ((5642, 5749), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Use SSL"""', 'help_text': '"""Use HTTPS API URLs by default?"""'}), "(default=False, verbose_name='Use SSL', help_text=\n 'Use HTTPS API URLs by default?')\n", (5661, 5749), False, 'from django.db import models\n'), ((5791, 5915), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'blank': '(True)', 'default': 'None', 'verbose_name': '"""SL"""', 'help_text': '"""The level of syncing required."""'}), "(max_length=16, blank=True, default=None, verbose_name='SL',\n help_text='The level of syncing required.')\n", (5807, 5915), False, 'from django.db import models\n'), ((5979, 6109), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'blank': '(True)', 'default': 'None', 'verbose_name': '"""Timeout"""', 'help_text': '"""The time to allow for syncing."""'}), "(max_length=16, blank=True, default=None, verbose_name=\n 'Timeout', help_text='The time to allow for syncing.')\n", (5995, 6109), False, 'from django.db import models\n'), ((7311, 7373), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ValidationService'], {'on_delete': 'models.CASCADE'}), '(ValidationService, on_delete=models.CASCADE)\n', (7328, 7373), False, 'from django.db import models\n'), ((7390, 7518), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""Public ID"""', 'help_text': '"""The public identity of the YubiKey (modhex-encoded)."""'}), "(max_length=32, verbose_name='Public ID', help_text=\n 'The public identity of the YubiKey (modhex-encoded).')\n", (7406, 7518), False, 'from django.db import models\n'), ((535, 548), 'django_otp.util.random_hex', 'random_hex', (['(6)'], {}), '(6)\n', (545, 548), False, 'from django_otp.util import hex_validator, random_hex\n'), ((588, 604), 'django_otp.util.hex_validator', 'hex_validator', (['(6)'], {}), '(6)\n', (601, 604), False, 'from django_otp.util import hex_validator, random_hex\n'), ((655, 669), 'django_otp.util.random_hex', 'random_hex', (['(16)'], {}), '(16)\n', (665, 669), False, 'from django_otp.util import hex_validator, random_hex\n'), ((710, 727), 'django_otp.util.hex_validator', 'hex_validator', (['(16)'], {}), '(16)\n', (723, 727), False, 'from django_otp.util import hex_validator, random_hex\n'), ((2337, 2356), 'struct.pack', 'pack', (['""">I"""', 'self.id'], {}), "('>I', self.id)\n", (2341, 2356), False, 'from struct import pack\n'), ((2697, 2728), 'yubiotp.otp.decode_otp', 'decode_otp', (['token', 'self.bin_key'], {}), '(token, self.bin_key)\n', (2707, 2728), False, 'from yubiotp.otp import decode_otp\n'), ((2860, 2876), 'binascii.hexlify', 'hexlify', (['otp.uid'], {}), '(otp.uid)\n', (2867, 2876), False, 'from binascii import hexlify, unhexlify\n'), ((6427, 6537), 'yubiotp.client.YubiClient20', 'YubiClient20', (['self.api_id', 'api_key', 'self.use_ssl', '(False)', '(self.param_sl or None)', '(self.param_timeout or None)'], {}), '(self.api_id, api_key, self.use_ssl, False, self.param_sl or\n None, self.param_timeout or None)\n', (6439, 6537), False, 'from yubiotp.client import YubiClient10, YubiClient11, YubiClient20\n'), ((6595, 6643), 'yubiotp.client.YubiClient11', 'YubiClient11', (['self.api_id', 'api_key', 'self.use_ssl'], {}), '(self.api_id, api_key, self.use_ssl)\n', (6607, 6643), False, 'from yubiotp.client import YubiClient10, YubiClient11, 
YubiClient20\n'), ((6679, 6727), 'yubiotp.client.YubiClient10', 'YubiClient10', (['self.api_id', 'api_key', 'self.use_ssl'], {}), '(self.api_id, api_key, self.use_ssl)\n', (6691, 6727), False, 'from yubiotp.client import YubiClient10, YubiClient11, YubiClient20\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2018 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from neo4j.v1 import Record
class RecordTestCase(TestCase):
def test_record_equality(self):
record1 = Record(["name", "empire"], ["Nigel", "The British Empire"])
record2 = Record(["name", "empire"], ["Nigel", "The British Empire"])
record3 = Record(["name", "empire"], ["Stefan", "Das Deutschland"])
assert record1 == record2
assert record1 != record3
assert record2 != record3
def test_record_hashing(self):
record1 = Record(["name", "empire"], ["Nigel", "The British Empire"])
record2 = Record(["name", "empire"], ["Nigel", "The British Empire"])
record3 = Record(["name", "empire"], ["Stefan", "Das Deutschland"])
assert hash(record1) == hash(record2)
assert hash(record1) != hash(record3)
assert hash(record2) != hash(record3)
def test_record_iter(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert list(a_record.__iter__()) == ["name", "empire"]
def test_record_copy(self):
original = Record(["name", "empire"], ["Nigel", "The British Empire"])
duplicate = original.copy()
assert dict(original) == dict(duplicate)
assert original.keys() == duplicate.keys()
assert original is not duplicate
def test_record_as_dict(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert dict(a_record) == {"name": "Nigel", "empire": "The British Empire"}
def test_record_as_list(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert list(a_record) == ["name", "empire"]
def test_record_len(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert len(a_record) == 2
def test_record_repr(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert repr(a_record) == "<Record name='Nigel' empire='The British Empire'>"
def test_record_data(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.data(), {"name": "Alice", "age": 33, "married": True})
self.assertEqual(r.data("name"), {"name": "Alice"})
self.assertEqual(r.data("age", "name"), {"age": 33, "name": "Alice"})
self.assertEqual(r.data("age", "name", "shoe size"), {"age": 33, "name": "Alice", "shoe size": None})
self.assertEqual(r.data(0, "name"), {"name": "Alice"})
self.assertEqual(r.data(0), {"name": "Alice"})
self.assertEqual(r.data(1, 0), {"age": 33, "name": "Alice"})
with self.assertRaises(IndexError):
_ = r.data(1, 0, 999)
def test_record_keys(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.keys(), ("name", "age", "married"))
def test_record_values(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.values(), ("Alice", 33, True))
self.assertEqual(r.values("name"), ("Alice",))
self.assertEqual(r.values("age", "name"), (33, "Alice"))
self.assertEqual(r.values("age", "name", "shoe size"), (33, "Alice", None))
self.assertEqual(r.values(0, "name"), ("Alice", "Alice"))
self.assertEqual(r.values(0), ("Alice",))
self.assertEqual(r.values(1, 0), (33, "Alice"))
with self.assertRaises(IndexError):
_ = r.values(1, 0, 999)
def test_record_items(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.items(), [("name", "Alice"), ("age", 33), ("married", True)])
self.assertEqual(r.items("name"), [("name", "Alice")])
self.assertEqual(r.items("age", "name"), [("age", 33), ("name", "Alice")])
self.assertEqual(r.items("age", "name", "shoe size"), [("age", 33), ("name", "Alice"), ("shoe size", None)])
self.assertEqual(r.items(0, "name"), [("name", "Alice"), ("name", "Alice")])
self.assertEqual(r.items(0), [("name", "Alice")])
self.assertEqual(r.items(1, 0), [("age", 33), ("name", "Alice")])
with self.assertRaises(IndexError):
_ = r.items(1, 0, 999)
def test_record_index(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.index("name"), 0)
self.assertEqual(r.index("age"), 1)
self.assertEqual(r.index("married"), 2)
with self.assertRaises(KeyError):
_ = r.index("shoe size")
self.assertEqual(r.index(0), 0)
self.assertEqual(r.index(1), 1)
self.assertEqual(r.index(2), 2)
with self.assertRaises(IndexError):
_ = r.index(3)
with self.assertRaises(TypeError):
_ = r.index(None)
def test_record_value(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.value(), "Alice")
self.assertEqual(r.value("name"), "Alice")
self.assertEqual(r.value("age"), 33)
self.assertEqual(r.value("married"), True)
self.assertEqual(r.value("shoe size"), None)
self.assertEqual(r.value("shoe size", 6), 6)
self.assertEqual(r.value(0), "Alice")
self.assertEqual(r.value(1), 33)
self.assertEqual(r.value(2), True)
self.assertEqual(r.value(3), None)
self.assertEqual(r.value(3, 6), 6)
with self.assertRaises(TypeError):
_ = r.value(None)
def test_record_contains(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertTrue("name" in r)
self.assertTrue("age" in r)
self.assertTrue("married" in r)
self.assertFalse("shoe size" in r)
self.assertTrue(0 in r)
self.assertTrue(1 in r)
self.assertTrue(2 in r)
self.assertFalse(3 in r)
with self.assertRaises(TypeError):
_ = r.index(None)
| [
"neo4j.v1.Record"
]
| [((888, 947), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (894, 947), False, 'from neo4j.v1 import Record\n'), ((966, 1025), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (972, 1025), False, 'from neo4j.v1 import Record\n'), ((1044, 1101), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Stefan', 'Das Deutschland']"], {}), "(['name', 'empire'], ['Stefan', 'Das Deutschland'])\n", (1050, 1101), False, 'from neo4j.v1 import Record\n'), ((1258, 1317), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (1264, 1317), False, 'from neo4j.v1 import Record\n'), ((1336, 1395), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (1342, 1395), False, 'from neo4j.v1 import Record\n'), ((1414, 1471), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Stefan', 'Das Deutschland']"], {}), "(['name', 'empire'], ['Stefan', 'Das Deutschland'])\n", (1420, 1471), False, 'from neo4j.v1 import Record\n'), ((1662, 1721), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (1668, 1721), False, 'from neo4j.v1 import Record\n'), ((1837, 1896), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (1843, 1896), False, 'from neo4j.v1 import Record\n'), ((2129, 2188), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (2135, 2188), False, 'from neo4j.v1 import Record\n'), ((2327, 2386), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (2333, 2386), False, 'from neo4j.v1 import Record\n'), ((2490, 2549), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (2496, 2549), False, 'from neo4j.v1 import Record\n'), ((2636, 2695), 'neo4j.v1.Record', 'Record', (["['name', 'empire']", "['Nigel', 'The British Empire']"], {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])\n", (2642, 2695), False, 'from neo4j.v1 import Record\n'), ((2826, 2881), 'neo4j.v1.Record', 'Record', (["['name', 'age', 'married']", "['Alice', 33, True]"], {}), "(['name', 'age', 'married'], ['Alice', 33, True])\n", (2832, 2881), False, 'from neo4j.v1 import Record\n'), ((3522, 3577), 'neo4j.v1.Record', 'Record', (["['name', 'age', 'married']", "['Alice', 33, True]"], {}), "(['name', 'age', 'married'], ['Alice', 33, True])\n", (3528, 3577), False, 'from neo4j.v1 import Record\n'), ((3688, 3743), 'neo4j.v1.Record', 'Record', (["['name', 'age', 'married']", "['Alice', 33, True]"], {}), "(['name', 'age', 'married'], ['Alice', 33, True])\n", (3694, 3743), False, 'from neo4j.v1 import Record\n'), ((4304, 4359), 'neo4j.v1.Record', 'Record', (["['name', 'age', 'married']", "['Alice', 33, True]"], {}), "(['name', 'age', 'married'], ['Alice', 33, True])\n", (4310, 4359), False, 'from neo4j.v1 import Record\n'), ((5054, 5109), 
'neo4j.v1.Record', 'Record', (["['name', 'age', 'married']", "['Alice', 33, True]"], {}), "(['name', 'age', 'married'], ['Alice', 33, True])\n", (5060, 5109), False, 'from neo4j.v1 import Record\n'), ((5636, 5691), 'neo4j.v1.Record', 'Record', (["['name', 'age', 'married']", "['Alice', 33, True]"], {}), "(['name', 'age', 'married'], ['Alice', 33, True])\n", (5642, 5691), False, 'from neo4j.v1 import Record\n'), ((6328, 6383), 'neo4j.v1.Record', 'Record', (["['name', 'age', 'married']", "['Alice', 33, True]"], {}), "(['name', 'age', 'married'], ['Alice', 33, True])\n", (6334, 6383), False, 'from neo4j.v1 import Record\n')] |
import tensorflow as tf
import sys
import os
from glob import glob
import png
sys.path.append(os.path.join(__file__,'..','..'))
from tfDataIngest import tfDataSetParquet as tfDsParquet
inputDataDir = sys.argv[1]
outputDir = sys.argv[2]
# test app
if __name__ == "__main__":
files = glob(os.path.join(inputDataDir,"train*.parquet"))
print("Found {0} parquet files in input dir {1}".format(len(files),inputDataDir))
print("First is {0}".format(files[0]))
ds = tfDsParquet.create_parquet_dataset([files[0]])
for element in ds.as_numpy_iterator():
#print("Iterating...")
sampleId,pixels = element
sampleId = sampleId.decode("utf-8")
fileName = os.path.join(outputDir,"{0}.png".format(sampleId))
png.from_array(pixels, mode="L").save(fileName)
#print(element)
#print("sample name is {0}".format(sampleId))
#print(sampleIds.shape)
#print(pixels.shape)
# a += 1
# if a > 10:
# break
print("Done")
#print("{0} elements in the dataset".format(len(ds.))) | [
"tfDataIngest.tfDataSetParquet.create_parquet_dataset",
"os.path.join",
"png.from_array"
]
| [((95, 129), 'os.path.join', 'os.path.join', (['__file__', '""".."""', '""".."""'], {}), "(__file__, '..', '..')\n", (107, 129), False, 'import os\n'), ((478, 524), 'tfDataIngest.tfDataSetParquet.create_parquet_dataset', 'tfDsParquet.create_parquet_dataset', (['[files[0]]'], {}), '([files[0]])\n', (512, 524), True, 'from tfDataIngest import tfDataSetParquet as tfDsParquet\n'), ((295, 339), 'os.path.join', 'os.path.join', (['inputDataDir', '"""train*.parquet"""'], {}), "(inputDataDir, 'train*.parquet')\n", (307, 339), False, 'import os\n'), ((758, 790), 'png.from_array', 'png.from_array', (['pixels'], {'mode': '"""L"""'}), "(pixels, mode='L')\n", (772, 790), False, 'import png\n')] |
"""\
Code generator functions for wxDatePickerCtrl objects
@copyright: 2002-2007 <NAME>
@copyright: 2014-2016 <NAME>
@copyright: 2016-2021 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common, compat
import wcodegen
class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter):
tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\n'
# XXX the following needs to depend on the code generator when Phoenix is about to be supported fully:
if compat.IS_PHOENIX:
import_modules = ['import wx.adv\n']
if compat.IS_PHOENIX:
def cn(self, name):
# don't process already formatted items again
if name.startswith('wx.'):
return name
if name.startswith('wx'):
return 'wx.adv.' + name[2:]
elif name.startswith('EVT_'):
return 'wx.adv.' + name
return name
def _prepare_tmpl_content(self, obj):
wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj)
self.has_setdefault = int(obj.properties.get('default', 0))
return
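# Hedged illustration (not part of wxGlade itself; the widget name below is
# hypothetical): with the template above, the Python writer emits a line of
# roughly this shape, with the %(...)s placeholders filled in by the framework:
#   self.datepicker_ctrl_1 = wx.adv.DatePickerCtrl(self, wx.ID_ANY)
# The exact class prefix depends on cn() (wx.adv.* under Phoenix, wx* otherwise).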
class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter):
import_modules = ['<wx/datectrl.h>']
tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \
'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \
'%(style)s);\n'
prefix_style = False
set_default_style = True
def _prepare_tmpl_content(self, obj):
wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj)
self.has_setdefault = int(obj.properties.get('default', 0))
return
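# XRC generator: writes the 'label' property with & mapped to _ (accelerator marker) while literal && is preserved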
def xrc_code_generator(obj):
xrcgen = common.code_writers['XRC']
class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject):
def write_property(self, name, val, output, tabs):
if name == 'label':
# translate & into _ as accelerator marker
val2 = val.replace('&', '_')
if val.count('&&') > 0:
while True:
index = val.find('&&')
if index < 0:
break
val = val2[:index] + '&&' + val2[index+2:]
else:
val = val2
xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs)
return DatePickerCtrlXrcObject(obj)
def initialize():
klass = 'wxDatePickerCtrl'
common.class_names['EditDatePickerCtrl'] = klass
common.register('python', klass, PythonDatePickerCtrlGenerator(klass))
common.register('C++', klass, CppDatePickerCtrlGenerator(klass))
common.register('XRC', klass, xrc_code_generator)
| [
"wcodegen.CppWidgetCodeWriter._prepare_tmpl_content",
"common.register",
"wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content"
]
| [((2676, 2725), 'common.register', 'common.register', (['"""XRC"""', 'klass', 'xrc_code_generator'], {}), "('XRC', klass, xrc_code_generator)\n", (2691, 2725), False, 'import common, compat\n'), ((995, 1059), 'wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content', 'wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content', (['self', 'obj'], {}), '(self, obj)\n', (1048, 1059), False, 'import wcodegen\n'), ((1514, 1575), 'wcodegen.CppWidgetCodeWriter._prepare_tmpl_content', 'wcodegen.CppWidgetCodeWriter._prepare_tmpl_content', (['self', 'obj'], {}), '(self, obj)\n', (1564, 1575), False, 'import wcodegen\n')] |
import os
import sys
import unittest
from tests.tests_bin_class.test_performance import *
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main"
]
| [((123, 138), 'unittest.main', 'unittest.main', ([], {}), '()\n', (136, 138), False, 'import unittest\n')] |
#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 0-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the BSD 0-Clause License for more details.
from keras.optimizers import Adam
from models.ICCV_architectures import *
from models.unet import *
from keras.engine.topology import Network
import sys
import tensorflow as tf
from utilities.data_loader import *
class CycleGAN():
def __init__(self,
opt,
image_shape=(256 * 1, 256 * 1, 3),
load_training_data=True,
normalization=InstanceNormalization,
):
self.task = opt.task
self.im_w = opt.im_w
self.im_h = opt.im_h
self.data_root = opt.data_root
self.img_shape = image_shape
self.channels = self.img_shape[-1]
# Fetch data during training instead of pre caching all images
self.use_data_generator = True
self.generator_architecture = opt.generator_architecture
self.use_norm = opt.use_norm
self.add_extra_conv = opt.add_extra_conv
self.image_shapeA = (opt.im_w * 1, opt.im_h * 1, 3)
self.image_shapeA_in = (None, None, 3)
if self.task == 'Long2Short_raw':
self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 1)
self.image_shapeB_in = (None, None, 3)
else:
self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 3)
self.image_shapeB_in = (None, None, 3)
# Identity loss - sometimes send images from B to G_A2B (and the opposite) to teach identity mappings
self.use_identity_learning = opt.use_identity_learning
self.identity_mapping_modulus = opt.identity_mapping_modulus  # Identity mapping will be done each time the iteration number is divisible by this number
# PatchGAN - if false the discriminator learning rate should be decreased
self.use_patchgan = opt.use_patchgan
self.normalization = normalization
# Loss hyperparameters
self.lambda_1 = opt.lambda_1 # Cyclic loss weight A_2_B
self.lambda_2 = opt.lambda_2 # Cyclic loss weight B_2_A
self.lambda_D = opt.lambda_D # Weight for loss from discriminator guess on synthetic images
# Learning rates
self.learning_rate_D = opt.lr_D
self.learning_rate_G = opt.lr_G
self.beta_1 = opt.beta_1
self.beta_2 = opt.beta_2
self.batch_size = 1
self.clipvalue = opt.clipvalue
self.epsilon_norm = opt.epsilon_norm
# self.crop_res = opt.crop_res
# Resize convolution - instead of transpose convolution in deconvolution layers (uk) - can reduce checkerboard artifacts but the blurring might affect the cycle-consistency
self.use_resize_convolution = opt.use_resize_convolution
# Supervised learning part
self.use_supervised_learning = opt.use_supervised_learning
self.supervised_weight = opt.supervised_weight
self.supervised_loss = opt.supervised_loss
# optimizer
if opt.clipvalue is not None:
self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue)
self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue)
else:
self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2)
self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2)
# # ======= Discriminator model ==========
if self.generator_architecture == 'ICCV':
D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan,
disc_use_4_layers=True)
D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan,
disc_use_4_layers=True)
loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images
loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images
elif self.generator_architecture == 'unet_mini':
D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm,
use_patchgan=self.use_patchgan)
D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm,
use_patchgan=self.use_patchgan)
loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images
# Discriminator builds
image_A = Input(self.image_shapeA)
image_B = Input(self.image_shapeB)
guess_A = D_A(image_A)
guess_B = D_B(image_B)
self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model')
self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model')
if self.use_patchgan:
self.D_A.compile(optimizer=self.opt_D,
loss=self.lse,
loss_weights=loss_weights_D)
self.D_B.compile(optimizer=self.opt_D,
loss=self.lse,
loss_weights=loss_weights_D)
else:
self.D_A.compile(optimizer=self.opt_D,
loss='binary_crossentropy',
loss_weights=loss_weights_D)
self.D_B.compile(optimizer=self.opt_D,
loss='binary_crossentropy',
loss_weights=loss_weights_D)
# Use Networks to avoid falsy keras error about weight descripancies
self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model')
self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model')
# ============= Generator models =======================
# Do note update discriminator weights during generator training
self.D_A_static.trainable = False
self.D_B_static.trainable = False
# Generators
if self.generator_architecture == 'ICCV':
self.G_A2B = modelGenerator(conv_kernel_c7Ak=7,
use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA,
output=self.image_shapeB, name='G_A2B_model')
self.G_B2A = modelGenerator(conv_kernel_c7Ak=7,
use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB,
output=self.image_shapeA, name='G_B2A_model')
elif self.generator_architecture == 'unet_mini':
self.G_A2B = unet_generator_mini(input=self.image_shapeA,
output=self.image_shapeB,
normalization=normalization,
epsilon=self.epsilon_norm,
use_norm=self.use_norm,
add_extra_conv=self.add_extra_conv,
use_resize_convolution=self.use_resize_convolution,
name='G_A2B_model')
self.G_B2A = unet_generator_mini(input=self.image_shapeB,
output=self.image_shapeA,
normalization=normalization,
epsilon=self.epsilon_norm,
use_norm=self.use_norm,
add_extra_conv=self.add_extra_conv,
use_resize_convolution=self.use_resize_convolution,
name='G_B2A_model')
if self.use_identity_learning:
self.G_A2B.compile(optimizer=self.opt_G, loss='MAE')
self.G_B2A.compile(optimizer=self.opt_G, loss='MAE')
# Generator builds
real_A = Input(shape=self.image_shapeA, name='real_A')
real_B = Input(shape=self.image_shapeB, name='real_B')
synthetic_B = self.G_A2B(real_A)
synthetic_A = self.G_B2A(real_B)
dA_guess_synthetic = self.D_A_static(synthetic_A)
dB_guess_synthetic = self.D_B_static(synthetic_B)
reconstructed_A = self.G_B2A(synthetic_B)
reconstructed_B = self.G_A2B(synthetic_A)
model_outputs = [reconstructed_A, reconstructed_B]
compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse]
compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D]
model_outputs.append(dA_guess_synthetic)
model_outputs.append(dB_guess_synthetic)
if self.use_supervised_learning:
model_outputs.append(synthetic_A)
model_outputs.append(synthetic_B)
if self.supervised_loss == 'MAE':
compile_losses.append('MAE')
compile_losses.append('MAE')
compile_weights.append(self.supervised_weight)
compile_weights.append(self.supervised_weight)
self.G_model = Model(inputs=[real_A, real_B],
outputs=model_outputs,
name='G_model')
self.G_model.compile(optimizer=self.opt_G,
loss=compile_losses,
loss_weights=compile_weights)
# ======= Data ==========
# Use 'None' to fetch all available images
nr_A_test_imgs = 1000
nr_B_test_imgs = 1000
if self.use_data_generator:
print('--- Using dataloader during training ---')
else:
print('--- Caching data ---')
sys.stdout.flush()
if load_training_data:
if self.use_data_generator:
self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size,
crop_size=self.im_w, generator=True)
# Only store test images
if opt.task == 'Vimeo2Long_SID':
self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs,
nr_B_test_imgs)
else:
self.A_test = []
self.B_test = []
self.A_train = []
self.B_train = []
if not self.use_data_generator:
print('Data has been loaded')
def load_model_and_weights(self, model, weights_path, iteration, by_name):
name = model.name + '_weights_epoch_' + str(iteration)
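# NOTE: 'root' is not defined in this file; it is assumed to come from the star import of utilities.data_loader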
final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name))
model.load_weights(final_path, by_name=by_name)
def print_info(self):
print('Initializing Cycle GAN with parameters ...')
print('task: ', self.task)
print('generator architecture: ', self.generator_architecture)
print('image width: ', self.im_w)
print('image height: ', self.im_h)
print('learning date G: ', self.learning_rate_G)
print('learning date D: ', self.learning_rate_D)
print('use patchGAN: ', self.use_patchgan)
print('use_identity_learning: ', self.use_identity_learning)
print('normalization: ', self.normalization)
print('identity_mapping_modulus: ', self.identity_mapping_modulus)
print('lambda_1: ', self.lambda_1)
print('lambda_2: ', self.lambda_2)
print('lambda_D: ', self.lambda_D)
print('beta_1: ', self.beta_1)
print('beta_2: ', self.beta_2)
print('use_supervised_learning: ', self.use_supervised_learning)
print('supervised_weight: ', self.supervised_weight)
print('supervised_loss: ', self.supervised_loss)
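# Least-squares (LSGAN-style) adversarial loss: mean squared difference between prediction and target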
def lse(self, y_true, y_pred):
loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true))
return loss
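# Cycle-consistency loss: mean absolute (L1) error between input and its reconstruction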
def cycle_loss(self, y_true, y_pred):
loss = tf.reduce_mean(tf.abs(y_pred - y_true))
return loss
| [
"keras.optimizers.Adam",
"tensorflow.squared_difference",
"keras.engine.topology.Network",
"sys.stdout.flush",
"tensorflow.abs"
]
| [((6069, 6134), 'keras.engine.topology.Network', 'Network', ([], {'inputs': 'image_A', 'outputs': 'guess_A', 'name': '"""D_A_static_model"""'}), "(inputs=image_A, outputs=guess_A, name='D_A_static_model')\n", (6076, 6134), False, 'from keras.engine.topology import Network\n'), ((6162, 6227), 'keras.engine.topology.Network', 'Network', ([], {'inputs': 'image_B', 'outputs': 'guess_B', 'name': '"""D_B_static_model"""'}), "(inputs=image_B, outputs=guess_B, name='D_B_static_model')\n", (6169, 6227), False, 'from keras.engine.topology import Network\n'), ((10348, 10366), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10364, 10366), False, 'import sys\n'), ((3457, 3535), 'keras.optimizers.Adam', 'Adam', (['self.learning_rate_D', 'self.beta_1', 'self.beta_2'], {'clipvalue': 'self.clipvalue'}), '(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue)\n', (3461, 3535), False, 'from keras.optimizers import Adam\n'), ((3562, 3640), 'keras.optimizers.Adam', 'Adam', (['self.learning_rate_G', 'self.beta_1', 'self.beta_2'], {'clipvalue': 'self.clipvalue'}), '(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue)\n', (3566, 3640), False, 'from keras.optimizers import Adam\n'), ((3682, 3734), 'keras.optimizers.Adam', 'Adam', (['self.learning_rate_D', 'self.beta_1', 'self.beta_2'], {}), '(self.learning_rate_D, self.beta_1, self.beta_2)\n', (3686, 3734), False, 'from keras.optimizers import Adam\n'), ((3761, 3813), 'keras.optimizers.Adam', 'Adam', (['self.learning_rate_G', 'self.beta_1', 'self.beta_2'], {}), '(self.learning_rate_G, self.beta_1, self.beta_2)\n', (3765, 3813), False, 'from keras.optimizers import Adam\n'), ((12600, 12637), 'tensorflow.squared_difference', 'tf.squared_difference', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (12621, 12637), True, 'import tensorflow as tf\n'), ((12736, 12759), 'tensorflow.abs', 'tf.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (12742, 12759), True, 'import tensorflow as tf\n')] |
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from routes import doc, api
from fastapi.templating import Jinja2Templates
from starlette.requests import Request
# configure static files and Jinja2 templates
app = FastAPI(
    title="Technical Case",
    description="Endpoint for uploading spreadsheets to a relational Postgres database.",
    version="0.0.1",
    static_directory="static"
)
app.mount("/static", StaticFiles(directory="static"), name="static")
# import the factory builders and initialize them
doc.init_app(app)
api.init_app(app, "/api")
#
templates = Jinja2Templates(directory="templates")
#views
@app.get("/", tags=["/view"])
async def index(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})
if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=8080)
| [
"fastapi.FastAPI",
"uvicorn.run",
"routes.doc.init_app",
"fastapi.templating.Jinja2Templates",
"fastapi.staticfiles.StaticFiles",
"routes.api.init_app"
]
| [((258, 431), 'fastapi.FastAPI', 'FastAPI', ([], {'title': 'f"""Technical Case"""', 'description': 'f"""endpoint para subir planilhas para banco de dados relacional Postgres."""', 'version': 'f"""0.0.1"""', 'static_directory': '"""static"""'}), "(title=f'Technical Case', description=\n f'endpoint para subir planilhas para banco de dados relacional Postgres.',\n version=f'0.0.1', static_directory='static')\n", (265, 431), False, 'from fastapi import FastAPI\n'), ((549, 566), 'routes.doc.init_app', 'doc.init_app', (['app'], {}), '(app)\n', (561, 566), False, 'from routes import doc, api\n'), ((567, 592), 'routes.api.init_app', 'api.init_app', (['app', '"""/api"""'], {}), "(app, '/api')\n", (579, 592), False, 'from routes import doc, api\n'), ((608, 646), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""templates"""'}), "(directory='templates')\n", (623, 646), False, 'from fastapi.templating import Jinja2Templates\n'), ((462, 493), 'fastapi.staticfiles.StaticFiles', 'StaticFiles', ([], {'directory': '"""static"""'}), "(directory='static')\n", (473, 493), False, 'from fastapi.staticfiles import StaticFiles\n'), ((825, 875), 'uvicorn.run', 'uvicorn.run', (['"""main:app"""'], {'host': '"""0.0.0.0"""', 'port': '(8080)'}), "('main:app', host='0.0.0.0', port=8080)\n", (836, 875), False, 'import uvicorn\n')] |
from datetime import datetime, timedelta
from enum import Enum
from typing import List, Optional, Tuple, Dict, Any, Union
import time
from authlib.common.security import generate_token
from authlib.consts import default_json_headers
from authlib.oauth2 import (
OAuth2Request,
AuthorizationServer as _AuthorizationServer,
ResourceProtector as _ResourceProtector,
OAuth2Error,
HttpRequest,
)
from authlib.oauth2.rfc6749 import InvalidClientError
from authlib.oauth2.rfc6749.grants import (
AuthorizationCodeGrant as _AuthorizationCodeGrant,
RefreshTokenGrant as _RefreshTokenGrant,
BaseGrant,
)
from authlib.oauth2.rfc6749.grants import (
ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant,
)
from authlib.oauth2.rfc6749.util import scope_to_list
from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, \
InsufficientScopeError
from authlib.oauth2.rfc8414 import AuthorizationServerMetadata
from authlib.oidc.core import UserInfo
from authlib.oidc.core.grants import (
OpenIDCode as _OpenIDCode,
OpenIDImplicitGrant as _OpenIDImplicitGrant,
OpenIDHybridGrant as _OpenIDHybridGrant,
)
from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token
from fastapi import HTTPException
from starlette.concurrency import run_in_threadpool
from starlette.responses import Response, JSONResponse
from user_manager.common.config import config
from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, \
UserPropertyType
from user_manager.common.mongo import authorization_code_collection, token_collection, \
client_collection, client_user_cache_collection, user_group_collection, async_token_collection, \
async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema
from . import oauth2_key
from .user_helper import UserWithRoles
USERS_SCOPE = '*users'
class TypedRequest(OAuth2Request):
user: UserWithRoles
credential: Union[DbAuthorizationCode, DbToken]
client: DbClient
class RedirectResponse(Response):
def to_json_response(self) -> JSONResponse:
return JSONResponse(
content={'redirect_uri': self.headers['Location']},
status_code=200,
headers=dict(default_json_headers),
)
class ErrorJSONResponse(JSONResponse):
pass
class ErrorRedirectResponse(RedirectResponse):
def to_json_response(self) -> JSONResponse:
return ErrorJSONResponse(
content={'redirect_uri': self.headers['Location']},
status_code=401,
headers=dict(default_json_headers),
)
class AuthorizationServer(_AuthorizationServer):
metadata_class = AuthorizationServerMetadata
def create_oauth2_request(self, request: TypedRequest):
assert isinstance(request, OAuth2Request)
return request
def create_json_request(self, request):
assert isinstance(request, HttpRequest)
raise NotImplementedError()
# TODO: Create HttpRequest with json in body.
def handle_response(self, status_code: int, payload: Optional[dict], headers: List[Tuple[str, str]]):
headers = dict(headers)
if isinstance(payload, dict):
return JSONResponse(payload, status_code=status_code, headers=headers)
elif headers.get('Location'):
assert not payload
return RedirectResponse(status_code=status_code, headers=headers)
assert False
def handle_error_response(self, request: TypedRequest, error: OAuth2Error):
status_code, body, headers = error(
translations=self.get_translations(request),
error_uris=self.get_error_uris(request)
)
headers = dict(headers)
if isinstance(body, dict):
return ErrorJSONResponse(
content=body,
status_code=status_code,
headers=headers,
)
elif headers.get('Location'):
assert not body
return ErrorRedirectResponse(
status_code=status_code,
headers=headers,
)
assert False
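# Persist a newly issued authorization code, together with its nonce, auth_time and expiry, so it can later be exchanged for tokens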
def save_authorization_code(code: str, request: TypedRequest):
nonce = request.data.get('nonce')
item = DbAuthorizationCode(
code=code,
client_id=request.client.id,
redirect_uri=request.redirect_uri,
scope=request.scope,
user_id=request.user.user.id,
nonce=nonce,
auth_time=int(time.time()),
expiration_time=datetime.utcnow() + timedelta(seconds=config.oauth2.token_expiration.authorization_code),
)
authorization_code_collection.insert_one(item.document())
return item
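# Nonce lookup used by the OpenID grants; the nonce is cleared atomically so it cannot be replayed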
class ExistsNonceMixin(object):
def exists_nonce(self, nonce: str, request: TypedRequest):
# exists = mongo.authorization_code_collection.count_documents(
# {'client_id': request.client_id, 'nonce': nonce},
# limit=1,
# )
mod_result = authorization_code_collection.update_one(
{'client_id': request.client_id, 'nonce': nonce},
{'$set': {'nonce': None}},
)
if mod_result.modified_count != 1:
return False
return True
class JwtConfigMixin(object):
jwt_token_expiration: int
def get_jwt_config(self, *args, **kwargs):
return {
'key': oauth2_key.key.key,
'alg': oauth2_key.key.jwk.alg.value,
'iss': config.oauth2.issuer,
'exp': self.jwt_token_expiration,
}
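# Maps granted scopes to schema-defined user properties and builds the OIDC UserInfo claims (sync and async variants)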
class UserInfoMixin(object):
def _translate_properties(
self,
scope: str,
schema: DbManagerSchema,
) -> List[Tuple[str, DbUserProperty, Optional[str], Optional[bool]]]:
scope_list = ['*'] + scope_to_list(scope)
return [
(prop.valid_key, schema.properties_by_key[prop.user_property], prop.group_type, prop.group_by_name)
for scope_name in scope_list
if scope_name not in ('openid', 'offline_access') and scope_name in schema.scopes_by_key
for prop in schema.scopes_by_key[scope_name].properties
if prop.user_property in schema.properties_by_key
]
def generate_user_info(self, user: UserWithRoles, scope: str):
user_data = {
'roles': user.roles,
}
for key, prop, group_type, group_by_name in self._translate_properties(scope, read_schema()):
if not hasattr(user.user, prop.key):
continue
value = getattr(user.user, prop.key, None)
if prop.type == UserPropertyType.picture:
if value is not None:
value = f"{config.oauth2.base_url}/picture/{value}"
elif prop.type == UserPropertyType.groups:
group_filter = {} if group_type is None else {'group_type': group_type}
value = [
group['group_name'] if group_by_name else group['_id']
for group in user_group_collection.find(
{'_id': {'$in': value}, 'visible': True, **group_filter},
projection={'group_name' if group_by_name else '_id': 1}
)
]
elif prop.type in (
UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token
):
continue
user_data[key] = value
return UserInfo(**user_data)
async def async_generate_user_info(self, user: UserWithRoles, scope: str):
user_data = {
'roles': user.roles,
}
for key, prop, group_type, group_by_name in self._translate_properties(scope, await async_read_schema()):
if not hasattr(user.user, prop.key):
continue
value = getattr(user.user, prop.key, None)
if prop.type == UserPropertyType.picture:
if value is not None:
value = f"{config.oauth2.base_url}/picture/{value}"
elif prop.type == UserPropertyType.groups:
group_filter = {} if group_type is None else {'group_type': group_type}
value = [
group['group_name'] if group_by_name else group['_id']
async for group in async_user_group_collection.find(
{'_id': {'$in': value}, 'visible': True, **group_filter},
projection={'group_name' if group_by_name else '_id': 1}
)
]
elif prop.type in (
UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token
):
continue
user_data[key] = value
return UserInfo(**user_data)
class AuthorizationCodeGrant(_AuthorizationCodeGrant):
TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic', 'client_secret_post']
AUTHORIZATION_CODE_LENGTH = config.oauth2.authorization_code_length
def save_authorization_code(self, code: str, request: TypedRequest):
return save_authorization_code(code, request)
def query_authorization_code(self, code: str, client: DbClient):
auth_code_data = authorization_code_collection.find_one({'_id': code, 'client_id': client.id})
if auth_code_data is None:
return None
auth_code = DbAuthorizationCode.validate_document(auth_code_data)
if auth_code.is_expired():
return None
return auth_code
def delete_authorization_code(self, authorization_code: DbAuthorizationCode):
authorization_code_collection.delete_one({'_id': authorization_code.code})
def authenticate_user(self, authorization_code: DbAuthorizationCode):
return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id)
class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant):
def authenticate_token_endpoint_client(self):
# Must override this to set the client in the request, to make it available to authenticate_user
client = super(self).authenticate_token_endpoint_client()
self.request.client = client
return client
def authenticate_user(self, username: str, password: str):
user_data = user_collection.find_one({'email': username, 'access_tokens.token': password, 'active': True})
if user_data is None:
return None
return UserWithRoles.load_groups(DbUser.validate_document(user_data), self.client.id)
class OpenIDCode(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDCode):
jwt_token_expiration = config.oauth2.token_expiration.authorization_code
class OpenIDImplicitGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDImplicitGrant):
jwt_token_expiration = config.oauth2.token_expiration.implicit
class OpenIDHybridGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDHybridGrant):
jwt_token_expiration = config.oauth2.token_expiration.implicit
def generate_authorization_code(self) -> str:
return generate_token(config.oauth2.authorization_code_length)
def save_authorization_code(self, code: str, request: TypedRequest):
return save_authorization_code(code, request)
class RefreshTokenGrant(_RefreshTokenGrant):
TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic']
INCLUDE_NEW_REFRESH_TOKEN = True
def authenticate_refresh_token(self, refresh_token: str):
token_data = token_collection.find_one({'refresh_token': refresh_token})
if token_data is None:
return None
auth_code = DbToken.validate_document(token_data)
if auth_code.is_expired():
return None
return auth_code
def authenticate_user(self, credential: DbToken):
return UserWithRoles.load(credential.user_id, credential.client_id)
def revoke_old_credential(self, credential: DbToken):
# token_collection.update_one({'_id': credential.access_token}, {'revoked': True})
token_collection.delete_one({'_id': credential.access_token})
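# Persist an issued token document with client, user, scope, auth_time and expiry metadata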
def save_token(token: Dict[str, Any], request: TypedRequest):
if request.user:
user_id = request.user.user.id
else:
user_id = None
now = int(time.time())
token_data = DbToken.validate_document({
'client_id': request.client.id,
'user_id': user_id,
'issued_at': now,
'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)),
'scope': request.scope,
'auth_time': request.credential.get_auth_time(),
**token
})
token_collection.insert_one(token_data.document())
return token_data
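# Client lookup callbacks used by the authorization server (synchronous and asynchronous variants)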
def query_client(client_id: str):
client_data = client_collection.find_one({'_id': client_id})
if client_data is None:
return None
return DbClient.validate_document(client_data)
async def async_query_client(client_id: str):
client_data = await async_client_collection.find_one({'_id': client_id})
if client_data is None:
return None
return DbClient.validate_document(client_data)
def token_generator(*_):
return generate_token(config.oauth2.token_length)
class AccessTokenGenerator(UserInfoMixin, JwtConfigMixin):
jwt_token_expiration = config.oauth2.token_expiration.authorization_code
def __call__(self, client: DbClient, grant_type: str, user: UserWithRoles, scope: str):
jwt_config = self.get_jwt_config()
jwt_config['aud'] = [client.get_client_id()]
jwt_config['auth_time'] = int(time.time())
user_info = {'sub': user.user.id, 'roles': user.roles}
if 'groups' in scope_to_list(scope):
user_info['groups'] = user.user.groups
return generate_id_token({}, user_info, code=generate_token(config.oauth2.access_token_length), **jwt_config)
def token_expires_in(_, grant_type: str):
return getattr(config.oauth2.token_expiration, grant_type)
class BearerToken(_BearerToken):
def __call__(self, client, grant_type, user=None, scope=None,
expires_in=None, include_refresh_token=True):
if 'offline_access' not in scope_to_list(scope):
include_refresh_token = False
return super(BearerToken, self).__call__(client, grant_type, user, scope, expires_in, include_refresh_token)
authorization = AuthorizationServer(
query_client,
save_token,
BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator),
)
class OpenIDSessionState:
def __call__(self, grant: BaseGrant):
grant.register_hook('process_token', self.process_token)
def process_token(self, grant: BaseGrant, token: dict):
scope = token.get('scope')
if not scope or not is_openid_scope(scope):
# standard authorization code flow
return token
token['session_state'] = str(grant.request.user.last_modified)
return token
# support all openid grants
authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(OpenIDImplicitGrant)
authorization.register_grant(OpenIDHybridGrant)
authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(ResourceOwnerPasswordCredentialsGrant)
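# Bearer token validation: the token must exist and its client/user pair must still be present in the cache collection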
class BearerTokenValidator(_BearerTokenValidator):
def authenticate_token(self, token_string: str):
token_data = token_collection.find_one({'_id': token_string})
if token_data is None:
return None
token = DbToken.validate_document(token_data)
if client_user_cache_collection.count_documents({
'client_id': token.client_id,
'user_id': token.user_id,
}) != 1:
return None
return token
def request_invalid(self, request: TypedRequest):
return False
def token_revoked(self, token: DbToken):
return token.revoked
class ResourceProtector(_ResourceProtector):
def validate(self, request: OAuth2Request, scope: str = None, scope_operator='AND') -> DbToken:
assert isinstance(request, OAuth2Request)
return self.validate_request(scope, request, scope_operator)
class UserIntrospection(UserInfoMixin):
async def create_response(self, request: TypedRequest) -> Response:
try:
assert isinstance(request, OAuth2Request)
request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
if request.token is None:
raise HTTPException(403, "Invalid token")
request.user = await UserWithRoles.async_load(request.token.user_id, request.token.client_id)
user_info = await self.async_generate_user_info(request.user, request.token.scope)
return JSONResponse(user_info)
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
class RequestOriginVerifier:
async def create_response(self, request: TypedRequest, origin: str) -> Optional[Response]:
try:
assert isinstance(request, OAuth2Request)
request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
if request.token is None:
raise HTTPException(403, "Invalid token")
request.client = await async_query_client(request.token.client_id)
if request.client is None:
raise HTTPException(403, "Invalid client in token")
if not request.client.check_redirect_uri(origin):
raise HTTPException(403, "Allowed redirect uri does not match request")
return None
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
class OtherUserInspection(UserInfoMixin):
async def create_response(self, request: TypedRequest, user_id: str, client_auth: dict = None) -> Response:
try:
assert isinstance(request, OAuth2Request)
if request.client is None:
request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
if request.token is None:
raise HTTPException(403, "Invalid token")
client_id = request.token.client_id
scopes = request.token.scope
scope = USERS_SCOPE
else:
client_id = request.client_id
scopes = request.client.allowed_scope
scope = scopes
if USERS_SCOPE not in scope_to_list(scopes):
raise InsufficientScopeError('Missing "*users" scope', request.uri)
user = await UserWithRoles.async_load(user_id, client_id)
if user is None:
raise HTTPException(404, "User not found")
user_info = await self.async_generate_user_info(user, scope)
return JSONResponse(user_info)
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
class OtherUsersInspection(UserInfoMixin):
async def create_response(self, request: TypedRequest) -> Response:
try:
assert isinstance(request, OAuth2Request)
if request.client is None:
request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
if request.token is None:
raise HTTPException(403, "Invalid token")
client_id = request.token.client_id
scopes = request.token.scope
scope = USERS_SCOPE
load_roles = False
else:
client_id = request.client_id
scopes = request.client.allowed_scope
scope = scopes
load_roles = True
if USERS_SCOPE not in scope_to_list(scopes):
raise InsufficientScopeError('Missing "*users" scope', request.uri)
user_infos = []
for user in await UserWithRoles.async_load_all(client_id, load_roles=load_roles):
user_info = await self.async_generate_user_info(user, scope)
if not load_roles:
del user_info['roles']
user_infos.append(user_info)
return JSONResponse(user_infos)
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
class TypeHint(str, Enum):
AccessToken = "access_token"
RefreshToken = "refresh_token"
class RevocationEndpoint:
async def create_response(
self, raw_token: str, token_type_hint: Optional[TypeHint], request: TypedRequest
) -> Response:
token_data = None
if token_type_hint is None or token_type_hint == TypeHint.AccessToken:
token_data = await async_token_collection.find_one({'_id': raw_token})
if token_data is None and (token_type_hint is None or token_type_hint == TypeHint.RefreshToken):
token_data = await async_token_collection.find_one({'refresh_token': raw_token})
if token_data is None:
return Response()
token = DbToken.validate_document(token_data)
try:
if request.client_id is None:
request.data['client_id'] = token.client_id
elif token.client_id != request.client_id:
raise InvalidClientError(state=request.state, status_code=401)
await run_in_threadpool(
authorization.authenticate_client, request, ["none", "client_secret_basic", "client_secret_post"]
)
# await async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}})
# token_collection.update_one({'_id': credential.access_token}, {'revoked': True})
await async_token_collection.delete_one({'_id': token.access_token})
return Response()
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
resource_protector = ResourceProtector()
resource_protector.register_token_validator(BearerTokenValidator())
user_introspection = UserIntrospection()
token_revocation = RevocationEndpoint()
request_origin_verifier = RequestOriginVerifier()
other_user_inspection = OtherUserInspection()
other_users_inspection = OtherUsersInspection()
| [
"user_manager.common.mongo.authorization_code_collection.find_one",
"user_manager.common.mongo.user_group_collection.find",
"user_manager.common.mongo.token_collection.delete_one",
"user_manager.common.mongo.async_user_group_collection.find",
"user_manager.common.mongo.authorization_code_collection.update_one",
"datetime.timedelta",
"authlib.oauth2.rfc6749.InvalidClientError",
"authlib.oauth2.rfc6750.InsufficientScopeError",
"authlib.oidc.core.grants.util.is_openid_scope",
"user_manager.common.models.DbClient.validate_document",
"starlette.concurrency.run_in_threadpool",
"user_manager.common.mongo.async_read_schema",
"user_manager.common.models.DbToken.validate_document",
"user_manager.common.models.DbAuthorizationCode.validate_document",
"user_manager.common.mongo.user_collection.find_one",
"user_manager.common.mongo.async_client_collection.find_one",
"authlib.common.security.generate_token",
"starlette.responses.Response",
"authlib.oauth2.rfc6749.util.scope_to_list",
"user_manager.common.models.DbUser.validate_document",
"time.time",
"fastapi.HTTPException",
"user_manager.common.mongo.async_token_collection.delete_one",
"user_manager.common.mongo.client_collection.find_one",
"authlib.oidc.core.UserInfo",
"datetime.datetime.utcnow",
"user_manager.common.mongo.token_collection.find_one",
"user_manager.common.mongo.async_token_collection.find_one",
"starlette.responses.JSONResponse",
"user_manager.common.mongo.client_user_cache_collection.count_documents",
"user_manager.common.mongo.read_schema",
"user_manager.common.mongo.authorization_code_collection.delete_one"
]
| [((12948, 12994), 'user_manager.common.mongo.client_collection.find_one', 'client_collection.find_one', (["{'_id': client_id}"], {}), "({'_id': client_id})\n", (12974, 12994), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((13054, 13093), 'user_manager.common.models.DbClient.validate_document', 'DbClient.validate_document', (['client_data'], {}), '(client_data)\n', (13080, 13093), False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((13278, 13317), 'user_manager.common.models.DbClient.validate_document', 'DbClient.validate_document', (['client_data'], {}), '(client_data)\n', (13304, 13317), False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((13356, 13398), 'authlib.common.security.generate_token', 'generate_token', (['config.oauth2.token_length'], {}), '(config.oauth2.token_length)\n', (13370, 13398), False, 'from authlib.common.security import generate_token\n'), ((5117, 5238), 'user_manager.common.mongo.authorization_code_collection.update_one', 'authorization_code_collection.update_one', (["{'client_id': request.client_id, 'nonce': nonce}", "{'$set': {'nonce': None}}"], {}), "({'client_id': request.client_id,\n 'nonce': nonce}, {'$set': {'nonce': None}})\n", (5157, 5238), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((7605, 7626), 'authlib.oidc.core.UserInfo', 'UserInfo', ([], {}), '(**user_data)\n', (7613, 7626), False, 'from authlib.oidc.core import UserInfo\n'), ((8922, 8943), 'authlib.oidc.core.UserInfo', 'UserInfo', ([], {}), '(**user_data)\n', (8930, 8943), False, 'from authlib.oidc.core import UserInfo\n'), ((9384, 9461), 'user_manager.common.mongo.authorization_code_collection.find_one', 'authorization_code_collection.find_one', (["{'_id': code, 'client_id': client.id}"], {}), "({'_id': code, 'client_id': client.id})\n", (9422, 9461), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((9541, 9594), 'user_manager.common.models.DbAuthorizationCode.validate_document', 'DbAuthorizationCode.validate_document', (['auth_code_data'], {}), '(auth_code_data)\n', (9578, 9594), False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((9770, 9844), 'user_manager.common.mongo.authorization_code_collection.delete_one', 'authorization_code_collection.delete_one', (["{'_id': authorization_code.code}"], {}), "({'_id': authorization_code.code})\n", (9810, 9844), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, 
async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((10464, 10562), 'user_manager.common.mongo.user_collection.find_one', 'user_collection.find_one', (["{'email': username, 'access_tokens.token': password, 'active': True}"], {}), "({'email': username, 'access_tokens.token':\n password, 'active': True})\n", (10488, 10562), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((11262, 11317), 'authlib.common.security.generate_token', 'generate_token', (['config.oauth2.authorization_code_length'], {}), '(config.oauth2.authorization_code_length)\n', (11276, 11317), False, 'from authlib.common.security import generate_token\n'), ((11680, 11739), 'user_manager.common.mongo.token_collection.find_one', 'token_collection.find_one', (["{'refresh_token': refresh_token}"], {}), "({'refresh_token': refresh_token})\n", (11705, 11739), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((11815, 11852), 'user_manager.common.models.DbToken.validate_document', 'DbToken.validate_document', (['token_data'], {}), '(token_data)\n', (11840, 11852), False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((12226, 12287), 'user_manager.common.mongo.token_collection.delete_one', 'token_collection.delete_one', (["{'_id': credential.access_token}"], {}), "({'_id': credential.access_token})\n", (12253, 12287), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((12459, 12470), 'time.time', 'time.time', ([], {}), '()\n', (12468, 12470), False, 'import time\n'), ((13166, 13218), 'user_manager.common.mongo.async_client_collection.find_one', 'async_client_collection.find_one', (["{'_id': client_id}"], {}), "({'_id': client_id})\n", (13198, 13218), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((15684, 15732), 'user_manager.common.mongo.token_collection.find_one', 'token_collection.find_one', (["{'_id': token_string}"], {}), "({'_id': token_string})\n", (15709, 15732), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((15804, 15841), 'user_manager.common.models.DbToken.validate_document', 'DbToken.validate_document', (['token_data'], {}), '(token_data)\n', (15829, 15841), False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, 
DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((21465, 21502), 'user_manager.common.models.DbToken.validate_document', 'DbToken.validate_document', (['token_data'], {}), '(token_data)\n', (21490, 21502), False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((3356, 3419), 'starlette.responses.JSONResponse', 'JSONResponse', (['payload'], {'status_code': 'status_code', 'headers': 'headers'}), '(payload, status_code=status_code, headers=headers)\n', (3368, 3419), False, 'from starlette.responses import Response, JSONResponse\n'), ((5914, 5934), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', (['scope'], {}), '(scope)\n', (5927, 5934), False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((6565, 6578), 'user_manager.common.mongo.read_schema', 'read_schema', ([], {}), '()\n', (6576, 6578), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((10654, 10689), 'user_manager.common.models.DbUser.validate_document', 'DbUser.validate_document', (['user_data'], {}), '(user_data)\n', (10678, 10689), False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((13764, 13775), 'time.time', 'time.time', ([], {}), '()\n', (13773, 13775), False, 'import time\n'), ((13864, 13884), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', (['scope'], {}), '(scope)\n', (13877, 13884), False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((14361, 14381), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', (['scope'], {}), '(scope)\n', (14374, 14381), False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((15853, 15959), 'user_manager.common.mongo.client_user_cache_collection.count_documents', 'client_user_cache_collection.count_documents', (["{'client_id': token.client_id, 'user_id': token.user_id}"], {}), "({'client_id': token.client_id,\n 'user_id': token.user_id})\n", (15897, 15959), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((17060, 17083), 'starlette.responses.JSONResponse', 'JSONResponse', (['user_info'], {}), '(user_info)\n', (17072, 17083), False, 'from starlette.responses import Response, JSONResponse\n'), ((19200, 19223), 'starlette.responses.JSONResponse', 'JSONResponse', (['user_info'], {}), '(user_info)\n', (19212, 19223), False, 'from starlette.responses import Response, JSONResponse\n'), ((20600, 20624), 'starlette.responses.JSONResponse', 'JSONResponse', (['user_infos'], {}), '(user_infos)\n', (20612, 20624), False, 'from starlette.responses import Response, JSONResponse\n'), ((21438, 21448), 'starlette.responses.Response', 'Response', ([], {}), '()\n', (21446, 21448), False, 'from starlette.responses import Response, JSONResponse\n'), ((22224, 22234), 'starlette.responses.Response', 'Response', ([], {}), '()\n', (22232, 22234), False, 'from starlette.responses import Response, JSONResponse\n'), ((4616, 4627), 'time.time', 'time.time', 
([], {}), '()\n', (4625, 4627), False, 'import time\n'), ((4654, 4671), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4669, 4671), False, 'from datetime import datetime, timedelta\n'), ((4674, 4742), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'config.oauth2.token_expiration.authorization_code'}), '(seconds=config.oauth2.token_expiration.authorization_code)\n', (4683, 4742), False, 'from datetime import datetime, timedelta\n'), ((7864, 7883), 'user_manager.common.mongo.async_read_schema', 'async_read_schema', ([], {}), '()\n', (7881, 7883), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((12638, 12655), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (12653, 12655), False, 'from datetime import datetime, timedelta\n'), ((13990, 14039), 'authlib.common.security.generate_token', 'generate_token', (['config.oauth2.access_token_length'], {}), '(config.oauth2.access_token_length)\n', (14004, 14039), False, 'from authlib.common.security import generate_token\n'), ((14995, 15017), 'authlib.oidc.core.grants.util.is_openid_scope', 'is_openid_scope', (['scope'], {}), '(scope)\n', (15010, 15017), False, 'from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token\n'), ((16674, 16743), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', (['resource_protector.validate_request', 'None', 'request'], {}), '(resource_protector.validate_request, None, request)\n', (16691, 16743), False, 'from starlette.concurrency import run_in_threadpool\n'), ((16804, 16839), 'fastapi.HTTPException', 'HTTPException', (['(403)', '"""Invalid token"""'], {}), "(403, 'Invalid token')\n", (16817, 16839), False, 'from fastapi import HTTPException\n'), ((17419, 17488), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', (['resource_protector.validate_request', 'None', 'request'], {}), '(resource_protector.validate_request, None, request)\n', (17436, 17488), False, 'from starlette.concurrency import run_in_threadpool\n'), ((17549, 17584), 'fastapi.HTTPException', 'HTTPException', (['(403)', '"""Invalid token"""'], {}), "(403, 'Invalid token')\n", (17562, 17584), False, 'from fastapi import HTTPException\n'), ((17725, 17770), 'fastapi.HTTPException', 'HTTPException', (['(403)', '"""Invalid client in token"""'], {}), "(403, 'Invalid client in token')\n", (17738, 17770), False, 'from fastapi import HTTPException\n'), ((17855, 17920), 'fastapi.HTTPException', 'HTTPException', (['(403)', '"""Allowed redirect uri does not match request"""'], {}), "(403, 'Allowed redirect uri does not match request')\n", (17868, 17920), False, 'from fastapi import HTTPException\n'), ((18843, 18864), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', (['scopes'], {}), '(scopes)\n', (18856, 18864), False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((18888, 18949), 'authlib.oauth2.rfc6750.InsufficientScopeError', 'InsufficientScopeError', (['"""Missing "*users" scope"""', 'request.uri'], {}), '(\'Missing "*users" scope\', request.uri)\n', (18910, 18949), False, 'from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, InsufficientScopeError\n'), ((19071, 19107), 'fastapi.HTTPException', 'HTTPException', (['(404)', '"""User not found"""'], {}), 
"(404, 'User not found')\n", (19084, 19107), False, 'from fastapi import HTTPException\n'), ((20152, 20173), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', (['scopes'], {}), '(scopes)\n', (20165, 20173), False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((20197, 20258), 'authlib.oauth2.rfc6750.InsufficientScopeError', 'InsufficientScopeError', (['"""Missing "*users" scope"""', 'request.uri'], {}), '(\'Missing "*users" scope\', request.uri)\n', (20219, 20258), False, 'from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, InsufficientScopeError\n'), ((21138, 21189), 'user_manager.common.mongo.async_token_collection.find_one', 'async_token_collection.find_one', (["{'_id': raw_token}"], {}), "({'_id': raw_token})\n", (21169, 21189), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((21326, 21387), 'user_manager.common.mongo.async_token_collection.find_one', 'async_token_collection.find_one', (["{'refresh_token': raw_token}"], {}), "({'refresh_token': raw_token})\n", (21357, 21387), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((21770, 21890), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', (['authorization.authenticate_client', 'request', "['none', 'client_secret_basic', 'client_secret_post']"], {}), "(authorization.authenticate_client, request, ['none',\n 'client_secret_basic', 'client_secret_post'])\n", (21787, 21890), False, 'from starlette.concurrency import run_in_threadpool\n'), ((22142, 22204), 'user_manager.common.mongo.async_token_collection.delete_one', 'async_token_collection.delete_one', (["{'_id': token.access_token}"], {}), "({'_id': token.access_token})\n", (22175, 22204), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((18353, 18422), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', (['resource_protector.validate_request', 'None', 'request'], {}), '(resource_protector.validate_request, None, request)\n', (18370, 18422), False, 'from starlette.concurrency import run_in_threadpool\n'), ((18491, 18526), 'fastapi.HTTPException', 'HTTPException', (['(403)', '"""Invalid token"""'], {}), "(403, 'Invalid token')\n", (18504, 18526), False, 'from fastapi import HTTPException\n'), ((19593, 19662), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', (['resource_protector.validate_request', 'None', 'request'], {}), '(resource_protector.validate_request, None, request)\n', (19610, 19662), False, 'from starlette.concurrency import run_in_threadpool\n'), ((19731, 19766), 'fastapi.HTTPException', 'HTTPException', (['(403)', '"""Invalid token"""'], {}), "(403, 'Invalid token')\n", (19744, 19766), False, 'from fastapi import HTTPException\n'), ((21695, 21751), 'authlib.oauth2.rfc6749.InvalidClientError', 'InvalidClientError', 
([], {'state': 'request.state', 'status_code': '(401)'}), '(state=request.state, status_code=401)\n', (21713, 21751), False, 'from authlib.oauth2.rfc6749 import InvalidClientError\n'), ((7151, 7300), 'user_manager.common.mongo.user_group_collection.find', 'user_group_collection.find', (["{'_id': {'$in': value}, 'visible': True, **group_filter}"], {'projection': "{('group_name' if group_by_name else '_id'): 1}"}), "({'_id': {'$in': value}, 'visible': True, **\n group_filter}, projection={('group_name' if group_by_name else '_id'): 1})\n", (7177, 7300), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((8462, 8621), 'user_manager.common.mongo.async_user_group_collection.find', 'async_user_group_collection.find', (["{'_id': {'$in': value}, 'visible': True, **group_filter}"], {'projection': "{('group_name' if group_by_name else '_id'): 1}"}), "({'_id': {'$in': value}, 'visible': True,\n **group_filter}, projection={('group_name' if group_by_name else '_id'): 1}\n )\n", (8494, 8621), False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n')] |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class FixedClassifier(Base):
"""Specifies the packets to apply this profile to. If there are multiple patterns enabled, they are ANDed: each packet must match all packets in order to be impaired by this profile.
The FixedClassifier class encapsulates a list of fixedClassifier resources that are managed by the user.
A list of resources can be retrieved from the server using the FixedClassifier.find() method.
The list can be managed by using the FixedClassifier.add() and FixedClassifier.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'fixedClassifier'
_SDM_ATT_MAP = {
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(FixedClassifier, self).__init__(parent, list_op)
@property
def Pattern(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern
if self._properties.get('Pattern', None) is not None:
return self._properties.get('Pattern')
else:
return Pattern(self)
def add(self):
"""Adds a new fixedClassifier resource on the server and adds it to the container.
Returns
-------
- self: This instance with all currently retrieved fixedClassifier resources using find and the newly added fixedClassifier resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained fixedClassifier resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self):
"""Finds and retrieves fixedClassifier resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve fixedClassifier resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all fixedClassifier resources from the server.
Returns
-------
- self: This instance with matching fixedClassifier resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of fixedClassifier data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the fixedClassifier resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
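
# Hedged usage sketch (not part of the generated file): `profile` below stands for an
# impairment profile node obtained elsewhere from an IxNetwork session, and only the
# methods documented above are used.
#
#   classifier = profile.FixedClassifier.add()    # create a fixedClassifier on the server
#   existing = profile.FixedClassifier.find()     # retrieve matching resources
#   pattern = classifier.Pattern                  # access the child Pattern node
#   classifier.remove()                           # delete the contained resources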
| [
"uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern"
]
| [((2601, 2614), 'uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern', 'Pattern', (['self'], {}), '(self)\n', (2608, 2614), False, 'from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern\n')] |
# sacher_epos.py, python wrapper for sacher epos motor
# <NAME> <<EMAIL>>, August 2014
#
"""
Possbily Maxon EPOS now
"""
"""
This is the actual version that works
But only in the lab32 virtual environment
"""
# from instrument import Instrument
# import qt
import ctypes
import ctypes.wintypes
import logging
import time
# from instrument import Instrument
from ctypes.wintypes import DWORD, WORD
import numpy as np
"""
okay so we import a bunch of modules
ctypes is what lets Python call the C functions exported by the EPOS Windows DLL
"""
# from subprocess import Popen, PIPE
# from multiprocessing.managers import BaseManager
# import atexit
# import os
# python32_dir = "C:\\Users\\Alex\\Miniconda3\\envs\\lab32"
# assert os.path.isdir(python32_dir)
# os.chdir(python32_dir)
# derp = "C:\\Users\\Alex\\Documents\\wow_such_code"
# assert os.path.isdir(derp)
# os.chdir(derp)
# p = Popen([python32_dir + "\\python.exe", derp + "\\delegate.py"], stdout=PIPE, cwd=derp)
# atexit.register(p.terminate)
# port = int(p.stdout.readline())
# authkey = p.stdout.read()
# print(port, authkey)
# m = BaseManager(address=("localhost", port), authkey=authkey)
# m.connect()
# tell manager to expect an attribute called LibC
# m.register("SacherLasaTeknique")
# access and use libc
# libc = m.SacherLasaTeknique()
# print(libc.vcs())
# eposlib = ctypes.windll.eposcmd
eposlib = ctypes.windll.LoadLibrary('C:\\Users\\Carbro\\Desktop\\Charmander\\EposCmd.dll')
DeviceName = b'EPOS'
ProtocolStackName = b'MAXON_RS232'
InterfaceName = b'RS232'
"""
Max on
Max off
but anyway it looks like ctypes is the thing that's talking to the epos dll
"""
HISTCHAN = 65536
TTREADMAX = 131072
RANGES = 8
MODE_HIST = 0
MODE_T2 = 2
MODE_T3 = 3
FLAG_OVERFLOW = 0x0040
FLAG_FIFOFULL = 0x0003
# in mV
ZCMIN = 0
ZCMAX = 20
DISCRMIN = 0
DISCRMAX = 800
# in ps
OFFSETMIN = 0
OFFSETMAX = 1000000000
# in ms
ACQTMIN = 1
ACQTMAX = 10 * 60 * 60 * 1000
# in mV
PHR800LVMIN = -1600
PHR800LVMAX = 2400
"""
a bunch of constants, none of which appear to be used below --
they look like they were carried over from a photon-counter driver (HISTCHAN, TTREADMAX, the ZC/DISCR/PHR800 limits, etc.)
"""
class Sacher_EPOS():
"""
ok before I dive into this giant Sacher class thing let me just list here all the functions that are being defined in this class:
check(self)
before
wreck(self)
ok but actually:
    __init__(self, name, address, reset=False)
    __del__(self)
    get_bit(byteval, idx)
    _u32todouble(uinput)
    _doubletou32(dinput)
    open(self)
    close(self)
    get_motor_current(self)
    find_home(self)
    restore(self)
    get_offset(self)
    fine_tuning_steps(self, steps)
    set_new_offset(self, new_offset)
    set_coeffs(self, a, b, c, min_wl, max_wl)
    get_motor_position(self)
    set_target_position(self, target, absolute, immediately)
    fuck_my_life(self, wavelength)
    do_get_wavelength(self)
    do_set_wavelength(self, wavelength)
    is_open(self)
    clear_fault(self)
    initialize(self)
    The last one is really long
    And also damn there are 22 of them
I'll comment about them as I go through them
"""
def __init__(self, name, address, reset=False):
# Instrument.__init__(self, name, tags=['physical'])
# self._port_name = str(address)
self._port_name = address
self._is_open = False
self._HPM = True
# self.add_parameter('wavelength',
# flags = Instrument.FLAG_GETSET,
# type = types.FloatType,
# units = 'nm',
# minval=1070.0,maxval=1180.0)
# self.add_function('open')
# self.add_function('close')
# self.add_function('fine_tuning_steps')
# self.add_function('get_motor_position')
# self.add_function('set_target_position')
# try:
self.open()
self.initialize()
# except:
# logging.error('Error loading Sacher EPOS motor. In use?')
"""
    __init__ just stores the port name and a couple of flags, then opens the
    connection and calls initialize(), which does the real setup work
"""
def __del__(self):
# execute disconnect
self.close()
return
"""
this might be the only self explanatory one
it disconnects
"""
@staticmethod
def get_bit(byteval, idx):
# def get_bit(self, byteval,idx):
return ((byteval & (1 << idx)) != 0)
"""
    get_bit(byteval, idx) returns True when bit number idx of byteval is set,
    False otherwise -- just a mask and compare
"""
@staticmethod
def _u32todouble(uinput):
# def _u32todouble(self, uinput):
# this function implements the really weird/non-standard U32 to
# floating point conversion in the sacher VIs
# get sign of number
sign = Sacher_EPOS.get_bit(uinput, 31)
if sign == False:
mantissa_sign = 1
elif sign == True:
mantissa_sign = -1
exp_mask = 0b111111
# print 'uin u is %d' % uinput
# print 'type uin %s' % type(uinput)
# print 'binary input is %s' % bin(long(uinput))
# get sign of exponent
if Sacher_EPOS.get_bit(uinput, 7) == False:
exp_sign = 1
elif Sacher_EPOS.get_bit(uinput, 7) == True:
exp_sign = -1
# print 'exp extract %s' % bin(int(uinput & exp_mask))
# print 'exp conv %s' % (exp_sign*int(uinput & exp_mask))
# print 'sign of exponent %s' % self.get_bit(uinput,7)
# print 'binary constant is %s' % bin(int(0b10000000000000000000000000000000))
mantissa_mask = 0b01111111111111111111111100000000
# mantissa_mask = 0b0111111111111111111111110000000
# print 'mantissa extract is %s' % bin((uinput & mantissa_mask) >> 8)
mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >> 8)
# print 'mantissa is %.12f' % mantissa
# print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa, 1 if Sacher_EPOS.get_bit(uinput,7) else 0, uinput & exp_mask)
output = mantissa * 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask)))
# print 'output is %s' % output
return output
"""
    There's a "really weird/non-standard U32 to floating point conversion in the sacher VIs".
    A U32 is just an unsigned 32-bit integer; the Sacher VIs pack a floating point
    number into one, and this method unpacks it -- see the bit-layout sketch right
    after this block.
"""
@staticmethod
def _doubletou32(dinput):
mantissa_bit = 0 if int(dinput / abs(dinput)) > 0 else 1
exp_bit = 1 if -1 < dinput < 1 else 0
b = np.ceil(np.log10(abs(dinput)))
a = dinput / 10 ** b
if dinput < 0:
a = -a
# print('a:\t{}\tb:\t{}'.format(a, b))
d = np.log2(10) * b
d_ = np.ceil(d)
c = a * 2 ** (d - d_)
# print('c:\t{}\td_:{}\toriginal:\t{}'.format(c, d_, c * 2 ** d_))
return (int(mantissa_bit) << 31) + (int(c * 1e6) << 8) + (int(exp_bit) << 7) + int(abs(d_))
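    # Hedged sanity check (the mantissa is truncated to 1e-6 resolution, so the round
    # trip is only approximate):
    #   raw = Sacher_EPOS._doubletou32(8.49218e-5)
    #   Sacher_EPOS._u32todouble(raw)   # ~8.49e-05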
def open(self):
eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
ctypes.POINTER(DWORD)]
eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE
buf = ctypes.pointer(DWORD(0))
ret = ctypes.wintypes.HANDLE()
# print 'types are all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName), type(self._port_name), type(buf))
ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf)
self._keyhandle = ret
# print 'keyhandle is %s' % self._keyhandle
# print 'open device ret %s' % buf
# print 'printing'
# print buf.contents.value
# print 'done printer'
if int(buf.contents.value) >= 0:
self._is_open = True
self._keyhandle = ret
return
"""
    open() opens the serial connection to the EPOS controller via VCS_OpenDevice
    and stashes the returned key handle for all later calls
"""
def close(self):
print('closing EPOS motor.')
eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)]
eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL
buf = ctypes.pointer(DWORD(0))
ret = ctypes.wintypes.BOOL()
ret = eposlib.VCS_CloseDevice(self._keyhandle, buf)
# print 'close device returned %s' % buf
if int(buf.contents.value) >= 0:
self._is_open = False
else:
logging.error(__name__ + ' did not close Sacher EPOS motor correctly.')
return
"""
    close() releases the device handle via VCS_CloseDevice and marks the
    connection as closed
"""
def get_motor_current(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL
motorCurrent = ctypes.c_uint8(0)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf))
return motorCurrent.value
"""
    Reads the instantaneous motor current via VCS_GetCurrentIs and returns it
"""
def find_home(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_uint8,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf))
print('Homing: {}'.format(ret))
return ret
"""
    Starts a homing move via VCS_FindHome (homing method 35) and returns the call's status
"""
def restore(self):
nodeID = ctypes.wintypes.WORD(0)
        # note: the original declared this prototype on VCS_FindHome, but the call
        # below is VCS_Restore, so declare VCS_Restore's prototype instead
        eposlib.VCS_Restore.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                        ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_Restore.restype = ctypes.wintypes.BOOL
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf))
print('Restore: {}'.format(ret))
return ret
"""
    Calls VCS_Restore to restore the controller's default parameters
"""
def get_offset(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32))
if ret == 0:
logging.error(__name__ + ' Could not read stored position from Sacher EPOS motor')
return CastedObjectData[0]
"""
    Reads the stored position offset ("home position") from object index 8321 via VCS_GetObject
"""
def fine_tuning_steps(self, steps):
current_motor_pos = self.get_motor_position()
self._offset = self.get_offset()
self.set_target_position(steps, False, True)
new_motor_pos = self.get_motor_position()
# print('New motor position is %s' % new_motor_pos)
# print 'new offset is %s' % (new_motor_pos-current_motor_pos+self._offset)
self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
"""
    Moves the motor by a relative number of steps, then updates the stored offset
    by the amount the motor actually moved
"""
def set_new_offset(self, new_offset):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
# print 'setting new offset'
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
    Writes a new stored position offset to object index 8321 via VCS_SetObject
"""
def set_coeffs(self, a, b, c, min_wl, max_wl):
print('')
print("setting coefficients...")
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
# print 'setting new offset'
d = (min_wl << 16) + max_wl
StoredPositionObject = ctypes.wintypes.WORD(8204)
for subidx, coeff in enumerate([a, b, c]):
print(subidx, coeff)
StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff))
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
StoredPositionObjectSubindex = ctypes.c_uint8(4)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(d)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
    Writes the wavelength calibration coefficients a, b, c (as packed U32s) and the
    packed min/max wavelength to object index 8204
"""
def get_motor_position(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
pPosition = ctypes.pointer(ctypes.c_long())
eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL
ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf))
# print 'get motor position ret %s' % ret
# print 'get motor position buf %s' % buf.value
# print 'get motor position value %s' % pPosition.contents.value
return pPosition.contents.value
# print('getting motor position...')
# print(ret)
# return print(pPosition.contents.value)
"""
    Returns the current motor position via VCS_GetPositionIs
"""
def set_target_position(self, target, absolute, immediately):
# print('check #1')
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# First, set enabled state
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf))
# print('Enable state ret %s buf %s' % (ret, buf.value))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
pTarget = ctypes.c_long(target)
pAbsolute = ctypes.wintypes.BOOL(absolute)
pImmediately = ctypes.wintypes.BOOL(immediately)
eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
ctypes.wintypes.BOOL, ctypes.wintypes.BOOL,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
# print('check #2')
# print('About to set motor position')
# print('Current motor position is %d' % (self.get_motor_position()))
ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('set motor position ret %s' % ret)
# print('set motor position buf %s' % buf.value)
steps_per_second = 14494.0 # hardcoded, estimated roughly, unused now
nchecks = 0
# print('check #3')
while nchecks < 1000:
# get the movement state. a movement state of 1 indicates the motor
# is done moving
# print('')
# print('check #4')
# print('Motor current: {}'.format(self.get_motor_current()))
print('Motor position: {}'.format(self.get_motor_position()))
# print('Motor offset: {}'.format(self.get_offset()))
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
# print(pMovementState.contents.value)
eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.BOOL),
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
# print('Getting movement state')
ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
# print('set motor position ret %s' % ret)
# print('set motor position buf %s' % buf.value)
# print('Movement state is %s' % pMovementState.contents.value)
if pMovementState.contents.value == 1:
break
nchecks = nchecks + 1
# print('Current motor position is %d' % self.get_motor_position())
# print('check #5')
# print(nchecks)
# print('')
time.sleep(0.01)
# Now set disabled state
ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
# print('check #6')
# print('Disable state ret %s buf %s' % (ret, buf.value))
# print('Final motor position is %d' % (self.get_motor_position()))
# print('check #7')
return ret
"""
    Enables the drive, issues VCS_MoveToPosition, polls VCS_GetMovementState until the
    move finishes (or 1000 checks pass), then disables the drive again
"""
def fuck_my_life(self, wavelength):
print('goddamn this piece of shit')
print('')
print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
print(b2a)
print(np.sqrt(sqrtarg))
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
print('wavelength_to_pos: {}'.format(wavelength_to_pos))
print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))
print('self._offset: {}'.format(int(self._offset)))
"""
    Debug helper: runs the same wavelength-to-position math as do_set_wavelength and
    prints the intermediate values without moving the motor
"""
def do_get_wavelength(self):
self._offset = self.get_offset()
# self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC
self._currentwl = self._doubleA * (
self.get_motor_position()) ** 2.0 + self._doubleB * self.get_motor_position() + self._doubleC
print('Current wavelength: %.3f nm' % self._currentwl)
return self._currentwl
"""
    Computes the current wavelength from the motor position using the quadratic
    calibration coefficients A, B, C
"""
def do_set_wavelength(self, wavelength):
print('setting wavelength...')
print('')
# print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
# x is what the motor position should be
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
# print('Diff wavelength offset %s' % diff_wavelength_offset)
# Step 5: If HPM is activated and the wavelength position is lower, overshoot
# the movement by 10,000 steps
# print('Step 5...')
# print('#4 Motor current: {}'.format(self.get_motor_current()))
if 1 == 2:
print('uh-oh')
# if self._HPM and diff_wavelength_offset < 0:
#
# print('Overshooting by 10000')
#
# self.set_target_position(diff_wavelength_offset - 10000, False, True)
# # Step 6: Set the real target position
#
# """
# HEY LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE STUPID THING THAT'S NOT WORKING!
# """
#
# #print('Step 6a... diff wavelength')
#
# self.set_target_position(10000, False, True)
else:
# print('Step 6b... diff wavelength')
# self.set_target_position(diff_wavelength_offset, False, True)
"""WRONG"""
self.set_target_position(wavelength_to_pos, True, True)
"""this is the real shit right here
I need to set the absolute position to true
"""
# self.set_target_position(10000, False, True)
# Step 7: Get the actual motor position
new_motor_pos = self.get_motor_position()
# print('New motor position is %s' % new_motor_pos)
# print('new offset is %s' % (new_motor_pos-current_motor_pos+self._offset))
self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
# Step 8, get and print current wavelength
# print('Current wavelength is %.3f' % self.do_get_wavelength())
# print('setting wavelength done')
return
"""
    Converts the requested wavelength to a motor position with the quadratic
    calibration, moves there (absolute move), and updates the stored offset
"""
def is_open(self):
return self._is_open
"""
    Returns True if the device connection is open
"""
def clear_fault(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
print('clear fault buf %s, ret %s' % (buf, ret))
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
"""
    Clears a fault state on the controller via VCS_ClearFault, raising on failure
"""
def initialize(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
BaudRate = DWORD(38400)
Timeout = DWORD(100)
ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf))
# print 'set protocol buf %s ret %s' % (buf, ret)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
# eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
# print 'clear fault buf %s, ret %s' % (buf, ret)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
buf = ctypes.wintypes.DWORD(0)
plsenabled = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_GetEnableState(self._keyhandle, nodeID, ctypes.byref(plsenabled), ctypes.byref(buf))
# print 'get enable state buf %s ret %s and en %s' % (buf, ret, plsenabled)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
if int(plsenabled.value) != 0:
logging.warning(__name__ + ' EPOS motor enabled, disabling before proceeding.')
ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
if int(ret) != 0:
logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding')
else:
logging.error(__name__ + ' EPOS motor was not successfully disabled!')
buf = ctypes.wintypes.DWORD(0)
Counts = WORD(512) # incremental encoder counts in pulses per turn
PositionSensorType = WORD(4)
ret = eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID, Counts, PositionSensorType, ctypes.byref(buf))
## if ret == int(0):
## print 'errr'
## errbuf = ctypes.create_string_buffer(64)
## print 'sending'
## eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL
## print 'boolerrorinfo'
## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD]
## print 'arg'
##
## ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64))
## print 'err'
## raise ValueError(errbuf.value)
# For some reason, it appears normal in the LabVIEW code that this
# function actually returns an error, i.e. the return value is zero
# and the buffer has a non-zero error code in it; the LabVIEW code
# doesn't check it.
# Also, it appears that in the 2005 version of this DLL, the function
# VCS_GetErrorInfo doesn't exist!
# Get operation mode, check if it's 1 -- this is "profile position mode"
buf = ctypes.wintypes.DWORD(0)
pMode = ctypes.pointer(ctypes.c_int8())
eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL
ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))
# if mode is not 1, make it 1
if pMode.contents.value != 1:
eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL
pMode_setting = ctypes.c_int8(1)
ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))
eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD())
pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD())
pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())
ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
pProfileDeceleration, ctypes.byref(buf))
print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value, pProfileDeceleration.contents.value)
if (int(pProfileVelocity.contents.value) > int(11400) or int(pProfileAcceleration.contents.value) > int(
60000) or int(pProfileDeceleration.contents.value) > int(60000)):
            # note: the original declared this prototype on VCS_GetPositionProfile,
            # but the call below is VCS_SetPositionProfile, so declare that instead
            eposlib.VCS_SetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                       ctypes.wintypes.DWORD, ctypes.wintypes.DWORD,
                                                       ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_SetPositionProfile.restype = ctypes.wintypes.BOOL
pProfileVelocity = ctypes.wintypes.DWORD(429)
pProfileAcceleration = ctypes.wintypes.DWORD(429)
pProfileDeceleration = ctypes.wintypes.DWORD(429)
logging.warning(__name__ + ' GetPositionProfile out of bounds, resetting...')
ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
pProfileDeceleration, ctypes.byref(buf))
# Now get the motor position (stored position offset)
# from the device's "homposition" object
self._offset = self.get_offset()
# Now read the stored 'calculation parameters'
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# More hardcoded values
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(1)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefA = CastedObjectData[0]
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# Get coefficient B
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(2)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefB = CastedObjectData[0]
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(3)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefC = CastedObjectData[0]
# Get coefficient D
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(4)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefD = CastedObjectData[0]
# print 'coefficients are %s %s %s %s' % (self._coefA, self._coefB, self._coefC, self._coefD)
self._doubleA = self._u32todouble(self._coefA)
self._doubleB = self._u32todouble(self._coefB)
self._doubleC = self._u32todouble(self._coefC)
firstHalf = np.int16(self._coefD >> 16)
secondHalf = np.int16(self._coefD & 0xffff)
# Set the minimum and maximum wavelengths for the motor
self._minwl = float(firstHalf) / 10.0
self._maxwl = float(secondHalf) / 10.0
# print 'first %s second %s' % (firstHalf, secondHalf)
# This returns '10871' and '11859' for the Sacher, which are the correct
# wavelength ranges in Angstroms
# print 'Now calculate the current wavelength position:'
self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC
print('Current wavelength: %.3f nm' % self._currentwl)
print('initializing done')
return True
"""
    Sets up the serial protocol, clears faults, checks/disables the enable state,
    configures the encoder and profile position mode, sanity-checks the position
    profile, then reads the stored offset and calibration coefficients and computes
    the current wavelength
"""
"""
Also we're done with the Sacher_EPOS() class at this point
"""
if __name__ == '__main__':
epos = Sacher_EPOS(None, b'COM3')
# epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860)
# epos.do_get_wavelength()
# print('#1 Motor current: {}'.format(epos.get_motor_current()))
# epos.do_get_wavelength()
# print('motor position is...')
# current_pos = epos.get_motor_position()
# print('current position is {}'.format(current_pos))
# new_pos = current_pos + 10000
# epos.set_target_position(new_pos, True, True)
# print(epos.get_motor_position())
# print('#2 Motor current: {}'.format(epos.get_motor_current()))
# epos.find_home()
# epos.restore()
# time.sleep(7)
epos.do_set_wavelength(1151.5)
# epos.do_get_wavelength()
print('Motor current: {}'.format(epos.get_motor_current()))
print('Motor position: {}'.format(epos.get_motor_position()))
"""
OTHER MISC. NOTES:
increasing wavelength:
causes the square to rotate left
causes base to move to the left when square is stuck in
causes screw to loosen
causes large gold base to tighten
decreasing wavelength:
there's an overshoot when lowering wavelength
causes the square to rotate right
causes base to move to the right when square is stuck in
causes screw to tighten
causes large gold base to loosen, and also unplug the motor
Also you don't need to explicitly run epos.initialize() because there's an __init__ function which contains epos.initialize()
"""
# womp the end
| [
"numpy.sqrt",
"time.sleep",
"ctypes.create_string_buffer",
"ctypes.c_void_p",
"logging.error",
"ctypes.c_int8",
"ctypes.wintypes.DWORD",
"ctypes.windll.LoadLibrary",
"numpy.ceil",
"numpy.int16",
"logging.warning",
"ctypes.wintypes.WORD",
"ctypes.wintypes.BOOL",
"ctypes.wintypes.HANDLE",
"numpy.log2",
"ctypes.c_long",
"ctypes.byref",
"ctypes.POINTER",
"ctypes.c_uint8"
]
| [((1377, 1462), 'ctypes.windll.LoadLibrary', 'ctypes.windll.LoadLibrary', (['"""C:\\\\Users\\\\Carbro\\\\Desktop\\\\Charmander\\\\EposCmd.dll"""'], {}), "('C:\\\\Users\\\\Carbro\\\\Desktop\\\\Charmander\\\\EposCmd.dll'\n )\n", (1402, 1462), False, 'import ctypes\n'), ((6700, 6710), 'numpy.ceil', 'np.ceil', (['d'], {}), '(d)\n', (6707, 6710), True, 'import numpy as np\n'), ((7232, 7256), 'ctypes.wintypes.HANDLE', 'ctypes.wintypes.HANDLE', ([], {}), '()\n', (7254, 7256), False, 'import ctypes\n'), ((8290, 8312), 'ctypes.wintypes.BOOL', 'ctypes.wintypes.BOOL', ([], {}), '()\n', (8310, 8312), False, 'import ctypes\n'), ((8860, 8883), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (8880, 8883), False, 'import ctypes\n'), ((9179, 9196), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(0)'], {}), '(0)\n', (9193, 9196), False, 'import ctypes\n'), ((9211, 9235), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (9232, 9235), False, 'import ctypes\n'), ((9477, 9500), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (9497, 9500), False, 'import ctypes\n'), ((9759, 9783), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (9780, 9783), False, 'import ctypes\n'), ((10036, 10059), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (10056, 10059), False, 'import ctypes\n'), ((10302, 10326), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (10323, 10326), False, 'import ctypes\n'), ((10562, 10585), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (10582, 10585), False, 'import ctypes\n'), ((10600, 10624), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (10621, 10624), False, 'import ctypes\n'), ((11194, 11220), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(8321)'], {}), '(8321)\n', (11214, 11220), False, 'import ctypes\n'), ((11260, 11277), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(0)'], {}), '(0)\n', (11274, 11277), False, 'import ctypes\n'), ((11316, 11340), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(4)'], {}), '(4)\n', (11337, 11340), False, 'import ctypes\n'), ((11362, 11379), 'ctypes.c_void_p', 'ctypes.c_void_p', ([], {}), '()\n', (11377, 11379), False, 'import ctypes\n'), ((12764, 12787), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (12784, 12787), False, 'import ctypes\n'), ((12802, 12826), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (12823, 12826), False, 'import ctypes\n'), ((13307, 13333), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(8321)'], {}), '(8321)\n', (13327, 13333), False, 'import ctypes\n'), ((13373, 13390), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(0)'], {}), '(0)\n', (13387, 13390), False, 'import ctypes\n'), ((13430, 13454), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(4)'], {}), '(4)\n', (13451, 13454), False, 'import ctypes\n'), ((14269, 14292), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (14289, 14292), False, 'import ctypes\n'), ((14307, 14331), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (14328, 14331), False, 'import ctypes\n'), ((14848, 14874), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(8204)'], {}), '(8204)\n', (14868, 14874), False, 'import ctypes\n'), ((15675, 15692), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(4)'], {}), '(4)\n', (15689, 15692), False, 'import ctypes\n'), ((15732, 15756), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', 
(['(4)'], {}), '(4)\n', (15753, 15756), False, 'import ctypes\n'), ((16577, 16600), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (16597, 16600), False, 'import ctypes\n'), ((16615, 16639), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (16636, 16639), False, 'import ctypes\n'), ((17562, 17585), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (17582, 17585), False, 'import ctypes\n'), ((17600, 17624), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (17621, 17624), False, 'import ctypes\n'), ((18562, 18583), 'ctypes.c_long', 'ctypes.c_long', (['target'], {}), '(target)\n', (18575, 18583), False, 'import ctypes\n'), ((18604, 18634), 'ctypes.wintypes.BOOL', 'ctypes.wintypes.BOOL', (['absolute'], {}), '(absolute)\n', (18624, 18634), False, 'import ctypes\n'), ((18658, 18691), 'ctypes.wintypes.BOOL', 'ctypes.wintypes.BOOL', (['immediately'], {}), '(immediately)\n', (18678, 18691), False, 'import ctypes\n'), ((22231, 22254), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (22251, 22254), False, 'import ctypes\n'), ((22269, 22293), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (22290, 22293), False, 'import ctypes\n'), ((25009, 25032), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (25029, 25032), False, 'import ctypes\n'), ((25047, 25071), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (25068, 25071), False, 'import ctypes\n'), ((28773, 28796), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (28793, 28796), False, 'import ctypes\n'), ((28811, 28835), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (28832, 28835), False, 'import ctypes\n'), ((29248, 29271), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(0)'], {}), '(0)\n', (29268, 29271), False, 'import ctypes\n'), ((29286, 29310), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (29307, 29310), False, 'import ctypes\n'), ((29330, 29342), 'ctypes.wintypes.DWORD', 'DWORD', (['(38400)'], {}), '(38400)\n', (29335, 29342), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((29361, 29371), 'ctypes.wintypes.DWORD', 'DWORD', (['(100)'], {}), '(100)\n', (29366, 29371), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((29730, 29754), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (29751, 29754), False, 'import ctypes\n'), ((30085, 30109), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (30106, 30109), False, 'import ctypes\n'), ((30131, 30155), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (30152, 30155), False, 'import ctypes\n'), ((30991, 31015), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (31012, 31015), False, 'import ctypes\n'), ((31033, 31042), 'ctypes.wintypes.WORD', 'WORD', (['(512)'], {}), '(512)\n', (31037, 31042), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((31121, 31128), 'ctypes.wintypes.WORD', 'WORD', (['(4)'], {}), '(4)\n', (31125, 31128), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((32355, 32379), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (32376, 32379), False, 'import ctypes\n'), ((36092, 36118), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(8204)'], {}), '(8204)\n', (36112, 36118), False, 'import ctypes\n'), ((36158, 36175), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(1)'], {}), 
'(1)\n', (36172, 36175), False, 'import ctypes\n'), ((36214, 36238), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(4)'], {}), '(4)\n', (36235, 36238), False, 'import ctypes\n'), ((36260, 36277), 'ctypes.c_void_p', 'ctypes.c_void_p', ([], {}), '()\n', (36275, 36277), False, 'import ctypes\n'), ((37377, 37403), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(8204)'], {}), '(8204)\n', (37397, 37403), False, 'import ctypes\n'), ((37443, 37460), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(2)'], {}), '(2)\n', (37457, 37460), False, 'import ctypes\n'), ((37499, 37523), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(4)'], {}), '(4)\n', (37520, 37523), False, 'import ctypes\n'), ((37545, 37562), 'ctypes.c_void_p', 'ctypes.c_void_p', ([], {}), '()\n', (37560, 37562), False, 'import ctypes\n'), ((38783, 38809), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(8204)'], {}), '(8204)\n', (38803, 38809), False, 'import ctypes\n'), ((38849, 38866), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(3)'], {}), '(3)\n', (38863, 38866), False, 'import ctypes\n'), ((38905, 38929), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(4)'], {}), '(4)\n', (38926, 38929), False, 'import ctypes\n'), ((38951, 38968), 'ctypes.c_void_p', 'ctypes.c_void_p', ([], {}), '()\n', (38966, 38968), False, 'import ctypes\n'), ((40218, 40244), 'ctypes.wintypes.WORD', 'ctypes.wintypes.WORD', (['(8204)'], {}), '(8204)\n', (40238, 40244), False, 'import ctypes\n'), ((40284, 40301), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(4)'], {}), '(4)\n', (40298, 40301), False, 'import ctypes\n'), ((40340, 40364), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(4)'], {}), '(4)\n', (40361, 40364), False, 'import ctypes\n'), ((40386, 40403), 'ctypes.c_void_p', 'ctypes.c_void_p', ([], {}), '()\n', (40401, 40403), False, 'import ctypes\n'), ((41341, 41368), 'numpy.int16', 'np.int16', (['(self._coefD >> 16)'], {}), '(self._coefD >> 16)\n', (41349, 41368), True, 'import numpy as np\n'), ((41390, 41419), 'numpy.int16', 'np.int16', (['(self._coefD & 65535)'], {}), '(self._coefD & 65535)\n', (41398, 41419), True, 'import numpy as np\n'), ((6671, 6682), 'numpy.log2', 'np.log2', (['(10)'], {}), '(10)\n', (6678, 6682), True, 'import numpy as np\n'), ((7092, 7113), 'ctypes.POINTER', 'ctypes.POINTER', (['DWORD'], {}), '(DWORD)\n', (7106, 7113), False, 'import ctypes\n'), ((7208, 7216), 'ctypes.wintypes.DWORD', 'DWORD', (['(0)'], {}), '(0)\n', (7213, 7216), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((8151, 8172), 'ctypes.POINTER', 'ctypes.POINTER', (['DWORD'], {}), '(DWORD)\n', (8165, 8172), False, 'import ctypes\n'), ((8266, 8274), 'ctypes.wintypes.DWORD', 'DWORD', (['(0)'], {}), '(0)\n', (8271, 8274), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((8526, 8597), 'logging.error', 'logging.error', (["(__name__ + ' did not close Sacher EPOS motor correctly.')"], {}), "(__name__ + ' did not close Sacher EPOS motor correctly.')\n", (8539, 8597), False, 'import logging\n'), ((9020, 9050), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint8'], {}), '(ctypes.c_uint8)\n', (9034, 9050), False, 'import ctypes\n'), ((9052, 9089), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (9066, 9089), False, 'import ctypes\n'), ((9300, 9326), 'ctypes.byref', 'ctypes.byref', (['motorCurrent'], {}), '(motorCurrent)\n', (9312, 9326), False, 'import ctypes\n'), ((9328, 9345), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (9340, 9345), False, 'import ctypes\n'), ((9645, 9682), 
'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (9659, 9682), False, 'import ctypes\n'), ((9844, 9862), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(35)'], {}), '(35)\n', (9858, 9862), False, 'import ctypes\n'), ((9864, 9881), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (9876, 9881), False, 'import ctypes\n'), ((10188, 10225), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (10202, 10225), False, 'import ctypes\n'), ((10386, 10403), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (10398, 10403), False, 'import ctypes\n'), ((10875, 10912), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (10889, 10912), False, 'import ctypes\n'), ((10914, 10951), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (10928, 10951), False, 'import ctypes\n'), ((11480, 11510), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int32'], {}), '(ctypes.c_int32)\n', (11494, 11510), False, 'import ctypes\n'), ((11563, 11587), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (11584, 11587), False, 'import ctypes\n'), ((11842, 11859), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (11854, 11859), False, 'import ctypes\n'), ((11954, 11984), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int32'], {}), '(ctypes.c_int32)\n', (11968, 11984), False, 'import ctypes\n'), ((12019, 12105), 'logging.error', 'logging.error', (["(__name__ + ' Could not read stored position from Sacher EPOS motor')"], {}), "(__name__ +\n ' Could not read stored position from Sacher EPOS motor')\n", (12032, 12105), False, 'import logging\n'), ((12995, 13032), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (13009, 13032), False, 'import ctypes\n'), ((13099, 13136), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (13113, 13136), False, 'import ctypes\n'), ((13138, 13175), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (13152, 13175), False, 'import ctypes\n'), ((13566, 13597), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (13580, 13597), False, 'import ctypes\n'), ((13653, 13677), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (13674, 13677), False, 'import ctypes\n'), ((13936, 13953), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (13948, 13953), False, 'import ctypes\n'), ((13989, 14076), 'logging.error', 'logging.error', (["(__name__ + ' Could not write stored position from Sacher EPOS motor')"], {}), "(__name__ +\n ' Could not write stored position from Sacher EPOS motor')\n", (14002, 14076), False, 'import logging\n'), ((14500, 14537), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (14514, 14537), False, 'import ctypes\n'), ((14604, 14641), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (14618, 14641), False, 'import ctypes\n'), ((14643, 14680), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (14657, 14680), False, 'import ctypes\n'), ((15002, 15028), 'ctypes.c_uint8', 'ctypes.c_uint8', (['(subidx + 1)'], {}), '(subidx + 1)\n', (15016, 15028), False, 'import ctypes\n'), ((15072, 15096), 'ctypes.wintypes.DWORD', 
'ctypes.wintypes.DWORD', (['(4)'], {}), '(4)\n', (15093, 15096), False, 'import ctypes\n'), ((15858, 15889), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (15872, 15889), False, 'import ctypes\n'), ((15945, 15969), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (15966, 15969), False, 'import ctypes\n'), ((16228, 16245), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (16240, 16245), False, 'import ctypes\n'), ((16373, 16460), 'logging.error', 'logging.error', (["(__name__ + ' Could not write stored position from Sacher EPOS motor')"], {}), "(__name__ +\n ' Could not write stored position from Sacher EPOS motor')\n", (16386, 16460), False, 'import logging\n'), ((16675, 16690), 'ctypes.c_long', 'ctypes.c_long', ([], {}), '()\n', (16688, 16690), False, 'import ctypes\n'), ((16830, 16859), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_long'], {}), '(ctypes.c_long)\n', (16844, 16859), False, 'import ctypes\n'), ((16861, 16898), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (16875, 16898), False, 'import ctypes\n'), ((17041, 17058), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (17053, 17058), False, 'import ctypes\n'), ((18093, 18110), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (18105, 18110), False, 'import ctypes\n'), ((18939, 18976), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (18953, 18976), False, 'import ctypes\n'), ((19300, 19317), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (19312, 19317), False, 'import ctypes\n'), ((21540, 21556), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (21550, 21556), False, 'import time\n'), ((21657, 21674), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (21669, 21674), False, 'import ctypes\n'), ((23397, 23489), 'logging.error', 'logging.error', (["(__name__ + ' Negative value under square root sign -- something is wrong')"], {}), "(__name__ +\n ' Negative value under square root sign -- something is wrong')\n", (23410, 23489), False, 'import logging\n'), ((23700, 23716), 'numpy.sqrt', 'np.sqrt', (['sqrtarg'], {}), '(sqrtarg)\n', (23707, 23716), True, 'import numpy as np\n'), ((26175, 26267), 'logging.error', 'logging.error', (["(__name__ + ' Negative value under square root sign -- something is wrong')"], {}), "(__name__ +\n ' Negative value under square root sign -- something is wrong')\n", (26188, 26267), False, 'import logging\n'), ((28898, 28915), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (28910, 28915), False, 'import ctypes\n'), ((29016, 29047), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['(64)'], {}), '(64)\n', (29043, 29047), False, 'import ctypes\n'), ((29459, 29476), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (29471, 29476), False, 'import ctypes\n'), ((29578, 29609), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['(64)'], {}), '(64)\n', (29605, 29609), False, 'import ctypes\n'), ((29817, 29834), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (29829, 29834), False, 'import ctypes\n'), ((29936, 29967), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['(64)'], {}), '(64)\n', (29963, 29967), False, 'import ctypes\n'), ((30222, 30246), 'ctypes.byref', 'ctypes.byref', (['plsenabled'], {}), '(plsenabled)\n', (30234, 30246), False, 'import ctypes\n'), ((30248, 30265), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), 
'(buf)\n', (30260, 30265), False, 'import ctypes\n'), ((30393, 30424), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['(64)'], {}), '(64)\n', (30420, 30424), False, 'import ctypes\n'), ((30580, 30659), 'logging.warning', 'logging.warning', (["(__name__ + ' EPOS motor enabled, disabling before proceeding.')"], {}), "(__name__ + ' EPOS motor enabled, disabling before proceeding.')\n", (30595, 30659), False, 'import logging\n'), ((31228, 31245), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (31240, 31245), False, 'import ctypes\n'), ((32411, 32426), 'ctypes.c_int8', 'ctypes.c_int8', ([], {}), '()\n', (32424, 32426), False, 'import ctypes\n'), ((32572, 32601), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int8'], {}), '(ctypes.c_int8)\n', (32586, 32601), False, 'import ctypes\n'), ((32603, 32640), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (32617, 32640), False, 'import ctypes\n'), ((32785, 32802), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (32797, 32802), False, 'import ctypes\n'), ((33186, 33202), 'ctypes.c_int8', 'ctypes.c_int8', (['(1)'], {}), '(1)\n', (33199, 33202), False, 'import ctypes\n'), ((33457, 33494), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (33471, 33494), False, 'import ctypes\n'), ((33547, 33584), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (33561, 33584), False, 'import ctypes\n'), ((33637, 33674), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (33651, 33674), False, 'import ctypes\n'), ((33727, 33764), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (33741, 33764), False, 'import ctypes\n'), ((33878, 33901), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', ([], {}), '()\n', (33899, 33901), False, 'import ctypes\n'), ((33949, 33972), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', ([], {}), '()\n', (33970, 33972), False, 'import ctypes\n'), ((34020, 34043), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', ([], {}), '()\n', (34041, 34043), False, 'import ctypes\n'), ((34223, 34240), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (34235, 34240), False, 'import ctypes\n'), ((34984, 35010), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(429)'], {}), '(429)\n', (35005, 35010), False, 'import ctypes\n'), ((35046, 35072), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(429)'], {}), '(429)\n', (35067, 35072), False, 'import ctypes\n'), ((35108, 35134), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(429)'], {}), '(429)\n', (35129, 35134), False, 'import ctypes\n'), ((35147, 35224), 'logging.warning', 'logging.warning', (["(__name__ + ' GetPositionProfile out of bounds, resetting...')"], {}), "(__name__ + ' GetPositionProfile out of bounds, resetting...')\n", (35162, 35224), False, 'import logging\n'), ((35889, 35926), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (35903, 35926), False, 'import ctypes\n'), ((35928, 35965), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (35942, 35965), False, 'import ctypes\n'), ((36378, 36409), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (36392, 36409), False, 'import ctypes\n'), ((36462, 36486), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', 
(36483, 36486), False, 'import ctypes\n'), ((36741, 36758), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (36753, 36758), False, 'import ctypes\n'), ((36852, 36883), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (36866, 36883), False, 'import ctypes\n'), ((37178, 37215), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (37192, 37215), False, 'import ctypes\n'), ((37217, 37254), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (37231, 37254), False, 'import ctypes\n'), ((37663, 37694), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (37677, 37694), False, 'import ctypes\n'), ((37747, 37771), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (37768, 37771), False, 'import ctypes\n'), ((38026, 38043), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (38038, 38043), False, 'import ctypes\n'), ((38137, 38168), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (38151, 38168), False, 'import ctypes\n'), ((38463, 38500), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (38477, 38500), False, 'import ctypes\n'), ((38502, 38539), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (38516, 38539), False, 'import ctypes\n'), ((39069, 39100), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (39083, 39100), False, 'import ctypes\n'), ((39153, 39177), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (39174, 39177), False, 'import ctypes\n'), ((39432, 39449), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (39444, 39449), False, 'import ctypes\n'), ((39543, 39574), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (39557, 39574), False, 'import ctypes\n'), ((39898, 39935), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (39912, 39935), False, 'import ctypes\n'), ((39937, 39974), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (39951, 39974), False, 'import ctypes\n'), ((40504, 40535), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (40518, 40535), False, 'import ctypes\n'), ((40588, 40612), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (40609, 40612), False, 'import ctypes\n'), ((40867, 40884), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (40879, 40884), False, 'import ctypes\n'), ((40978, 41009), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (40992, 41009), False, 'import ctypes\n'), ((15230, 15261), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (15244, 15261), False, 'import ctypes\n'), ((15321, 15345), 'ctypes.wintypes.DWORD', 'ctypes.wintypes.DWORD', (['(0)'], {}), '(0)\n', (15342, 15345), False, 'import ctypes\n'), ((15616, 15633), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (15628, 15633), False, 'import ctypes\n'), ((20480, 20502), 'ctypes.wintypes.BOOL', 'ctypes.wintypes.BOOL', ([], {}), '()\n', (20500, 20502), False, 'import ctypes\n'), ((20708, 20744), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.BOOL'], {}), '(ctypes.wintypes.BOOL)\n', (20722, 20744), False, 'import 
ctypes\n'), ((20799, 20836), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (20813, 20836), False, 'import ctypes\n'), ((21044, 21061), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (21056, 21061), False, 'import ctypes\n'), ((23581, 23597), 'numpy.sqrt', 'np.sqrt', (['sqrtarg'], {}), '(sqrtarg)\n', (23588, 23597), True, 'import numpy as np\n'), ((26359, 26375), 'numpy.sqrt', 'np.sqrt', (['sqrtarg'], {}), '(sqrtarg)\n', (26366, 26375), True, 'import numpy as np\n'), ((29098, 29106), 'ctypes.wintypes.WORD', 'WORD', (['(64)'], {}), '(64)\n', (29102, 29106), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((30018, 30026), 'ctypes.wintypes.WORD', 'WORD', (['(64)'], {}), '(64)\n', (30022, 30026), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((30475, 30483), 'ctypes.wintypes.WORD', 'WORD', (['(64)'], {}), '(64)\n', (30479, 30483), False, 'from ctypes.wintypes import DWORD, WORD\n'), ((30731, 30748), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (30743, 30748), False, 'import ctypes\n'), ((30796, 30871), 'logging.warning', 'logging.warning', (["(__name__ + ' EPOS motor successfully disabled, proceeding')"], {}), "(__name__ + ' EPOS motor successfully disabled, proceeding')\n", (30811, 30871), False, 'import logging\n'), ((30906, 30976), 'logging.error', 'logging.error', (["(__name__ + ' EPOS motor was not successfully disabled!')"], {}), "(__name__ + ' EPOS motor was not successfully disabled!')\n", (30919, 30976), False, 'import logging\n'), ((33047, 33084), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (33061, 33084), False, 'import ctypes\n'), ((33290, 33307), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (33302, 33307), False, 'import ctypes\n'), ((34840, 34877), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.wintypes.DWORD'], {}), '(ctypes.wintypes.DWORD)\n', (34854, 34877), False, 'import ctypes\n'), ((35410, 35427), 'ctypes.byref', 'ctypes.byref', (['buf'], {}), '(buf)\n', (35422, 35427), False, 'import ctypes\n'), ((23649, 23665), 'numpy.sqrt', 'np.sqrt', (['sqrtarg'], {}), '(sqrtarg)\n', (23656, 23665), True, 'import numpy as np\n'), ((26427, 26443), 'numpy.sqrt', 'np.sqrt', (['sqrtarg'], {}), '(sqrtarg)\n', (26434, 26443), True, 'import numpy as np\n')] |
from django.utils.encoding import force_str
from django.utils.functional import keep_lazy
from django.utils.safestring import SafeText, mark_safe
_json_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
_json_escapes_attr = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
    ord('"'): '&#34;',
    ord("'"): '&#39;',
    ord("="): '&#61;',
}
@keep_lazy(str, SafeText)
def escapejson(value):
    """Hex encodes characters for use in an application/json type script."""
return mark_safe(force_str(value).translate(_json_escapes))
@keep_lazy(str, SafeText)
def escapejson_attr(value):
    """Hex encodes characters for use in an HTML attribute."""
return mark_safe(force_str(value).translate(_json_escapes_attr))
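# Illustrative behaviour (these example strings are not part of the original module):
#   escapejson('{"tag": "<b>"}')  produces  '{"tag": "\\u003Cb\\u003E"}'
# Angle brackets and ampersands become \uXXXX escapes, so the value can be embedded
# safely inside a <script type="application/json"> block; escapejson_attr additionally
# escapes quotes and '=' so the value can sit inside an HTML attribute.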
| [
"django.utils.encoding.force_str",
"django.utils.functional.keep_lazy"
]
| [((415, 439), 'django.utils.functional.keep_lazy', 'keep_lazy', (['str', 'SafeText'], {}), '(str, SafeText)\n', (424, 439), False, 'from django.utils.functional import keep_lazy\n'), ((606, 630), 'django.utils.functional.keep_lazy', 'keep_lazy', (['str', 'SafeText'], {}), '(str, SafeText)\n', (615, 630), False, 'from django.utils.functional import keep_lazy\n'), ((560, 576), 'django.utils.encoding.force_str', 'force_str', (['value'], {}), '(value)\n', (569, 576), False, 'from django.utils.encoding import force_str\n'), ((749, 765), 'django.utils.encoding.force_str', 'force_str', (['value'], {}), '(value)\n', (758, 765), False, 'from django.utils.encoding import force_str\n')] |
# File: Converting_RGB_to_GreyScale.py
# Description: Opening RGB image as array, converting to GreyScale and saving result into new file
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 <NAME>
# github.com/sichkar-valentyn
#
# Reference to:
# <NAME>. Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603
# Opening RGB image as array, converting to GreyScale and saving result into new file
# Importing needed libraries
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage import color
from skimage import io
import scipy.misc
# Creating an array from image data
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
# Checking the type of the array
print(type(image_np)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_np.shape)
# Showing image with every channel separately
channel_R = image_np[:, :, 0]
channel_G = image_np[:, :, 1]
channel_B = image_np[:, :, 2]
# Creating a figure with subplots
f, ax = plt.subplots(nrows=2, ncols=2)
# ax is (2, 2) np array and to make it easier to read we use 'flatten' function
# Or we can call each time ax[0, 0]
ax0, ax1, ax2, ax3 = ax.flatten()
# Adjusting first subplot
ax0.imshow(channel_R, cmap='Reds')
ax0.set_xlabel('')
ax0.set_ylabel('')
ax0.set_title('Red channel')
# Adjusting second subplot
ax1.imshow(channel_G, cmap='Greens')
ax1.set_xlabel('')
ax1.set_ylabel('')
ax1.set_title('Green channel')
# Adjusting third subplot
ax2.imshow(channel_B, cmap='Blues')
ax2.set_xlabel('')
ax2.set_ylabel('')
ax2.set_title('Blue channel')
# Adjusting fourth subplot
ax3.imshow(image_np)
ax3.set_xlabel('')
ax3.set_ylabel('')
ax3.set_title('Original image')
# Function to make distance between figures
plt.tight_layout()
# Giving the name to the window with figure
f.canvas.set_window_title('Eagle image in three channels R, G and B')
# Showing the plots
plt.show()
# Converting RGB image into GrayScale image
# Using formula:
# Y' = 0.299 R + 0.587 G + 0.114 B
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:, :, 1] * 0.587 + image_np[:, :, 2] * 0.114
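# The same weighting can also be written as a single dot product (equivalent alternative):
# image_GreyScale = image_np @ np.array([0.299, 0.587, 0.114])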
# Checking the type of the array
print(type(image_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Preparing array for saving - creating three channels with the same data in each
# Firstly, creating array with zero elements
# And by 'image_GreyScale.shape + tuple([3])' we add one more element '3' to the tuple
# Now the shape will be (1080, 1920, 3) - which is tuple type
image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3]))
# Secondly, reshaping GreyScale image from 2D to 3D
x = image_GreyScale.reshape((1080, 1920, 1))
# Finally, writing all data in three channels
image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0]
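# (An equivalent one-liner for the three assignments above: np.dstack([image_GreyScale] * 3).)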
# Saving image into a file from obtained 3D array
scipy.misc.imsave("images/result_1.jpg", image_GreyScale_with_3_channels)
# Checking that image was written with three channels and they are identical
result_1 = Image.open("images/result_1.jpg")
result_1_np = np.array(result_1)
print(result_1_np.shape)
print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1]))
print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2]))
# Showing the saved resulting image
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Here we don't need to specify the map like cmap='Greys'
plt.imshow(result_1_np)
plt.show()
# Another way to convert RGB image into GreyScale image
image_RGB = io.imread("images/eagle.jpg")
image_GreyScale = color.rgb2gray(image_RGB)
# Checking the type of the array
print(type(image_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_2.jpg", image_GreyScale)
# One more way for converting
image_RGB_as_GreyScale = io.imread("images/eagle.jpg", as_gray=True)
# Checking the type of the array
print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_RGB_as_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_RGB_as_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_3.jpg", image_RGB_as_GreyScale)
| [
"matplotlib.pyplot.imshow",
"skimage.color.rgb2gray",
"PIL.Image.open",
"numpy.array",
"matplotlib.pyplot.figure",
"skimage.io.imread",
"numpy.array_equal",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
| [((673, 703), 'PIL.Image.open', 'Image.open', (['"""images/eagle.jpg"""'], {}), "('images/eagle.jpg')\n", (683, 703), False, 'from PIL import Image\n'), ((715, 734), 'numpy.array', 'np.array', (['image_RGB'], {}), '(image_RGB)\n', (723, 734), True, 'import numpy as np\n'), ((1054, 1084), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (1066, 1084), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1811), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1809, 1811), True, 'import matplotlib.pyplot as plt\n'), ((1946, 1956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1954, 1956), True, 'import matplotlib.pyplot as plt\n'), ((2067, 2097), 'PIL.Image.open', 'Image.open', (['"""images/eagle.jpg"""'], {}), "('images/eagle.jpg')\n", (2077, 2097), False, 'from PIL import Image\n'), ((2109, 2128), 'numpy.array', 'np.array', (['image_RGB'], {}), '(image_RGB)\n', (2117, 2128), True, 'import numpy as np\n'), ((2425, 2464), 'matplotlib.pyplot.figure', 'plt.figure', (['"""GreyScaled image from RGB"""'], {}), "('GreyScaled image from RGB')\n", (2435, 2464), True, 'import matplotlib.pyplot as plt\n'), ((2509, 2550), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_GreyScale'], {'cmap': '"""Greys"""'}), "(image_GreyScale, cmap='Greys')\n", (2519, 2550), True, 'import matplotlib.pyplot as plt\n'), ((2551, 2561), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2559, 2561), True, 'import matplotlib.pyplot as plt\n'), ((3434, 3467), 'PIL.Image.open', 'Image.open', (['"""images/result_1.jpg"""'], {}), "('images/result_1.jpg')\n", (3444, 3467), False, 'from PIL import Image\n'), ((3482, 3500), 'numpy.array', 'np.array', (['result_1'], {}), '(result_1)\n', (3490, 3500), True, 'import numpy as np\n'), ((3733, 3772), 'matplotlib.pyplot.figure', 'plt.figure', (['"""GreyScaled image from RGB"""'], {}), "('GreyScaled image from RGB')\n", (3743, 3772), True, 'import matplotlib.pyplot as plt\n'), ((3831, 3854), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result_1_np'], {}), '(result_1_np)\n', (3841, 3854), True, 'import matplotlib.pyplot as plt\n'), ((3855, 3865), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3863, 3865), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3965), 'skimage.io.imread', 'io.imread', (['"""images/eagle.jpg"""'], {}), "('images/eagle.jpg')\n", (3945, 3965), False, 'from skimage import io\n'), ((3984, 4009), 'skimage.color.rgb2gray', 'color.rgb2gray', (['image_RGB'], {}), '(image_RGB)\n', (3998, 4009), False, 'from skimage import color\n'), ((4206, 4245), 'matplotlib.pyplot.figure', 'plt.figure', (['"""GreyScaled image from RGB"""'], {}), "('GreyScaled image from RGB')\n", (4216, 4245), True, 'import matplotlib.pyplot as plt\n'), ((4290, 4331), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_GreyScale'], {'cmap': '"""Greys"""'}), "(image_GreyScale, cmap='Greys')\n", (4300, 4331), True, 'import matplotlib.pyplot as plt\n'), ((4332, 4342), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4340, 4342), True, 'import matplotlib.pyplot as plt\n'), ((4516, 4559), 'skimage.io.imread', 'io.imread', (['"""images/eagle.jpg"""'], {'as_gray': '(True)'}), "('images/eagle.jpg', as_gray=True)\n", (4525, 4559), False, 'from skimage import io\n'), ((4770, 4809), 'matplotlib.pyplot.figure', 'plt.figure', (['"""GreyScaled image from RGB"""'], {}), "('GreyScaled image from RGB')\n", (4780, 4809), True, 'import matplotlib.pyplot as plt\n'), ((4854, 4902), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['image_RGB_as_GreyScale'], {'cmap': '"""Greys"""'}), "(image_RGB_as_GreyScale, cmap='Greys')\n", (4864, 4902), True, 'import matplotlib.pyplot as plt\n'), ((4903, 4913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4911, 4913), True, 'import matplotlib.pyplot as plt\n'), ((3532, 3590), 'numpy.array_equal', 'np.array_equal', (['result_1_np[:, :, 0]', 'result_1_np[:, :, 1]'], {}), '(result_1_np[:, :, 0], result_1_np[:, :, 1])\n', (3546, 3590), True, 'import numpy as np\n'), ((3598, 3656), 'numpy.array_equal', 'np.array_equal', (['result_1_np[:, :, 1]', 'result_1_np[:, :, 2]'], {}), '(result_1_np[:, :, 1], result_1_np[:, :, 2])\n', (3612, 3656), True, 'import numpy as np\n')] |
# coding=UTF-8
# ex:ts=4:sw=4:et=on
#
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
from mvc.models.properties import StringProperty
from pyxrd.generic.io.custom_io import storables, Storable
from pyxrd.generic.models.base import DataModel
from pyxrd.refinement.refinables.mixins import RefinementGroup
@storables.register()
class InSituBehaviour(DataModel, RefinementGroup, Storable):
"""
Interface class for coding in-situ behaviour scripts.
Sub-classes should override or implement the methods below.
"""
# MODEL INTEL:
class Meta(DataModel.Meta):
store_id = "InSituBehaviour" # Override this so it is a unique string
concrete = False # Indicates this cannot be instantiated and added in the UI
mixture = property(DataModel.parent.fget, DataModel.parent.fset)
# REFINEMENT GROUP IMPLEMENTATION:
@property
def refine_title(self):
return "In-situ behaviour"
@property
def refine_descriptor_data(self):
return dict(
phase_name=self.phase.refine_title,
component_name="*"
)
#: The name of this Behaviour
name = StringProperty(
default="New Behaviour", text="Name",
visible=True, persistent=True, tabular=True
)
# ------------------------------------------------------------
# Initialization and other internals
# ------------------------------------------------------------
def __init__(self, *args, **kwargs):
my_kwargs = self.pop_kwargs(kwargs,
*[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()]
)
super(InSituBehaviour, self).__init__(*args, **kwargs)
kwargs = my_kwargs
with self.data_changed.hold():
self.name = self.get_kwarg(kwargs, self.name, "name")
pass #end of constructor
# ------------------------------------------------------------
# Methods & Functions
# ------------------------------------------------------------
def apply(self, phase):
assert phase is not None, "Cannot apply on None"
assert self.is_compatible_with(phase), "`%r` is not compatible with phase `%r`" % (self, phase)
def is_compatible_with(self, phase):
return False # sub classes need to override this
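    # A concrete subclass would typically look like the following sketch
    # (hypothetical, not part of pyxrd):
    #
    #   @storables.register()
    #   class MyBehaviour(InSituBehaviour):
    #       class Meta(InSituBehaviour.Meta):
    #           store_id = "MyBehaviour"  # unique storage id
    #           concrete = True           # selectable in the UI
    #
    #       def apply(self, phase):
    #           super(MyBehaviour, self).apply(phase)
    #           # ... modify the phase properties here ...
    #
    #       def is_compatible_with(self, phase):
    #           return phase is not None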
    pass #end of class
| [
"pyxrd.generic.io.custom_io.storables.register",
"mvc.models.properties.StringProperty"
]
| [((367, 387), 'pyxrd.generic.io.custom_io.storables.register', 'storables.register', ([], {}), '()\n', (385, 387), False, 'from pyxrd.generic.io.custom_io import storables, Storable\n'), ((1214, 1315), 'mvc.models.properties.StringProperty', 'StringProperty', ([], {'default': '"""New Behaviour"""', 'text': '"""Name"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)'}), "(default='New Behaviour', text='Name', visible=True,\n persistent=True, tabular=True)\n", (1228, 1315), False, 'from mvc.models.properties import StringProperty\n')] |
"""
Remove comments from bib file.
"""
from textx import metamodel_for_language
from txbibtex import bibentry_str
BIB_FILE = 'references.bib'
bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE)
# Drop line comments.
print('\n'.join([bibentry_str(e) for e in bibfile.entries
if e.__class__.__name__ != 'BibLineComment']))
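# To write the cleaned entries to a new file instead of printing them, a sketch
# (the output path is a placeholder):
#   with open('references_nocomments.bib', 'w') as f:
#       f.write('\n'.join(bibentry_str(e) for e in bibfile.entries
#                          if e.__class__.__name__ != 'BibLineComment'))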
| [
"txbibtex.bibentry_str",
"textx.metamodel_for_language"
]
| [((153, 185), 'textx.metamodel_for_language', 'metamodel_for_language', (['"""bibtex"""'], {}), "('bibtex')\n", (175, 185), False, 'from textx import metamodel_for_language\n'), ((252, 267), 'txbibtex.bibentry_str', 'bibentry_str', (['e'], {}), '(e)\n', (264, 267), False, 'from txbibtex import bibentry_str\n')] |
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create resource policy command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils as compute_api
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.resource_policies import flags
from googlecloudsdk.command_lib.compute.resource_policies import util
def _CommonArgs(parser, api_version):
"""A helper function to build args based on different API version."""
messages = apis.GetMessagesModule('compute', api_version)
flags.MakeResourcePolicyArg().AddArgument(parser)
flags.AddCommonArgs(parser)
flags.AddGroupPlacementArgs(parser, messages)
parser.display_info.AddCacheUpdater(None)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateGroupPlacement(base.CreateCommand):
"""Create a Google Compute Engine Group Placement Resource Policy."""
@staticmethod
def Args(parser):
_CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
messages = holder.client.messages
resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages)
create_request = messages.ComputeResourcePoliciesInsertRequest(
resourcePolicy=resource_policy,
project=policy_ref.project,
region=policy_ref.region)
service = holder.client.apitools_client.resourcePolicies
return client.MakeRequests([(service, 'Insert', create_request)])[0]
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateGroupPlacementBeta(CreateGroupPlacement):
"""Create a Google Compute Engine Group Placement Resource Policy."""
@staticmethod
def Args(parser):
_CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION)
CreateGroupPlacement.detailed_help = {
'DESCRIPTION':
"""\
Create a Google Compute Engine Group Placement Resource Policy.
""",
'EXAMPLES':
"""\
To create a Google Compute Engine Group Placement Resource policy with 2 VMs and 2 availability domains, run:
$ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2
"""
}
| [
"googlecloudsdk.command_lib.compute.flags.GetDefaultScopeLister",
"googlecloudsdk.calliope.base.ReleaseTracks",
"googlecloudsdk.command_lib.compute.resource_policies.flags.MakeResourcePolicyArg",
"googlecloudsdk.command_lib.compute.resource_policies.util.MakeGroupPlacementPolicy",
"googlecloudsdk.command_lib.compute.resource_policies.flags.AddCommonArgs",
"googlecloudsdk.api_lib.util.apis.GetMessagesModule",
"googlecloudsdk.command_lib.compute.resource_policies.flags.AddGroupPlacementArgs"
]
| [((1538, 1581), 'googlecloudsdk.calliope.base.ReleaseTracks', 'base.ReleaseTracks', (['base.ReleaseTrack.ALPHA'], {}), '(base.ReleaseTrack.ALPHA)\n', (1556, 1581), False, 'from googlecloudsdk.calliope import base\n'), ((2544, 2586), 'googlecloudsdk.calliope.base.ReleaseTracks', 'base.ReleaseTracks', (['base.ReleaseTrack.BETA'], {}), '(base.ReleaseTrack.BETA)\n', (2562, 2586), False, 'from googlecloudsdk.calliope import base\n'), ((1314, 1360), 'googlecloudsdk.api_lib.util.apis.GetMessagesModule', 'apis.GetMessagesModule', (['"""compute"""', 'api_version'], {}), "('compute', api_version)\n", (1336, 1360), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((1415, 1442), 'googlecloudsdk.command_lib.compute.resource_policies.flags.AddCommonArgs', 'flags.AddCommonArgs', (['parser'], {}), '(parser)\n', (1434, 1442), False, 'from googlecloudsdk.command_lib.compute.resource_policies import flags\n'), ((1445, 1490), 'googlecloudsdk.command_lib.compute.resource_policies.flags.AddGroupPlacementArgs', 'flags.AddGroupPlacementArgs', (['parser', 'messages'], {}), '(parser, messages)\n', (1472, 1490), False, 'from googlecloudsdk.command_lib.compute.resource_policies import flags\n'), ((2170, 2227), 'googlecloudsdk.command_lib.compute.resource_policies.util.MakeGroupPlacementPolicy', 'util.MakeGroupPlacementPolicy', (['policy_ref', 'args', 'messages'], {}), '(policy_ref, args, messages)\n', (2199, 2227), False, 'from googlecloudsdk.command_lib.compute.resource_policies import util\n'), ((1363, 1392), 'googlecloudsdk.command_lib.compute.resource_policies.flags.MakeResourcePolicyArg', 'flags.MakeResourcePolicyArg', ([], {}), '()\n', (1390, 1392), False, 'from googlecloudsdk.command_lib.compute.resource_policies import flags\n'), ((1947, 1976), 'googlecloudsdk.command_lib.compute.resource_policies.flags.MakeResourcePolicyArg', 'flags.MakeResourcePolicyArg', ([], {}), '()\n', (1974, 1976), False, 'from googlecloudsdk.command_lib.compute.resource_policies import flags\n'), ((2057, 2107), 'googlecloudsdk.command_lib.compute.flags.GetDefaultScopeLister', 'compute_flags.GetDefaultScopeLister', (['holder.client'], {}), '(holder.client)\n', (2092, 2107), True, 'from googlecloudsdk.command_lib.compute import flags as compute_flags\n')] |
# Generated by Django 3.1.2 on 2020-10-29 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wishes', '0004_auto_20201029_0857'),
]
operations = [
migrations.AlterField(
model_name='gallery',
name='image',
field=models.FilePathField(path='/images'),
),
]
| [
"django.db.models.FilePathField"
]
| [((335, 371), 'django.db.models.FilePathField', 'models.FilePathField', ([], {'path': '"""/images"""'}), "(path='/images')\n", (355, 371), False, 'from django.db import migrations, models\n')] |
import math
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
import sys
import os
sys.path.append(os.path.abspath('../DecisionTree'))
from DecisionTree import DecisionTree
class RandomForest(BaseEstimator):
"""
Simple implementation of Random Forest.
    This class implements both the Random Forest classifier and regressor.
    Dataset bagging is done by a simple numpy random choice with replacement.
    For classification, the prediction is the majority vote of all estimators.
    For regression, the prediction is the average of all estimator predictions.
Args:
n_estimators Number of base estimators (Decision Trees here)
max_features Maximum features to be used to construct tree.
Default:
- If classifier, default is square root of total
features.
- If regressor, default is total number of features.
        max_depth         The maximum depth to which estimators need to be constructed.
                          Default: np.inf
        min_samples_split Minimum number of samples that must be present at a node
                          for it to be split.
Default: 2
criterion criterion to be used for split.
For classification tree following criterion are supported:
- gini
- entropy
For regression tree following criterion are supported:
- mse (mean squared error)
- mae (mean absolute error)
Default: gini
random_seed random seed value for numpy operations.
Default: 0
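    Example (illustrative sketch; X is an (n_samples, n_features) array, y the labels):
        forest = RandomForest(n_estimators=10, criterion='gini', random_seed=42)
        forest.fit(X_train, y_train)
        predictions = forest.predict(X_test)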
"""
def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2,
criterion='gini', random_seed=0):
self.n_estimators = n_estimators
self.max_features = max_features
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.criterion = criterion
self.random_seed = random_seed
self.idxs = []
self.trees = []
for i in range(self.n_estimators):
self.trees.append(DecisionTree(max_depth= self.max_depth,
min_samples_split=self.min_samples_split,
max_features = self.max_features,
criterion=self.criterion,
random_seed = self.random_seed))
self.is_classification_forest = False
if self.criterion == 'gini' or self.criterion == 'entropy':
self.is_classification_forest = True
elif self.criterion == 'mse' or self.criterion == 'mae':
self.is_classification_forest = False
else:
raise Exception("Invalid criterion: {}".format(self.criterion))
def get_subsets(self, X, y, num=1):
subsets = []
if len(np.shape(y)) == 1:
y = np.expand_dims(y, axis=1)
Xy = np.concatenate((X, y), axis=1)
num_samples = X.shape[0]
np.random.shuffle(Xy)
rng = np.random.default_rng(seed= self.random_seed)
for _ in range(num):
idx = rng.choice(
range(num_samples),
size = np.shape(range(int(num_samples)), ),
replace=True
)
subsets.append([X[idx], y[idx]])
return subsets
def fit(self, X, y):
np.random.seed(self.random_seed)
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
subsets = self.get_subsets(X, y, self.n_estimators)
if self.max_features == 0:
if self.is_classification_forest:
self.max_features = int(math.sqrt(X.shape[1]))
else:
self.max_features = int(X.shape[1])
# Bagging - choose random features for each estimator
# if max_features is provided, else use square root of
# total number of features.
for i, _ in enumerate(self.trees):
self.trees[i].max_features = self.max_features
X_sub, y_sub = subsets[i]
self.trees[i].fit(X_sub, y_sub)
def predict(self, X):
all_preds = np.empty((X.shape[0], self.n_estimators))
for i, tree in enumerate(self.trees):
preds = tree.predict(X)
all_preds[:, i] = preds
y_preds = []
for preds in all_preds:
if self.is_classification_forest:
y_preds.append(np.bincount(preds.astype('int')).argmax())
else:
y_preds.append(np.average(preds))
        return y_preds
| [
"numpy.random.default_rng",
"numpy.average",
"math.sqrt",
"DecisionTree.DecisionTree",
"numpy.empty",
"numpy.random.seed",
"numpy.concatenate",
"numpy.expand_dims",
"os.path.abspath",
"numpy.shape",
"numpy.random.shuffle"
]
| [((127, 161), 'os.path.abspath', 'os.path.abspath', (['"""../DecisionTree"""'], {}), "('../DecisionTree')\n", (142, 161), False, 'import os\n'), ((3279, 3309), 'numpy.concatenate', 'np.concatenate', (['(X, y)'], {'axis': '(1)'}), '((X, y), axis=1)\n', (3293, 3309), True, 'import numpy as np\n'), ((3351, 3372), 'numpy.random.shuffle', 'np.random.shuffle', (['Xy'], {}), '(Xy)\n', (3368, 3372), True, 'import numpy as np\n'), ((3387, 3431), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'self.random_seed'}), '(seed=self.random_seed)\n', (3408, 3431), True, 'import numpy as np\n'), ((3750, 3782), 'numpy.random.seed', 'np.random.seed', (['self.random_seed'], {}), '(self.random_seed)\n', (3764, 3782), True, 'import numpy as np\n'), ((4556, 4597), 'numpy.empty', 'np.empty', (['(X.shape[0], self.n_estimators)'], {}), '((X.shape[0], self.n_estimators))\n', (4564, 4597), True, 'import numpy as np\n'), ((3239, 3264), 'numpy.expand_dims', 'np.expand_dims', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3253, 3264), True, 'import numpy as np\n'), ((2374, 2552), 'DecisionTree.DecisionTree', 'DecisionTree', ([], {'max_depth': 'self.max_depth', 'min_samples_split': 'self.min_samples_split', 'max_features': 'self.max_features', 'criterion': 'self.criterion', 'random_seed': 'self.random_seed'}), '(max_depth=self.max_depth, min_samples_split=self.\n min_samples_split, max_features=self.max_features, criterion=self.\n criterion, random_seed=self.random_seed)\n', (2386, 2552), False, 'from DecisionTree import DecisionTree\n'), ((3204, 3215), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (3212, 3215), True, 'import numpy as np\n'), ((4034, 4055), 'math.sqrt', 'math.sqrt', (['X.shape[1]'], {}), '(X.shape[1])\n', (4043, 4055), False, 'import math\n'), ((4940, 4957), 'numpy.average', 'np.average', (['preds'], {}), '(preds)\n', (4950, 4957), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 09:49:47 2020
@author: james.z.hare
"""
from src.UnitModule import UnitClass, advance
from copy import deepcopy
import math
class ProjectileClass(UnitClass):
"""
The Projectile Class
    This is a subclass of the UnitClass
Virtual Functions
-----------------
- `__copy__()` to make shallow copies
- `__deepcopy__(memo)` to make deep copies
- `possibleActions(State)` to identify legal actions
- `observe(Unit)` to observe units located within VisibleRange
- `overlaps(Unit)` to identify if the unit overlaps with another unit
- `execute(Action, State)` to execute the action
Attributes
----------
ID:
a unique identifier of this unit
Owner:
the player the unit belongs to
Health:
the health of the unit
Extent:
the space occupied by unit
Position:
location of unit
Orientation:
as the name says
VisibleRange:
how far the unit can observe
Actions: dict
dictionary of actions common accross all units
ActionOptions:
list of list of action options.
Attack:
int that defines whether the unit is attacking in an advance action
    RemainingLifetime:
int that defines the total number of turns until the unit is dead
"""
def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf):
UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1))
self.Actions = { "advance": lambda x: advance(self, x) }
self.ActionOptions = ( ( "advance", ), )
self.Attack = None
self.RemainingLifetime = RemainingLifetime
def __copy__(self):
Duplicate = ProjectileClass(self.ID, self.Owner, self.Health)
Duplicate.Position = self.Position
Duplicate.Orientation = self.Orientation
Duplicate.Attack = self.Attack
Duplicate.RemainingLifetime = self.RemainingLifetime
return Duplicate
def __deepcopy__(self, memo):
Default = None
Exists = memo.get(self, Default)
if Exists is not Default:
return Exists
Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo))
Duplicate.Position = deepcopy(self.Position, memo)
Duplicate.Orientation = deepcopy(self.Orientation, memo)
Duplicate.Attack = deepcopy(self.Attack, memo)
Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo)
memo[self] = Duplicate
return Duplicate
def possibleActions(self, State):
"""
Identifies the set of feasible actions given the board size and position of the unit
Parameters
----------
State: StateClass
Returns
-------
TrueActions: list[str]
A list of the feasible actions
"""
return self.ActionOptions
def observe(self, Unit):
if Unit.ID == self.ID:
return Unit
return None
def overlaps(self, Unit):
MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0]) for y in range(self.Extent[1]) ])
#print(Unit)
TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ])
return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0
def execute(self, Actions, State):
"""
Execute `Actions` on `State`.
Parameters
----------
Actions : list[str]
A set of actions to be performed on `State`.
State : StateClass
State on which to inflict actions.
Returns
-------
Changes : list
Resulting state of executed `Actions`.
"""
NewState = deepcopy(State)
Changes = []
for Action in Actions:
ActionResult = self.Actions[Action](NewState)
ActionResult[1].RemainingLifetime -= 1
if isinstance(ActionResult, list):
Changes += ActionResult
else:
Changes.append(ActionResult)
return Changes
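    # Typical call pattern (hypothetical projectile and State instances):
    #   changes = projectile.execute(["advance"], state)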
# Will be used as the projectile for the missile launcher unit
class MissileClass(ProjectileClass):
def __init__(self, ID, Owner, Position, Life=1):
        ProjectileClass.__init__(self, ID, Owner, Positon=Position, Life=Life)
| [
"src.UnitModule.UnitClass.__init__",
"src.UnitModule.advance",
"copy.deepcopy"
]
| [((1443, 1501), 'src.UnitModule.UnitClass.__init__', 'UnitClass.__init__', (['self', 'ID', 'Owner', 'Health'], {'Extent': '(1, 1)'}), '(self, ID, Owner, Health, Extent=(1, 1))\n', (1461, 1501), False, 'from src.UnitModule import UnitClass, advance\n'), ((2311, 2340), 'copy.deepcopy', 'deepcopy', (['self.Position', 'memo'], {}), '(self.Position, memo)\n', (2319, 2340), False, 'from copy import deepcopy\n'), ((2373, 2405), 'copy.deepcopy', 'deepcopy', (['self.Orientation', 'memo'], {}), '(self.Orientation, memo)\n', (2381, 2405), False, 'from copy import deepcopy\n'), ((2433, 2460), 'copy.deepcopy', 'deepcopy', (['self.Attack', 'memo'], {}), '(self.Attack, memo)\n', (2441, 2460), False, 'from copy import deepcopy\n'), ((2499, 2537), 'copy.deepcopy', 'deepcopy', (['self.RemainingLifetime', 'memo'], {}), '(self.RemainingLifetime, memo)\n', (2507, 2537), False, 'from copy import deepcopy\n'), ((3929, 3944), 'copy.deepcopy', 'deepcopy', (['State'], {}), '(State)\n', (3937, 3944), False, 'from copy import deepcopy\n'), ((2200, 2223), 'copy.deepcopy', 'deepcopy', (['self.ID', 'memo'], {}), '(self.ID, memo)\n', (2208, 2223), False, 'from copy import deepcopy\n'), ((2225, 2251), 'copy.deepcopy', 'deepcopy', (['self.Owner', 'memo'], {}), '(self.Owner, memo)\n', (2233, 2251), False, 'from copy import deepcopy\n'), ((2253, 2280), 'copy.deepcopy', 'deepcopy', (['self.Health', 'memo'], {}), '(self.Health, memo)\n', (2261, 2280), False, 'from copy import deepcopy\n'), ((1547, 1563), 'src.UnitModule.advance', 'advance', (['self', 'x'], {}), '(self, x)\n', (1554, 1563), False, 'from src.UnitModule import UnitClass, advance\n')] |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype validators.**
This submodule publishes a PEP-compliant hierarchy of subscriptable (indexable)
classes enabling callers to validate the internal structure of arbitrarily
complex scalars, data structures, and third-party objects. Like annotation
objects defined by the :mod:`typing` module (e.g., :attr:`typing.Union`), these
classes dynamically generate PEP-compliant type hints when subscripted
(indexed) and are thus intended to annotate callables and variables. Unlike
annotation objects defined by the :mod:`typing` module, these classes are *not*
explicitly covered by existing PEPs and thus *not* directly usable as
annotations.
Instead, callers are expected to (in order):
#. Annotate callable parameters and returns to be validated with
:pep:`593`-compliant :attr:`typing.Annotated` type hints.
#. Subscript those hints with (in order):
#. The type of those parameters and returns.
#. One or more subscriptions of classes declared by this submodule.
'''
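# A minimal example of that pattern (illustrative only; assumes Python >= 3.9 for
# "typing.Annotated"):
#
#     from typing import Annotated
#     from beartype import beartype
#     from beartype.vale import Is
#
#     @beartype
#     def quote(text: Annotated[str, Is[lambda text: bool(text)]]) -> str:
#         return '"' + text + '"'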
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To avoid polluting the public module namespace, external attributes
# should be locally imported at module scope *ONLY* under alternate private
# names (e.g., "from argparse import ArgumentParser as _ArgumentParser" rather
# than merely "from argparse import ArgumentParser").
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.vale._is._valeis import _IsFactory
from beartype.vale._is._valeistype import (
_IsInstanceFactory,
_IsSubclassFactory,
)
from beartype.vale._is._valeisobj import _IsAttrFactory
from beartype.vale._is._valeisoper import _IsEqualFactory
# ....................{ SINGLETONS }....................
# Public factory singletons instantiating these private factory classes.
Is = _IsFactory(basename='Is')
IsAttr = _IsAttrFactory(basename='IsAttr')
IsEqual = _IsEqualFactory(basename='IsEqual')
IsInstance = _IsInstanceFactory(basename='IsInstance')
IsSubclass = _IsSubclassFactory(basename='IsSubclass')
# Delete all private factory classes imported above for safety.
del (
_IsFactory,
_IsAttrFactory,
_IsEqualFactory,
_IsInstanceFactory,
_IsSubclassFactory,
)
# ....................{ TODO }....................
#FIXME: As intelligently requested by @Saphyel at #32, add support for
#additional classes support constraints resembling:
#
#* String constraints:
# * Email.
# * Uuid.
# * Choice.
# * Language.
# * Locale.
# * Country.
# * Currency.
#* Comparison constraints
# * IdenticalTo.
# * NotIdenticalTo.
# * LessThan.
# * GreaterThan.
# * Range.
# * DivisibleBy.
#FIXME: Add a new BeartypeValidator.get_cause_or_none() method with the same
#signature and docstring as the existing CauseSleuth.get_cause_or_none()
#method. This new BeartypeValidator.get_cause_or_none() method should then be
#called by the "_peperrorannotated" submodule to generate human-readable
#exception messages. Note that this implies that:
#* The BeartypeValidator.__init__() method will need to additionally accept a new
# mandatory "get_cause_or_none: Callable[[], Optional[str]]" parameter, which
# that method should then localize to "self.get_cause_or_none".
#* Each __class_getitem__() dunder method of each "_BeartypeValidatorFactoryABC" subclass will need
# to additionally define and pass that callable when creating and returning
# its "BeartypeValidator" instance.
#FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that we can
#leverage all of our existing "beartype.is" infrastructure to dynamically
#synthesize PEP-compliant type hints that would then be implicitly supported by
#any runtime type checker. At present, subscriptions of "Is" (e.g.,
#"Annotated[str, Is[lambda text: bool(text)]]") are only supported by beartype
#itself. Of course, does anyone care? I mean, if you're using a runtime type
#checker, you're probably *ONLY* using beartype. Right? That said, this would
#technically improve portability by allowing users to switch between different
#checkers... except not really, since they'd still have to import beartype
#infrastructure to do so. So, this is probably actually useless.
#
#Nonetheless, the idea itself is trivial. We declare a new
#"beartype.is.Portable" singleton accessed in the same way: e.g.,
# from beartype import beartype
# from beartype.is import Portable
# NonEmptyStringTest = Is[lambda text: bool(text)]
# NonEmptyString = Portable[str, NonEmptyStringTest]
# @beartype
# def munge_it(text: NonEmptyString) -> str: ...
#
#So what's the difference between "typing.Annotated" and "beartype.is.Portable"
#then? Simple. The latter dynamically generates one new PEP 3119-compliant
#metaclass and associated class whenever subscripted. Clearly, this gets
#expensive in both space and time consumption fast -- which is why this won't
#be the default approach. For safety, this new class does *NOT* subclass the
#first subscripted class. Instead:
#* This new metaclass of this new class simply defines an __isinstancecheck__()
# dunder method. For the above example, this would be:
# class NonEmptyStringMetaclass(object):
# def __isinstancecheck__(cls, obj) -> bool:
# return isinstance(obj, str) and NonEmptyStringTest(obj)
#* This new class would then be entirely empty. For the above example, this
# would be:
# class NonEmptyStringClass(object, metaclass=NonEmptyStringMetaclass):
# pass
#
#Well, so much for brilliant. It's slow and big, so it seems doubtful anyone
#would actually do that. Nonetheless, that's food for thought for you.
| [
"beartype.vale._is._valeistype._IsSubclassFactory",
"beartype.vale._is._valeisobj._IsAttrFactory",
"beartype.vale._is._valeistype._IsInstanceFactory",
"beartype.vale._is._valeis._IsFactory",
"beartype.vale._is._valeisoper._IsEqualFactory"
]
| [((2119, 2144), 'beartype.vale._is._valeis._IsFactory', '_IsFactory', ([], {'basename': '"""Is"""'}), "(basename='Is')\n", (2129, 2144), False, 'from beartype.vale._is._valeis import _IsFactory\n'), ((2154, 2187), 'beartype.vale._is._valeisobj._IsAttrFactory', '_IsAttrFactory', ([], {'basename': '"""IsAttr"""'}), "(basename='IsAttr')\n", (2168, 2187), False, 'from beartype.vale._is._valeisobj import _IsAttrFactory\n'), ((2198, 2233), 'beartype.vale._is._valeisoper._IsEqualFactory', '_IsEqualFactory', ([], {'basename': '"""IsEqual"""'}), "(basename='IsEqual')\n", (2213, 2233), False, 'from beartype.vale._is._valeisoper import _IsEqualFactory\n'), ((2247, 2288), 'beartype.vale._is._valeistype._IsInstanceFactory', '_IsInstanceFactory', ([], {'basename': '"""IsInstance"""'}), "(basename='IsInstance')\n", (2265, 2288), False, 'from beartype.vale._is._valeistype import _IsInstanceFactory, _IsSubclassFactory\n'), ((2302, 2343), 'beartype.vale._is._valeistype._IsSubclassFactory', '_IsSubclassFactory', ([], {'basename': '"""IsSubclass"""'}), "(basename='IsSubclass')\n", (2320, 2343), False, 'from beartype.vale._is._valeistype import _IsInstanceFactory, _IsSubclassFactory\n')] |
import unittest
from mock import Mock
import base64
from cellardoor import errors
from cellardoor.authentication import *
from cellardoor.authentication.basic import BasicAuthIdentifier
class FooIdentifier(Identifier):
pass
class BarAuthenticator(Authenticator):
pass
class TestAuthentication(unittest.TestCase):
def test_abstract_identifier(self):
id = Identifier()
with self.assertRaises(NotImplementedError):
id.identify({})
def test_abstract_authenticator(self):
auth = Authenticator()
with self.assertRaises(NotImplementedError):
auth.authenticate({})
def test_bad_identifier(self):
self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())])
def test_bad_authenticator(self):
self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)])
def test_middleware(self):
identifier = FooIdentifier()
identifier.identify = Mock(return_value='foo')
authenticator = BarAuthenticator()
authenticator.authenticate = Mock(return_value='bar')
app = Mock(return_value=[])
middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)])
environ = {'skidoo':23}
middleware(environ, lambda: None)
identifier.identify.assert_called_once_with(environ)
authenticator.authenticate.assert_called_once_with('foo')
self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'})
def test_middleware_skip(self):
id_one = FooIdentifier()
id_one.identify = Mock(return_value=None)
id_two = FooIdentifier()
id_two.identify = Mock(return_value='two')
id_three = FooIdentifier()
id_three.identify = Mock(return_value='three')
auth_one = BarAuthenticator()
auth_one.authenticate = Mock(return_value='one')
auth_two = BarAuthenticator()
auth_two.authenticate = Mock(return_value='two')
auth_three = BarAuthenticator()
auth_three.authenticate = Mock(return_value='three')
app = Mock(return_value=[])
middleware = AuthenticationMiddleware(
app,
pairs=[
(id_one, auth_one),
(id_two, auth_two),
(id_three, auth_three)
]
)
environ = {}
middleware(environ, lambda: None)
self.assertEquals(environ, {'cellardoor.identity':'two'})
class TestBasic(unittest.TestCase):
def test_skip_if_no_auth_header(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({})
self.assertEquals(credentials, None)
def test_skip_if_not_a_pair(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'})
self.assertEquals(credentials, None)
def test_skip_if_not_basic(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'})
self.assertEquals(credentials, None)
def test_error_if_not_base64(self):
identifier = BasicAuthIdentifier()
with self.assertRaises(errors.IdentificationError):
identifier.identify({'HTTP_AUTHORIZATION':'Basic \x000'})
def test_error_if_malformed(self):
identifier = BasicAuthIdentifier()
credentials = base64.standard_b64encode('foobar')
with self.assertRaises(errors.IdentificationError):
identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials})
def test_pass(self):
identifier = BasicAuthIdentifier()
credentials = base64.standard_b64encode('foo:bar')
identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials})
self.assertEquals(identified_credentials, {'username':'foo', 'password':'<PASSWORD>'})
| [
"mock.Mock",
"cellardoor.authentication.basic.BasicAuthIdentifier",
"base64.standard_b64encode"
]
| [((931, 955), 'mock.Mock', 'Mock', ([], {'return_value': '"""foo"""'}), "(return_value='foo')\n", (935, 955), False, 'from mock import Mock\n'), ((1024, 1048), 'mock.Mock', 'Mock', ([], {'return_value': '"""bar"""'}), "(return_value='bar')\n", (1028, 1048), False, 'from mock import Mock\n'), ((1057, 1078), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (1061, 1078), False, 'from mock import Mock\n'), ((1494, 1517), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (1498, 1517), False, 'from mock import Mock\n'), ((1565, 1589), 'mock.Mock', 'Mock', ([], {'return_value': '"""two"""'}), "(return_value='two')\n", (1569, 1589), False, 'from mock import Mock\n'), ((1641, 1667), 'mock.Mock', 'Mock', ([], {'return_value': '"""three"""'}), "(return_value='three')\n", (1645, 1667), False, 'from mock import Mock\n'), ((1726, 1750), 'mock.Mock', 'Mock', ([], {'return_value': '"""one"""'}), "(return_value='one')\n", (1730, 1750), False, 'from mock import Mock\n'), ((1809, 1833), 'mock.Mock', 'Mock', ([], {'return_value': '"""two"""'}), "(return_value='two')\n", (1813, 1833), False, 'from mock import Mock\n'), ((1896, 1922), 'mock.Mock', 'Mock', ([], {'return_value': '"""three"""'}), "(return_value='three')\n", (1900, 1922), False, 'from mock import Mock\n'), ((1931, 1952), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (1935, 1952), False, 'from mock import Mock\n'), ((2312, 2333), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ([], {}), '()\n', (2331, 2333), False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((2470, 2491), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ([], {}), '()\n', (2489, 2491), False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((2653, 2674), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ([], {}), '()\n', (2672, 2674), False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((2842, 2863), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ([], {}), '()\n', (2861, 2863), False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((3036, 3057), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ([], {}), '()\n', (3055, 3057), False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((3074, 3109), 'base64.standard_b64encode', 'base64.standard_b64encode', (['"""foobar"""'], {}), "('foobar')\n", (3099, 3109), False, 'import base64\n'), ((3281, 3302), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ([], {}), '()\n', (3300, 3302), False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((3319, 3355), 'base64.standard_b64encode', 'base64.standard_b64encode', (['"""foo:bar"""'], {}), "('foo:bar')\n", (3344, 3355), False, 'import base64\n')] |
import os
from mcstasscript.instr_reader.control import InstrumentReader
from mcstasscript.interface.instr import McStas_instr
class McStas_file:
"""
Reader of McStas files, can add to an existing McStasScript
instrument instance or create a corresponding McStasScript python
file.
Methods
-------
add_to_instr(Instr)
Add information from McStas file to McStasScript Instr instance
write_python_file(filename)
Write python file named filename that reproduce the McStas instr
"""
def __init__(self, filename):
"""
Initialization of McStas_file class, needs McStas instr filename
Parameters
----------
filename (str)
Name of McStas instrument file to be read
"""
# Check filename
if not os.path.isfile(filename):
raise ValueError("Given filename, \"" + filename
+ "\" could not be found.")
self.Reader = InstrumentReader(filename)
def add_to_instr(self, Instr):
"""
Adds information from the McStas file to McStasScript instr
Parameters
----------
Instr (McStasScript McStas_instr instance)
McStas_instr instance to add instrument information to
"""
# Check Instr
if not isinstance(Instr, McStas_instr):
raise TypeError("Given object is not of type McStas_instr!")
self.Reader.add_to_instr(Instr)
def write_python_file(self, filename, **kwargs):
"""
Writes python file that reproduces McStas instrument file
Parameters
----------
filename (str)
Filename of python file to be written
"""
if "force" in kwargs:
force = kwargs["force"]
else:
force = False
        # Check that the target filename is not already taken
if os.path.isfile(filename):
if force:
os.remove(filename)
else:
raise ValueError("Filename \"" + filename
+ "\" already exists, you can overwrite with "
+ "force=True")
self.Reader.generate_py_version(filename)
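# Usage sketch (file names below are placeholders):
#   reader = McStas_file("example.instr")
#   instr = McStas_instr("example")          # an existing McStasScript instrument
#   reader.add_to_instr(instr)               # merge the .instr contents into it
#   reader.write_python_file("example.py")   # or generate a python version instead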
| [
"os.path.isfile",
"mcstasscript.instr_reader.control.InstrumentReader",
"os.remove"
]
| [((1000, 1026), 'mcstasscript.instr_reader.control.InstrumentReader', 'InstrumentReader', (['filename'], {}), '(filename)\n', (1016, 1026), False, 'from mcstasscript.instr_reader.control import InstrumentReader\n'), ((1934, 1958), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1948, 1958), False, 'import os\n'), ((833, 857), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (847, 857), False, 'import os\n'), ((1998, 2017), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (2007, 2017), False, 'import os\n')] |
#! /usr/bin/env python3
#######################
"""####################
Index:
1. Imports and Readme
2. Functions
3. Main
4. Testing
####################"""
#######################
###################################################################
# 1. IMPORTS AND README
###################################################################
import easygui
import country_list_getter
###################################################################
# 2. FUNCTIONS
###################################################################
# Dictionary. It has keys (Canada, France, etc.) and values (Ottawa, Paris)
country_list_getter.main()
COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST
def ask_to_play():
return easygui.ynbox("Do you want to play a game?", "Country Guesser", ("Yes", "No"))
def ask_to_replay(correct_answers, total_questions):
score = round(((correct_answers / total_questions) * 100), 2)
if score >= 50:
return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/happy_puppy.jpg", ["Yes", "No"])
else:
return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/sad_puppy.jpg", ["Yes", "No"])
def main_question_box(country):
return easygui.enterbox("What is the capital of: " + country + "?", "Country Capital Guesser!!")
###################################################################
# 3. MAIN
###################################################################
def funtime():
    correct_answers = 0
    total_questions = 0
    # Start the quiz loop only if the player agrees to play
    playing = ask_to_play()
while playing:
for key, value in COUNTRIES_CAPITALS.items():
answer = main_question_box(key)
# answer = input("Name the capital of: " + key + "\n").lower()
total_questions += 1 # Short for total_questions = total_questions + 1
if answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]:
correct_answers += 1
print("Correct!")
else:
print("Wrong!")
# Should we keep playing?
response = input("Would you like to play again?: \n")
if response.lower() == "yes" or response == "y":
playing = 1
else:
playing = 0
#score_screen(correct_answers, total_questions)
ask_to_replay(correct_answers, total_questions)
#print("You scored " + str(correct_answers)+ "/" + str(total_questions) + " (" + str(correct_percent) + "%)")
###################################################################
# 4. TESTING
###################################################################
# COUNTRIES_CAPITALS = {"Canada": "Ottawa", "United States": "Washington", "France": "Paris"}
def test_1():
pass
# ask_to_play()
# main_question_box("Canada")
funtime()
| [
"easygui.enterbox",
"easygui.ynbox",
"country_list_getter.main"
]
| [((632, 658), 'country_list_getter.main', 'country_list_getter.main', ([], {}), '()\n', (656, 658), False, 'import country_list_getter\n'), ((742, 820), 'easygui.ynbox', 'easygui.ynbox', (['"""Do you want to play a game?"""', '"""Country Guesser"""', "('Yes', 'No')"], {}), "('Do you want to play a game?', 'Country Guesser', ('Yes', 'No'))\n", (755, 820), False, 'import easygui\n'), ((1335, 1428), 'easygui.enterbox', 'easygui.enterbox', (["('What is the capital of: ' + country + '?')", '"""Country Capital Guesser!!"""'], {}), "('What is the capital of: ' + country + '?',\n 'Country Capital Guesser!!')\n", (1351, 1428), False, 'import easygui\n')] |
# Need this to import from parent directory when running outside pycharm
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from ac_utils.general import save_to_json, load_from_json
import click
import xml.etree.ElementTree
from urllib import unquote
def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file):
collection = rekordbox_file.find('COLLECTION')
found = False
for document in collection:
if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]:
found = document
break
if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]:
found = document
break
if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]):
found = document
break
return found
@click.command()
@click.argument('dataset_path')
def rekordbox_file_to_analysis_file(dataset_path):
"""
Read information from rekordbox_rhythm.xml present in dataset_path and convert it into
    analysis_rhythm_rekordbox.json to be stored in the same folder and compatible with our evaluation
framework.
"""
rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot()
metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json'))
out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json')
analysis = dict()
with click.progressbar(metadata_file.keys(), label="Converting...") as metadata_keys:
for key in metadata_keys:
entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file)
if entry is not False:
tempo_entry = entry.find('TEMPO')
if tempo_entry is not None:
bpm_raw = float(tempo_entry.attrib['Bpm'])
else:
bpm_raw = 0.0
analysis[key] = {"RekBox": {
"bpm": bpm_raw,
}
}
save_to_json(out_file_path, analysis, verbose=True)
if __name__ == '__main__':
rekordbox_file_to_analysis_file()
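# Typical invocation (a sketch; the dataset folder is assumed to contain rekordbox_rhythm.xml
# and metadata.json as loaded above, and the script name below is hypothetical):
#
#     python rekordbox_to_analysis.py /path/to/dataset
#
# The command writes analysis_rhythm_rekordbox.json into the same dataset folder.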
| [
"click.argument",
"ac_utils.general.save_to_json",
"os.path.join",
"os.path.realpath",
"click.command"
]
| [((959, 974), 'click.command', 'click.command', ([], {}), '()\n', (972, 974), False, 'import click\n'), ((976, 1006), 'click.argument', 'click.argument', (['"""dataset_path"""'], {}), "('dataset_path')\n", (990, 1006), False, 'import click\n'), ((1494, 1554), 'os.path.join', 'os.path.join', (['dataset_path', '"""analysis_rhythm_rekordbox.json"""'], {}), "(dataset_path, 'analysis_rhythm_rekordbox.json')\n", (1506, 1554), False, 'import os\n'), ((2170, 2221), 'ac_utils.general.save_to_json', 'save_to_json', (['out_file_path', 'analysis'], {'verbose': '(True)'}), '(out_file_path, analysis, verbose=True)\n', (2182, 2221), False, 'from ac_utils.general import save_to_json, load_from_json\n'), ((1429, 1472), 'os.path.join', 'os.path.join', (['dataset_path', '"""metadata.json"""'], {}), "(dataset_path, 'metadata.json')\n", (1441, 1472), False, 'import os\n'), ((139, 165), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (155, 165), False, 'import os\n'), ((1332, 1382), 'os.path.join', 'os.path.join', (['dataset_path', '"""rekordbox_rhythm.xml"""'], {}), "(dataset_path, 'rekordbox_rhythm.xml')\n", (1344, 1382), False, 'import os\n')] |
import functools
import os
import shutil
import tempfile
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp import abc, web
from aiohttp.web_urldispatcher import SystemRoute
@pytest.fixture(scope='function')
def tmp_dir_path(request):
"""
Give a path for a temporary directory
The directory is destroyed at the end of the test.
"""
# Temporary directory.
tmp_dir = tempfile.mkdtemp()
def teardown():
# Delete the whole directory:
shutil.rmtree(tmp_dir)
request.addfinalizer(teardown)
return tmp_dir
@pytest.mark.parametrize(
"show_index,status,prefix,data",
[pytest.param(False, 403, '/', None, id="index_forbidden"),
pytest.param(True, 200, '/',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/my_dir">my_dir/</a></li>\n'
b'<li><a href="/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_root"),
pytest.param(True, 200, '/static',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/static/my_dir">my_dir/</a></li>\n'
b'<li><a href="/static/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_static")])
async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client,
show_index, status, prefix, data):
"""
Tests the operation of static file server.
    Try to access the root of the static file server, and make
    sure that the correct HTTP statuses are returned depending on whether the
    directory index should be shown or not.
"""
# Put a file inside tmp_dir_path:
my_file_path = os.path.join(tmp_dir_path, 'my_file')
with open(my_file_path, 'w') as fw:
fw.write('hello')
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
with open(my_file_path, 'w') as fw:
fw.write('world')
app = web.Application()
# Register global static route:
app.router.add_static(prefix, tmp_dir_path, show_index=show_index)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get(prefix)
assert r.status == status
if data:
assert r.headers['Content-Type'] == "text/html; charset=utf-8"
read_ = (await r.read())
assert read_ == data
async def test_follow_symlink(tmp_dir_path, aiohttp_client):
"""
    Tests access to a symlink inside the static folder
"""
data = 'hello world'
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
with open(my_file_path, 'w') as fw:
fw.write(data)
my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink')
os.symlink(my_dir_path, my_symlink_path)
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, follow_symlinks=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink/my_file_in_dir')
assert r.status == 200
assert (await r.text()) == data
@pytest.mark.parametrize('dir_name,filename,data', [
('', 'test file.txt', 'test text'),
('test dir name', 'test dir file .txt', 'test text file folder')
])
async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client,
dir_name, filename, data):
"""
    Checks that static files whose paths contain spaces are served correctly
"""
my_dir_path = os.path.join(tmp_dir_path, dir_name)
if dir_name:
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, filename)
with open(my_file_path, 'w') as fw:
fw.write(data)
app = web.Application()
url = os.path.join('/', dir_name, filename)
app.router.add_static('/', tmp_dir_path)
client = await aiohttp_client(app)
r = await client.get(url)
assert r.status == 200
assert (await r.text()) == data
async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client):
"""
Tests accessing non-existing resource
    Try to access a non-existing resource and make sure that a 404 HTTP status
    is returned.
"""
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/non_existing_resource')
assert r.status == 404
@pytest.mark.parametrize('registered_path,request_url', [
('/a:b', '/a:b'),
('/a@b', '/a@b'),
('/a:b', '/a%3Ab'),
])
async def test_url_escaping(aiohttp_client, registered_path, request_url):
"""
    Tests accessing a resource whose path contains characters that may be URL-escaped.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(registered_path, handler)
client = await aiohttp_client(app)
r = await client.get(request_url)
assert r.status == 200
async def test_handler_metadata_persistence():
"""
Tests accessing metadata of a handler after registering it on the app
router.
"""
app = web.Application()
async def async_handler(request):
"""Doc"""
return web.Response()
def sync_handler(request):
"""Doc"""
return web.Response()
app.router.add_get('/async', async_handler)
with pytest.warns(DeprecationWarning):
app.router.add_get('/sync', sync_handler)
for resource in app.router.resources():
for route in resource:
assert route.handler.__doc__ == 'Doc'
async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client):
"""
Tests the unauthorized access to a folder of static file server.
    Try to list the contents of a folder of the static file server when the
    server does not have permission to do so.
"""
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
path = MagicMock()
path.joinpath.return_value = path
path.resolve.return_value = path
path.iterdir.return_value.__iter__.side_effect = PermissionError()
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_dir')
assert r.status == 403
async def test_access_symlink_loop(tmp_dir_path, aiohttp_client):
"""
    Tests access to a looped symlink, which cannot be resolved.
"""
my_dir_path = os.path.join(tmp_dir_path, 'my_symlink')
os.symlink(my_dir_path, my_dir_path)
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink')
assert r.status == 404
async def test_access_special_resource(tmp_dir_path, aiohttp_client):
"""
Tests the access to a resource that is neither a file nor a directory.
    Checks that if a special resource is accessed (e.g. a named pipe or UNIX
    domain socket) then a 404 HTTP status is returned.
"""
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
special = MagicMock()
special.is_dir.return_value = False
special.is_file.return_value = False
path = MagicMock()
path.joinpath.side_effect = lambda p: (special if p == 'special'
else path)
path.resolve.return_value = path
special.resolve.return_value = special
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/special')
assert r.status == 404
async def test_partialy_applied_handler(aiohttp_client):
app = web.Application()
async def handler(data, request):
return web.Response(body=data)
with pytest.warns(DeprecationWarning):
app.router.add_route('GET', '/', functools.partial(handler, b'hello'))
client = await aiohttp_client(app)
r = await client.get('/')
data = (await r.read())
assert data == b'hello'
def test_system_route():
route = SystemRoute(web.HTTPCreated(reason='test'))
with pytest.raises(RuntimeError):
route.url_for()
assert route.name is None
assert route.resource is None
assert "<SystemRoute 201: test>" == repr(route)
assert 201 == route.status
assert 'test' == route.reason
async def test_412_is_returned(aiohttp_client):
class MyRouter(abc.AbstractRouter):
async def resolve(self, request):
raise web.HTTPPreconditionFailed()
app = web.Application(router=MyRouter())
client = await aiohttp_client(app)
resp = await client.get('/')
assert resp.status == 412
async def test_allow_head(aiohttp_client):
"""
Test allow_head on routes.
"""
app = web.Application()
async def handler(_):
return web.Response()
app.router.add_get('/a', handler, name='a')
app.router.add_get('/b', handler, allow_head=False, name='b')
client = await aiohttp_client(app)
r = await client.get('/a')
assert r.status == 200
await r.release()
r = await client.head('/a')
assert r.status == 200
await r.release()
r = await client.get('/b')
assert r.status == 200
await r.release()
r = await client.head('/b')
assert r.status == 405
await r.release()
@pytest.mark.parametrize("path", [
'/a',
'/{a}',
])
def test_reuse_last_added_resource(path):
"""
    Test that adding a route with the same name and path as the last added
resource doesn't create a new resource.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(path, handler, name="a")
app.router.add_post(path, handler, name="a")
assert len(app.router.resources()) == 1
def test_resource_raw_match():
app = web.Application()
async def handler(request):
return web.Response()
route = app.router.add_get("/a", handler, name="a")
assert route.resource.raw_match("/a")
route = app.router.add_get("/{b}", handler, name="b")
assert route.resource.raw_match("/{b}")
resource = app.router.add_static("/static", ".")
assert not resource.raw_match("/static")
async def test_add_view(aiohttp_client):
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_view("/a", MyView)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_decorate_view(aiohttp_client):
routes = web.RouteTableDef()
@routes.view("/a")
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app = web.Application()
app.router.add_routes(routes)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_web_view(aiohttp_client):
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_routes([
web.view("/a", MyView)
])
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
| [
"aiohttp.web.Application",
"pytest.fixture",
"unittest.mock.patch",
"aiohttp.web.Response",
"aiohttp.web.view",
"os.mkdir",
"aiohttp.web.RouteTableDef",
"aiohttp.web.HTTPCreated",
"aiohttp.web.HTTPPreconditionFailed",
"unittest.mock.MagicMock",
"tempfile.mkdtemp",
"pytest.raises",
"os.path.join",
"os.symlink",
"pytest.param",
"pytest.mark.parametrize",
"functools.partial",
"shutil.rmtree",
"pytest.warns"
]
| [((217, 249), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (231, 249), False, 'import pytest\n'), ((3505, 3666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dir_name,filename,data"""', "[('', 'test file.txt', 'test text'), ('test dir name', 'test dir file .txt',\n 'test text file folder')]"], {}), "('dir_name,filename,data', [('', 'test file.txt',\n 'test text'), ('test dir name', 'test dir file .txt',\n 'test text file folder')])\n", (3528, 3666), False, 'import pytest\n'), ((4885, 5002), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""registered_path,request_url"""', "[('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab')]"], {}), "('registered_path,request_url', [('/a:b', '/a:b'), (\n '/a@b', '/a@b'), ('/a:b', '/a%3Ab')])\n", (4908, 5002), False, 'import pytest\n'), ((10299, 10346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path"""', "['/a', '/{a}']"], {}), "('path', ['/a', '/{a}'])\n", (10322, 10346), False, 'import pytest\n'), ((431, 449), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (447, 449), False, 'import tempfile\n'), ((1938, 1975), 'os.path.join', 'os.path.join', (['tmp_dir_path', '"""my_file"""'], {}), "(tmp_dir_path, 'my_file')\n", (1950, 1975), False, 'import os\n'), ((2061, 2097), 'os.path.join', 'os.path.join', (['tmp_dir_path', '"""my_dir"""'], {}), "(tmp_dir_path, 'my_dir')\n", (2073, 2097), False, 'import os\n'), ((2102, 2123), 'os.mkdir', 'os.mkdir', (['my_dir_path'], {}), '(my_dir_path)\n', (2110, 2123), False, 'import os\n'), ((2144, 2187), 'os.path.join', 'os.path.join', (['my_dir_path', '"""my_file_in_dir"""'], {}), "(my_dir_path, 'my_file_in_dir')\n", (2156, 2187), False, 'import os\n'), ((2265, 2282), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (2280, 2282), False, 'from aiohttp import abc, web\n'), ((2864, 2900), 'os.path.join', 'os.path.join', (['tmp_dir_path', '"""my_dir"""'], {}), "(tmp_dir_path, 'my_dir')\n", (2876, 2900), False, 'import os\n'), ((2905, 2926), 'os.mkdir', 'os.mkdir', (['my_dir_path'], {}), '(my_dir_path)\n', (2913, 2926), False, 'import os\n'), ((2947, 2990), 'os.path.join', 'os.path.join', (['my_dir_path', '"""my_file_in_dir"""'], {}), "(my_dir_path, 'my_file_in_dir')\n", (2959, 2990), False, 'import os\n'), ((3077, 3117), 'os.path.join', 'os.path.join', (['tmp_dir_path', '"""my_symlink"""'], {}), "(tmp_dir_path, 'my_symlink')\n", (3089, 3117), False, 'import os\n'), ((3122, 3162), 'os.symlink', 'os.symlink', (['my_dir_path', 'my_symlink_path'], {}), '(my_dir_path, my_symlink_path)\n', (3132, 3162), False, 'import os\n'), ((3174, 3191), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (3189, 3191), False, 'from aiohttp import abc, web\n'), ((3902, 3938), 'os.path.join', 'os.path.join', (['tmp_dir_path', 'dir_name'], {}), '(tmp_dir_path, dir_name)\n', (3914, 3938), False, 'import os\n'), ((4007, 4042), 'os.path.join', 'os.path.join', (['my_dir_path', 'filename'], {}), '(my_dir_path, filename)\n', (4019, 4042), False, 'import os\n'), ((4118, 4135), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (4133, 4135), False, 'from aiohttp import abc, web\n'), ((4147, 4184), 'os.path.join', 'os.path.join', (['"""/"""', 'dir_name', 'filename'], {}), "('/', dir_name, filename)\n", (4159, 4184), False, 'import os\n'), ((4599, 4616), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (4614, 4616), False, 'from aiohttp import abc, web\n'), ((5150, 5167), 'aiohttp.web.Application', 
'web.Application', ([], {}), '()\n', (5165, 5167), False, 'from aiohttp import abc, web\n'), ((5546, 5563), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (5561, 5563), False, 'from aiohttp import abc, web\n'), ((6299, 6335), 'os.path.join', 'os.path.join', (['tmp_dir_path', '"""my_dir"""'], {}), "(tmp_dir_path, 'my_dir')\n", (6311, 6335), False, 'import os\n'), ((6340, 6361), 'os.mkdir', 'os.mkdir', (['my_dir_path'], {}), '(my_dir_path)\n', (6348, 6361), False, 'import os\n'), ((6373, 6390), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (6388, 6390), False, 'from aiohttp import abc, web\n'), ((7134, 7174), 'os.path.join', 'os.path.join', (['tmp_dir_path', '"""my_symlink"""'], {}), "(tmp_dir_path, 'my_symlink')\n", (7146, 7174), False, 'import os\n'), ((7179, 7215), 'os.symlink', 'os.symlink', (['my_dir_path', 'my_dir_path'], {}), '(my_dir_path, my_dir_path)\n', (7189, 7215), False, 'import os\n'), ((7227, 7244), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (7242, 7244), False, 'from aiohttp import abc, web\n'), ((7797, 7814), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (7812, 7814), False, 'from aiohttp import abc, web\n'), ((8637, 8654), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (8652, 8654), False, 'from aiohttp import abc, web\n'), ((9742, 9759), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (9757, 9759), False, 'from aiohttp import abc, web\n'), ((10545, 10562), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (10560, 10562), False, 'from aiohttp import abc, web\n'), ((10812, 10829), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (10827, 10829), False, 'from aiohttp import abc, web\n'), ((11247, 11264), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (11262, 11264), False, 'from aiohttp import abc, web\n'), ((11806, 11825), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (11823, 11825), False, 'from aiohttp import abc, web\n'), ((12017, 12034), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (12032, 12034), False, 'from aiohttp import abc, web\n'), ((12406, 12423), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (12421, 12423), False, 'from aiohttp import abc, web\n'), ((517, 539), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (530, 539), False, 'import shutil\n'), ((665, 722), 'pytest.param', 'pytest.param', (['(False)', '(403)', '"""/"""', 'None'], {'id': '"""index_forbidden"""'}), "(False, 403, '/', None, id='index_forbidden')\n", (677, 722), False, 'import pytest\n'), ((729, 981), 'pytest.param', 'pytest.param', (['(True)', '(200)', '"""/"""', 'b\'<html>\\n<head>\\n<title>Index of /.</title>\\n</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n<li><a href="/my_dir">my_dir/</a></li>\\n<li><a href="/my_file">my_file</a></li>\\n</ul>\\n</body>\\n</html>\''], {'id': '"""index_root"""'}), '(True, 200, \'/\',\n b\'<html>\\n<head>\\n<title>Index of /.</title>\\n</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n<li><a href="/my_dir">my_dir/</a></li>\\n<li><a href="/my_file">my_file</a></li>\\n</ul>\\n</body>\\n</html>\'\n , id=\'index_root\')\n', (741, 981), False, 'import pytest\n'), ((1103, 1377), 'pytest.param', 'pytest.param', (['(True)', '(200)', '"""/static"""', 'b\'<html>\\n<head>\\n<title>Index of /.</title>\\n</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n<li><a href="/static/my_dir">my_dir/</a></li>\\n<li><a 
href="/static/my_file">my_file</a></li>\\n</ul>\\n</body>\\n</html>\''], {'id': '"""index_static"""'}), '(True, 200, \'/static\',\n b\'<html>\\n<head>\\n<title>Index of /.</title>\\n</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n<li><a href="/static/my_dir">my_dir/</a></li>\\n<li><a href="/static/my_file">my_file</a></li>\\n</ul>\\n</body>\\n</html>\'\n , id=\'index_static\')\n', (1115, 1377), False, 'import pytest\n'), ((3965, 3986), 'os.mkdir', 'os.mkdir', (['my_dir_path'], {}), '(my_dir_path)\n', (3973, 3986), False, 'import os\n'), ((5216, 5230), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (5228, 5230), False, 'from aiohttp import abc, web\n'), ((5636, 5650), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (5648, 5650), False, 'from aiohttp import abc, web\n'), ((5716, 5730), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (5728, 5730), False, 'from aiohttp import abc, web\n'), ((5789, 5821), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (5801, 5821), False, 'import pytest\n'), ((6401, 6435), 'unittest.mock.patch', 'mock.patch', (['"""pathlib.Path.__new__"""'], {}), "('pathlib.Path.__new__')\n", (6411, 6435), False, 'from unittest import mock\n'), ((6472, 6483), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6481, 6483), False, 'from unittest.mock import MagicMock\n'), ((7825, 7859), 'unittest.mock.patch', 'mock.patch', (['"""pathlib.Path.__new__"""'], {}), "('pathlib.Path.__new__')\n", (7835, 7859), False, 'from unittest import mock\n'), ((7899, 7910), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (7908, 7910), False, 'from unittest.mock import MagicMock\n'), ((8016, 8027), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (8025, 8027), False, 'from unittest.mock import MagicMock\n'), ((8709, 8732), 'aiohttp.web.Response', 'web.Response', ([], {'body': 'data'}), '(body=data)\n', (8721, 8732), False, 'from aiohttp import abc, web\n'), ((8743, 8775), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (8755, 8775), False, 'import pytest\n'), ((9033, 9063), 'aiohttp.web.HTTPCreated', 'web.HTTPCreated', ([], {'reason': '"""test"""'}), "(reason='test')\n", (9048, 9063), False, 'from aiohttp import abc, web\n'), ((9074, 9101), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (9087, 9101), False, 'import pytest\n'), ((9802, 9816), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (9814, 9816), False, 'from aiohttp import abc, web\n'), ((10611, 10625), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (10623, 10625), False, 'from aiohttp import abc, web\n'), ((10878, 10892), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (10890, 10892), False, 'from aiohttp import abc, web\n'), ((8818, 8854), 'functools.partial', 'functools.partial', (['handler', "b'hello'"], {}), "(handler, b'hello')\n", (8835, 8854), False, 'import functools\n'), ((9460, 9488), 'aiohttp.web.HTTPPreconditionFailed', 'web.HTTPPreconditionFailed', ([], {}), '()\n', (9486, 9488), False, 'from aiohttp import abc, web\n'), ((11342, 11356), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (11354, 11356), False, 'from aiohttp import abc, web\n'), ((11407, 11421), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (11419, 11421), False, 'from aiohttp import abc, web\n'), ((11926, 11940), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (11938, 11940), False, 'from aiohttp import abc, web\n'), ((11991, 12005), 
'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (12003, 12005), False, 'from aiohttp import abc, web\n'), ((12501, 12515), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (12513, 12515), False, 'from aiohttp import abc, web\n'), ((12566, 12580), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (12578, 12580), False, 'from aiohttp import abc, web\n'), ((12618, 12640), 'aiohttp.web.view', 'web.view', (['"""/a"""', 'MyView'], {}), "('/a', MyView)\n", (12626, 12640), False, 'from aiohttp import abc, web\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Authors : <NAME> (<EMAIL>) & <NAME> (<EMAIL>)
# @Paper : Rethinking Graph Autoencoder Models for Attributed Graph Clustering
# @License : MIT License
import torch
import numpy as np
import torch.nn as nn
import scipy.sparse as sp
import torch.nn.functional as F
from tqdm import tqdm
from torch.optim import Adam
from sklearn.mixture import GaussianMixture
from torch.optim.lr_scheduler import StepLR
from preprocessing import sparse_to_tuple
from sklearn.neighbors import NearestNeighbors
from sklearn import metrics
from munkres import Munkres
def random_uniform_init(input_dim, output_dim):
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = torch.rand(input_dim, output_dim)*2*init_range - init_range
return nn.Parameter(initial)
def q_mat(X, centers, alpha=1.0):
X = X.detach().numpy()
centers = centers.detach().numpy()
if X.size == 0:
q = np.array([])
else:
q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha))
q = q ** ((alpha + 1.0) / 2.0)
q = np.transpose(np.transpose(q) / np.sum(q, axis=1))
return q
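# q_mat computes the Student's t soft-assignment used in DEC-style clustering: for embedding
# x_i and center mu_j, q_ij = (1 + ||x_i - mu_j||^2 / alpha)^(-(alpha + 1) / 2), with each row
# then normalized to sum to 1 so q_ij acts as the probability of assigning node i to cluster j.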
def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2):
unconf_indices = []
conf_indices = []
q = q_mat(emb, centers_emb, alpha=1.0)
confidence1 = q.max(1)
confidence2 = np.zeros((q.shape[0],))
a = np.argsort(q, axis=1)
for i in range(q.shape[0]):
confidence1[i] = q[i,a[i,-1]]
confidence2[i] = q[i,a[i,-2]]
if (confidence1[i]) > beta1 and (confidence1[i] - confidence2[i]) > beta2:
unconf_indices.append(i)
else:
conf_indices.append(i)
unconf_indices = np.asarray(unconf_indices, dtype=int)
conf_indices = np.asarray(conf_indices, dtype=int)
return unconf_indices, conf_indices
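# A node is "unconflicted" when its best assignment score exceeds beta1 and its margin over the
# second-best cluster exceeds beta2; remaining nodes are "conflicted" and are excluded from the
# clustering-specific loss terms until the thresholds are relaxed during training.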
class clustering_metrics():
def __init__(self, true_label, predict_label):
self.true_label = true_label
self.pred_label = predict_label
def clusteringAcc(self):
# best mapping between true_label and predict label
l1 = list(set(self.true_label))
numclass1 = len(l1)
l2 = list(set(self.pred_label))
numclass2 = len(l2)
if numclass1 != numclass2:
print('Class Not equal, Error!!!!')
return 0
cost = np.zeros((numclass1, numclass2), dtype=int)
for i, c1 in enumerate(l1):
mps = [i1 for i1, e1 in enumerate(self.true_label) if e1 == c1]
for j, c2 in enumerate(l2):
mps_d = [i1 for i1 in mps if self.pred_label[i1] == c2]
cost[i][j] = len(mps_d)
# match two clustering results by Munkres algorithm
m = Munkres()
cost = cost.__neg__().tolist()
indexes = m.compute(cost)
# get the match results
new_predict = np.zeros(len(self.pred_label))
for i, c in enumerate(l1):
# correponding label in l2:
c2 = l2[indexes[i][1]]
# ai is the index with label==c2 in the pred_label list
ai = [ind for ind, elm in enumerate(self.pred_label) if elm == c2]
new_predict[ai] = c
acc = metrics.accuracy_score(self.true_label, new_predict)
f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro')
precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro')
recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro')
f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro')
precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro')
recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro')
return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro
def evaluationClusterModelFromLabel(self):
nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label)
adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label)
acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc()
print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore))
fh = open('recoder.txt', 'a')
fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) )
fh.write('\r\n')
fh.flush()
fh.close()
return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro
class GraphConvSparse(nn.Module):
def __init__(self, input_dim, output_dim, activation = F.relu, **kwargs):
super(GraphConvSparse, self).__init__(**kwargs)
self.weight = random_uniform_init(input_dim, output_dim)
self.activation = activation
def forward(self, inputs, adj):
x = inputs
x = torch.mm(x,self.weight)
x = torch.mm(adj, x)
outputs = self.activation(x)
return outputs
class ReGMM_VGAE(nn.Module):
def __init__(self, **kwargs):
super(ReGMM_VGAE, self).__init__()
self.num_neurons = kwargs['num_neurons']
self.num_features = kwargs['num_features']
self.embedding_size = kwargs['embedding_size']
self.nClusters = kwargs['nClusters']
# VGAE training parameters
self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons)
self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x)
self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x)
# GMM training parameters
self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True)
self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True)
self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True)
def pretrain(self, adj, features, adj_label, y, weight_tensor, norm, epochs, lr, save_path, dataset):
opti = Adam(self.parameters(), lr=lr)
epoch_bar = tqdm(range(epochs))
gmm = GaussianMixture(n_components = self.nClusters , covariance_type = 'diag')
for _ in epoch_bar:
opti.zero_grad()
_,_, z = self.encode(features, adj)
x_ = self.decode(z)
loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor)
loss.backward()
opti.step()
gmm.fit_predict(z.detach().numpy())
self.pi.data = torch.from_numpy(gmm.weights_)
self.mu_c.data = torch.from_numpy(gmm.means_)
self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_))
self.logstd = self.mean
def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1):
pi = self.pi
mu_c = self.mu_c
log_sigma2_c = self.log_sigma2_c
det = 1e-2
Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor)
Loss = Loss * features.size(0)
yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det
yita_c = yita_c / (yita_c.sum(1).view(-1,1))
KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+
torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+
(z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1))
Loss1 = KL1
KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1))
Loss1 -= KL2
return Loss, Loss1, Loss+Loss1
def generate_centers(self, emb_unconf):
y_pred = self.predict(emb_unconf)
nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy())
_, indices = nn.kneighbors(self.mu_c.detach().numpy())
return indices[y_pred]
def update_graph(self, adj, labels, emb, unconf_indices, conf_indices):
k = 0
y_pred = self.predict(emb)
emb_unconf = emb[unconf_indices]
adj = adj.tolil()
idx = unconf_indices[self.generate_centers(emb_unconf)]
for i, k in enumerate(unconf_indices):
adj_k = adj[k].tocsr().indices
if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) :
adj[k, idx[i]] = 1
for j in adj_k:
if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]):
adj[k, j] = 0
adj = adj.tocsr()
adj_label = adj + sp.eye(adj.shape[0])
adj_label = sparse_to_tuple(adj_label)
adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T),
torch.FloatTensor(adj_label[1]),
torch.Size(adj_label[2]))
weight_mask = adj_label.to_dense().view(-1) == 1
weight_tensor = torch.ones(weight_mask.size(0))
pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
weight_tensor[weight_mask] = pos_weight_orig
return adj, adj_label, weight_tensor
def train(self, adj_norm, adj, features, y, norm, epochs, lr, beta1, beta2, save_path, dataset):
self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk'))
opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089)
lr_s = StepLR(opti, step_size=10, gamma=0.9)
import os, csv
epoch_bar = tqdm(range(epochs))
previous_unconflicted = []
previous_conflicted = []
epoch_stable = 0
for epoch in epoch_bar:
opti.zero_grad()
z_mu, z_sigma2_log, emb = self.encode(features, adj_norm)
x_ = self.decode(emb)
unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2)
if epoch == 0:
adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind)
if len(previous_unconflicted) < len(unconflicted_ind) :
z_mu = z_mu[unconflicted_ind]
z_sigma2_log = z_sigma2_log[unconflicted_ind]
emb_unconf = emb[unconflicted_ind]
emb_conf = emb[conflicted_ind]
previous_conflicted = conflicted_ind
previous_unconflicted = unconflicted_ind
else :
epoch_stable += 1
z_mu = z_mu[previous_unconflicted]
z_sigma2_log = z_sigma2_log[previous_unconflicted]
emb_unconf = emb[previous_unconflicted]
emb_conf = emb[previous_conflicted]
if epoch_stable >= 15:
epoch_stable = 0
beta1 = beta1 * 0.96
beta2 = beta2 * 0.98
if epoch % 50 == 0 and epoch <= 200 :
adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind)
loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf)
epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy()))
y_pred = self.predict(emb)
cm = clustering_metrics(y, y_pred)
acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel()
elbo_loss.backward()
opti.step()
lr_s.step()
def gaussian_pdfs_log(self,x,mus,log_sigma2s):
G=[]
for c in range(self.nClusters):
G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1))
return torch.cat(G,1)
def gaussian_pdf_log(self,x,mu,log_sigma2):
c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1)
return c
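    # gaussian_pdf_log evaluates a diagonal-covariance Gaussian log-density summed over dimensions:
    # log N(x; mu, sigma^2) = -0.5 * sum_d [log(2*pi) + log(sigma_d^2) + (x_d - mu_d)^2 / sigma_d^2]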
def predict(self, z):
pi = self.pi
log_sigma2_c = self.log_sigma2_c
mu_c = self.mu_c
det = 1e-2
yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det
yita = yita_c.detach().numpy()
return np.argmax(yita, axis=1)
def encode(self, x_features, adj):
hidden = self.base_gcn(x_features, adj)
self.mean = self.gcn_mean(hidden, adj)
self.logstd = self.gcn_logstddev(hidden, adj)
gaussian_noise = torch.randn(x_features.size(0), self.embedding_size)
sampled_z = gaussian_noise * torch.exp(self.logstd) + self.mean
return self.mean, self.logstd ,sampled_z
@staticmethod
def decode(z):
A_pred = torch.sigmoid(torch.matmul(z,z.t()))
return A_pred | [
"numpy.sqrt",
"torch.LongTensor",
"numpy.log",
"sklearn.metrics.adjusted_rand_score",
"torch.from_numpy",
"sklearn.metrics.precision_score",
"numpy.argsort",
"numpy.array",
"sklearn.metrics.recall_score",
"torch.exp",
"torch.sum",
"numpy.isin",
"sklearn.metrics.normalized_mutual_info_score",
"scipy.sparse.eye",
"numpy.asarray",
"sklearn.neighbors.NearestNeighbors",
"torch.randn",
"sklearn.mixture.GaussianMixture",
"torch.rand",
"numpy.argmax",
"numpy.transpose",
"torch.Size",
"sklearn.metrics.accuracy_score",
"torch.cat",
"sklearn.metrics.f1_score",
"torch.load",
"torch.optim.lr_scheduler.StepLR",
"torch.mm",
"numpy.sum",
"numpy.zeros",
"torch.nn.Parameter",
"munkres.Munkres",
"numpy.expand_dims",
"torch.FloatTensor",
"preprocessing.sparse_to_tuple",
"torch.ones"
]
| [((664, 703), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (input_dim + output_dim))'], {}), '(6.0 / (input_dim + output_dim))\n', (671, 703), True, 'import numpy as np\n'), ((789, 810), 'torch.nn.Parameter', 'nn.Parameter', (['initial'], {}), '(initial)\n', (801, 810), True, 'import torch.nn as nn\n'), ((1386, 1409), 'numpy.zeros', 'np.zeros', (['(q.shape[0],)'], {}), '((q.shape[0],))\n', (1394, 1409), True, 'import numpy as np\n'), ((1418, 1439), 'numpy.argsort', 'np.argsort', (['q'], {'axis': '(1)'}), '(q, axis=1)\n', (1428, 1439), True, 'import numpy as np\n'), ((1738, 1775), 'numpy.asarray', 'np.asarray', (['unconf_indices'], {'dtype': 'int'}), '(unconf_indices, dtype=int)\n', (1748, 1775), True, 'import numpy as np\n'), ((1795, 1830), 'numpy.asarray', 'np.asarray', (['conf_indices'], {'dtype': 'int'}), '(conf_indices, dtype=int)\n', (1805, 1830), True, 'import numpy as np\n'), ((944, 956), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (952, 956), True, 'import numpy as np\n'), ((2375, 2418), 'numpy.zeros', 'np.zeros', (['(numclass1, numclass2)'], {'dtype': 'int'}), '((numclass1, numclass2), dtype=int)\n', (2383, 2418), True, 'import numpy as np\n'), ((2757, 2766), 'munkres.Munkres', 'Munkres', ([], {}), '()\n', (2764, 2766), False, 'from munkres import Munkres\n'), ((3245, 3297), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['self.true_label', 'new_predict'], {}), '(self.true_label, new_predict)\n', (3267, 3297), False, 'from sklearn import metrics\n'), ((3326, 3389), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['self.true_label', 'new_predict'], {'average': '"""macro"""'}), "(self.true_label, new_predict, average='macro')\n", (3342, 3389), False, 'from sklearn import metrics\n'), ((3416, 3486), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['self.true_label', 'new_predict'], {'average': '"""macro"""'}), "(self.true_label, new_predict, average='macro')\n", (3439, 3486), False, 'from sklearn import metrics\n'), ((3510, 3577), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['self.true_label', 'new_predict'], {'average': '"""macro"""'}), "(self.true_label, new_predict, average='macro')\n", (3530, 3577), False, 'from sklearn import metrics\n'), ((3597, 3660), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['self.true_label', 'new_predict'], {'average': '"""micro"""'}), "(self.true_label, new_predict, average='micro')\n", (3613, 3660), False, 'from sklearn import metrics\n'), ((3687, 3757), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['self.true_label', 'new_predict'], {'average': '"""micro"""'}), "(self.true_label, new_predict, average='micro')\n", (3710, 3757), False, 'from sklearn import metrics\n'), ((3781, 3848), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['self.true_label', 'new_predict'], {'average': '"""micro"""'}), "(self.true_label, new_predict, average='micro')\n", (3801, 3848), False, 'from sklearn import metrics\n'), ((4012, 4082), 'sklearn.metrics.normalized_mutual_info_score', 'metrics.normalized_mutual_info_score', (['self.true_label', 'self.pred_label'], {}), '(self.true_label, self.pred_label)\n', (4048, 4082), False, 'from sklearn import metrics\n'), ((4102, 4163), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['self.true_label', 'self.pred_label'], {}), '(self.true_label, self.pred_label)\n', (4129, 4163), False, 'from sklearn import metrics\n'), ((5338, 5362), 'torch.mm', 'torch.mm', (['x', 'self.weight'], {}), '(x, self.weight)\n', (5346, 5362), False, 'import 
torch\n'), ((5374, 5390), 'torch.mm', 'torch.mm', (['adj', 'x'], {}), '(adj, x)\n', (5382, 5390), False, 'import torch\n'), ((6689, 6757), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'self.nClusters', 'covariance_type': '"""diag"""'}), "(n_components=self.nClusters, covariance_type='diag')\n", (6704, 6757), False, 'from sklearn.mixture import GaussianMixture\n'), ((7134, 7164), 'torch.from_numpy', 'torch.from_numpy', (['gmm.weights_'], {}), '(gmm.weights_)\n', (7150, 7164), False, 'import torch\n'), ((7190, 7218), 'torch.from_numpy', 'torch.from_numpy', (['gmm.means_'], {}), '(gmm.means_)\n', (7206, 7218), False, 'import torch\n'), ((9426, 9452), 'preprocessing.sparse_to_tuple', 'sparse_to_tuple', (['adj_label'], {}), '(adj_label)\n', (9441, 9452), False, 'from preprocessing import sparse_to_tuple\n'), ((10231, 10268), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['opti'], {'step_size': '(10)', 'gamma': '(0.9)'}), '(opti, step_size=10, gamma=0.9)\n', (10237, 10268), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((12602, 12617), 'torch.cat', 'torch.cat', (['G', '(1)'], {}), '(G, 1)\n', (12611, 12617), False, 'import torch\n'), ((13070, 13093), 'numpy.argmax', 'np.argmax', (['yita'], {'axis': '(1)'}), '(yita, axis=1)\n', (13079, 13093), True, 'import numpy as np\n'), ((6269, 6317), 'torch.randn', 'torch.randn', (['self.nClusters', 'self.embedding_size'], {}), '(self.nClusters, self.embedding_size)\n', (6280, 6317), False, 'import torch\n'), ((6379, 6427), 'torch.randn', 'torch.randn', (['self.nClusters', 'self.embedding_size'], {}), '(self.nClusters, self.embedding_size)\n', (6390, 6427), False, 'import torch\n'), ((7263, 7297), 'torch.from_numpy', 'torch.from_numpy', (['gmm.covariances_'], {}), '(gmm.covariances_)\n', (7279, 7297), False, 'import torch\n'), ((9385, 9405), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (9391, 9405), True, 'import scipy.sparse as sp\n'), ((9498, 9530), 'torch.LongTensor', 'torch.LongTensor', (['adj_label[0].T'], {}), '(adj_label[0].T)\n', (9514, 9530), False, 'import torch\n'), ((9569, 9600), 'torch.FloatTensor', 'torch.FloatTensor', (['adj_label[1]'], {}), '(adj_label[1])\n', (9586, 9600), False, 'import torch\n'), ((9638, 9662), 'torch.Size', 'torch.Size', (['adj_label[2]'], {}), '(adj_label[2])\n', (9648, 9662), False, 'import torch\n'), ((10092, 10146), 'torch.load', 'torch.load', (["(save_path + dataset + '/pretrain/model.pk')"], {}), "(save_path + dataset + '/pretrain/model.pk')\n", (10102, 10146), False, 'import torch\n'), ((718, 751), 'torch.rand', 'torch.rand', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (728, 751), False, 'import torch\n'), ((1125, 1140), 'numpy.transpose', 'np.transpose', (['q'], {}), '(q)\n', (1137, 1140), True, 'import numpy as np\n'), ((1143, 1160), 'numpy.sum', 'np.sum', (['q'], {'axis': '(1)'}), '(q, axis=1)\n', (1149, 1160), True, 'import numpy as np\n'), ((6173, 6199), 'torch.ones', 'torch.ones', (['self.nClusters'], {}), '(self.nClusters)\n', (6183, 6199), False, 'import torch\n'), ((8517, 8571), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(1)', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=1, algorithm='ball_tree')\n", (8533, 8571), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((13398, 13420), 'torch.exp', 'torch.exp', (['self.logstd'], {}), '(self.logstd)\n', (13407, 13420), False, 'import torch\n'), ((8328, 8358), 'torch.sum', 'torch.sum', (['(1 + z_sigma2_log)', '(1)'], {}), '(1 
+ z_sigma2_log, 1)\n', (8337, 8358), False, 'import torch\n'), ((9071, 9093), 'numpy.isin', 'np.isin', (['idx[i]', 'adj_k'], {}), '(idx[i], adj_k)\n', (9078, 9093), True, 'import numpy as np\n'), ((9213, 9239), 'numpy.isin', 'np.isin', (['j', 'unconf_indices'], {}), '(j, unconf_indices)\n', (9220, 9239), True, 'import numpy as np\n'), ((9245, 9267), 'numpy.isin', 'np.isin', (['idx[i]', 'adj_k'], {}), '(idx[i], adj_k)\n', (9252, 9267), True, 'import numpy as np\n'), ((12695, 12712), 'numpy.log', 'np.log', (['(np.pi * 2)'], {}), '(np.pi * 2)\n', (12701, 12712), True, 'import numpy as np\n'), ((12736, 12757), 'torch.exp', 'torch.exp', (['log_sigma2'], {}), '(log_sigma2)\n', (12745, 12757), False, 'import torch\n'), ((1010, 1030), 'numpy.expand_dims', 'np.expand_dims', (['X', '(1)'], {}), '(X, 1)\n', (1024, 1030), True, 'import numpy as np\n')] |
import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db, self._num_classes)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
# for key in blobs:
# if key != 'roidb':
# blobs[key] = blobs[key].squeeze(axis=0)
blobs['data'] = blobs['data'].squeeze(axis=0)
return blobs
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
and 2) cfg.TRAIN.SCALES containing SINGLE scale.
Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can
pad and batch images base on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
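# Worked example (a sketch assuming cfg.TRAIN.IMS_PER_BATCH == 2): ratio_list [0.5, 0.8, 1.2, 1.5]
# yields [0.5, 0.5, 1.5, 1.5], i.e. every image in a minibatch shares one target aspect ratio so
# the minibatch can be padded to a common shape.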
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
def __iter__(self):
rand_perm = npr.permutation(self.num_data)
ratio_list = self.ratio_list[rand_perm]
ratio_index = self.ratio_index[rand_perm]
# re-calculate minibatch ratio list
ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
def __len__(self):
return self.num_data
class BatchSampler(torch_sampler.BatchSampler):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx) # Difference: batch.append(int(idx))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def collate_minibatch(list_of_blobs):
"""Stack samples seperately and return a list of minibatches
A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
Hence, we need to stack smaples from each minibatch seperately.
"""
Batch = {key: [] for key in list_of_blobs[0]}
# Because roidb consists of entries of variable length, it can't be batch into a tensor.
# So we keep roidb in the type of "list of ndarray".
lists = []
for blobs in list_of_blobs:
lists.append({'data' : blobs.pop('data'),
'rois' : blobs.pop('rois'),
'labels' : blobs.pop('labels')})
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
minibatch = default_collate(mini_list)
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
| [
"torch.utils.data.dataloader.default_collate",
"numpy.ceil",
"roi_data.minibatch.get_minibatch",
"numpy.empty",
"numpy.random.permutation"
]
| [((1746, 1768), 'numpy.empty', 'np.empty', (['(DATA_SIZE,)'], {}), '((DATA_SIZE,))\n', (1754, 1768), True, 'import numpy as np\n'), ((815, 858), 'roi_data.minibatch.get_minibatch', 'get_minibatch', (['single_db', 'self._num_classes'], {}), '(single_db, self._num_classes)\n', (828, 858), False, 'from roi_data.minibatch import get_minibatch\n'), ((1793, 1837), 'numpy.ceil', 'np.ceil', (['(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)'], {}), '(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)\n', (1800, 1837), True, 'import numpy as np\n'), ((2788, 2818), 'numpy.random.permutation', 'npr.permutation', (['self.num_data'], {}), '(self.num_data)\n', (2803, 2818), True, 'import numpy.random as npr\n'), ((5973, 5999), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['mini_list'], {}), '(mini_list)\n', (5988, 5999), False, 'from torch.utils.data.dataloader import default_collate\n')] |
import dataclasses
import pytest
from dataclasses_avroschema import fields
from . import consts
@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types(primitive_type):
name = "a_field"
field = fields.Field(name, primitive_type, dataclasses.MISSING)
avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type]
assert {"name": name, "type": avro_type} == field.to_dict()
@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types_with_default_value_none(primitive_type):
name = "a_field"
field = fields.Field(name, primitive_type, None)
avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]]
assert {"name": name, "type": avro_type, "default": fields.NULL} == field.to_dict()
@pytest.mark.parametrize("primitive_type,default", consts.PRIMITIVE_TYPES_AND_DEFAULTS)
def test_primitive_types_with_default_value(primitive_type, default):
name = "a_field"
field = fields.Field(name, primitive_type, default)
avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL]
assert {"name": name, "type": avro_type, "default": default} == field.to_dict()
@pytest.mark.parametrize(
"primitive_type,invalid_default", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS
)
def test_invalid_default_values(primitive_type, invalid_default):
name = "a_field"
field = fields.Field(name, primitive_type, invalid_default)
msg = f"Invalid default type. Default should be {primitive_type}"
with pytest.raises(AssertionError, match=msg):
field.to_dict()
| [
"pytest.mark.parametrize",
"pytest.raises",
"dataclasses_avroschema.fields.Field"
]
| [((102, 174), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""primitive_type"""', 'fields.PYTHON_INMUTABLE_TYPES'], {}), "('primitive_type', fields.PYTHON_INMUTABLE_TYPES)\n", (125, 174), False, 'import pytest\n'), ((433, 505), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""primitive_type"""', 'fields.PYTHON_INMUTABLE_TYPES'], {}), "('primitive_type', fields.PYTHON_INMUTABLE_TYPES)\n", (456, 505), False, 'import pytest\n'), ((812, 903), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""primitive_type,default"""', 'consts.PRIMITIVE_TYPES_AND_DEFAULTS'], {}), "('primitive_type,default', consts.\n PRIMITIVE_TYPES_AND_DEFAULTS)\n", (835, 903), False, 'import pytest\n'), ((1208, 1315), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""primitive_type,invalid_default"""', 'consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS'], {}), "('primitive_type,invalid_default', consts.\n PRIMITIVE_TYPES_AND_INVALID_DEFAULTS)\n", (1231, 1315), False, 'import pytest\n'), ((250, 305), 'dataclasses_avroschema.fields.Field', 'fields.Field', (['name', 'primitive_type', 'dataclasses.MISSING'], {}), '(name, primitive_type, dataclasses.MISSING)\n', (262, 305), False, 'from dataclasses_avroschema import fields\n'), ((605, 645), 'dataclasses_avroschema.fields.Field', 'fields.Field', (['name', 'primitive_type', 'None'], {}), '(name, primitive_type, None)\n', (617, 645), False, 'from dataclasses_avroschema import fields\n'), ((1002, 1045), 'dataclasses_avroschema.fields.Field', 'fields.Field', (['name', 'primitive_type', 'default'], {}), '(name, primitive_type, default)\n', (1014, 1045), False, 'from dataclasses_avroschema import fields\n'), ((1416, 1467), 'dataclasses_avroschema.fields.Field', 'fields.Field', (['name', 'primitive_type', 'invalid_default'], {}), '(name, primitive_type, invalid_default)\n', (1428, 1467), False, 'from dataclasses_avroschema import fields\n'), ((1548, 1588), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': 'msg'}), '(AssertionError, match=msg)\n', (1561, 1588), False, 'import pytest\n')] |
"""
Utility methods for parsing data returned from MapD
"""
import datetime
from collections import namedtuple
from sqlalchemy import text
import mapd.ttypes as T
from ._utils import seconds_to_time
Description = namedtuple("Description", ["name", "type_code", "display_size",
"internal_size", "precision", "scale",
"null_ok"])
ColumnDetails = namedtuple("ColumnDetails", ["name", "type", "nullable",
"precision", "scale",
"comp_param"])
_typeattr = {
'SMALLINT': 'int',
'INT': 'int',
'BIGINT': 'int',
'TIME': 'int',
'TIMESTAMP': 'int',
'DATE': 'int',
'BOOL': 'int',
'FLOAT': 'real',
'DECIMAL': 'real',
'DOUBLE': 'real',
'STR': 'str',
}
_thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES
_thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES
def _extract_row_val(desc, val):
# type: (T.TColumnType, T.TDatum) -> Any
typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type]
if val.is_null:
return None
val = getattr(val.val, _typeattr[typename] + '_val')
base = datetime.datetime(1970, 1, 1)
if typename == 'TIMESTAMP':
val = (base + datetime.timedelta(seconds=val))
elif typename == 'DATE':
val = (base + datetime.timedelta(seconds=val)).date()
elif typename == 'TIME':
val = seconds_to_time(val)
return val
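# Illustrative note (not part of the driver): temporal values arrive as
# seconds since the 1970-01-01 epoch, e.g. a TIMESTAMP of 86461 decodes to
# datetime.datetime(1970, 1, 2, 0, 1, 1).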
def _extract_col_vals(desc, val):
# type: (T.TColumnType, T.TColumn) -> Any
typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type]
nulls = val.nulls
vals = getattr(val.data, _typeattr[typename] + '_col')
vals = [None if null else v
for null, v in zip(nulls, vals)]
base = datetime.datetime(1970, 1, 1)
if typename == 'TIMESTAMP':
vals = [None if v is None else base + datetime.timedelta(seconds=v)
for v in vals]
elif typename == 'DATE':
vals = [None if v is None else (base +
datetime.timedelta(seconds=v)).date()
for v in vals]
elif typename == 'TIME':
vals = [None if v is None else seconds_to_time(v) for v in vals]
return vals
def _extract_description(row_desc):
# type: (List[T.TColumnType]) -> List[Description]
"""
    Return a list of (name, type_code, display_size, internal_size,
    precision, scale, null_ok) tuples.
https://www.python.org/dev/peps/pep-0249/#description
"""
return [Description(col.col_name, col.col_type.type,
None, None, None, None,
col.col_type.nullable)
for col in row_desc]
def _extract_column_details(row_desc):
# For Connection.get_table_details
return [
ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type],
x.col_type.nullable, x.col_type.precision,
x.col_type.scale, x.col_type.comp_param)
for x in row_desc
]
def _is_columnar(data):
# type: (T.TQueryResult) -> bool
return data.row_set.is_columnar
def _load_schema(buf):
"""
Load a `pyarrow.Schema` from a buffer written to shared memory
Parameters
----------
buf : pyarrow.Buffer
Returns
-------
schema : pyarrow.Schema
"""
import pyarrow as pa
reader = pa.RecordBatchStreamReader(buf)
return reader.schema
def _load_data(buf, schema):
"""
Load a `pandas.DataFrame` from a buffer written to shared memory
Parameters
----------
buf : pyarrow.Buffer
    schema : pyarrow.Schema
Returns
-------
df : pandas.DataFrame
"""
import pyarrow as pa
message = pa.read_message(buf)
rb = pa.read_record_batch(message, schema)
return rb.to_pandas()
def _parse_tdf_gpu(tdf):
"""
Parse the results of a select ipc_gpu into a GpuDataFrame
Parameters
----------
tdf : TDataFrame
Returns
-------
gdf : GpuDataFrame
"""
import numpy as np
from pygdf.gpuarrow import GpuArrowReader
from pygdf.dataframe import DataFrame
from numba import cuda
from numba.cuda.cudadrv import drvapi
from .shm import load_buffer
ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle)
ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size)
ctx = cuda.current_context()
dptr = ipch.open(ctx)
schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size)
# TODO: extra copy.
schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8)
dtype = np.dtype(np.byte)
darr = cuda.devicearray.DeviceNDArray(shape=dptr.size,
strides=dtype.itemsize,
dtype=dtype,
gpu_data=dptr)
reader = GpuArrowReader(schema_buffer, darr)
df = DataFrame()
for k, v in reader.to_dict().items():
df[k] = v
return df
def _bind_parameters(operation, parameters):
return (text(operation)
.bindparams(**parameters)
.compile(compile_kwargs={"literal_binds": True}))
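# Illustrative usage (sketch, not part of the module): binding named
# parameters as literals into a textual query.
#
#   compiled = _bind_parameters("SELECT * FROM t WHERE x = :x", {"x": 10})
#   str(compiled)  # -> "SELECT * FROM t WHERE x = 10"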
| [
"datetime.datetime",
"pyarrow.read_message",
"collections.namedtuple",
"sqlalchemy.text",
"pygdf.gpuarrow.GpuArrowReader",
"pyarrow.read_record_batch",
"numba.cuda.devicearray.DeviceNDArray",
"pygdf.dataframe.DataFrame",
"datetime.timedelta",
"numba.cuda.cudadrv.drvapi.cu_ipc_mem_handle",
"pyarrow.RecordBatchStreamReader",
"numba.cuda.driver.IpcHandle",
"numba.cuda.current_context",
"numpy.dtype"
]
| [((216, 334), 'collections.namedtuple', 'namedtuple', (['"""Description"""', "['name', 'type_code', 'display_size', 'internal_size', 'precision', 'scale',\n 'null_ok']"], {}), "('Description', ['name', 'type_code', 'display_size',\n 'internal_size', 'precision', 'scale', 'null_ok'])\n", (226, 334), False, 'from collections import namedtuple\n'), ((429, 526), 'collections.namedtuple', 'namedtuple', (['"""ColumnDetails"""', "['name', 'type', 'nullable', 'precision', 'scale', 'comp_param']"], {}), "('ColumnDetails', ['name', 'type', 'nullable', 'precision',\n 'scale', 'comp_param'])\n", (439, 526), False, 'from collections import namedtuple\n'), ((1222, 1251), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (1239, 1251), False, 'import datetime\n'), ((1827, 1856), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (1844, 1856), False, 'import datetime\n'), ((3456, 3487), 'pyarrow.RecordBatchStreamReader', 'pa.RecordBatchStreamReader', (['buf'], {}), '(buf)\n', (3482, 3487), True, 'import pyarrow as pa\n'), ((3804, 3824), 'pyarrow.read_message', 'pa.read_message', (['buf'], {}), '(buf)\n', (3819, 3824), True, 'import pyarrow as pa\n'), ((3834, 3871), 'pyarrow.read_record_batch', 'pa.read_record_batch', (['message', 'schema'], {}), '(message, schema)\n', (3854, 3871), True, 'import pyarrow as pa\n'), ((4335, 4375), 'numba.cuda.cudadrv.drvapi.cu_ipc_mem_handle', 'drvapi.cu_ipc_mem_handle', (['*tdf.df_handle'], {}), '(*tdf.df_handle)\n', (4359, 4375), False, 'from numba.cuda.cudadrv import drvapi\n'), ((4387, 4444), 'numba.cuda.driver.IpcHandle', 'cuda.driver.IpcHandle', (['None', 'ipc_handle'], {'size': 'tdf.df_size'}), '(None, ipc_handle, size=tdf.df_size)\n', (4408, 4444), False, 'from numba import cuda\n'), ((4455, 4477), 'numba.cuda.current_context', 'cuda.current_context', ([], {}), '()\n', (4475, 4477), False, 'from numba import cuda\n'), ((4680, 4697), 'numpy.dtype', 'np.dtype', (['np.byte'], {}), '(np.byte)\n', (4688, 4697), True, 'import numpy as np\n'), ((4709, 4812), 'numba.cuda.devicearray.DeviceNDArray', 'cuda.devicearray.DeviceNDArray', ([], {'shape': 'dptr.size', 'strides': 'dtype.itemsize', 'dtype': 'dtype', 'gpu_data': 'dptr'}), '(shape=dptr.size, strides=dtype.itemsize,\n dtype=dtype, gpu_data=dptr)\n', (4739, 4812), False, 'from numba import cuda\n'), ((4948, 4983), 'pygdf.gpuarrow.GpuArrowReader', 'GpuArrowReader', (['schema_buffer', 'darr'], {}), '(schema_buffer, darr)\n', (4962, 4983), False, 'from pygdf.gpuarrow import GpuArrowReader\n'), ((4993, 5004), 'pygdf.dataframe.DataFrame', 'DataFrame', ([], {}), '()\n', (5002, 5004), False, 'from pygdf.dataframe import DataFrame\n'), ((1306, 1337), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'val'}), '(seconds=val)\n', (1324, 1337), False, 'import datetime\n'), ((1935, 1964), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'v'}), '(seconds=v)\n', (1953, 1964), False, 'import datetime\n'), ((5139, 5154), 'sqlalchemy.text', 'text', (['operation'], {}), '(operation)\n', (5143, 5154), False, 'from sqlalchemy import text\n'), ((1390, 1421), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'val'}), '(seconds=val)\n', (1408, 1421), False, 'import datetime\n'), ((2112, 2141), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'v'}), '(seconds=v)\n', (2130, 2141), False, 'import datetime\n')] |
import logging
import warnings
import dask.dataframe as dd
import numpy as np
import pandas as pd
from featuretools import variable_types as vtypes
from featuretools.utils.entity_utils import (
col_is_datetime,
convert_all_variable_data,
convert_variable_data,
get_linked_vars,
infer_variable_types
)
from featuretools.utils.gen_utils import import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _dataframes_equal
from featuretools.variable_types import Text, find_variable_types
ks = import_or_none('databricks.koalas')
logger = logging.getLogger('featuretools.entityset')
_numeric_types = vtypes.PandasTypes._pandas_numerics
_categorical_types = [vtypes.PandasTypes._categorical]
_datetime_types = vtypes.PandasTypes._pandas_datetimes
class Entity(object):
"""Represents an entity in a Entityset, and stores relevant metadata and data
An Entity is analogous to a table in a relational database
See Also:
:class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet`
"""
def __init__(self, id, df, entityset, variable_types=None,
index=None, time_index=None, secondary_time_index=None,
last_time_index=None, already_sorted=False, make_index=False,
verbose=False):
""" Create Entity
Args:
id (str): Id of Entity.
df (pd.DataFrame): Dataframe providing the data for the
entity.
entityset (EntitySet): Entityset for this Entity.
variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's
variable_types dict maps string variable ids to types (:class:`.Variable`)
or type_string (str) or (type, kwargs) to pass keyword arguments to the Variable.
index (str): Name of id column in the dataframe.
time_index (str): Name of time column in the dataframe.
secondary_time_index (dict[str -> str]): Dictionary mapping columns
in the dataframe to the time index column they are associated with.
last_time_index (pd.Series): Time index of the last event for each
instance across all child entities.
make_index (bool, optional) : If True, assume index does not exist as a column in
                dataframe, and create a new column of that name using integers from 0 to len(dataframe).
Otherwise, assume index exists in dataframe.
"""
_validate_entity_params(id, df, time_index)
created_index, index, df = _create_index(index, make_index, df)
self.id = id
self.entityset = entityset
self.data = {'df': df, 'last_time_index': last_time_index}
self.created_index = created_index
self._verbose = verbose
secondary_time_index = secondary_time_index or {}
self._create_variables(variable_types, index, time_index, secondary_time_index)
self.df = df[[v.id for v in self.variables]]
self.set_index(index)
self.time_index = None
if time_index:
self.set_time_index(time_index, already_sorted=already_sorted)
self.set_secondary_time_index(secondary_time_index)
def __repr__(self):
repr_out = u"Entity: {}\n".format(self.id)
repr_out += u" Variables:"
for v in self.variables:
repr_out += u"\n {} (dtype: {})".format(v.id, v.type_string)
shape = self.shape
repr_out += u"\n Shape:\n (Rows: {}, Columns: {})".format(
shape[0], shape[1])
return repr_out
@property
def shape(self):
'''Shape of the entity's dataframe'''
return self.df.shape
def __eq__(self, other, deep=False):
if self.index != other.index:
return False
if self.time_index != other.time_index:
return False
if self.secondary_time_index != other.secondary_time_index:
return False
if len(self.variables) != len(other.variables):
return False
if set(self.variables) != set(other.variables):
return False
if deep:
if self.last_time_index is None and other.last_time_index is not None:
return False
elif self.last_time_index is not None and other.last_time_index is None:
return False
elif self.last_time_index is not None and other.last_time_index is not None:
if not self.last_time_index.equals(other.last_time_index):
return False
if not _dataframes_equal(self.df, other.df):
return False
variables = {variable: (variable, ) for variable in self.variables}
for variable in other.variables:
variables[variable] += (variable, )
for self_var, other_var in variables.values():
if not self_var.__eq__(other_var, deep=True):
return False
return True
def __sizeof__(self):
return sum([value.__sizeof__() for value in self.data.values()])
@property
def df(self):
'''Dataframe providing the data for the entity.'''
return self.data["df"]
@df.setter
def df(self, _df):
self.data["df"] = _df
@property
def last_time_index(self):
'''
Time index of the last event for each instance across all child entities.
'''
return self.data["last_time_index"]
@last_time_index.setter
def last_time_index(self, lti):
self.data["last_time_index"] = lti
def __hash__(self):
return id(self.id)
def __getitem__(self, variable_id):
return self._get_variable(variable_id)
def _get_variable(self, variable_id):
"""Get variable instance
Args:
variable_id (str) : Id of variable to get.
Returns:
:class:`.Variable` : Instance of variable.
Raises:
RuntimeError : if no variable exist with provided id
"""
for v in self.variables:
if v.id == variable_id:
return v
raise KeyError("Variable: %s not found in entity" % (variable_id))
@property
def variable_types(self):
'''Dictionary mapping variable id's to variable types'''
return {v.id: type(v) for v in self.variables}
def convert_variable_type(self, variable_id, new_type,
convert_data=True,
**kwargs):
"""Convert variable in dataframe to different type
Args:
variable_id (str) : Id of variable to convert.
new_type (subclass of `Variable`) : Type of variable to convert to.
entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity.
convert_data (bool) : If True, convert underlying data in the EntitySet.
Raises:
RuntimeError : Raises if it cannot convert the underlying data
Examples:
>>> from featuretools.tests.testing_utils import make_ecommerce_entityset
>>> es = make_ecommerce_entityset()
>>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical)
"""
if convert_data:
# first, convert the underlying data (or at least try to)
self.df = convert_variable_data(df=self.df,
column_id=variable_id,
new_type=new_type,
**kwargs)
# replace the old variable with the new one, maintaining order
variable = self._get_variable(variable_id)
new_variable = new_type.create_from(variable)
self.variables[self.variables.index(variable)] = new_variable
def _create_variables(self, variable_types, index, time_index, secondary_time_index):
"""Extracts the variables from a dataframe
Args:
variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's
variable_types dict maps string variable ids to types (:class:`.Variable`)
or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable.
index (str): Name of index column
time_index (str or None): Name of time_index column
secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns
that each map to a list of columns that depend on that secondary time
"""
variables = []
variable_types = variable_types.copy() or {}
string_to_class_map = find_variable_types()
# TODO: Remove once Text has been removed from variable types
string_to_class_map[Text.type_string] = Text
for vid in variable_types.copy():
vtype = variable_types[vid]
if isinstance(vtype, str):
if vtype in string_to_class_map:
variable_types[vid] = string_to_class_map[vtype]
else:
variable_types[vid] = string_to_class_map['unknown']
warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".format(vtype))
if index not in variable_types:
variable_types[index] = vtypes.Index
link_vars = get_linked_vars(self)
inferred_variable_types = infer_variable_types(self.df,
link_vars,
variable_types,
time_index,
secondary_time_index)
inferred_variable_types.update(variable_types)
for v in inferred_variable_types:
# TODO document how vtype can be tuple
vtype = inferred_variable_types[v]
if isinstance(vtype, tuple):
# vtype is (ft.Variable, dict_of_kwargs)
_v = vtype[0](v, self, **vtype[1])
else:
_v = inferred_variable_types[v](v, self)
variables += [_v]
# convert data once we've inferred
self.df = convert_all_variable_data(df=self.df,
variable_types=inferred_variable_types)
# make sure index is at the beginning
index_variable = [v for v in variables
if v.id == index][0]
self.variables = [index_variable] + [v for v in variables
if v.id != index]
def update_data(self, df, already_sorted=False,
recalculate_last_time_indexes=True):
        '''Update entity's internal dataframe, optionally making sure data is sorted,
reference indexes to other entities are consistent, and last_time_indexes
are consistent.
'''
if len(df.columns) != len(self.variables):
raise ValueError("Updated dataframe contains {} columns, expecting {}".format(len(df.columns),
len(self.variables)))
for v in self.variables:
if v.id not in df.columns:
raise ValueError("Updated dataframe is missing new {} column".format(v.id))
# Make sure column ordering matches variable ordering
self.df = df[[v.id for v in self.variables]]
self.set_index(self.index)
if self.time_index is not None:
self.set_time_index(self.time_index, already_sorted=already_sorted)
self.set_secondary_time_index(self.secondary_time_index)
if recalculate_last_time_indexes and self.last_time_index is not None:
self.entityset.add_last_time_indexes(updated_entities=[self.id])
self.entityset.reset_data_description()
def add_interesting_values(self, max_values=5, verbose=False):
"""
Find interesting values for categorical variables, to be used to
generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for variable in self.variables:
# some heuristics to find basic 'where'-able variables
if isinstance(variable, vtypes.Discrete):
variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype)
# TODO - consider removing this constraints
# don't add interesting values for entities in relationships
skip = False
for r in self.entityset.relationships:
if variable in [r.child_variable, r.parent_variable]:
skip = True
break
if skip:
continue
counts = self.df[variable.id].value_counts()
# find how many of each unique value there are; sort by count,
# and add interesting values to each variable
total_count = np.sum(counts)
counts[:] = counts.sort_values()[::-1]
for i in range(min(max_values, len(counts.index))):
idx = counts.index[i]
# add the value to interesting_values if it represents more than
# 25% of the values we have not seen so far
if len(counts.index) < 25:
if verbose:
msg = "Variable {}: Marking {} as an "
msg += "interesting value"
logger.info(msg.format(variable.id, idx))
variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))
else:
fraction = counts[idx] / total_count
if fraction > 0.05 and fraction < 0.95:
if verbose:
msg = "Variable {}: Marking {} as an "
msg += "interesting value"
logger.info(msg.format(variable.id, idx))
variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))
# total_count -= counts[idx]
else:
break
self.entityset.reset_data_description()
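    # Illustrative example (not executed here): after calling
    # entity.add_interesting_values(), a categorical variable such as
    # "device" may end up with interesting_values like
    # ["desktop", "mobile", "tablet"], which feature synthesis can use to
    # build "where" clauses.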
def delete_variables(self, variable_ids):
"""
Remove variables from entity's dataframe and from
self.variables
Args:
variable_ids (list[str]): Variables to delete
Returns:
None
"""
# check if variable is not a list
if not isinstance(variable_ids, list):
raise TypeError('variable_ids must be a list of variable names')
if len(variable_ids) == 0:
return
self.df = self.df.drop(variable_ids, axis=1)
for v_id in variable_ids:
v = self._get_variable(v_id)
self.variables.remove(v)
def set_time_index(self, variable_id, already_sorted=False):
# check time type
if not isinstance(self.df, pd.DataFrame) or self.df.empty:
time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype]
else:
time_to_check = self.df[variable_id].iloc[0]
time_type = _check_time_type(time_to_check)
if time_type is None:
raise TypeError("%s time index not recognized as numeric or"
" datetime" % (self.id))
if self.entityset.time_type is None:
self.entityset.time_type = time_type
elif self.entityset.time_type != time_type:
raise TypeError("%s time index is %s type which differs from"
" other entityset time indexes" %
(self.id, time_type))
if is_instance(self.df, (dd, ks), 'DataFrame'):
t = time_type # skip checking values
already_sorted = True # skip sorting
else:
t = vtypes.NumericTimeIndex
if col_is_datetime(self.df[variable_id]):
t = vtypes.DatetimeTimeIndex
# use stable sort
if not already_sorted:
# sort by time variable, then by index
self.df = self.df.sort_values([variable_id, self.index])
self.convert_variable_type(variable_id, t, convert_data=False)
self.time_index = variable_id
def set_index(self, variable_id, unique=True):
"""
Args:
variable_id (string) : Name of an existing variable to set as index.
unique (bool) : Whether to assert that the index is unique.
"""
if isinstance(self.df, pd.DataFrame):
self.df = self.df.set_index(self.df[variable_id], drop=False)
self.df.index.name = None
if unique:
assert self.df.index.is_unique, "Index is not unique on dataframe " \
"(Entity {})".format(self.id)
self.convert_variable_type(variable_id, vtypes.Index, convert_data=False)
self.index = variable_id
def set_secondary_time_index(self, secondary_time_index):
for time_index, columns in secondary_time_index.items():
if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty:
time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype]
else:
time_to_check = self.df[time_index].head(1).iloc[0]
time_type = _check_time_type(time_to_check)
if time_type is None:
raise TypeError("%s time index not recognized as numeric or"
" datetime" % (self.id))
if self.entityset.time_type != time_type:
raise TypeError("%s time index is %s type which differs from"
" other entityset time indexes" %
(self.id, time_type))
if time_index not in columns:
columns.append(time_index)
self.secondary_time_index = secondary_time_index
def _create_index(index, make_index, df):
'''Handles index creation logic base on user input'''
created_index = None
if index is None:
# Case 1: user wanted to make index but did not specify column name
assert not make_index, "Must specify an index name if make_index is True"
# Case 2: make_index not specified but no index supplied, use first column
warnings.warn(("Using first column as index. "
"To change this, specify the index parameter"))
index = df.columns[0]
elif make_index and index in df.columns:
# Case 3: user wanted to make index but column already exists
raise RuntimeError("Cannot make index: index variable already present")
elif index not in df.columns:
if not make_index:
# Case 4: user names index, it is not in df. does not specify
# make_index. Make new index column and warn
warnings.warn("index {} not found in dataframe, creating new "
"integer column".format(index))
# Case 5: make_index with no errors or warnings
# (Case 4 also uses this code path)
if isinstance(df, dd.DataFrame):
df[index] = 1
df[index] = df[index].cumsum() - 1
elif is_instance(df, ks, 'DataFrame'):
df = df.koalas.attach_id_column('distributed-sequence', index)
else:
df.insert(0, index, range(len(df)))
created_index = index
# Case 6: user specified index, which is already in df. No action needed.
return created_index, index, df
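# Illustrative sketch (assumption, not part of the library): asking
# _create_index to build a fresh integer index on a plain pandas frame.
#
#   df = pd.DataFrame({'value': [10, 20, 30]})
#   created, index, df = _create_index('row_id', True, df)
#   # created == 'row_id' and df['row_id'] holds 0, 1, 2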
def _validate_entity_params(id, df, time_index):
'''Validation checks for Entity inputs'''
assert isinstance(id, str), "Entity id must be a string"
assert len(df.columns) == len(set(df.columns)), "Duplicate column names"
for c in df.columns:
if not isinstance(c, str):
raise ValueError("All column names must be strings (Column {} "
"is not a string)".format(c))
if time_index is not None and time_index not in df.columns:
raise LookupError('Time index not found in dataframe')
| [
"logging.getLogger",
"pandas.Series",
"featuretools.utils.entity_utils.convert_variable_data",
"featuretools.utils.gen_utils.is_instance",
"numpy.sum",
"featuretools.utils.entity_utils.convert_all_variable_data",
"featuretools.utils.entity_utils.col_is_datetime",
"featuretools.utils.entity_utils.get_linked_vars",
"featuretools.variable_types.find_variable_types",
"featuretools.utils.wrangle._dataframes_equal",
"featuretools.utils.entity_utils.infer_variable_types",
"warnings.warn",
"featuretools.utils.wrangle._check_time_type",
"featuretools.utils.gen_utils.import_or_none"
]
| [((539, 574), 'featuretools.utils.gen_utils.import_or_none', 'import_or_none', (['"""databricks.koalas"""'], {}), "('databricks.koalas')\n", (553, 574), False, 'from featuretools.utils.gen_utils import import_or_none, is_instance\n'), ((585, 628), 'logging.getLogger', 'logging.getLogger', (['"""featuretools.entityset"""'], {}), "('featuretools.entityset')\n", (602, 628), False, 'import logging\n'), ((8738, 8759), 'featuretools.variable_types.find_variable_types', 'find_variable_types', ([], {}), '()\n', (8757, 8759), False, 'from featuretools.variable_types import Text, find_variable_types\n'), ((9453, 9474), 'featuretools.utils.entity_utils.get_linked_vars', 'get_linked_vars', (['self'], {}), '(self)\n', (9468, 9474), False, 'from featuretools.utils.entity_utils import col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types\n'), ((9509, 9603), 'featuretools.utils.entity_utils.infer_variable_types', 'infer_variable_types', (['self.df', 'link_vars', 'variable_types', 'time_index', 'secondary_time_index'], {}), '(self.df, link_vars, variable_types, time_index,\n secondary_time_index)\n', (9529, 9603), False, 'from featuretools.utils.entity_utils import col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types\n'), ((10331, 10408), 'featuretools.utils.entity_utils.convert_all_variable_data', 'convert_all_variable_data', ([], {'df': 'self.df', 'variable_types': 'inferred_variable_types'}), '(df=self.df, variable_types=inferred_variable_types)\n', (10356, 10408), False, 'from featuretools.utils.entity_utils import col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types\n'), ((15713, 15744), 'featuretools.utils.wrangle._check_time_type', '_check_time_type', (['time_to_check'], {}), '(time_to_check)\n', (15729, 15744), False, 'from featuretools.utils.wrangle import _check_time_type, _dataframes_equal\n'), ((16247, 16290), 'featuretools.utils.gen_utils.is_instance', 'is_instance', (['self.df', '(dd, ks)', '"""DataFrame"""'], {}), "(self.df, (dd, ks), 'DataFrame')\n", (16258, 16290), False, 'from featuretools.utils.gen_utils import import_or_none, is_instance\n'), ((18919, 19013), 'warnings.warn', 'warnings.warn', (['"""Using first column as index. To change this, specify the index parameter"""'], {}), "(\n 'Using first column as index. 
To change this, specify the index parameter')\n", (18932, 19013), False, 'import warnings\n'), ((7438, 7527), 'featuretools.utils.entity_utils.convert_variable_data', 'convert_variable_data', ([], {'df': 'self.df', 'column_id': 'variable_id', 'new_type': 'new_type'}), '(df=self.df, column_id=variable_id, new_type=new_type,\n **kwargs)\n', (7459, 7527), False, 'from featuretools.utils.entity_utils import col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types\n'), ((16461, 16498), 'featuretools.utils.entity_utils.col_is_datetime', 'col_is_datetime', (['self.df[variable_id]'], {}), '(self.df[variable_id])\n', (16476, 16498), False, 'from featuretools.utils.entity_utils import col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types\n'), ((17925, 17956), 'featuretools.utils.wrangle._check_time_type', '_check_time_type', (['time_to_check'], {}), '(time_to_check)\n', (17941, 17956), False, 'from featuretools.utils.wrangle import _check_time_type, _dataframes_equal\n'), ((4631, 4667), 'featuretools.utils.wrangle._dataframes_equal', '_dataframes_equal', (['self.df', 'other.df'], {}), '(self.df, other.df)\n', (4648, 4667), False, 'from featuretools.utils.wrangle import _check_time_type, _dataframes_equal\n'), ((12627, 12681), 'pandas.Series', 'pd.Series', ([], {'dtype': 'variable.entity.df[variable.id].dtype'}), '(dtype=variable.entity.df[variable.id].dtype)\n', (12636, 12681), True, 'import pandas as pd\n'), ((13332, 13346), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (13338, 13346), True, 'import numpy as np\n'), ((17653, 17696), 'featuretools.utils.gen_utils.is_instance', 'is_instance', (['self.df', '(dd, ks)', '"""DataFrame"""'], {}), "(self.df, (dd, ks), 'DataFrame')\n", (17664, 17696), False, 'from featuretools.utils.gen_utils import import_or_none, is_instance\n'), ((19815, 19847), 'featuretools.utils.gen_utils.is_instance', 'is_instance', (['df', 'ks', '"""DataFrame"""'], {}), "(df, ks, 'DataFrame')\n", (19826, 19847), False, 'from featuretools.utils.gen_utils import import_or_none, is_instance\n'), ((14026, 14042), 'pandas.Series', 'pd.Series', (['[idx]'], {}), '([idx])\n', (14035, 14042), True, 'import pandas as pd\n'), ((14532, 14548), 'pandas.Series', 'pd.Series', (['[idx]'], {}), '([idx])\n', (14541, 14548), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ralph.business.models import Venture, VentureRole
def all_ventures():
yield '', '---------'
for v in Venture.objects.filter(show_in_ralph=True).order_by('path'):
yield (
v.id,
"%s[%s] %s" % (
'\u00A0' * 4 * v.path.count('/'), # u00A0 == 'no-break space'
v.symbol,
v.name,
)
)
def all_roles():
yield '', '---------'
for r in VentureRole.objects.order_by(
'-venture__is_infrastructure', 'venture__name',
'parent__parent__name', 'parent__name', 'name'
):
yield r.id, '{} / {}'.format(r.venture.name, r.full_name)
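# Illustrative note (assumption, not from the original module): both
# generators yield (value, label) pairs shaped for Django choice fields,
# e.g. forms.ChoiceField(choices=list(all_ventures())).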
| [
"ralph.business.models.Venture.objects.filter",
"ralph.business.models.VentureRole.objects.order_by"
]
| [((624, 752), 'ralph.business.models.VentureRole.objects.order_by', 'VentureRole.objects.order_by', (['"""-venture__is_infrastructure"""', '"""venture__name"""', '"""parent__parent__name"""', '"""parent__name"""', '"""name"""'], {}), "('-venture__is_infrastructure', 'venture__name',\n 'parent__parent__name', 'parent__name', 'name')\n", (652, 752), False, 'from ralph.business.models import Venture, VentureRole\n'), ((290, 332), 'ralph.business.models.Venture.objects.filter', 'Venture.objects.filter', ([], {'show_in_ralph': '(True)'}), '(show_in_ralph=True)\n', (312, 332), False, 'from ralph.business.models import Venture, VentureRole\n')] |
"""
manage.py for flask application
"""
import unittest
import coverage
import os
from flask.cli import FlaskGroup
from project import create_app, db
from project.api.models import User
# Code coverage
COV = coverage.Coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/config.py',
]
)
COV.start()
app = create_app()
cli = FlaskGroup(create_app=create_app)
@cli.command()
def cov():
"""
Runs the unit tests with coverage
"""
tests = unittest.TestLoader().discover('project/tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
return 0
return -1
@cli.command()
def recreate_db():
"""
    Drops all tables and recreates the database
"""
db.drop_all()
db.create_all()
db.session.commit()
@cli.command()
def test():
"""
    Runs tests without code coverage
"""
tests = unittest.TestLoader().discover(
'project/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return -1
@cli.command()
def seed_db():
"""
Seeds the database with some initial data
"""
user1 = User(
eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower())
user1.username = "Meeting Room Of The Century"
user1.email = "<EMAIL>"
user1.city_country = "Singapore, SG"
user1.tags = "Meeting Spaces"
user1.about = '''This is the best meeting space you will ever see'''
user1.seller_detail = '''We sell space'''
user1.buyer_detail = '''We are not buying'''
user2 = User(
eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower())
user2.username = "Makeup Till You Breakup"
user2.email = "<EMAIL>"
user2.city_country = "Singapore, SG"
user2.tags = "Stylist"
user2.about = '''Reimagine your looks with us'''
user2.seller_detail = '''We are serving looks tonight'''
user2.buyer_detail = '''We are not buying'''
user3 = User(
eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower())
user3.username = "Heart Attack Buffet"
user3.email = "<EMAIL>"
user3.city_country = "Singapore, SG"
user3.tags = "Buffet"
user3.about = '''Eat till you get a heart attack'''
user3.seller_detail = '''We sell food'''
user3.buyer_detail = '''We are not buying'''
user4 = User(
eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower())
user4.username = "Pleasant Photography"
user4.email = "<EMAIL>"
user4.city_country = "Singapore, SG"
user4.tags = "Photography"
    user4.about = ('We are a group of photographers specialized in wedding '
'photography. '
'We have won numerous awards for our photos. '
'We will capture your '
'memories in ways you cannot imagine.')
user4.seller_detail = '''We sell photos'''
user4.buyer_detail = '''We are not buying'''
user5 = User(
eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower())
user5.username = "Epic Winebar"
user5.email = "<EMAIL>"
user5.city_country = "Singapore, SG"
user5.tags = "Bar, Restaurant"
    user5.about = ('Award winning winebar with the best selection of alcohol. '
                   'We serve delicious international cuisine, with fusion '
                   'dishes inspired from our travels. We are always ready for '
'your craziest events.')
user5.seller_detail = '''We sell wine'''
user5.buyer_detail = '''We are not buying'''
user6 = User(
eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower())
user6.username = "Dancers Who Dance"
user6.email = "<EMAIL>"
user6.city_country = "Singapore, SG"
user6.tags = "Performer"
    user6.about = ('Dancers who dance are people who like to dance a lot. '
'Give us music and we will dance for you.')
user6.seller_detail = '''We sell dance'''
user6.buyer_detail = '''We are not buying'''
db.session.add(user1)
db.session.add(user2)
db.session.add(user3)
db.session.add(user4)
db.session.add(user5)
db.session.add(user6)
db.session.commit()
if __name__ == '__main__':
cli()
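# Example invocations (sketch; assumes this module is saved as manage.py):
#
#   python manage.py recreate_db
#   python manage.py seed_db
#   python manage.py test
#   python manage.py cov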
| [
"project.db.drop_all",
"project.db.create_all",
"os.path.join",
"coverage.Coverage",
"project.create_app",
"project.db.session.add",
"os.path.dirname",
"flask.cli.FlaskGroup",
"unittest.TextTestRunner",
"unittest.TestLoader",
"project.db.session.commit"
]
| [((211, 313), 'coverage.Coverage', 'coverage.Coverage', ([], {'branch': '(True)', 'include': '"""project/*"""', 'omit': "['project/tests/*', 'project/config.py']"}), "(branch=True, include='project/*', omit=['project/tests/*',\n 'project/config.py'])\n", (228, 313), False, 'import coverage\n'), ((367, 379), 'project.create_app', 'create_app', ([], {}), '()\n', (377, 379), False, 'from project import create_app, db\n'), ((386, 419), 'flask.cli.FlaskGroup', 'FlaskGroup', ([], {'create_app': 'create_app'}), '(create_app=create_app)\n', (396, 419), False, 'from flask.cli import FlaskGroup\n'), ((1118, 1131), 'project.db.drop_all', 'db.drop_all', ([], {}), '()\n', (1129, 1131), False, 'from project import create_app, db\n'), ((1136, 1151), 'project.db.create_all', 'db.create_all', ([], {}), '()\n', (1149, 1151), False, 'from project import create_app, db\n'), ((1156, 1175), 'project.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1173, 1175), False, 'from project import create_app, db\n'), ((4458, 4479), 'project.db.session.add', 'db.session.add', (['user1'], {}), '(user1)\n', (4472, 4479), False, 'from project import create_app, db\n'), ((4484, 4505), 'project.db.session.add', 'db.session.add', (['user2'], {}), '(user2)\n', (4498, 4505), False, 'from project import create_app, db\n'), ((4510, 4531), 'project.db.session.add', 'db.session.add', (['user3'], {}), '(user3)\n', (4524, 4531), False, 'from project import create_app, db\n'), ((4536, 4557), 'project.db.session.add', 'db.session.add', (['user4'], {}), '(user4)\n', (4550, 4557), False, 'from project import create_app, db\n'), ((4562, 4583), 'project.db.session.add', 'db.session.add', (['user5'], {}), '(user5)\n', (4576, 4583), False, 'from project import create_app, db\n'), ((4588, 4609), 'project.db.session.add', 'db.session.add', (['user6'], {}), '(user6)\n', (4602, 4609), False, 'from project import create_app, db\n'), ((4615, 4634), 'project.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4632, 4634), False, 'from project import create_app, db\n'), ((826, 863), 'os.path.join', 'os.path.join', (['basedir', '"""tmp/coverage"""'], {}), "(basedir, 'tmp/coverage')\n", (838, 863), False, 'import os\n'), ((514, 535), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (533, 535), False, 'import unittest\n'), ((575, 611), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (598, 611), False, 'import unittest\n'), ((782, 807), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (797, 807), False, 'import os\n'), ((1269, 1290), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1288, 1290), False, 'import unittest\n'), ((1359, 1395), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1382, 1395), False, 'import unittest\n')] |
"""
Support for controlling projector via the PJLink protocol.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.pjlink/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pypjlink2==1.2.0']
_LOGGER = logging.getLogger(__name__)
CONF_ENCODING = 'encoding'
DEFAULT_PORT = 4352
DEFAULT_ENCODING = 'utf-8'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
})
SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PJLink platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
encoding = config.get(CONF_ENCODING)
password = config.get(CONF_PASSWORD)
if 'pjlink' not in hass.data:
hass.data['pjlink'] = {}
hass_data = hass.data['pjlink']
device_label = "{}:{}".format(host, port)
if device_label in hass_data:
return
device = PjLinkDevice(host, port, name, encoding, password)
hass_data[device_label] = device
add_entities([device], True)
def format_input_source(input_source_name, input_source_number):
"""Format input source for display in UI."""
return "{} {}".format(input_source_name, input_source_number)
class PjLinkDevice(MediaPlayerDevice):
"""Representation of a PJLink device."""
def __init__(self, host, port, name, encoding, password):
"""Iinitialize the PJLink device."""
self._host = host
self._port = port
self._name = name
self._password = password
self._encoding = encoding
self._muted = False
self._pwstate = STATE_OFF
self._current_source = None
with self.projector() as projector:
if not self._name:
self._name = projector.get_name()
inputs = projector.get_inputs()
self._source_name_mapping = \
{format_input_source(*x): x for x in inputs}
self._source_list = sorted(self._source_name_mapping.keys())
def projector(self):
"""Create PJLink Projector instance."""
from pypjlink import Projector
projector = Projector.from_address(
self._host, self._port, self._encoding)
projector.authenticate(self._password)
return projector
def update(self):
"""Get the latest state from the device."""
with self.projector() as projector:
pwstate = projector.get_power()
if pwstate == 'off':
self._pwstate = STATE_OFF
else:
self._pwstate = STATE_ON
self._muted = projector.get_mute()[1]
self._current_source = \
format_input_source(*projector.get_input())
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def is_volume_muted(self):
"""Return boolean indicating mute status."""
return self._muted
@property
def source(self):
"""Return current input source."""
return self._current_source
@property
def source_list(self):
"""Return all available input sources."""
return self._source_list
@property
def supported_features(self):
"""Return projector supported features."""
return SUPPORT_PJLINK
def turn_off(self):
"""Turn projector off."""
with self.projector() as projector:
projector.set_power('off')
def turn_on(self):
"""Turn projector on."""
with self.projector() as projector:
projector.set_power('on')
def mute_volume(self, mute):
"""Mute (true) of unmute (false) media player."""
with self.projector() as projector:
from pypjlink import MUTE_AUDIO
projector.set_mute(MUTE_AUDIO, mute)
def select_source(self, source):
"""Set the input source."""
source = self._source_name_mapping[source]
with self.projector() as projector:
projector.set_input(*source)
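# Example configuration.yaml entry (illustrative sketch based on the
# PLATFORM_SCHEMA above; host/name values are placeholders):
#
#   media_player:
#     - platform: pjlink
#       host: 192.168.1.40
#       port: 4352
#       name: Office Projector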
| [
"logging.getLogger",
"pypjlink.Projector.from_address",
"voluptuous.Required",
"voluptuous.Optional"
]
| [((626, 653), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (643, 653), False, 'import logging\n'), ((778, 801), 'voluptuous.Required', 'vol.Required', (['CONF_HOST'], {}), '(CONF_HOST)\n', (790, 801), True, 'import voluptuous as vol\n'), ((818, 863), 'voluptuous.Optional', 'vol.Optional', (['CONF_PORT'], {'default': 'DEFAULT_PORT'}), '(CONF_PORT, default=DEFAULT_PORT)\n', (830, 863), True, 'import voluptuous as vol\n'), ((878, 901), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {}), '(CONF_NAME)\n', (890, 901), True, 'import voluptuous as vol\n'), ((918, 971), 'voluptuous.Optional', 'vol.Optional', (['CONF_ENCODING'], {'default': 'DEFAULT_ENCODING'}), '(CONF_ENCODING, default=DEFAULT_ENCODING)\n', (930, 971), True, 'import voluptuous as vol\n'), ((988, 1015), 'voluptuous.Optional', 'vol.Optional', (['CONF_PASSWORD'], {}), '(CONF_PASSWORD)\n', (1000, 1015), True, 'import voluptuous as vol\n'), ((2847, 2909), 'pypjlink.Projector.from_address', 'Projector.from_address', (['self._host', 'self._port', 'self._encoding'], {}), '(self._host, self._port, self._encoding)\n', (2869, 2909), False, 'from pypjlink import Projector\n')] |
# Built-in
import copy
import logging
import time
# External
from Qt.QtWidgets import QUndoCommand
# Internal
from nxt_editor import colors
from nxt_editor import user_dir
from nxt import nxt_path
from nxt.nxt_layer import LAYERS, SAVE_KEY
from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict,
list_merger)
from nxt import nxt_io
from nxt import GRID_SIZE
import nxt_editor
logger = logging.getLogger(nxt_editor.LOGGER_NAME)
def processing(func):
def wrapper(self):
self.model.processing.emit(True)
func(self)
self.model.processing.emit(False)
return wrapper
class NxtCommand(QUndoCommand):
def __init__(self, model):
super(NxtCommand, self).__init__()
self.model = model
self.model.layer_saved.connect(self.reset_layer_effected)
self._layers_effected_by_me = {}
def _get_effects(self, layer_path):
"""Gets the effected state for a given layer with context to this
        command, since a single command can affect layers in different ways.
:param layer_path: string of layer real path
:return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo)
"""
first_eff_by_undo = False
first_eff_by_redo = False
try:
first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo']
except KeyError:
pass
try:
first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo']
except KeyError:
pass
return first_eff_by_undo, first_eff_by_redo
def reset_layer_effected(self, layer_just_saved):
"""When the model marks a layer as saved we reset the class attr
`_first_effected_by_redo` to False. This makes sure the layer is
properly marked as unsaved even if we undo an action after saving it.
:param layer_just_saved: string of layer real path
:return: None
"""
eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved)
where_were_at = self.model.undo_stack.index()
cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1))
if cur_cmd is self:
return
if layer_just_saved in self._layers_effected_by_me:
if eff_by_undo:
# This command has already been marked as undo effects the
# layer, meaning the layer has been saved and the undo queue
# was moved to an index before this command and the same
# layer was saved again.
eff_by_redo = True
eff_by_undo = False
else:
# Now the undo of this command effects the layer not the redo
eff_by_redo = False
eff_by_undo = True
self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo,
'redo': eff_by_redo}
def redo_effected_layer(self, layer_path):
"""Adds layer to the model's set of effected (unsaved) layers. If
this command was the first to effect the layer we mark it as such
by setting the class attr `_first_effected_by_redo` to True.
:param layer_path: string of layer real path
:return: None
"""
layer_unsaved = layer_path in self.model.effected_layers
eff_by_undo, eff_by_redo = self._get_effects(layer_path)
if not eff_by_undo and layer_unsaved:
return
if not eff_by_undo:
self._layers_effected_by_me[layer_path] = {'undo': False,
'redo': True}
self.model.effected_layers.add(layer_path)
else:
# Layer was saved and then undo was called, thus this redo has a
# net zero effect on the layer
try:
self.model.effected_layers.remove(layer_path)
except KeyError: # Removed by a save action
pass
def undo_effected_layer(self, layer_path):
"""Removes layer from the model's set of effected (unsaved) layers.
If the layer is not marked as effected in the model we mark it as
effected. This case happens when undo is called after a layer is saved.
:param layer_path: string of layer real path
:return: None
"""
eff_by_undo, eff_by_redo = self._get_effects(layer_path)
layer_saved = layer_path not in self.model.effected_layers
if layer_saved:
eff_by_undo = True
# Set redo to False since now its been saved & the undo effects it
eff_by_redo = False
self.model.effected_layers.add(layer_path)
elif eff_by_redo:
try:
self.model.effected_layers.remove(layer_path)
except KeyError: # Removed by a save action
pass
self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo,
'redo': eff_by_redo}
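    # Worked example of the bookkeeping above (illustrative, not executed):
    # 1. redo() touches layer L   -> L is added to model.effected_layers and
    #    recorded as first effected by this command's redo.
    # 2. The user saves L         -> the model emits layer_saved and
    #    reset_layer_effected flips the record, so a later undo() re-marks
    #    L as unsaved.
    # 3. undo() then runs         -> L goes back into effected_layers,
    #    keeping the unsaved-changes indicator accurate.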
class AddNode(NxtCommand):
"""Add a node to the graph"""
def __init__(self, name, data, parent_path, pos, model, layer_path):
super(AddNode, self).__init__(model)
self.name = name
self.data = data
self.parent_path = parent_path
self.layer_path = layer_path
self.stage = model.stage
# command data
self.pos = pos or [0.0, 0.0]
self.prev_selection = self.model.selection
# resulting node
self.node_path = None
self.created_node_paths = []
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
dirty_nodes = []
# delete any created nodes
for node_path in self.created_node_paths:
node = layer.lookup(node_path)
if node is not None:
_, dirty = self.stage.delete_node(node, layer,
remove_layer_data=False)
dirty_nodes += dirty
node = layer.lookup(self.node_path)
source_layer = self.stage.get_node_source_layer(node)
if source_layer.layer_idx() > 0:
rm_layer_data = True
else:
rm_layer_data = False
comp_layer = self.model.comp_layer
if node is not None:
# delete node
_, dirty = self.stage.delete_node(node, layer,
comp_layer=comp_layer,
remove_layer_data=rm_layer_data)
dirty_nodes += dirty
dirty_nodes += self.created_node_paths
dirty_nodes += [self.node_path]
self.undo_effected_layer(self.layer_path)
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.model.selection = self.prev_selection
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.created_node_paths = []
dirty_nodes = []
nodes, dirty = self.stage.add_node(name=self.name, data=self.data,
parent=self.parent_path,
layer=layer.layer_idx(),
comp_layer=self.model.comp_layer)
dirty_nodes += dirty
self.node_path = layer.get_node_path(nodes[0])
self.model._set_node_pos(node_path=self.node_path, pos=self.pos,
layer=layer)
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.model.selection = [self.node_path]
self.redo_effected_layer(layer.real_path)
self.setText('Added node: {}'.format(self.node_path))
class DeleteNode(NxtCommand):
def __init__(self, node_path, model, layer_path, other_removed_nodes):
"""Delete node from the layer at the layer path and the comp layer.
It is important to note that the other_removed_nodes
list must be shared by other DeleteNode commands in a command macro.
The list will be mutated by the stage as it deletes node, this
behavior is depended upon!
:param node_path: String of node path
:param model: StageModel
:param layer_path: String of layer realpath
:param other_removed_nodes: list of node paths that will be deleted
in this event loop.
"""
super(DeleteNode, self).__init__(model)
self.layer_path = layer_path
self.stage = model.stage
# get undo data
self.prev_selection = self.model.selection
self.prev_starts = []
self.prev_breaks = {}
self.node_path = node_path
self.node_data = {}
self.others = other_removed_nodes
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
comp_layer = self.model.comp_layer
parent = self.node_data['parent']
# We don't want to fix names because we know this node should be
# named what it was named when it was deleted
new_nodes, dirty = self.stage.add_node(name=self.node_data['name'],
data=self.node_data['save_dict'],
parent=parent,
layer=layer.layer_idx(),
comp_layer=comp_layer,
fix_names=False)
if self.node_data['break']:
self.model._add_breakpoint(self.node_path, layer)
self.model._add_breakpoint(self.node_path, self.stage.top_layer)
if self.node_data['start']:
self.model._add_start_node(self.node_path, layer)
# restore layer data
pos = self.node_data.get('pos')
if pos:
self.model.top_layer.positions[self.node_path] = pos
# This might be a bug? We don't touch the top layer in redo...
self.undo_effected_layer(self.stage.top_layer.real_path)
attr_display = self.node_data.get('attr_display')
if attr_display is not None:
self.model._set_attr_display_state(self.node_path, attr_display)
user_dir.breakpoints = self.prev_breaks
ancestor_tuple = self.node_data.get('ancestor_child_order')
if ancestor_tuple:
ancestor_path, ancestor_child_order = ancestor_tuple
ancestor = layer.lookup(ancestor_path)
if ancestor:
setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER,
ancestor_child_order)
self.model.selection = self.prev_selection
# Fixme: Does not account for rebuilding proxy nodes for the dirty nodes
dirty_set = tuple(set(dirty))
self.undo_effected_layer(self.layer_path)
if dirty_set != (self.node_path,):
self.model.update_comp_layer(rebuild=True)
else:
self.model.nodes_changed.emit(dirty_set)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
comp_layer = self.model.comp_layer
self.node_data = {}
self.prev_starts = self.model.get_start_nodes(layer)
self.prev_breaks = user_dir.breakpoints
dirty_nodes = []
node = layer.lookup(self.node_path)
# get node info
parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
name = getattr(node, INTERNAL_ATTRS.NAME)
is_break = self.model.get_is_node_breakpoint(self.node_path, layer)
self.node_data = {'parent': parent, 'name': name,
'pos': self.model.get_node_pos(self.node_path),
'break': is_break}
closest_ancestor = layer.ancestors(self.node_path)
if closest_ancestor:
closest_ancestor = closest_ancestor[0]
else:
closest_ancestor = None
closest_ancestor_path = layer.get_node_path(closest_ancestor)
if closest_ancestor_path:
ancestor_child_order = getattr(closest_ancestor,
INTERNAL_ATTRS.CHILD_ORDER)
self.node_data['ancestor_child_order'] = (closest_ancestor_path,
ancestor_child_order[:])
# Attr display data
attr_display = self.model.get_attr_display_state(self.node_path)
if attr_display is not None:
self.node_data['attr_display'] = attr_display
# get layer data
is_start = self.model.get_is_node_start(self.node_path, layer)
self.node_data['start'] = is_start
self.node_data['save_dict'] = get_node_as_dict(node)
if self.node_data['break']:
self.model._remove_breakpoint(self.node_path, layer)
self.model._remove_breakpoint(self.node_path, self.stage.top_layer)
if self.node_data['start']:
self.model._remove_start_node(self.node_path, layer)
node = layer.lookup(self.node_path)
source_layer = self.stage.get_node_source_layer(node)
if source_layer.layer_idx() > 0:
rm_layer_data = True
else:
rm_layer_data = False
for p in self.others[:]:
self.others += comp_layer.get_node_dirties(p)
_, dirty = self.stage.delete_node(node, layer,
comp_layer=comp_layer,
remove_layer_data=rm_layer_data,
other_removed_nodes=self.others)
dirty_nodes += dirty + [self.node_path]
if self.node_path in self.model.selection:
fix_selection = self.model.selection[:]
fix_selection.remove(self.node_path)
self.model.selection = fix_selection
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.redo_effected_layer(layer.real_path)
self.setText("Delete node: {}".format(self.node_path))
class SetNodeAttributeData(NxtCommand):
"""Set attribute value"""
def __init__(self, node_path, attr_name, data, model, layer_path):
super(SetNodeAttributeData, self).__init__(model)
self.node_path = node_path
self.nice_attr_name = attr_name
self.attr_name = attr_name
self.data = data
self.stage = model.stage
self.layer_path = layer_path
self.created_node_paths = []
self.remove_attr = False
self.prev_data = {}
self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP
self.return_value = None
self.prev_selection = model.selection
@processing
def undo(self):
start = time.time()
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
comp = self.model.comp_layer
dirties = [self.node_path]
# delete any created nodes
for node_path in self.created_node_paths:
n = layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer=layer, comp_layer=comp,
remove_layer_data=False)
n = layer.lookup(self.node_path)
if n is not None:
if self.remove_attr:
self.stage.delete_node_attr(n, self.attr_name)
dirties += comp.get_node_dirties(self.node_path)
else:
result = self.stage.node_setattr_data(node=n,
attr=self.attr_name,
layer=layer, create=False,
comp_layer=comp,
**self.prev_data)
if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
dirties += result
if self.attr_name in INTERNAL_ATTRS.ALL:
dirties += comp.get_node_dirties(self.node_path)
changed_attrs = ()
for dirty in dirties:
attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
changed_attrs += (attr_path,)
if self.recomp:
self.model.update_comp_layer(rebuild=self.recomp)
else:
if (self.remove_attr or self.created_node_paths or
self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
INTERNAL_ATTRS.PARENT_PATH)):
self.model.nodes_changed.emit(dirties)
else:
self.model.attrs_changed.emit(changed_attrs)
if not self.recomp:
changed = tuple([self.node_path] + self.created_node_paths)
self.model.nodes_changed.emit(changed)
self.model.selection = self.prev_selection
# undo_debug(self, start)
@processing
def redo(self):
start = time.time()
created_node = False
self.prev_selection = self.model.selection
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
comp = self.model.comp_layer
self.remove_attr = False
self.created_node_paths = []
# get the node
node = layer.lookup(self.node_path)
dirties = [self.node_path]
if node is None:
parent_path = nxt_path.get_parent_path(self.node_path)
name = nxt_path.node_name_from_node_path(self.node_path)
if self.attr_name in INTERNAL_ATTRS.ALL:
self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name)
attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)}
else:
attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}}
self.return_value = self.attr_name
_, dirties = self.stage.add_node(name=name, data=attr_data,
parent=parent_path,
layer=layer.layer_idx(),
comp_layer=comp,
fix_names=False)
# Fixme: Targeted parenting would avoid the need for a recomp
if layer.descendants(self.node_path):
self.recomp = True
created_node = True
self.created_node_paths += [self.node_path]
node = layer.lookup(self.node_path)
self.prev_data = self.stage.get_node_attr_data(node, self.attr_name,
layer, quiet=True)
if self.prev_data:
self.prev_data = copy.deepcopy(self.prev_data)
        # set attribute value; this also adds the attribute if it does not exist
if not self.stage.node_attr_exists(node, self.attr_name):
self.remove_attr = True
if not created_node:
self.return_value = self.stage.node_setattr_data(node,
self.attr_name,
layer=layer,
create=True,
comp_layer=comp,
**self.data)
if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
dirties += self.return_value
if self.attr_name in INTERNAL_ATTRS.ALL:
dirties += comp.get_node_dirties(self.node_path)
if self.recomp:
self.model.update_comp_layer(rebuild=self.recomp)
else:
if (self.remove_attr or self.created_node_paths or
self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
INTERNAL_ATTRS.PARENT_PATH)):
self.model.nodes_changed.emit(dirties)
else:
changed_attrs = ()
for dirty in dirties:
attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
changed_attrs += (attr_path,)
self.model.attrs_changed.emit(changed_attrs)
attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)
val = str(self.data.get(META_ATTRS.VALUE))
self.setText("Set {} to {}".format(attr_path, val))
# redo_debug(self, start)
class SetNodeAttributeValue(SetNodeAttributeData):
def __init__(self, node_path, attr_name, value, model, layer_path):
data = {META_ATTRS.VALUE: value}
super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data,
model, layer_path)
class RenameNode(SetNodeAttributeValue):
"""Rename node"""
def __init__(self, node_path, name, model, layer_path):
self.old_node_path = node_path
layer = model.lookup_layer(layer_path)
parent_path = nxt_path.get_parent_path(node_path)
new_name = model.stage.get_unique_node_name(name=name, layer=layer,
parent_path=parent_path,
layer_only=True)
super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME,
new_name, model, layer_path)
def undo(self):
self.model.about_to_rename.emit()
self.prev_data['force'] = True
super(RenameNode, self).undo()
self.node_path = self.old_node_path
self.model.selection = [self.node_path]
def redo(self):
self.model.about_to_rename.emit()
super(RenameNode, self).redo()
self.node_path = self.return_value
self.model.selection = [self.node_path]
if self.model.get_is_node_start(self.node_path, self.model.comp_layer):
self.model.starts_changed.emit(self.model.get_start_nodes())
self.setText("{} renamed to {}".format(self.old_node_path,
self.return_value))
class DuplicateNodes(NxtCommand):
"""Duplicate nodes on this graph"""
def __init__(self, node_paths, descendants, model, source_layer_path,
target_layer_path):
        # TODO: We should make another base command class that can be used to
        #  set multiple attrs' data. That way duplicate could just be a
        #  setattr. The way it works now we can only set one attr's data at a
        #  time and duplicate needs to gather local + INTERNAL attrs.
super(DuplicateNodes, self).__init__(model)
self.node_paths = node_paths
self.descendants = descendants
self.source_layer_path = source_layer_path
self.target_layer_path = target_layer_path
self.stage = model.stage
# get undo data
self.prev_selection = self.model.selection
# resulting nodes
self.new_node_paths = []
@processing
def undo(self):
target_layer = self.model.lookup_layer(self.target_layer_path)
# delete duplicated nodes
for node_path in self.new_node_paths:
n = target_layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, target_layer,
remove_layer_data=True)
self.model.selection = self.prev_selection
self.model.update_comp_layer(rebuild=True)
self.undo_effected_layer(target_layer.real_path)
@processing
def redo(self):
new_selection = []
self.new_node_paths = []
source_layer = self.model.lookup_layer(self.source_layer_path)
target_layer = self.model.lookup_layer(self.target_layer_path)
self.redo_effected_layer(target_layer.real_path)
for node_path in self.node_paths:
node = source_layer.lookup(node_path)
# duplicate node
new, dirty = self.stage.duplicate_node(node=node,
layer=target_layer,
descendants=self.descendants)
new_selection.append(target_layer.get_node_path(new[0]))
# process new nodes
for new_node in new:
# add new node path to the list and emit model signal
new_node_path = target_layer.get_node_path(new_node)
self.new_node_paths += [new_node_path]
# self.model.node_added.emit(new_node_path)
# set position
has_parent = self.model.node_has_parent(new_node_path,
target_layer)
if not has_parent and new_node_path != node_path:
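                    # Offset top-level duplicates so they do not sit directly on top of the original node.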
pos = self.model.get_node_pos(node_path)
pos = [pos[0] + 20, pos[1] + 20]
self.model._set_node_pos(new_node_path, pos,
layer=target_layer)
self.model.selection = new_selection
self.model.update_comp_layer(rebuild=True)
if len(self.node_paths) == 1:
nodes_str = self.node_paths[0]
else:
nodes_str = 'nodes'
self.setText('Duplicated {}'.format(nodes_str))
class InstanceNode(SetNodeAttributeValue):
"""Instance nodes on this graph"""
def __init__(self, node_path, model, source_layer_path, target_layer_path):
src_name = nxt_path.node_name_from_node_path(node_path)
parent_path = nxt_path.get_parent_path(node_path)
new_name = model.stage.get_unique_node_name(src_name,
model.comp_layer,
parent_path=parent_path)
new_path = nxt_path.join_node_paths(parent_path, new_name)
self.new_path = new_path
super(InstanceNode, self).__init__(new_path,
INTERNAL_ATTRS.INSTANCE_PATH,
node_path, model, target_layer_path)
def redo(self):
node_path = self.data.get(META_ATTRS.VALUE)
layer = self.model.lookup_layer(self.layer_path)
new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0),
layer)
self.model._set_node_pos(self.new_path, new_pos, layer)
super(InstanceNode, self).redo()
self.return_value = self.new_path
self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE)))
class SetNodesPosition(NxtCommand):
"""Move nodes"""
def __init__(self, node_positions, model, layer_path):
super(SetNodesPosition, self).__init__(model)
self.model = model
self.layer_path = layer_path
self.new_positions = node_positions
self.old_positions = {}
for path in self.new_positions.keys():
self.old_positions[path] = model.get_node_pos(path)
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
for node_path, old_pos in self.old_positions.items():
self.model._set_node_pos(node_path=node_path,
pos=old_pos, layer=layer)
self.undo_effected_layer(self.layer_path)
@processing
def redo(self):
delta_str = None
layer = self.model.lookup_layer(self.layer_path)
for node_path, new_pos in self.new_positions.items():
self.model._set_node_pos(node_path=node_path,
pos=new_pos, layer=layer)
if not delta_str:
pos = new_pos
prev_pos = self.old_positions[node_path]
# Only letting it set text once, relying on consistent delta.
x_delta = pos[0] - prev_pos[0]
y_delta = pos[1] - prev_pos[1]
delta_str = '{}, {}'.format(x_delta, y_delta)
if len(self.new_positions) == 1:
nodes_str = node_path
else:
nodes_str = 'nodes'
self.setText('Move {} {}'.format(nodes_str, delta_str))
self.redo_effected_layer(layer.real_path)
class SetSelection(QUndoCommand):
"""Select Nodes and Connections"""
def __init__(self, paths, model):
super(SetSelection, self).__init__()
self.new_paths = paths
self.model = model
self.prev_paths = self.model.selection
def undo(self):
self.model.selection = self.prev_paths
def redo(self):
self.model.selection = self.new_paths
self.setText('Set selection: {}'.format(str(self.new_paths)))
class AddSelection(SetSelection):
def __init__(self, paths, model):
self.added_paths = paths
curr_selection = model.selection
new_paths = curr_selection + paths
super(AddSelection, self).__init__(new_paths, model)
def redo(self):
super(AddSelection, self).redo()
self.setText('Add {} to selection'.format(self.added_paths))
class RemoveFromSelection(SetSelection):
def __init__(self, paths, model):
self.rem_paths = paths
new_selection = model.selection[:]
for path in paths:
try:
new_selection.remove(path)
except ValueError:
continue
super(RemoveFromSelection, self).__init__(new_selection, model)
def redo(self):
super(RemoveFromSelection, self).redo()
self.setText('Remove {} from selection'.format(self.rem_paths))
class LocalizeNodes(NxtCommand):
"""Localize nodes"""
def __init__(self, node_paths, model):
super(LocalizeNodes, self).__init__(model)
self.node_paths = node_paths
self.model = model
self.stage = model.stage
self.prev_selection = self.model.selection
self.prev_node_data = {}
self.created_node_paths = []
@processing
def undo(self):
for node_path in self.created_node_paths:
n = self.model.target_layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer=self.model.target_layer,
remove_layer_data=False)
layers = [self.model.target_layer]
for node_path, all_data in self.prev_node_data.items():
apply_data = {}
node = self.model.target_layer.lookup(node_path)
if not node:
continue
data = all_data['data']
child_order = all_data['data'].get('child_order', [])
apply_data['child_order'] = child_order
apply_data['attributes'] = data.get('attributes', {})
attrs_to_keep = apply_data['attributes'].keys()
apply_data['enabled'] = data.get('enabled')
if data.get('instance'):
apply_data['instance'] = data['instance']
self.stage.transfer_node_data(node, self.model.target_layer,
apply_data, self.model.comp_layer)
local_attrs = self.stage.get_node_local_attr_names(node_path,
layers)
for attr in local_attrs:
if attr not in attrs_to_keep:
self.stage.delete_node_attr(node=node, attr_name=attr)
self.model.update_comp_layer(rebuild=True)
self.undo_effected_layer(layers[0].real_path)
self.model.selection = self.prev_selection
@processing
def redo(self):
self.prev_node_data = {}
self.created_node_paths = []
layer = self.model.target_layer
for node_path in self.node_paths:
node_data = {}
display_node = self.model.comp_layer.lookup(node_path)
if not display_node:
continue
# add node if it doesn't exist on the target layer
target_node = self.model.target_layer.lookup(node_path)
if not target_node:
new_nodes, new_paths, dirty = _add_node_hierarchy(node_path,
self.model,
layer)
target_node = new_nodes[-1]
self.created_node_paths += new_paths
# self.model.node_added.emit(node_path)
# preserve original data
node_data['data'] = get_node_as_dict(target_node)
# localize source node
self.stage.transfer_node_data(target_node, self.model.target_layer,
display_node,
self.model.comp_layer)
self.prev_node_data[node_path] = node_data
self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
self.redo_effected_layer(layer.real_path)
self.model.selection = self.prev_selection
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
self.setText('Localize {}'.format(str(path_str)))
class LocalizeUserAttr(SetNodeAttributeData):
"""Localize nodes"""
def __init__(self, node_path, attr_name, model, layer_path):
node = model.comp_layer.lookup(node_path)
data = model.stage.get_node_attr_data(node, attr_name,
model.comp_layer)
if META_ATTRS.SOURCE in data:
data.pop(META_ATTRS.SOURCE)
super(LocalizeUserAttr, self).__init__(node_path, attr_name, data,
model, layer_path)
class LocalizeCompute(SetNodeAttributeValue):
"""Localize nodes"""
def __init__(self, node_path, model, layer_path):
comp_layer = model.comp_layer
display_node = comp_layer.lookup(node_path)
code_lines = model.stage.get_node_code_lines(display_node, comp_layer)
super(LocalizeCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE,
code_lines, model, layer_path)
def redo(self):
super(LocalizeCompute, self).redo()
self.setText("Localize compute on {}".format(self.node_path))
class LocalizeInstancePath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
inst_path = model.get_node_instance_path(node_path, model.comp_layer,
expand=False)
super(LocalizeInstancePath, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
inst_path, model, layer_path)
def redo(self):
super(LocalizeInstancePath, self).redo()
self.setText("Localize instance path to {}".format(self.node_path))
class RevertInstancePath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
super(RevertInstancePath, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
None, model, layer_path)
def redo(self):
super(RevertInstancePath, self).redo()
self.setText("Revert instance path on {}".format(self.node_path))
class LocalizeExecPath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
exec_path = model.get_node_exec_in(node_path)
super(LocalizeExecPath, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN,
exec_path, model, layer_path)
def redo(self):
super(LocalizeExecPath, self).redo()
self.setText("Localize exec input on {}".format(self.node_path))
class RevertExecPath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
super(RevertExecPath, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN, None,
model, layer_path)
def redo(self):
self.setText("Revert exec input on {}".format(self.node_path))
class RevertNode(DeleteNode):
"""Localize nodes"""
def __init__(self, node_path, model, layer_path, others):
super(RevertNode, self).__init__(node_path, model, layer_path, others)
self.rebuild = False # Tells the delete command not to re-comp
self.created_node_paths = []
self.node_path = node_path
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
# Remove our created empty nodes
for node_path in self.created_node_paths:
n = layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer, remove_layer_data=False)
super(RevertNode, self).undo()
self.model.update_comp_layer(rebuild=True)
self.model.selection = self.prev_selection
def redo(self):
self.created_node_paths = []
super(RevertNode, self).redo()
layer = self.model.lookup_layer(self.layer_path)
# Re-create the node as an empty node
new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path,
self.model, layer)
self.created_node_paths += new_paths
self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
self.model.selection = self.prev_selection
self.setText('Revert {}'.format(self.node_path))
class ParentNodes(NxtCommand):
"""Parent Nodes"""
def __init__(self, node_paths, parent_node_path, model):
super(ParentNodes, self).__init__(model)
self.parent_node_path = parent_node_path
self.parent_node = None
self.model = model
self.stage = model.stage
self.node_paths = node_paths
# resulting nodes
self.node_path_data = {}
self.new_node_paths = []
self.created_node_paths = []
# get node selection for undo
self.prev_selection = self.model.selection
# get previous node data for all child nodes for undo
self.prev_node_data = {}
@processing
def undo(self):
layer = self.model.target_layer
self.undo_effected_layer(layer.real_path)
# undo parent
common_parent_nodes = {}
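        # Group nodes by their previous parent so each group can be re-parented in a single call.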
for old_path, node_data in self.prev_node_data.items():
prev_parent_path = node_data['parent']
prev_parent_node = layer.lookup(prev_parent_path)
new_path = self.node_path_data[old_path]
node = layer.lookup(new_path)
if prev_parent_path not in list(common_parent_nodes.keys()):
common_parent_nodes[prev_parent_path] = {node: old_path}
else:
common_parent_nodes[prev_parent_path][node] = old_path
child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER)
if child_order_tuple:
ancestor_path, child_order = child_order_tuple
ancestor = layer.lookup(ancestor_path)
if ancestor:
self.stage.set_node_child_order(ancestor, child_order,
layer)
if new_path in list(self.model.top_layer.positions.keys()):
source_layer = self.stage.get_node_source_layer(node)
source_layer.positions.pop(new_path)
for parent_path, nodes_dict in common_parent_nodes.items():
self.stage.parent_nodes(nodes=list(nodes_dict.keys()),
parent_path=parent_path,
layer=layer)
for parent_path, nodes_dict in common_parent_nodes.items():
for node, old_path in nodes_dict.items():
node_data = self.prev_node_data[old_path]
# restore name
prev_name = node_data['name']
name = getattr(node, INTERNAL_ATTRS.NAME)
if name != prev_name:
self.stage.set_node_name(node, name=prev_name,
layer=layer, force=True)
# restore position
if self.parent_node_path != nxt_path.WORLD:
prev_pos = node_data['pos']
source_layer = self.stage.get_node_source_layer(node)
self.model._set_node_pos(old_path, prev_pos,
layer=source_layer)
# delete any created nodes
for node_path in self.created_node_paths:
node = layer.lookup(node_path)
if node is not None:
self.stage.delete_node(node, layer)
idx = 0
for old_node_path in self.node_paths:
new_node_path = self.new_node_paths[idx]
attr_state = self.model.remove_attr_display_state(new_node_path)
if attr_state is not None:
self.model._set_attr_display_state(old_node_path, attr_state)
idx += 1
self.model.update_comp_layer(rebuild=True)
self.model.selection = self.prev_selection
@processing
def redo(self):
self.prev_node_data = {}
self.node_path_data = {}
self.new_node_paths = []
self.created_node_paths = []
nodes = []
layer = self.model.target_layer
self.redo_effected_layer(layer.real_path)
for node_path in self.node_paths:
node = layer.lookup(node_path)
name = getattr(node, INTERNAL_ATTRS.NAME)
parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
self.stage.get_node_data(node, layer)
node_data = self.stage.get_node_data(node, layer)
node_data['pos'] = self.model.get_node_pos(node_path)
node_data['name'] = name
node_data['parent'] = parent_path
parent_node = layer.lookup(parent_path)
ancestor_path = parent_path
child_order = []
if parent_node:
child_order = getattr(parent_node,
INTERNAL_ATTRS.CHILD_ORDER)
else:
ancestors = layer.ancestors(node_path)
if ancestors:
ancestor = ancestors[0]
ancestor_path = layer.get_node_path(ancestor)
child_order = self.stage.get_node_child_order(ancestor)
node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path,
child_order]
self.prev_node_data[node_path] = node_data
nodes += [node]
        # Get current node hierarchy information for each node. Each node
        # path is placed in a list of descendants for each top node so that,
        # when they are un-parented, each node can be placed visually beside
        # its original top node.
node_hierarchy_data = {}
if self.parent_node_path is nxt_path.WORLD:
for node_path in self.node_paths:
node = layer.lookup(node_path)
top_node = self.stage.get_top_node(node,
self.model.target_layer)
if top_node is None:
top_node = node
top_node_path = layer.get_node_path(top_node)
top_node_descendant_list = node_hierarchy_data.get(top_node, [])
top_node_descendant_list += [node]
node_hierarchy_data[top_node_path] = top_node_descendant_list
if not node_hierarchy_data:
return
# parent
self.node_path_data = self.stage.parent_nodes(nodes,
self.parent_node_path,
layer)
self.new_node_paths = list(self.node_path_data.values())
idx = 0
for new_node_path in self.new_node_paths:
old_node_path = self.node_paths[idx]
attr_state = self.model.remove_attr_display_state(old_node_path)
if attr_state is not None:
self.model._set_attr_display_state(new_node_path, attr_state)
# set position for un-parent
if self.parent_node_path == nxt_path.WORLD:
old_root = nxt_path.get_root_path(old_node_path)
new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14,
GRID_SIZE),
self.model.top_layer)
self.model._set_node_pos(new_node_path, new_pos, layer)
idx += 1
self.model.update_comp_layer(rebuild=True)
self.model.selection = list(self.node_path_data.values())
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
self.setText("Parent {} to {}".format(path_str, self.parent_node_path))
class AddAttribute(SetNodeAttributeData):
"""Add an attribute to a node."""
def __init__(self, node_path, attr_name, value, model, layer_path):
data = {META_ATTRS.VALUE: value}
super(AddAttribute, self).__init__(node_path, attr_name, data,
model, layer_path)
def redo(self):
super(AddAttribute, self).redo()
self.remove_attr = True
self.setText("Add {} attr to {}".format(self.attr_name,
self.node_path))
class DeleteAttribute(AddAttribute):
"""Delete attribute on a node"""
def __init__(self, node_path, attr_name, model, layer_path):
super(DeleteAttribute, self).__init__(node_path, attr_name, None,
model, layer_path)
# Get the data to be set if undo is called
layer = self.model.lookup_layer(self.layer_path)
node = layer.lookup(self.node_path)
self.data = self.stage.get_node_attr_data(node, self.attr_name, layer)
def undo(self):
super(DeleteAttribute, self).redo()
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
def redo(self):
        # Overload remove attr here to ensure the attr is deleted
self.remove_attr = True
super(DeleteAttribute, self).undo()
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
self.setText("Remove {} attr from {}".format(self.attr_name,
self.node_path))
class RevertCompute(SetNodeAttributeValue):
"""Revert compute"""
def __init__(self, node_path, model, layer_path):
super(RevertCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE, [], model,
layer_path)
def redo(self):
super(RevertCompute, self).redo()
self.setText("Revert compute on {}".format(self.node_path))
class RenameAttribute(NxtCommand):
"""Rename attribute"""
def __init__(self, node_path, attr_name, new_attr_name, model, layer_path):
super(RenameAttribute, self).__init__(model)
self.node_path = node_path
self.attr_name = attr_name
self.new_attr_name = new_attr_name
self.model = model
self.stage = model.stage
self.layer_path = layer_path
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
self.rename_attribute(layer, self.new_attr_name, self.attr_name)
self.undo_effected_layer(layer.real_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.rename_attribute(layer, self.attr_name, self.new_attr_name)
self.redo_effected_layer(layer.real_path)
def rename_attribute(self, layer, attr_name, new_attr_name):
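        # Shared by undo and redo (with the names swapped): renames the attr on the given layer and recomps.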
node = layer.lookup(self.node_path)
self.stage.rename_node_attr(node, attr_name, new_attr_name, layer)
self.model.update_comp_layer()
old_name = nxt_path.make_attr_path(self.node_path, attr_name)
new_name = nxt_path.make_attr_path(self.node_path, new_attr_name)
self.setText("Rename {} to {}".format(old_name, new_name))
class SetAttributeComment(SetNodeAttributeData):
"""Set attribute comment"""
def __init__(self, node_path, attr_name, comment, model, layer_path):
data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment}
super(SetAttributeComment, self).__init__(node_path, attr_name, data,
model, layer_path)
def redo(self):
super(SetAttributeComment, self).redo()
attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)
self.setText("Changed comment on {}".format(attr_path))
class SetCompute(SetNodeAttributeValue):
"""Set node code value"""
def __init__(self, node_path, code_lines, model, layer_path):
super(SetCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE,
code_lines, model, layer_path)
def redo(self):
super(SetCompute, self).redo()
self.setText("Changed compute on {}".format(self.node_path))
class SetNodeComment(SetNodeAttributeValue):
"""Set node comment"""
def __init__(self, node_path, comment, model, layer_path):
super(SetNodeComment, self).__init__(node_path,
INTERNAL_ATTRS.COMMENT,
comment, model, layer_path)
def redo(self):
super(SetNodeComment, self).redo()
self.setText("Changed comment on {}".format(self.node_path))
class SetNodeInstance(SetNodeAttributeValue):
"""Set node instance"""
def __init__(self, node_path, instance_path, model, layer_path):
super(SetNodeInstance, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
instance_path, model, layer_path)
def redo(self):
super(SetNodeInstance, self).redo()
txt = ("Set inst path on "
"{} to {}".format(self.node_path,
self.data.get(META_ATTRS.VALUE)))
self.setText(txt)
class SetNodeEnabledState(SetNodeAttributeValue):
"""Set node enabled state"""
def __init__(self, node_path, value, model, layer_path):
super(SetNodeEnabledState, self).__init__(node_path,
INTERNAL_ATTRS.ENABLED,
value, model, layer_path)
def redo(self):
super(SetNodeEnabledState, self).redo()
if self.data.get(META_ATTRS.VALUE):
self.setText("Enabled {}".format(self.node_path))
else:
self.setText("Disabled {}".format(self.node_path))
class SetNodeCollapse(NxtCommand):
"""Set the node collapse state"""
def __init__(self, node_paths, value,
model, layer_path):
super(SetNodeCollapse, self).__init__(model)
self.node_paths = node_paths
self.value = value
self.model = model
self.stage = model.stage
self.layer_path = layer_path
self.prev_values = {}
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
for node_path, prev_value in self.prev_values.items():
layer.collapse[node_path] = prev_value
self.model.comp_layer.collapse[node_path] = prev_value
self.model.collapse_changed.emit(list(self.prev_values.keys()))
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
self.prev_values = {}
for np in self.node_paths:
self.prev_values[np] = self.model.get_node_collapse(np, layer)
for node_path in self.node_paths:
layer.collapse[node_path] = self.value
self.model.comp_layer.collapse[node_path] = self.value
self.model.collapse_changed.emit(list(self.prev_values.keys()))
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
if self.value:
self.setText("Collapsed {}".format(path_str))
else:
self.setText("Expanded {}".format(path_str))
class SetNodeExecuteSources(SetNodeAttributeValue):
"""Set node execute sources"""
def __init__(self, node_path, exec_source, model, layer_path):
super(SetNodeExecuteSources, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN,
exec_source, model,
layer_path)
def redo(self):
super(SetNodeExecuteSources, self).redo()
val = self.data.get(META_ATTRS.VALUE)
if val is None:
self.setText("Removed exec input for {}".format(self.node_path))
return
self.setText("Set {} exec input to {}".format(self.node_path, val))
class SetNodeBreakPoint(QUndoCommand):
"""Set node as a break point"""
def __init__(self, node_paths, value, model, layer_path):
super(SetNodeBreakPoint, self).__init__()
self.node_paths = node_paths
self.value = value
self.model = model
self.layer_path = layer_path
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if not self.value:
func = self.model._add_breakpoint
else:
func = self.model._remove_breakpoint
for node_path in self.node_paths:
func(node_path, layer)
self.model.nodes_changed.emit(tuple(self.node_paths))
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if self.value:
func = self.model._add_breakpoint
else:
func = self.model._remove_breakpoint
for node_path in self.node_paths:
func(node_path, layer)
self.model.nodes_changed.emit(tuple(self.node_paths))
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
if self.value:
self.setText("Add breakpoint to {}".format(path_str))
else:
self.setText("Remove breakpoint from {}".format(path_str))
class ClearBreakpoints(QUndoCommand):
"""Clear all the breakpoints for a given layer"""
def __init__(self, model, layer_path):
super(ClearBreakpoints, self).__init__()
self.model = model
self.layer_path = layer_path
self.prev_breaks = []
@processing
def undo(self):
user_dir.breakpoints[self.layer_path] = self.prev_breaks
self.model.nodes_changed.emit(tuple(self.prev_breaks))
@processing
def redo(self):
self.prev_breaks = user_dir.breakpoints.get(self.layer_path, [])
if self.layer_path in list(user_dir.breakpoints.keys()):
user_dir.breakpoints.pop(self.layer_path)
self.model.nodes_changed.emit(tuple(self.prev_breaks))
self.setText("Clear all breakpoints")
class SetNodeStartPoint(SetNodeAttributeValue):
"""Set this node as the execution start point"""
def __init__(self, node_path, value, model, layer_path):
super(SetNodeStartPoint, self).__init__(node_path,
INTERNAL_ATTRS.START_POINT,
value, model, layer_path)
class SetNodeChildOrder(SetNodeAttributeValue):
"""Set node child order"""
def __init__(self, node_path, child_order, model, layer_path):
super(SetNodeChildOrder, self).__init__(node_path,
INTERNAL_ATTRS.CHILD_ORDER,
child_order, model, layer_path)
def redo(self):
super(SetNodeChildOrder, self).redo()
self.setText("Change child order on {}".format(self.node_path))
class SetLayerAlias(NxtCommand):
"""Set Layer Alias"""
def __init__(self, alias, layer_path, model):
super(SetLayerAlias, self).__init__(model)
self.layer_path = layer_path
self.alias = alias
self.old_alias = ''
self.model = model
self.stage = model.stage
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
layer.set_alias(self.old_alias)
else:
layer.set_alias_over(self.old_alias)
self.undo_effected_layer(self.model.top_layer.real_path)
self.model.layer_alias_changed.emit(self.layer_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
self.old_alias = layer.get_alias(local=True)
layer.set_alias(self.alias)
else:
self.old_alias = layer.get_alias(fallback_to_local=False)
layer.set_alias_over(self.alias)
self.redo_effected_layer(self.model.top_layer.real_path)
self.model.layer_alias_changed.emit(self.layer_path)
self.setText("Set {} alias to {}".format(layer.filepath, self.alias))
class NewLayer(NxtCommand):
"""Add new layer"""
def __init__(self, file_path, file_name, idx, model, chdir):
super(NewLayer, self).__init__(model)
self.new_layer_path = None
self.model = model
self.stage = model.stage
self.insert_idx = idx
self.file_path = file_path
self.file_name = file_name
self.chdir = chdir
@processing
def undo(self):
new_layer = self.model.lookup_layer(self.new_layer_path)
if new_layer in self.stage._sub_layers:
self.undo_effected_layer(new_layer.parent_layer.real_path)
self.stage.remove_sublayer(new_layer)
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(LAYERS.TOP)
self.undo_effected_layer(self.new_layer_path)
self.model.layer_removed.emit(self.new_layer_path)
@processing
def redo(self):
sub_layer_count = len(self.stage._sub_layers)
if 0 < self.insert_idx <= sub_layer_count:
parent_layer = self.stage._sub_layers[self.insert_idx - 1]
self.redo_effected_layer(parent_layer.real_path)
else:
parent_layer = None
layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS]
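        # Pick the first predefined layer color that is not already used by an open sub-layer.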
open_layer_colors = []
for layer in self.stage._sub_layers:
color = layer.color
if color:
color = color.lower()
open_layer_colors += [color]
layer_color = layer_color_index[0]
for c in layer_color_index:
if c not in open_layer_colors:
layer_color = c
break
real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir)
layer_data = {"parent_layer": parent_layer,
SAVE_KEY.FILEPATH: self.file_path,
SAVE_KEY.REAL_PATH: real_path,
SAVE_KEY.COLOR: layer_color,
SAVE_KEY.ALIAS: self.file_name
}
new_layer = self.stage.new_sublayer(layer_data=layer_data,
idx=self.insert_idx)
self.new_layer_path = new_layer.real_path
self.redo_effected_layer(new_layer.real_path)
        # Fixme: Each of the next 2 lines triggers its own comp build
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(self.new_layer_path)
self.model.layer_added.emit(self.new_layer_path)
self.setText("New layer {}".format(self.new_layer_path))
class ReferenceLayer(NxtCommand):
"""Refernce existing layer"""
def __init__(self, file_path, idx, model, chdir):
super(ReferenceLayer, self).__init__(model)
self.model = model
self.stage = model.stage
self.insert_idx = idx
self.file_path = file_path
self.real_path = nxt_path.full_file_expand(self.file_path, chdir)
@processing
def undo(self):
new_layer = self.model.lookup_layer(self.real_path)
if new_layer in self.stage._sub_layers:
self.undo_effected_layer(new_layer.parent_layer.real_path)
self.stage.remove_sublayer(new_layer)
self.model.set_target_layer(LAYERS.TOP)
self.model.update_comp_layer(rebuild=True)
self.model.layer_removed.emit(self.real_path)
@processing
def redo(self):
sub_layer_count = len(self.stage._sub_layers)
if 0 < self.insert_idx <= sub_layer_count:
parent_layer = self.stage._sub_layers[self.insert_idx - 1]
self.redo_effected_layer(parent_layer.real_path)
else:
parent_layer = None
layer_data = nxt_io.load_file_data(self.real_path)
extra_data = {"parent_layer": parent_layer,
"filepath": self.file_path,
"real_path": self.real_path,
"alias": layer_data['name']
}
layer_data.update(extra_data)
self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx)
        # Fixme: Each of the next 2 lines triggers its own comp build
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(self.real_path)
self.model.layer_added.emit(self.real_path)
self.setText("Added reference to {}".format(self.real_path))
class RemoveLayer(ReferenceLayer):
"""Remove existing layer"""
def __init__(self, layer_path, model):
idx = model.lookup_layer(layer_path).layer_idx()
super(RemoveLayer, self).__init__(layer_path, idx, model, None)
self.text = "Removed reference to {}".format(layer_path)
@processing
def undo(self):
super(RemoveLayer, self).redo()
self.setText(self.text)
@processing
def redo(self):
super(RemoveLayer, self).undo()
self.setText(self.text)
class MuteToggleLayer(NxtCommand):
"""Toggles muting an existing layer"""
def __init__(self, layer_path, model):
super(MuteToggleLayer, self).__init__(model)
self.layer_path = layer_path
self.model = model
self.layer_paths = []
def undo(self):
self.toggle_state()
for layer_path in self.layer_paths:
self.undo_effected_layer(layer_path)
def redo(self):
self.layer_paths = []
self.toggle_state()
for layer_path in self.layer_paths:
self.redo_effected_layer(layer_path)
@processing
def toggle_state(self):
layer = self.model.lookup_layer(self.layer_path)
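        # The top layer mutes itself locally; other layers are muted via an override stored on the top layer.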
if layer is self.model.top_layer:
state = not layer.get_muted(local=True)
layer.set_muted(state)
self.layer_paths.append(layer.real_path)
else:
state = not layer.get_muted(local=False)
self.model.top_layer.set_mute_over(layer.filepath, state)
self.layer_paths.append(self.model.top_layer.real_path)
self.model.update_comp_layer(rebuild=True)
self.model.layer_mute_changed.emit((self.layer_path,))
self.setText("Toggle {} muted.".format(layer.get_alias()))
class SoloToggleLayer(NxtCommand):
"""Toggles soloing an existing layer"""
def __init__(self, layer_path, model):
super(SoloToggleLayer, self).__init__(model)
self.layer_path = layer_path
self.model = model
self.layer_paths = []
def undo(self):
self.toggle_state()
for layer_path in self.layer_paths:
self.undo_effected_layer(layer_path)
def redo(self):
self.layer_paths = []
self.toggle_state()
for layer_path in self.layer_paths:
self.redo_effected_layer(layer_path)
@processing
def toggle_state(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
state = not layer.get_soloed(local=True)
layer.set_soloed(state)
self.layer_paths.append(layer.real_path)
else:
state = not layer.get_soloed(local=False)
self.model.top_layer.set_solo_over(layer.filepath, state)
self.layer_paths.append(self.model.top_layer.real_path)
self.model.update_comp_layer(rebuild=True)
self.model.layer_solo_changed.emit((self.layer_path,))
self.setText("Toggle {} soloed.".format(layer.get_alias()))
class SetLayerColor(NxtCommand):
def __init__(self, color, layer_path, model):
"""Sets the color for a given layer, if the layer is not a top layer
the top layer store an overrides.
:param color: string of new layer alias (name)
:param layer_path: real path of layer
:param model: StageModel
"""
super(SetLayerColor, self).__init__(model)
self.layer_path = layer_path
self.color = color
self.old_color = ''
self.model = model
self.stage = model.stage
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
layer.color = self.old_color
else:
layer.set_color_over(self.old_color)
self.undo_effected_layer(self.model.top_layer.real_path)
self.model.layer_color_changed.emit(self.layer_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
self.old_color = layer.get_color(local=True)
layer.color = self.color
else:
self.old_color = layer.get_color(fallback_to_local=False)
layer.set_color_over(self.color)
self.redo_effected_layer(self.model.top_layer.real_path)
self.model.layer_color_changed.emit(self.layer_path)
self.setText("Set {} color to {}".format(layer.filepath, self.color))
def _add_node_hierarchy(base_node_path, model, layer):
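    """Add the node hierarchy for base_node_path to the given layer.

    Child order of each new node is merged with the comp layer's display
    node, and a tuple of (new_nodes, new_node_paths, dirty) is returned.
    """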
stage = model.stage
comp_layer = model.comp_layer
new_node_paths = []
new_nodes = []
node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path)
new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy,
parent=None, layer=layer,
comp_layer=comp_layer)
for nn_p, n in new_node_table:
display_node = comp_layer.lookup(nn_p)
if display_node is not None:
display_child_order = getattr(display_node,
INTERNAL_ATTRS.CHILD_ORDER)
old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER)
new_child_order = list_merger(display_child_order,
old_child_order)
setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order)
new_node_paths += [nn_p]
new_nodes += [n]
return new_nodes, new_node_paths, dirty
def undo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug("Undo " + cmd.text() + " | " + update_time + "ms")
def redo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug(cmd.text() + " | " + update_time + "ms")
| [
"logging.getLogger",
"nxt.nxt_path.full_file_expand",
"nxt.nxt_node.list_merger",
"nxt.nxt_io.load_file_data",
"copy.deepcopy",
"nxt.nxt_node.META_ATTRS.as_save_key",
"nxt_editor.user_dir.breakpoints.pop",
"nxt.nxt_node.get_node_as_dict",
"nxt.nxt_node.INTERNAL_ATTRS.as_save_key",
"nxt.nxt_path.get_root_path",
"nxt.nxt_path.join_node_paths",
"nxt.nxt_path.str_path_to_node_namespace",
"nxt.nxt_path.node_name_from_node_path",
"nxt.nxt_path.make_attr_path",
"nxt.nxt_path.get_parent_path",
"time.time",
"nxt_editor.user_dir.breakpoints.get",
"nxt_editor.user_dir.breakpoints.keys"
]
| [((430, 471), 'logging.getLogger', 'logging.getLogger', (['nxt_editor.LOGGER_NAME'], {}), '(nxt_editor.LOGGER_NAME)\n', (447, 471), False, 'import logging\n'), ((67175, 67226), 'nxt.nxt_path.str_path_to_node_namespace', 'nxt_path.str_path_to_node_namespace', (['base_node_path'], {}), '(base_node_path)\n', (67210, 67226), False, 'from nxt import nxt_path\n'), ((12741, 12763), 'nxt.nxt_node.get_node_as_dict', 'get_node_as_dict', (['node'], {}), '(node)\n', (12757, 12763), False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((14765, 14776), 'time.time', 'time.time', ([], {}), '()\n', (14774, 14776), False, 'import time\n'), ((16963, 16974), 'time.time', 'time.time', ([], {}), '()\n', (16972, 16974), False, 'import time\n'), ((20288, 20348), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', (['self.node_path', 'self.nice_attr_name'], {}), '(self.node_path, self.nice_attr_name)\n', (20311, 20348), False, 'from nxt import nxt_path\n'), ((21045, 21080), 'nxt.nxt_path.get_parent_path', 'nxt_path.get_parent_path', (['node_path'], {}), '(node_path)\n', (21069, 21080), False, 'from nxt import nxt_path\n'), ((25562, 25606), 'nxt.nxt_path.node_name_from_node_path', 'nxt_path.node_name_from_node_path', (['node_path'], {}), '(node_path)\n', (25595, 25606), False, 'from nxt import nxt_path\n'), ((25629, 25664), 'nxt.nxt_path.get_parent_path', 'nxt_path.get_parent_path', (['node_path'], {}), '(node_path)\n', (25653, 25664), False, 'from nxt import nxt_path\n'), ((25893, 25940), 'nxt.nxt_path.join_node_paths', 'nxt_path.join_node_paths', (['parent_path', 'new_name'], {}), '(parent_path, new_name)\n', (25917, 25940), False, 'from nxt import nxt_path\n'), ((48592, 48642), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', (['self.node_path', 'attr_name'], {}), '(self.node_path, attr_name)\n', (48615, 48642), False, 'from nxt import nxt_path\n'), ((48662, 48716), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', (['self.node_path', 'new_attr_name'], {}), '(self.node_path, new_attr_name)\n', (48685, 48716), False, 'from nxt import nxt_path\n'), ((49248, 49308), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', (['self.node_path', 'self.nice_attr_name'], {}), '(self.node_path, self.nice_attr_name)\n', (49271, 49308), False, 'from nxt import nxt_path\n'), ((55744, 55789), 'nxt_editor.user_dir.breakpoints.get', 'user_dir.breakpoints.get', (['self.layer_path', '[]'], {}), '(self.layer_path, [])\n', (55768, 55789), False, 'from nxt_editor import user_dir\n'), ((59818, 59877), 'nxt.nxt_path.full_file_expand', 'nxt_path.full_file_expand', (['self.file_path'], {'start': 'self.chdir'}), '(self.file_path, start=self.chdir)\n', (59843, 59877), False, 'from nxt import nxt_path\n'), ((61010, 61058), 'nxt.nxt_path.full_file_expand', 'nxt_path.full_file_expand', (['self.file_path', 'chdir'], {}), '(self.file_path, chdir)\n', (61035, 61058), False, 'from nxt import nxt_path\n'), ((61819, 61856), 'nxt.nxt_io.load_file_data', 'nxt_io.load_file_data', (['self.real_path'], {}), '(self.real_path)\n', (61840, 61856), False, 'from nxt import nxt_io\n'), ((16150, 16196), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', (['dirty', 'self.attr_name'], {}), '(dirty, self.attr_name)\n', (16173, 16196), False, 'from nxt import nxt_path\n'), ((17422, 17462), 'nxt.nxt_path.get_parent_path', 'nxt_path.get_parent_path', (['self.node_path'], {}), '(self.node_path)\n', (17446, 17462), False, 'from nxt import nxt_path\n'), ((17482, 17531), 
'nxt.nxt_path.node_name_from_node_path', 'nxt_path.node_name_from_node_path', (['self.node_path'], {}), '(self.node_path)\n', (17515, 17531), False, 'from nxt import nxt_path\n'), ((18728, 18757), 'copy.deepcopy', 'copy.deepcopy', (['self.prev_data'], {}), '(self.prev_data)\n', (18741, 18757), False, 'import copy\n'), ((32630, 32659), 'nxt.nxt_node.get_node_as_dict', 'get_node_as_dict', (['target_node'], {}), '(target_node)\n', (32646, 32659), False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((48959, 49001), 'nxt.nxt_node.META_ATTRS.as_save_key', 'META_ATTRS.as_save_key', (['META_ATTRS.COMMENT'], {}), '(META_ATTRS.COMMENT)\n', (48981, 49001), False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((55867, 55908), 'nxt_editor.user_dir.breakpoints.pop', 'user_dir.breakpoints.pop', (['self.layer_path'], {}), '(self.layer_path)\n', (55891, 55908), False, 'from nxt_editor import user_dir\n'), ((67796, 67845), 'nxt.nxt_node.list_merger', 'list_merger', (['display_child_order', 'old_child_order'], {}), '(display_child_order, old_child_order)\n', (67807, 67845), False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((17621, 17663), 'nxt.nxt_node.INTERNAL_ATTRS.as_save_key', 'INTERNAL_ATTRS.as_save_key', (['self.attr_name'], {}), '(self.attr_name)\n', (17647, 17663), False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((44719, 44756), 'nxt.nxt_path.get_root_path', 'nxt_path.get_root_path', (['old_node_path'], {}), '(old_node_path)\n', (44741, 44756), False, 'from nxt import nxt_path\n'), ((55825, 55852), 'nxt_editor.user_dir.breakpoints.keys', 'user_dir.breakpoints.keys', ([], {}), '()\n', (55850, 55852), False, 'from nxt_editor import user_dir\n'), ((20110, 20156), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', (['dirty', 'self.attr_name'], {}), '(dirty, self.attr_name)\n', (20133, 20156), False, 'from nxt import nxt_path\n'), ((68121, 68132), 'time.time', 'time.time', ([], {}), '()\n', (68130, 68132), False, 'import time\n'), ((68283, 68294), 'time.time', 'time.time', ([], {}), '()\n', (68292, 68294), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import datetime
from PyPDF2 import PdfFileWriter
from frappe.utils.file_manager import save_file
class ArbitrationAuthority(Document):
pass
def _get_sb(**kwargs):
'''
call on [IP]/api/method/mietrechtspraxis.api.get_sb
    Mandatory parameters:
     - token
     - plz_city
'''
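    # Example request (hypothetical values; assumes this helper is exposed through a
    # whitelisted wrapper): .../api/method/mietrechtspraxis.api.get_sb?token=<token>&plz_city=8000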
# check that token is present
try:
token = kwargs['token']
except:
# 400 Bad Request (Missing Token)
return raise_4xx(400, 'Bad Request', 'Token Required')
# check that token is correct
if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'):
# 401 Unauthorized (Invalid Token)
return raise_4xx(401, 'Unauthorized', 'Invalid Token')
# check that plz_city is present
try:
plz_city = kwargs['plz_city']
except:
# 400 Bad Request (Missing PLZ/City)
return raise_4xx(400, 'Bad Request', 'PLZ/City Required')
answer = []
# lookup for plz
city_results = frappe.db.sql("""
SELECT
                                        `pincode`,
                                        `city`,
`municipality`,
`district`,
`canton`
FROM `tabPincode`
WHERE `pincode` = '{plz_city}'
ORDER BY `city` ASC
""".format(plz_city=plz_city), as_dict=True)
if len(city_results) < 1:
# lookup for city
city_results = frappe.db.sql("""
SELECT
                                            `pincode`,
                                            `city`,
`municipality`,
`district`,
`canton`
FROM `tabPincode`
WHERE `city` LIKE '%{plz_city}%'
ORDER BY `city` ASC
""".format(plz_city=plz_city), as_dict=True)
if len(city_results) > 0:
for city in city_results:
data = {}
            data['plz'] = city.pincode
data['ort'] = city.city
data['gemeinde'] = city.municipality
data['bezirk'] = city.district
data['kanton'] = city.canton
data['allgemein'] = get_informations(city.canton)
data['schlichtungsbehoerde'] = frappe.db.sql("""
SELECT
`schlichtungsbehoerde`.`titel` AS `Titel`,
`schlichtungsbehoerde`.`telefon` AS `Telefon`,
`schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`,
`schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`,
`schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`,
`schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`,
`schlichtungsbehoerde`.`homepage` AS `Homepage`
FROM `tabArbitration Authority` AS `schlichtungsbehoerde`
LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent`
WHERE `geminendentbl`.`municipality` = '{municipality}'
""".format(municipality=city.municipality), as_dict=True)
answer.append(data)
if len(answer) > 0:
return raise_200(answer)
else:
# 404 Not Found
return raise_4xx(404, 'Not Found', 'No results')
else:
# 404 Not Found
return raise_4xx(404, 'Not Found', 'No results')
def get_informations(kanton):
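    # Return general canton-level information (links, law collection, forms) or an empty dict if unknown.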
search = frappe.db.sql("""
SELECT
`informationen`,
`homepage`,
`gesetzessammlung`,
`formulare`
FROM `tabKantonsinformationen`
WHERE `kanton` = '{kanton}'
""".format(kanton=kanton), as_dict=True)
if len(search) > 0:
result = search[0]
else:
result = {}
return result
def raise_4xx(code, title, message):
# 4xx Bad Request / Unauthorized / Not Found
return ['{code} {title}'.format(code=code, title=title), {
"error": {
"code": code,
"message": "{message}".format(message=message)
}
}]
def raise_200(answer):
return ['200 OK', answer]
@frappe.whitelist()
def get_sammel_pdf(no_letterhead=1):
frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead})
return
def _get_sammel_pdf(no_letterhead=1):
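    # Render the 'Datenüberprüfung' and 'Fragebogen für Schlichtungsbehörden' print
    # formats for every Arbitration Authority into one combined PDF and save it
    # as a private file.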
output = PdfFileWriter()
schlichtungsbehoerden = frappe.db.sql("""SELECT `name` FROM `tabArbitration Authority`""", as_dict=True)
for schlichtungsbehoerde in schlichtungsbehoerden:
output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output = output, no_letterhead = no_letterhead)
output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf = True, output = output, no_letterhead = no_letterhead)
pdf = frappe.utils.pdf.get_file_data_from_writer(output)
now = datetime.now()
ts = "{0:04d}-{1:02d}-{2:02d}".format(now.year, now.month, now.day)
file_name = "{0}_{1}.pdf".format('SB_Sammel-PDF', ts)
save_file(file_name, pdf, '', '', is_private=1)
return
| [
"frappe.utils.file_manager.save_file",
"frappe.db.get_single_value",
"frappe.get_print",
"frappe.whitelist",
"frappe.enqueue",
"datetime.datetime.now",
"frappe.utils.pdf.get_file_data_from_writer",
"frappe.db.sql",
"PyPDF2.PdfFileWriter"
]
| [((5555, 5573), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (5571, 5573), False, 'import frappe\n'), ((5615, 5752), 'frappe.enqueue', 'frappe.enqueue', ([], {'method': '_get_sammel_pdf', 'queue': '"""long"""', 'job_name': '"""Schlichtungsbehörden Sammel-PDF"""'}), "(method=_get_sammel_pdf, queue='long', job_name=\n 'Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead})\n", (5629, 5752), False, 'import frappe\n'), ((5811, 5826), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (5824, 5826), False, 'from PyPDF2 import PdfFileWriter\n'), ((5855, 5931), 'frappe.db.sql', 'frappe.db.sql', (['"""SELECT `name` FROM `tabArbitration Authority`"""'], {'as_dict': '(True)'}), "('SELECT `name` FROM `tabArbitration Authority`', as_dict=True)\n", (5868, 5931), False, 'import frappe\n'), ((6363, 6413), 'frappe.utils.pdf.get_file_data_from_writer', 'frappe.utils.pdf.get_file_data_from_writer', (['output'], {}), '(output)\n', (6405, 6413), False, 'import frappe\n'), ((6429, 6443), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6441, 6443), False, 'from datetime import datetime\n'), ((6578, 6625), 'frappe.utils.file_manager.save_file', 'save_file', (['file_name', 'pdf', '""""""', '""""""'], {'is_private': '(1)'}), "(file_name, pdf, '', '', is_private=1)\n", (6587, 6625), False, 'from frappe.utils.file_manager import save_file\n'), ((6008, 6162), 'frappe.get_print', 'frappe.get_print', (['"""Arbitration Authority"""', 'schlichtungsbehoerde.name', '"""Datenüberprüfung"""'], {'as_pdf': '(True)', 'output': 'output', 'no_letterhead': 'no_letterhead'}), "('Arbitration Authority', schlichtungsbehoerde.name,\n 'Datenüberprüfung', as_pdf=True, output=output, no_letterhead=no_letterhead\n )\n", (6024, 6162), False, 'import frappe\n'), ((6177, 6349), 'frappe.get_print', 'frappe.get_print', (['"""Arbitration Authority"""', 'schlichtungsbehoerde.name', '"""Fragebogen für Schlichtungsbehörden"""'], {'as_pdf': '(True)', 'output': 'output', 'no_letterhead': 'no_letterhead'}), "('Arbitration Authority', schlichtungsbehoerde.name,\n 'Fragebogen für Schlichtungsbehörden', as_pdf=True, output=output,\n no_letterhead=no_letterhead)\n", (6193, 6349), False, 'import frappe\n'), ((795, 854), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""mietrechtspraxis API"""', '"""token"""'], {}), "('mietrechtspraxis API', 'token')\n", (821, 854), False, 'import frappe\n')] |
from .connection import Connection
import socket
class ClientSocket:
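    """Minimal TCP client wrapper that returns a Connection once connected."""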
def __init__(self) -> None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect(self, host: str, port: int) -> Connection:
self.__socket.connect((host, port))
return Connection(self.__socket)
| [
"socket.socket"
]
| [((127, 176), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (140, 176), False, 'import socket\n')] |
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
# pylint: disable=map-builtin-not-iterating
import sys, unittest
from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf
class TestUsdGeomSchemata(unittest.TestCase):
def test_Basic(self):
l = Sdf.Layer.CreateAnonymous()
stage = Usd.Stage.Open(l.identifier)
p = stage.DefinePrim("/Mesh", "Mesh")
self.assertTrue(p)
mesh = UsdGeom.Mesh(p)
self.assertTrue(mesh)
self.assertTrue(mesh.GetPrim())
self.assertTrue(not mesh.GetPointsAttr().Get(1))
self.assertEqual(p.GetTypeName(),
Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType()))
#
# Make sure uniform access behaves as expected.
#
ori = p.GetAttribute("orientation")
# The generic orientation attribute should be automatically defined because
# it is a registered attribute of a well known schema. However, it's not
# yet authored at the current edit target.
self.assertTrue(ori.IsDefined())
self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget()))
# Author a value, and check that it's still defined, and now is in fact
# authored at the current edit target.
ori.Set(UsdGeom.Tokens.leftHanded)
self.assertTrue(ori.IsDefined())
self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget()))
mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10)
# "leftHanded" should have been authored at Usd.TimeCode.Default, so reading the
# attribute at Default should return lh, not rh.
self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded)
# The value "rightHanded" was set at t=10, so reading *any* time should
# return "rightHanded"
self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded)
#
# Attribute name sanity check. We expect the names returned by the schema
# to match the names returned via the generic API.
#
self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0)
self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False))
for n in mesh.GetSchemaAttributeNames():
# apiName overrides
if n == "primvars:displayColor":
n = "displayColor"
elif n == "primvars:displayOpacity":
n = "displayOpacity"
name = n[0].upper() + n[1:]
self.assertTrue(("Get" + name + "Attr") in dir(mesh),
("Get" + name + "Attr() not found in: " + str(dir(mesh))))
def test_IsA(self):
# Author Scene and Compose Stage
l = Sdf.Layer.CreateAnonymous()
stage = Usd.Stage.Open(l.identifier)
# For every prim schema type in this module, validate that:
# 1. We can define a prim of its type
# 2. Its type and inheritance matches our expectations
# 3. At least one of its builtin properties is available and defined
# BasisCurves Tests
schema = UsdGeom.BasisCurves.Define(stage, "/BasisCurves")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a Cylinder
self.assertTrue(schema.GetBasisAttr())
# Camera Tests
schema = UsdGeom.Camera.Define(stage, "/Camera")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a Cylinder
self.assertTrue(schema.GetFocalLengthAttr())
# Capsule Tests
schema = UsdGeom.Capsule.Define(stage, "/Capsule")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Cone Tests
schema = UsdGeom.Cone.Define(stage, "/Cone")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Cube Tests
schema = UsdGeom.Cube.Define(stage, "/Cube")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a Cylinder
self.assertTrue(schema.GetSizeAttr())
# Cylinder Tests
schema = UsdGeom.Cylinder.Define(stage, "/Cylinder")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable
self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Mesh Tests
schema = UsdGeom.Mesh.Define(stage, "/Mesh")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a Cylinder
self.assertTrue(schema.GetFaceVertexCountsAttr())
# NurbsCurves Tests
schema = UsdGeom.NurbsCurves.Define(stage, "/NurbsCurves")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a Cylinder
self.assertTrue(schema.GetKnotsAttr())
# NurbsPatch Tests
schema = UsdGeom.NurbsPatch.Define(stage, "/NurbsPatch")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a Cylinder
self.assertTrue(schema.GetUKnotsAttr())
# Points Tests
schema = UsdGeom.Points.Define(stage, "/Points")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a Cylinder
self.assertTrue(schema.GetWidthsAttr())
# Scope Tests
schema = UsdGeom.Scope.Define(stage, "/Scope")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a Mesh
self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a Cylinder
# Scope has no builtins!
# Sphere Tests
schema = UsdGeom.Sphere.Define(stage, "/Sphere")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a Cylinder
self.assertTrue(schema.GetRadiusAttr())
# Xform Tests
schema = UsdGeom.Xform.Define(stage, "/Xform")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a Cylinder
self.assertTrue(schema.GetXformOpOrderAttr())
def test_Fallbacks(self):
# Author Scene and Compose Stage
stage = Usd.Stage.CreateInMemory()
# Xformable Tests
identity = Gf.Matrix4d(1)
origin = Gf.Vec3f(0, 0, 0)
xform = UsdGeom.Xform.Define(stage, "/Xform") # direct subclass
xformOpOrder = xform.GetXformOpOrderAttr()
self.assertFalse(xformOpOrder.HasAuthoredValue())
# xformOpOrder has no fallback value
self.assertEqual(xformOpOrder.Get(), None)
self.assertFalse(xformOpOrder.HasFallbackValue())
# Try authoring and reverting...
xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder)
self.assertTrue(xformOpOrderAttr)
self.assertEqual(xformOpOrderAttr.Get(), None)
opOrderVal = ["xformOp:transform"]
self.assertTrue(xformOpOrderAttr.Set(opOrderVal))
self.assertTrue(xformOpOrderAttr.HasAuthoredValue())
self.assertNotEqual(xformOpOrderAttr.Get(), None)
self.assertTrue(xformOpOrderAttr.Clear())
self.assertFalse(xformOpOrderAttr.HasAuthoredValue())
self.assertEqual(xformOpOrderAttr.Get(), None)
self.assertFalse(xformOpOrder.HasFallbackValue())
mesh = UsdGeom.Mesh.Define(stage, "/Mesh") # multiple ancestor hops
# PointBased and Curves
curves = UsdGeom.BasisCurves.Define(stage, "/Curves")
self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex)
self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex)
# Before we go, test that CreateXXXAttr performs as we expect in various
# scenarios
# Number 1: Sparse and non-sparse authoring on def'd prim
mesh.CreateDoubleSidedAttr(False, True)
self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue())
mesh.CreateDoubleSidedAttr(False, False)
self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue())
# Number 2: Sparse authoring demotes to dense for non-defed prim
overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh'))
overMesh.CreateDoubleSidedAttr(False, True)
self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue())
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False)
overMesh.CreateDoubleSidedAttr(True, True)
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True)
# make it a defined mesh, and sanity check it still evals the same
mesh2 = UsdGeom.Mesh.Define(stage, "/overMesh")
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True)
# Check querying of fallback values.
sphere = UsdGeom.Sphere.Define(stage, "/Sphere")
radius = sphere.GetRadiusAttr()
self.assertTrue(radius.HasFallbackValue())
radiusQuery = Usd.AttributeQuery(radius)
self.assertTrue(radiusQuery.HasFallbackValue())
def test_DefineSchema(self):
s = Usd.Stage.CreateInMemory()
parent = s.OverridePrim('/parent')
self.assertTrue(parent)
# Make a subscope.
scope = UsdGeom.Scope.Define(s, '/parent/subscope')
self.assertTrue(scope)
# Assert that a simple find or create gives us the scope back.
self.assertTrue(s.OverridePrim('/parent/subscope'))
self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim())
# Try to make a mesh at subscope's path. This transforms the scope into a
# mesh, since Define() always authors typeName.
mesh = UsdGeom.Mesh.Define(s, '/parent/subscope')
self.assertTrue(mesh)
self.assertTrue(not scope)
# Make a mesh at a different path, should work.
mesh = UsdGeom.Mesh.Define(s, '/parent/mesh')
self.assertTrue(mesh)
def test_BasicMetadataCases(self):
s = Usd.Stage.CreateInMemory()
spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim()
radius = spherePrim.GetAttribute('radius')
self.assertTrue(radius.HasMetadata('custom'))
self.assertTrue(radius.HasMetadata('typeName'))
self.assertTrue(radius.HasMetadata('variability'))
self.assertTrue(radius.IsDefined())
self.assertTrue(not radius.IsCustom())
self.assertEqual(radius.GetTypeName(), 'double')
allMetadata = radius.GetAllMetadata()
self.assertEqual(allMetadata['typeName'], 'double')
self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)
self.assertEqual(allMetadata['custom'], False)
# Author a custom property spec.
layer = s.GetRootLayer()
sphereSpec = layer.GetPrimAtPath('/sphere')
radiusSpec = Sdf.AttributeSpec(
sphereSpec, 'radius', Sdf.ValueTypeNames.Double,
variability=Sdf.VariabilityUniform, declaresCustom=True)
self.assertTrue(radiusSpec.custom)
self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform)
# Definition should win.
self.assertTrue(not radius.IsCustom())
self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying)
allMetadata = radius.GetAllMetadata()
self.assertEqual(allMetadata['typeName'], 'double')
self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)
self.assertEqual(allMetadata['custom'], False)
# List fields on 'visibility' attribute -- should include 'allowedTokens',
# provided by the property definition.
visibility = spherePrim.GetAttribute('visibility')
self.assertTrue(visibility.IsDefined())
self.assertTrue('allowedTokens' in visibility.GetAllMetadata())
# Assert that attribute fallback values are returned for builtin attributes.
do = spherePrim.GetAttribute('primvars:displayOpacity')
self.assertTrue(do.IsDefined())
self.assertTrue(do.Get() is None)
def test_Camera(self):
from pxr import Gf
stage = Usd.Stage.CreateInMemory()
camera = UsdGeom.Camera.Define(stage, "/Camera")
self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable
self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective')
camera.GetProjectionAttr().Set('orthographic')
self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic')
self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(),
0.825 * 25.4, 1e-5))
camera.GetHorizontalApertureAttr().Set(3.0)
self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0)
self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(),
0.602 * 25.4, 1e-5))
camera.GetVerticalApertureAttr().Set(2.0)
self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0)
self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0)
camera.GetFocalLengthAttr().Set(35.0)
self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5))
self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000))
camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10))
self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(),
Gf.Vec2f(5, 10), 1e-5))
self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray())
cp = Vt.Vec4fArray([(1, 2, 3, 4), (8, 7, 6, 5)])
camera.GetClippingPlanesAttr().Set(cp)
self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)
cp = Vt.Vec4fArray()
camera.GetClippingPlanesAttr().Set(cp)
self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)
self.assertEqual(camera.GetFStopAttr().Get(), 0.0)
camera.GetFStopAttr().Set(2.8)
self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5))
self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0)
camera.GetFocusDistanceAttr().Set(10.0)
self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0)
def test_Points(self):
stage = Usd.Stage.CreateInMemory()
# Points Tests
schema = UsdGeom.Points.Define(stage, "/Points")
self.assertTrue(schema)
# Test that id's roundtrip properly, for big numbers, and negative numbers
ids = [8589934592, 1099511627776, 0, -42]
schema.CreateIdsAttr(ids)
resolvedIds = list(schema.GetIdsAttr().Get()) # convert VtArray to list
self.assertEqual(ids, resolvedIds)
def test_Revert_Bug111239(self):
# This used to test a change for Bug111239, but now tests that this
        # fix has been reverted. We no longer allow the C++ typename to be used as
# a prim's typename.
s = Usd.Stage.CreateInMemory()
sphere = s.DefinePrim('/sphere', typeName='Sphere')
tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName
self.assertEqual(tfTypeName, 'UsdGeomSphere')
        usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName=tfTypeName)
self.assertTrue(UsdGeom.Sphere(sphere))
self.assertTrue('radius' in [a.GetName() for a in sphere.GetAttributes()])
self.assertFalse(UsdGeom.Sphere(usdGeomSphere))
self.assertFalse('radius' in [a.GetName() for a in usdGeomSphere.GetAttributes()])
def test_ComputeExtent(self):
from pxr import Gf
# Create some simple test cases
allPoints = [
[(1, 1, 0)], # Zero-Volume Extent Test
[(0, 0, 0)], # Simple Width Test
[(-1, -1, -1), (1, 1, 1)], # Multiple Width Test
[(-1, -1, -1), (1, 1, 1)], # Erroneous Widths/Points Test
# Complex Test, Many Points/Widths
[(3, -1, 5), (-1.5, 0, 3), (1, 3, -2), (2, 2, -4)],
]
allWidths = [
[0], # Zero-Volume Extent Test
[2], # Simple Width Test
[2, 4], # Multiple Width Test
[2, 4, 5], # Erroneous Widths/Points Test
[1, 2, 2, 1] # Complex Test, Many Points/Widths
]
pointBasedSolutions = [
[(1, 1, 0), (1, 1, 0)], # Zero-Volume Extent Test
[(0, 0, 0), (0, 0, 0)], # Simple Width Test
[(-1, -1, -1), (1, 1, 1)], # Multiple Width Test
# Erroneous Widths/Points Test -> Ok For Point-Based
[(-1, -1, -1), (1, 1, 1)],
[(-1.5, -1, -4), (3, 3, 5)] # Complex Test, Many Points/Widths
]
pointsSolutions = [
[(1, 1, 0), (1, 1, 0)], # Zero-Volume Extent Test
[(-1, -1, -1), (1, 1, 1)], # Simple Width Test
[(-2, -2, -2), (3, 3, 3)], # Multiple Width Test
# Erroneous Widths/Points Test -> Returns None
None,
[(-2.5, -1.5, -4.5), (3.5, 4, 5.5)] # Complex Test, Many Points/Widths
]
# Perform the correctness tests for PointBased and Points
# Test for empty points prims
emptyPoints = []
extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints)
# We need to map the contents of extremeExtentArr to floats from
# num.float32s due to the way Gf.Vec3f is wrapped out
# XXX: This is awful, it'd be nice to not do it
extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])),
Gf.Vec3f(*map(float, extremeExtentArr[1])))
self.assertTrue(extremeExtentRange.IsEmpty())
# PointBased Test
numDataSets = len(allPoints)
for i in range(numDataSets):
pointsData = allPoints[i]
expectedExtent = pointBasedSolutions[i]
actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Points Test
for i in range(numDataSets):
pointsData = allPoints[i]
widthsData = allWidths[i]
expectedExtent = pointsSolutions[i]
actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData)
if actualExtent is not None and expectedExtent is not None:
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Compute extent via generic UsdGeom.Boundable API
s = Usd.Stage.CreateInMemory()
pointsPrim = UsdGeom.Points.Define(s, "/Points")
pointsPrim.CreatePointsAttr(pointsData)
pointsPrim.CreateWidthsAttr(widthsData)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
pointsPrim, Usd.TimeCode.Default())
if actualExtent is not None and expectedExtent is not None:
for a, b in zip(expectedExtent, list(actualExtent)):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Mesh Test
for i in range(numDataSets):
pointsData = allPoints[i]
expectedExtent = pointBasedSolutions[i]
# Compute extent via generic UsdGeom.Boundable API.
# UsdGeom.Mesh does not have its own compute extent function, so
# it should fall back to the extent for PointBased prims.
s = Usd.Stage.CreateInMemory()
meshPrim = UsdGeom.Mesh.Define(s, "/Mesh")
meshPrim.CreatePointsAttr(pointsData)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
meshPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Test UsdGeomCurves
curvesPoints = [
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 1 width
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 2 widths
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)] # Test Curve with no width
]
curvesWidths = [
[1], # Test Curve with 1 width
[.5, .1], # Test Curve with 2 widths
[] # Test Curve with no width
]
curvesSolutions = [
[(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve with 1 width
[(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve with 2 widths (MAX)
[(0,0,0), (3,1,1)], # Test Curve with no width
]
# Perform the actual v. expected comparison
numDataSets = len(curvesPoints)
for i in range(numDataSets):
pointsData = curvesPoints[i]
widths = curvesWidths[i]
expectedExtent = curvesSolutions[i]
actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Compute extent via generic UsdGeom.Boundable API
s = Usd.Stage.CreateInMemory()
nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, "/NurbsCurves")
nurbsCurvesPrim.CreatePointsAttr(pointsData)
nurbsCurvesPrim.CreateWidthsAttr(widths)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
nurbsCurvesPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
basisCurvesPrim = UsdGeom.BasisCurves.Define(s, "/BasisCurves")
basisCurvesPrim.CreatePointsAttr(pointsData)
basisCurvesPrim.CreateWidthsAttr(widths)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
basisCurvesPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
def test_TypeUsage(self):
# Perform Type-Ness Checking for ComputeExtent
pointsAsList = [(0, 0, 0), (1, 1, 1), (2, 2, 2)]
pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList)
comp = UsdGeom.PointBased.ComputeExtent
expectedExtent = comp(pointsAsVec3fArr)
actualExtent = comp(pointsAsList)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
def test_Bug116593(self):
from pxr import Gf
s = Usd.Stage.CreateInMemory()
prim = s.DefinePrim('/sphere', typeName='Sphere')
# set with list of tuples
vec = [(1,2,2),(12,3,3)]
self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3))
# set with Gf vecs
vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)]
self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1))
def test_Typed(self):
from pxr import Tf
xform = Tf.Type.FindByName("UsdGeomXform")
imageable = Tf.Type.FindByName("UsdGeomImageable")
geomModelAPI = Tf.Type.FindByName("UsdGeomModelAPI")
self.assertTrue(Usd.SchemaRegistry.IsTyped(xform))
self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable))
self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI))
def test_Concrete(self):
from pxr import Tf
xform = Tf.Type.FindByName("UsdGeomXform")
imageable = Tf.Type.FindByName("UsdGeomImageable")
geomModelAPI = Tf.Type.FindByName("UsdGeomModelAPI")
self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform))
self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable))
self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI))
def test_Apply(self):
s = Usd.Stage.CreateInMemory('AppliedSchemas.usd')
root = s.DefinePrim('/hello')
self.assertEqual([], root.GetAppliedSchemas())
# Check duplicates
UsdGeom.MotionAPI.Apply(root)
self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())
UsdGeom.MotionAPI.Apply(root)
self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())
# Ensure duplicates aren't picked up
UsdGeom.ModelAPI.Apply(root)
self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas())
# Verify that we get exceptions but don't crash when applying to the
# null prim.
with self.assertRaises(Tf.ErrorException):
self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim()))
with self.assertRaises(Tf.ErrorException):
self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim()))
def test_IsATypeless(self):
from pxr import Usd, Tf
s = Usd.Stage.CreateInMemory()
spherePrim = s.DefinePrim('/sphere', typeName='Sphere')
typelessPrim = s.DefinePrim('/regular')
types = [Tf.Type.FindByName('UsdGeomSphere'),
Tf.Type.FindByName('UsdGeomGprim'),
Tf.Type.FindByName('UsdGeomBoundable'),
Tf.Type.FindByName('UsdGeomXformable'),
Tf.Type.FindByName('UsdGeomImageable'),
Tf.Type.FindByName('UsdTyped')]
# Our sphere prim should return true on IsA queries for Sphere
# and everything it inherits from. Our plain prim should return false
# for all of them.
for t in types:
self.assertTrue(spherePrim.IsA(t))
self.assertFalse(typelessPrim.IsA(t))
def test_HasAPI(self):
from pxr import Usd, Tf
s = Usd.Stage.CreateInMemory()
prim = s.DefinePrim('/prim')
types = [Tf.Type.FindByName('UsdGeomMotionAPI'),
Tf.Type.FindByName('UsdGeomModelAPI')]
# Check that no APIs have yet been applied
for t in types:
self.assertFalse(prim.HasAPI(t))
# Apply our schemas to this prim
UsdGeom.ModelAPI.Apply(prim)
UsdGeom.MotionAPI.Apply(prim)
# Check that all our applied schemas show up
for t in types:
self.assertTrue(prim.HasAPI(t))
# Check that we get an exception for unknown and non-API types
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.Unknown)
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.FindByName('UsdGeomXform'))
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable'))
with self.assertRaises(Tf.ErrorException):
# Test with a non-applied API schema.
prim.HasAPI(Tf.Type.FindByName('UsdModelAPI'))
if __name__ == "__main__":
unittest.main()
| [
"pxr.UsdGeom.Cube.Define",
"pxr.UsdGeom.Sphere",
"pxr.UsdGeom.BasisCurves.Define",
"pxr.UsdGeom.Cone.Define",
"pxr.UsdGeom.MotionAPI.Apply",
"unittest.main",
"pxr.UsdGeom.Camera.Define",
"pxr.Usd.TimeCode.Default",
"pxr.UsdGeom.Curves.ComputeExtent",
"pxr.Tf.Type.FindByName",
"pxr.UsdGeom.Points.Define",
"pxr.Vt.Vec4fArray",
"pxr.UsdGeom.NurbsPatch.Define",
"pxr.Usd.AttributeQuery",
"pxr.UsdGeom.ModelAPI",
"pxr.Sdf.AttributeSpec",
"pxr.UsdGeom.Capsule.Define",
"pxr.UsdGeom.PointBased.ComputeExtent",
"pxr.Gf.Vec2f",
"pxr.Usd.SchemaRegistry.IsTyped",
"pxr.UsdGeom.Sphere._GetStaticTfType",
"pxr.Usd.Stage.CreateInMemory",
"pxr.Usd.SchemaRegistry",
"pxr.Sdf.Layer.CreateAnonymous",
"pxr.Usd.Prim",
"pxr.UsdGeom.Mesh",
"pxr.Usd.Stage.Open",
"pxr.UsdGeom.NurbsCurves.Define",
"pxr.UsdGeom.Xform.Define",
"pxr.Gf.IsClose",
"pxr.Gf.Vec3f",
"pxr.UsdGeom.Mesh.Define",
"pxr.Vt.Vec3fArray",
"pxr.Gf.Matrix4d",
"pxr.UsdGeom.Points.ComputeExtent",
"pxr.UsdGeom.Cylinder.Define",
"pxr.UsdGeom.ModelAPI.Apply",
"pxr.UsdGeom.Sphere.Define",
"pxr.UsdGeom.Scope.Define"
]
| [((31022, 31037), 'unittest.main', 'unittest.main', ([], {}), '()\n', (31035, 31037), False, 'import sys, unittest\n'), ((1273, 1300), 'pxr.Sdf.Layer.CreateAnonymous', 'Sdf.Layer.CreateAnonymous', ([], {}), '()\n', (1298, 1300), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((1317, 1345), 'pxr.Usd.Stage.Open', 'Usd.Stage.Open', (['l.identifier'], {}), '(l.identifier)\n', (1331, 1345), False, 'from pxr import Usd, Tf\n'), ((1436, 1451), 'pxr.UsdGeom.Mesh', 'UsdGeom.Mesh', (['p'], {}), '(p)\n', (1448, 1451), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((3957, 3984), 'pxr.Sdf.Layer.CreateAnonymous', 'Sdf.Layer.CreateAnonymous', ([], {}), '()\n', (3982, 3984), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((4001, 4029), 'pxr.Usd.Stage.Open', 'Usd.Stage.Open', (['l.identifier'], {}), '(l.identifier)\n', (4015, 4029), False, 'from pxr import Usd, Tf\n'), ((4332, 4381), 'pxr.UsdGeom.BasisCurves.Define', 'UsdGeom.BasisCurves.Define', (['stage', '"""/BasisCurves"""'], {}), "(stage, '/BasisCurves')\n", (4358, 4381), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((4789, 4828), 'pxr.UsdGeom.Camera.Define', 'UsdGeom.Camera.Define', (['stage', '"""/Camera"""'], {}), "(stage, '/Camera')\n", (4810, 4828), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((5228, 5269), 'pxr.UsdGeom.Capsule.Define', 'UsdGeom.Capsule.Define', (['stage', '"""/Capsule"""'], {}), "(stage, '/Capsule')\n", (5250, 5269), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((5662, 5697), 'pxr.UsdGeom.Cone.Define', 'UsdGeom.Cone.Define', (['stage', '"""/Cone"""'], {}), "(stage, '/Cone')\n", (5681, 5697), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((6081, 6116), 'pxr.UsdGeom.Cube.Define', 'UsdGeom.Cube.Define', (['stage', '"""/Cube"""'], {}), "(stage, '/Cube')\n", (6100, 6116), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((6504, 6547), 'pxr.UsdGeom.Cylinder.Define', 'UsdGeom.Cylinder.Define', (['stage', '"""/Cylinder"""'], {}), "(stage, '/Cylinder')\n", (6527, 6547), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((6939, 6974), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', (['stage', '"""/Mesh"""'], {}), "(stage, '/Mesh')\n", (6958, 6974), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((7373, 7422), 'pxr.UsdGeom.NurbsCurves.Define', 'UsdGeom.NurbsCurves.Define', (['stage', '"""/NurbsCurves"""'], {}), "(stage, '/NurbsCurves')\n", (7399, 7422), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((7834, 7881), 'pxr.UsdGeom.NurbsPatch.Define', 'UsdGeom.NurbsPatch.Define', (['stage', '"""/NurbsPatch"""'], {}), "(stage, '/NurbsPatch')\n", (7859, 7881), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((8287, 8326), 'pxr.UsdGeom.Points.Define', 'UsdGeom.Points.Define', (['stage', '"""/Points"""'], {}), "(stage, '/Points')\n", (8308, 8326), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((8719, 8756), 'pxr.UsdGeom.Scope.Define', 'UsdGeom.Scope.Define', (['stage', '"""/Scope"""'], {}), "(stage, '/Scope')\n", (8739, 8756), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((9136, 9175), 'pxr.UsdGeom.Sphere.Define', 'UsdGeom.Sphere.Define', (['stage', '"""/Sphere"""'], {}), "(stage, '/Sphere')\n", (9157, 9175), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((9568, 9605), 'pxr.UsdGeom.Xform.Define', 'UsdGeom.Xform.Define', (['stage', '"""/Xform"""'], {}), "(stage, '/Xform')\n", (9588, 9605), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((10050, 
10076), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (10074, 10076), False, 'from pxr import Usd, Tf\n'), ((10124, 10138), 'pxr.Gf.Matrix4d', 'Gf.Matrix4d', (['(1)'], {}), '(1)\n', (10135, 10138), False, 'from pxr import Gf\n'), ((10156, 10173), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (10164, 10173), False, 'from pxr import Gf\n'), ((10191, 10228), 'pxr.UsdGeom.Xform.Define', 'UsdGeom.Xform.Define', (['stage', '"""/Xform"""'], {}), "(stage, '/Xform')\n", (10211, 10228), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((11198, 11233), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', (['stage', '"""/Mesh"""'], {}), "(stage, '/Mesh')\n", (11217, 11233), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((11310, 11354), 'pxr.UsdGeom.BasisCurves.Define', 'UsdGeom.BasisCurves.Define', (['stage', '"""/Curves"""'], {}), "(stage, '/Curves')\n", (11336, 11354), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((12468, 12507), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', (['stage', '"""/overMesh"""'], {}), "(stage, '/overMesh')\n", (12487, 12507), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((12639, 12678), 'pxr.UsdGeom.Sphere.Define', 'UsdGeom.Sphere.Define', (['stage', '"""/Sphere"""'], {}), "(stage, '/Sphere')\n", (12660, 12678), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((12793, 12819), 'pxr.Usd.AttributeQuery', 'Usd.AttributeQuery', (['radius'], {}), '(radius)\n', (12811, 12819), False, 'from pxr import Usd, Tf\n'), ((12922, 12948), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (12946, 12948), False, 'from pxr import Usd, Tf\n'), ((13067, 13110), 'pxr.UsdGeom.Scope.Define', 'UsdGeom.Scope.Define', (['s', '"""/parent/subscope"""'], {}), "(s, '/parent/subscope')\n", (13087, 13110), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((13505, 13547), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', (['s', '"""/parent/subscope"""'], {}), "(s, '/parent/subscope')\n", (13524, 13547), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((13684, 13722), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', (['s', '"""/parent/mesh"""'], {}), "(s, '/parent/mesh')\n", (13703, 13722), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((13805, 13831), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (13829, 13831), False, 'from pxr import Usd, Tf\n'), ((14652, 14779), 'pxr.Sdf.AttributeSpec', 'Sdf.AttributeSpec', (['sphereSpec', '"""radius"""', 'Sdf.ValueTypeNames.Double'], {'variability': 'Sdf.VariabilityUniform', 'declaresCustom': '(True)'}), "(sphereSpec, 'radius', Sdf.ValueTypeNames.Double,\n variability=Sdf.VariabilityUniform, declaresCustom=True)\n", (14669, 14779), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((15922, 15948), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (15946, 15948), False, 'from pxr import Usd, Tf\n'), ((15967, 16006), 'pxr.UsdGeom.Camera.Define', 'UsdGeom.Camera.Define', (['stage', '"""/Camera"""'], {}), "(stage, '/Camera')\n", (15988, 16006), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((17377, 17420), 'pxr.Vt.Vec4fArray', 'Vt.Vec4fArray', (['[(1, 2, 3, 4), (8, 7, 6, 5)]'], {}), '([(1, 2, 3, 4), (8, 7, 6, 5)])\n', (17390, 17420), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((17548, 17563), 'pxr.Vt.Vec4fArray', 'Vt.Vec4fArray', ([], {}), '()\n', (17561, 17563), False, 'from pxr import Sdf, Usd, 
UsdGeom, Vt, Gf, Tf\n'), ((18081, 18107), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (18105, 18107), False, 'from pxr import Usd, Tf\n'), ((18150, 18189), 'pxr.UsdGeom.Points.Define', 'UsdGeom.Points.Define', (['stage', '"""/Points"""'], {}), "(stage, '/Points')\n", (18171, 18189), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((18762, 18788), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (18786, 18788), False, 'from pxr import Usd, Tf\n'), ((21107, 21152), 'pxr.UsdGeom.PointBased.ComputeExtent', 'UsdGeom.PointBased.ComputeExtent', (['emptyPoints'], {}), '(emptyPoints)\n', (21139, 21152), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((26137, 26164), 'pxr.Vt.Vec3fArray', 'Vt.Vec3fArray', (['pointsAsList'], {}), '(pointsAsList)\n', (26150, 26164), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((26483, 26509), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (26507, 26509), False, 'from pxr import Usd, Tf\n'), ((27264, 27298), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomXform"""'], {}), "('UsdGeomXform')\n", (27282, 27298), False, 'from pxr import Usd, Tf\n'), ((27319, 27357), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomImageable"""'], {}), "('UsdGeomImageable')\n", (27337, 27357), False, 'from pxr import Usd, Tf\n'), ((27381, 27418), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomModelAPI"""'], {}), "('UsdGeomModelAPI')\n", (27399, 27418), False, 'from pxr import Usd, Tf\n'), ((27694, 27728), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomXform"""'], {}), "('UsdGeomXform')\n", (27712, 27728), False, 'from pxr import Usd, Tf\n'), ((27749, 27787), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomImageable"""'], {}), "('UsdGeomImageable')\n", (27767, 27787), False, 'from pxr import Usd, Tf\n'), ((27811, 27848), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomModelAPI"""'], {}), "('UsdGeomModelAPI')\n", (27829, 27848), False, 'from pxr import Usd, Tf\n'), ((28094, 28140), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', (['"""AppliedSchemas.usd"""'], {}), "('AppliedSchemas.usd')\n", (28118, 28140), False, 'from pxr import Usd, Tf\n'), ((28270, 28299), 'pxr.UsdGeom.MotionAPI.Apply', 'UsdGeom.MotionAPI.Apply', (['root'], {}), '(root)\n', (28293, 28299), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((28374, 28403), 'pxr.UsdGeom.MotionAPI.Apply', 'UsdGeom.MotionAPI.Apply', (['root'], {}), '(root)\n', (28397, 28403), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((28532, 28560), 'pxr.UsdGeom.ModelAPI.Apply', 'UsdGeom.ModelAPI.Apply', (['root'], {}), '(root)\n', (28554, 28560), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((29053, 29079), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (29077, 29079), False, 'from pxr import Usd, Tf\n'), ((29891, 29917), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (29915, 29917), False, 'from pxr import Usd, Tf\n'), ((30240, 30268), 'pxr.UsdGeom.ModelAPI.Apply', 'UsdGeom.ModelAPI.Apply', (['prim'], {}), '(prim)\n', (30262, 30268), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((30277, 30306), 'pxr.UsdGeom.MotionAPI.Apply', 'UsdGeom.MotionAPI.Apply', (['prim'], {}), '(prim)\n', (30300, 30306), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((17069, 17089), 'pxr.Gf.Vec2f', 'Gf.Vec2f', (['(1)', '(1000000)'], {}), 
'(1, 1000000)\n', (17077, 17089), False, 'from pxr import Gf\n'), ((17133, 17148), 'pxr.Gf.Vec2f', 'Gf.Vec2f', (['(5)', '(10)'], {}), '(5, 10)\n', (17141, 17148), False, 'from pxr import Gf\n'), ((17346, 17361), 'pxr.Vt.Vec4fArray', 'Vt.Vec4fArray', ([], {}), '()\n', (17359, 17361), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((18870, 18903), 'pxr.UsdGeom.Sphere._GetStaticTfType', 'UsdGeom.Sphere._GetStaticTfType', ([], {}), '()\n', (18901, 18903), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((19069, 19091), 'pxr.UsdGeom.Sphere', 'UsdGeom.Sphere', (['sphere'], {}), '(sphere)\n', (19083, 19091), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((19201, 19230), 'pxr.UsdGeom.Sphere', 'UsdGeom.Sphere', (['usdGeomSphere'], {}), '(usdGeomSphere)\n', (19215, 19230), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((21786, 21830), 'pxr.UsdGeom.PointBased.ComputeExtent', 'UsdGeom.PointBased.ComputeExtent', (['pointsData'], {}), '(pointsData)\n', (21818, 21830), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((22159, 22211), 'pxr.UsdGeom.Points.ComputeExtent', 'UsdGeom.Points.ComputeExtent', (['pointsData', 'widthsData'], {}), '(pointsData, widthsData)\n', (22187, 22211), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((22487, 22513), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (22511, 22513), False, 'from pxr import Usd, Tf\n'), ((22539, 22574), 'pxr.UsdGeom.Points.Define', 'UsdGeom.Points.Define', (['s', '"""/Points"""'], {}), "(s, '/Points')\n", (22560, 22574), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((23394, 23420), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (23418, 23420), False, 'from pxr import Usd, Tf\n'), ((23444, 23475), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', (['s', '"""/Mesh"""'], {}), "(s, '/Mesh')\n", (23463, 23475), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((24832, 24880), 'pxr.UsdGeom.Curves.ComputeExtent', 'UsdGeom.Curves.ComputeExtent', (['pointsData', 'widths'], {}), '(pointsData, widths)\n', (24860, 24880), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((25077, 25103), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ([], {}), '()\n', (25101, 25103), False, 'from pxr import Usd, Tf\n'), ((25134, 25179), 'pxr.UsdGeom.NurbsCurves.Define', 'UsdGeom.NurbsCurves.Define', (['s', '"""/NurbsCurves"""'], {}), "(s, '/NurbsCurves')\n", (25160, 25179), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((25566, 25611), 'pxr.UsdGeom.BasisCurves.Define', 'UsdGeom.BasisCurves.Define', (['s', '"""/BasisCurves"""'], {}), "(s, '/BasisCurves')\n", (25592, 25611), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((26773, 26790), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (26781, 26790), False, 'from pxr import Gf\n'), ((26859, 26877), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['(12)', '(3)', '(3)'], {}), '(12, 3, 3)\n', (26867, 26877), False, 'from pxr import Gf\n'), ((26920, 26937), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (26928, 26937), False, 'from pxr import Gf\n'), ((26937, 26954), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (26945, 26954), False, 'from pxr import Gf\n'), ((27091, 27108), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (27099, 27108), False, 'from pxr import Gf\n'), ((27177, 27194), 'pxr.Gf.Vec3f', 'Gf.Vec3f', (['(1)', '(1)', '(1)'], {}), '(1, 1, 
1)\n', (27185, 27194), False, 'from pxr import Gf\n'), ((27444, 27477), 'pxr.Usd.SchemaRegistry.IsTyped', 'Usd.SchemaRegistry.IsTyped', (['xform'], {}), '(xform)\n', (27470, 27477), False, 'from pxr import Usd, Tf\n'), ((27503, 27540), 'pxr.Usd.SchemaRegistry.IsTyped', 'Usd.SchemaRegistry.IsTyped', (['imageable'], {}), '(imageable)\n', (27529, 27540), False, 'from pxr import Usd, Tf\n'), ((27567, 27607), 'pxr.Usd.SchemaRegistry.IsTyped', 'Usd.SchemaRegistry.IsTyped', (['geomModelAPI'], {}), '(geomModelAPI)\n', (27593, 27607), False, 'from pxr import Usd, Tf\n'), ((29210, 29245), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomSphere"""'], {}), "('UsdGeomSphere')\n", (29228, 29245), False, 'from pxr import Usd, Tf\n'), ((29264, 29298), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomGprim"""'], {}), "('UsdGeomGprim')\n", (29282, 29298), False, 'from pxr import Usd, Tf\n'), ((29317, 29355), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomBoundable"""'], {}), "('UsdGeomBoundable')\n", (29335, 29355), False, 'from pxr import Usd, Tf\n'), ((29374, 29412), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomXformable"""'], {}), "('UsdGeomXformable')\n", (29392, 29412), False, 'from pxr import Usd, Tf\n'), ((29431, 29469), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomImageable"""'], {}), "('UsdGeomImageable')\n", (29449, 29469), False, 'from pxr import Usd, Tf\n'), ((29488, 29518), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdTyped"""'], {}), "('UsdTyped')\n", (29506, 29518), False, 'from pxr import Usd, Tf\n'), ((29973, 30011), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomMotionAPI"""'], {}), "('UsdGeomMotionAPI')\n", (29991, 30011), False, 'from pxr import Usd, Tf\n'), ((30030, 30067), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomModelAPI"""'], {}), "('UsdGeomModelAPI')\n", (30048, 30067), False, 'from pxr import Usd, Tf\n'), ((13853, 13888), 'pxr.UsdGeom.Sphere.Define', 'UsdGeom.Sphere.Define', (['s', '"""/sphere"""'], {}), "(s, '/sphere')\n", (13874, 13888), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((17258, 17273), 'pxr.Gf.Vec2f', 'Gf.Vec2f', (['(5)', '(10)'], {}), '(5, 10)\n', (17266, 17273), False, 'from pxr import Gf\n'), ((22779, 22801), 'pxr.Usd.TimeCode.Default', 'Usd.TimeCode.Default', ([], {}), '()\n', (22799, 22801), False, 'from pxr import Usd, Tf\n'), ((23624, 23646), 'pxr.Usd.TimeCode.Default', 'Usd.TimeCode.Default', ([], {}), '()\n', (23644, 23646), False, 'from pxr import Usd, Tf\n'), ((25395, 25417), 'pxr.Usd.TimeCode.Default', 'Usd.TimeCode.Default', ([], {}), '()\n', (25415, 25417), False, 'from pxr import Usd, Tf\n'), ((25827, 25849), 'pxr.Usd.TimeCode.Default', 'Usd.TimeCode.Default', ([], {}), '()\n', (25847, 25849), False, 'from pxr import Usd, Tf\n'), ((26388, 26411), 'pxr.Gf.IsClose', 'Gf.IsClose', (['a', 'b', '(1e-05)'], {}), '(a, b, 1e-05)\n', (26398, 26411), False, 'from pxr import Gf\n'), ((30677, 30711), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomXform"""'], {}), "('UsdGeomXform')\n", (30695, 30711), False, 'from pxr import Usd, Tf\n'), ((30789, 30827), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdGeomImageable"""'], {}), "('UsdGeomImageable')\n", (30807, 30827), False, 'from pxr import Usd, Tf\n'), ((30955, 30988), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', (['"""UsdModelAPI"""'], {}), "('UsdModelAPI')\n", (30973, 30988), False, 'from pxr import Usd, Tf\n'), ((1634, 1654), 'pxr.Usd.SchemaRegistry', 
'Usd.SchemaRegistry', ([], {}), '()\n', (1652, 1654), False, 'from pxr import Usd, Tf\n'), ((21923, 21946), 'pxr.Gf.IsClose', 'Gf.IsClose', (['a', 'b', '(1e-05)'], {}), '(a, b, 1e-05)\n', (21933, 21946), False, 'from pxr import Gf\n'), ((23740, 23763), 'pxr.Gf.IsClose', 'Gf.IsClose', (['a', 'b', '(1e-05)'], {}), '(a, b, 1e-05)\n', (23750, 23763), False, 'from pxr import Gf\n'), ((24973, 24996), 'pxr.Gf.IsClose', 'Gf.IsClose', (['a', 'b', '(1e-05)'], {}), '(a, b, 1e-05)\n', (24983, 24996), False, 'from pxr import Gf\n'), ((25511, 25534), 'pxr.Gf.IsClose', 'Gf.IsClose', (['a', 'b', '(1e-05)'], {}), '(a, b, 1e-05)\n', (25521, 25534), False, 'from pxr import Gf\n'), ((25943, 25966), 'pxr.Gf.IsClose', 'Gf.IsClose', (['a', 'b', '(1e-05)'], {}), '(a, b, 1e-05)\n', (25953, 25966), False, 'from pxr import Gf\n'), ((26660, 26682), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', (['prim'], {}), '(prim)\n', (26676, 26682), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((26978, 27000), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', (['prim'], {}), '(prim)\n', (26994, 27000), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((27874, 27894), 'pxr.Usd.SchemaRegistry', 'Usd.SchemaRegistry', ([], {}), '()\n', (27892, 27894), False, 'from pxr import Usd, Tf\n'), ((27939, 27959), 'pxr.Usd.SchemaRegistry', 'Usd.SchemaRegistry', ([], {}), '()\n', (27957, 27959), False, 'from pxr import Usd, Tf\n'), ((28008, 28028), 'pxr.Usd.SchemaRegistry', 'Usd.SchemaRegistry', ([], {}), '()\n', (28026, 28028), False, 'from pxr import Usd, Tf\n'), ((28847, 28857), 'pxr.Usd.Prim', 'Usd.Prim', ([], {}), '()\n', (28855, 28857), False, 'from pxr import Usd, Tf\n'), ((28963, 28973), 'pxr.Usd.Prim', 'Usd.Prim', ([], {}), '()\n', (28971, 28973), False, 'from pxr import Usd, Tf\n'), ((22383, 22406), 'pxr.Gf.IsClose', 'Gf.IsClose', (['a', 'b', '(1e-05)'], {}), '(a, b, 1e-05)\n', (22393, 22406), False, 'from pxr import Gf\n'), ((22993, 23016), 'pxr.Gf.IsClose', 'Gf.IsClose', (['a', 'b', '(1e-05)'], {}), '(a, b, 1e-05)\n', (23003, 23016), False, 'from pxr import Gf\n'), ((26729, 26751), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', (['prim'], {}), '(prim)\n', (26745, 26751), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((26815, 26837), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', (['prim'], {}), '(prim)\n', (26831, 26837), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((27047, 27069), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', (['prim'], {}), '(prim)\n', (27063, 27069), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((27133, 27155), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', (['prim'], {}), '(prim)\n', (27149, 27155), False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n')] |
import pandas as pd
from datetime import timedelta
def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger):
time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns)
if game_stagger == 0:
for round_num in range(time_df.shape[0]):
round_key = 'Round ' + str(round_num + 1)
match_time = tournament_start_time + timedelta(minutes=(game_duration * round_num))
time_df.loc[round_key, :] = match_time.strftime('%I:%M%p')
return time_df
else:
"""
# Given the algorithm, at worst every player can play every (game duration + stagger time)
# This is b/c your opponent begins play one stagger count after you at the latest.
"""
for round_num in range(time_df.shape[0]):
round_key = 'Round ' + str(round_num + 1)
default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger) for game_num in
range(time_df.shape[1])]
match_times = [
(def_time + timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p') for
def_time in default_spread]
time_df.loc[round_key, :] = match_times
return time_df
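# --- Illustrative usage sketch (not part of the original module; all values below are assumptions) ---
# A hypothetical 4-round, 3-court bracket with a 30-minute game and a 10-minute stagger:
# round n (0-based) then starts (30 + 10) * n minutes after the start time on each court,
# with courts offset from one another by 10 minutes.
if __name__ == "__main__":
    from datetime import datetime
    matchups = pd.DataFrame(
        index=[f"Round {r}" for r in range(1, 5)],
        columns=[f"Court {c}" for c in range(1, 4)])
    schedule = generate_times(matchups, datetime(2024, 5, 4, 9, 0),
                               game_duration=30, game_stagger=10)
    print(schedule)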
| [
"pandas.DataFrame",
"datetime.timedelta"
]
| [((165, 229), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'matchup_df.index', 'columns': 'matchup_df.columns'}), '(index=matchup_df.index, columns=matchup_df.columns)\n', (177, 229), True, 'import pandas as pd\n'), ((409, 453), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(game_duration * round_num)'}), '(minutes=game_duration * round_num)\n', (418, 453), False, 'from datetime import timedelta\n'), ((932, 974), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(game_num * game_stagger)'}), '(minutes=game_num * game_stagger)\n', (941, 974), False, 'from datetime import timedelta\n'), ((1102, 1163), 'datetime.timedelta', 'timedelta', ([], {'minutes': '((game_duration + game_stagger) * round_num)'}), '(minutes=(game_duration + game_stagger) * round_num)\n', (1111, 1163), False, 'from datetime import timedelta\n')] |
# Databricks notebook source
# MAGIC %md
# MAGIC # XGBoost training
# MAGIC This is an auto-generated notebook. To reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun it.
# MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments UI, this link isn't very useful.)
# MAGIC - Clone this notebook into your project folder by selecting **File > Clone** in the notebook toolbar.
# MAGIC
# MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_
# COMMAND ----------
import mlflow
import databricks.automl_runtime
# Use MLflow to track experiments
mlflow.set_experiment("/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38")
target_col = "label"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Load Data
# COMMAND ----------
from mlflow.tracking import MlflowClient
import os
import uuid
import shutil
import pandas as pd
# Create temp directory to download input data from MLflow
input_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], "tmp", str(uuid.uuid4())[:8])
os.makedirs(input_temp_dir)
# Download the artifact and read it into a pandas DataFrame
input_client = MlflowClient()
input_data_path = input_client.download_artifacts("c2dfe80b419d4a8dbc88a90e3274369a", "data", input_temp_dir)
df_loaded = pd.read_parquet(os.path.join(input_data_path, "training_data"))
# Delete the temp data
shutil.rmtree(input_temp_dir)
# Preview data
df_loaded.head(5)
# COMMAND ----------
df_loaded.head(1).to_dict()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Select supported columns
# MAGIC Select only the columns that are supported. This allows us to train a model that can predict on a dataset that has extra columns that are not used in training.
# MAGIC `[]` are dropped in the pipelines. See the Alerts tab of the AutoML Experiment page for details on why these columns are dropped.
# COMMAND ----------
from databricks.automl_runtime.sklearn.column_selector import ColumnSelector
supported_cols = ["text_without_stopwords", "published", "language", "main_img_url", "site_url", "hasImage", "title_without_stopwords", "text", "title", "type", "author"]
col_selector = ColumnSelector(supported_cols)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Preprocessors
# COMMAND ----------
transformers = []
# COMMAND ----------
# MAGIC %md
# MAGIC ### Categorical columns
# COMMAND ----------
# MAGIC %md
# MAGIC #### Low-cardinality categoricals
# MAGIC Convert each low-cardinality categorical column into multiple binary columns through one-hot encoding.
# MAGIC For each input categorical column (string or numeric), the number of output columns is equal to the number of unique values in the input column.
# COMMAND ----------
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
one_hot_encoder = OneHotEncoder(handle_unknown="ignore")
transformers.append(("onehot", one_hot_encoder, ["published", "language", "site_url", "hasImage", "title", "title_without_stopwords", "text_without_stopwords"]))
# COMMAND ----------
# MAGIC %md
# MAGIC #### Medium-cardinality categoricals
# MAGIC Convert each medium-cardinality categorical column into a numerical representation.
# MAGIC Each string column is hashed to 1024 float columns.
# MAGIC Each numeric column is imputed with zeros.
# COMMAND ----------
from sklearn.feature_extraction import FeatureHasher
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
for feature in ["text", "main_img_url"]:
hash_transformer = Pipeline(steps=[
("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
(f"{feature}_hasher", FeatureHasher(n_features=1024, input_type="string"))])
transformers.append((f"{feature}_hasher", hash_transformer, [feature]))
# COMMAND ----------
# MAGIC %md
# MAGIC ### Text features
# MAGIC Convert each feature to a fixed-length vector using TF-IDF vectorization. The length of the output
# MAGIC vector is equal to 1024. Each column corresponds to one of the top word n-grams
# MAGIC where n is in the range [1, 2].
# COMMAND ----------
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
for col in {'type', 'author'}:
vectorizer = Pipeline(steps=[
("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
# Reshape to 1D since SimpleImputer changes the shape of the input to 2D
("reshape", FunctionTransformer(np.reshape, kw_args={"newshape":-1})),
("tfidf", TfidfVectorizer(decode_error="ignore", ngram_range = (1, 2), max_features=1024))])
transformers.append((f"text_{col}", vectorizer, [col]))
# COMMAND ----------
from sklearn.compose import ColumnTransformer
preprocessor = ColumnTransformer(transformers, remainder="passthrough", sparse_threshold=0)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Feature standardization
# MAGIC Scale all feature columns to be centered around zero with unit variance.
# COMMAND ----------
from sklearn.preprocessing import StandardScaler
standardizer = StandardScaler()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train - Validation - Test Split
# MAGIC Split the input data into 3 sets:
# MAGIC - Train (60% of the dataset used to train the model)
# MAGIC - Validation (20% of the dataset used to tune the hyperparameters of the model)
# MAGIC - Test (20% of the dataset used to report the true performance of the model on an unseen dataset)
# COMMAND ----------
df_loaded.columns
# COMMAND ----------
from sklearn.model_selection import train_test_split
split_X = df_loaded.drop([target_col], axis=1)
split_y = df_loaded[target_col]
# Split out train data
X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y)
# Split remaining data equally for validation and test
X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train classification model
# MAGIC - Log relevant metrics to MLflow to track runs
# MAGIC - All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Change the model parameters and re-run the training cell to log a different trial to the MLflow experiment
# MAGIC - To view the full list of tunable hyperparameters, check the output of the cell below
# COMMAND ----------
from xgboost import XGBClassifier
help(XGBClassifier)
# COMMAND ----------
import mlflow
import sklearn
from sklearn import set_config
from sklearn.pipeline import Pipeline
set_config(display="diagram")
xgbc_classifier = XGBClassifier(
colsample_bytree=0.7324555878929649,
learning_rate=0.007636627530856404,
max_depth=7,
min_child_weight=6,
n_estimators=106,
n_jobs=100,
subsample=0.6972187716458148,
verbosity=0,
random_state=799811440,
)
model = Pipeline([
("column_selector", col_selector),
("preprocessor", preprocessor),
("standardizer", standardizer),
("classifier", xgbc_classifier),
])
# Create a separate pipeline to transform the validation dataset. This is used for early stopping.
pipeline = Pipeline([
("column_selector", col_selector),
("preprocessor", preprocessor),
("standardizer", standardizer),
])
mlflow.sklearn.autolog(disable=True)
X_val_processed = pipeline.fit_transform(X_val, y_val)
model
# COMMAND ----------
# Enable automatic logging of input samples, metrics, parameters, and models
mlflow.sklearn.autolog(log_input_examples=True, silent=True)
with mlflow.start_run(run_name="xgboost") as mlflow_run:
model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False)
# Training metrics are logged by MLflow autologging
# Log metrics for the validation set
xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix="val_")
# Log metrics for the test set
xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix="test_")
# Display the logged metrics
xgbc_val_metrics = {k.replace("val_", ""): v for k, v in xgbc_val_metrics.items()}
xgbc_test_metrics = {k.replace("test_", ""): v for k, v in xgbc_test_metrics.items()}
display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=["validation", "test"]))
# COMMAND ----------
# Patch requisite packages to the model environment YAML for model serving
import os
import shutil
import uuid
import yaml
None
import xgboost
from mlflow.tracking import MlflowClient
xgbc_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], str(uuid.uuid4())[:8])
os.makedirs(xgbc_temp_dir)
xgbc_client = MlflowClient()
xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, "model/conda.yaml", xgbc_temp_dir)
xgbc_model_env_str = open(xgbc_model_env_path)
xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader)
xgbc_parsed_model_env_str["dependencies"][-1]["pip"].append(f"xgboost=={xgboost.__version__}")
with open(xgbc_model_env_path, "w") as f:
f.write(yaml.dump(xgbc_parsed_model_env_str))
xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path="model")
shutil.rmtree(xgbc_temp_dir)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Feature importance
# MAGIC
# MAGIC SHAP is a game-theoretic approach to explain machine learning models, providing a summary plot
# MAGIC of the relationship between features and model output. Features are ranked in descending order of
# MAGIC importance, and impact/color describe the correlation between the feature and the target variable.
# MAGIC - Generating SHAP feature importance is a very memory intensive operation, so to ensure that AutoML can run trials without
# MAGIC running out of memory, we disable SHAP by default.<br />
# MAGIC You can set the flag defined below to `shap_enabled = True` and re-run this notebook to see the SHAP plots.
# MAGIC - To reduce the computational overhead of each trial, a single example is sampled from the validation set to explain.<br />
# MAGIC For more thorough results, increase the sample size of explanations, or provide your own examples to explain.
# MAGIC - SHAP cannot explain models using data with nulls; if your dataset has any, both the background data and
# MAGIC examples to explain will be imputed using the mode (most frequent values). This affects the computed
# MAGIC SHAP values, as the imputed samples may not match the actual data distribution.
# MAGIC
# MAGIC For more information on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html).
# COMMAND ----------
# Set this flag to True and re-run the notebook to see the SHAP plots
shap_enabled = True
# COMMAND ----------
if shap_enabled:
from shap import KernelExplainer, summary_plot
# SHAP cannot explain models using data with nulls.
# To enable SHAP to succeed, both the background data and examples to explain are imputed with the mode (most frequent values).
mode = X_train.mode().iloc[0]
# Sample background data for SHAP Explainer. Increase the sample size to reduce variance.
train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode)
# Sample a single example from the validation set to explain. Increase the sample size and rerun for more thorough results.
example = X_val.sample(n=1).fillna(mode)
# Use Kernel SHAP to explain feature importance on the example from the validation set.
predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns))
explainer = KernelExplainer(predict, train_sample, link="logit")
shap_values = explainer.shap_values(example, l1_reg=False)
summary_plot(shap_values, example, class_names=model.classes_)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Inference
# MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. The snippets below show how to add the model trained in this notebook to the model registry and to retrieve it later for inference.
# MAGIC
# MAGIC > **NOTE:** The `model_uri` for the model already trained in this notebook can be found in the cell below
# MAGIC
# MAGIC ### Register to Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC registered_model_version = mlflow.register_model(model_uri, model_name)
# MAGIC ```
# MAGIC
# MAGIC ### Load from Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC model_version = registered_model_version.version
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri=f"models:/{model_name}/{model_version}")
# MAGIC model.predict(input_X)
# MAGIC ```
# MAGIC
# MAGIC ### Load model without registering
# MAGIC ```
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri)
# MAGIC model.predict(input_X)
# MAGIC ```
# COMMAND ----------
# model_uri for the generated model
print(f"runs:/{ mlflow_run.info.run_id }/model")
# COMMAND ----------
# MAGIC %md
# MAGIC ### Loading model to make prediction
# COMMAND ----------
model_uri = "runs:/51c0348482e042ea8e4b7983ab6bff99/model"
model = mlflow.pyfunc.load_model(model_uri)
#model.predict(input_X)
# COMMAND ----------
import pandas as pd
data = {'author': {0: '<EMAIL>jim.<EMAIL>'},
'published': {0: '2016-10-27T18:05:26.351+03:00'},
'title': {0: 'aliens are coming to invade earth'},
'text': {0: 'aliens are coming to invade earth'},
'language': {0: 'english'},
'site_url': {0: 'cnn.com'},
'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'},
'type': {0: 'bs'},
'title_without_stopwords': {0: 'aliens are coming to invade earth'},
'text_without_stopwords': {0: 'aliens are coming to invade earth'},
'hasImage': {0: 1.0}}
df = pd.DataFrame(data=data)
df.head()
# COMMAND ----------
model.predict(df)
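# COMMAND ----------
# Optional follow-up (not produced by AutoML): load the sklearn flavor of the same run to
# obtain class probabilities rather than only the predicted label. This assumes the run
# logged an sklearn-flavored model, which mlflow.sklearn.autolog does by default.
sklearn_model = mlflow.sklearn.load_model(model_uri)
sklearn_model.predict_proba(df)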
# COMMAND ----------
| [
"shap.summary_plot",
"mlflow.sklearn.autolog",
"yaml.load",
"mlflow.set_experiment",
"shap.KernelExplainer",
"sklearn.feature_extraction.FeatureHasher",
"mlflow.tracking.MlflowClient",
"mlflow.sklearn.eval_and_log_metrics",
"sklearn.compose.ColumnTransformer",
"pandas.DataFrame",
"mlflow.start_run",
"sklearn.preprocessing.FunctionTransformer",
"databricks.automl_runtime.sklearn.column_selector.ColumnSelector",
"yaml.dump",
"sklearn.model_selection.train_test_split",
"sklearn.set_config",
"uuid.uuid4",
"sklearn.pipeline.Pipeline",
"xgboost.XGBClassifier",
"os.makedirs",
"sklearn.preprocessing.OneHotEncoder",
"os.path.join",
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.impute.SimpleImputer",
"shutil.rmtree",
"mlflow.pyfunc.load_model"
]
| [((802, 910), 'mlflow.set_experiment', 'mlflow.set_experiment', (['"""/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38"""'], {}), "(\n '/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38'\n )\n", (823, 910), False, 'import mlflow\n'), ((1251, 1278), 'os.makedirs', 'os.makedirs', (['input_temp_dir'], {}), '(input_temp_dir)\n', (1262, 1278), False, 'import os\n'), ((1356, 1370), 'mlflow.tracking.MlflowClient', 'MlflowClient', ([], {}), '()\n', (1368, 1370), False, 'from mlflow.tracking import MlflowClient\n'), ((1581, 1610), 'shutil.rmtree', 'shutil.rmtree', (['input_temp_dir'], {}), '(input_temp_dir)\n', (1594, 1610), False, 'import shutil\n'), ((2360, 2390), 'databricks.automl_runtime.sklearn.column_selector.ColumnSelector', 'ColumnSelector', (['supported_cols'], {}), '(supported_cols)\n', (2374, 2390), False, 'from databricks.automl_runtime.sklearn.column_selector import ColumnSelector\n'), ((3028, 3066), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (3041, 3066), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((5101, 5177), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (['transformers'], {'remainder': '"""passthrough"""', 'sparse_threshold': '(0)'}), "(transformers, remainder='passthrough', sparse_threshold=0)\n", (5118, 5177), False, 'from sklearn.compose import ColumnTransformer\n'), ((5418, 5434), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5432, 5434), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6076, 6172), 'sklearn.model_selection.train_test_split', 'train_test_split', (['split_X', 'split_y'], {'train_size': '(0.6)', 'random_state': '(799811440)', 'stratify': 'split_y'}), '(split_X, split_y, train_size=0.6, random_state=799811440,\n stratify=split_y)\n', (6092, 6172), False, 'from sklearn.model_selection import train_test_split\n'), ((6256, 6364), 'sklearn.model_selection.train_test_split', 'train_test_split', (['split_X_rem', 'split_y_rem'], {'test_size': '(0.5)', 'random_state': '(799811440)', 'stratify': 'split_y_rem'}), '(split_X_rem, split_y_rem, test_size=0.5, random_state=\n 799811440, stratify=split_y_rem)\n', (6272, 6364), False, 'from sklearn.model_selection import train_test_split\n'), ((7059, 7088), 'sklearn.set_config', 'set_config', ([], {'display': '"""diagram"""'}), "(display='diagram')\n", (7069, 7088), False, 'from sklearn import set_config\n'), ((7108, 7338), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'colsample_bytree': '(0.7324555878929649)', 'learning_rate': '(0.007636627530856404)', 'max_depth': '(7)', 'min_child_weight': '(6)', 'n_estimators': '(106)', 'n_jobs': '(100)', 'subsample': '(0.6972187716458148)', 'verbosity': '(0)', 'random_state': '(799811440)'}), '(colsample_bytree=0.7324555878929649, learning_rate=\n 0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106,\n n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=\n 799811440)\n', (7121, 7338), False, 'from xgboost import XGBClassifier\n'), ((7355, 7501), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('column_selector', col_selector), ('preprocessor', preprocessor), (\n 'standardizer', standardizer), ('classifier', xgbc_classifier)]"], {}), "([('column_selector', col_selector), ('preprocessor', preprocessor),\n ('standardizer', standardizer), ('classifier', xgbc_classifier)])\n", (7363, 7501), False, 'from sklearn.pipeline import Pipeline\n'), ((7628, 
7741), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('column_selector', col_selector), ('preprocessor', preprocessor), (\n 'standardizer', standardizer)]"], {}), "([('column_selector', col_selector), ('preprocessor', preprocessor),\n ('standardizer', standardizer)])\n", (7636, 7741), False, 'from sklearn.pipeline import Pipeline\n'), ((7754, 7790), 'mlflow.sklearn.autolog', 'mlflow.sklearn.autolog', ([], {'disable': '(True)'}), '(disable=True)\n', (7776, 7790), False, 'import mlflow\n'), ((7953, 8013), 'mlflow.sklearn.autolog', 'mlflow.sklearn.autolog', ([], {'log_input_examples': '(True)', 'silent': '(True)'}), '(log_input_examples=True, silent=True)\n', (7975, 8013), False, 'import mlflow\n'), ((9148, 9174), 'os.makedirs', 'os.makedirs', (['xgbc_temp_dir'], {}), '(xgbc_temp_dir)\n', (9159, 9174), False, 'import os\n'), ((9189, 9203), 'mlflow.tracking.MlflowClient', 'MlflowClient', ([], {}), '()\n', (9201, 9203), False, 'from mlflow.tracking import MlflowClient\n'), ((9391, 9444), 'yaml.load', 'yaml.load', (['xgbc_model_env_str'], {'Loader': 'yaml.FullLoader'}), '(xgbc_model_env_str, Loader=yaml.FullLoader)\n', (9400, 9444), False, 'import yaml\n'), ((9743, 9771), 'shutil.rmtree', 'shutil.rmtree', (['xgbc_temp_dir'], {}), '(xgbc_temp_dir)\n', (9756, 9771), False, 'import shutil\n'), ((14131, 14166), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['model_uri'], {}), '(model_uri)\n', (14155, 14166), False, 'import mlflow\n'), ((14819, 14842), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (14831, 14842), True, 'import pandas as pd\n'), ((1510, 1556), 'os.path.join', 'os.path.join', (['input_data_path', '"""training_data"""'], {}), "(input_data_path, 'training_data')\n", (1522, 1556), False, 'import os\n'), ((8020, 8056), 'mlflow.start_run', 'mlflow.start_run', ([], {'run_name': '"""xgboost"""'}), "(run_name='xgboost')\n", (8036, 8056), False, 'import mlflow\n'), ((8341, 8412), 'mlflow.sklearn.eval_and_log_metrics', 'mlflow.sklearn.eval_and_log_metrics', (['model', 'X_val', 'y_val'], {'prefix': '"""val_"""'}), "(model, X_val, y_val, prefix='val_')\n", (8376, 8412), False, 'import mlflow\n'), ((8473, 8547), 'mlflow.sklearn.eval_and_log_metrics', 'mlflow.sklearn.eval_and_log_metrics', (['model', 'X_test', 'y_test'], {'prefix': '"""test_"""'}), "(model, X_test, y_test, prefix='test_')\n", (8508, 8547), False, 'import mlflow\n'), ((12253, 12305), 'shap.KernelExplainer', 'KernelExplainer', (['predict', 'train_sample'], {'link': '"""logit"""'}), "(predict, train_sample, link='logit')\n", (12268, 12305), False, 'from shap import KernelExplainer, summary_plot\n'), ((12373, 12435), 'shap.summary_plot', 'summary_plot', (['shap_values', 'example'], {'class_names': 'model.classes_'}), '(shap_values, example, class_names=model.classes_)\n', (12385, 12435), False, 'from shap import KernelExplainer, summary_plot\n'), ((8771, 8856), 'pandas.DataFrame', 'pd.DataFrame', (['[xgbc_val_metrics, xgbc_test_metrics]'], {'index': "['validation', 'test']"}), "([xgbc_val_metrics, xgbc_test_metrics], index=['validation',\n 'test'])\n", (8783, 8856), True, 'import pandas as pd\n'), ((9594, 9630), 'yaml.dump', 'yaml.dump', (['xgbc_parsed_model_env_str'], {}), '(xgbc_parsed_model_env_str)\n', (9603, 9630), False, 'import yaml\n'), ((1232, 1244), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1242, 1244), False, 'import uuid\n'), ((9129, 9141), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9139, 9141), False, 'import uuid\n'), ((12195, 12235), 'pandas.DataFrame', 'pd.DataFrame', 
(['x'], {'columns': 'X_train.columns'}), '(x, columns=X_train.columns)\n', (12207, 12235), True, 'import pandas as pd\n'), ((3770, 3840), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'None', 'strategy': '"""constant"""', 'fill_value': '""""""'}), "(missing_values=None, strategy='constant', fill_value='')\n", (3783, 3840), False, 'from sklearn.impute import SimpleImputer\n'), ((3873, 3924), 'sklearn.feature_extraction.FeatureHasher', 'FeatureHasher', ([], {'n_features': '(1024)', 'input_type': '"""string"""'}), "(n_features=1024, input_type='string')\n", (3886, 3924), False, 'from sklearn.feature_extraction import FeatureHasher\n'), ((4621, 4691), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'None', 'strategy': '"""constant"""', 'fill_value': '""""""'}), "(missing_values=None, strategy='constant', fill_value='')\n", (4634, 4691), False, 'from sklearn.impute import SimpleImputer\n'), ((4795, 4852), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['np.reshape'], {'kw_args': "{'newshape': -1}"}), "(np.reshape, kw_args={'newshape': -1})\n", (4814, 4852), False, 'from sklearn.preprocessing import FunctionTransformer\n'), ((4872, 4949), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'decode_error': '"""ignore"""', 'ngram_range': '(1, 2)', 'max_features': '(1024)'}), "(decode_error='ignore', ngram_range=(1, 2), max_features=1024)\n", (4887, 4949), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n')] |
import importlib
import pkgutil
__all__ = []
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
    # Import each submodule and hoist the public names it declares in __all__
    # into this package's namespace, so they can be imported from the package directly.
    module = importlib.import_module('.' + module_name, package=__name__)
    try:
        globals().update({k: getattr(module, k) for k in module.__all__})
        __all__ += module.__all__
    except AttributeError:
        # Submodule does not define __all__; nothing to re-export.
        continue
| [
"importlib.import_module",
"pkgutil.walk_packages"
]
| [((84, 115), 'pkgutil.walk_packages', 'pkgutil.walk_packages', (['__path__'], {}), '(__path__)\n', (105, 115), False, 'import pkgutil\n'), ((130, 190), 'importlib.import_module', 'importlib.import_module', (["('.' + module_name)"], {'package': '__name__'}), "('.' + module_name, package=__name__)\n", (153, 190), False, 'import importlib\n')] |
import numpy as np
def get_position_of_minimum(matrix):
    # (row, col) index of the smallest value, ignoring NaNs.
    return np.unravel_index(np.nanargmin(matrix), matrix.shape)
def get_position_of_maximum(matrix):
    # (row, col) index of the largest value, ignoring NaNs.
    return np.unravel_index(np.nanargmax(matrix), matrix.shape)
def get_distance_matrix(cell_grid_x, cell_grid_y, x, y):
    # Euclidean distance from the point (x, y) to every cell of the grid.
    return np.sqrt((x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2)
def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y):
    # Squared distance; avoids the sqrt when only relative ordering is needed.
    return (x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2
| [
"numpy.nanargmax",
"numpy.nanargmin",
"numpy.sqrt"
]
| [((295, 351), 'numpy.sqrt', 'np.sqrt', (['((x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2)'], {}), '((x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2)\n', (302, 351), True, 'import numpy as np\n'), ((86, 106), 'numpy.nanargmin', 'np.nanargmin', (['matrix'], {}), '(matrix)\n', (98, 106), True, 'import numpy as np\n'), ((189, 209), 'numpy.nanargmax', 'np.nanargmax', (['matrix'], {}), '(matrix)\n', (201, 209), True, 'import numpy as np\n')] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Nicira Networks, Inc.
from abc import abstractmethod
from quantum.api import extensions
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import exceptions as qexception
from quantum import manager
# For policy.json/Auth
qos_queue_create = "create_qos_queue"
qos_queue_delete = "delete_qos_queue"
qos_queue_get = "get_qos_queue"
qos_queue_list = "get_qos_queues"
class DefaultQueueCreateNotAdmin(qexception.InUse):
message = _("Need to be admin in order to create queue called default")
class DefaultQueueAlreadyExists(qexception.InUse):
message = _("Default queue already exists.")
class QueueInvalidDscp(qexception.InvalidInput):
message = _("Invalid value for dscp %(data)s must be integer.")
class QueueMinGreaterMax(qexception.InvalidInput):
message = _("Invalid bandwidth rate, min greater than max.")
class QueueInvalidBandwidth(qexception.InvalidInput):
message = _("Invalid bandwidth rate, %(data)s must be a non negative"
" integer.")
class MissingDSCPForTrusted(qexception.InvalidInput):
message = _("No DSCP field needed when QoS workload marked trusted")
class QueueNotFound(qexception.NotFound):
message = _("Queue %(id)s does not exist")
class QueueInUseByPort(qexception.InUse):
message = _("Unable to delete queue attached to port.")
class QueuePortBindingNotFound(qexception.NotFound):
message = _("Port is not associated with lqueue")
def convert_to_unsigned_int_or_none(val):
if val is None:
return
try:
val = int(val)
if val < 0:
raise ValueError
except (ValueError, TypeError):
msg = _("'%s' must be a non negative integer.") % val
raise qexception.InvalidInput(error_message=msg)
return val
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
'qos_queues': {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'default': {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_boolean,
'is_visible': True, 'default': False},
'name': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'min': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '0',
'convert_to': convert_to_unsigned_int_or_none},
'max': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'convert_to': convert_to_unsigned_int_or_none},
'qos_marking': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['untrusted', 'trusted']},
'default': 'untrusted', 'is_visible': True},
'dscp': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '0',
'convert_to': convert_to_unsigned_int_or_none},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
},
}
QUEUE = 'queue_id'
RXTX_FACTOR = 'rxtx_factor'
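# Attributes added to the core port and network resources: ports accept an rxtx_factor
# at creation time and expose a read-only queue_id, while networks carry a queue_id
# that can be set on create or update.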
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
RXTX_FACTOR: {'allow_post': True,
'allow_put': False,
'is_visible': False,
'default': 1,
'convert_to': convert_to_unsigned_int_or_none},
QUEUE: {'allow_post': False,
'allow_put': False,
'is_visible': True,
'default': False}},
'networks': {QUEUE: {'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': False}}
}
class Nvp_qos(object):
"""Port Queue extension."""
@classmethod
def get_name(cls):
return "nvp-qos"
@classmethod
def get_alias(cls):
return "nvp-qos"
@classmethod
def get_description(cls):
return "NVP QoS extension."
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/nvp-qos/api/v2.0"
@classmethod
def get_updated(cls):
return "2012-10-05T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
plugin = manager.QuantumManager.get_plugin()
resource_name = 'qos_queue'
collection_name = resource_name.replace('_', '-') + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
controller = base.create_resource(collection_name,
resource_name,
plugin, params, allow_bulk=False)
ex = extensions.ResourceExtension(collection_name,
controller)
exts.append(ex)
return exts
def get_extended_resources(self, version):
if version == "2.0":
return dict(EXTENDED_ATTRIBUTES_2_0.items() +
RESOURCE_ATTRIBUTE_MAP.items())
else:
return {}
class QueuePluginBase(object):
@abstractmethod
def create_qos_queue(self, context, queue):
pass
@abstractmethod
def delete_qos_queue(self, context, id):
pass
@abstractmethod
def get_qos_queue(self, context, id, fields=None):
pass
@abstractmethod
def get_qos_queues(self, context, filters=None, fields=None):
pass
| [
"quantum.common.exceptions.InvalidInput",
"quantum.manager.QuantumManager.get_plugin",
"quantum.api.extensions.ResourceExtension",
"quantum.api.v2.base.create_resource"
]
| [((5189, 5224), 'quantum.manager.QuantumManager.get_plugin', 'manager.QuantumManager.get_plugin', ([], {}), '()\n', (5222, 5224), False, 'from quantum import manager\n'), ((5419, 5509), 'quantum.api.v2.base.create_resource', 'base.create_resource', (['collection_name', 'resource_name', 'plugin', 'params'], {'allow_bulk': '(False)'}), '(collection_name, resource_name, plugin, params,\n allow_bulk=False)\n', (5439, 5509), False, 'from quantum.api.v2 import base\n'), ((5604, 5661), 'quantum.api.extensions.ResourceExtension', 'extensions.ResourceExtension', (['collection_name', 'controller'], {}), '(collection_name, controller)\n', (5632, 5661), False, 'from quantum.api import extensions\n'), ((2441, 2483), 'quantum.common.exceptions.InvalidInput', 'qexception.InvalidInput', ([], {'error_message': 'msg'}), '(error_message=msg)\n', (2464, 2483), True, 'from quantum.common import exceptions as qexception\n')] |
# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from networking_odl.common import constants as odl_const
from networking_odl.dhcp import odl_dhcp_driver
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base
from oslo_config import cfg
load_tests = testscenarios.load_tests_apply_scenarios
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase):
def setUp(self):
super(OdlDhcpDriverTestCase, self).setUp()
cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl')
self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
self.mech.initialize()
def test_dhcp_flag_test(self):
self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service)
def test_dhcp_driver_load(self):
self.assertTrue(isinstance(self.mech.dhcp_driver,
odl_dhcp_driver.OdlDhcpDriver))
def test_dhcp_port_create_on_subnet_event(self):
data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
True)
subnet_context = data['subnet_context']
mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
self.mech.journal.sync_pending_entries()
port = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNotNone(port)
def test_dhcp_delete_on_port_update_event(self):
data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
True)
subnet_context = data['subnet_context']
plugin = data['plugin']
self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context)
port_id = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNotNone(port_id)
port = plugin.get_port(data['context'], port_id)
port['fixed_ips'] = []
ports = {'port': port}
plugin.update_port(data['context'], port_id, ports)
mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)
self.mech.journal.sync_pending_entries()
port_id = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNone(port_id)
| [
"oslo_config.cfg.CONF.import_group",
"oslo_config.cfg.CONF.set_override",
"networking_odl.ml2.mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal",
"networking_odl.ml2.mech_driver_v2.OpenDaylightMechanismDriver"
]
| [((966, 1030), 'oslo_config.cfg.CONF.import_group', 'cfg.CONF.import_group', (['"""ml2_odl"""', '"""networking_odl.common.config"""'], {}), "('ml2_odl', 'networking_odl.common.config')\n", (987, 1030), False, 'from oslo_config import cfg\n'), ((1192, 1253), 'oslo_config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""enable_dhcp_service"""', '(True)', '"""ml2_odl"""'], {}), "('enable_dhcp_service', True, 'ml2_odl')\n", (1213, 1253), False, 'from oslo_config import cfg\n'), ((1274, 1318), 'networking_odl.ml2.mech_driver_v2.OpenDaylightMechanismDriver', 'mech_driver_v2.OpenDaylightMechanismDriver', ([], {}), '()\n', (1316, 1318), False, 'from networking_odl.ml2 import mech_driver_v2\n'), ((1858, 1983), 'networking_odl.ml2.mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal', 'mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal', (['subnet_context', 'odl_const.ODL_SUBNET', 'odl_const.ODL_CREATE'], {}), '(subnet_context,\n odl_const.ODL_SUBNET, odl_const.ODL_CREATE)\n', (1919, 1983), False, 'from networking_odl.ml2 import mech_driver_v2\n'), ((3062, 3191), 'networking_odl.ml2.mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal', 'mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal', (['subnet_context', 'odl_const.ODL_PORT', 'odl_const.ODL_UPDATE', 'port'], {}), '(subnet_context,\n odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)\n', (3123, 3191), False, 'from networking_odl.ml2 import mech_driver_v2\n')] |