code (stringlengths 22–1.05M) | apis (listlengths 1–3.31k) | extract_api (stringlengths 75–3.25M)
---|---|---|
import unittest
import dace
import numpy as np
from dace.transformation.dataflow import MapTiling, OutLocalStorage
N = dace.symbol('N')
@dace.program
def arange():
out = np.ndarray([N], np.int32)
for i in dace.map[0:N]:
with dace.tasklet:
o >> out[i]
o = i
return out
class LocalStorageTests(unittest.TestCase):
def test_even(self):
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [8]
}, {}])
self.assertTrue(
np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32)))
def test_uneven(self):
# For testing uneven decomposition, use longer buffer and ensure
# it's not filled over
output = np.ones(20, np.int32)
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [5]
}, {}])
dace.propagate_memlets_sdfg(sdfg)
sdfg(N=16, __return=output)
self.assertTrue(
np.array_equal(output[:16], np.arange(16, dtype=np.int32)))
self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32)))
if __name__ == '__main__':
unittest.main()
| [
"numpy.ones",
"dace.propagate_memlets_sdfg",
"dace.symbol",
"numpy.ndarray",
"unittest.main",
"numpy.arange"
]
| [((120, 136), 'dace.symbol', 'dace.symbol', (['"""N"""'], {}), "('N')\n", (131, 136), False, 'import dace\n'), ((177, 202), 'numpy.ndarray', 'np.ndarray', (['[N]', 'np.int32'], {}), '([N], np.int32)\n', (187, 202), True, 'import numpy as np\n'), ((1422, 1437), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1435, 1437), False, 'import unittest\n'), ((874, 895), 'numpy.ones', 'np.ones', (['(20)', 'np.int32'], {}), '(20, np.int32)\n', (881, 895), True, 'import numpy as np\n'), ((1147, 1180), 'dace.propagate_memlets_sdfg', 'dace.propagate_memlets_sdfg', (['sdfg'], {}), '(sdfg)\n', (1174, 1180), False, 'import dace\n'), ((693, 722), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.int32'}), '(16, dtype=np.int32)\n', (702, 722), True, 'import numpy as np\n'), ((1282, 1311), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.int32'}), '(16, dtype=np.int32)\n', (1291, 1311), True, 'import numpy as np\n'), ((1366, 1386), 'numpy.ones', 'np.ones', (['(4)', 'np.int32'], {}), '(4, np.int32)\n', (1373, 1386), True, 'import numpy as np\n')] |
import django
from django.test import TestCase
from django.template import Template, Context
class genericObj(object):
"""
A generic object for testing templatetags
"""
def __init__(self):
self.name = "test"
self.status = "ready"
def getOption(self, optionName):
if optionName == "name":
return self.name
elif optionName == "status":
return self.status
def getName(self):
return self.name
def render(template_string, context_dict=None):
"""
A shortcut for testing template output.
"""
if context_dict is None:
context_dict = {}
c = Context(context_dict)
t = Template(template_string)
return t.render(c).strip()
class object_extrasTests(TestCase):
def test_callMethod(self):
genObj = genericObj()
template = """
{% load object_extras %}
{{ obj|args:"name"|call:"getOption" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "test")
template = """
{% load object_extras %}
{{ obj|call:"getName" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "test")
def test_check_type(self):
genObj = genericObj()
template = """
{% load object_extras %}
{{ obj|obj_type:"genericObj" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "True")
template = """
{% load object_extras %}
{{ obj|obj_type:"notexist" }}
"""
context = {
'obj': genObj
}
self.assertEqual(render(template, context), "False")
class static_extrasTests(TestCase):
def setUp(self):
self.widgetTypeSetJs = set()
self.widgetTypeSetJs.add('queryonclick')
self.widgetTypeSetCss = set()
self.widgetTypeSetCss.add('geoexttoolbar')
def test_getJsStatics(self):
template = """
{% load staticfiles %}
{% load static_extras %}
{% getJsStatics widgetTypeSet as widget_js %}
{% for static_path in widget_js %}
<script src="{% static static_path %}" type="text/javascript"></script>
{% endfor %}
"""
context = {
'widgetTypeSet': self.widgetTypeSetJs
}
out = '<script src="/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js" type="text/javascript"></script>'
self.assertEqual(render(template, context), out)
def test_getCssStatics(self):
template = """
{% load staticfiles %}
{% load static_extras %}
{% getCssStatics widgetTypeSet as widget_css %}
{% for static_path in widget_css %}
<link rel="stylesheet" type="text/css" href="{% static static_path %}" />
{% endfor %}
"""
context = {
'widgetTypeSet': self.widgetTypeSetCss
}
out = '<link rel="stylesheet" type="text/css" href="/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css" />'
self.assertEqual(render(template, context), out)
def test_template_exist(self):
template = """
{% load static_extras %}
{{ "geoprisma/widgets/queryonclick/queryonclick.html"|template_exists }}
"""
self.assertEqual(render(template), "True")
template = """
{% load static_extras %}
{{ "geoprisma/widgets/queryonclick/queryonclicknotexist.html"|template_exists }}
"""
self.assertEqual(render(template), "False")
| [
"django.template.Template",
"django.template.Context"
]
| [((685, 706), 'django.template.Context', 'Context', (['context_dict'], {}), '(context_dict)\n', (692, 706), False, 'from django.template import Template, Context\n'), ((716, 741), 'django.template.Template', 'Template', (['template_string'], {}), '(template_string)\n', (724, 741), False, 'from django.template import Template, Context\n')] |
import pytest
import rumps
from src.app_functions.menu.change_auto_login import change_auto_login
@pytest.fixture(name="basic_app")
def create_app():
"""Creates a basic app object with some variables to pass to functions
Returns:
rumps.App: Basic app
"""
app = rumps.App("TestApp")
app.settings = {}
return app
def test_setting_is_true(mocker, basic_app):
"""Check if setting is changed correctly if True"""
basic_app.settings["auto_login"] = True
mock_function = mocker.patch("src.app_functions.menu.change_auto_login.update_menu")
mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
change_auto_login(basic_app)
assert basic_app.settings["auto_login"] is False
mock_function.assert_called_once_with(basic_app)
def test_setting_is_false(mocker, basic_app):
"""Check if setting is changed correctly if false"""
basic_app.settings["auto_login"] = False
mock_function = mocker.patch("src.app_functions.menu.change_auto_login.update_menu")
mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
change_auto_login(basic_app)
assert basic_app.settings["auto_login"] is True
mock_function.assert_called_once_with(basic_app)
| [
"pytest.fixture",
"rumps.App",
"src.app_functions.menu.change_auto_login.change_auto_login"
]
| [((101, 133), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""basic_app"""'}), "(name='basic_app')\n", (115, 133), False, 'import pytest\n'), ((288, 308), 'rumps.App', 'rumps.App', (['"""TestApp"""'], {}), "('TestApp')\n", (297, 308), False, 'import rumps\n'), ((661, 689), 'src.app_functions.menu.change_auto_login.change_auto_login', 'change_auto_login', (['basic_app'], {}), '(basic_app)\n', (678, 689), False, 'from src.app_functions.menu.change_auto_login import change_auto_login\n'), ((1114, 1142), 'src.app_functions.menu.change_auto_login.change_auto_login', 'change_auto_login', (['basic_app'], {}), '(basic_app)\n', (1131, 1142), False, 'from src.app_functions.menu.change_auto_login import change_auto_login\n')] |
# -*- coding: utf-8 -*-
"""VGG 19 architecture for CIFAR-100."""
import tensorflow as tf
from ._vgg import _vgg
from ..datasets.cifar100 import cifar100
from .testproblem import TestProblem
class cifar100_vgg19(TestProblem):
"""DeepOBS test problem class for the VGG 19 network on Cifar-100.
The CIFAR-100 images are resized to ``224`` by ``224`` to fit the input
dimension of the original VGG network, which was designed for ImageNet.
Details about the architecture can be found in the `original paper`_.
VGG 19 consists of 19 weight layers, mostly convolutions. The model uses
cross-entropy loss. A weight decay is used on the weights (but not the biases)
which defaults to ``5e-4``.
.. _original paper: https://arxiv.org/abs/1409.1556
Args:
batch_size (int): Batch size to use.
weight_decay (float): Weight decay factor. Weight decay (L2-regularization)
is used on the weights but not the biases.
Defaults to ``5e-4``.
Attributes:
dataset: The DeepOBS data set class for Cifar-100.
train_init_op: A tensorflow operation initializing the test problem for the
training phase.
train_eval_init_op: A tensorflow operation initializing the test problem for
evaluating on training data.
test_init_op: A tensorflow operation initializing the test problem for
evaluating on test data.
losses: A tf.Tensor of shape (batch_size, ) containing the per-example loss
values.
regularizer: A scalar tf.Tensor containing a regularization term.
accuracy: A scalar tf.Tensor containing the mini-batch mean accuracy.
"""
def __init__(self, batch_size, weight_decay=5e-4):
"""Create a new VGG 19 test problem instance on Cifar-100.
Args:
batch_size (int): Batch size to use.
weight_decay (float): Weight decay factor. Weight decay (L2-regularization)
is used on the weights but not the biases.
Defaults to ``5e-4``.
"""
super(cifar100_vgg19, self).__init__(batch_size, weight_decay)
def set_up(self):
"""Set up the VGG 19 test problem on Cifar-100."""
self.dataset = cifar100(self._batch_size)
self.train_init_op = self.dataset.train_init_op
self.train_eval_init_op = self.dataset.train_eval_init_op
self.valid_init_op = self.dataset.valid_init_op
self.test_init_op = self.dataset.test_init_op
training = tf.equal(self.dataset.phase, "train")
x, y = self.dataset.batch
linear_outputs = _vgg(
x,
training,
variant=19,
num_outputs=100,
weight_decay=self._weight_decay,
)
self.losses = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=y, logits=linear_outputs
)
y_pred = tf.argmax(linear_outputs, 1)
y_correct = tf.argmax(y, 1)
correct_prediction = tf.equal(y_pred, y_correct)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.regularizer = tf.losses.get_regularization_loss()
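# Illustrative usage sketch (added for clarity; not part of the original DeepOBS
# file). It assumes the usual DeepOBS TF1 workflow implied by the attributes
# documented in the class docstring above.
if __name__ == "__main__":
    problem = cifar100_vgg19(batch_size=128)
    problem.set_up()
    train_loss = tf.reduce_mean(problem.losses) + problem.regularizer
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(problem.train_init_op)
        print(sess.run([train_loss, problem.accuracy]))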
| [
"tensorflow.equal",
"tensorflow.losses.get_regularization_loss",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.argmax",
"tensorflow.cast"
]
| [((2453, 2490), 'tensorflow.equal', 'tf.equal', (['self.dataset.phase', '"""train"""'], {}), "(self.dataset.phase, 'train')\n", (2461, 2490), True, 'import tensorflow as tf\n'), ((2724, 2799), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'y', 'logits': 'linear_outputs'}), '(labels=y, logits=linear_outputs)\n', (2766, 2799), True, 'import tensorflow as tf\n'), ((2839, 2867), 'tensorflow.argmax', 'tf.argmax', (['linear_outputs', '(1)'], {}), '(linear_outputs, 1)\n', (2848, 2867), True, 'import tensorflow as tf\n'), ((2888, 2903), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (2897, 2903), True, 'import tensorflow as tf\n'), ((2933, 2960), 'tensorflow.equal', 'tf.equal', (['y_pred', 'y_correct'], {}), '(y_pred, y_correct)\n', (2941, 2960), True, 'import tensorflow as tf\n'), ((3069, 3104), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {}), '()\n', (3102, 3104), True, 'import tensorflow as tf\n'), ((3000, 3039), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3007, 3039), True, 'import tensorflow as tf\n')] |
import asyncio
import functools
import time
import weakref
from collections import defaultdict
from typing import AsyncIterable
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
def async_ttl_cache(
ttl: Optional[float] = 300,
cleanup_self: bool = False,
*,
cache: Optional[Dict] = None,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
async def call_or_get_from_cache(cache, async_func, args_for_key, args, kwargs):
# Please note that anything which is put into `key` will be in the
# cache forever, potentially causing memory leaks. The most common
# case is the `self` arg pointing to a huge object. To mitigate that
# we're using `args_for_key`, which is supposed not contain any huge
# objects.
key = functools._make_key(args_for_key, kwargs, typed=False)
try:
future, last_update = cache[key]
if ttl is not None and time.time() - last_update > ttl:
raise KeyError
except KeyError:
future = asyncio.ensure_future(async_func(*args, **kwargs))
# set the timestamp to +infinity so that we always wait on the in-flight request.
cache[key] = (future, float("Inf"))
try:
value = await future
except Exception:
# Only update the cache if it's the same future we awaited and
# it hasn't already been updated by another coroutine
# Note also that we use get() in case the key was deleted from the
# cache by another coroutine
if cache.get(key) == (future, float("Inf")):
del cache[key]
raise
else:
if cache.get(key) == (future, float("Inf")):
cache[key] = (future, time.time())
return value
if cleanup_self:
instance_caches: Dict = cache if cache is not None else defaultdict(dict)
def on_delete(w):
del instance_caches[w]
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(self, *args, **kwargs):
w = weakref.ref(self, on_delete)
self_cache = instance_caches[w]
return await call_or_get_from_cache(
self_cache, wrapped, args, (self,) + args, kwargs
)
return inner
else:
cache2: Dict = cache if cache is not None else {} # Should be Dict[Any, T] but that doesn't work.
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await call_or_get_from_cache(cache2, wrapped, args, args, kwargs)
return inner
return outer
async def aiter_to_list(aiter: AsyncIterable[T],) -> List[T]:
return [x async for x in aiter]
def async_timeout(
seconds: int = 10,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await asyncio.wait_for(wrapped(*args, **kwargs), timeout=seconds)
return inner
return outer
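# Illustrative usage (added as a sketch; `slow_square` and `demo` are hypothetical
# names, not part of the original module): cache results for 60 seconds and bound
# each call to a 5-second timeout.
if __name__ == "__main__":
    @async_timeout(seconds=5)
    @async_ttl_cache(ttl=60)
    async def slow_square(x: int) -> int:
        await asyncio.sleep(0.1)  # stand-in for real I/O
        return x * x

    async def demo() -> None:
        print(await slow_square(3))  # computed once
        print(await slow_square(3))  # served from the TTL cache

    asyncio.run(demo())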
| [
"functools._make_key",
"functools.wraps",
"collections.defaultdict",
"time.time",
"weakref.ref",
"typing.TypeVar"
]
| [((294, 306), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (301, 306), False, 'from typing import TypeVar\n'), ((1062, 1116), 'functools._make_key', 'functools._make_key', (['args_for_key', 'kwargs'], {'typed': '(False)'}), '(args_for_key, kwargs, typed=False)\n', (1081, 1116), False, 'import functools\n'), ((3291, 3315), 'functools.wraps', 'functools.wraps', (['wrapped'], {}), '(wrapped)\n', (3306, 3315), False, 'import functools\n'), ((2186, 2203), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (2197, 2203), False, 'from collections import defaultdict\n'), ((2308, 2332), 'functools.wraps', 'functools.wraps', (['wrapped'], {}), '(wrapped)\n', (2323, 2332), False, 'import functools\n'), ((2809, 2833), 'functools.wraps', 'functools.wraps', (['wrapped'], {}), '(wrapped)\n', (2824, 2833), False, 'import functools\n'), ((2405, 2433), 'weakref.ref', 'weakref.ref', (['self', 'on_delete'], {}), '(self, on_delete)\n', (2416, 2433), False, 'import weakref\n'), ((2062, 2073), 'time.time', 'time.time', ([], {}), '()\n', (2071, 2073), False, 'import time\n'), ((1210, 1221), 'time.time', 'time.time', ([], {}), '()\n', (1219, 1221), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Basis Pursuit DeNoising
=======================
This example demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic`
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{x} \|_1 \;,$$
where $D$ is the dictionary, $\mathbf{x}$ is the sparse representation, and $\mathbf{s}$ is the signal to be represented. In this example the BPDN problem is used to estimate the reference sparse representation that generated a signal from a noisy version of the signal.
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.admm import bpdn
from sporco import util
from sporco import plot
"""
Configure problem size, sparsity, and noise level.
"""
N = 512 # Signal size
M = 4*N # Dictionary size
L = 32 # Number of non-zero coefficients in generator
sigma = 0.5 # Noise level
"""
Construct random dictionary, reference random sparse representation, and test signal consisting of the synthesis of the reference sparse representation with additive Gaussian noise.
"""
# Construct random dictionary and random sparse coefficients
np.random.seed(12345)
D = np.random.randn(N, M)
x0 = np.zeros((M, 1))
si = np.random.permutation(list(range(0, M-1)))
x0[si[0:L]] = np.random.randn(L, 1)
# Construct reference and noisy signal
s0 = D.dot(x0)
s = s0 + sigma*np.random.randn(N,1)
"""
Set BPDN solver class options.
"""
opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}})
"""
Select regularization parameter $\lambda$ by evaluating the error in recovering the sparse representation over a logarithmically spaced grid. (The reference representation is assumed to be known, which is not realistic in a real application.) A function is defined that evaluates the BPDN recovery error for a specified $\lambda$, and this function is evaluated in parallel by :func:`sporco.util.grid_search`.
"""
# Function computing reconstruction error at lmbda
def evalerr(prm):
lmbda = prm[0]
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
return np.sum(np.abs(x-x0))
# Parallel evalution of error function on lmbda grid
lrng = np.logspace(1, 2, 20)
sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,))
lmbda = sprm[0]
print('Minimum ℓ1 error: %5.2f at 𝜆 = %.2e' % (sfvl, lmbda))
"""
Once the best $\lambda$ has been determined, run BPDN with verbose display of ADMM iteration statistics.
"""
# Initialise and run BPDN object for best lmbda
opt['Verbose'] = True
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
print("BPDN solve time: %.2fs" % b.timer.elapsed('solve'))
"""
Plot comparison of reference and recovered representations.
"""
plot.plot(np.hstack((x0, x)), title='Sparse representation',
lgnd=['Reference', 'Reconstructed'])
"""
Plot lmbda error curve, functional value, residuals, and rho
"""
its = b.getitstat()
fig = plot.figure(figsize=(15, 10))
plot.subplot(2, 2, 1)
plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\lambda$',
ylbl='Error', fig=fig)
plot.subplot(2, 2, 2)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(2, 2, 3)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(2, 2, 4)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
# Wait for enter on keyboard
input()
| [
"numpy.abs",
"builtins.input",
"sporco.util.grid_search",
"numpy.hstack",
"sporco.admm.bpdn.BPDN",
"numpy.zeros",
"numpy.random.seed",
"numpy.vstack",
"sporco.admm.bpdn.BPDN.Options",
"sporco.plot.figure",
"sporco.plot.subplot",
"numpy.logspace",
"numpy.random.randn",
"sporco.plot.plot"
]
| [((1417, 1438), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (1431, 1438), True, 'import numpy as np\n'), ((1443, 1464), 'numpy.random.randn', 'np.random.randn', (['N', 'M'], {}), '(N, M)\n', (1458, 1464), True, 'import numpy as np\n'), ((1470, 1486), 'numpy.zeros', 'np.zeros', (['(M, 1)'], {}), '((M, 1))\n', (1478, 1486), True, 'import numpy as np\n'), ((1549, 1570), 'numpy.random.randn', 'np.random.randn', (['L', '(1)'], {}), '(L, 1)\n', (1564, 1570), True, 'import numpy as np\n'), ((1710, 1825), 'sporco.admm.bpdn.BPDN.Options', 'bpdn.BPDN.Options', (["{'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': 0.001, 'AutoRho': {\n 'RsdlTarget': 1.0}}"], {}), "({'Verbose': False, 'MaxMainIter': 500, 'RelStopTol': \n 0.001, 'AutoRho': {'RsdlTarget': 1.0}})\n", (1727, 1825), False, 'from sporco.admm import bpdn\n'), ((2494, 2515), 'numpy.logspace', 'np.logspace', (['(1)', '(2)', '(20)'], {}), '(1, 2, 20)\n', (2505, 2515), True, 'import numpy as np\n'), ((2541, 2575), 'sporco.util.grid_search', 'util.grid_search', (['evalerr', '(lrng,)'], {}), '(evalerr, (lrng,))\n', (2557, 2575), False, 'from sporco import util\n'), ((2844, 2871), 'sporco.admm.bpdn.BPDN', 'bpdn.BPDN', (['D', 's', 'lmbda', 'opt'], {}), '(D, s, lmbda, opt)\n', (2853, 2871), False, 'from sporco.admm import bpdn\n'), ((3223, 3252), 'sporco.plot.figure', 'plot.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (3234, 3252), False, 'from sporco import plot\n'), ((3253, 3274), 'sporco.plot.subplot', 'plot.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3265, 3274), False, 'from sporco import plot\n'), ((3275, 3361), 'sporco.plot.plot', 'plot.plot', (['fvmx'], {'x': 'lrng', 'ptyp': '"""semilogx"""', 'xlbl': '"""$\\\\lambda$"""', 'ylbl': '"""Error"""', 'fig': 'fig'}), "(fvmx, x=lrng, ptyp='semilogx', xlbl='$\\\\lambda$', ylbl='Error',\n fig=fig)\n", (3284, 3361), False, 'from sporco import plot\n'), ((3367, 3388), 'sporco.plot.subplot', 'plot.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (3379, 3388), False, 'from sporco import plot\n'), ((3389, 3457), 'sporco.plot.plot', 'plot.plot', (['its.ObjFun'], {'xlbl': '"""Iterations"""', 'ylbl': '"""Functional"""', 'fig': 'fig'}), "(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)\n", (3398, 3457), False, 'from sporco import plot\n'), ((3458, 3479), 'sporco.plot.subplot', 'plot.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (3470, 3479), False, 'from sporco import plot\n'), ((3642, 3663), 'sporco.plot.subplot', 'plot.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (3654, 3663), False, 'from sporco import plot\n'), ((3664, 3736), 'sporco.plot.plot', 'plot.plot', (['its.Rho'], {'xlbl': '"""Iterations"""', 'ylbl': '"""Penalty Parameter"""', 'fig': 'fig'}), "(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)\n", (3673, 3736), False, 'from sporco import plot\n'), ((3780, 3787), 'builtins.input', 'input', ([], {}), '()\n', (3785, 3787), False, 'from builtins import input\n'), ((2354, 2381), 'sporco.admm.bpdn.BPDN', 'bpdn.BPDN', (['D', 's', 'lmbda', 'opt'], {}), '(D, s, lmbda, opt)\n', (2363, 2381), False, 'from sporco.admm import bpdn\n'), ((3027, 3045), 'numpy.hstack', 'np.hstack', (['(x0, x)'], {}), '((x0, x))\n', (3036, 3045), True, 'import numpy as np\n'), ((1641, 1662), 'numpy.random.randn', 'np.random.randn', (['N', '(1)'], {}), '(N, 1)\n', (1656, 1662), True, 'import numpy as np\n'), ((2418, 2432), 'numpy.abs', 'np.abs', (['(x - x0)'], {}), '(x - x0)\n', (2424, 2432), True, 'import numpy as np\n'), 
((3490, 3531), 'numpy.vstack', 'np.vstack', (['(its.PrimalRsdl, its.DualRsdl)'], {}), '((its.PrimalRsdl, its.DualRsdl))\n', (3499, 3531), True, 'import numpy as np\n')] |
import pygame
from game.game_logic.game import Game
import matplotlib.pyplot as plt
def main():
scores_history = []
GAME_COUNT = 2
for i in range(GAME_COUNT):
game = Game(400, "Snake AI")
score = game.start()
scores_history.append(score)
print("Game:", i)
plt.ylim(0, 36)
plt.plot(range(len(scores_history)), scores_history)
plt.ylabel('Snake length')
plt.xlabel('Game count')
plt.show()
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"game.game_logic.game.Game",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show"
]
| [((307, 322), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(36)'], {}), '(0, 36)\n', (315, 322), True, 'import matplotlib.pyplot as plt\n'), ((384, 410), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Snake length"""'], {}), "('Snake length')\n", (394, 410), True, 'import matplotlib.pyplot as plt\n'), ((415, 439), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Game count"""'], {}), "('Game count')\n", (425, 439), True, 'import matplotlib.pyplot as plt\n'), ((444, 454), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (452, 454), True, 'import matplotlib.pyplot as plt\n'), ((188, 209), 'game.game_logic.game.Game', 'Game', (['(400)', '"""Snake AI"""'], {}), "(400, 'Snake AI')\n", (192, 209), False, 'from game.game_logic.game import Game\n')] |
import os
import sys
from glob import glob
def create_list(images_dir, output_file, img_ext=".jpg"):
ImgList = os.listdir(images_dir)
val_list = []
for img in ImgList:
img,ext = img.split(".")
val_list.append(img)
with open(os.path.join(images_dir, output_file),'w') as fid:
for line in val_list[:-1]:
fid.write(line + "\n")
fid.write(val_list[-1])
def main():
if len(sys.argv) < 2:
print("Requires images directory")
sys.exit(1)
elif len(sys.argv) < 3:
images_dir = sys.argv[1]
output_file = "image_list.txt"
else:
images_dir = sys.argv[1]
output_file = sys.argv[2]
create_list(images_dir, output_file)
if __name__=="__main__":
main() | [
"os.listdir",
"os.path.join",
"sys.exit"
]
| [((117, 139), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (127, 139), False, 'import os\n'), ((521, 532), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (529, 532), False, 'import sys\n'), ((265, 302), 'os.path.join', 'os.path.join', (['images_dir', 'output_file'], {}), '(images_dir, output_file)\n', (277, 302), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/
# In[ ]:
# Customer churn rate is a business term that describes how heavily
# customers leave a company or stop paying for its
# goods or services.
# It is a key metric for many companies, because acquiring new
# customers is often far more expensive than retaining existing ones
# (in some cases 5 to 20 times more expensive).
# Example use cases:
# 1. Mobile carriers, cable TV operators, and companies that
# process credit card payments.
# 2. Casinos use predictive models to determine the ideal floor
# conditions that keep blackjack players
# at the table.
# 3. Airlines can offer customers who have complaints
# an upgrade to a first-class ticket.
# Effective customer retention boils down to a task in which,
# using the available data, one must distinguish customers
# who are about to leave from those
# who are not.
# In[ ]:
# datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv
# In[88]:
# Load libraries
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import KFold, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
# In[3]:
# Load dataset
raw_churn_df = pd.read_csv('churn.csv')
# In[17]:
display(raw_churn_df.shape)
display(raw_churn_df.head(), raw_churn_df.tail())
display(raw_churn_df.columns.values)
display(raw_churn_df.dtypes)
display(raw_churn_df.isnull().sum())
# In[78]:
# Isolate target data
y = raw_churn_df['Churn?']
X = raw_churn_df.drop('Churn?', axis=1)
# In[79]:
# Drop irrelevant features
features_to_drop = ['State', 'Area Code', 'Phone']
X = X.drop(features_to_drop, axis=1)
# In[80]:
# Encode yes/no with 1/0 values
X["Int'l Plan"] = X["Int'l Plan"].map({'no': 0, 'yes': 1})
X["VMail Plan"] = X["VMail Plan"].map({'no': 0, 'yes': 1})
# In[81]:
# Scale everything
std_scaler = StandardScaler(with_mean=True)
X = std_scaler.fit_transform(X)
display(X.shape)
# In[90]:
# Perform CV for SVM, random forest and kNN
def try_clf(X, y, clf_nofit):
X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
clf = clf_nofit.fit(X_tr, y_tr)
y_pred = clf.predict(X_val)
display(clf_nofit.__class__.__name__)
display(accuracy_score(y_val, y_pred))
display(confusion_matrix(y_val, y_pred))
display("prec, rec, f1, support", precision_recall_fscore_support(y_val, y_pred))
try_clf(X, y, SVC(gamma='scale'))
try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
try_clf(X, y, KNeighborsClassifier())
# std scaler with_mean=False accuracies:
# 0.9256594724220624
# 0.9484412470023981
# 0.8896882494004796
# std scaler with_mean=True accuracies:
# 0.9256594724220624
# 0.9496402877697842
# 0.8896882494004796
# In[86]:
# Recall
# What fraction of the actual churns were
# correctly predicted as churns?
# Precision
# What fraction of the predicted churns
# turned out to be actual churns?
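# Worked example (added for illustration; the counts below are hypothetical),
# with churn as the positive class:
#   precision = TP / (TP + FP),  recall = TP / (TP + FN)
tp_, fp_, fn_ = 40, 10, 20
print("precision:", tp_ / (tp_ + fp_))  # 0.8
print("recall:", tp_ / (tp_ + fn_))     # ~0.67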
# In[101]:
# # Predict probabilities
# def try_probab(X, y, clf_nofit):
# X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
# clf = clf_nofit.fit(X_tr, y_tr)
# y_prob = clf.predict_proba(X_val)
# # for i in range(len(X)):
# # display("y_true={0}, Predicted={1}".format(y[i], y_prob[i]))
# display(pd.value_counts(y_prob[:, 1]))
# try_probab(X, y, SVC(gamma='scale', probability=True))
# # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
# # try_probab(X, y, KNeighborsClassifier())
# # for i in range(len(Xnew)):
# # print("X=%s, Predicted=%s" % (Xnew[i], ynew[i]))
# In[ ]:
# todo: calibration and discrimination
# https://github.com/ghuiber/churn/blob/master/churn_measurements.py
# from churn_measurements import calibration, discrimination
| [
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.precision_recall_fscore_support",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.accuracy_score",
"sklearn.svm.SVC"
]
| [((1752, 1776), 'pandas.read_csv', 'pd.read_csv', (['"""churn.csv"""'], {}), "('churn.csv')\n", (1763, 1776), True, 'import pandas as pd\n'), ((2422, 2452), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(True)'}), '(with_mean=True)\n', (2436, 2452), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2623, 2662), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(42)'}), '(X, y, random_state=42)\n', (2639, 2662), False, 'from sklearn.model_selection import KFold, train_test_split\n'), ((2996, 3014), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (2999, 3014), False, 'from sklearn.svm import SVC\n'), ((3030, 3081), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'n_jobs': '(-1)'}), '(n_estimators=100, n_jobs=-1)\n', (3052, 3081), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3097, 3119), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (3117, 3119), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2805, 2834), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (2819, 2834), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((2853, 2884), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (2869, 2884), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((2929, 2975), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (2960, 2975), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n')] |
import requests
import tarfile
import os
def download_file(url, directory):
local_filename = os.path.join(directory, url.split('/')[-1])
print ("Downloading %s --> %s"%(url, local_filename))
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return local_filename
def extract_tar(fpath):
fname_dir, fname = os.path.split(fpath)
dest_path = os.path.join(fname_dir,fname.split('.')[0])
print ("Extracting %s --> %s"%(fpath, dest_path))
if fname.endswith("tar.gz"):
tar = tarfile.open(fpath, "r:gz")
tar.extractall(path=fname_dir)
tar.close()
elif fname.endswith("tar"):
tar = tarfile.open(fname, "r:")
tar.extractall(path=fname_dir)
tar.close()
return dest_path
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f)) | [
"tarfile.open",
"requests.get",
"os.path.split",
"os.path.basename",
"os.walk"
]
| [((484, 504), 'os.path.split', 'os.path.split', (['fpath'], {}), '(fpath)\n', (497, 504), False, 'import os\n'), ((962, 980), 'os.walk', 'os.walk', (['startpath'], {}), '(startpath)\n', (969, 980), False, 'import os\n'), ((209, 239), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (221, 239), False, 'import requests\n'), ((666, 693), 'tarfile.open', 'tarfile.open', (['fpath', '"""r:gz"""'], {}), "(fpath, 'r:gz')\n", (678, 693), False, 'import tarfile\n'), ((799, 824), 'tarfile.open', 'tarfile.open', (['fname', '"""r:"""'], {}), "(fname, 'r:')\n", (811, 824), False, 'import tarfile\n'), ((1112, 1134), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (1128, 1134), False, 'import os\n')] |
""" A bar graph.
(c) September 2017 by <NAME>
"""
import argparse
from collections import defaultdict
from keras.models import Sequential
from keras.layers import Dense, Activation
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
np.set_printoptions(suppress=True, linewidth=200)
# Some matplotlib settings.
plt.style.use('seaborn-darkgrid')
titlesize = 21
labelsize = 17
legendsize = 15
ticksize = 15
bar_width = 0.80
opacity = 1.0
error_config = {'ecolor': '0.0', 'linewidth':3.0}
def deprecated():
"""
This is a deprecated method, only to show how to possibly combine these into
one plot. However, I find this unwieldly.
"""
fig, ax = plt.subplots()
bar_width = 0.80
opacity = 0.5
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(np.array([0,1]), means_lin, bar_width,
alpha=opacity,
color='b',
yerr=std_lin,
error_kw=error_config,
label='Lin')
rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width,
alpha=opacity,
color='r',
yerr=std_rfs,
error_kw=error_config,
label='RF')
rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width,
alpha=opacity,
color='y',
yerr=std_dnn,
error_kw=error_config,
label='DNN')
plt.xticks(np.arange(11) + bar_width / 2,
('A','B','','D','E','F','G','','','J','K'))
plt.xlabel('Group')
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.tight_layout()
plt.legend()
plt.savefig('figures/validation_set_results.png')
def plot(results, vv):
lin_mean = []
lin_std = []
lin_keys = []
rfs_mean = []
rfs_std = []
rfs_keys = []
dnn_mean = []
dnn_std = []
dnn_keys = []
sorted_keys = sorted(results.keys())
for key in sorted_keys:
info = [ss['loss'] for ss in results[key]]
if 'Lin' in key:
lin_mean.append(np.mean(info))
lin_std.append(np.std(info))
lin_keys.append(key)
elif 'RFs' in key:
rfs_mean.append(np.mean(info))
rfs_std.append(np.std(info))
rfs_keys.append(key)
elif 'DNN' in key:
dnn_mean.append(np.mean(info))
dnn_std.append(np.std(info))
dnn_keys.append(key)
print("\nlin_mean: {}".format(lin_mean))
print("lin_std: {}".format(lin_std))
print("lin_keys: {}".format(lin_keys))
print("\nrfs_mean: {}".format(rfs_mean))
print("rfs_std: {}".format(rfs_std))
print("rfs_keys: {}".format(rfs_keys))
print("\nDNN results:")
for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys):
print("{:.2f}\t{:.2f}\t{}".format(mean,std,key))
# sys.exit()
# Use this to determine which DNN models should be here.
dnn_threshold = 3.0
real_index = 0
for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
if mean > dnn_threshold:
continue
real_index += 1
# Gah! Now I can finally make the bar chart. I think it's easiest to have it
# split across three different subplots, one per algorithm category.
width_ratio = [len(lin_keys),len(rfs_keys),real_index]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5),
gridspec_kw={'width_ratios':width_ratio})
for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)):
ax[0].bar(np.array([ii]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)):
ax[1].bar(np.array([ii]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
real_index = 0
for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
if mean > dnn_threshold:
continue
ax[2].bar(np.array([real_index]), mean, bar_width,
alpha=opacity,
yerr=std,
error_kw=error_config,
label=key[4:])
real_index += 1
# Some rather tedious but necessary stuff to make it publication-quality.
ax[0].set_title('Linear', fontsize=titlesize)
ax[1].set_title('Random Forests', fontsize=titlesize)
ax[2].set_title('Deep Neural Networks', fontsize=titlesize)
ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize)
for i in range(3):
ax[i].set_xlabel('Algorithm', fontsize=labelsize)
ax[i].set_ylim([0.0,9.0])
ax[i].tick_params(axis='y', labelsize=ticksize)
ax[i].set_xticklabels([])
ax[0].legend(loc="best", ncol=1, prop={'size':legendsize})
ax[1].legend(loc="best", ncol=2, prop={'size':legendsize})
ax[2].legend(loc="best", ncol=3, prop={'size':legendsize})
plt.tight_layout()
plt.savefig('figures/validation_set_results_v'+vv+'.png')
if __name__ == "__main__":
pp = argparse.ArgumentParser()
pp.add_argument('--version', type=int)
pp.add_argument('--kfolds', type=int, default=10)
args = pp.parse_args()
assert args.version is not None
VERSION = str(args.version).zfill(2)
file_name = 'results/results_kfolds10_v'+VERSION+'.npy'
results = np.load(file_name)[()]
print("results has keys: {}".format(results.keys()))
plot(results, VERSION)
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"numpy.std",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"numpy.set_printoptions"
]
| [((201, 222), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (215, 222), False, 'import matplotlib\n'), ((285, 334), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'linewidth': '(200)'}), '(suppress=True, linewidth=200)\n', (304, 334), True, 'import numpy as np\n'), ((364, 397), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (377, 397), True, 'import matplotlib.pyplot as plt\n'), ((717, 731), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (729, 731), True, 'import matplotlib.pyplot as plt\n'), ((1649, 1668), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Group"""'], {}), "('Group')\n", (1659, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1693), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scores"""'], {}), "('Scores')\n", (1683, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1737), 'matplotlib.pyplot.title', 'plt.title', (['"""Scores by group and gender"""'], {}), "('Scores by group and gender')\n", (1707, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1742, 1760), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1758, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1777), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1775, 1777), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1831), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/validation_set_results.png"""'], {}), "('figures/validation_set_results.png')\n", (1793, 1831), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(16, 5)', 'gridspec_kw': "{'width_ratios': width_ratio}"}), "(nrows=1, ncols=3, figsize=(16, 5), gridspec_kw={'width_ratios':\n width_ratio})\n", (3485, 3567), True, 'import matplotlib.pyplot as plt\n'), ((5199, 5217), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5215, 5217), True, 'import matplotlib.pyplot as plt\n'), ((5222, 5283), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('figures/validation_set_results_v' + vv + '.png')"], {}), "('figures/validation_set_results_v' + vv + '.png')\n", (5233, 5283), True, 'import matplotlib.pyplot as plt\n'), ((5318, 5343), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5341, 5343), False, 'import argparse\n'), ((830, 846), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (838, 846), True, 'import numpy as np\n'), ((1071, 1096), 'numpy.array', 'np.array', (['[3, 4, 5, 6, 7]'], {}), '([3, 4, 5, 6, 7])\n', (1079, 1096), True, 'import numpy as np\n'), ((1317, 1334), 'numpy.array', 'np.array', (['[9, 10]'], {}), '([9, 10])\n', (1325, 1334), True, 'import numpy as np\n'), ((5620, 5638), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (5627, 5638), True, 'import numpy as np\n'), ((1554, 1567), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (1563, 1567), True, 'import numpy as np\n'), ((3680, 3694), 'numpy.array', 'np.array', (['[ii]'], {}), '([ii])\n', (3688, 3694), True, 'import numpy as np\n'), ((3938, 3952), 'numpy.array', 'np.array', (['[ii]'], {}), '([ii])\n', (3946, 3952), True, 'import numpy as np\n'), ((4269, 4291), 'numpy.array', 'np.array', (['[real_index]'], {}), '([real_index])\n', (4277, 4291), True, 'import numpy as np\n'), ((2193, 2206), 'numpy.mean', 'np.mean', (['info'], {}), '(info)\n', (2200, 2206), True, 'import numpy as np\n'), ((2235, 2247), 
'numpy.std', 'np.std', (['info'], {}), '(info)\n', (2241, 2247), True, 'import numpy as np\n'), ((2337, 2350), 'numpy.mean', 'np.mean', (['info'], {}), '(info)\n', (2344, 2350), True, 'import numpy as np\n'), ((2379, 2391), 'numpy.std', 'np.std', (['info'], {}), '(info)\n', (2385, 2391), True, 'import numpy as np\n'), ((2481, 2494), 'numpy.mean', 'np.mean', (['info'], {}), '(info)\n', (2488, 2494), True, 'import numpy as np\n'), ((2523, 2535), 'numpy.std', 'np.std', (['info'], {}), '(info)\n', (2529, 2535), True, 'import numpy as np\n')] |
# SPDX-License-Identifier: Apache-2.0
"""Unit Tests for custom rnns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from backend_test_base import Tf2OnnxBackendTestBase
from common import * # pylint: disable=wildcard-import, unused-wildcard-import
from tf2onnx.tf_loader import is_tf2
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
# pylint: disable=abstract-method,arguments-differ
if is_tf2():
BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell
MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
LSTMCell = tf.nn.rnn_cell.LSTMCell
GRUCell = tf.nn.rnn_cell.LSTMCell
RNNCell = tf.nn.rnn_cell.RNNCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
dynamic_rnn = tf.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
class CustomRnnCellTests(Tf2OnnxBackendTestBase):
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn(self):
size = 5 # size of each model layer.
batch_size = 1
cell = GatedGRUCell(size)
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_time_major(self):
size = 5 # size of each model layer.
batch_size = 1
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
cell = GatedGRUCell(size)
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=[4, 3, 4, 5, 2, 1])
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_non_const_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
y_val = np.array([4, 3, 4, 5, 2, 1], dtype=np.int32)
def func(x, seq_length):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=tf.identity(seq_length))
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_const_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`.
decoder_time_step = 6
x_val = np.random.randn(decoder_time_step, input_size).astype('f')
x_val = np.stack([x_val] * batch_size)
attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f')
def func(x):
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
output_0 = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return output_0, tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_gru_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = GRUCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = GRUCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder_input_has_none_dim(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_multi_rnn_lstm(self, state_is_tuple=True):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
initializer = init_ops.constant_initializer(0.5)
cell_0 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_1 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_2 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_opset(9, "ReverseSequence")
@skip_tf2()
@allow_missing_shapes("Missing RNN shape")
def test_bidrectional_attention_wrapper_lstm_encoder(self):
size = 30
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f')
def func(encoder_x, decoder_x, seq_length):
encoder_cell = LSTMCell(size)
attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
# [9, 3, 30], [9, 30]
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
(match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \
bidirectional_dynamic_rnn(cell_fw=match_cell_fw,
cell_bw=match_cell_bk,
inputs=decoder_x,
sequence_length=tf.identity(seq_length),
dtype=tf.float32,
time_major=True)
matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1)
matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1)
return tf.identity(matched_output, name="output_0"), tf.identity(matched_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val,
"input_3:0": np.array([6, 5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)}
input_names_with_port = ["input_1:0", "input_2:0", "input_3:0"]
output_names_with_port = ["output_0:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
class GatedGRUCell(RNNCell):
def __init__(self, hidden_dim, reuse=None):
super().__init__(self, _reuse=reuse)
self._num_units = hidden_dim
self._activation = tf.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
# inputs shape: [batch size, time step, input size] = [1, 3, 2]
# num_units: 5
# W shape: [2, 3 * 5] = [2, 15]
# U shape: [5, 3 * 5] = [5, 15]
# b shape: [1, 3 * 5] = [1, 15]
# state shape: [batch size, state size] = [1, 5]
input_dim = inputs.get_shape()[-1]
assert input_dim is not None, "input dimension must be defined"
# W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
W = np.arange(30.0, dtype=np.float32).reshape((2, 15))
# U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
U = np.arange(75.0, dtype=np.float32).reshape((5, 15))
# b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
b = np.arange(15.0, dtype=np.float32).reshape((1, 15))
xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
hu = tf.split(tf.matmul(state, U), 3, 1)
r = tf.sigmoid(xw[0] + hu[0])
z = tf.sigmoid(xw[1] + hu[1])
h1 = self._activation(xw[2] + r * hu[2])
next_h = h1 * (1 - z) + state * z
return next_h, next_h
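# Illustrative sketch (not part of the original test file): driving GatedGRUCell
# the way the shape comments above assume -- a [batch=1, time=3, input=2] sequence
# through a 5-unit cell. It is wrapped in a function so nothing runs at import
# time; np, tf and dynamic_rnn are assumed to be the names imported at the top
# of this file.
def _gated_gru_cell_example():
    x_val = np.array([[[1., 1.], [2., 2.], [3., 3.]]], dtype=np.float32)  # [1, 3, 2]
    cell = GatedGRUCell(hidden_dim=5)
    outputs, final_state = dynamic_rnn(cell, tf.constant(x_val), dtype=tf.float32)
    return outputs, final_state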
if __name__ == '__main__':
unittest_main()
| [
"tensorflow.contrib.seq2seq.BahdanauAttention",
"tensorflow.contrib.seq2seq.AttentionWrapper",
"tf2onnx.tf_loader.is_tf2",
"numpy.array",
"numpy.stack",
"tensorflow.sigmoid",
"tensorflow.concat",
"tensorflow.matmul",
"tensorflow.identity",
"tensorflow.python.ops.init_ops.constant_initializer",
"numpy.random.randn",
"numpy.arange"
]
| [((584, 592), 'tf2onnx.tf_loader.is_tf2', 'is_tf2', ([], {}), '()\n', (590, 592), False, 'from tf2onnx.tf_loader import is_tf2\n'), ((1563, 1627), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], dtype=np.float32)\n', (1571, 1627), True, 'import numpy as np\n'), ((1638, 1668), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (1646, 1668), True, 'import numpy as np\n'), ((2311, 2375), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], dtype=np.float32)\n', (2319, 2375), True, 'import numpy as np\n'), ((2386, 2416), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (2394, 2416), True, 'import numpy as np\n'), ((3073, 3165), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]],\n dtype=np.float32)\n', (3081, 3165), True, 'import numpy as np\n'), ((3168, 3198), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (3176, 3198), True, 'import numpy as np\n'), ((3994, 4086), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]],\n dtype=np.float32)\n', (4002, 4086), True, 'import numpy as np\n'), ((4089, 4119), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (4097, 4119), True, 'import numpy as np\n'), ((4136, 4180), 'numpy.array', 'np.array', (['[4, 3, 4, 5, 2, 1]'], {'dtype': 'np.int32'}), '([4, 3, 4, 5, 2, 1], dtype=np.int32)\n', (4144, 4180), True, 'import numpy as np\n'), ((5395, 5425), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (5403, 5425), True, 'import numpy as np\n'), ((7263, 7301), 'numpy.stack', 'np.stack', (['([encoder_x_val] * batch_size)'], {}), '([encoder_x_val] * batch_size)\n', (7271, 7301), True, 'import numpy as np\n'), ((7439, 7477), 'numpy.stack', 'np.stack', (['([decoder_x_val] * batch_size)'], {}), '([decoder_x_val] * batch_size)\n', (7447, 7477), True, 'import numpy as np\n'), ((9579, 9617), 'numpy.stack', 'np.stack', (['([encoder_x_val] * batch_size)'], {}), '([encoder_x_val] * batch_size)\n', (9587, 9617), True, 'import numpy as np\n'), ((9755, 9793), 'numpy.stack', 'np.stack', (['([decoder_x_val] * batch_size)'], {}), '([decoder_x_val] * batch_size)\n', (9763, 9793), True, 'import numpy as np\n'), ((11894, 11932), 'numpy.stack', 'np.stack', (['([encoder_x_val] * batch_size)'], {}), '([encoder_x_val] * batch_size)\n', (11902, 11932), True, 'import numpy as np\n'), ((12070, 12108), 'numpy.stack', 'np.stack', (['([decoder_x_val] * batch_size)'], {}), '([decoder_x_val] * batch_size)\n', (12078, 12108), True, 'import numpy as np\n'), ((13787, 13863), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]], dtype=np.float32)\n', (13795, 13863), True, 'import numpy as np\n'), ((13872, 13902), 'numpy.stack', 'np.stack', (['([x_val] * batch_size)'], {}), '([x_val] * batch_size)\n', (13880, 13902), True, 'import numpy as np\n'), ((15642, 15680), 'numpy.stack', 'np.stack', (['([encoder_x_val] * batch_size)'], {}), '([encoder_x_val] * batch_size)\n', 
(15650, 15680), True, 'import numpy as np\n'), ((19725, 19750), 'tensorflow.sigmoid', 'tf.sigmoid', (['(xw[0] + hu[0])'], {}), '(xw[0] + hu[0])\n', (19735, 19750), True, 'import tensorflow as tf\n'), ((19763, 19788), 'tensorflow.sigmoid', 'tf.sigmoid', (['(xw[1] + hu[1])'], {}), '(xw[1] + hu[1])\n', (19773, 19788), True, 'import tensorflow as tf\n'), ((5571, 5636), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (5607, 5636), True, 'import tensorflow as tf\n'), ((5795, 5951), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (5830, 5951), True, 'import tensorflow as tf\n'), ((7672, 7708), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output_0"""'}), "(output, name='output_0')\n", (7683, 7708), True, 'import tensorflow as tf\n'), ((7781, 7846), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (7817, 7846), True, 'import tensorflow as tf\n'), ((8076, 8232), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (8111, 8232), True, 'import tensorflow as tf\n'), ((9980, 10016), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output_0"""'}), "(output, name='output_0')\n", (9991, 10016), True, 'import tensorflow as tf\n'), ((10089, 10154), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (10125, 10154), True, 'import tensorflow as tf\n'), ((10383, 10539), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (10418, 10539), True, 'import tensorflow as tf\n'), ((12296, 12332), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output_0"""'}), "(output, name='output_0')\n", (12307, 12332), True, 'import tensorflow as tf\n'), ((12405, 12470), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (12441, 12470), True, 'import tensorflow as tf\n'), ((12700, 12856), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (12735, 12856), True, 'import tensorflow as tf\n'), ((13950, 13984), 'tensorflow.python.ops.init_ops.constant_initializer', 
'init_ops.constant_initializer', (['(0.5)'], {}), '(0.5)\n', (13979, 13984), False, 'from tensorflow.python.ops import init_ops\n'), ((16058, 16123), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['attn_size', 'attention_states'], {}), '(attn_size, attention_states)\n', (16094, 16123), True, 'import tensorflow as tf\n'), ((16353, 16509), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (16388, 16509), True, 'import tensorflow as tf\n'), ((16786, 16942), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['cell', 'attention_mechanism'], {'attention_layer_size': 'attn_size', 'cell_input_fn': 'match_input_fn', 'output_attention': '(False)'}), '(cell, attention_mechanism,\n attention_layer_size=attn_size, cell_input_fn=match_input_fn,\n output_attention=False)\n', (16821, 16942), True, 'import tensorflow as tf\n'), ((17698, 17752), 'tensorflow.concat', 'tf.concat', (['[match_output_fw, match_output_bk]'], {'axis': '(-1)'}), '([match_output_fw, match_output_bk], axis=-1)\n', (17707, 17752), True, 'import tensorflow as tf\n'), ((17781, 17850), 'tensorflow.concat', 'tf.concat', (['[match_state_fw.cell_state, match_state_bk.cell_state]', '(-1)'], {}), '([match_state_fw.cell_state, match_state_bk.cell_state], -1)\n', (17790, 17850), True, 'import tensorflow as tf\n'), ((18075, 18128), 'numpy.array', 'np.array', (['[6, 5, 4, 3, 2, 1, 2, 3, 6]'], {'dtype': 'np.int32'}), '([6, 5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)\n', (18083, 18128), True, 'import numpy as np\n'), ((19686, 19705), 'tensorflow.matmul', 'tf.matmul', (['state', 'U'], {}), '(state, U)\n', (19695, 19705), True, 'import tensorflow as tf\n'), ((1798, 1828), 'tensorflow.identity', 'tf.identity', (['xs'], {'name': '"""output"""'}), "(xs, name='output')\n", (1809, 1828), True, 'import tensorflow as tf\n'), ((1830, 1864), 'tensorflow.identity', 'tf.identity', (['s'], {'name': '"""final_state"""'}), "(s, name='final_state')\n", (1841, 1864), True, 'import tensorflow as tf\n'), ((2583, 2613), 'tensorflow.identity', 'tf.identity', (['xs'], {'name': '"""output"""'}), "(xs, name='output')\n", (2594, 2613), True, 'import tensorflow as tf\n'), ((2615, 2649), 'tensorflow.identity', 'tf.identity', (['s'], {'name': '"""final_state"""'}), "(s, name='final_state')\n", (2626, 2649), True, 'import tensorflow as tf\n'), ((3475, 3510), 'tensorflow.identity', 'tf.identity', (['outputs'], {'name': '"""output"""'}), "(outputs, name='output')\n", (3486, 3510), True, 'import tensorflow as tf\n'), ((3512, 3554), 'tensorflow.identity', 'tf.identity', (['cell_state'], {'name': '"""cell_state"""'}), "(cell_state, name='cell_state')\n", (3523, 3554), True, 'import tensorflow as tf\n'), ((4474, 4509), 'tensorflow.identity', 'tf.identity', (['outputs'], {'name': '"""output"""'}), "(outputs, name='output')\n", (4485, 4509), True, 'import tensorflow as tf\n'), ((4511, 4553), 'tensorflow.identity', 'tf.identity', (['cell_state'], {'name': '"""cell_state"""'}), "(cell_state, name='cell_state')\n", (4522, 4553), True, 'import tensorflow as tf\n'), ((5320, 5366), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'input_size'], {}), '(decoder_time_step, input_size)\n', (5335, 5366), True, 
'import numpy as np\n'), ((5454, 5503), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'time_step', 'attn_size'], {}), '(batch_size, time_step, attn_size)\n', (5469, 5503), True, 'import numpy as np\n'), ((5693, 5732), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (5702, 5732), True, 'import tensorflow as tf\n'), ((6301, 6335), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output"""'}), "(output, name='output')\n", (6312, 6335), True, 'import tensorflow as tf\n'), ((6337, 6391), 'tensorflow.identity', 'tf.identity', (['attr_state.cell_state'], {'name': '"""final_state"""'}), "(attr_state.cell_state, name='final_state')\n", (6348, 6391), True, 'import tensorflow as tf\n'), ((7180, 7226), 'numpy.random.randn', 'np.random.randn', (['encoder_time_step', 'input_size'], {}), '(encoder_time_step, input_size)\n', (7195, 7226), True, 'import numpy as np\n'), ((7356, 7402), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'input_size'], {}), '(decoder_time_step, input_size)\n', (7371, 7402), True, 'import numpy as np\n'), ((7974, 8013), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (7983, 8013), True, 'import tensorflow as tf\n'), ((8601, 8635), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output"""'}), "(output, name='output')\n", (8612, 8635), True, 'import tensorflow as tf\n'), ((8637, 8691), 'tensorflow.identity', 'tf.identity', (['attr_state.cell_state'], {'name': '"""final_state"""'}), "(attr_state.cell_state, name='final_state')\n", (8648, 8691), True, 'import tensorflow as tf\n'), ((9496, 9542), 'numpy.random.randn', 'np.random.randn', (['encoder_time_step', 'input_size'], {}), '(encoder_time_step, input_size)\n', (9511, 9542), True, 'import numpy as np\n'), ((9672, 9718), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'input_size'], {}), '(decoder_time_step, input_size)\n', (9687, 9718), True, 'import numpy as np\n'), ((10282, 10321), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (10291, 10321), True, 'import tensorflow as tf\n'), ((10896, 10930), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output"""'}), "(output, name='output')\n", (10907, 10930), True, 'import tensorflow as tf\n'), ((10932, 10986), 'tensorflow.identity', 'tf.identity', (['attr_state.cell_state'], {'name': '"""final_state"""'}), "(attr_state.cell_state, name='final_state')\n", (10943, 10986), True, 'import tensorflow as tf\n'), ((11811, 11857), 'numpy.random.randn', 'np.random.randn', (['encoder_time_step', 'input_size'], {}), '(encoder_time_step, input_size)\n', (11826, 11857), True, 'import numpy as np\n'), ((11987, 12033), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'input_size'], {}), '(decoder_time_step, input_size)\n', (12002, 12033), True, 'import numpy as np\n'), ((12598, 12637), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (12607, 12637), True, 'import tensorflow as tf\n'), ((13215, 13249), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""output"""'}), "(output, name='output')\n", (13226, 13249), True, 'import tensorflow as tf\n'), ((13251, 13305), 'tensorflow.identity', 'tf.identity', (['attr_state.cell_state'], {'name': '"""final_state"""'}), "(attr_state.cell_state, name='final_state')\n", (13262, 13305), 
True, 'import tensorflow as tf\n'), ((14631, 14666), 'tensorflow.identity', 'tf.identity', (['outputs'], {'name': '"""output"""'}), "(outputs, name='output')\n", (14642, 14666), True, 'import tensorflow as tf\n'), ((14668, 14710), 'tensorflow.identity', 'tf.identity', (['cell_state'], {'name': '"""cell_state"""'}), "(cell_state, name='cell_state')\n", (14679, 14710), True, 'import tensorflow as tf\n'), ((15559, 15605), 'numpy.random.randn', 'np.random.randn', (['encoder_time_step', 'input_size'], {}), '(encoder_time_step, input_size)\n', (15574, 15605), True, 'import numpy as np\n'), ((15735, 15793), 'numpy.random.randn', 'np.random.randn', (['decoder_time_step', 'batch_size', 'input_size'], {}), '(decoder_time_step, batch_size, input_size)\n', (15750, 15793), True, 'import numpy as np\n'), ((16251, 16290), 'tensorflow.concat', 'tf.concat', (['[curr_input, state]'], {'axis': '(-1)'}), '([curr_input, state], axis=-1)\n', (16260, 16290), True, 'import tensorflow as tf\n'), ((17870, 17914), 'tensorflow.identity', 'tf.identity', (['matched_output'], {'name': '"""output_0"""'}), "(matched_output, name='output_0')\n", (17881, 17914), True, 'import tensorflow as tf\n'), ((17916, 17962), 'tensorflow.identity', 'tf.identity', (['matched_state'], {'name': '"""final_state"""'}), "(matched_state, name='final_state')\n", (17927, 17962), True, 'import tensorflow as tf\n'), ((19238, 19271), 'numpy.arange', 'np.arange', (['(30.0)'], {'dtype': 'np.float32'}), '(30.0, dtype=np.float32)\n', (19247, 19271), True, 'import numpy as np\n'), ((19405, 19438), 'numpy.arange', 'np.arange', (['(75.0)'], {'dtype': 'np.float32'}), '(75.0, dtype=np.float32)\n', (19414, 19438), True, 'import numpy as np\n'), ((19558, 19591), 'numpy.arange', 'np.arange', (['(15.0)'], {'dtype': 'np.float32'}), '(15.0, dtype=np.float32)\n', (19567, 19591), True, 'import numpy as np\n'), ((19632, 19652), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'W'], {}), '(inputs, W)\n', (19641, 19652), True, 'import tensorflow as tf\n'), ((4430, 4453), 'tensorflow.identity', 'tf.identity', (['seq_length'], {}), '(seq_length)\n', (4441, 4453), True, 'import tensorflow as tf\n'), ((17524, 17547), 'tensorflow.identity', 'tf.identity', (['seq_length'], {}), '(seq_length)\n', (17535, 17547), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Email: <EMAIL>
# @Date: 04.2020
# Context: CHARM PROJECT - Hazard perception
"""
Command-line entry-point skeleton for the CHARM hazard-perception project.
"""
# Imports
import sys
#import os
# Global variables
# Class declarations
# Function declarations
def main():
args = sys.argv[1:]
if not args:
print('usage: [--flags options] [inputs] ')
sys.exit(1)
# Main body
if __name__ == '__main__':
main() | [
"sys.exit"
]
| [((444, 455), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (452, 455), False, 'import sys\n')] |
# Databricks notebook source
from pyspark.sql.types import *
from pyspark.sql import functions as F
import base64
import array
# COMMAND ----------
# s is a base64 encoded float[] with first element being the magnitude
def Base64ToFloatArray(s):
arr = array.array('f', base64.b64decode(s))
return (arr[0], arr[1:])
def cosineSimilarity(s1, s2):
(m1, v1) = Base64ToFloatArray(s1)
(m2, v2) = Base64ToFloatArray(s2)
if (m1 == 0) or (m2 == 0):
return 0
else :
return sum(x*y for x,y in zip(v1, v2))/(m1 * m2)
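# Illustrative inverse helper (not in the original notebook): one way to produce
# the base64 layout consumed above -- a float32 array whose first element is the
# vector magnitude, followed by the components, matching array.array('f', ...)
# in Base64ToFloatArray.
def FloatArrayToBase64(vector):
  magnitude = sum(x * x for x in vector) ** 0.5
  arr = array.array('f', [magnitude] + list(vector))
  return base64.b64encode(arr.tobytes()).decode('ascii')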
# Register udf functions so that it could be used in dataframe
#
# Perform same computation as cosineSimilarity()
#
@F.udf("float")
def udfCosineSimilarity(s1, s2):
return cosineSimilarity(s1, s2)
# COMMAND ----------
# MAGIC %md **NetworkSimilarity** class to compute Network Similarity
# COMMAND ----------
# Parameters:
# resource: resource stream path
# container: container name in Azure Storage (AS) account
# account: Azure Storage (AS) account
# sas: complete 'Blob service SAS URL' of the shared access signature (sas) for the container
# key: access key for the container, if sas is specified, key is ignored
#
# Note:
# resource does not have header
# you need to provide value for either sas or key
#
class NetworkSimilarity(AzureStorageAccess):
# constructor
def __init__(self, resource, container, account, sas='', key=''):
AzureStorageAccess.__init__(self, container, account, sas, key)
schema = StructType()
schema.add(StructField('EntityId', LongType(), False))
schema.add(StructField('EntityType', StringType(), False))
schema.add(StructField('Data', StringType(), False))
self.df = spark.read.format('csv').options(header='false', delimiter='\t').schema(schema).load(self.getFullpath(resource))
def getDataframe(self):
return self.df
def raiseErrorIfNotFound(self, row, e):
if row is None:
raise KeyError('entity ' + str(e) + ' not found')
def getSimilarity(self, e1, e2):
df = self.df
row1 = df.where(df.EntityId == e1).first()
self.raiseErrorIfNotFound(row1, e1)
row2 = df.where(df.EntityId == e2).first()
self.raiseErrorIfNotFound(row2, e2)
return cosineSimilarity(row1.Data, row2.Data)
def getTopEntities(self, e, targetType = '', maxCount = 20, minScore = 0.0):
df1 = self.df
row1 = df1.where(df1.EntityId == e).first()
self.raiseErrorIfNotFound(row1, e)
if targetType == '':
df2 = df1.where(df1.EntityId != e)
else :
df2 = df1.where((df1.EntityId != e) & (df1.EntityType == targetType))
df3 = df2.select(df2.EntityId, df2.EntityType, udfCosineSimilarity(F.lit(row1.Data), df2.Data).alias('Score'))
return df3.where(df3.Score >= minScore).orderBy(df3.Score.desc()).limit(maxCount)
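# COMMAND ----------
# Illustrative usage sketch (not part of the original notebook). The argument
# values are placeholders -- point them at the real entity-embedding resource in
# your Azure Storage account before running.
#   similarity = NetworkSimilarity(resource='<resource path>', container='<container>',
#                                  account='<account>', sas='<sas url>')
#   score = similarity.getSimilarity(e1, e2)   # cosine similarity of two entities
#   top = similarity.getTopEntities(e1, targetType='', maxCount=20, minScore=0.0)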
| [
"pyspark.sql.functions.udf",
"pyspark.sql.functions.lit",
"base64.b64decode"
]
| [((646, 660), 'pyspark.sql.functions.udf', 'F.udf', (['"""float"""'], {}), "('float')\n", (651, 660), True, 'from pyspark.sql import functions as F\n'), ((273, 292), 'base64.b64decode', 'base64.b64decode', (['s'], {}), '(s)\n', (289, 292), False, 'import base64\n'), ((2664, 2680), 'pyspark.sql.functions.lit', 'F.lit', (['row1.Data'], {}), '(row1.Data)\n', (2669, 2680), True, 'from pyspark.sql import functions as F\n')] |
#! /usr/bin/env python3
from .base_miner import BasePostGradientMiner
import torch
from ..utils import loss_and_miner_utils as lmu
# adapted from
# https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/
# /embedding_learning/model.py
class DistanceWeightedMiner(BasePostGradientMiner):
def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs):
super().__init__(**kwargs)
self.cutoff = cutoff
self.nonzero_loss_cutoff = nonzero_loss_cutoff
def mine(self, embeddings, labels):
label_set = torch.unique(labels)
n, d = embeddings.size()
dist_mat = lmu.dist_mat(embeddings)
dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device)
# so that we don't get log(0). We mask the diagonal out later anyway
# Cut off to avoid high variance.
dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device))
# Subtract max(log(distance)) for stability.
# See the first equation from Section 4 of the paper
log_weights = (2.0 - float(d)) * torch.log(dist_mat) - (
float(d - 3) / 2
) * torch.log(1.0 - 0.25 * (dist_mat ** 2.0))
weights = torch.exp(log_weights - torch.max(log_weights))
# Sample only negative examples by setting weights of
# the same-class examples to 0.
mask = torch.ones(weights.size()).to(embeddings.device)
for i in label_set:
idx = (labels == i).nonzero()
mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0
weights = weights * mask * ((dist_mat < self.nonzero_loss_cutoff).float())
weights = weights / torch.sum(weights, dim=1, keepdim=True)
np_weights = weights.cpu().numpy()
return lmu.get_random_triplet_indices(labels, weights=np_weights)
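# Minimal usage sketch (illustrative, not part of the original module). The cutoff
# values follow the common settings from "Sampling Matters in Deep Embedding
# Learning"; embeddings are assumed to be L2-normalized, and any extra kwargs are
# forwarded to BasePostGradientMiner unchanged.
#   miner = DistanceWeightedMiner(cutoff=0.5, nonzero_loss_cutoff=1.4)
#   embeddings = torch.nn.functional.normalize(torch.randn(32, 128), dim=1)
#   labels = torch.randint(0, 8, (32,))
#   anchors, positives, negatives = miner.mine(embeddings, labels)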
| [
"torch.unique",
"torch.log",
"torch.max",
"torch.tensor",
"torch.sum"
]
| [((548, 568), 'torch.unique', 'torch.unique', (['labels'], {}), '(labels)\n', (560, 568), False, 'import torch\n'), ((1681, 1720), 'torch.sum', 'torch.sum', (['weights'], {'dim': '(1)', 'keepdim': '(True)'}), '(weights, dim=1, keepdim=True)\n', (1690, 1720), False, 'import torch\n'), ((1090, 1109), 'torch.log', 'torch.log', (['dist_mat'], {}), '(dist_mat)\n', (1099, 1109), False, 'import torch\n'), ((1155, 1194), 'torch.log', 'torch.log', (['(1.0 - 0.25 * dist_mat ** 2.0)'], {}), '(1.0 - 0.25 * dist_mat ** 2.0)\n', (1164, 1194), False, 'import torch\n'), ((1239, 1261), 'torch.max', 'torch.max', (['log_weights'], {}), '(log_weights)\n', (1248, 1261), False, 'import torch\n'), ((887, 912), 'torch.tensor', 'torch.tensor', (['self.cutoff'], {}), '(self.cutoff)\n', (899, 912), False, 'import torch\n')] |
from functools import partial
from pulsar import Connection, Pool, get_actor
from pulsar.utils.pep import to_string
from pulsar.apps.data import RemoteStore
from pulsar.apps.ds import redis_parser
from .client import RedisClient, Pipeline, Consumer, ResponseError
from .pubsub import RedisPubSub, RedisChannels
class RedisStoreConnection(Connection):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.parser = self._producer._parser_class()
async def execute(self, *args, **options):
consumer = self.current_consumer()
await consumer.start((args, options))
result = await consumer.on_finished
if isinstance(result, ResponseError):
raise result.exception
return result
async def execute_pipeline(self, commands, raise_on_error=True):
consumer = self.current_consumer()
consumer.start((commands, raise_on_error, []))
result = await consumer.on_finished
if isinstance(result, ResponseError):
raise result.exception
return result
class RedisStore(RemoteStore):
'''Redis :class:`.Store` implementation.
'''
protocol_factory = partial(RedisStoreConnection, Consumer)
supported_queries = frozenset(('filter', 'exclude'))
def _init(self, namespace=None, parser_class=None, pool_size=50,
decode_responses=False, **kwargs):
self._decode_responses = decode_responses
if not parser_class:
actor = get_actor()
pyparser = actor.cfg.redis_py_parser if actor else False
parser_class = redis_parser(pyparser)
self._parser_class = parser_class
if namespace:
self._urlparams['namespace'] = namespace
self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop)
if self._database is None:
self._database = 0
self._database = int(self._database)
self.loaded_scripts = set()
@property
def pool(self):
return self._pool
@property
def namespace(self):
        '''The prefix namespace to append to all transactions on keys
'''
n = self._urlparams.get('namespace')
return '%s:' % n if n else ''
def key(self):
return (self._dns, self._encoding)
def client(self):
'''Get a :class:`.RedisClient` for the Store'''
return RedisClient(self)
def pipeline(self):
'''Get a :class:`.Pipeline` for the Store'''
return Pipeline(self)
def pubsub(self, protocol=None):
return RedisPubSub(self, protocol=protocol)
def channels(self, protocol=None, **kw):
return RedisChannels(self.pubsub(protocol=protocol), **kw)
def ping(self):
return self.client().ping()
async def execute(self, *args, **options):
connection = await self._pool.connect()
with connection:
result = await connection.execute(*args, **options)
return result
async def execute_pipeline(self, commands, raise_on_error=True):
conn = await self._pool.connect()
with conn:
result = await conn.execute_pipeline(commands, raise_on_error)
return result
async def connect(self, protocol_factory=None):
protocol_factory = protocol_factory or self.create_protocol
if isinstance(self._host, tuple):
host, port = self._host
transport, connection = await self._loop.create_connection(
protocol_factory, host, port)
else:
raise NotImplementedError('Could not connect to %s' %
str(self._host))
if self._password:
await connection.execute('AUTH', self._password)
if self._database:
await connection.execute('SELECT', self._database)
return connection
def flush(self):
return self.execute('flushdb')
def close(self):
'''Close all open connections.'''
return self._pool.close()
def has_query(self, query_type):
return query_type in self.supported_queries
def basekey(self, meta, *args):
key = '%s%s' % (self.namespace, meta.table_name)
postfix = ':'.join((to_string(p) for p in args if p is not None))
return '%s:%s' % (key, postfix) if postfix else key
def meta(self, meta):
'''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''
# indices = dict(((idx.attname, idx.unique) for idx in meta.indices))
data = meta.as_dict()
data['namespace'] = self.basekey(meta)
return data
class CompiledQuery:
def __init__(self, pipe, query):
self.pipe = pipe
| [
"pulsar.apps.ds.redis_parser",
"pulsar.Pool",
"pulsar.utils.pep.to_string",
"functools.partial",
"pulsar.get_actor"
]
| [((1192, 1231), 'functools.partial', 'partial', (['RedisStoreConnection', 'Consumer'], {}), '(RedisStoreConnection, Consumer)\n', (1199, 1231), False, 'from functools import partial\n'), ((1776, 1832), 'pulsar.Pool', 'Pool', (['self.connect'], {'pool_size': 'pool_size', 'loop': 'self._loop'}), '(self.connect, pool_size=pool_size, loop=self._loop)\n', (1780, 1832), False, 'from pulsar import Connection, Pool, get_actor\n'), ((1507, 1518), 'pulsar.get_actor', 'get_actor', ([], {}), '()\n', (1516, 1518), False, 'from pulsar import Connection, Pool, get_actor\n'), ((1615, 1637), 'pulsar.apps.ds.redis_parser', 'redis_parser', (['pyparser'], {}), '(pyparser)\n', (1627, 1637), False, 'from pulsar.apps.ds import redis_parser\n'), ((4258, 4270), 'pulsar.utils.pep.to_string', 'to_string', (['p'], {}), '(p)\n', (4267, 4270), False, 'from pulsar.utils.pep import to_string\n')] |
# Generated by Django 3.0.7 on 2020-06-16 05:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('tasks', '0004_auto_20200616_0116'),
]
operations = [
migrations.AddField(
model_name='userreward',
name='created_dt',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='userreward',
name='last_updated_dt',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='usertask',
name='created_dt',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='usertask',
name='last_updated_dt',
field=models.DateTimeField(auto_now=True),
),
]
| [
"django.db.models.DateTimeField"
]
| [((369, 443), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'default': 'django.utils.timezone.now'}), '(auto_now_add=True, default=django.utils.timezone.now)\n', (389, 443), False, 'from django.db import migrations, models\n'), ((612, 647), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (632, 647), False, 'from django.db import migrations, models\n'), ((773, 847), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'default': 'django.utils.timezone.now'}), '(auto_now_add=True, default=django.utils.timezone.now)\n', (793, 847), False, 'from django.db import migrations, models\n'), ((1014, 1049), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1034, 1049), False, 'from django.db import migrations, models\n')] |
import deephaven.TableTools as tt
import deephaven.Plot as plt
t = tt.emptyTable(50)\
.update("X = i + 5", "XLow = X -1", "XHigh = X + 1", "Y = Math.random() * 5", "YLow = Y - 1", "YHigh = Y + 1", "USym = i % 2 == 0 ? `AAPL` : `MSFT`")
p = plt.plot("S1", t, "X", "Y").lineColor("black").show()
p2 = plt.plot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
p3 = plt.plot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
p4 = plt.plot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
p4 = plt.plot3d("S1", t, "X", "X", "Y").show()
pBy = plt.plotBy("S1", t, "X", "Y", "USym").show()
pBy = plt.plot3dBy("S1", t, "X", "X", "Y", "USym").show()
cp = plt.catPlot("S1", t, "X", "Y").lineColor("black").show()
cp2 = plt.catPlot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
cp3 = plt.catPlot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
cp4 = plt.catPlot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
cp = plt.catPlot3d("S1", t, "X", "X", "Y").show()
cpBy = plt.catPlotBy("S1", t, "X", "Y", "USym").show()
cpBy = plt.catPlot3dBy("S1", t, "X", "X", "Y", "USym").show()
pp = plt.piePlot("S1", t, "X", "Y")
chp = plt.catHistPlot("S1", t, "X").show()
hp = plt.histPlot("S1", t, "X", 5).show()
hp = plt.histPlot("S1", t, "X", 0, 10, 5).show()
ep = plt.errorBarXY("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh").show()
epBy = plt.errorBarXYBy("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh", "USym").show()
ep2 = plt.errorBarX("S1", t, "X", "XLow", "XHigh", "Y").show()
epBy2 = plt.errorBarXBy("S1", t, "X", "XLow", "XHigh", "Y", "USym").show()
ep3 = plt.errorBarY("S1", t, "X", "Y", "YLow", "YHigh").show()
epBy3 = plt.errorBarYBy("S1", t, "X", "Y", "YLow", "YHigh", "USym").show()
doubles = [3, 4, 3, 5, 4, 5]
time = 1491946585000000000
t = tt.newTable(tt.col("USym", ["A", "B", "A", "B", "A", "B"]),
tt.doubleCol("Open", doubles), tt.doubleCol("High", doubles),
tt.doubleCol("Low", doubles), tt.doubleCol("Close", doubles))
t = t.updateView("Time = new DBDateTime(time + (MINUTE * i))")
ohlc = plt.ohlcPlot("Test1", t, "Time", "Open", "High", "Low", "Close")
ohlcPlotBy = plt.figure().newChart(0)\
.chartTitle("Chart Title")\
.newAxes()\
.xLabel("X")\
.yLabel("Y")\
.ohlcPlotBy("Test1", t, "Time", "Open", "High", "Low", "Close", "USym")
categories = ["Samsung", "Others", "Nokia", "Apple", "MSFT"]
valuesD = [27.8, 55.3, 16.8, 17.1, 23.1]
valuesI = [27, 55, 16, 17, 15]
ap = plt.plot("S1", valuesD, valuesI).show()
ap = plt.plot3d("S1", valuesI, valuesI, valuesI).show()
acp = plt.catPlot("S1", categories, valuesI).show()
acp2 = plt.catPlot3d("S1", categories, categories, valuesD).show()
achp = plt.catHistPlot("S1", categories).show()
app = plt.figure().xLabel("X").yLabel("Y").piePlot("S1", categories, valuesI).pointLabelFormat("{0}").show()
aep = plt.errorBarXY("S1", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show()
aep2 = plt.errorBarX("S1", valuesD, valuesD, valuesD, valuesD).show()
aep3 = plt.errorBarY("S1", valuesD, valuesD, valuesD, valuesD).show()
hp = plt.histPlot("S1", valuesD, 5).show()
hp = plt.histPlot("S1", valuesD, 0, 10, 5).show()
hp = plt.histPlot("S1", valuesI, 5).show()
| [
"deephaven.Plot.catHistPlot",
"deephaven.Plot.plot3d",
"deephaven.Plot.plot",
"deephaven.Plot.figure",
"deephaven.Plot.histPlot",
"deephaven.Plot.piePlot",
"deephaven.TableTools.emptyTable",
"deephaven.Plot.catPlot",
"deephaven.Plot.plotBy",
"deephaven.TableTools.doubleCol",
"deephaven.Plot.errorBarXYBy",
"deephaven.Plot.ohlcPlot",
"deephaven.Plot.errorBarXBy",
"deephaven.TableTools.col",
"deephaven.Plot.catPlot3d",
"deephaven.Plot.catPlotBy",
"deephaven.Plot.errorBarXY",
"deephaven.Plot.errorBarY",
"deephaven.Plot.plot3dBy",
"deephaven.Plot.catPlot3dBy",
"deephaven.Plot.errorBarX",
"deephaven.Plot.errorBarYBy"
]
| [((1214, 1244), 'deephaven.Plot.piePlot', 'plt.piePlot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (1225, 1244), True, 'import deephaven.Plot as plt\n'), ((2181, 2245), 'deephaven.Plot.ohlcPlot', 'plt.ohlcPlot', (['"""Test1"""', 't', '"""Time"""', '"""Open"""', '"""High"""', '"""Low"""', '"""Close"""'], {}), "('Test1', t, 'Time', 'Open', 'High', 'Low', 'Close')\n", (2193, 2245), True, 'import deephaven.Plot as plt\n'), ((1905, 1951), 'deephaven.TableTools.col', 'tt.col', (['"""USym"""', "['A', 'B', 'A', 'B', 'A', 'B']"], {}), "('USym', ['A', 'B', 'A', 'B', 'A', 'B'])\n", (1911, 1951), True, 'import deephaven.TableTools as tt\n'), ((1969, 1998), 'deephaven.TableTools.doubleCol', 'tt.doubleCol', (['"""Open"""', 'doubles'], {}), "('Open', doubles)\n", (1981, 1998), True, 'import deephaven.TableTools as tt\n'), ((2000, 2029), 'deephaven.TableTools.doubleCol', 'tt.doubleCol', (['"""High"""', 'doubles'], {}), "('High', doubles)\n", (2012, 2029), True, 'import deephaven.TableTools as tt\n'), ((2047, 2075), 'deephaven.TableTools.doubleCol', 'tt.doubleCol', (['"""Low"""', 'doubles'], {}), "('Low', doubles)\n", (2059, 2075), True, 'import deephaven.TableTools as tt\n'), ((2077, 2107), 'deephaven.TableTools.doubleCol', 'tt.doubleCol', (['"""Close"""', 'doubles'], {}), "('Close', doubles)\n", (2089, 2107), True, 'import deephaven.TableTools as tt\n'), ((69, 86), 'deephaven.TableTools.emptyTable', 'tt.emptyTable', (['(50)'], {}), '(50)\n', (82, 86), True, 'import deephaven.TableTools as tt\n'), ((559, 593), 'deephaven.Plot.plot3d', 'plt.plot3d', (['"""S1"""', 't', '"""X"""', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'X', 'Y')\n", (569, 593), True, 'import deephaven.Plot as plt\n'), ((609, 646), 'deephaven.Plot.plotBy', 'plt.plotBy', (['"""S1"""', 't', '"""X"""', '"""Y"""', '"""USym"""'], {}), "('S1', t, 'X', 'Y', 'USym')\n", (619, 646), True, 'import deephaven.Plot as plt\n'), ((660, 704), 'deephaven.Plot.plot3dBy', 'plt.plot3dBy', (['"""S1"""', 't', '"""X"""', '"""X"""', '"""Y"""', '"""USym"""'], {}), "('S1', t, 'X', 'X', 'Y', 'USym')\n", (672, 704), True, 'import deephaven.Plot as plt\n'), ((1044, 1081), 'deephaven.Plot.catPlot3d', 'plt.catPlot3d', (['"""S1"""', 't', '"""X"""', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'X', 'Y')\n", (1057, 1081), True, 'import deephaven.Plot as plt\n'), ((1097, 1137), 'deephaven.Plot.catPlotBy', 'plt.catPlotBy', (['"""S1"""', 't', '"""X"""', '"""Y"""', '"""USym"""'], {}), "('S1', t, 'X', 'Y', 'USym')\n", (1110, 1137), True, 'import deephaven.Plot as plt\n'), ((1153, 1200), 'deephaven.Plot.catPlot3dBy', 'plt.catPlot3dBy', (['"""S1"""', 't', '"""X"""', '"""X"""', '"""Y"""', '"""USym"""'], {}), "('S1', t, 'X', 'X', 'Y', 'USym')\n", (1168, 1200), True, 'import deephaven.Plot as plt\n'), ((1253, 1282), 'deephaven.Plot.catHistPlot', 'plt.catHistPlot', (['"""S1"""', 't', '"""X"""'], {}), "('S1', t, 'X')\n", (1268, 1282), True, 'import deephaven.Plot as plt\n'), ((1296, 1325), 'deephaven.Plot.histPlot', 'plt.histPlot', (['"""S1"""', 't', '"""X"""', '(5)'], {}), "('S1', t, 'X', 5)\n", (1308, 1325), True, 'import deephaven.Plot as plt\n'), ((1338, 1374), 'deephaven.Plot.histPlot', 'plt.histPlot', (['"""S1"""', 't', '"""X"""', '(0)', '(10)', '(5)'], {}), "('S1', t, 'X', 0, 10, 5)\n", (1350, 1374), True, 'import deephaven.Plot as plt\n'), ((1388, 1455), 'deephaven.Plot.errorBarXY', 'plt.errorBarXY', (['"""S1"""', 't', '"""X"""', '"""XLow"""', '"""XHigh"""', '"""Y"""', '"""YLow"""', '"""YHigh"""'], {}), "('S1', t, 'X', 'XLow', 'XHigh', 'Y', 
'YLow', 'YHigh')\n", (1402, 1455), True, 'import deephaven.Plot as plt\n'), ((1470, 1547), 'deephaven.Plot.errorBarXYBy', 'plt.errorBarXYBy', (['"""S1"""', 't', '"""X"""', '"""XLow"""', '"""XHigh"""', '"""Y"""', '"""YLow"""', '"""YHigh"""', '"""USym"""'], {}), "('S1', t, 'X', 'XLow', 'XHigh', 'Y', 'YLow', 'YHigh', 'USym')\n", (1486, 1547), True, 'import deephaven.Plot as plt\n'), ((1561, 1610), 'deephaven.Plot.errorBarX', 'plt.errorBarX', (['"""S1"""', 't', '"""X"""', '"""XLow"""', '"""XHigh"""', '"""Y"""'], {}), "('S1', t, 'X', 'XLow', 'XHigh', 'Y')\n", (1574, 1610), True, 'import deephaven.Plot as plt\n'), ((1626, 1685), 'deephaven.Plot.errorBarXBy', 'plt.errorBarXBy', (['"""S1"""', 't', '"""X"""', '"""XLow"""', '"""XHigh"""', '"""Y"""', '"""USym"""'], {}), "('S1', t, 'X', 'XLow', 'XHigh', 'Y', 'USym')\n", (1641, 1685), True, 'import deephaven.Plot as plt\n'), ((1699, 1748), 'deephaven.Plot.errorBarY', 'plt.errorBarY', (['"""S1"""', 't', '"""X"""', '"""Y"""', '"""YLow"""', '"""YHigh"""'], {}), "('S1', t, 'X', 'Y', 'YLow', 'YHigh')\n", (1712, 1748), True, 'import deephaven.Plot as plt\n'), ((1764, 1823), 'deephaven.Plot.errorBarYBy', 'plt.errorBarYBy', (['"""S1"""', 't', '"""X"""', '"""Y"""', '"""YLow"""', '"""YHigh"""', '"""USym"""'], {}), "('S1', t, 'X', 'Y', 'YLow', 'YHigh', 'USym')\n", (1779, 1823), True, 'import deephaven.Plot as plt\n'), ((2587, 2619), 'deephaven.Plot.plot', 'plt.plot', (['"""S1"""', 'valuesD', 'valuesI'], {}), "('S1', valuesD, valuesI)\n", (2595, 2619), True, 'import deephaven.Plot as plt\n'), ((2632, 2675), 'deephaven.Plot.plot3d', 'plt.plot3d', (['"""S1"""', 'valuesI', 'valuesI', 'valuesI'], {}), "('S1', valuesI, valuesI, valuesI)\n", (2642, 2675), True, 'import deephaven.Plot as plt\n'), ((2690, 2728), 'deephaven.Plot.catPlot', 'plt.catPlot', (['"""S1"""', 'categories', 'valuesI'], {}), "('S1', categories, valuesI)\n", (2701, 2728), True, 'import deephaven.Plot as plt\n'), ((2743, 2795), 'deephaven.Plot.catPlot3d', 'plt.catPlot3d', (['"""S1"""', 'categories', 'categories', 'valuesD'], {}), "('S1', categories, categories, valuesD)\n", (2756, 2795), True, 'import deephaven.Plot as plt\n'), ((2811, 2844), 'deephaven.Plot.catHistPlot', 'plt.catHistPlot', (['"""S1"""', 'categories'], {}), "('S1', categories)\n", (2826, 2844), True, 'import deephaven.Plot as plt\n'), ((2969, 3043), 'deephaven.Plot.errorBarXY', 'plt.errorBarXY', (['"""S1"""', 'valuesD', 'valuesD', 'valuesD', 'valuesD', 'valuesD', 'valuesD'], {}), "('S1', valuesD, valuesD, valuesD, valuesD, valuesD, valuesD)\n", (2983, 3043), True, 'import deephaven.Plot as plt\n'), ((3058, 3113), 'deephaven.Plot.errorBarX', 'plt.errorBarX', (['"""S1"""', 'valuesD', 'valuesD', 'valuesD', 'valuesD'], {}), "('S1', valuesD, valuesD, valuesD, valuesD)\n", (3071, 3113), True, 'import deephaven.Plot as plt\n'), ((3128, 3183), 'deephaven.Plot.errorBarY', 'plt.errorBarY', (['"""S1"""', 'valuesD', 'valuesD', 'valuesD', 'valuesD'], {}), "('S1', valuesD, valuesD, valuesD, valuesD)\n", (3141, 3183), True, 'import deephaven.Plot as plt\n'), ((3198, 3228), 'deephaven.Plot.histPlot', 'plt.histPlot', (['"""S1"""', 'valuesD', '(5)'], {}), "('S1', valuesD, 5)\n", (3210, 3228), True, 'import deephaven.Plot as plt\n'), ((3241, 3278), 'deephaven.Plot.histPlot', 'plt.histPlot', (['"""S1"""', 'valuesD', '(0)', '(10)', '(5)'], {}), "('S1', valuesD, 0, 10, 5)\n", (3253, 3278), True, 'import deephaven.Plot as plt\n'), ((3291, 3321), 'deephaven.Plot.histPlot', 'plt.histPlot', (['"""S1"""', 'valuesI', '(5)'], {}), "('S1', valuesI, 5)\n", (3303, 
3321), True, 'import deephaven.Plot as plt\n'), ((248, 275), 'deephaven.Plot.plot', 'plt.plot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (256, 275), True, 'import deephaven.Plot as plt\n'), ((719, 749), 'deephaven.Plot.catPlot', 'plt.catPlot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (730, 749), True, 'import deephaven.Plot as plt\n'), ((307, 334), 'deephaven.Plot.plot', 'plt.plot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (315, 334), True, 'import deephaven.Plot as plt\n'), ((480, 507), 'deephaven.Plot.plot', 'plt.plot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (488, 507), True, 'import deephaven.Plot as plt\n'), ((782, 812), 'deephaven.Plot.catPlot', 'plt.catPlot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (793, 812), True, 'import deephaven.Plot as plt\n'), ((963, 993), 'deephaven.Plot.catPlot', 'plt.catPlot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (974, 993), True, 'import deephaven.Plot as plt\n'), ((386, 413), 'deephaven.Plot.plot', 'plt.plot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (394, 413), True, 'import deephaven.Plot as plt\n'), ((865, 895), 'deephaven.Plot.catPlot', 'plt.catPlot', (['"""S1"""', 't', '"""X"""', '"""Y"""'], {}), "('S1', t, 'X', 'Y')\n", (876, 895), True, 'import deephaven.Plot as plt\n'), ((2859, 2871), 'deephaven.Plot.figure', 'plt.figure', ([], {}), '()\n', (2869, 2871), True, 'import deephaven.Plot as plt\n'), ((2260, 2272), 'deephaven.Plot.figure', 'plt.figure', ([], {}), '()\n', (2270, 2272), True, 'import deephaven.Plot as plt\n')] |
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from flask import current_app as app
from flask import render_template
from flask import url_for
import logging
LOG = logging.getLogger(__name__)
from rhoci.test import bp # noqa
@bp.route('/index')
@bp.route('/')
def index():
"""All tests."""
jenkins_url = app.config['custom']['jenkins']['url']
uf = url_for('api.all_tests')
return render_template('tests/index.html',
jenkins_url=jenkins_url,
uf=uf)
@bp.route('/class/<class_name>/name/<name>')
def test(class_name, name):
"""Specific test summary."""
uf = url_for('api.test_to_jobs', class_name=class_name, test_name=name)
return render_template('tests/test_to_jobs.html', uf=uf)
| [
"logging.getLogger",
"flask.render_template",
"rhoci.test.bp.route",
"flask.url_for"
]
| [((757, 784), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (774, 784), False, 'import logging\n'), ((823, 841), 'rhoci.test.bp.route', 'bp.route', (['"""/index"""'], {}), "('/index')\n", (831, 841), False, 'from rhoci.test import bp\n'), ((843, 856), 'rhoci.test.bp.route', 'bp.route', (['"""/"""'], {}), "('/')\n", (851, 856), False, 'from rhoci.test import bp\n'), ((1118, 1161), 'rhoci.test.bp.route', 'bp.route', (['"""/class/<class_name>/name/<name>"""'], {}), "('/class/<class_name>/name/<name>')\n", (1126, 1161), False, 'from rhoci.test import bp\n'), ((957, 981), 'flask.url_for', 'url_for', (['"""api.all_tests"""'], {}), "('api.all_tests')\n", (964, 981), False, 'from flask import url_for\n'), ((993, 1060), 'flask.render_template', 'render_template', (['"""tests/index.html"""'], {'jenkins_url': 'jenkins_url', 'uf': 'uf'}), "('tests/index.html', jenkins_url=jenkins_url, uf=uf)\n", (1008, 1060), False, 'from flask import render_template\n'), ((1232, 1298), 'flask.url_for', 'url_for', (['"""api.test_to_jobs"""'], {'class_name': 'class_name', 'test_name': 'name'}), "('api.test_to_jobs', class_name=class_name, test_name=name)\n", (1239, 1298), False, 'from flask import url_for\n'), ((1310, 1359), 'flask.render_template', 'render_template', (['"""tests/test_to_jobs.html"""'], {'uf': 'uf'}), "('tests/test_to_jobs.html', uf=uf)\n", (1325, 1359), False, 'from flask import render_template\n')] |
import typing
from .core import Component
_Controller = typing.TypeVar('_Controller')
_ControllerType = typing.Type[_Controller]
ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object])
_controller_factory: typing.Optional[ControllerFactory] = None
def controller(controller_class: _ControllerType) -> _ControllerType:
Component.register(controller_class)
return controller_class
def set_controller_factory(controller_factory: ControllerFactory) -> None:
global _controller_factory
_controller_factory = controller_factory
def build_controller(controller_class: _ControllerType) -> _Controller:
if _controller_factory is None:
return controller_class()
return _controller_factory(controller_class)
def get_component(controller_class: _ControllerType) -> Component:
return Component.get_by_cls(controller_class)
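# Illustrative usage sketch (the names below are hypothetical, not part of this
# module): register a controller class, optionally plug in a custom factory such
# as a DI container, then build instances through build_controller().
#   @controller
#   class GreetingController:
#       def greet(self) -> str:
#           return 'hello'
#   set_controller_factory(lambda cls: cls())
#   instance = build_controller(GreetingController)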
| [
"typing.NewType",
"typing.TypeVar"
]
| [((58, 87), 'typing.TypeVar', 'typing.TypeVar', (['"""_Controller"""'], {}), "('_Controller')\n", (72, 87), False, 'import typing\n'), ((151, 226), 'typing.NewType', 'typing.NewType', (['"""ControllerFactory"""', 'typing.Callable[[typing.Type], object]'], {}), "('ControllerFactory', typing.Callable[[typing.Type], object])\n", (165, 226), False, 'import typing\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import csv
from datetime import datetime, timedelta
import os
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import and_
from ..constants import CONST
from ..models import AccidentMarker
from ..utilities import init_flask, decode_hebrew, open_utf8
from ..import importmail
from xml.dom import minidom
import math
import requests
import logging
############################################################################################
# United.py is responsible for the parsing and deployment of "united hatzala" data to the DB
############################################################################################
PROVIDER_CODE = CONST.UNITED_HATZALA_CODE
TIME_ZONE = 2
# convert IMS hours code to hours
RAIN_DURATION_CODE_TO_HOURS = {"1": 6, "2": 12, "3": 18, "4": 24, "/": 24, "5": 1, "6": 2, "7": 3, "8": 9, "9": 15}
WEATHER = {"0": 1, "1": 2, "3": 3, "4": 4, "5": 5, "7": 6, "8": 6, "9": 7, "10": 8, "11": 9,
"12": 10, "17": 11, "18": 12, "19": 13, "20": 14, "21": 15, "22": 16, "23": 17, "24": 18,
"25": 19, "26": 20, "27": 21, "28": 22, "29": 23, "30": 24, "31": 24, "32": 24, "33": 7,
"34": 7, "35": 7, "36": 25, "37": 25, "38": 25, "39": 25, "40": 26, "41": 27, "42": 28,
"43": 29, "44": 9, "45": 30, "46": 30, "47": 30, "48": 31, "49": 32, "50": 33, "51": 34,
"52": 33, "53": 35, "54": 36, "55": 37, "56": 38, "57": 39, "58": 37, "59": 37, "61": 37, "60": 36,
"62": 40, "63": 15, "64": 41, "65": 19, "66": 42, "67": 43, "68": 44, "69": 45, "70": 46, "71": 47,
"72": 48, "73": 16, "74": 50, "75": 51, "76": 52, "77": 53, "78": 54, "79": 55, "80": 56, "81": 57,
"82": 58, "83": 59, "84": 60, "85": 61, "86": 62, "87": 63, "88": 64, "89": 65, "90": 66, "91": 67,
"92": 68, "93": 69, "94": 70, "95": 71, "96": 72, "97": 73, "98": 74, "99": 75}
def retrieve_ims_xml(): # getting an xml document from the ims(israel meteorological service) website
logging.basicConfig(level=logging.DEBUG)
s = requests.session()
r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml')
xml_doc = minidom.parseString(r.text)
collection = xml_doc.documentElement
return collection
def parse_date(created):
"""
:param created: Date & Time string from csv
:return: Python datetime object
"""
global time
global hour
DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M']
for date_format in DATE_FORMATS:
try:
if date_format == '%Y-%m-%d %H:%M:%S':
time = datetime.strptime(str(created)[:-4], date_format)
hour = time.strftime('%H')
hour = int(hour)
else:
time = datetime.strptime(str(created)[:-3], date_format)
hour = time.strftime('%H')
hour = int(hour) if str(created).endswith('AM') else int(hour) + 12
break
except ValueError:
pass
return datetime(time.year, time.month, time.day, hour, time.minute, 0)
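# Worked example (illustrative): parse_date('4/25/2016 09:15:00 PM') matches the
# '%m/%d/%Y %I:%M:%S' format once the trailing ' PM' is sliced off, and because
# the raw string ends with 'PM' twelve hours are added, giving
# datetime(2016, 4, 25, 21, 15, 0).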
def is_nth_weekday(nth, daynum, year,
                   month):  # find if date is the nth occurrence of the daynum day of the week (ex: the fourth Sunday of April 2016)
# start counting the daynum from monday = 0
return calendar.Calendar(nth).monthdatescalendar(
year,
month
)[nth][daynum]
def get_parent_object_node(node):
while node.parentNode:
node = node.parentNode
if node.nodeName == "Object":
return node
def accident_time_zone_adjustment(created): # return accident time in UTC time
# pylint: disable=unexpected-keyword-arg
accident_date = parse_date(created)
daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3)
winter_clock = is_nth_weekday(4, 6, accident_date.year, 10)
# weather is given in UTC time
# therefore in daylight_saving_time we deduct 3 hours from the local time and in winter clock 2 hours
# [
    accident_date -= timedelta(hours=TIME_ZONE)
    # if the accident happened between April and September (daylight saving time)
    if 3 < accident_date.month < 10:
        accident_date -= timedelta(hours=1)
    # if the accident happened before the last Sunday of October at 02:00
    elif accident_date.month == 10 and (
            winter_clock.day > accident_date.day or (
                winter_clock.day == accident_date.day and accident_date.hour < 2)):
        accident_date -= timedelta(hours=1)
    # if the accident happened after the last Friday of March at 02:00
    elif accident_date.month == 3 and (
            daylight_saving_time.day < accident_date.day or (
                daylight_saving_time.day == accident_date.day and accident_date.hour >= 2)):
        accident_date -= timedelta(hours=1)
# ]
adate = ''.join(
(str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour)))
return adate
def all_station_in_date_frame(collection, created): # return the stations data in the time of the accident
doc = minidom.Document()
base = doc.createElement('accident_date')
doc.appendChild(base)
station_data_in_date = collection.getElementsByTagName('date_selected')
station_data_in_date.sort()
accident_date = accident_time_zone_adjustment(created)
for station in enumerate(station_data_in_date):
if accident_date in str(station.childNodes[0].nodeValue):
base.appendChild(get_parent_object_node(station))
return base
def find_station_by_coordinate(collection, latitude, longitude):
station_place_in_xml = -1
min_distance = float("inf") # initialize big starting value so the distance will always be smaller than the initial
station_data = collection.getElementsByTagName('surface_station')
for i, station in enumerate(station_data):
station_lon = station.getElementsByTagName('station_lon')
assert len(station_lon) == 1
lon = float(station_lon[0].childNodes[0].nodeValue)
lon_difference = (lon - float(longitude)) ** 2
station_lat = station.getElementsByTagName('station_lat')
assert len(station_lat) == 1
lat = float(station_lat[0].childNodes[0].nodeValue)
lat_difference = (lat - float(latitude)) ** 2
temp_dis = math.sqrt(lat_difference + lon_difference)
if temp_dis < min_distance:
min_distance = temp_dis
station_place_in_xml = i
return station_place_in_xml
def convert_xml_values_to_numbers(rain):
    # in the xml the amounts come as three-digit strings (e.g. 4 -> "004");
    # float() already ignores the leading zeros
    rain_in_millimeters = float(rain)
    if rain_in_millimeters >= 990:
        # values of 990 and above encode tenths of a millimeter, e.g. 991 = 0.1
        rain_in_millimeters = (rain_in_millimeters - 990) * 0.1
return rain_in_millimeters
def get_weather_element(station, weather_data, tag):
element = weather_data[station].getElementsByTagName(tag)
if element:
weather_element = element[0].childNodes[0].nodeValue
else:
weather_element = None
return weather_element
def process_weather_data(collection, latitude, longitude):
weather = 1 # default weather is clear sky
station = find_station_by_coordinate(collection, latitude, longitude)
weather_data = collection.getElementsByTagName('surface_observation')
wind_force = get_weather_element(station, weather_data, 'FF')
rain = get_weather_element(station, weather_data, 'RRR')
rain_duration = get_weather_element(station, weather_data,
'TR') # the duration of time in which the rain amount was measured
weather_code = get_weather_element(station, weather_data, 'WW')
if weather_code is not None:
return WEATHER[weather_code.strip()]
    if wind_force is not None:
        if int(wind_force) > 8:
            weather = 76  # windstorm
        elif int(wind_force) > 5:
            weather = 77  # strong winds
    if rain is not None and rain_duration is not None:
        rain_in_millimeters = convert_xml_values_to_numbers(rain)
        rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()]
        # rain amount is between 0.1 and 0.5 millimeter
        if 0.0 < rain_in_millimeters <= 0.5 or (
                0.0 < rain_in_millimeters / rain_hours <= 0.5):
            if weather == 76:
                weather = 80  # windstorm, light rain
            elif weather == 77:
                weather = 84  # strong winds, light rain
            else:
                weather = 37  # light rain
        # average rain amount per hour is between 0.5 and 4.0 millimeters
        if 0.5 < rain_in_millimeters / rain_hours <= 4:
            if weather == 76:
                weather = 81  # rain and windstorm
            elif weather == 77:
                weather = 85  # rain and strong winds
            else:
                weather = 15  # rain
        # average rain amount per hour is between 4.0 and 8.0 millimeters
        elif 4 < rain_in_millimeters / rain_hours <= 8:
            if weather == 76:
                weather = 82  # windstorm, heavy rain
            elif weather == 77:
                weather = 86  # strong winds, heavy rain
            else:
                weather = 78  # heavy rain
        # average rain amount per hour is more than 8.0 millimeters
        elif rain_in_millimeters / rain_hours > 8:
            if weather == 76:
                weather = 83  # windstorm, torrential rain
            elif weather == 77:
                weather = 87  # strong winds, torrential rain
            else:
                weather = 79  # torrential rain
return weather
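# Worked example (illustrative): with no 'WW' weather code and no strong wind,
# rain='002' (2.0 mm) measured over rain_duration='5' (a one-hour interval) gives
# 2.0 mm per hour, which falls in the 0.5-4.0 band above and maps to code 15 (rain).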
CSVMAP = [
{"id": 0, "time": 1, "lat": 2, "long": 3, "street": 4, "city": 6, "comment": 7, "type": 8, "casualties": 9},
{"id": 0, "time": 1, "type": 2, "long": 3, "lat": 4, "city": 5, "street": 6, "comment": 7, "casualties": 8},
]
def create_accidents(collection, file_location):
"""
:param file_location: local location of .csv
:return: Yields a marker object with every iteration
"""
logging.info("\tReading accidents data from '%s'..." % file_location)
with open_utf8(file_location, 'rU') as f:
reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab)
for line, accident in enumerate(reader):
if line == 0: # header
format_version = 0 if "MissionID" in accident[0] else 1
continue
if not accident: # empty line
continue
if line == 1 and accident[0] == "":
logging.warn("\t\tEmpty File!")
continue
csvmap = CSVMAP[format_version]
if accident[csvmap["lat"]] == "" or accident[csvmap["long"]] == "" or \
accident[csvmap["lat"]] is None or accident[csvmap["long"]] is None or \
accident[csvmap["lat"]] == "NULL" or accident[csvmap["long"]] == "NULL":
logging.warn("\t\tMissing coordinates in line {0}. Moving on...".format(line + 1))
continue
created = parse_date(accident[csvmap["time"]])
marker = {'id': accident[csvmap["id"]], 'latitude': accident[csvmap["lat"]],
'longitude': accident[csvmap["long"]], 'created': created, 'provider_code': PROVIDER_CODE,
'title': decode_hebrew(accident[csvmap["type"]], encoding="utf-8")[:100],
'address': decode_hebrew((accident[csvmap["street"]] + ' ' + accident[csvmap["city"]]), encoding="utf-8"),
'accident_severity': 2 if u"קשה" in decode_hebrew(accident[csvmap["type"]], encoding="utf-8") else 3,
'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT,
'description': decode_hebrew(accident[csvmap["comment"]], encoding="utf-8"),
'weather': process_weather_data(collection, accident[csvmap["lat"]],
accident[csvmap["long"]])}
if format_version == 0:
casualties = accident[csvmap["casualties"]]
marker['road_intactness'] = casualties if casualties.isdigit() else 0
yield marker
def import_to_db(collection, path):
"""
:param path: Local files directory ('united_path' on main() below)
:return: length of DB entries after execution
"""
app = init_flask()
db = SQLAlchemy(app)
accidents = list(create_accidents(collection, path))
if not accidents:
return 0
new_ids = [m["id"] for m in accidents
if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m["id"],
AccidentMarker.provider_code == m["provider_code"])).count()]
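    # new_ids now holds only accidents whose (id, provider_code) pair is not already stored in the DB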
if not new_ids:
logging.info("\t\tNothing loaded, all accidents already in DB")
return 0
db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if m["id"] in new_ids])
db.session.commit()
return len(new_ids)
def update_db(collection):
"""
:return: length of DB entries after execution
"""
app = init_flask()
db = SQLAlchemy(app)
united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2)
for accident in united:
if not accident.weather:
accident.weather = process_weather_data(collection, accident.latitude, accident.longitude)
db.session.commit()
logging.info("\tFinished commiting the changes")
def main(light=True, username='', password='', lastmail=False):
"""
Calls importmail.py prior to importing to DB
"""
collection = retrieve_ims_xml()
if not light:
logging.info("Importing data from mail...")
importmail.main(username, password, lastmail)
united_path = "static/data/united/"
total = 0
logging.info("Loading United accidents...")
for united_file in os.listdir(united_path):
if united_file.endswith(".csv"):
total += import_to_db(collection, united_path + united_file)
logging.info("\tImported {0} items".format(total))
update_db(collection)
| [
"logging.basicConfig",
"datetime.datetime",
"requests.session",
"os.listdir",
"logging.warn",
"calendar.Calendar",
"xml.dom.minidom.Document",
"math.sqrt",
"xml.dom.minidom.parseString",
"csv.reader",
"flask_sqlalchemy.SQLAlchemy",
"logging.info",
"sqlalchemy.and_"
]
| [((2034, 2074), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (2053, 2074), False, 'import logging\n'), ((2083, 2101), 'requests.session', 'requests.session', ([], {}), '()\n', (2099, 2101), False, 'import requests\n'), ((2180, 2207), 'xml.dom.minidom.parseString', 'minidom.parseString', (['r.text'], {}), '(r.text)\n', (2199, 2207), False, 'from xml.dom import minidom\n'), ((3109, 3172), 'datetime.datetime', 'datetime', (['time.year', 'time.month', 'time.day', 'hour', 'time.minute', '(0)'], {}), '(time.year, time.month, time.day, hour, time.minute, 0)\n', (3117, 3172), False, 'from datetime import datetime\n'), ((5272, 5290), 'xml.dom.minidom.Document', 'minidom.Document', ([], {}), '()\n', (5288, 5290), False, 'from xml.dom import minidom\n'), ((10533, 10602), 'logging.info', 'logging.info', (['("\\tReading accidents data from \'%s\'..." % file_location)'], {}), '("\\tReading accidents data from \'%s\'..." % file_location)\n', (10545, 10602), False, 'import logging\n'), ((12945, 12960), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (12955, 12960), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((13697, 13712), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (13707, 13712), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((13993, 14041), 'logging.info', 'logging.info', (['"""\tFinished commiting the changes"""'], {}), "('\\tFinished commiting the changes')\n", (14005, 14041), False, 'import logging\n'), ((14392, 14435), 'logging.info', 'logging.info', (['"""Loading United accidents..."""'], {}), "('Loading United accidents...')\n", (14404, 14435), False, 'import logging\n'), ((14459, 14482), 'os.listdir', 'os.listdir', (['united_path'], {}), '(united_path)\n', (14469, 14482), False, 'import os\n'), ((6520, 6562), 'math.sqrt', 'math.sqrt', (['(lat_difference + lon_difference)'], {}), '(lat_difference + lon_difference)\n', (6529, 6562), False, 'import math\n'), ((10667, 10718), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""', 'dialect': 'csv.excel_tab'}), "(f, delimiter=',', dialect=csv.excel_tab)\n", (10677, 10718), False, 'import csv\n'), ((13336, 13399), 'logging.info', 'logging.info', (['"""\t\tNothing loaded, all accidents already in DB"""'], {}), "('\\t\\tNothing loaded, all accidents already in DB')\n", (13348, 13399), False, 'import logging\n'), ((14236, 14279), 'logging.info', 'logging.info', (['"""Importing data from mail..."""'], {}), "('Importing data from mail...')\n", (14248, 14279), False, 'import logging\n'), ((11034, 11065), 'logging.warn', 'logging.warn', (['"""\t\tEmpty File!"""'], {}), "('\\t\\tEmpty File!')\n", (11046, 11065), False, 'import logging\n'), ((3404, 3426), 'calendar.Calendar', 'calendar.Calendar', (['nth'], {}), '(nth)\n', (3421, 3426), False, 'import calendar\n'), ((13163, 13254), 'sqlalchemy.and_', 'and_', (["(AccidentMarker.id == m['id'])", "(AccidentMarker.provider_code == m['provider_code'])"], {}), "(AccidentMarker.id == m['id'], AccidentMarker.provider_code == m[\n 'provider_code'])\n", (13167, 13254), False, 'from sqlalchemy import and_\n')] |
import unittest
from numpy.testing import assert_array_equal
import numpy as np
from libact.base.dataset import Dataset
from libact.models import LogisticRegression
from libact.query_strategies import VarianceReduction
from .utils import run_qs
class VarianceReductionTestCase(unittest.TestCase):
"""Variance reduction test case using artifitial dataset"""
def setUp(self):
self.X = [[-2, -1], [1, 1], [-1, -2], [-1, -1], [1, 2], [2, 1]]
self.y = [0, 1, 0, 1, 0, 1]
self.quota = 4
def test_variance_reduction(self):
trn_ds = Dataset(self.X,
np.concatenate([self.y[:2],
[None] * (len(self.y) - 2)]))
qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1)
qseq = run_qs(trn_ds, qs, self.y, self.quota)
assert_array_equal(qseq, np.array([4, 5, 2, 3]))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.array",
"libact.models.LogisticRegression"
]
| [((936, 951), 'unittest.main', 'unittest.main', ([], {}), '()\n', (949, 951), False, 'import unittest\n'), ((879, 901), 'numpy.array', 'np.array', (['[4, 5, 2, 3]'], {}), '([4, 5, 2, 3])\n', (887, 901), True, 'import numpy as np\n'), ((759, 779), 'libact.models.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (777, 779), False, 'from libact.models import LogisticRegression\n')] |
# Copyright 2015, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for the swift service.
"""
import os
from django import forms
from django.http import StreamingHttpResponse
from django.utils.http import urlunquote
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
import six
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api import swift
@urls.register
class Info(generic.View):
"""API for information about the Swift installation.
"""
url_regex = r'swift/info/$'
@rest_utils.ajax()
def get(self, request):
"""Get information about the Swift installation.
"""
capabilities = api.swift.swift_get_capabilities(request)
return {'info': capabilities}
@urls.register
class Containers(generic.View):
"""API for swift container listing for an account
"""
url_regex = r'swift/containers/$'
@rest_utils.ajax()
def get(self, request):
"""Get the list of containers for this account
TODO(neillc): Add pagination
"""
containers, has_more = api.swift.swift_get_containers(request)
containers = [container.to_dict() for container in containers]
return {'items': containers, 'has_more': has_more}
@urls.register
class Container(generic.View):
"""API for swift container level information
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$'
@rest_utils.ajax()
def get(self, request, container):
"""Get the container details
"""
return api.swift.swift_get_container(request, container).to_dict()
@rest_utils.ajax()
def post(self, request, container):
metadata = {}
if 'is_public' in request.DATA:
metadata['is_public'] = request.DATA['is_public']
# This will raise an exception if the container already exists
try:
api.swift.swift_create_container(request, container,
metadata=metadata)
except exceptions.AlreadyExists as e:
# 409 Conflict
return rest_utils.JSONResponse(str(e), 409)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s' % container,
)
@rest_utils.ajax()
def delete(self, request, container):
try:
api.swift.swift_delete_container(request, container)
except exceptions.Conflict as e:
# It cannot be deleted if it's not empty.
return rest_utils.JSONResponse(str(e), 409)
@rest_utils.ajax(data_required=True)
def put(self, request, container):
metadata = {'is_public': request.DATA['is_public']}
api.swift.swift_update_container(request, container, metadata=metadata)
@urls.register
class Objects(generic.View):
"""API for a list of swift objects
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$'
@rest_utils.ajax()
def get(self, request, container):
"""Get object information.
:param request:
:param container:
:return:
"""
path = request.GET.get('path')
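        # optional pseudo-folder prefix: only objects under it are listed, and the folder entry itself is dropped below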
if path is not None:
path = urlunquote(path)
objects = api.swift.swift_get_objects(
request,
container,
prefix=path
)
# filter out the folder from the listing if we're filtering for
# contents of a (pseudo) folder
contents = [{
'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name,
'name': o.name.split('/')[-1],
'bytes': o.bytes,
'is_subdir': isinstance(o, swift.PseudoFolder),
'is_object': not isinstance(o, swift.PseudoFolder),
'content_type': getattr(o, 'content_type', None)
} for o in objects[0] if o.name != path]
return {'items': contents}
class UploadObjectForm(forms.Form):
file = forms.FileField(required=False)
@urls.register
class Object(generic.View):
"""API for a single swift object or pseudo-folder
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \
'(?P<object_name>.+)$'
# note: not an AJAX request - the body will be raw file content
@csrf_exempt
def post(self, request, container, object_name):
"""Create or replace an object or pseudo-folder
:param request:
:param container:
:param object_name:
If the object_name (ie. POST path) ends in a '/' then a folder is
created, rather than an object. Any file content passed along with
the request will be ignored in that case.
POST parameter:
:param file: the file data for the upload.
:return:
"""
form = UploadObjectForm(request.POST, request.FILES)
if not form.is_valid():
raise rest_utils.AjaxError(500, 'Invalid request')
data = form.clean()
if object_name[-1] == '/':
result = api.swift.swift_create_pseudo_folder(
request,
container,
object_name
)
else:
result = api.swift.swift_upload_object(
request,
container,
object_name,
data['file']
)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s/object/%s' % (container, result.name)
)
@rest_utils.ajax()
def delete(self, request, container, object_name):
if object_name[-1] == '/':
try:
api.swift.swift_delete_folder(request, container, object_name)
except exceptions.Conflict as e:
# In case the given object is pseudo folder
# It cannot be deleted if it's not empty.
return rest_utils.JSONResponse(str(e), 409)
else:
api.swift.swift_delete_object(request, container, object_name)
def get(self, request, container, object_name):
"""Get the object contents.
"""
obj = api.swift.swift_get_object(
request,
container,
object_name
)
# Add the original file extension back on if it wasn't preserved in the
# name given to the object.
filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1]
if not os.path.splitext(obj.name)[1] and obj.orig_name:
name, ext = os.path.splitext(obj.orig_name)
filename = "%s%s" % (filename, ext)
response = StreamingHttpResponse(obj.data)
safe = filename.replace(",", "")
if six.PY2:
safe = safe.encode('utf-8')
response['Content-Disposition'] = 'attachment; filename="%s"' % safe
response['Content-Type'] = 'application/octet-stream'
response['Content-Length'] = obj.bytes
return response
@urls.register
class ObjectMetadata(generic.View):
"""API for a single swift object
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \
'(?P<object_name>.+)$'
@rest_utils.ajax()
def get(self, request, container, object_name):
return api.swift.swift_get_object(
request,
container_name=container,
object_name=object_name,
with_data=False
).to_dict()
@urls.register
class ObjectCopy(generic.View):
"""API to copy a swift object
"""
url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \
'(?P<object_name>.+)$'
@rest_utils.ajax()
def post(self, request, container, object_name):
dest_container = request.DATA['dest_container']
dest_name = request.DATA['dest_name']
try:
result = api.swift.swift_copy_object(
request,
container,
object_name,
dest_container,
dest_name
)
except exceptions.AlreadyExists as e:
return rest_utils.JSONResponse(str(e), 409)
return rest_utils.CreatedResponse(
u'/api/swift/containers/%s/object/%s' % (dest_container,
result.name)
)
| [
"openstack_dashboard.api.swift.swift_upload_object",
"openstack_dashboard.api.swift.swift_delete_container",
"openstack_dashboard.api.swift.swift_create_container",
"openstack_dashboard.api.swift.swift_copy_object",
"openstack_dashboard.api.swift.swift_create_pseudo_folder",
"openstack_dashboard.api.rest.utils.AjaxError",
"openstack_dashboard.api.swift.swift_get_capabilities",
"django.http.StreamingHttpResponse",
"openstack_dashboard.api.swift.swift_delete_object",
"django.utils.http.urlunquote",
"openstack_dashboard.api.swift.swift_get_object",
"os.path.splitext",
"openstack_dashboard.api.swift.swift_get_containers",
"openstack_dashboard.api.swift.swift_get_container",
"django.forms.FileField",
"openstack_dashboard.api.swift.swift_delete_folder",
"openstack_dashboard.api.rest.utils.CreatedResponse",
"openstack_dashboard.api.swift.swift_get_objects",
"openstack_dashboard.api.swift.swift_update_container",
"openstack_dashboard.api.rest.utils.ajax"
]
| [((1200, 1217), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (1215, 1217), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((1573, 1590), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (1588, 1590), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((2105, 2122), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (2120, 2122), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((2292, 2309), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (2307, 2309), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((2931, 2948), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (2946, 2948), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((3226, 3261), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {'data_required': '(True)'}), '(data_required=True)\n', (3241, 3261), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((3607, 3624), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (3622, 3624), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((4614, 4645), 'django.forms.FileField', 'forms.FileField', ([], {'required': '(False)'}), '(required=False)\n', (4629, 4645), False, 'from django import forms\n'), ((6133, 6150), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (6148, 6150), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((7791, 7808), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (7806, 7808), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((8241, 8258), 'openstack_dashboard.api.rest.utils.ajax', 'rest_utils.ajax', ([], {}), '()\n', (8256, 8258), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((1338, 1379), 'openstack_dashboard.api.swift.swift_get_capabilities', 'api.swift.swift_get_capabilities', (['request'], {}), '(request)\n', (1370, 1379), False, 'from openstack_dashboard import api\n'), ((1755, 1794), 'openstack_dashboard.api.swift.swift_get_containers', 'api.swift.swift_get_containers', (['request'], {}), '(request)\n', (1785, 1794), False, 'from openstack_dashboard import api\n'), ((2834, 2901), 'openstack_dashboard.api.rest.utils.CreatedResponse', 'rest_utils.CreatedResponse', (["(u'/api/swift/containers/%s' % container)"], {}), "(u'/api/swift/containers/%s' % container)\n", (2860, 2901), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((3369, 3440), 'openstack_dashboard.api.swift.swift_update_container', 'api.swift.swift_update_container', (['request', 'container'], {'metadata': 'metadata'}), '(request, container, metadata=metadata)\n', (3401, 3440), False, 'from openstack_dashboard import api\n'), ((3902, 3962), 'openstack_dashboard.api.swift.swift_get_objects', 'api.swift.swift_get_objects', (['request', 'container'], {'prefix': 'path'}), '(request, container, prefix=path)\n', (3929, 3962), False, 'from openstack_dashboard import api\n'), ((6012, 6109), 'openstack_dashboard.api.rest.utils.CreatedResponse', 'rest_utils.CreatedResponse', (["(u'/api/swift/containers/%s/object/%s' % (container, result.name))"], {}), "(u'/api/swift/containers/%s/object/%s' % (\n container, result.name))\n", (6038, 6109), True, 'from openstack_dashboard.api.rest import 
utils as rest_utils\n'), ((6764, 6823), 'openstack_dashboard.api.swift.swift_get_object', 'api.swift.swift_get_object', (['request', 'container', 'object_name'], {}), '(request, container, object_name)\n', (6790, 6823), False, 'from openstack_dashboard import api\n'), ((7244, 7275), 'django.http.StreamingHttpResponse', 'StreamingHttpResponse', (['obj.data'], {}), '(obj.data)\n', (7265, 7275), False, 'from django.http import StreamingHttpResponse\n'), ((8747, 8849), 'openstack_dashboard.api.rest.utils.CreatedResponse', 'rest_utils.CreatedResponse', (["(u'/api/swift/containers/%s/object/%s' % (dest_container, result.name))"], {}), "(u'/api/swift/containers/%s/object/%s' % (\n dest_container, result.name))\n", (8773, 8849), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((2572, 2643), 'openstack_dashboard.api.swift.swift_create_container', 'api.swift.swift_create_container', (['request', 'container'], {'metadata': 'metadata'}), '(request, container, metadata=metadata)\n', (2604, 2643), False, 'from openstack_dashboard import api\n'), ((3016, 3068), 'openstack_dashboard.api.swift.swift_delete_container', 'api.swift.swift_delete_container', (['request', 'container'], {}), '(request, container)\n', (3048, 3068), False, 'from openstack_dashboard import api\n'), ((3866, 3882), 'django.utils.http.urlunquote', 'urlunquote', (['path'], {}), '(path)\n', (3876, 3882), False, 'from django.utils.http import urlunquote\n'), ((5543, 5587), 'openstack_dashboard.api.rest.utils.AjaxError', 'rest_utils.AjaxError', (['(500)', '"""Invalid request"""'], {}), "(500, 'Invalid request')\n", (5563, 5587), True, 'from openstack_dashboard.api.rest import utils as rest_utils\n'), ((5674, 5743), 'openstack_dashboard.api.swift.swift_create_pseudo_folder', 'api.swift.swift_create_pseudo_folder', (['request', 'container', 'object_name'], {}), '(request, container, object_name)\n', (5710, 5743), False, 'from openstack_dashboard import api\n'), ((5841, 5917), 'openstack_dashboard.api.swift.swift_upload_object', 'api.swift.swift_upload_object', (['request', 'container', 'object_name', "data['file']"], {}), "(request, container, object_name, data['file'])\n", (5870, 5917), False, 'from openstack_dashboard import api\n'), ((6586, 6648), 'openstack_dashboard.api.swift.swift_delete_object', 'api.swift.swift_delete_object', (['request', 'container', 'object_name'], {}), '(request, container, object_name)\n', (6615, 6648), False, 'from openstack_dashboard import api\n'), ((7145, 7176), 'os.path.splitext', 'os.path.splitext', (['obj.orig_name'], {}), '(obj.orig_name)\n', (7161, 7176), False, 'import os\n'), ((8448, 8539), 'openstack_dashboard.api.swift.swift_copy_object', 'api.swift.swift_copy_object', (['request', 'container', 'object_name', 'dest_container', 'dest_name'], {}), '(request, container, object_name, dest_container,\n dest_name)\n', (8475, 8539), False, 'from openstack_dashboard import api\n'), ((2226, 2275), 'openstack_dashboard.api.swift.swift_get_container', 'api.swift.swift_get_container', (['request', 'container'], {}), '(request, container)\n', (2255, 2275), False, 'from openstack_dashboard import api\n'), ((6274, 6336), 'openstack_dashboard.api.swift.swift_delete_folder', 'api.swift.swift_delete_folder', (['request', 'container', 'object_name'], {}), '(request, container, object_name)\n', (6303, 6336), False, 'from openstack_dashboard import api\n'), ((7876, 7984), 'openstack_dashboard.api.swift.swift_get_object', 'api.swift.swift_get_object', (['request'], {'container_name': 'container', 
'object_name': 'object_name', 'with_data': '(False)'}), '(request, container_name=container, object_name=\n object_name, with_data=False)\n', (7902, 7984), False, 'from openstack_dashboard import api\n'), ((7072, 7098), 'os.path.splitext', 'os.path.splitext', (['obj.name'], {}), '(obj.name)\n', (7088, 7098), False, 'import os\n')] |
'''Load image/class/box from a annotation file.
The annotation file is organized as:
image_name #obj xmin ymin xmax ymax class_index ..
'''
from __future__ import print_function
import os
import sys
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from encoder import DataEncoder
from PIL import Image, ImageOps
class ListDataset(data.Dataset):
img_size = 300
def __init__(self, root, list_file, train, transform):
'''
Args:
          root: (str) directory to images.
list_file: (str) path to index file.
train: (boolean) train or test.
transform: ([transforms]) image transforms.
'''
self.root = root
self.train = train
self.transform = transform
self.fnames = []
self.boxes = []
self.labels = []
self.data_encoder = DataEncoder()
with open(list_file) as f:
lines = f.readlines()
self.num_samples = len(lines)
for line in lines:
splited = line.strip().split()
self.fnames.append(splited[0])
num_objs = int(splited[1])
box = []
label = []
for i in range(num_objs):
xmin = splited[2+5*i]
ymin = splited[3+5*i]
xmax = splited[4+5*i]
ymax = splited[5+5*i]
c = splited[6+5*i]
box.append([float(xmin),float(ymin),float(xmax),float(ymax)])
label.append(int(c))
self.boxes.append(torch.Tensor(box))
self.labels.append(torch.LongTensor(label))
def __getitem__(self, idx):
'''Load a image, and encode its bbox locations and class labels.
Args:
idx: (int) image index.
Returns:
img: (tensor) image tensor.
loc_target: (tensor) location targets, sized [8732,4].
conf_target: (tensor) label targets, sized [8732,].
'''
# Load image and bbox locations.
fname = self.fnames[idx]
img = Image.open(os.path.join(self.root, fname))
boxes = self.boxes[idx].clone()
labels = self.labels[idx]
# Data augmentation while training.
if self.train:
img, boxes = self.random_flip(img, boxes)
img, boxes, labels = self.random_crop(img, boxes, labels)
        # Scale bbox locations to [0,1].
w,h = img.size
boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes)
img = img.resize((self.img_size,self.img_size))
img = self.transform(img)
# Encode loc & conf targets.
loc_target, conf_target = self.data_encoder.encode(boxes, labels)
return img, loc_target, conf_target
def random_flip(self, img, boxes):
'''Randomly flip the image and adjust the bbox locations.
For bbox (xmin, ymin, xmax, ymax), the flipped bbox is:
(w-xmax, ymin, w-xmin, ymax).
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
Returns:
img: (PIL.Image) randomly flipped image.
boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4].
'''
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
w = img.width
xmin = w - boxes[:,2]
xmax = w - boxes[:,0]
boxes[:,0] = xmin
boxes[:,2] = xmax
return img, boxes
def random_crop(self, img, boxes, labels):
'''Randomly crop the image and adjust the bbox locations.
        For more details, see 'Chapter 2.2: Data augmentation' of the paper.
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
labels: (tensor) bbox labels, sized [#obj,].
Returns:
img: (PIL.Image) cropped image.
selected_boxes: (tensor) selected bbox locations.
labels: (tensor) selected bbox labels.
'''
imw, imh = img.size
while True:
min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])
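            # None keeps the original image; otherwise the sampled crop must reach at least this IoU with the surviving boxes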
if min_iou is None:
return img, boxes, labels
for _ in range(100):
w = random.randrange(int(0.1*imw), imw)
h = random.randrange(int(0.1*imh), imh)
if h > 2*w or w > 2*h:
continue
x = random.randrange(imw - w)
y = random.randrange(imh - h)
roi = torch.Tensor([[x, y, x+w, y+h]])
center = (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2]
roi2 = roi.expand(len(center), 4) # [N,4]
mask = (center > roi2[:,:2]) & (center < roi2[:,2:]) # [N,2]
mask = mask[:,0] & mask[:,1] #[N,]
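                # a box is kept only if its center falls inside the sampled crop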
if not mask.any():
continue
selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1))
iou = self.data_encoder.iou(selected_boxes, roi)
if iou.min() < min_iou:
continue
img = img.crop((x, y, x+w, y+h))
selected_boxes[:,0].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,1].add_(-y).clamp_(min=0, max=h)
selected_boxes[:,2].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,3].add_(-y).clamp_(min=0, max=h)
return img, selected_boxes, labels[mask]
def __len__(self):
return self.num_samples
| [
"random.choice",
"random.randrange",
"torch.LongTensor",
"os.path.join",
"torch.Tensor",
"encoder.DataEncoder",
"random.random"
]
| [((938, 951), 'encoder.DataEncoder', 'DataEncoder', ([], {}), '()\n', (949, 951), False, 'from encoder import DataEncoder\n'), ((2156, 2186), 'os.path.join', 'os.path.join', (['self.root', 'fname'], {}), '(self.root, fname)\n', (2168, 2186), False, 'import os\n'), ((3309, 3324), 'random.random', 'random.random', ([], {}), '()\n', (3322, 3324), False, 'import random\n'), ((4172, 4218), 'random.choice', 'random.choice', (['[None, 0.1, 0.3, 0.5, 0.7, 0.9]'], {}), '([None, 0.1, 0.3, 0.5, 0.7, 0.9])\n', (4185, 4218), False, 'import random\n'), ((1632, 1649), 'torch.Tensor', 'torch.Tensor', (['box'], {}), '(box)\n', (1644, 1649), False, 'import torch\n'), ((1682, 1705), 'torch.LongTensor', 'torch.LongTensor', (['label'], {}), '(label)\n', (1698, 1705), False, 'import torch\n'), ((2536, 2562), 'torch.Tensor', 'torch.Tensor', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (2548, 2562), False, 'import torch\n'), ((4529, 4554), 'random.randrange', 'random.randrange', (['(imw - w)'], {}), '(imw - w)\n', (4545, 4554), False, 'import random\n'), ((4575, 4600), 'random.randrange', 'random.randrange', (['(imh - h)'], {}), '(imh - h)\n', (4591, 4600), False, 'import random\n'), ((4623, 4659), 'torch.Tensor', 'torch.Tensor', (['[[x, y, x + w, y + h]]'], {}), '([[x, y, x + w, y + h]])\n', (4635, 4659), False, 'import torch\n')] |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to build composite layers.
WARNING:
The builder pattern is still experimental and we need to gain experience
on when to use and when not to use.
Please discuss w/ teammates before using it to build complicated
layers.
"""
import functools
from lingvo.core import activations
from lingvo.core import builder_layers
from lingvo.core import hyperparams
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import tshape
class Base:
"""Model builder with commonly used layers.
A method in a builder class constructs a layer param. FProp of a layer
constructed by a builder takes a tuple of tf.Tensor (one or more) and returns
a tuple of tf.Tensor (one or more). Even though certain layers support FProp
argument being None (e.g., Conv2DLayer), builder should not depend on such a
support.
The constructed layer is often a composition of multiple sub-layers connected
in certain patterns. We expect to have a few methods to facilitate building
these patterns. For example, _Seq() helps to build a sequential layer that
calls its sub-layer one after another.
TODO(zhifengc): Adds a more concrete example.
"""
@classmethod
def Params(cls):
"""The params of this layer."""
p = hyperparams.InstantiableParams(cls)
p.Define('deterministic_dropout', False,
'Used deterministic dropout or not.')
p.Define(
'fprop_dtype', None,
'Activations datatype to use. To enable bfloat16 activations for '
'layers built using model builder, set fprop_dtype to '
'tf.bfloat16, which will be propagated to layers that support '
'bfloat16 activations. Default is None, which will use float32 '
'activations.')
# SPMD partition related params.
p.Define(
'device_mesh', None,
'A numpy.ndarray specifying the topology of a device mesh to place the '
'computations onto. If device_mesh is None, it is assumed to be a '
'single device. Here are some examples: '
'np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d mesh with 8 devices, '
'np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is 2d matrix of 8 '
'devices.')
p.Define(
'weight_split_dims_mapping', None,
'Relevant only if device_mesh above is not None. If not None, it '
'specifies how weight of this layer or those of the sublayers should '
'be sharded over device mesh. ')
p.Define(
'activation_split_dims_mapping', None,
'Relevant only if device_mesh above is not None. If not None, it '
'specifies how activation of this layer or those of the sublayers '
'should be sharded over device mesh. ')
return p
@property
def params(self):
"""Returns the params upon which this layer is built."""
return self._params
def __init__(self, params):
# Sub-classes should put some options common to many layers in __init__.
self._params = params.Copy()
######################################################################
# Layers to compose multiple layers.
#
# Sub-classes are discouraged to override these composition method.
######################################################################
def _Rep(self, name, repeat, *subs):
r"""Connects sub-layers sequentially and repeat multiple times.
E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer with 6 layers
sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have
the same structure as the given sa, but sa1 and sa2 do not share the same
weight.
Args:
name: The layer name.
repeat: Repeat \*subs this many times in the compose layer.
*subs: A list of sub-layers.
Returns:
The param for the composed layer.
"""
iterations = []
for i in range(repeat):
iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs]))
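      # p.Copy() gives each repetition its own params, so repeated sub-layers do not share weights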
return self._Seq(name, *iterations)
def _Seq(self, name, *subs):
"""Connects sub-layers sequentially."""
return builder_layers.SequentialLayer.Params().Set(
name=name, sub=list(subs))
def _Graph(self, name, input_endpoints, output_endpoints,
*signature_sub_param_list):
"""Connects sub-layers into a data flow graph."""
return builder_layers.GraphLayer.Params().Set(
name=name,
input_endpoints=input_endpoints,
output_endpoints=output_endpoints,
sub=list(signature_sub_param_list))
def _Id(self, name):
"""Identity. (t_1, ..., t_n) -> (t1, ..., t_n)."""
return self._Seq(name)
def _Arg(self, name, index):
"""Picks index-th element. (t_1, ..., t_n) -> (t_{index},)."""
return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index])
def _Par(self, name, *subs):
"""y = (f1, f2, ..., fn)(x).
We feed the input tuple to all sub-layers and concatenates their output
tuples into one tuple.
Args:
name: The layer name.
*subs: A list of sub-layers.
Returns:
The param for the composed layer.
"""
def ConcatTuples(tuples):
# tuples is a list of tuples.
return tuple(functools.reduce(lambda x, y: x + list(y), tuples, []))
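      # e.g. [(a, b), (c,)] -> (a, b, c)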
def ConcatMeta(tuples):
return py_utils.NestedMap(
flops=0,
out_shapes=tuple(
functools.reduce(lambda x, y: x + list(y), tuples, [])))
return builder_layers.ParallelLayer.Params().Set(
name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta)
def _Fn(self, name, fn, fn_out=None, fn_flops=None):
"""y = fn(x).
Applies a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to the input
tuple. Typically, fn is a very simple python function. This layer can be
    used for prototyping but we advise implementing the logic as a sub-class of
BaseLayer for all established layers as FnLayer can't be serialized.
Args:
name: The layer name.
fn: A lambda tuple(Tensor) -> tuple(Tensor).
fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape)
fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn.
If None, we assume flops == sum of elements in the inputs.
Returns:
The param for the composed layer.
"""
def FnMeta(*shapes):
"""A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}."""
if fn_out:
out_shapes = fn_out(*shapes)
if isinstance(out_shapes, tshape.Shape):
out_shapes = (out_shapes,)
else:
out_shapes = shapes
if fn_flops:
flops = fn_flops(*shapes)
else:
flops = sum([s.size for s in shapes])
return py_utils.NestedMap(flops=flops, out_shapes=out_shapes)
return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta)
def _Save(self, name):
"""Returns a layer from which the activation and gradient can be accessed."""
return layers.FetchLayer.Params().Set(name=name)
def _AddFetches(self, name, body, fetches):
"""Fetches saved activations in the body sub-layer.
E.g.:
_AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...),
_Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...),
_Output('output', ...)), ['layer1_out', 'layer2_out'])
The layer returns the stack's final output together with intermediate
activations from layer1_out and layer2_out.
Args:
name: This layer's name.
body: The sub-layer.
fetches: A list of fetch names inside the sub-layer body.
Returns:
A layer whose outputs correspond to the activations of fetch points
in the sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM].
"""
return builder_layers.BranchLayer.Params().Set(
name=name, body=body, fetches=fetches)
def _Rematerialize(self, name, body):
"""Forces rematerialization on FProp of the body layer."""
return builder_layers.RematerializationLayer.Params().Set(
name=name, body=body)
def _BatchParallel(self, name, sub):
"""Splits the batch and compute the forward pass on multiple devices.
Args:
name: This layer's name.
sub: The sub-layer.
Returns:
A BatchParallel layer which splits the batch and computes the forward pass
on multiple devices.
"""
return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub)
def _PrintShape(self, name):
"""Print FProp input shape information."""
return builder_layers.PrintShapeLayer.Params().Set(name=name)
def _CreateNestedMap(self, name, keys):
"""Returns a NestedMap with keys from fprop args."""
return builder_layers.CreateNestedMapLayer.Params().Set(
name=name, keys=keys)
###########################################################################
# Basic nn layers.
#
# The following method returns a layer param, whose FProp takes a single
# Tensor and returns a single Tensor.
#
# These methods are designed to have minimal knobs. Sub-classes which needs to
# be flexible can override these methods with different options. E.g., a
# sub-class builder can override _BN() to tune the decay option.
###########################################################################
def _BN(self, name, dims):
"""Batch norm."""
return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99)
def _LN(self, name, dims, use_fused_layernorm=False):
"""Layer norm."""
return layers.LayerNorm.Params().Set(
name=name,
input_dim=dims,
use_fused_layernorm=use_fused_layernorm,
fprop_dtype=self.params.fprop_dtype)
def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None):
"""Returns a DropoutLayer Params."""
if self.params.deterministic_dropout:
return layers.DeterministicDropoutLayer.Params().Set(
name=name,
keep_prob=keep_prob,
noise_shape_broadcast_dims=noise_shape_broadcast_dims)
return layers.DropoutLayer.Params().Set(
name=name,
keep_prob=keep_prob,
noise_shape_broadcast_dims=noise_shape_broadcast_dims,
fprop_dtype=self.params.fprop_dtype)
def _Linear(self,
name,
idims,
odims,
device_mesh=None,
weight_split_dims_mapping=None,
qdomain=None):
"""Linear layer. y = matmul([..., idims], [idims, odims])."""
p = builder_layers.LinearLayer.Params()
p.name = name
p.input_dims = idims
p.output_dims = odims
p.fprop_dtype = self.params.fprop_dtype
p.device_mesh = device_mesh
p.weight_split_dims_mapping = weight_split_dims_mapping
p.qdomain.default = qdomain
return p
def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None):
"""Bias layer. The bias is added to the last dimension of the input."""
return builder_layers.BiasLayer.Params().Set(
name=name,
dims=dims,
fprop_dtype=self.params.fprop_dtype,
device_mesh=device_mesh,
weight_split_dims_mapping=weight_split_dims_mapping)
def _Activation(self, name, fn='RELU'):
"""Activation layer."""
return activations.ActivationLayer.Params().Set(activation=fn, name=name)
def _FC(self, name, idims, odims, act='RELU'):
"""Feed-forward fully connected. y = act(matmul(x, w) + b)."""
# pyformat: disable
return self._Seq(
name,
self._Linear('linear', idims, odims),
self._Bias('bias', odims),
self._Activation('act', fn=act))
def _MLP(self, name, dims, act='RELU'):
"""Multiple layers of feed-forward fully connected.
Args:
name: The layer name.
dims: A list of int. i-th layer has dims[i] as its input dimension, and
dims[i+1] as its output dimensions.
act: The activation function.
Returns:
The param for the composed layer.
"""
l = []
for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])):
l += [self._FC('l%03d' % n, i, o, act)]
return self._Seq(name, *l)
def _Conv2D(self, name, filter_shape, filter_stride):
"""Conv2D layer."""
return layers.Conv2DLayerNoPadding.Params().Set(
name=name, filter_shape=filter_shape, filter_stride=filter_stride,
fprop_dtype=self.params.fprop_dtype)
def _Reshape(self, name, shape):
"""Reshape inputs to the shape provided."""
return builder_layers.ReshapeLayer.Params().Set(name=name,
shape=shape)
| [
"lingvo.core.activations.ActivationLayer.Params",
"lingvo.core.builder_layers.ReshapeLayer.Params",
"lingvo.core.builder_layers.CreateNestedMapLayer.Params",
"lingvo.core.py_utils.NestedMap",
"lingvo.core.layers.DeterministicDropoutLayer.Params",
"lingvo.core.layers.FetchLayer.Params",
"lingvo.core.builder_layers.PrintShapeLayer.Params",
"lingvo.core.builder_layers.FnLayer.Params",
"lingvo.core.layers.LayerNorm.Params",
"lingvo.core.builder_layers.ArgIndexLayer.Params",
"lingvo.core.builder_layers.GraphLayer.Params",
"lingvo.core.builder_layers.BranchLayer.Params",
"lingvo.core.layers.DropoutLayer.Params",
"lingvo.core.builder_layers.BatchParallelLayer.Params",
"lingvo.core.layers.Conv2DLayerNoPadding.Params",
"lingvo.core.hyperparams.InstantiableParams",
"lingvo.core.builder_layers.BiasLayer.Params",
"lingvo.core.builder_layers.ParallelLayer.Params",
"lingvo.core.layers.BatchNormLayer.Params",
"lingvo.core.builder_layers.SequentialLayer.Params",
"lingvo.core.builder_layers.RematerializationLayer.Params",
"lingvo.core.builder_layers.LinearLayer.Params"
]
| [((1971, 2006), 'lingvo.core.hyperparams.InstantiableParams', 'hyperparams.InstantiableParams', (['cls'], {}), '(cls)\n', (2001, 2006), False, 'from lingvo.core import hyperparams\n'), ((11168, 11203), 'lingvo.core.builder_layers.LinearLayer.Params', 'builder_layers.LinearLayer.Params', ([], {}), '()\n', (11201, 11203), False, 'from lingvo.core import builder_layers\n'), ((7392, 7446), 'lingvo.core.py_utils.NestedMap', 'py_utils.NestedMap', ([], {'flops': 'flops', 'out_shapes': 'out_shapes'}), '(flops=flops, out_shapes=out_shapes)\n', (7410, 7446), False, 'from lingvo.core import py_utils\n'), ((4762, 4801), 'lingvo.core.builder_layers.SequentialLayer.Params', 'builder_layers.SequentialLayer.Params', ([], {}), '()\n', (4799, 4801), False, 'from lingvo.core import builder_layers\n'), ((5009, 5043), 'lingvo.core.builder_layers.GraphLayer.Params', 'builder_layers.GraphLayer.Params', ([], {}), '()\n', (5041, 5043), False, 'from lingvo.core import builder_layers\n'), ((5412, 5449), 'lingvo.core.builder_layers.ArgIndexLayer.Params', 'builder_layers.ArgIndexLayer.Params', ([], {}), '()\n', (5447, 5449), False, 'from lingvo.core import builder_layers\n'), ((6117, 6154), 'lingvo.core.builder_layers.ParallelLayer.Params', 'builder_layers.ParallelLayer.Params', ([], {}), '()\n', (6152, 6154), False, 'from lingvo.core import builder_layers\n'), ((7459, 7490), 'lingvo.core.builder_layers.FnLayer.Params', 'builder_layers.FnLayer.Params', ([], {}), '()\n', (7488, 7490), False, 'from lingvo.core import builder_layers\n'), ((7648, 7674), 'lingvo.core.layers.FetchLayer.Params', 'layers.FetchLayer.Params', ([], {}), '()\n', (7672, 7674), False, 'from lingvo.core import layers\n'), ((8447, 8482), 'lingvo.core.builder_layers.BranchLayer.Params', 'builder_layers.BranchLayer.Params', ([], {}), '()\n', (8480, 8482), False, 'from lingvo.core import builder_layers\n'), ((8650, 8696), 'lingvo.core.builder_layers.RematerializationLayer.Params', 'builder_layers.RematerializationLayer.Params', ([], {}), '()\n', (8694, 8696), False, 'from lingvo.core import builder_layers\n'), ((9055, 9097), 'lingvo.core.builder_layers.BatchParallelLayer.Params', 'builder_layers.BatchParallelLayer.Params', ([], {}), '()\n', (9095, 9097), False, 'from lingvo.core import builder_layers\n'), ((9212, 9251), 'lingvo.core.builder_layers.PrintShapeLayer.Params', 'builder_layers.PrintShapeLayer.Params', ([], {}), '()\n', (9249, 9251), False, 'from lingvo.core import builder_layers\n'), ((9378, 9422), 'lingvo.core.builder_layers.CreateNestedMapLayer.Params', 'builder_layers.CreateNestedMapLayer.Params', ([], {}), '()\n', (9420, 9422), False, 'from lingvo.core import builder_layers\n'), ((10044, 10074), 'lingvo.core.layers.BatchNormLayer.Params', 'layers.BatchNormLayer.Params', ([], {}), '()\n', (10072, 10074), False, 'from lingvo.core import layers\n'), ((10202, 10227), 'lingvo.core.layers.LayerNorm.Params', 'layers.LayerNorm.Params', ([], {}), '()\n', (10225, 10227), False, 'from lingvo.core import layers\n'), ((10714, 10742), 'lingvo.core.layers.DropoutLayer.Params', 'layers.DropoutLayer.Params', ([], {}), '()\n', (10740, 10742), False, 'from lingvo.core import layers\n'), ((11623, 11656), 'lingvo.core.builder_layers.BiasLayer.Params', 'builder_layers.BiasLayer.Params', ([], {}), '()\n', (11654, 11656), False, 'from lingvo.core import builder_layers\n'), ((11921, 11957), 'lingvo.core.activations.ActivationLayer.Params', 'activations.ActivationLayer.Params', ([], {}), '()\n', (11955, 11957), False, 'from lingvo.core import activations\n'), 
((12884, 12920), 'lingvo.core.layers.Conv2DLayerNoPadding.Params', 'layers.Conv2DLayerNoPadding.Params', ([], {}), '()\n', (12918, 12920), False, 'from lingvo.core import layers\n'), ((13141, 13177), 'lingvo.core.builder_layers.ReshapeLayer.Params', 'builder_layers.ReshapeLayer.Params', ([], {}), '()\n', (13175, 13177), False, 'from lingvo.core import builder_layers\n'), ((10539, 10580), 'lingvo.core.layers.DeterministicDropoutLayer.Params', 'layers.DeterministicDropoutLayer.Params', ([], {}), '()\n', (10578, 10580), False, 'from lingvo.core import layers\n')] |
# -*- coding: utf-8 -*-
# file: preprocess.py
# author: jackie
# Copyright (C) 2021. All Rights Reserved.
import os
import pandas as pd
import argparse
import emoji
import re
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument("--inpath", type=str, required=True, default='./raw_data/data1.csv')
parser.add_argument("--folder_name", type=str, required=False, default='./custom')
parser.add_argument("--task", type=str, required=False, default='aptepc')
args = parser.parse_args()
def convert(text, labels):
# convert label to list
try:
labels = eval(labels)
tags = ['O'] * len(text)
sentiment = ['-999'] * len(text)
for j in range(len(labels)):
label = labels[j]
sentiment_key = labels[j][3]
if sentiment_key == '正':
sentiment_value = 'Positive'
elif sentiment_key == '负':
sentiment_value = 'Negative'
else:
sentiment_value = 'Others'
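            # label[4] holds the (start, end) character span of the aspect term; it is tagged B-ASP/I-ASP below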
tags[label[4][0]] = 'B-ASP'
sentiment[label[4][0]] = sentiment_value
k = label[4][0] + 1
while k < label[4][1]:
tags[k] = 'I-ASP'
sentiment[k] = sentiment_value
k += 1
return text, tags, sentiment
except:
print ("labels", labels)
print ("text", text)
def convert_tag(text, labels):
# convert label to list
try:
labels = eval(labels)
tags = ['O'] * len(text)
sentiment = ['-999'] * len(text)
for j in range(len(labels)):
label = labels[j]
sentiment_key = labels[j][3]
if sentiment_key == '正':
sentiment_value = 'Positive'
elif sentiment_key == '负':
sentiment_value = 'Negative'
else:
sentiment_value = 'Others'
tags[label[4][0]] = 'B-'+label[1]
sentiment[label[4][0]] = sentiment_value
k = label[4][0] + 1
while k < label[4][1]:
tags[k] = 'I-'+label[1]
sentiment[k] = sentiment_value
k += 1
return text, tags, sentiment
except:
print ("labels", labels)
print ("text", text)
def convert_sentiment(sentiment_key):
if sentiment_key == '正':
sentiment_value = 'Positive'
else:
sentiment_value = 'Negative'
return sentiment_value
def convert_apc(text, label):
label_update = [(i[0], i[3], i[4]) for i in eval(label)]
label_update = list(set(label_update))
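    # drop duplicate (term, sentiment, span) triples before building the $T$ templates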
str1_list = []
str2_list = []
str3_list = []
for j in range(len(label_update)):
str1 = text[:label_update[j][2][0]] + '$T$ ' + text[label_update[j][2][1]:]
str1_list.append(str1)
str2_list.append(label_update[j][0])
str3_list.append(convert_sentiment(label_update[j][1]))
return str1_list, str2_list, str3_list
def filter_emoji(desstr, restr=''):
    # filter out emoji characters
try:
co = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
return co.sub(restr, desstr)
def convert_to_atepc(inpath, dist_fname, flag):
    # before writing, check whether the output file already exists and delete it if it does
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
data.columns = ['text', 'tag_sentiment_list']
# preprocess for emoji
data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx'))
    # keep only reviews whose length is at most 600 characters
data = data[data['text'].str.len() <= 600]
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
text, tags, sentiment = convert(text, label)
for word, tag, sen in zip(text, tags, sentiment):
if word not in [',', '。', ' ', '\xa0', '\u2006', '\u3000', '\u2002', '\u2003', '\u2005', '\x0c', '\u2028',
'\u2009', '\u200a']:
f1.write(word + ' ' + tag + ' ' + sen + '\n')
else:
f1.write("\n")
f1.write("\n")
f1.close()
print ("process atepc finished!")
def convert_to_atepc_tag(inpath, dist_fname, flag):
    # before writing, check whether the output file already exists and delete it if it does
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
data.columns = ['text', 'tag_sentiment_list']
# preprocess for emoji
data['text'] = data['text'].map(lambda x: filter_emoji(x, restr='xx'))
# drop id list not able to process
# print (data.iloc[8832,:])
# data = data.drop([8832])
    # keep only reviews whose length is at most 600 characters
data = data[data['text'].str.len() <= 600]
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
        text, tags, sentiment = convert_tag(text, label)
for word, tag, sen in zip(text, tags, sentiment):
if word not in [',', '。', ' ', '\xa0', '\u2006', '\u3000', '\u2002', '\u2003', '\u2005', '\x0c', '\u2028',
'\u2009', '\u200a']:
f1.write(word + ' ' + tag + ' ' + sen + '\n')
else:
f1.write("\n")
f1.write("\n")
f1.close()
print ("process atepc finished!")
def convert_to_apc(inpath, dist_fname, flag):
    # before writing, check whether the output file already exists and delete it if it does
if os.path.exists(dist_fname):
os.remove(dist_fname)
f1 = open(dist_fname, 'w', encoding='utf8')
data = pd.read_csv(inpath)
# train test split
x_train, x_test = train_test_split(data, test_size=0.2, random_state=42)
if flag == 'train':
data_res = x_train.iloc[:, :].reset_index()
else:
data_res = x_test.iloc[:, :].reset_index()
# print (data_res.head())
for i in range(len(data_res)):
text, label = data_res['text'][i], data_res['tag_sentiment_list'][i]
str1_list, str2_list, str3_list = convert_apc(text, label)
for x1, x2, x3 in zip(str1_list, str2_list, str3_list):
f1.write(x1 + '\n')
f1.write(x2 + '\n')
f1.write(x3 + '\n')
f1.close()
print ("process apc finished!")
def main(inpath, folder_name, task):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
if task == 'aptepc':
# get folder name
print ("start process for an aptepc task")
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')
# process train
convert_to_atepc(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_atepc(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
elif task == 'apc':
# get folder name
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt')
# process train
convert_to_apc(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_apc(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
elif task == 'aptepc-tag':
# get folder name
print ("start process for an aptepc tag task")
folder_name_prefix = folder_name.split('/')[-1]
dist_train_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')
dist_test_fname = os.path.join(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')
# process train
convert_to_atepc_tag(inpath, dist_train_fname, 'train')
print ("<<< finish training data preprocess")
# process test
convert_to_atepc_tag(inpath, dist_test_fname, 'test')
print ("<<< finish test data preprocess")
main(args.inpath, args.folder_name, args.task)
| [
"os.path.exists",
"argparse.ArgumentParser",
"re.compile",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"os.makedirs",
"os.path.join",
"os.remove"
]
| [((239, 264), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (262, 264), False, 'import argparse\n'), ((3310, 3336), 'os.path.exists', 'os.path.exists', (['dist_fname'], {}), '(dist_fname)\n', (3324, 3336), False, 'import os\n'), ((3428, 3447), 'pandas.read_csv', 'pd.read_csv', (['inpath'], {}), '(inpath)\n', (3439, 3447), True, 'import pandas as pd\n'), ((3720, 3774), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, test_size=0.2, random_state=42)\n', (3736, 3774), False, 'from sklearn.model_selection import train_test_split\n'), ((4609, 4635), 'os.path.exists', 'os.path.exists', (['dist_fname'], {}), '(dist_fname)\n', (4623, 4635), False, 'import os\n'), ((4727, 4746), 'pandas.read_csv', 'pd.read_csv', (['inpath'], {}), '(inpath)\n', (4738, 4746), True, 'import pandas as pd\n'), ((5122, 5176), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, test_size=0.2, random_state=42)\n', (5138, 5176), False, 'from sklearn.model_selection import train_test_split\n'), ((6005, 6031), 'os.path.exists', 'os.path.exists', (['dist_fname'], {}), '(dist_fname)\n', (6019, 6031), False, 'import os\n'), ((6123, 6142), 'pandas.read_csv', 'pd.read_csv', (['inpath'], {}), '(inpath)\n', (6134, 6142), True, 'import pandas as pd\n'), ((6188, 6242), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, test_size=0.2, random_state=42)\n', (6204, 6242), False, 'from sklearn.model_selection import train_test_split\n'), ((3075, 3104), 're.compile', 're.compile', (['u"""[𐀀-\U0010ffff]"""'], {}), "(u'[𐀀-\\U0010ffff]')\n", (3085, 3104), False, 'import re\n'), ((3346, 3367), 'os.remove', 'os.remove', (['dist_fname'], {}), '(dist_fname)\n', (3355, 3367), False, 'import os\n'), ((4645, 4666), 'os.remove', 'os.remove', (['dist_fname'], {}), '(dist_fname)\n', (4654, 4666), False, 'import os\n'), ((6041, 6062), 'os.remove', 'os.remove', (['dist_fname'], {}), '(dist_fname)\n', (6050, 6062), False, 'import os\n'), ((6854, 6881), 'os.path.exists', 'os.path.exists', (['folder_name'], {}), '(folder_name)\n', (6868, 6881), False, 'import os\n'), ((6891, 6915), 'os.makedirs', 'os.makedirs', (['folder_name'], {}), '(folder_name)\n', (6902, 6915), False, 'import os\n'), ((7102, 7175), 'os.path.join', 'os.path.join', (['folder_name_prefix', "(folder_name_prefix + '.train.txt.atepc')"], {}), "(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')\n", (7114, 7175), False, 'import os\n'), ((7202, 7274), 'os.path.join', 'os.path.join', (['folder_name_prefix', "(folder_name_prefix + '.test.txt.atepc')"], {}), "(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')\n", (7214, 7274), False, 'import os\n'), ((3148, 3193), 're.compile', 're.compile', (["u'[\\ud800-\\udbff][\\udc00-\\udfff]'"], {}), "(u'[\\ud800-\\udbff][\\udc00-\\udfff]')\n", (3158, 3193), False, 'import re\n'), ((7677, 7744), 'os.path.join', 'os.path.join', (['folder_name_prefix', "(folder_name_prefix + '.train.txt')"], {}), "(folder_name_prefix, folder_name_prefix + '.train.txt')\n", (7689, 7744), False, 'import os\n'), ((7771, 7837), 'os.path.join', 'os.path.join', (['folder_name_prefix', "(folder_name_prefix + '.test.txt')"], {}), "(folder_name_prefix, folder_name_prefix + '.test.txt')\n", (7783, 7837), False, 'import os\n'), ((8298, 8371), 'os.path.join', 'os.path.join', (['folder_name_prefix', 
"(folder_name_prefix + '.train.txt.atepc')"], {}), "(folder_name_prefix, folder_name_prefix + '.train.txt.atepc')\n", (8310, 8371), False, 'import os\n'), ((8398, 8470), 'os.path.join', 'os.path.join', (['folder_name_prefix', "(folder_name_prefix + '.test.txt.atepc')"], {}), "(folder_name_prefix, folder_name_prefix + '.test.txt.atepc')\n", (8410, 8470), False, 'import os\n')] |
import os
import shutil
import requests
def get_cat(folder, name):
url = "http://consuming-python-services-api.azurewebsites.net/cats/random"
data = get_data_from_url(url)
save_image(folder, name, data)
def get_data_from_url(url):
response = requests.get(url, stream=True)
return response.raw
def save_image(folder, name, data):
file_name = os.path.join(folder, name + '.jpg')
with open(file_name, 'wb') as fout:
shutil.copyfileobj(data, fout)
| [
"os.path.join",
"shutil.copyfileobj",
"requests.get"
]
| [((263, 293), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (275, 293), False, 'import requests\n'), ((372, 407), 'os.path.join', 'os.path.join', (['folder', "(name + '.jpg')"], {}), "(folder, name + '.jpg')\n", (384, 407), False, 'import os\n'), ((456, 486), 'shutil.copyfileobj', 'shutil.copyfileobj', (['data', 'fout'], {}), '(data, fout)\n', (474, 486), False, 'import shutil\n')] |
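A minimal usage sketch for the helper above, assuming the functions are importable and the cat service is still reachable; the folder and file name are arbitrary:

import os
os.makedirs('cats', exist_ok=True)   # save_image() expects the target folder to exist
get_cat('cats', 'sample')            # writes cats/sample.jpg with a random cat image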
# -*- coding: utf-8 -*-
"""
media info manager module.
"""
from pyrin.core.mixin import HookMixin
from pyrin.core.structs import Manager
import pyrin.utils.path as path_utils
from charma.media_info import MediaInfoPackage
from charma.media_info.interface import AbstractMediaInfoProvider
from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError
class MediaInfoManager(Manager, HookMixin):
"""
media info manager class.
"""
package_class = MediaInfoPackage
hook_type = AbstractMediaInfoProvider
invalid_hook_type_error = InvalidMediaInfoProviderTypeError
REQUIRED_INFO = ('runtime', 'width', 'height')
def _is_complete(self, info):
"""
        gets a value indicating whether the given media info is complete.
:param dict info: media info to be checked.
:rtype: bool
"""
for item in self.REQUIRED_INFO:
result = info.get(item)
if result is None or result <= 0:
return False
return True
def register_provider(self, instance):
"""
registers the given instance into media info providers.
:param AbstractMediaInfoProvider instance: media info provider instance
to be registered.
:raises InvalidMediaInfoProviderTypeError: invalid media info provider type error.
"""
self.register_hook(instance)
def get_info(self, file, **options):
"""
gets a dict containing media info of given file.
:param str file: absolute path of video file.
:raises InvalidPathError: invalid path error.
:raises PathIsNotAbsoluteError: path is not absolute error.
:raises PathNotExistedError: path not existed error.
        :raises IsNotFileError: is not a file error.
:returns: dict(int runtime,
int width,
int height)
:rtype: dict
"""
path_utils.assert_is_file(file)
result = dict()
for provider in self._get_hooks():
current_result = provider.get_info(file, **options)
result.update(current_result)
if self._is_complete(result) is True:
break
result.setdefault('runtime', 0)
result.setdefault('width', 0)
result.setdefault('height', 0)
return result
| [
"pyrin.utils.path.assert_is_file"
]
| [((1994, 2025), 'pyrin.utils.path.assert_is_file', 'path_utils.assert_is_file', (['file'], {}), '(file)\n', (2019, 2025), True, 'import pyrin.utils.path as path_utils\n')] |
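A rough sketch of how a provider plugs into the manager above, assuming AbstractMediaInfoProvider only requires get_info() and ignoring pyrin application bootstrapping; the values and path are invented:

class DummyProvider(AbstractMediaInfoProvider):
    def get_info(self, file, **options):
        # a real provider would probe the file, e.g. through ffprobe
        return dict(runtime=5400, width=1920, height=1080)

manager = MediaInfoManager()
manager.register_provider(DummyProvider())
info = manager.get_info('/absolute/path/to/video.mkv')   # path must point to an existing file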
import unittest
from boxrec.parsers import FightParser
class MockResponse(object):
def __init__(self, content, encoding, url):
        self.content = content
self.encoding = encoding
self.url = url
class TestFightParser(unittest.TestCase):
def setUp(self):
with open('mock_data/fights/draw.html', 'rb') as file:
self.drawn_fight = file.read()
self.parser = FightParser()
def test_parses_draw(self):
"""Test it correctly handles draws"""
mock_response = MockResponse(
self.drawn_fight,
'UTF-8',
"http://boxrec.com/en/event/115689/202488"
)
result = self.parser.parse(mock_response)
self.assertEqual(result.winner, 'drawn', "Result should equal draw.")
class TestBoxerParser(unittest.TestCase):
pass
| [
"boxrec.parsers.FightParser"
]
| [((413, 426), 'boxrec.parsers.FightParser', 'FightParser', ([], {}), '()\n', (424, 426), False, 'from boxrec.parsers import FightParser\n')] |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import copy
from ceilometerclient.apiclient import base
from ceilometerclient.apiclient import exceptions
from ceilometerclient import exc
def getid(obj):
"""Extracts object ID.
Abstracts the common pattern of allowing both an object or an
object's ID (UUID) as a parameter when dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
class Manager(object):
"""Managers interact with a particular type of API.
    It works with samples, meters, alarms, etc. and provides CRUD operations for
them.
"""
resource_class = None
def __init__(self, api):
self.api = api
@property
def client(self):
"""Compatible with latest oslo-incubator.apiclient code."""
return self.api
def _create(self, url, body):
body = self.api.post(url, json=body).json()
if body:
return self.resource_class(self, body)
def _list(self, url, response_key=None, obj_class=None, body=None,
expect_single=False):
try:
resp = self.api.get(url)
except exceptions.NotFound:
raise exc.HTTPNotFound
if not resp.content:
raise exc.HTTPNotFound
body = resp.json()
if obj_class is None:
obj_class = self.resource_class
if response_key:
try:
data = body[response_key]
except KeyError:
return []
else:
data = body
if expect_single:
data = [data]
return [obj_class(self, res, loaded=True) for res in data if res]
def _update(self, url, body, response_key=None):
body = self.api.put(url, json=body).json()
# PUT requests may not return a body
if body:
return self.resource_class(self, body)
def _delete(self, url):
self.api.delete(url)
class Resource(base.Resource):
"""A resource represents a particular instance of an object.
Resource might be tenant, user, etc.
This is pretty much just a bag for attributes.
:param manager: Manager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
def to_dict(self):
return copy.deepcopy(self._info)
| [
"copy.deepcopy"
]
| [((3031, 3056), 'copy.deepcopy', 'copy.deepcopy', (['self._info'], {}), '(self._info)\n', (3044, 3056), False, 'import copy\n')] |
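The classes above are meant to be subclassed per resource type; a minimal hypothetical example (the endpoint path and resource name are invented):

class Sample(Resource):
    """A single sample returned by the API."""

class SampleManager(Manager):
    resource_class = Sample

    def list(self, meter_name):
        # delegate to the generic _list() helper with a resource-specific URL
        return self._list('/v2/meters/%s' % meter_name)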
import socket
import csv
import traceback
import threading
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
usrpass={}
def openfile():
filename="login_credentials.csv"
with open(filename,'r')as csvfile:
csv_file = csv.reader(csvfile, delimiter=",")
for col in csv_file:
usrpass[col[0]]=col[1]
usrpass.pop("Username")
#print(usrpass)
# resolve this machine's hostname and IP address once
ihost=socket.gethostname()
host=socket.gethostbyname(ihost)
iport=[]
hostfile="host.csv"
with open(hostfile,'r')as host_file:
csv_hfile = csv.reader(host_file, delimiter=",")
for row in csv_hfile:
iport.append(row[1])
port=int(iport[4])
def socketbind():
try:
s.bind(('',port))
print("Bind with host at port number : "+str(port))
s.listen(10)
print("Socket is listening!!")
except socket.error as msg:
print("Error in Binding: "+ str(msg)+"\n Retrying....")
socketbind()
def socketaccept():
conn,add=s.accept()
print("connection is established with IP : "+str(add[0])+" and Port Number : "+str(add[1]))
conn.send(bytes("1","utf-8"))
conversation(conn)
conn.close()
def conversation(conn):
while True:
username=str(conn.recv(1024),"utf-8")
password=str(conn.recv(1024),"utf-8")
res=checkpass(username,password)
if res==1:
print("Valid Password!")
conn.send(bytes("1","utf-8"))
conn.send(bytes("1","utf-8"))
else:
conn.send(bytes("-1","utf-8"))
conn.send(bytes("-1","utf-8"))
# def checkusr(username):
# if username in usrpass:
# return 1
# else:
# print("Invalid Username")
# return -1
def checkpass(username,password):
    # .get() avoids a KeyError when the username is not in the credentials file
    if usrpass.get(username)==password:
        return 1
    else:
        print("Invalid Username or Password")
        return -1
def main():
openfile()
socketbind()
socketaccept()
# count=0
# while (count<6):
# new_thread=threading.Thread(target =socketaccept)
# new_thread.start()
# count=count+1
main() | [
"socket.gethostbyname",
"socket.gethostname",
"socket.socket",
"csv.reader"
]
| [((62, 111), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (75, 111), False, 'import socket\n'), ((401, 421), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (419, 421), False, 'import socket\n'), ((427, 454), 'socket.gethostbyname', 'socket.gethostbyname', (['ihost'], {}), '(ihost)\n', (447, 454), False, 'import socket\n'), ((461, 481), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (479, 481), False, 'import socket\n'), ((487, 514), 'socket.gethostbyname', 'socket.gethostbyname', (['ihost'], {}), '(ihost)\n', (507, 514), False, 'import socket\n'), ((597, 633), 'csv.reader', 'csv.reader', (['host_file'], {'delimiter': '""","""'}), "(host_file, delimiter=',')\n", (607, 633), False, 'import csv\n'), ((238, 272), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (248, 272), False, 'import csv\n')] |
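For context, a matching client sketch for the server above; the address and credentials are hypothetical, and the port must be the one the server read from host.csv:

import socket
port = 5000                               # hypothetical; the server takes its port from host.csv
c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect(("127.0.0.1", port))
c.recv(1024)                              # server sends "1" once the connection is accepted
c.send(bytes("alice", "utf-8"))           # username
c.send(bytes("secret", "utf-8"))          # password
print(str(c.recv(1024), "utf-8"))         # the server sends the result flag twice: "1" if valid, "-1" otherwise
c.close()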
from itertools import product
from sklearn.base import clone
from sklearn.preprocessing import FunctionTransformer
from sklearn.model_selection import ParameterGrid
from imblearn.pipeline import Pipeline
from rlearn.utils import check_random_states
def check_pipelines(objects_list, random_state, n_runs):
"""Extract estimators and parameters grids."""
# Create random states
random_states = check_random_states(random_state, n_runs)
pipelines = []
param_grid = []
for comb, rs in product(product(*objects_list), random_states):
name = "|".join([i[0] for i in comb])
# name, object, sub grid
comb = [
(nm, ob, ParameterGrid(sg))
if ob is not None
else (nm, FunctionTransformer(), ParameterGrid(sg))
for nm, ob, sg in comb
]
# Create estimator
if name not in [n[0] for n in pipelines]:
est = Pipeline([(nm, ob) for nm, ob, _ in comb])
pipelines.append((name, est))
# Create intermediate parameter grids
sub_grids = [
[{f"{nm}__{k}": v for k, v in param_def.items()} for param_def in sg]
for nm, obj, sg in comb
]
# Create parameter grids
for sub_grid in product(*sub_grids):
param_prefix = "" if len(comb) == 1 else f"{name}__"
grid = {"est_name": [name]}
grid.update(
{f"{param_prefix}{k}": [v] for d in sub_grid for k, v in d.items()}
)
random_states = {
f"{param_prefix}{param}": [rs]
for param in est.get_params()
if "random_state" in param
}
grid.update(random_states)
# Avoid multiple runs over pipelines without random state
if grid not in param_grid:
param_grid.append(grid)
return pipelines, param_grid
def check_pipelines_wrapper(
objects_list, wrapper, random_state, n_runs, wrapped_only=False
):
wrapper_label = wrapper[0]
wrapper_obj = wrapper[1]
wrapper_grid = wrapper[2]
estimators, param_grids = check_pipelines(objects_list, random_state, n_runs)
wrapped_estimators = [
(
f"{wrapper_label}|{name}",
clone(wrapper_obj).set_params(**{"classifier": pipeline}),
)
for name, pipeline in estimators
]
wrapped_param_grids = [
{
"est_name": [f'{wrapper_label}|{d["est_name"][0]}'],
**{
f'{wrapper_label}|{d["est_name"][0]}__classifier__{k}': v
for k, v in d.items()
if k != "est_name"
},
**{
f'{wrapper_label}|{d["est_name"][0]}__{k}': v
for k, v in wrapper_grid.items()
},
}
for d in param_grids
]
if wrapped_only:
return wrapped_estimators, wrapped_param_grids
else:
return (estimators + wrapped_estimators, param_grids + wrapped_param_grids)
| [
"rlearn.utils.check_random_states",
"sklearn.model_selection.ParameterGrid",
"sklearn.base.clone",
"itertools.product",
"imblearn.pipeline.Pipeline",
"sklearn.preprocessing.FunctionTransformer"
]
| [((407, 448), 'rlearn.utils.check_random_states', 'check_random_states', (['random_state', 'n_runs'], {}), '(random_state, n_runs)\n', (426, 448), False, 'from rlearn.utils import check_random_states\n'), ((517, 539), 'itertools.product', 'product', (['*objects_list'], {}), '(*objects_list)\n', (524, 539), False, 'from itertools import product\n'), ((1269, 1288), 'itertools.product', 'product', (['*sub_grids'], {}), '(*sub_grids)\n', (1276, 1288), False, 'from itertools import product\n'), ((929, 971), 'imblearn.pipeline.Pipeline', 'Pipeline', (['[(nm, ob) for nm, ob, _ in comb]'], {}), '([(nm, ob) for nm, ob, _ in comb])\n', (937, 971), False, 'from imblearn.pipeline import Pipeline\n'), ((675, 692), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['sg'], {}), '(sg)\n', (688, 692), False, 'from sklearn.model_selection import ParameterGrid\n'), ((746, 767), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', ([], {}), '()\n', (765, 767), False, 'from sklearn.preprocessing import FunctionTransformer\n'), ((769, 786), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['sg'], {}), '(sg)\n', (782, 786), False, 'from sklearn.model_selection import ParameterGrid\n'), ((2285, 2303), 'sklearn.base.clone', 'clone', (['wrapper_obj'], {}), '(wrapper_obj)\n', (2290, 2303), False, 'from sklearn.base import clone\n')] |
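A small illustrative call, assuming scikit-learn, imbalanced-learn and research-learn (rlearn) are installed; the estimators and grid values are arbitrary:

from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

objects_list = [
    [('scaler', StandardScaler(), {})],
    [('lr', LogisticRegression(), {'C': [0.1, 1.0]})],
]
pipelines, param_grid = check_pipelines(objects_list, random_state=0, n_runs=2)
# pipelines -> [('scaler|lr', Pipeline(...))]; param_grid holds one dict per (C value, run) pair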
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from clean_transcript import clean_transcript
ALPHABET_FILE_PATH = "/DeepSpeech/bin/bangor_welsh/alphabet.txt"
def validate_label(label):
clean = clean_transcript(ALPHABET_FILE_PATH)
cleaned, transcript = clean.clean(label)
if cleaned:
return transcript.lower()
return None
| [
"clean_transcript.clean_transcript"
]
| [((201, 237), 'clean_transcript.clean_transcript', 'clean_transcript', (['ALPHABET_FILE_PATH'], {}), '(ALPHABET_FILE_PATH)\n', (217, 237), False, 'from clean_transcript import clean_transcript\n')] |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False,
bias_scale=0.0,
dropout=0.0,
):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = list(cutoffs) + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# [21-09-15 AG]: bake the first False into the definition, just as [0] is built into the cutoffs
if tie_projs is None: tie_projs = []
elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
else: tie_projs = list(tie_projs)
tie_projs = [False] + tie_projs
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
self.dropout = dropout
self.drop = nn.Dropout(dropout)
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
# self.out_projs = [None] * len(self.cutoffs)
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
for bias in self.out_layers_biases:
bound = bias_scale * d_proj ** -.5
nn.init.uniform_(bias, -bound, bound)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
if self.dropout > 0.0:
logit = hidden @ proj
logit = self.drop(logit)
logit = logit @ weight.t()
else:
logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
# [21-09-15 AG]: TODO may need to handle key_padding_mask
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
hidden = hidden.reshape(-1, hidden.size(-1))
target = target.reshape(-1)
if hidden.size(0) != target.size(0):
print(hidden.shape, target.shape)
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return nll.mean() # TODO maybe cases for length or padding_mask
class AdaptiveEmbedding(nn.Module):
""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation
Initialization has been fixed for the case when d_proj = d_embed
"""
def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = list(cutoffs) + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
if d_proj != d_embed: # TODO
# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
def forward(self, inp, *args, **kwargs):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
embed = self.drop(embed)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
# Changes
# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
embeddings = []
indices = torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] > max token
_total_tokens = 0
# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
_tokens = indices_i.numel()
if _tokens == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = self.drop(emb_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
# Changes
embeddings.append(emb_i)
indices.index_put_(
(indices_i,),
torch.arange(_tokens, device=inp.device) + _total_tokens
)
_total_tokens += _tokens
# emb_flat.index_copy_(0, indices_i, emb_i)
embeddings = torch.cat(embeddings, dim=0)
emb_flat = embeddings[indices]
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
# embed.div_(self.emb_scale)
return embed
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
### Just for this codebase, we need to squeeze the last dimension because inputs are always given as (B, L, D) instead of (B, L)
import src.models.nn.utils as U
# AdaptiveEmbedding = U.Squeeze(AdaptiveEmbedding)
| [
"torch.nn.functional.linear",
"torch.nn.Identity",
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.nn.ModuleList",
"torch.FloatTensor",
"torch.typename",
"torch.zeros_like",
"torch.cat",
"torch.arange",
"functools.partial",
"torch.nn.functional.log_softmax",
"torch.nn.ParameterList",
"torch.nn.init.uniform_",
"torch.zeros",
"torch.nn.init.normal_"
]
| [((13488, 13533), 'functools.partial', 'functools.partial', (['_init_weight'], {'default': '(0.02)'}), '(_init_weight, default=0.02)\n', (13505, 13533), False, 'import functools\n'), ((13547, 13592), 'functools.partial', 'functools.partial', (['_init_weight'], {'default': '(0.01)'}), '(_init_weight, default=0.01)\n', (13564, 13592), False, 'import functools\n'), ((13432, 13472), 'torch.nn.init.normal_', 'nn.init.normal_', (['weight'], {'mean': '(0)', 'std': 'std'}), '(weight, mean=0, std=std)\n', (13447, 13472), True, 'import torch.nn as nn\n'), ((2796, 2814), 'torch.nn.ParameterList', 'nn.ParameterList', ([], {}), '()\n', (2812, 2814), True, 'import torch.nn as nn\n'), ((2959, 2978), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2969, 2978), True, 'import torch.nn as nn\n'), ((9817, 9832), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (9830, 9832), True, 'import torch.nn as nn\n'), ((9858, 9876), 'torch.nn.ParameterList', 'nn.ParameterList', ([], {}), '()\n', (9874, 9876), True, 'import torch.nn as nn\n'), ((2672, 2690), 'torch.nn.ParameterList', 'nn.ParameterList', ([], {}), '()\n', (2688, 2690), True, 'import torch.nn as nn\n'), ((4612, 4649), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['bias', '(-bound)', 'bound'], {}), '(bias, -bound, bound)\n', (4628, 4649), True, 'import torch.nn as nn\n'), ((4793, 4828), 'torch.nn.functional.linear', 'F.linear', (['hidden', 'weight'], {'bias': 'bias'}), '(hidden, weight, bias=bias)\n', (4801, 4828), True, 'import torch.nn.functional as F\n'), ((7488, 7520), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['head_logit'], {'dim': '(1)'}), '(head_logit, dim=1)\n', (7501, 7520), True, 'import torch.nn.functional as F\n'), ((7540, 7606), 'torch.zeros_like', 'torch.zeros_like', (['target'], {'dtype': 'hidden.dtype', 'device': 'hidden.device'}), '(target, dtype=hidden.dtype, device=hidden.device)\n', (7556, 7606), False, 'import torch\n'), ((9647, 9666), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (9657, 9666), True, 'import torch.nn as nn\n'), ((9689, 9702), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (9700, 9702), True, 'import torch.nn as nn\n'), ((11812, 11838), 'torch.zeros_like', 'torch.zeros_like', (['inp_flat'], {}), '(inp_flat)\n', (11828, 11838), False, 'import torch\n'), ((12947, 12975), 'torch.cat', 'torch.cat', (['embeddings'], {'dim': '(0)'}), '(embeddings, dim=0)\n', (12956, 12975), False, 'import torch\n'), ((2479, 2521), 'torch.zeros', 'torch.zeros', (['self.n_clusters', 'self.d_embed'], {}), '(self.n_clusters, self.d_embed)\n', (2490, 2521), False, 'import torch\n'), ((2568, 2596), 'torch.zeros', 'torch.zeros', (['self.n_clusters'], {}), '(self.n_clusters)\n', (2579, 2596), False, 'import torch\n'), ((9937, 9994), 'torch.nn.Embedding', 'nn.Embedding', (['n_token', 'd_embed'], {'sparse': '(sample_softmax > 0)'}), '(n_token, d_embed, sparse=sample_softmax > 0)\n', (9949, 9994), True, 'import torch.nn as nn\n'), ((11500, 11534), 'torch.nn.functional.linear', 'F.linear', (['embed', 'self.emb_projs[0]'], {}), '(embed, self.emb_projs[0])\n', (11508, 11534), True, 'import torch.nn.functional as F\n'), ((12552, 12586), 'torch.nn.functional.linear', 'F.linear', (['emb_i', 'self.emb_projs[i]'], {}), '(emb_i, self.emb_projs[i])\n', (12560, 12586), True, 'import torch.nn.functional as F\n'), ((1165, 1182), 'torch.typename', 'torch.typename', (['p'], {}), '(p)\n', (1179, 1182), False, 'import torch\n'), ((3546, 3566), 'torch.zeros', 'torch.zeros', (['n_token'], {}), 
'(n_token)\n', (3557, 3566), False, 'import torch\n'), ((7024, 7073), 'torch.cat', 'torch.cat', (['[weight_i, self.cluster_weight]'], {'dim': '(0)'}), '([weight_i, self.cluster_weight], dim=0)\n', (7033, 7073), False, 'import torch\n'), ((7128, 7173), 'torch.cat', 'torch.cat', (['[bias_i, self.cluster_bias]'], {'dim': '(0)'}), '([bias_i, self.cluster_bias], dim=0)\n', (7137, 7173), False, 'import torch\n'), ((8571, 8605), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['tail_logit_i'], {'dim': '(1)'}), '(tail_logit_i, dim=1)\n', (8584, 8605), True, 'import torch.nn.functional as F\n'), ((10792, 10828), 'torch.nn.Embedding', 'nn.Embedding', (['(r_idx - l_idx)', 'd_emb_i'], {}), '(r_idx - l_idx, d_emb_i)\n', (10804, 10828), True, 'import torch.nn as nn\n'), ((3707, 3736), 'torch.zeros', 'torch.zeros', (['n_token', 'd_embed'], {}), '(n_token, d_embed)\n', (3718, 3736), False, 'import torch\n'), ((4264, 4290), 'torch.zeros', 'torch.zeros', (['(r_idx - l_idx)'], {}), '(r_idx - l_idx)\n', (4275, 4290), False, 'import torch\n'), ((10356, 10390), 'torch.FloatTensor', 'torch.FloatTensor', (['d_proj', 'd_embed'], {}), '(d_proj, d_embed)\n', (10373, 10390), False, 'import torch\n'), ((11067, 11101), 'torch.FloatTensor', 'torch.FloatTensor', (['d_proj', 'd_emb_i'], {}), '(d_proj, d_emb_i)\n', (11084, 11101), False, 'import torch\n'), ((12745, 12785), 'torch.arange', 'torch.arange', (['_tokens'], {'device': 'inp.device'}), '(_tokens, device=inp.device)\n', (12757, 12785), False, 'import torch\n'), ((4131, 4159), 'torch.zeros', 'torch.zeros', (['d_proj', 'd_emb_i'], {}), '(d_proj, d_emb_i)\n', (4142, 4159), False, 'import torch\n'), ((4446, 4481), 'torch.zeros', 'torch.zeros', (['(r_idx - l_idx)', 'd_emb_i'], {}), '(r_idx - l_idx, d_emb_i)\n', (4457, 4481), False, 'import torch\n'), ((3293, 3321), 'torch.zeros', 'torch.zeros', (['d_proj', 'd_embed'], {}), '(d_proj, d_embed)\n', (3304, 3321), False, 'import torch\n'), ((6338, 6366), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logit'], {'dim': '(-1)'}), '(logit, dim=-1)\n', (6351, 6366), True, 'import torch.nn.functional as F\n')] |
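A shape-level usage sketch, assuming the classes above are importable from their original repository (the trailing src.models.nn.utils import ties the file to it); vocabulary size, dimensions and cutoffs are arbitrary:

V, D = 10000, 256
emb = AdaptiveEmbedding(V, d_embed=D, d_proj=D, cutoffs=[2000, 6000], div_val=2)
crit = ProjectedAdaptiveLogSoftmax(V, d_embed=D, d_proj=D, cutoffs=[2000, 6000],
                                  div_val=2, tie_projs=False)
tokens = torch.randint(0, V, (4, 32))   # (batch, length)
hidden = emb(tokens)                    # (batch, length, D)
loss = crit(hidden, tokens)             # scalar mean negative log-likelihood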
import json
import csv
import sys
import os
import re
import codecs
import logging
from logging.config import dictConfig
import click
import yaml
from sqlalchemy import create_engine
from jsontableschema_sql import Storage
from smart_open import smart_open
from . import postgres
from . import carto
csv.field_size_limit(sys.maxsize)
def get_logger(logging_config):
try:
with open(logging_config) as file:
config = yaml.load(file)
dictConfig(config)
except:
FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr)
logger = logging.getLogger('the_el')
def exception_handler(type, value, tb):
logger.exception("Uncaught exception: {}".format(str(value)), exc_info=(type, value, tb))
sys.excepthook = exception_handler
return logger
@click.group()
def main():
pass
def get_connection_string(connection_string):
connection_string = os.getenv('CONNECTION_STRING', connection_string)
if connection_string == None:
raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required')
return connection_string
def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None):
engine = create_engine(connection_string)
storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True)
return engine, storage
def fopen(file, mode='r'):
if file == None:
if mode == 'r':
return sys.stdin
elif mode == 'w':
return sys.stdout
else:
return smart_open(file, mode=mode)
def get_table_schema(table_schema_path):
with fopen(table_schema_path) as file:
contents = file.read()
if not isinstance(contents, str):
contents = contents.decode('utf-8')
return json.loads(contents)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
def describe_table(table_name, connection_string, output_file, db_schema, geometry_support):
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
descriptor = storage.describe(table_name)
with fopen(output_file, mode='w') as file:
json.dump(descriptor, file)
@main.command()
@click.argument('table_name')
@click.argument('table_schema_path')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--indexes-fields')
@click.option('--geometry-support')
@click.option('--if-not-exists', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def create_table(table_name,
table_schema_path,
connection_string,
db_schema,
indexes_fields,
geometry_support,
if_not_exists,
logging_config):
logger = get_logger(logging_config)
table_schema = get_table_schema(table_schema_path)
if indexes_fields != None:
indexes_fields = indexes_fields.split(',')
if re.match(carto.carto_connection_string_regex, connection_string) != None:
load_postgis = geometry_support == 'postgis'
logger.info('{} - Creating table using Carto'.format(table_name))
return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string)
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
logger.info('{} - Creating table using SQLAlchemy'.format(table_name))
storage.create(table_name, table_schema, indexes_fields=indexes_fields)
@main.command()
@click.argument('table_name')
@click.option('--table-schema-path')
@click.option('--connection-string')
@click.option('-f','--input-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--skip-headers', is_flag=True)
@click.option('--indexes-fields')
@click.option('--upsert', is_flag=True)
@click.option('--truncate/--no-truncate', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def write(table_name,
table_schema_path,
connection_string,
input_file,
db_schema,
geometry_support,
from_srid,
skip_headers,
indexes_fields,
upsert,
truncate,
logging_config):
logger = get_logger(logging_config)
table_schema = get_table_schema(table_schema_path)
## TODO: csv settings? use Frictionless Data csv standard?
    ## TODO: support line delimited json?
with fopen(input_file) as file:
rows = csv.reader(file)
if skip_headers:
next(rows)
if re.match(carto.carto_connection_string_regex, connection_string) != None:
load_postgis = geometry_support == 'postgis'
if indexes_fields != None:
indexes_fields = indexes_fields.split(',')
logger.info('{} - Writing to table using Carto'.format(table_name))
carto.load(logger,
db_schema,
table_name,
load_postgis,
table_schema,
connection_string,
rows,
indexes_fields,
truncate)
else:
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid)
## TODO: truncate? carto does. Makes this idempotent
logger.info('{} - Writing to table using SQLAlchemy'.format(table_name))
if table_schema_path != None:
table_schema = get_table_schema(table_schema_path)
storage.describe(table_name, descriptor=table_schema)
else:
storage.describe(table_name)
if upsert:
postgres.upsert(engine, db_schema, table_name, table_schema, rows)
elif geometry_support == None and engine.dialect.driver == 'psycopg2':
postgres.copy_from(engine, table_name, table_schema, rows)
else:
storage.write(table_name, rows)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--to-srid')
@click.option('--logging-config', default='logging_config.conf')
def read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config):
logger = get_logger(logging_config)
connection_string = get_connection_string(connection_string)
engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid)
## TODO: csv settings? use Frictionless Data csv standard?
## TODO: support line delimited json?
with fopen(output_file, mode='w') as file:
writer = csv.writer(file)
descriptor = storage.describe(table_name)
fields = map(lambda x: x['name'], descriptor['fields'])
writer.writerow(fields)
if geometry_support == None and engine.dialect.driver == 'psycopg2':
postgres.copy_to(engine, table_name, file)
else:
for row in storage.iter(table_name):
row_out = []
for field in row:
if isinstance(field, dict) or isinstance(field, list):
field = json.dumps(field)
row_out.append(field)
writer.writerow(row_out)
@main.command()
@click.argument('new_table_name')
@click.argument('old_table_name')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--select-users', help='Users to grant SELECT on updated table')
@click.option('--logging-config', default='logging_config.conf')
def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config):
logger = get_logger(logging_config)
if re.match(carto.carto_connection_string_regex, connection_string) != None:
if select_users != None:
select_users = select_users.split(',')
else:
select_users = []
logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name, old_table_name))
return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string)
connection_string = get_connection_string(connection_string)
engine = create_engine(connection_string)
if engine.dialect.driver == 'psycopg2':
logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name, old_table_name))
conn = engine.raw_connection()
try:
with conn.cursor() as cur:
sql = 'ALTER TABLE "{}" RENAME TO "{}_old";'.format(old_table_name, old_table_name) +\
'ALTER TABLE "{}" RENAME TO "{}";'.format(new_table_name, old_table_name) +\
'DROP TABLE "{}_old";'.format(old_table_name)
cur.execute(sql)
conn.commit()
except:
conn.rollback()
raise
conn.close()
elif engine.dialect.driver == 'cx_oracle':
logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name))
conn = engine.connect()
if select_users != None:
select_users = select_users.split(',')
else:
select_users = []
grants_sql = []
for user in select_users:
grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip()))
# Oracle does not allow table modification within a transaction, so make individual transactions:
sql1 = 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name)
sql2 = 'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name)
sql3 = 'DROP TABLE {}_old'.format(old_table_name)
try:
conn.execute(sql1)
except:
logger.error("Could not rename {} table. Does it exist?".format(old_table_name))
raise
try:
conn.execute(sql2)
except:
logger.error("Could not rename {} table. Does it exist?".format(new_table_name))
rb_sql = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
conn.execute(rb_sql)
raise
try:
conn.execute(sql3)
except:
logger.error("Could not drop {}_old table. Do you have permission?".format(old_table_name))
rb_sql1 = 'DROP TABLE {}'.format(old_table_name)
conn.execute(rb_sql1)
rb_sql2 = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
conn.execute(rb_sql2)
raise
try:
for sql in grants_sql:
conn.execute(sql)
except:
logger.error("Could not grant all permissions to {}.".format(old_table_name))
raise
else:
raise Exception('`{}` not supported by swap_table'.format(engine.dialect.driver))
| [
"csv.field_size_limit",
"logging.getLogger",
"click.argument",
"json.loads",
"logging.basicConfig",
"os.getenv",
"click.group",
"click.option",
"sqlalchemy.create_engine",
"logging.config.dictConfig",
"re.match",
"csv.writer",
"yaml.load",
"json.dumps",
"smart_open.smart_open",
"jsontableschema_sql.Storage",
"csv.reader",
"json.dump"
]
| [((303, 336), 'csv.field_size_limit', 'csv.field_size_limit', (['sys.maxsize'], {}), '(sys.maxsize)\n', (323, 336), False, 'import csv\n'), ((899, 912), 'click.group', 'click.group', ([], {}), '()\n', (910, 912), False, 'import click\n'), ((2014, 2042), 'click.argument', 'click.argument', (['"""table_name"""'], {}), "('table_name')\n", (2028, 2042), False, 'import click\n'), ((2044, 2079), 'click.option', 'click.option', (['"""--connection-string"""'], {}), "('--connection-string')\n", (2056, 2079), False, 'import click\n'), ((2081, 2116), 'click.option', 'click.option', (['"""-o"""', '"""--output-file"""'], {}), "('-o', '--output-file')\n", (2093, 2116), False, 'import click\n'), ((2117, 2144), 'click.option', 'click.option', (['"""--db-schema"""'], {}), "('--db-schema')\n", (2129, 2144), False, 'import click\n'), ((2146, 2180), 'click.option', 'click.option', (['"""--geometry-support"""'], {}), "('--geometry-support')\n", (2158, 2180), False, 'import click\n'), ((2581, 2609), 'click.argument', 'click.argument', (['"""table_name"""'], {}), "('table_name')\n", (2595, 2609), False, 'import click\n'), ((2611, 2646), 'click.argument', 'click.argument', (['"""table_schema_path"""'], {}), "('table_schema_path')\n", (2625, 2646), False, 'import click\n'), ((2648, 2683), 'click.option', 'click.option', (['"""--connection-string"""'], {}), "('--connection-string')\n", (2660, 2683), False, 'import click\n'), ((2685, 2712), 'click.option', 'click.option', (['"""--db-schema"""'], {}), "('--db-schema')\n", (2697, 2712), False, 'import click\n'), ((2714, 2746), 'click.option', 'click.option', (['"""--indexes-fields"""'], {}), "('--indexes-fields')\n", (2726, 2746), False, 'import click\n'), ((2748, 2782), 'click.option', 'click.option', (['"""--geometry-support"""'], {}), "('--geometry-support')\n", (2760, 2782), False, 'import click\n'), ((2784, 2844), 'click.option', 'click.option', (['"""--if-not-exists"""'], {'is_flag': '(True)', 'default': '(False)'}), "('--if-not-exists', is_flag=True, default=False)\n", (2796, 2844), False, 'import click\n'), ((2846, 2909), 'click.option', 'click.option', (['"""--logging-config"""'], {'default': '"""logging_config.conf"""'}), "('--logging-config', default='logging_config.conf')\n", (2858, 2909), False, 'import click\n'), ((4023, 4051), 'click.argument', 'click.argument', (['"""table_name"""'], {}), "('table_name')\n", (4037, 4051), False, 'import click\n'), ((4053, 4088), 'click.option', 'click.option', (['"""--table-schema-path"""'], {}), "('--table-schema-path')\n", (4065, 4088), False, 'import click\n'), ((4090, 4125), 'click.option', 'click.option', (['"""--connection-string"""'], {}), "('--connection-string')\n", (4102, 4125), False, 'import click\n'), ((4127, 4161), 'click.option', 'click.option', (['"""-f"""', '"""--input-file"""'], {}), "('-f', '--input-file')\n", (4139, 4161), False, 'import click\n'), ((4162, 4189), 'click.option', 'click.option', (['"""--db-schema"""'], {}), "('--db-schema')\n", (4174, 4189), False, 'import click\n'), ((4191, 4225), 'click.option', 'click.option', (['"""--geometry-support"""'], {}), "('--geometry-support')\n", (4203, 4225), False, 'import click\n'), ((4227, 4254), 'click.option', 'click.option', (['"""--from-srid"""'], {}), "('--from-srid')\n", (4239, 4254), False, 'import click\n'), ((4256, 4300), 'click.option', 'click.option', (['"""--skip-headers"""'], {'is_flag': '(True)'}), "('--skip-headers', is_flag=True)\n", (4268, 4300), False, 'import click\n'), ((4302, 4334), 'click.option', 'click.option', 
(['"""--indexes-fields"""'], {}), "('--indexes-fields')\n", (4314, 4334), False, 'import click\n'), ((4336, 4374), 'click.option', 'click.option', (['"""--upsert"""'], {'is_flag': '(True)'}), "('--upsert', is_flag=True)\n", (4348, 4374), False, 'import click\n'), ((4376, 4445), 'click.option', 'click.option', (['"""--truncate/--no-truncate"""'], {'is_flag': '(True)', 'default': '(False)'}), "('--truncate/--no-truncate', is_flag=True, default=False)\n", (4388, 4445), False, 'import click\n'), ((4447, 4510), 'click.option', 'click.option', (['"""--logging-config"""'], {'default': '"""logging_config.conf"""'}), "('--logging-config', default='logging_config.conf')\n", (4459, 4510), False, 'import click\n'), ((6711, 6739), 'click.argument', 'click.argument', (['"""table_name"""'], {}), "('table_name')\n", (6725, 6739), False, 'import click\n'), ((6741, 6776), 'click.option', 'click.option', (['"""--connection-string"""'], {}), "('--connection-string')\n", (6753, 6776), False, 'import click\n'), ((6778, 6813), 'click.option', 'click.option', (['"""-o"""', '"""--output-file"""'], {}), "('-o', '--output-file')\n", (6790, 6813), False, 'import click\n'), ((6814, 6841), 'click.option', 'click.option', (['"""--db-schema"""'], {}), "('--db-schema')\n", (6826, 6841), False, 'import click\n'), ((6843, 6877), 'click.option', 'click.option', (['"""--geometry-support"""'], {}), "('--geometry-support')\n", (6855, 6877), False, 'import click\n'), ((6879, 6906), 'click.option', 'click.option', (['"""--from-srid"""'], {}), "('--from-srid')\n", (6891, 6906), False, 'import click\n'), ((6908, 6933), 'click.option', 'click.option', (['"""--to-srid"""'], {}), "('--to-srid')\n", (6920, 6933), False, 'import click\n'), ((6935, 6998), 'click.option', 'click.option', (['"""--logging-config"""'], {'default': '"""logging_config.conf"""'}), "('--logging-config', default='logging_config.conf')\n", (6947, 6998), False, 'import click\n'), ((8175, 8207), 'click.argument', 'click.argument', (['"""new_table_name"""'], {}), "('new_table_name')\n", (8189, 8207), False, 'import click\n'), ((8209, 8241), 'click.argument', 'click.argument', (['"""old_table_name"""'], {}), "('old_table_name')\n", (8223, 8241), False, 'import click\n'), ((8243, 8278), 'click.option', 'click.option', (['"""--connection-string"""'], {}), "('--connection-string')\n", (8255, 8278), False, 'import click\n'), ((8280, 8307), 'click.option', 'click.option', (['"""--db-schema"""'], {}), "('--db-schema')\n", (8292, 8307), False, 'import click\n'), ((8309, 8386), 'click.option', 'click.option', (['"""--select-users"""'], {'help': '"""Users to grant SELECT on updated table"""'}), "('--select-users', help='Users to grant SELECT on updated table')\n", (8321, 8386), False, 'import click\n'), ((8388, 8451), 'click.option', 'click.option', (['"""--logging-config"""'], {'default': '"""logging_config.conf"""'}), "('--logging-config', default='logging_config.conf')\n", (8400, 8451), False, 'import click\n'), ((667, 694), 'logging.getLogger', 'logging.getLogger', (['"""the_el"""'], {}), "('the_el')\n", (684, 694), False, 'import logging\n'), ((1005, 1054), 'os.getenv', 'os.getenv', (['"""CONNECTION_STRING"""', 'connection_string'], {}), "('CONNECTION_STRING', connection_string)\n", (1014, 1054), False, 'import os\n'), ((1347, 1379), 'sqlalchemy.create_engine', 'create_engine', (['connection_string'], {}), '(connection_string)\n', (1360, 1379), False, 'from sqlalchemy import create_engine\n'), ((1394, 1518), 'jsontableschema_sql.Storage', 'Storage', (['engine'], 
{'dbschema': 'db_schema', 'geometry_support': 'geometry_support', 'from_srid': 'from_srid', 'to_srid': 'to_srid', 'views': '(True)'}), '(engine, dbschema=db_schema, geometry_support=geometry_support,\n from_srid=from_srid, to_srid=to_srid, views=True)\n', (1401, 1518), False, 'from jsontableschema_sql import Storage\n'), ((9104, 9136), 'sqlalchemy.create_engine', 'create_engine', (['connection_string'], {}), '(connection_string)\n', (9117, 9136), False, 'from sqlalchemy import create_engine\n'), ((467, 485), 'logging.config.dictConfig', 'dictConfig', (['config'], {}), '(config)\n', (477, 485), False, 'from logging.config import dictConfig\n'), ((1725, 1752), 'smart_open.smart_open', 'smart_open', (['file'], {'mode': 'mode'}), '(file, mode=mode)\n', (1735, 1752), False, 'from smart_open import smart_open\n'), ((1975, 1995), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (1985, 1995), False, 'import json\n'), ((2535, 2562), 'json.dump', 'json.dump', (['descriptor', 'file'], {}), '(descriptor, file)\n', (2544, 2562), False, 'import json\n'), ((3360, 3424), 're.match', 're.match', (['carto.carto_connection_string_regex', 'connection_string'], {}), '(carto.carto_connection_string_regex, connection_string)\n', (3368, 3424), False, 'import re\n'), ((5050, 5066), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (5060, 5066), False, 'import csv\n'), ((7526, 7542), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (7536, 7542), False, 'import csv\n'), ((8608, 8672), 're.match', 're.match', (['carto.carto_connection_string_regex', 'connection_string'], {}), '(carto.carto_connection_string_regex, connection_string)\n', (8616, 8672), False, 'import re\n'), ((443, 458), 'yaml.load', 'yaml.load', (['file'], {}), '(file)\n', (452, 458), False, 'import yaml\n'), ((579, 652), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT', 'level': 'logging.INFO', 'stream': 'sys.stderr'}), '(format=FORMAT, level=logging.INFO, stream=sys.stderr)\n', (598, 652), False, 'import logging\n'), ((5128, 5192), 're.match', 're.match', (['carto.carto_connection_string_regex', 'connection_string'], {}), '(carto.carto_connection_string_regex, connection_string)\n', (5136, 5192), False, 'import re\n'), ((8056, 8073), 'json.dumps', 'json.dumps', (['field'], {}), '(field)\n', (8066, 8073), False, 'import json\n')] |
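A small sketch of the two helpers above used outside the CLI, assuming a reachable PostgreSQL instance; the DSN, schema path and table name are hypothetical:

engine, storage = create_storage_adaptor('postgresql://user:pass@localhost/db', 'public', None)
table_schema = get_table_schema('schemas/my_table.json')
storage.create('my_table', table_schema, indexes_fields=None)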
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is based on speech_to_text_infer.py and allows you to score the hypotheses
with sclite. A local installation from https://github.com/usnistgov/SCTK is required.
Hypotheses and references are first saved in trn format and are scored after applying a glm
file (if provided).
"""
import errno
import json
import os
import subprocess
from argparse import ArgumentParser
import torch
from nemo.collections.asr.metrics.wer import WER
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=""):
sclite_path = os.path.join(sctk_dir, "bin", "sclite")
if not os.path.exists(sclite_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path)
# apply glm
if os.path.exists(glm):
rfilter_path = os.path.join(sctk_dir, "bin", "rfilter1")
if not os.path.exists(rfilter_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path)
hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + ".glm"
rfilt_cmd = [rfilter_path] + [glm]
with open(hypglm, "w") as hypf, open(hyp_fname, "r") as hyp_in:
subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf)
refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + ".glm"
with open(refglm, "w") as reff, open(ref_fname, "r") as ref_in:
subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff)
else:
refglm = ref_fname
hypglm = hyp_fname
_ = subprocess.check_output(f"{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all", shell=True)
can_gpu = torch.cuda.is_available()
def get_utt_info(manifest_path):
info_list = []
with open(manifest_path, "r") as utt_f:
for line in utt_f:
utt = json.loads(line)
info_list.append(utt)
return info_list
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, default="QuartzNet15x5Base-En", required=False, help="Pass: 'QuartzNet15x5Base-En'",
)
parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument(
"--dont_normalize_text",
default=False,
action='store_true',
help="Turn off trasnscript normalization. Recommended for non-English.",
)
parser.add_argument("--out_dir", type=str, required=True, help="Destination dir for output files")
parser.add_argument("--sctk_dir", type=str, required=False, default="", help="Path to sctk root dir")
parser.add_argument("--glm", type=str, required=False, default="", help="Path to glm file")
args = parser.parse_args()
torch.set_grad_enabled(False)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
use_sctk = os.path.exists(args.sctk_dir)
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model)
asr_model.setup_test_data(
test_data_config={
'sample_rate': 16000,
'manifest_filepath': args.dataset,
'labels': asr_model.decoder.vocabulary,
'batch_size': args.batch_size,
'normalize_transcripts': not args.dont_normalize_text,
}
)
if can_gpu:
asr_model = asr_model.cuda()
asr_model.eval()
labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))])
wer = WER(vocabulary=asr_model.decoder.vocabulary)
hypotheses = []
references = []
all_log_probs = []
for test_batch in asr_model.test_dataloader():
if can_gpu:
test_batch = [x.cuda() for x in test_batch]
with autocast():
log_probs, encoded_len, greedy_predictions = asr_model(
input_signal=test_batch[0], input_signal_length=test_batch[1]
)
for r in log_probs.cpu().numpy():
all_log_probs.append(r)
hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions)
for batch_ind in range(greedy_predictions.shape[0]):
reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()])
references.append(reference)
del test_batch
info_list = get_utt_info(args.dataset)
hypfile = os.path.join(args.out_dir, "hyp.trn")
reffile = os.path.join(args.out_dir, "ref.trn")
with open(hypfile, "w") as hyp_f, open(reffile, "w") as ref_f:
for i in range(len(hypotheses)):
utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0]
# rfilter in sctk likes each transcript to have a space at the beginning
hyp_f.write(" " + hypotheses[i] + " (" + utt_id + ")" + "\n")
ref_f.write(" " + references[i] + " (" + utt_id + ")" + "\n")
if use_sctk:
score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| [
"subprocess.check_output",
"os.path.exists",
"nemo.utils.logging.info",
"json.loads",
"argparse.ArgumentParser",
"nemo.collections.asr.metrics.wer.WER",
"os.makedirs",
"nemo.collections.asr.models.EncDecCTCModel.from_pretrained",
"subprocess.run",
"os.path.join",
"torch.cuda.is_available",
"nemo.collections.asr.models.EncDecCTCModel.restore_from",
"torch.cuda.amp.autocast",
"os.path.basename",
"torch.set_grad_enabled",
"os.strerror"
]
| [((2469, 2494), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2492, 2494), False, 'import torch\n'), ((1409, 1448), 'os.path.join', 'os.path.join', (['sctk_dir', '"""bin"""', '"""sclite"""'], {}), "(sctk_dir, 'bin', 'sclite')\n", (1421, 1448), False, 'import os\n'), ((1598, 1617), 'os.path.exists', 'os.path.exists', (['glm'], {}), '(glm)\n', (1612, 1617), False, 'import os\n'), ((2364, 2461), 'subprocess.check_output', 'subprocess.check_output', (['f"""{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all"""'], {'shell': '(True)'}), "(f'{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all'\n , shell=True)\n", (2387, 2461), False, 'import subprocess\n'), ((2738, 2754), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2752, 2754), False, 'from argparse import ArgumentParser\n'), ((3596, 3625), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (3618, 3625), False, 'import torch\n'), ((3718, 3747), 'os.path.exists', 'os.path.exists', (['args.sctk_dir'], {}), '(args.sctk_dir)\n', (3732, 3747), False, 'import os\n'), ((4606, 4650), 'nemo.collections.asr.metrics.wer.WER', 'WER', ([], {'vocabulary': 'asr_model.decoder.vocabulary'}), '(vocabulary=asr_model.decoder.vocabulary)\n', (4609, 4650), False, 'from nemo.collections.asr.metrics.wer import WER\n'), ((5470, 5507), 'os.path.join', 'os.path.join', (['args.out_dir', '"""hyp.trn"""'], {}), "(args.out_dir, 'hyp.trn')\n", (5482, 5507), False, 'import os\n'), ((5522, 5559), 'os.path.join', 'os.path.join', (['args.out_dir', '"""ref.trn"""'], {}), "(args.out_dir, 'ref.trn')\n", (5534, 5559), False, 'import os\n'), ((1460, 1487), 'os.path.exists', 'os.path.exists', (['sclite_path'], {}), '(sclite_path)\n', (1474, 1487), False, 'import os\n'), ((1642, 1683), 'os.path.join', 'os.path.join', (['sctk_dir', '"""bin"""', '"""rfilter1"""'], {}), "(sctk_dir, 'bin', 'rfilter1')\n", (1654, 1683), False, 'import os\n'), ((3638, 3666), 'os.path.exists', 'os.path.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (3652, 3666), False, 'import os\n'), ((3676, 3701), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (3687, 3701), False, 'import os\n'), ((3798, 3858), 'nemo.utils.logging.info', 'logging.info', (['f"""Using local ASR model from {args.asr_model}"""'], {}), "(f'Using local ASR model from {args.asr_model}')\n", (3810, 3858), False, 'from nemo.utils import logging\n'), ((3879, 3935), 'nemo.collections.asr.models.EncDecCTCModel.restore_from', 'EncDecCTCModel.restore_from', ([], {'restore_path': 'args.asr_model'}), '(restore_path=args.asr_model)\n', (3906, 3935), False, 'from nemo.collections.asr.models import EncDecCTCModel\n'), ((3954, 4013), 'nemo.utils.logging.info', 'logging.info', (['f"""Using NGC cloud ASR model {args.asr_model}"""'], {}), "(f'Using NGC cloud ASR model {args.asr_model}')\n", (3966, 4013), False, 'from nemo.utils import logging\n'), ((4034, 4091), 'nemo.collections.asr.models.EncDecCTCModel.from_pretrained', 'EncDecCTCModel.from_pretrained', ([], {'model_name': 'args.asr_model'}), '(model_name=args.asr_model)\n', (4064, 4091), False, 'from nemo.collections.asr.models import EncDecCTCModel\n'), ((1535, 1560), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (1546, 1560), False, 'import os\n'), ((1699, 1727), 'os.path.exists', 'os.path.exists', (['rfilter_path'], {}), '(rfilter_path)\n', (1713, 1727), False, 'import os\n'), ((2024, 2076), 'subprocess.run', 'subprocess.run', (['rfilt_cmd'], {'stdin': 'hyp_in', 'stdout': 'hypf'}), '(rfilt_cmd, stdin=hyp_in, stdout=hypf)\n', (2038, 2076), False, 'import subprocess\n'), ((2238, 2290), 'subprocess.run', 'subprocess.run', (['rfilt_cmd'], {'stdin': 'ref_in', 'stdout': 'reff'}), '(rfilt_cmd, stdin=ref_in, stdout=reff)\n', (2252, 2290), False, 'import subprocess\n'), ((2638, 2654), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2648, 2654), False, 'import json\n'), ((4854, 4864), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (4862, 4864), False, 'from torch.cuda.amp import autocast\n'), ((1779, 1804), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (1790, 1804), False, 'import os\n'), ((1859, 1886), 'os.path.basename', 'os.path.basename', (['hyp_fname'], {}), '(hyp_fname)\n', (1875, 1886), False, 'import os\n'), ((2116, 2143), 'os.path.basename', 'os.path.basename', (['ref_fname'], {}), '(ref_fname)\n', (2132, 2143), False, 'import os\n'), ((5706, 5754), 'os.path.basename', 'os.path.basename', (["info_list[i]['audio_filepath']"], {}), "(info_list[i]['audio_filepath'])\n", (5722, 5754), False, 'import os\n')]
from manimlib.imports import *
from manimlib.utils import bezier
import numpy as np
class VectorInterpolator:
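    # Piecewise-linear interpolator over a closed polyline: __init__ caches the
    # cumulative arc length at each vertex in self.dists, and interpolate(alpha)
    # maps alpha in [0, 1] to the point that fraction of the way around the path.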
def __init__(self,points):
self.points = points
self.n = len(self.points)
self.dists = [0]
for i in range(len(self.points)):
self.dists += [np.linalg.norm(
self.points[i] -
self.points[(i+1) % self.n]
)+self.dists[i]]
def interpolate(self,alpha):
dist = alpha*self.dists[-1]
idx = self.interpolate_index(dist)
mult = (dist - self.dists[idx])/np.linalg.norm(self.points[(idx+1)%self.n]-self.points[idx])
return self.points[idx] + \
mult*(self.points[(idx+1)%self.n]-self.points[idx])
def interpolate_index(self,dist):
def is_solution(idx):
if idx == self.n-1:
return self.dists[idx] <= dist
else:
                return ((self.dists[idx] <= dist) and
                    (self.dists[(idx+1)%self.n] >= dist))
# binary search
step_size=int(self.n / 4)
cur=int(self.n / 2)
while not is_solution(cur):
if self.dists[cur] > dist:
cur -= step_size
else:
cur += step_size
step_size = max(int(step_size/2), 1)
return cur | [
"numpy.linalg.norm"
]
| [((575, 641), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.points[(idx + 1) % self.n] - self.points[idx])'], {}), '(self.points[(idx + 1) % self.n] - self.points[idx])\n', (589, 641), True, 'import numpy as np\n'), ((300, 362), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.points[i] - self.points[(i + 1) % self.n])'], {}), '(self.points[i] - self.points[(i + 1) % self.n])\n', (314, 362), True, 'import numpy as np\n')] |
from setuptools import setup
setup(name='rapid_plotly',
version='0.1',
description='Convenience functions to rapidly create beautiful Plotly graphs',
author='<NAME>',
author_email='<EMAIL>',
packages=['rapid_plotly'],
zip_safe=False)
| [
"setuptools.setup"
]
| [((30, 247), 'setuptools.setup', 'setup', ([], {'name': '"""rapid_plotly"""', 'version': '"""0.1"""', 'description': '"""Convenience functions to rapidly create beautiful Plotly graphs"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['rapid_plotly']", 'zip_safe': '(False)'}), "(name='rapid_plotly', version='0.1', description=\n 'Convenience functions to rapidly create beautiful Plotly graphs',\n author='<NAME>', author_email='<EMAIL>', packages=['rapid_plotly'],\n zip_safe=False)\n", (35, 247), False, 'from setuptools import setup\n')] |
from __future__ import division
from unittest import skipIf, TestCase
import os
from pandas import DataFrame
import numpy as np
from numpy.testing import assert_array_equal
BACKEND_AVAILABLE = os.environ.get("ETS_TOOLKIT", "qt4") != "null"
if BACKEND_AVAILABLE:
from app_common.apptools.testing_utils import assert_obj_gui_works
from pybleau.app.plotting.plot_config import HeatmapPlotConfigurator, \
HEATMAP_PLOT_TYPE, HistogramPlotConfigurator, HIST_PLOT_TYPE, \
LinePlotConfigurator, BarPlotConfigurator, ScatterPlotConfigurator, \
SCATTER_PLOT_TYPE, CMAP_SCATTER_PLOT_TYPE, LINE_PLOT_TYPE, \
BAR_PLOT_TYPE
LEN = 16
TEST_DF = DataFrame({"a": [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
"b": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
"c": [1, 2, 3, 4, 2, 3, 1, 1, 4, 4, 5, 6, 4, 4, 5, 6],
"d": list("ababcabcdabcdeab"),
"e": np.random.randn(LEN),
"f": range(LEN),
# Highly repetitive column to split the entire data into 2
"g": np.array(["0", "1"] * (LEN // 2)),
"h": np.array([0, 1] * (LEN // 2), dtype=bool),
})
class BasePlotConfig(object):
def test_creation_fails_if_no_df(self):
with self.assertRaises(ValueError):
config = self.configurator()
config.to_dict()
def test_bring_up(self):
obj = self.configurator(data_source=TEST_DF)
assert_obj_gui_works(obj)
# Assertion utilities -----------------------------------------------------
def assert_editor_options(self, editor):
editor_options = editor.values
if self.numerical_cols_only:
for col in editor_options:
if col != "index":
self.assertIn(TEST_DF[col].dtype, (np.int64, np.float64))
else:
self.assertEqual(set(editor_options),
set(TEST_DF.columns) | {"index"})
class BaseXYPlotConfig(BasePlotConfig):
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
self.assertIn("y_arr", config_dict)
assert_array_equal(config_dict["y_arr"], TEST_DF["b"].values)
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
def test_plot_colored_by_str_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
d_values = TEST_DF["d"].unique()
self.assertEqual(set(config_dict["x_arr"].keys()), set(d_values))
for arr in config_dict["x_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["x_arr"]["c"], np.array([1, 4, 4]))
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), set(d_values))
for arr in config_dict["y_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["y_arr"]["c"], np.array([2, 2, 3]))
def test_plot_colored_by_bool_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="h")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
hue_values = set(TEST_DF["h"])
self.assertEqual(set(config_dict["x_arr"].keys()), hue_values)
assert_array_equal(config_dict["x_arr"][False], TEST_DF["a"][::2])
assert_array_equal(config_dict["x_arr"][True], TEST_DF["a"][1::2])
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), hue_values)
assert_array_equal(config_dict["y_arr"][False], TEST_DF["b"][::2])
assert_array_equal(config_dict["y_arr"][True], TEST_DF["b"][1::2])
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestScatterPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = ScatterPlotConfigurator
self.basic_type = SCATTER_PLOT_TYPE
self.numerical_cols_only = True
def test_plot_scatter_colored_by_int_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="c")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_plot_scatter_colored_by_float_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_style_colorize_by_float_changes_on_color_column_change(self):
""" The dtype of the column to colorize controls colorize_by_float.
"""
# Color by a string:
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertFalse(config.plot_style.colorize_by_float)
# Color by a float:
config.z_col_name = "e"
self.assertTrue(config.plot_style.colorize_by_float)
def test_scatter_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_scatter_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestLinePlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = LinePlotConfigurator
self.basic_type = LINE_PLOT_TYPE
self.numerical_cols_only = True
def test_line_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_line_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestBarPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = BarPlotConfigurator
self.basic_type = BAR_PLOT_TYPE
self.numerical_cols_only = False
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[3].content[0].content[0].editor
self.assert_editor_options(x_editor)
def test_melt_mode_no_effect(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True)
self.assertEqual(config.plot_type, self.basic_type)
# No columns to melt, so no transformation:
self.assertIs(config.data_source, TEST_DF)
self.assertIs(config.transformed_data, TEST_DF)
def test_melt_mode_with_melted_columns(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"])
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"]*LEN+["f"]*LEN)
assert_array_equal(config.x_arr, x_values)
self.assertEqual(config.x_col_name, "variable")
self.assertEqual(len(config.y_arr), 2 * LEN)
self.assertEqual(config.y_col_name, "value")
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], x_values)
self.assertIn("y_arr", config_dict)
self.assertEqual(len(config_dict["y_arr"]), 2 * LEN)
def test_melt_mode_with_melted_columns_and_str_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="g")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["g"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("g", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
def test_melt_mode_with_melted_columns_and_bool_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="h")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["h"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("h", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHistogramPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HistogramPlotConfigurator
self.basic_type = HIST_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
def test_plot_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF,
x_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHeatmapPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HeatmapPlotConfigurator
self.basic_type = HEATMAP_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
Passing non-numerical
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
| [
"app_common.apptools.testing_utils.assert_obj_gui_works",
"unittest.skipIf",
"os.environ.get",
"numpy.array",
"numpy.random.randn",
"numpy.testing.assert_array_equal"
]
| [((5646, 5702), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (5652, 5702), False, 'from unittest import skipIf, TestCase\n'), ((8462, 8518), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (8468, 8518), False, 'from unittest import skipIf, TestCase\n'), ((9402, 9458), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (9408, 9458), False, 'from unittest import skipIf, TestCase\n'), ((14104, 14160), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (14110, 14160), False, 'from unittest import skipIf, TestCase\n'), ((15401, 15457), 'unittest.skipIf', 'skipIf', (['(not BACKEND_AVAILABLE)', '"""No UI backend available"""'], {}), "(not BACKEND_AVAILABLE, 'No UI backend available')\n", (15407, 15457), False, 'from unittest import skipIf, TestCase\n'), ((195, 231), 'os.environ.get', 'os.environ.get', (['"""ETS_TOOLKIT"""', '"""qt4"""'], {}), "('ETS_TOOLKIT', 'qt4')\n", (209, 231), False, 'import os\n'), ((970, 990), 'numpy.random.randn', 'np.random.randn', (['LEN'], {}), '(LEN)\n', (985, 990), True, 'import numpy as np\n'), ((1136, 1169), 'numpy.array', 'np.array', (["(['0', '1'] * (LEN // 2))"], {}), "(['0', '1'] * (LEN // 2))\n", (1144, 1169), True, 'import numpy as np\n'), ((1197, 1238), 'numpy.array', 'np.array', (['([0, 1] * (LEN // 2))'], {'dtype': 'bool'}), '([0, 1] * (LEN // 2), dtype=bool)\n', (1205, 1238), True, 'import numpy as np\n'), ((1545, 1570), 'app_common.apptools.testing_utils.assert_obj_gui_works', 'assert_obj_gui_works', (['obj'], {}), '(obj)\n', (1565, 1570), False, 'from app_common.apptools.testing_utils import assert_obj_gui_works\n'), ((2451, 2512), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr']", "TEST_DF['a'].values"], {}), "(config_dict['x_arr'], TEST_DF['a'].values)\n", (2469, 2512), False, 'from numpy.testing import assert_array_equal\n'), ((2565, 2626), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['y_arr']", "TEST_DF['b'].values"], {}), "(config_dict['y_arr'], TEST_DF['b'].values)\n", (2583, 2626), False, 'from numpy.testing import assert_array_equal\n'), ((4902, 4968), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr'][False]", "TEST_DF['a'][::2]"], {}), "(config_dict['x_arr'][False], TEST_DF['a'][::2])\n", (4920, 4968), False, 'from numpy.testing import assert_array_equal\n'), ((4977, 5043), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr'][True]", "TEST_DF['a'][1::2]"], {}), "(config_dict['x_arr'][True], TEST_DF['a'][1::2])\n", (4995, 5043), False, 'from numpy.testing import assert_array_equal\n'), ((5226, 5292), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['y_arr'][False]", "TEST_DF['b'][::2]"], {}), "(config_dict['y_arr'][False], TEST_DF['b'][::2])\n", (5244, 5292), False, 'from numpy.testing import assert_array_equal\n'), ((5301, 5367), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['y_arr'][True]", "TEST_DF['b'][1::2]"], {}), "(config_dict['y_arr'][True], TEST_DF['b'][1::2])\n", (5319, 5367), False, 'from numpy.testing import assert_array_equal\n'), ((10803, 10838), 
'numpy.array', 'np.array', (["(['e'] * LEN + ['f'] * LEN)"], {}), "(['e'] * LEN + ['f'] * LEN)\n", (10811, 10838), True, 'import numpy as np\n'), ((10841, 10883), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['config.x_arr', 'x_values'], {}), '(config.x_arr, x_values)\n', (10859, 10883), False, 'from numpy.testing import assert_array_equal\n'), ((11188, 11238), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr']", 'x_values'], {}), "(config_dict['x_arr'], x_values)\n", (11206, 11238), False, 'from numpy.testing import assert_array_equal\n'), ((11803, 11852), 'numpy.array', 'np.array', (["(['e'] * (LEN // 2) + ['f'] * (LEN // 2))"], {}), "(['e'] * (LEN // 2) + ['f'] * (LEN // 2))\n", (11811, 11852), True, 'import numpy as np\n'), ((13182, 13231), 'numpy.array', 'np.array', (["(['e'] * (LEN // 2) + ['f'] * (LEN // 2))"], {}), "(['e'] * (LEN // 2) + ['f'] * (LEN // 2))\n", (13190, 13231), True, 'import numpy as np\n'), ((14760, 14821), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr']", "TEST_DF['a'].values"], {}), "(config_dict['x_arr'], TEST_DF['a'].values)\n", (14778, 14821), False, 'from numpy.testing import assert_array_equal\n'), ((3898, 3917), 'numpy.array', 'np.array', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (3906, 3917), True, 'import numpy as np\n'), ((4274, 4293), 'numpy.array', 'np.array', (['[2, 2, 3]'], {}), '([2, 2, 3])\n', (4282, 4293), True, 'import numpy as np\n'), ((11964, 12011), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['config.x_arr[key]', 'x_values'], {}), '(config.x_arr[key], x_values)\n', (11982, 12011), False, 'from numpy.testing import assert_array_equal\n'), ((12524, 12579), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr'][key]", 'x_values'], {}), "(config_dict['x_arr'][key], x_values)\n", (12542, 12579), False, 'from numpy.testing import assert_array_equal\n'), ((13343, 13390), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['config.x_arr[key]', 'x_values'], {}), '(config.x_arr[key], x_values)\n', (13361, 13390), False, 'from numpy.testing import assert_array_equal\n'), ((13903, 13958), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["config_dict['x_arr'][key]", 'x_values'], {}), "(config_dict['x_arr'][key], x_values)\n", (13921, 13958), False, 'from numpy.testing import assert_array_equal\n')] |
from collections import namedtuple
# Basic example
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0] + p[1])
x, y = p
print(x, y)
print(p.x + p.y)
print(Point(x=11, y=22))
from collections import namedtuple
import csv
f = open("users.csv", "r")
next(f)
reader = csv.reader(f)
student_list = []
for row in reader:
student_list.append(row)
print(row)
print(student_list)
columns = ["user_id", "integration_id", "login_id", "password", "first_name",
"last_name", "full_name", "sortable_name", "short_name",
"email", "status"]
Student = namedtuple('Student', columns)
student_namedtupe_list = []
for row in student_list:
student = Student(*row)
student_namedtupe_list.append(student)
print(student_namedtupe_list[0])
print(student_namedtupe_list[0].full_name)
| [
"collections.namedtuple",
"csv.reader"
]
| [((60, 91), 'collections.namedtuple', 'namedtuple', (['"""Point"""', "['x', 'y']"], {}), "('Point', ['x', 'y'])\n", (70, 91), False, 'from collections import namedtuple\n'), ((286, 299), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (296, 299), False, 'import csv\n'), ((590, 620), 'collections.namedtuple', 'namedtuple', (['"""Student"""', 'columns'], {}), "('Student', columns)\n", (600, 620), False, 'from collections import namedtuple\n')] |
#!/usr/bin/env python
from setuptools import setup
# Modified from http://stackoverflow.com/questions/2058802/
# how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
def version():
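    # Read __version__ out of dark/__init__.py with a regex, without importing
    # the package.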
import os
import re
init = os.path.join('dark', '__init__.py')
with open(init) as fp:
initData = fp.read()
match = re.search(r"^__version__ = ['\"]([^'\"]+)['\"]",
initData, re.M)
if match:
return match.group(1)
else:
raise RuntimeError('Unable to find version string in %r.' % init)
# Explicitly list bin scripts to be installed, seeing as I have a few local
# bin files that are not (yet) part of the distribution.
scripts = [
'bin/aa-info.py',
'bin/aa-to-dna.py',
'bin/aa-to-properties.py',
'bin/adaptor-distances.py',
'bin/alignment-panel-civ.py',
'bin/alignments-per-read.py',
'bin/bit-score-to-e-value.py',
'bin/cat-json-blast-records.py',
'bin/check-fasta-json-blast-consistency.py',
'bin/codon-distance.py',
'bin/compare-consensuses.py',
'bin/compare-sequences.py',
'bin/convert-blast-xml-to-json.py',
'bin/convert-diamond-to-json.py',
'bin/convert-diamond-to-sam.py',
'bin/convert-sam-to-fastq.sh',
'bin/create-newick-relabeling-output.py',
'bin/dark-matter-version.py',
'bin/describe-protein-database.py',
'bin/dna-to-aa.py',
'bin/download-genbank.sh',
'bin/e-value-to-bit-score.py',
'bin/extract-ORFs.py',
'bin/fasta-base-indices.py',
'bin/fasta-count.py',
'bin/fasta-diff.sh',
'bin/fasta-identity-table.py',
'bin/fasta-ids.py',
'bin/fasta-join.py',
'bin/fasta-lengths.py',
'bin/fasta-sequences.py',
'bin/fasta-sort.py',
'bin/fasta-split-by-id.py',
'bin/fasta-subset.py',
'bin/fasta-subtraction.py',
'bin/fasta-to-phylip.py',
'bin/fasta-variable-sites.py',
'bin/filter-fasta-by-complexity.py',
'bin/filter-fasta-by-taxonomy.py',
'bin/filter-fasta.py',
'bin/filter-hits-to-fasta.py',
'bin/filter-reads-alignments.py',
'bin/filter-sam.py',
'bin/find-hits.py',
'bin/format-fasta.py',
'bin/genome-protein-summary.py',
'bin/get-features.py',
'bin/get-hosts.py',
'bin/get-reads.py',
'bin/get-taxonomy.py',
'bin/graph-evalues.py',
'bin/local-align.py',
'bin/make-consensus.py',
'bin/make-fasta-database.py',
'bin/make-protein-database.py',
'bin/ncbi-fetch-id.py',
'bin/newick-to-ascii.py',
'bin/noninteractive-alignment-panel.py',
'bin/parse-genbank-flat-file.py',
'bin/position-summary.py',
'bin/pre-commit.sh',
'bin/print-blast-xml-for-derek.py',
'bin/print-blast-xml.py',
'bin/print-read-lengths.py',
'bin/proteins-to-pathogens.py',
'bin/proteins-to-pathogens-civ.py',
'bin/randomize-fasta.py',
'bin/read-blast-json.py',
'bin/read-blast-xml.py',
'bin/relabel-newick-tree.py',
'bin/run-bwa.py',
'bin/run-bowtie2.py',
'bin/sam-coverage.py',
'bin/sam-coverage-depth.py',
'bin/sam-to-fasta-alignment.py',
'bin/sam-reference-read-counts.py',
'bin/sam-references.py',
'bin/sff-to-fastq.py',
'bin/split-fasta-by-adaptors.py',
'bin/subset-protein-database.py',
'bin/summarize-fasta-bases.py',
'bin/summarize-reads.py',
'bin/trim-primers.py',
'bin/trim-reads.py',
'bin/write-htcondor-job-spec.py',
]
setup(name='dark-matter',
version=version(),
packages=['dark', 'dark.blast', 'dark.diamond', 'dark.civ'],
url='https://github.com/acorg/dark-matter',
download_url='https://github.com/acorg/dark-matter',
author='<NAME>, <NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
keywords=['virus discovery'],
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='MIT',
description='Python classes for working with genetic sequence data',
scripts=scripts,
install_requires=[
'biopython>=1.71',
'bz2file>=0.98',
'Cython>=0.29.16',
'ipython>=3.1.0',
'matplotlib>=1.4.3',
'mysql-connector-python==8.0.11',
'numpy>=1.14.2',
'pysam>=0.15.2',
'pyfaidx>=0.4.8.4',
'pyzmq>=14.3.1',
'requests>=2.18.4',
'cachetools>=3.1.0',
'simplejson>=3.5.3',
'six>=1.11.0',
])
| [
"os.path.join",
"re.search"
]
| [((242, 277), 'os.path.join', 'os.path.join', (['"""dark"""', '"""__init__.py"""'], {}), "('dark', '__init__.py')\n", (254, 277), False, 'import os\n'), ((346, 415), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]+)[\'\\\\"]"""', 'initData', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]+)[\\\'\\\\"]\', initData, re.M)\n', (355, 415), False, 'import re\n')] |
from __future__ import absolute_import
import os
import errno
from contextlib import contextmanager
__author__ = '<NAME>'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '1/24/14'
@contextmanager
def cd(path):
"""
A Fabric-inspired cd context that temporarily changes directory for
performing some tasks, and returns to the original working directory
afterwards. E.g.,
with cd("/my/path/"):
do_something()
Args:
path: Path to cd to.
"""
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
def makedirs_p(path, **kwargs):
"""
Wrapper for os.makedirs that does not raise an exception if the directory already exists, in the fashion of
"mkdir -p" command. The check is performed in a thread-safe way
Args:
path: path of the directory to create
kwargs: standard kwargs for os.makedirs
"""
try:
os.makedirs(path, **kwargs)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise | [
"os.chdir",
"os.path.isdir",
"os.makedirs",
"os.getcwd"
]
| [((593, 604), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (602, 604), False, 'import os\n'), ((609, 623), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (617, 623), False, 'import os\n'), ((668, 681), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (676, 681), False, 'import os\n'), ((1035, 1062), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path, **kwargs)\n', (1046, 1062), False, 'import os\n'), ((1131, 1150), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1144, 1150), False, 'import os\n')] |
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[kda-python-datagenerator-stockticker.py demonstrates how to generate sample data for Amazon Kinesis Data Analytics SQL applications.]
# snippet-service:[kinesisanalytics]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Amazon Kinesis Data Analytics]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-29]
# snippet-sourceauthor:[fletpatr (AWS)]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[kinesisanalytics.python.datagenerator.stockticker]
import json
import boto3
import random
import datetime
kinesis = boto3.client('kinesis')
def getReferrer():
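    # Despite its name, this builds one synthetic stock-tick record: an
    # ISO-8601 event time, a random ticker symbol, and a price in [0, 100).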
data = {}
now = datetime.datetime.now()
str_now = now.isoformat()
data['EVENT_TIME'] = str_now
data['TICKER'] = random.choice(['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV'])
price = random.random() * 100
data['PRICE'] = round(price, 2)
return data
while True:
data = json.dumps(getReferrer())
print(data)
kinesis.put_record(
StreamName="ExampleInputStream",
Data=data,
PartitionKey="partitionkey")
# snippet-end:[kinesisanalytics.python.datagenerator.stockticker]
| [
"datetime.datetime.now",
"random.random",
"random.choice",
"boto3.client"
]
| [((1236, 1259), 'boto3.client', 'boto3.client', (['"""kinesis"""'], {}), "('kinesis')\n", (1248, 1259), False, 'import boto3\n'), ((1303, 1326), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1324, 1326), False, 'import datetime\n'), ((1411, 1465), 'random.choice', 'random.choice', (["['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV']"], {}), "(['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV'])\n", (1424, 1465), False, 'import random\n'), ((1478, 1493), 'random.random', 'random.random', ([], {}), '()\n', (1491, 1493), False, 'import random\n')] |
import numpy as np
import logging
import numbers
import torch
import math
import json
import sys
from torch.optim.lr_scheduler import LambdaLR
from torchvision.transforms.functional import pad
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Keeps learning rate schedule equal to 1. after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
def get_padding(image):
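    # Returns (left, top, right, bottom) padding that centers the image in a
    # square of side max(w, h); for odd differences the extra pixel goes to the
    # left/top edge.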
w, h = image.size
max_wh = np.max([w, h])
h_padding = (max_wh - w) / 2
v_padding = (max_wh - h) / 2
l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5
t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5
r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5
b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5
padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))
return padding
class NewPad(object):
def __init__(self, fill=0, padding_mode='constant'):
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return pad(img, get_padding(img), self.fill, self.padding_mode)
def __repr__(self):
        return self.__class__.__name__ + '(fill={0}, padding_mode={1})'. \
            format(self.fill, self.padding_mode)
def find_device():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
return device
def read_json(data):
with open(data) as f:
return json.load(f)
def save_json(data, path):
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f)
def setup_logger():
logger = logging.getLogger('train')
logger.setLevel(logging.INFO)
if len(logger.handlers) == 0:
formatter = logging.Formatter('%(asctime)s | %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(model, path):
torch.save(model.state_dict(), path)
def reverse_norm_image(image):
MEAN = torch.tensor([0.485, 0.456, 0.406])
STD = torch.tensor([0.229, 0.224, 0.225])
reverse_image = image * STD[:, None, None] + MEAN[:, None, None]
return reverse_image.permute(1, 2, 0).cpu().numpy() | [
"logging.getLogger",
"logging.StreamHandler",
"logging.Formatter",
"numpy.max",
"torch.tensor",
"torch.cuda.is_available",
"json.load",
"json.dump"
]
| [((3352, 3366), 'numpy.max', 'np.max', (['[w, h]'], {}), '([w, h])\n', (3358, 3366), True, 'import numpy as np\n'), ((4799, 4825), 'logging.getLogger', 'logging.getLogger', (['"""train"""'], {}), "('train')\n", (4816, 4825), False, 'import logging\n'), ((5465, 5500), 'torch.tensor', 'torch.tensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (5477, 5500), False, 'import torch\n'), ((5511, 5546), 'torch.tensor', 'torch.tensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (5523, 5546), False, 'import torch\n'), ((4646, 4658), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4655, 4658), False, 'import json\n'), ((4745, 4763), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (4754, 4763), False, 'import json\n'), ((4914, 4960), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s | %(message)s"""'], {}), "('%(asctime)s | %(message)s')\n", (4931, 4960), False, 'import logging\n'), ((4974, 5014), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (4995, 5014), False, 'import logging\n'), ((4526, 4551), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4549, 4551), False, 'import torch\n')] |
import pytest
from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable
pytestmark = skip_if_pylint_unavailable()
@pytest.fixture(scope="module")
def test_case():
import pylint.testutils
from pylint_plugins import AssertRaisesWithoutMsg
class TestAssertRaisesWithoutMsg(pylint.testutils.CheckerTestCase):
CHECKER_CLASS = AssertRaisesWithoutMsg
test_case = TestAssertRaisesWithoutMsg()
test_case.setup_method()
return test_case
def test_assert_raises_without_msg(test_case):
node = extract_node("self.assertRaises(Exception)")
with test_case.assertAddsMessages(create_message(test_case.CHECKER_CLASS.name, node)):
test_case.walk(node)
node = extract_node("self.assertRaises(Exception, msg='test')")
with test_case.assertNoMessages():
test_case.walk(node)
node = extract_node("pandas.assertRaises(Exception)")
with test_case.assertNoMessages():
test_case.walk(node)
| [
"pytest.fixture",
"tests.pylint_plugins.utils.skip_if_pylint_unavailable",
"tests.pylint_plugins.utils.extract_node",
"tests.pylint_plugins.utils.create_message"
]
| [((125, 153), 'tests.pylint_plugins.utils.skip_if_pylint_unavailable', 'skip_if_pylint_unavailable', ([], {}), '()\n', (151, 153), False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n'), ((157, 187), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (171, 187), False, 'import pytest\n'), ((563, 607), 'tests.pylint_plugins.utils.extract_node', 'extract_node', (['"""self.assertRaises(Exception)"""'], {}), "('self.assertRaises(Exception)')\n", (575, 607), False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n'), ((740, 796), 'tests.pylint_plugins.utils.extract_node', 'extract_node', (['"""self.assertRaises(Exception, msg=\'test\')"""'], {}), '("self.assertRaises(Exception, msg=\'test\')")\n', (752, 796), False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n'), ((877, 923), 'tests.pylint_plugins.utils.extract_node', 'extract_node', (['"""pandas.assertRaises(Exception)"""'], {}), "('pandas.assertRaises(Exception)')\n", (889, 923), False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n'), ((646, 696), 'tests.pylint_plugins.utils.create_message', 'create_message', (['test_case.CHECKER_CLASS.name', 'node'], {}), '(test_case.CHECKER_CLASS.name, node)\n', (660, 696), False, 'from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable\n')] |
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors
import csv
from scipy.stats import mode
import math as m
import os
import collections
#set working directory
#os.chdir("/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
#bkpt_name = "1"
#example: plot_bcs_bkpt("1", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
def plot_bcs_bkpt(bkpt_name, infolder, outfolder):
if infolder[-1] != '/':
infolder = infolder + '/'
file_1 = infolder + bkpt_name + "_1.bc_windows.txt"
file_2 = infolder + bkpt_name + "_2.bc_windows.txt"
file_hap = infolder + bkpt_name + "_hap_bcs.txt"
df_1 = pd.read_table(file_1)
df_2 = pd.read_table(file_2)
hap_bcs = pd.read_table(file_hap)
# bkpt_name = "1"
# file_1 = bkpt_name + "_1.bc_windows.txt"
# file_2 = bkpt_name + "_2.bc_windows.txt"
# file_hap = bkpt_name + "_hap_bcs.txt"
# #sort barcodes by where they map (lowest coordinate to highest)
# #read in data frames
# df_1 = pd.read_table(file_1)
# df_2 = pd.read_table(file_2)
# hap_bcs = pd.read_table(file_hap)
hap_bcs = hap_bcs.transpose()
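    # Build a barcode -> haplotype lookup: every barcode column (its name carries
    # a two-character suffix that is stripped) defaults to 'unassigned' and is
    # then overwritten with the haplotype listed in the _hap_bcs.txt table.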
bcs_hap_dict = {}
for key in df_1.keys():
if key != "chrom" and key != "window_start" and key != "window_end":
key = key[:-2]
bcs_hap_dict[key] = 'unassigned'
for key, values in hap_bcs.iteritems():
if values[0] != 'bcs':
hap = values[1]
bcs_hap_dict[values[0]] = hap
df_1 = df_1.sort_values('window_start')
df_2 = df_2.sort_values('window_start')
chrom_1 = df_1.at[0, 'chrom']
chrom_2 = df_2.at[0, 'chrom']
x_values_1_1 = []
x_values_1_2 = []
x_values_1_unassigned = []
y_values_1_1 = []
y_values_1_2 = []
y_values_1_unassigned = []
x_values_2_1 = []
x_values_2_2 = []
x_values_2_unassigned = []
y_values_2_1 = []
y_values_2_2 = []
y_values_2_unassigned = []
i1 = 0
window_start_arr1 = df_1['window_start']
for name, values in df_1.iteritems(): #go through columns (so each barcode)
if name != "chrom" and name != "window_start" and name != "window_end":
i1 += 1
name = name[:-2]
hap = bcs_hap_dict[name]
#print type(hap) int
for indx, window in values.iteritems():
if window != 0:
if hap == 1:
y_values_1_1.append(i1)
x_values_1_1.append(window_start_arr1[indx])
elif hap == 2:
y_values_1_2.append(i1)
x_values_1_2.append(window_start_arr1[indx])
else:
y_values_1_unassigned.append(i1)
x_values_1_unassigned.append(window_start_arr1[indx])
i2 = 0
window_start_arr2 = df_2['window_start']
for name, values in df_2.iteritems():
if name != "chrom" and name != "window_start" and name != "window_end":
i2 += 1
name = name[:-2]
hap = bcs_hap_dict[name]
for indx, window in values.iteritems():
if window != 0:
if hap == 1:
y_values_2_1.append(i2)
x_values_2_1.append(window_start_arr2[indx])
elif hap == 2:
y_values_2_2.append(i2)
x_values_2_2.append(window_start_arr2[indx])
elif hap == 'unassigned':
y_values_2_unassigned.append(i2)
x_values_2_unassigned.append(window_start_arr2[indx])
fig = plt.figure()
figL = fig.add_subplot(121)
figL.scatter(x_values_1_1, y_values_1_1, s=0.2, color='b') #this doesn't seem to contain anything
figL.scatter(x_values_1_2, y_values_1_2, s=0.2, color='r') #same
figL.scatter(x_values_1_unassigned, y_values_1_unassigned, s=0.2, color='g')
figL.set_title("")
figL.set_xlabel("chr %d (Mb)" %chrom_1)
figL.set_ylabel("SV-specific barcode")
figR = fig.add_subplot(122)
figR.scatter(x_values_2_1, y_values_2_1, s=0.2, color='b') #same
figR.scatter(x_values_2_2, y_values_2_2, s=0.2, color='r') #same
figR.scatter(x_values_2_unassigned, y_values_2_unassigned, s=0.2, color='g')
figR.set_title("")
figR.set_xlabel("chr %d (Mb)" %chrom_2)
figR.set_ylabel("")
brkpt1 = min(df_1['window_start']) + ((max(df_1['window_end']) - min(df_1['window_start']))/2)
brkpt2 = min(df_2['window_start']) + ((max(df_2['window_end']) - min(df_2['window_start']))/2)
figL.axvline(x=brkpt1, linewidth=1, color = 'black')
figR.axvline(x=brkpt2, linewidth=1, color = 'black')
path = outfolder + 'bcs_bkpt_map'
plt.savefig(path)
| [
"matplotlib.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"pandas.read_table"
]
| [((57, 78), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (71, 78), False, 'import matplotlib\n'), ((897, 918), 'pandas.read_table', 'pd.read_table', (['file_1'], {}), '(file_1)\n', (910, 918), True, 'import pandas as pd\n'), ((931, 952), 'pandas.read_table', 'pd.read_table', (['file_2'], {}), '(file_2)\n', (944, 952), True, 'import pandas as pd\n'), ((967, 990), 'pandas.read_table', 'pd.read_table', (['file_hap'], {}), '(file_hap)\n', (980, 990), True, 'import pandas as pd\n'), ((3954, 3966), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3964, 3966), True, 'import matplotlib.pyplot as plt\n'), ((5070, 5087), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (5081, 5087), True, 'import matplotlib.pyplot as plt\n')] |
import logging
from flask import Blueprint
from flask import Flask, render_template, request, flash
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectField, BooleanField
from wtforms.fields.html5 import IntegerRangeField
from wtforms.widgets import TextArea
import langid
from utils.utils import templated
blueprint_langid = Blueprint('langid', __name__)
class UrlForm(FlaskForm):
url = StringField(
'URL',
validators=[validators.DataRequired(), validators.URL(message='Sorry, this is not a valid URL,')])
wMin = IntegerRangeField(
'Min. words',
default=5,
validators=[validators.DataRequired(), validators.NumberRange(min=1, max=20)])
extractor_class = SelectField(
'Extractor',
default=langid.EXTRACTORS[0],
choices=[(i, i) for i in langid.EXTRACTORS],
validators=[validators.DataRequired()])
model_class = SelectField(
'Model',
default=langid.MODELS[0],
choices=[(i, i) for i in langid.MODELS],
validators=[validators.DataRequired()])
return_raw = BooleanField(
'Display raw sentences',
default=False
)
class TextForm(FlaskForm):
text = StringField(
'Text',
widget=TextArea(),
validators=[validators.DataRequired()])
model_class = SelectField(
'Model',
default=langid.MODELS[0],
choices=[(i, i) for i in langid.MODELS],
validators=[validators.DataRequired()])
@blueprint_langid.route('/', methods=['GET', 'POST'])
@templated('index.html')
def crawl():
form = UrlForm(request.form)
if request.method == 'GET':
return dict(form=form)
elif not form.validate():
for f, errs in form.errors.items():
flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
return dict(form=form)
try:
results = langid.mixed_sentences_from_urls(
form.url.data.strip(), extractor_name=form.extractor_class.data, model=form.model_class.data,
with_proba=True, min_words=form.wMin.data, return_raw=form.return_raw.data)
except Exception as e:
flash('Something went wrong %s' % e, 'danger')
logging.exception(e)
return dict(form=form)
return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
@blueprint_langid.route('/text', methods=['GET', 'POST'])
@templated('langid.html')
def predict_text():
form = TextForm(request.form)
if request.method == 'GET':
return dict(form=form)
elif not form.validate():
for f, errs in form.errors.items():
flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
return dict(form=form)
results = [[r] for r in langid.lang_of_text(
form.text.data, model=form.model_class.data, with_proba=True)]
return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
| [
"wtforms.validators.NumberRange",
"wtforms.widgets.TextArea",
"flask.flash",
"wtforms.BooleanField",
"logging.exception",
"langid.lang_of_text",
"utils.utils.templated",
"flask.Blueprint",
"wtforms.validators.DataRequired",
"wtforms.validators.URL"
]
| [((362, 391), 'flask.Blueprint', 'Blueprint', (['"""langid"""', '__name__'], {}), "('langid', __name__)\n", (371, 391), False, 'from flask import Blueprint\n'), ((1575, 1598), 'utils.utils.templated', 'templated', (['"""index.html"""'], {}), "('index.html')\n", (1584, 1598), False, 'from utils.utils import templated\n'), ((2410, 2434), 'utils.utils.templated', 'templated', (['"""langid.html"""'], {}), "('langid.html')\n", (2419, 2434), False, 'from utils.utils import templated\n'), ((1118, 1170), 'wtforms.BooleanField', 'BooleanField', (['"""Display raw sentences"""'], {'default': '(False)'}), "('Display raw sentences', default=False)\n", (1130, 1170), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((1278, 1288), 'wtforms.widgets.TextArea', 'TextArea', ([], {}), '()\n', (1286, 1288), False, 'from wtforms.widgets import TextArea\n'), ((2168, 2214), 'flask.flash', 'flash', (["('Something went wrong %s' % e)", '"""danger"""'], {}), "('Something went wrong %s' % e, 'danger')\n", (2173, 2214), False, 'from flask import Flask, render_template, request, flash\n'), ((2223, 2243), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (2240, 2243), False, 'import logging\n'), ((2751, 2837), 'langid.lang_of_text', 'langid.lang_of_text', (['form.text.data'], {'model': 'form.model_class.data', 'with_proba': '(True)'}), '(form.text.data, model=form.model_class.data, with_proba\n =True)\n', (2770, 2837), False, 'import langid\n'), ((478, 503), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (501, 503), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((505, 562), 'wtforms.validators.URL', 'validators.URL', ([], {'message': '"""Sorry, this is not a valid URL,"""'}), "(message='Sorry, this is not a valid URL,')\n", (519, 562), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((657, 682), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (680, 682), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((684, 721), 'wtforms.validators.NumberRange', 'validators.NumberRange', ([], {'min': '(1)', 'max': '(20)'}), '(min=1, max=20)\n', (706, 721), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((892, 917), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (915, 917), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((1072, 1097), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (1095, 1097), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((1310, 1335), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (1333, 1335), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n'), ((1490, 1515), 'wtforms.validators.DataRequired', 'validators.DataRequired', ([], {}), '()\n', (1513, 1515), False, 'from wtforms import StringField, validators, SelectField, BooleanField\n')] |
"""
BespokeFit
Creating bespoke parameters for individual molecules.
"""
import logging
import sys
from ._version import get_versions
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions
# Silence verbose messages when running the CLI otherwise you can't read the output
# without seeing tens of 'Unable to load AmberTools' or don't import simtk warnings...
if sys.argv[0].endswith("openff-bespoke"):
from openff.bespokefit.utilities.logging import DeprecationWarningFilter
# if "openff-bespoke"
logging.getLogger("openff.toolkit").setLevel(logging.ERROR)
logging.getLogger().addFilter(DeprecationWarningFilter())
| [
"logging.getLogger",
"openff.bespokefit.utilities.logging.DeprecationWarningFilter"
]
| [((689, 715), 'openff.bespokefit.utilities.logging.DeprecationWarningFilter', 'DeprecationWarningFilter', ([], {}), '()\n', (713, 715), False, 'from openff.bespokefit.utilities.logging import DeprecationWarningFilter\n'), ((595, 630), 'logging.getLogger', 'logging.getLogger', (['"""openff.toolkit"""'], {}), "('openff.toolkit')\n", (612, 630), False, 'import logging\n'), ((659, 678), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (676, 678), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 03 11:06:37 2018
@author: vmg
"""
import sdf
import numpy as np
# Load 2006 LUT for interpolation
# 2006 Groeneveld Look-Up Table as presented in
# "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 190-1922.
# This file requires the file 2006LUTdata.txt
# Pressure range [MPa] from 2006 LUT, convert to [Pa]
P = np.array((0.10,0.30,0.50,1.0,2.0,3.0,5.0,7.0,10.0,12.0,14.0,16.0,18.0,20.0,21.0))*1e6
# Mass Flux range [kg/m^2-s] from 2006 .LUT.
G = np.array((0.,50.,100.,300.,500.,750.,1000.,1500.,2000.,2500.,3000.,3500.,4000.,4500.,5000.,5500.,6000.,6500.,7000.,7500.,8000.))
# Quality range from 2006 LUT
x = np.array((-0.50,-0.40,-0.30,-0.20,-0.15,-0.10,-0.05,0.00,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.60,0.70,0.80,0.90,1.00))
# Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2]
q_raw=np.loadtxt('../Data/2006LUTdata.txt')*1e3
# Convert the imported array into a (MxNxQ) where:
# M is number of mass flux divisions
# N is number of quality divisions
# Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG,lenx,lenP))
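# The raw table stacks one (mass flux x quality) block per pressure, so row
# i + k*lenG holds mass-flux index i at pressure index k of the reshaped array.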
for i in xrange(lenG):
for j in xrange(lenx):
for k in xrange(lenP):
q[i,j,k] = q_raw[i + k*lenG,j]
# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G,ds_x,ds_P])
# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G,ds_x,ds_P,ds_q])
sdf.save('../Data/2006LUT.sdf', g) | [
"sdf.Group",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt",
"sdf.save",
"sdf.Dataset"
]
| [((517, 695), 'numpy.array', 'np.array', (['(0.0, 50.0, 100.0, 300.0, 500.0, 750.0, 1000.0, 1500.0, 2000.0, 2500.0, \n 3000.0, 3500.0, 4000.0, 4500.0, 5000.0, 5500.0, 6000.0, 6500.0, 7000.0,\n 7500.0, 8000.0)'], {}), '((0.0, 50.0, 100.0, 300.0, 500.0, 750.0, 1000.0, 1500.0, 2000.0, \n 2500.0, 3000.0, 3500.0, 4000.0, 4500.0, 5000.0, 5500.0, 6000.0, 6500.0,\n 7000.0, 7500.0, 8000.0))\n', (525, 695), True, 'import numpy as np\n'), ((681, 824), 'numpy.array', 'np.array', (['(-0.5, -0.4, -0.3, -0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, \n 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)'], {}), '((-0.5, -0.4, -0.3, -0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15,\n 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0))\n', (689, 824), True, 'import numpy as np\n'), ((1140, 1168), 'numpy.zeros', 'np.zeros', (['(lenG, lenx, lenP)'], {}), '((lenG, lenx, lenP))\n', (1148, 1168), True, 'import numpy as np\n'), ((1338, 1426), 'sdf.Dataset', 'sdf.Dataset', (['"""G"""'], {'data': 'G', 'unit': '"""kg/(m2.s)"""', 'is_scale': '(True)', 'display_name': '"""Mass Flux"""'}), "('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name=\n 'Mass Flux')\n", (1349, 1426), False, 'import sdf\n'), ((1429, 1502), 'sdf.Dataset', 'sdf.Dataset', (['"""x"""'], {'data': 'x', 'unit': '"""1"""', 'is_scale': '(True)', 'display_name': '"""Quality"""'}), "('x', data=x, unit='1', is_scale=True, display_name='Quality')\n", (1440, 1502), False, 'import sdf\n'), ((1510, 1585), 'sdf.Dataset', 'sdf.Dataset', (['"""P"""'], {'data': 'P', 'unit': '"""Pa"""', 'is_scale': '(True)', 'display_name': '"""Pressure"""'}), "('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')\n", (1521, 1585), False, 'import sdf\n'), ((1593, 1657), 'sdf.Dataset', 'sdf.Dataset', (['"""q"""'], {'data': 'q', 'unit': '"""W/m2"""', 'scales': '[ds_G, ds_x, ds_P]'}), "('q', data=q, unit='W/m2', scales=[ds_G, ds_x, ds_P])\n", (1604, 1657), False, 'import sdf\n'), ((1705, 1778), 'sdf.Group', 'sdf.Group', (['"""/"""'], {'comment': '"""2006 CHF LUT"""', 'datasets': '[ds_G, ds_x, ds_P, ds_q]'}), "('/', comment='2006 CHF LUT', datasets=[ds_G, ds_x, ds_P, ds_q])\n", (1714, 1778), False, 'import sdf\n'), ((1776, 1810), 'sdf.save', 'sdf.save', (['"""../Data/2006LUT.sdf"""', 'g'], {}), "('../Data/2006LUT.sdf', g)\n", (1784, 1810), False, 'import sdf\n'), ((381, 478), 'numpy.array', 'np.array', (['(0.1, 0.3, 0.5, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0,\n 21.0)'], {}), '((0.1, 0.3, 0.5, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 12.0, 14.0, 16.0, \n 18.0, 20.0, 21.0))\n', (389, 478), True, 'import numpy as np\n'), ((886, 923), 'numpy.loadtxt', 'np.loadtxt', (['"""../Data/2006LUTdata.txt"""'], {}), "('../Data/2006LUTdata.txt')\n", (896, 923), True, 'import numpy as np\n')] |
import unittest
from pathlib import Path
from pprint import pprint
from vint.compat.itertools import zip_longest
from vint.linting.linter import Linter
from vint.linting.config.config_default_source import ConfigDefaultSource
class PolicyAssertion(unittest.TestCase):
class StubPolicySet(object):
def __init__(self, *policies):
self._policies = policies
def get_enabled_policies(self):
return self._policies
def update_by_config(self, policy_enabling_map):
pass
class StubConfigContainer(object):
def __init__(self, policy_names_to_enable):
default_config_dict = ConfigDefaultSource(None).get_config_dict()
policy_options = default_config_dict.get('policies', {})
for policy, options in policy_options.items():
options['enabled'] = False
for policy in policy_names_to_enable:
options = policy_options.setdefault(policy, {})
options['enabled'] = True
self._config_dict = {
'policies': policy_options,
}
def append_config_source(self, config_source):
# Ignore a comment config source
pass
def get_config_dict(self):
return self._config_dict
def assertFoundNoViolations(self, path, Policy, policy_options=None):
self.assertFoundViolationsEqual(path, Policy, [], policy_options)
def assertFoundViolationsEqual(self, path, Policy, expected_violations, policy_options=None):
policy_to_test = Policy()
policy_name = Policy.__name__
policy_set = PolicyAssertion.StubPolicySet(policy_to_test)
config = PolicyAssertion.StubConfigContainer(policy_name)
if policy_options is not None:
config.get_config_dict()['policies'][policy_name].update(policy_options)
linter = Linter(policy_set, config.get_config_dict())
violations = linter.lint_file(path)
pprint(violations)
assert len(violations) == len(expected_violations)
for violation, expected_violation in zip_longest(violations, expected_violations):
self.assertViolation(violation, expected_violation)
def assertViolation(self, actual_violation, expected_violation):
self.assertIsNot(actual_violation, None)
self.assertIsNot(expected_violation, None)
pprint(actual_violation)
assert actual_violation['name'] == expected_violation['name']
assert actual_violation['position'] == expected_violation['position']
assert actual_violation['level'] == expected_violation['level']
self.assertIsInstance(actual_violation['description'], str)
def get_fixture_path(*filename):
return Path('test', 'fixture', 'policy', *filename)
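# Illustrative sketch (added example, not part of the original module): a
# concrete policy test would typically subclass PolicyAssertion and combine
# get_fixture_path with the assertion helpers above. `ProhibitExample` and
# the fixture filename are hypothetical placeholders, not real vint policies.
#
#   class TestProhibitExample(PolicyAssertion):
#       def test_clean_fixture(self):
#           path = get_fixture_path('prohibit_example_valid.vim')
#           self.assertFoundNoViolations(path, ProhibitExample)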
| [
"vint.linting.config.config_default_source.ConfigDefaultSource",
"pprint.pprint",
"vint.compat.itertools.zip_longest",
"pathlib.Path"
]
| [((2791, 2835), 'pathlib.Path', 'Path', (['"""test"""', '"""fixture"""', '"""policy"""', '*filename'], {}), "('test', 'fixture', 'policy', *filename)\n", (2795, 2835), False, 'from pathlib import Path\n'), ((2016, 2034), 'pprint.pprint', 'pprint', (['violations'], {}), '(violations)\n', (2022, 2034), False, 'from pprint import pprint\n'), ((2140, 2184), 'vint.compat.itertools.zip_longest', 'zip_longest', (['violations', 'expected_violations'], {}), '(violations, expected_violations)\n', (2151, 2184), False, 'from vint.compat.itertools import zip_longest\n'), ((2430, 2454), 'pprint.pprint', 'pprint', (['actual_violation'], {}), '(actual_violation)\n', (2436, 2454), False, 'from pprint import pprint\n'), ((660, 685), 'vint.linting.config.config_default_source.ConfigDefaultSource', 'ConfigDefaultSource', (['None'], {}), '(None)\n', (679, 685), False, 'from vint.linting.config.config_default_source import ConfigDefaultSource\n')] |
import copy
import json
import logging
import os
import sys
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
from sklearn import decomposition
from .. import dp_logging
from . import labeler_utils
from .base_model import AutoSubRegistrationMeta, BaseModel, BaseTrainableModel
_file_dir = os.path.dirname(os.path.abspath(__file__))
logger = dp_logging.get_child_logger(__name__)
class NoV1ResourceMessageFilter(logging.Filter):
"""Removes TF2 warning for using TF1 model which has resources."""
def filter(self, record):
msg = 'is a problem, consider rebuilding the SavedModel after ' + \
'running tf.compat.v1.enable_resource_variables()'
return msg not in record.getMessage()
tf_logger = logging.getLogger('tensorflow')
tf_logger.addFilter(NoV1ResourceMessageFilter())
@tf.keras.utils.register_keras_serializable()
class FBetaScore(tf.keras.metrics.Metric):
r"""Computes F-Beta score.
Adapted and slightly modified from https://github.com/tensorflow/addons/blob/v0.12.0/tensorflow_addons/metrics/f_scores.py#L211-L283
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
It is the weighted harmonic mean of precision
and recall. Output range is `[0, 1]`. Works for
both multi-class and multi-label classification.
$$
    F_{\beta} = (1 + \beta^2) \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{(\beta^2 \cdot \textrm{precision}) + \textrm{recall}}
$$
Args:
num_classes: Number of unique classes in the dataset.
average: Type of averaging to be performed on data.
Acceptable values are `None`, `micro`, `macro` and
`weighted`. Default value is None.
        beta: Determines the relative weight given to precision and
            recall in the harmonic mean. Default value is 1.
threshold: Elements of `y_pred` greater than threshold are
converted to be 1, and the rest 0. If threshold is
None, the argmax is converted to 1, and the rest 0.
name: (Optional) String name of the metric instance.
dtype: (Optional) Data type of the metric result.
Returns:
F-Beta Score: float.
"""
# Modification: remove the run-time type checking for functions
def __init__(self, num_classes, average=None, beta=1.0, threshold=None,
name="fbeta_score", dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype)
if average not in (None, "micro", "macro", "weighted"):
raise ValueError(
"Unknown average type. Acceptable values "
"are: [None, 'micro', 'macro', 'weighted']"
)
if not isinstance(beta, float):
raise TypeError("The value of beta should be a python float")
if beta <= 0.0:
raise ValueError("beta value should be greater than zero")
if threshold is not None:
if not isinstance(threshold, float):
raise TypeError("The value of threshold should be a python float")
if threshold > 1.0 or threshold <= 0.0:
raise ValueError("threshold should be between 0 and 1")
self.num_classes = num_classes
self.average = average
self.beta = beta
self.threshold = threshold
self.axis = None
self.init_shape = []
if self.average != "micro":
self.axis = 0
self.init_shape = [self.num_classes]
def _zero_wt_init(name):
return self.add_weight(
name, shape=self.init_shape, initializer="zeros", dtype=self.dtype
)
self.true_positives = _zero_wt_init("true_positives")
self.false_positives = _zero_wt_init("false_positives")
self.false_negatives = _zero_wt_init("false_negatives")
self.weights_intermediate = _zero_wt_init("weights_intermediate")
def update_state(self, y_true, y_pred, sample_weight=None):
if self.threshold is None:
threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
# make sure [0, 0, 0] doesn't become [1, 1, 1]
# Use abs(x) > eps, instead of x != 0 to check for zero
y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-12)
else:
y_pred = y_pred > self.threshold
y_true = tf.cast(y_true, self.dtype)
y_pred = tf.cast(y_pred, self.dtype)
def _weighted_sum(val, sample_weight):
if sample_weight is not None:
val = tf.math.multiply(val, tf.expand_dims(sample_weight, 1))
return tf.reduce_sum(val, axis=self.axis)
self.true_positives.assign_add(_weighted_sum(y_pred * y_true, sample_weight))
self.false_positives.assign_add(
_weighted_sum(y_pred * (1 - y_true), sample_weight)
)
self.false_negatives.assign_add(
_weighted_sum((1 - y_pred) * y_true, sample_weight)
)
self.weights_intermediate.assign_add(_weighted_sum(y_true, sample_weight))
def result(self):
precision = tf.math.divide_no_nan(
self.true_positives, self.true_positives + self.false_positives
)
recall = tf.math.divide_no_nan(
self.true_positives, self.true_positives + self.false_negatives
)
mul_value = precision * recall
add_value = (tf.math.square(self.beta) * precision) + recall
mean = tf.math.divide_no_nan(mul_value, add_value)
f1_score = mean * (1 + tf.math.square(self.beta))
if self.average == "weighted":
weights = tf.math.divide_no_nan(
self.weights_intermediate, tf.reduce_sum(self.weights_intermediate)
)
f1_score = tf.reduce_sum(f1_score * weights)
elif self.average is not None: # [micro, macro]
f1_score = tf.reduce_mean(f1_score)
return f1_score
def get_config(self):
"""Returns the serializable config of the metric."""
config = {
"num_classes": self.num_classes,
"average": self.average,
"beta": self.beta,
"threshold": self.threshold,
}
base_config = super().get_config()
return {**base_config, **config}
def reset_states(self):
reset_value = tf.zeros(self.init_shape, dtype=self.dtype)
tf.keras.backend.batch_set_value([(v, reset_value) for v in self.variables])
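# Illustrative usage (added example, not part of the original module): a
# minimal sketch of exercising FBetaScore as a standalone Keras metric with
# made-up tensors; it can equally be passed via `metrics=[...]` when
# compiling a tf.keras model.
#
#   metric = FBetaScore(num_classes=3, average='micro', beta=0.5)
#   y_true = tf.constant([[0., 1., 0.], [0., 0., 1.]])
#   y_pred = tf.constant([[0.2, 0.7, 0.1], [0.1, 0.2, 0.7]])
#   metric.update_state(y_true, y_pred)
#   print(metric.result().numpy())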
@tf.keras.utils.register_keras_serializable()
class F1Score(FBetaScore):
r"""Computes F-1 Score.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
It is the harmonic mean of precision and recall.
Output range is `[0, 1]`. Works for both multi-class
and multi-label classification.
$$
F_1 = 2 \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{\textrm{precision} + \textrm{recall}}
$$
Args:
num_classes: Number of unique classes in the dataset.
average: Type of averaging to be performed on data.
Acceptable values are `None`, `micro`, `macro`
and `weighted`. Default value is None.
threshold: Elements of `y_pred` above threshold are
considered to be 1, and the rest 0. If threshold is
None, the argmax is converted to 1, and the rest 0.
name: (Optional) String name of the metric instance.
dtype: (Optional) Data type of the metric result.
Returns:
F-1 Score: float.
"""
# Modification: remove the run-time type checking for functions
def __init__(self, num_classes, average=None, threshold=None,
name="f1_score", dtype=None):
super().__init__(num_classes, average, 1.0, threshold, name=name, dtype=dtype)
def get_config(self):
base_config = super().get_config()
del base_config["beta"]
return base_config
def build_embd_dictionary(filename):
"""
    Returns a numpy embedding dictionary from an embedding file with GloVe-like format
:param filename: Path to the embed file for loading
:type filename: str
"""
embd_table = dict()
with open(filename, 'r') as embds:
for line in embds:
line = line.strip().split()
embd_table[line[0]] = np.asarray(line[1:])
return embd_table
def create_glove_char(n_dims, source_file=None):
"""
    Reduces the GloVe character embeddings from the source file to n_dims
    principal components and writes them to a new file
:param n_dims: Final number of principal component dims of the embeddings
:type n_dims: int
:param source_file: Location of original embeddings to factor down
:type source_file: str
"""
if source_file is None:
source_file = os.path.join(_file_dir,
"embeddings/glove.840B.300d-char.txt")
# get embedding table first and vectors as array
embd_table = build_embd_dictionary(source_file)
embd_words, embd_matrix = [
np.asarray(ls) if i > 0 else list(ls)
for i, ls in enumerate(zip(*embd_table.items()))]
# get PCA embedder
pca = decomposition.PCA(n_components=n_dims)
reduced_embds = pca.fit_transform(embd_matrix)
# write to file
dir_name = os.path.dirname(source_file)
embd_file_name = os.path.join(dir_name,
'glove-reduced-{}D.txt'.format(n_dims))
with open(embd_file_name, 'w') as file:
for word, embd in zip(embd_words, reduced_embds):
file.write(word + " " + ' '.join(str(num) for num in embd) + "\n")
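# Illustrative usage (added example, not part of the original module):
# assuming the bundled 300-D GloVe character embedding file is present,
# this would write a PCA-reduced copy next to it and load it back:
#
#   create_glove_char(64)
#   reduced = build_embd_dictionary(
#       os.path.join(_file_dir, "embeddings/glove-reduced-64D.txt"))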
class CharacterLevelCnnModel(BaseTrainableModel,
metaclass=AutoSubRegistrationMeta):
    # boolean indicating whether the label mapping must reserve index 0 (the 'PAD' label)
requires_zero_mapping = True
def __init__(self, label_mapping=None, parameters=None):
"""
        CNN Model Initializer. Initializes epoch_id.
:param label_mapping: maps labels to their encoded integers
:type label_mapping: dict
:param parameters: Contains all the appropriate parameters for the
model. Must contain num_labels. Other possible parameters are:
max_length, max_char_encoding_id, dim_embed, size_fc
dropout, size_conv, num_fil, optimizer, default_label
:type parameters: dict
:return: None
"""
# parameter initialization
if not parameters:
parameters = {}
parameters.setdefault('max_length', 3400)
parameters.setdefault('max_char_encoding_id', 127)
parameters.setdefault('dim_embed', 64)
parameters.setdefault('size_fc', [96, 96])
parameters.setdefault('dropout', 0.073)
parameters.setdefault('size_conv', 13)
parameters.setdefault('default_label', "UNKNOWN")
parameters.setdefault('num_fil', [48 for _ in range(4)])
parameters['pad_label'] = 'PAD'
self._epoch_id = 0
# reconstruct flags for model
self._model_num_labels = 0
self._model_default_ind = -1
BaseModel.__init__(self, label_mapping, parameters)
def __eq__(self, other):
"""
        Checks whether two models are equal. Only important variables are
        compared (i.e. the underlying model itself may not be checked).
:param self: a model
:param other: a model
:type self: BaseModel
:type other: BaseModel
:return: Whether or not self and other are equal
:rtype: bool
"""
if self._parameters != other._parameters \
or self._label_mapping != other._label_mapping:
return False
return True
def _validate_parameters(self, parameters):
"""
Validate the parameters sent in. Raise error if invalid parameters are
present.
:param parameters: parameter dict containing the following parameters:
max_length: Maximum char length in a sample
max_char_encoding_id: Maximum integer value for encoding the input
dim_embed: Number of embedded dimensions
size_fc: Size of each fully connected layers
dropout: Ratio of dropout in the model
size_conv: Convolution kernel size
default_label: Key for label_mapping that is the default label
pad_label: Key for entities_dict that is the pad label
num_fil: Number of filters in each convolution layer
:type parameters: dict
:return: None
"""
errors = []
list_of_necessary_params = ['max_length', 'max_char_encoding_id',
'dim_embed', 'size_fc', 'dropout',
'size_conv', 'default_label', 'pad_label',
'num_fil']
# Make sure the necessary parameters are present and valid.
for param in parameters:
if param in ['max_length', 'max_char_encoding_id', 'dim_embed',
'size_conv']:
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0:
errors.append(param + " must be a valid integer or float "
"greater than 0.")
elif param == 'dropout':
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0 or parameters[param] > 1:
errors.append(param + " must be a valid integer or float "
"from 0 to 1.")
elif param == 'size_fc' or param == 'num_fil':
if not isinstance(parameters[param], list) \
or len(parameters[param]) == 0:
errors.append(param + " must be a non-empty list of "
"integers.")
else:
for item in parameters[param]:
if not isinstance(item, int):
errors.append(param + " must be a non-empty "
"list of integers.")
break
elif param == 'default_label':
if not isinstance(parameters[param], str):
error = str(param) + " must be a string."
errors.append(error)
# Error if there are extra parameters thrown in
for param in parameters:
if param not in list_of_necessary_params:
errors.append(param + " is not an accepted parameter.")
if errors:
raise ValueError('\n'.join(errors))
def set_label_mapping(self, label_mapping):
"""
Sets the labels for the model
:param label_mapping: label mapping of the model
:type label_mapping: dict
:return: None
"""
if not isinstance(label_mapping, (list, dict)):
raise TypeError("Labels must either be a non-empty encoding dict "
"which maps labels to index encodings or a list.")
label_mapping = copy.deepcopy(label_mapping)
if 'PAD' not in label_mapping:
if isinstance(label_mapping, list): # if list missing PAD
label_mapping = ['PAD'] + label_mapping
elif 0 not in label_mapping.values(): # if dict missing PAD and 0
label_mapping.update({'PAD': 0})
if (isinstance(label_mapping, dict)
and label_mapping.get('PAD', None) != 0): # dict with bad PAD
raise ValueError("`PAD` must map to index zero.")
if self._parameters['default_label'] not in label_mapping:
raise ValueError("The `default_label` of {} must exist in the "
"label mapping.".format(
self._parameters['default_label']))
super().set_label_mapping(label_mapping)
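    # Illustrative behaviour (added note, not part of the original class):
    # passing a list without 'PAD' gets the reserved 'PAD' label prepended,
    # so something like
    #   model.set_label_mapping(['UNKNOWN', 'ADDRESS'])
    # is treated as ['PAD', 'UNKNOWN', 'ADDRESS'], i.e. indices 0, 1, 2
    # (a sketch of the intent; the final encoding is done by the base class).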
def _need_to_reconstruct_model(self):
"""
Determines whether or not the model needs to be reconstructed.
:return: bool of whether or not the model needs to reconstruct.
"""
if not self._model:
return False
default_ind = self.label_mapping[self._parameters['default_label']]
return self.num_labels != self._model_num_labels or \
default_ind != self._model_default_ind
def save_to_disk(self, dirpath):
"""
Saves whole model to disk with weights
:param dirpath: directory path where you want to save the model to
:type dirpath: str
:return: None
"""
if not self._model:
self._construct_model()
elif self._need_to_reconstruct_model():
self._reconstruct_model()
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'w') as fp:
json.dump(self._parameters, fp)
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'w') as fp:
json.dump(self.label_mapping, fp)
self._model.save(os.path.join(dirpath))
@classmethod
def load_from_disk(cls, dirpath):
"""
Loads whole model from disk with weights
:param dirpath: directory path where you want to load the model from
:type dirpath: str
:return: None
"""
# load parameters
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'r') as fp:
parameters = json.load(fp)
# load label_mapping
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'r') as fp:
label_mapping = json.load(fp)
# use f1 score metric
custom_objects = {
"F1Score": F1Score(
num_classes=max(label_mapping.values()) + 1,
average='micro'),
"CharacterLevelCnnModel": cls,
}
with tf.keras.utils.custom_object_scope(custom_objects):
tf_model = tf.keras.models.load_model(dirpath)
loaded_model = cls(label_mapping, parameters)
loaded_model._model = tf_model
# Tensorflow v1 Model weights need to be transferred.
if not callable(tf_model):
loaded_model._construct_model()
tf1_weights = []
for var in tf_model.variables:
if 'training' not in var.name:
tf1_weights.append(var.value())
loaded_model._construct_model()
tf1_weights.append(loaded_model._model.weights[-1].value())
loaded_model._model.set_weights(tf1_weights)
# load self
loaded_model._model_num_labels = loaded_model.num_labels
loaded_model._model_default_ind = loaded_model.label_mapping[
loaded_model._parameters['default_label']
]
return loaded_model
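    # Illustrative usage (added example, not part of the original class): a
    # minimal save/load round trip, assuming `model` is a trained
    # CharacterLevelCnnModel and `dirpath` is a writable directory:
    #
    #   model.save_to_disk(dirpath)
    #   restored = CharacterLevelCnnModel.load_from_disk(dirpath)
    #   results = restored.predict(data_batches)  # iterable of sample batches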
@staticmethod
def _char_encoding_layer(input_str_tensor, max_char_encoding_id, max_len):
"""
Character encoding for the list of sentences
:param input_str_tensor: input list of sentences converted to tensor
:type input_str_tensor: tf.tensor
:param max_char_encoding_id: Maximum integer value for encoding the
input
:type max_char_encoding_id: int
:param max_len: Maximum char length in a sample
:type max_len: int
        :return: tensor containing encoded list of input sentences
:rtype: tf.Tensor
"""
# convert characters to indices
input_str_flatten = tf.reshape(input_str_tensor, [-1])
sentences_encode = tf.strings.unicode_decode(input_str_flatten,
input_encoding='UTF-8')
sentences_encode = tf.add(tf.cast(1, tf.int32), sentences_encode)
sentences_encode = tf.math.minimum(sentences_encode,
max_char_encoding_id + 1)
# padding
sentences_encode_pad = sentences_encode.to_tensor(shape=[None, max_len])
return sentences_encode_pad
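    # Illustrative usage (added example, not part of the original class):
    # encoding two short strings with the defaults used elsewhere in this
    # class gives a dense [2, 3400] int tensor, shifted by +1 so that 0 stays
    # reserved for padding:
    #
    #   encoded = CharacterLevelCnnModel._char_encoding_layer(
    #       tf.constant(["ab", "c"]), max_char_encoding_id=127, max_len=3400)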
@staticmethod
def _argmax_threshold_layer(num_labels, threshold=0.0, default_ind=1):
"""
Adds an argmax threshold layer to the model. This layer's output will be
the argmax value if the confidence for that argmax meets the threshold
for its label, otherwise it will be the default label index.
:param num_labels: number of entities
:type num_labels: int
:param threshold: default set to 0 so all confidences pass.
:type threshold: float
:param default_ind: default index
:type default_ind: int
:return: final argmax threshold layer for the model
"""
# Initialize the thresholds vector variable and create the threshold
# matrix.
class ThreshArgMaxLayer(tf.keras.layers.Layer):
def __init__(self, threshold_, num_labels_):
super(ThreshArgMaxLayer, self).__init__()
thresh_init = tf.constant_initializer(threshold_)
self.thresh_vec = tf.Variable(
name='ThreshVec',
initial_value=thresh_init(shape=[num_labels_]),
trainable=False)
def call(self, argmax_layer, confidence_layer):
threshold_at_argmax = tf.gather(self.thresh_vec, argmax_layer)
confidence_max_layer = tf.keras.backend.max(confidence_layer,
axis=2)
# Check if the confidences meet the threshold minimum.
argmax_mask = tf.keras.backend.cast(
tf.keras.backend.greater_equal(confidence_max_layer,
threshold_at_argmax),
dtype=argmax_layer.dtype)
# Create a vector the same size as the batch_size which
# represents the background label
bg_label_tf = tf.keras.backend.constant(
default_ind, dtype=argmax_layer.dtype)
                # Generate the final predicted output as:
                #   default_ind + argmax_mask * (argmax - default_ind)
final_predicted_layer = tf.add(
bg_label_tf,
tf.multiply(
tf.subtract(argmax_layer, bg_label_tf),
argmax_mask
), name='ThreshArgMax'
)
return final_predicted_layer
return ThreshArgMaxLayer(threshold, num_labels)
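    # Illustrative behaviour (added example, not part of the original class):
    # the layer keeps an argmax prediction only where its confidence meets the
    # per-label threshold and falls back to `default_ind` otherwise:
    #
    #   thresh_layer = CharacterLevelCnnModel._argmax_threshold_layer(
    #       num_labels=3, threshold=0.9, default_ind=1)
    #   conf = tf.constant([[[0.3, 0.3, 0.4], [0.05, 0.0, 0.95]]])
    #   preds = thresh_layer(tf.keras.backend.argmax(conf), conf)
    #   # -> [[1, 2]]: the low-confidence argmax falls back to index 1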
def _construct_model(self):
"""
Model constructor for the data labeler. This also serves as a weight
reset.
:return: None
"""
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
# Reset model
tf.keras.backend.clear_session()
# generate glove embedding
create_glove_char(self._parameters['dim_embed'])
# generate model
self._model = tf.keras.models.Sequential()
# default parameters
max_length = self._parameters['max_length']
max_char_encoding_id = self._parameters['max_char_encoding_id']
# Encoding layer
def encoding_function(input_str):
char_in_vector = CharacterLevelCnnModel._char_encoding_layer(
input_str, max_char_encoding_id, max_length)
return char_in_vector
self._model.add(tf.keras.layers.Input(shape=(None,), dtype=tf.string))
self._model.add(
tf.keras.layers.Lambda(encoding_function,
output_shape=tuple([max_length])))
# Create a pre-trained weight matrix
# character encoding indices range from 0 to max_char_encoding_id,
# we add one extra index for out-of-vocabulary character
embed_file = os.path.join(
_file_dir, "embeddings/glove-reduced-{}D.txt".format(
self._parameters['dim_embed']))
embedding_matrix = np.zeros((max_char_encoding_id + 2,
self._parameters['dim_embed']))
embedding_dict = build_embd_dictionary(embed_file)
input_shape = tuple([max_length])
# Fill in the weight matrix: let pad and space be 0s
for ascii_num in range(max_char_encoding_id):
if chr(ascii_num) in embedding_dict:
embedding_matrix[ascii_num + 1] = embedding_dict[chr(ascii_num)]
self._model.add(tf.keras.layers.Embedding(
max_char_encoding_id + 2,
self._parameters['dim_embed'],
weights=[embedding_matrix],
input_length=input_shape[0],
trainable=True))
# Add the convolutional layers
for fil in self._parameters['num_fil']:
self._model.add(tf.keras.layers.Conv1D(
filters=fil, kernel_size=self._parameters['size_conv'],
activation='relu', padding='same'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
# Add batch normalization, set fused = True for compactness
self._model.add(
tf.keras.layers.BatchNormalization(fused=False, scale=True))
# Add the fully connected layers
for size in self._parameters['size_fc']:
self._model.add(
tf.keras.layers.Dense(units=size, activation='relu'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
# Add the final Softmax layer
self._model.add(
tf.keras.layers.Dense(num_labels, activation='softmax'))
# Output the model into a .pb file for TensorFlow
argmax_layer = tf.keras.backend.argmax(self._model.output)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = self._model.outputs + \
[argmax_layer,
final_predicted_layer(argmax_layer, self._model.output)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def reset_weights(self):
"""
Reset the weights of the model.
:return: None
"""
self._construct_model()
def _reconstruct_model(self):
"""
        Reconstruct the appropriate layers if the number of labels is
altered
:return: None
"""
# Reset model
tf.keras.backend.clear_session()
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
        # Remove the 3 output layers ('dense_2', 'tf_op_layer_ArgMax',
# 'thresh_arg_max_layer')
for _ in range(3):
self._model.layers.pop()
# Add the final Softmax layer to the previous spot
final_softmax_layer = tf.keras.layers.Dense(
num_labels, activation='softmax', name="dense_2")(
self._model.layers[-4].output)
# Output the model into a .pb file for TensorFlow
argmax_layer = tf.keras.backend.argmax(final_softmax_layer)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = [final_softmax_layer] + \
[argmax_layer,
final_predicted_layer(argmax_layer,
final_softmax_layer)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def fit(self, train_data, val_data=None, batch_size=32, label_mapping=None,
reset_weights=False, verbose=True):
"""
Train the current model with the training data and validation data
:param train_data: Training data used to train model
:type train_data: Union[list, np.ndarray]
:param val_data: Validation data used to validate the training
:type val_data: Union[list, np.ndarray]
:param batch_size: Used to determine number of samples in each batch
:type batch_size: int
:param label_mapping: maps labels to their encoded integers
:type label_mapping: Union[dict, None]
:param reset_weights: Flag to determine whether to reset the weights or
not
:type reset_weights: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: None
"""
if label_mapping is not None:
self.set_label_mapping(label_mapping)
if not self._model:
self._construct_model()
else:
if self._need_to_reconstruct_model():
self._reconstruct_model()
if reset_weights:
self.reset_weights()
history = defaultdict()
f1 = None
f1_report = []
self._model.reset_metrics()
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
start_time = time.time()
batch_id = 0
for x_train, y_train in train_data:
model_results = self._model.train_on_batch(
x_train, {softmax_output_layer_name: y_train})
sys.stdout.flush()
if verbose:
sys.stdout.write(
"\rEPOCH %d, batch_id %d: loss: %f - acc: %f - "
"f1_score %f" %
(self._epoch_id, batch_id, *model_results[1:]))
batch_id += 1
for i, metric_label in enumerate(self._model.metrics_names):
history[metric_label] = model_results[i]
if val_data:
f1, f1_report = self._validate_training(val_data)
history['f1_report'] = f1_report
val_f1 = f1_report['weighted avg']['f1-score'] \
if f1_report else np.NAN
val_precision = f1_report['weighted avg']['precision'] \
if f1_report else np.NAN
val_recall = f1_report['weighted avg']['recall'] \
if f1_report else np.NAN
epoch_time = time.time() - start_time
logger.info("\rEPOCH %d (%ds), loss: %f - acc: %f - f1_score %f -- "
"val_f1: %f - val_precision: %f - val_recall %f" %
(self._epoch_id, epoch_time, *model_results[1:],
val_f1, val_precision, val_recall))
self._epoch_id += 1
return history, f1, f1_report
def _validate_training(self, val_data, batch_size_test=32,
verbose_log=True, verbose_keras=False):
"""
Validate the model on the test set and return the evaluation metrics.
:param val_data: data generator for the validation
:type val_data: iterator
:param batch_size_test: Number of samples to process in testing
:type batch_size_test: int
:param verbose_log: whether or not to print out scores for training,
etc.
:type verbose_log: bool
:param verbose_keras: whether or not to print out scores for training,
from keras.
:type verbose_keras: bool
        :return: (f1-score, f1 report).
"""
f1 = None
f1_report = None
if val_data is None:
return f1, f1_report
# Predict on the test set
batch_id = 0
y_val_pred = []
y_val_test = []
for x_val, y_val in val_data:
y_val_pred.append(self._model.predict(
x_val, batch_size=batch_size_test, verbose=verbose_keras)[1])
y_val_test.append(np.argmax(y_val, axis=-1))
batch_id += 1
sys.stdout.flush()
if verbose_log:
sys.stdout.write("\rEPOCH %g, validation_batch_id %d" %
(self._epoch_id, batch_id))
tf.keras.backend.set_floatx('float32')
# Clean the predicted entities and the actual entities
f1, f1_report = labeler_utils.evaluate_accuracy(
np.concatenate(y_val_pred, axis=0),
np.concatenate(y_val_test, axis=0),
self.num_labels,
self.reverse_label_mapping,
verbose=verbose_keras)
return f1, f1_report
def predict(self, data, batch_size=32, show_confidences=False,
verbose=True):
"""
Run model and get predictions
:param data: text input
:type data: Union[list, numpy.ndarray]
:param batch_size: number of samples in the batch of data
:type batch_size: int
:param show_confidences: whether user wants prediction confidences
        :type show_confidences: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: char level predictions and confidences
:rtype: dict
"""
if not self._model:
raise ValueError("You are trying to predict without a model. "
"Construct/Load a model before predicting.")
elif self._need_to_reconstruct_model():
raise RuntimeError("The model label mapping definitions have been "
"altered without additional training. Please "
"train the model or reset the label mapping to "
"predict.")
# Pre-allocate space for predictions
confidences = []
sentence_lengths = np.zeros((batch_size,), dtype=int)
predictions = np.zeros((batch_size, self._parameters['max_length']))
if show_confidences:
confidences = np.zeros((batch_size,
self._parameters['max_length'],
self.num_labels))
# Run model with batching
allocation_index = 0
for batch_id, batch_data in enumerate(data):
model_output = self._model(
tf.convert_to_tensor(batch_data)
)
# Count number of samples in batch to prevent array mismatch
num_samples_in_batch = len(batch_data)
allocation_index = batch_id * batch_size
# Double array size
if len(predictions) <= allocation_index:
predictions = np.pad(predictions, ((0, len(predictions)),
(0, 0)), mode='constant')
sentence_lengths = np.pad(
sentence_lengths, pad_width=((0, len(sentence_lengths)),),
mode='constant')
if show_confidences:
confidences = np.pad(confidences,
((0, len(predictions)),
(0, 0), (0, 0)), mode='constant')
if show_confidences:
confidences[allocation_index:allocation_index + num_samples_in_batch] = model_output[0].numpy()
predictions[allocation_index:allocation_index + num_samples_in_batch] = model_output[1].numpy()
sentence_lengths[allocation_index:allocation_index + num_samples_in_batch] = list(map(lambda x: len(x[0]), batch_data))
allocation_index += num_samples_in_batch
# Convert predictions, confidences to lists from numpy
predictions_list = [i for i in range(0, allocation_index)]
confidences_list = None
if show_confidences:
confidences_list = [i for i in range(0, allocation_index)]
# Append slices of predictions to return prediction & confidence matrices
for index, sentence_length \
in enumerate(sentence_lengths[:allocation_index]):
predictions_list[index] = list(predictions[index][:sentence_length])
if show_confidences:
confidences_list[index] = list(confidences[index][:sentence_length])
if show_confidences:
return {'pred': predictions_list, 'conf': confidences_list}
return {'pred': predictions_list}
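    # Illustrative usage (added example, not part of the original class):
    # `data` is expected to be an iterable of batches, each sample being a
    # one-element list holding the raw text, e.g. (made-up strings):
    #
    #   batches = [[["123 Fake St."], ["January 1st, 2021"]]]
    #   output = model.predict(batches, batch_size=2, show_confidences=True)
    #   # output['pred'][0] holds one label index per character of sample one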
def details(self):
"""
Prints the relevant details of the model (summary, parameters, label
mapping)
"""
print("\n###### Model Details ######\n")
self._model.summary()
print("\nModel Parameters:")
for key, value in self._parameters.items():
print("{}: {}".format(key, value))
print("\nModel Label Mapping:")
for key, value in self.label_mapping.items():
print("{}: {}".format(key, value))
| [
"logging.getLogger",
"tensorflow.reduce_sum",
"tensorflow.math.divide_no_nan",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"copy.deepcopy",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.math.minimum",
"tensorflow.keras.layers.Input",
"sklearn.decomposition.PCA",
"numpy.asarray",
"tensorflow.keras.backend.max",
"numpy.concatenate",
"tensorflow.convert_to_tensor",
"sys.stdout.flush",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Conv1D",
"tensorflow.strings.unicode_decode",
"tensorflow.zeros",
"tensorflow.keras.backend.greater_equal",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"tensorflow.keras.backend.set_floatx",
"tensorflow.reduce_max",
"tensorflow.keras.utils.register_keras_serializable",
"os.path.dirname",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.utils.custom_object_scope",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.keras.backend.batch_set_value",
"tensorflow.expand_dims",
"tensorflow.subtract",
"os.path.abspath",
"time.time",
"tensorflow.math.square",
"os.path.join",
"tensorflow.keras.backend.constant",
"json.load",
"numpy.zeros",
"collections.defaultdict",
"tensorflow.constant_initializer",
"tensorflow.abs",
"tensorflow.keras.Model",
"tensorflow.keras.backend.argmax",
"tensorflow.keras.backend.clear_session",
"json.dump",
"sys.stdout.write"
]
| [((775, 806), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (792, 806), False, 'import logging\n'), ((859, 903), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), '()\n', (901, 903), True, 'import tensorflow as tf\n'), ((7294, 7338), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), '()\n', (7336, 7338), True, 'import tensorflow as tf\n'), ((349, 374), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (364, 374), False, 'import os\n'), ((10680, 10718), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': 'n_dims'}), '(n_components=n_dims)\n', (10697, 10718), False, 'from sklearn import decomposition\n'), ((10806, 10834), 'os.path.dirname', 'os.path.dirname', (['source_file'], {}), '(source_file)\n', (10821, 10834), False, 'import os\n'), ((5184, 5211), 'tensorflow.cast', 'tf.cast', (['y_true', 'self.dtype'], {}), '(y_true, self.dtype)\n', (5191, 5211), True, 'import tensorflow as tf\n'), ((5229, 5256), 'tensorflow.cast', 'tf.cast', (['y_pred', 'self.dtype'], {}), '(y_pred, self.dtype)\n', (5236, 5256), True, 'import tensorflow as tf\n'), ((5922, 6013), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', (['self.true_positives', '(self.true_positives + self.false_positives)'], {}), '(self.true_positives, self.true_positives + self.\n false_positives)\n', (5943, 6013), True, 'import tensorflow as tf\n'), ((6048, 6139), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', (['self.true_positives', '(self.true_positives + self.false_negatives)'], {}), '(self.true_positives, self.true_positives + self.\n false_negatives)\n', (6069, 6139), True, 'import tensorflow as tf\n'), ((6281, 6324), 'tensorflow.math.divide_no_nan', 'tf.math.divide_no_nan', (['mul_value', 'add_value'], {}), '(mul_value, add_value)\n', (6302, 6324), True, 'import tensorflow as tf\n'), ((7162, 7205), 'tensorflow.zeros', 'tf.zeros', (['self.init_shape'], {'dtype': 'self.dtype'}), '(self.init_shape, dtype=self.dtype)\n', (7170, 7205), True, 'import tensorflow as tf\n'), ((7214, 7290), 'tensorflow.keras.backend.batch_set_value', 'tf.keras.backend.batch_set_value', (['[(v, reset_value) for v in self.variables]'], {}), '([(v, reset_value) for v in self.variables])\n', (7246, 7290), True, 'import tensorflow as tf\n'), ((10307, 10369), 'os.path.join', 'os.path.join', (['_file_dir', '"""embeddings/glove.840B.300d-char.txt"""'], {}), "(_file_dir, 'embeddings/glove.840B.300d-char.txt')\n", (10319, 10369), False, 'import os\n'), ((16744, 16772), 'copy.deepcopy', 'copy.deepcopy', (['label_mapping'], {}), '(label_mapping)\n', (16757, 16772), False, 'import copy\n'), ((18434, 18480), 'os.path.join', 'os.path.join', (['dirpath', '"""model_parameters.json"""'], {}), "(dirpath, 'model_parameters.json')\n", (18446, 18480), False, 'import os\n'), ((18601, 18644), 'os.path.join', 'os.path.join', (['dirpath', '"""label_mapping.json"""'], {}), "(dirpath, 'label_mapping.json')\n", (18613, 18644), False, 'import os\n'), ((19098, 19144), 'os.path.join', 'os.path.join', (['dirpath', '"""model_parameters.json"""'], {}), "(dirpath, 'model_parameters.json')\n", (19110, 19144), False, 'import os\n'), ((19290, 19333), 'os.path.join', 'os.path.join', (['dirpath', '"""label_mapping.json"""'], {}), "(dirpath, 'label_mapping.json')\n", (19302, 19333), False, 'import os\n'), ((21288, 21322), 'tensorflow.reshape', 'tf.reshape', 
(['input_str_tensor', '[-1]'], {}), '(input_str_tensor, [-1])\n', (21298, 21322), True, 'import tensorflow as tf\n'), ((21350, 21418), 'tensorflow.strings.unicode_decode', 'tf.strings.unicode_decode', (['input_str_flatten'], {'input_encoding': '"""UTF-8"""'}), "(input_str_flatten, input_encoding='UTF-8')\n", (21375, 21418), True, 'import tensorflow as tf\n'), ((21573, 21632), 'tensorflow.math.minimum', 'tf.math.minimum', (['sentences_encode', '(max_char_encoding_id + 1)'], {}), '(sentences_encode, max_char_encoding_id + 1)\n', (21588, 21632), True, 'import tensorflow as tf\n'), ((24599, 24631), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (24629, 24631), True, 'import tensorflow as tf\n'), ((24773, 24801), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (24799, 24801), True, 'import tensorflow as tf\n'), ((25785, 25852), 'numpy.zeros', 'np.zeros', (["(max_char_encoding_id + 2, self._parameters['dim_embed'])"], {}), "((max_char_encoding_id + 2, self._parameters['dim_embed']))\n", (25793, 25852), True, 'import numpy as np\n'), ((27629, 27672), 'tensorflow.keras.backend.argmax', 'tf.keras.backend.argmax', (['self._model.output'], {}), '(self._model.output)\n', (27652, 27672), True, 'import tensorflow as tf\n'), ((28048, 28098), 'tensorflow.keras.Model', 'tf.keras.Model', (['self._model.inputs', 'argmax_outputs'], {}), '(self._model.inputs, argmax_outputs)\n', (28062, 28098), True, 'import tensorflow as tf\n'), ((29070, 29102), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (29100, 29102), True, 'import tensorflow as tf\n'), ((29715, 29759), 'tensorflow.keras.backend.argmax', 'tf.keras.backend.argmax', (['final_softmax_layer'], {}), '(final_softmax_layer)\n', (29738, 29759), True, 'import tensorflow as tf\n'), ((30186, 30236), 'tensorflow.keras.Model', 'tf.keras.Model', (['self._model.inputs', 'argmax_outputs'], {}), '(self._model.inputs, argmax_outputs)\n', (30200, 30236), True, 'import tensorflow as tf\n'), ((32129, 32142), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (32140, 32142), False, 'from collections import defaultdict\n'), ((32321, 32332), 'time.time', 'time.time', ([], {}), '()\n', (32330, 32332), False, 'import time\n'), ((35185, 35223), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float32"""'], {}), "('float32')\n", (35212, 35223), True, 'import tensorflow as tf\n'), ((36799, 36833), 'numpy.zeros', 'np.zeros', (['(batch_size,)'], {'dtype': 'int'}), '((batch_size,), dtype=int)\n', (36807, 36833), True, 'import numpy as np\n'), ((36856, 36910), 'numpy.zeros', 'np.zeros', (["(batch_size, self._parameters['max_length'])"], {}), "((batch_size, self._parameters['max_length']))\n", (36864, 36910), True, 'import numpy as np\n'), ((4853, 4898), 'tensorflow.reduce_max', 'tf.reduce_max', (['y_pred'], {'axis': '(-1)', 'keepdims': '(True)'}), '(y_pred, axis=-1, keepdims=True)\n', (4866, 4898), True, 'import tensorflow as tf\n'), ((5444, 5478), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['val'], {'axis': 'self.axis'}), '(val, axis=self.axis)\n', (5457, 5478), True, 'import tensorflow as tf\n'), ((6589, 6622), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(f1_score * weights)'], {}), '(f1_score * weights)\n', (6602, 6622), True, 'import tensorflow as tf\n'), ((9847, 9867), 'numpy.asarray', 'np.asarray', (['line[1:]'], {}), '(line[1:])\n', (9857, 9867), True, 'import numpy as np\n'), ((10550, 10564), 'numpy.asarray', 
'np.asarray', (['ls'], {}), '(ls)\n', (10560, 10564), True, 'import numpy as np\n'), ((18544, 18575), 'json.dump', 'json.dump', (['self._parameters', 'fp'], {}), '(self._parameters, fp)\n', (18553, 18575), False, 'import json\n'), ((18703, 18736), 'json.dump', 'json.dump', (['self.label_mapping', 'fp'], {}), '(self.label_mapping, fp)\n', (18712, 18736), False, 'import json\n'), ((18762, 18783), 'os.path.join', 'os.path.join', (['dirpath'], {}), '(dirpath)\n', (18774, 18783), False, 'import os\n'), ((19221, 19234), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (19230, 19234), False, 'import json\n'), ((19408, 19421), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (19417, 19421), False, 'import json\n'), ((19673, 19723), 'tensorflow.keras.utils.custom_object_scope', 'tf.keras.utils.custom_object_scope', (['custom_objects'], {}), '(custom_objects)\n', (19707, 19723), True, 'import tensorflow as tf\n'), ((19748, 19783), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['dirpath'], {}), '(dirpath)\n', (19774, 19783), True, 'import tensorflow as tf\n'), ((21506, 21526), 'tensorflow.cast', 'tf.cast', (['(1)', 'tf.int32'], {}), '(1, tf.int32)\n', (21513, 21526), True, 'import tensorflow as tf\n'), ((25218, 25271), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None,)', 'dtype': 'tf.string'}), '(shape=(None,), dtype=tf.string)\n', (25239, 25271), True, 'import tensorflow as tf\n'), ((26262, 26426), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(max_char_encoding_id + 2)', "self._parameters['dim_embed']"], {'weights': '[embedding_matrix]', 'input_length': 'input_shape[0]', 'trainable': '(True)'}), "(max_char_encoding_id + 2, self._parameters[\n 'dim_embed'], weights=[embedding_matrix], input_length=input_shape[0],\n trainable=True)\n", (26287, 26426), True, 'import tensorflow as tf\n'), ((27490, 27545), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (27511, 27545), True, 'import tensorflow as tf\n'), ((29504, 29575), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_labels'], {'activation': '"""softmax"""', 'name': '"""dense_2"""'}), "(num_labels, activation='softmax', name='dense_2')\n", (29525, 29575), True, 'import tensorflow as tf\n'), ((32529, 32547), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (32545, 32547), False, 'import sys\n'), ((34988, 35006), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (35004, 35006), False, 'import sys\n'), ((35356, 35390), 'numpy.concatenate', 'np.concatenate', (['y_val_pred'], {'axis': '(0)'}), '(y_val_pred, axis=0)\n', (35370, 35390), True, 'import numpy as np\n'), ((35404, 35438), 'numpy.concatenate', 'np.concatenate', (['y_val_test'], {'axis': '(0)'}), '(y_val_test, axis=0)\n', (35418, 35438), True, 'import numpy as np\n'), ((36966, 37037), 'numpy.zeros', 'np.zeros', (["(batch_size, self._parameters['max_length'], self.num_labels)"], {}), "((batch_size, self._parameters['max_length'], self.num_labels))\n", (36974, 37037), True, 'import numpy as np\n'), ((6218, 6243), 'tensorflow.math.square', 'tf.math.square', (['self.beta'], {}), '(self.beta)\n', (6232, 6243), True, 'import tensorflow as tf\n'), ((6356, 6381), 'tensorflow.math.square', 'tf.math.square', (['self.beta'], {}), '(self.beta)\n', (6370, 6381), True, 'import tensorflow as tf\n'), ((6511, 6551), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.weights_intermediate'], {}), 
'(self.weights_intermediate)\n', (6524, 6551), True, 'import tensorflow as tf\n'), ((6704, 6728), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['f1_score'], {}), '(f1_score)\n', (6718, 6728), True, 'import tensorflow as tf\n'), ((22764, 22799), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['threshold_'], {}), '(threshold_)\n', (22787, 22799), True, 'import tensorflow as tf\n'), ((23089, 23129), 'tensorflow.gather', 'tf.gather', (['self.thresh_vec', 'argmax_layer'], {}), '(self.thresh_vec, argmax_layer)\n', (23098, 23129), True, 'import tensorflow as tf\n'), ((23170, 23216), 'tensorflow.keras.backend.max', 'tf.keras.backend.max', (['confidence_layer'], {'axis': '(2)'}), '(confidence_layer, axis=2)\n', (23190, 23216), True, 'import tensorflow as tf\n'), ((23744, 23808), 'tensorflow.keras.backend.constant', 'tf.keras.backend.constant', (['default_ind'], {'dtype': 'argmax_layer.dtype'}), '(default_ind, dtype=argmax_layer.dtype)\n', (23769, 23808), True, 'import tensorflow as tf\n'), ((26596, 26714), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': 'fil', 'kernel_size': "self._parameters['size_conv']", 'activation': '"""relu"""', 'padding': '"""same"""'}), "(filters=fil, kernel_size=self._parameters[\n 'size_conv'], activation='relu', padding='same')\n", (26618, 26714), True, 'import tensorflow as tf\n'), ((27012, 27071), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'fused': '(False)', 'scale': '(True)'}), '(fused=False, scale=True)\n', (27046, 27071), True, 'import tensorflow as tf\n'), ((27209, 27261), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'size', 'activation': '"""relu"""'}), "(units=size, activation='relu')\n", (27230, 27261), True, 'import tensorflow as tf\n'), ((32588, 32724), 'sys.stdout.write', 'sys.stdout.write', (["('\\rEPOCH %d, batch_id %d: loss: %f - acc: %f - f1_score %f' % (self.\n _epoch_id, batch_id, *model_results[1:]))"], {}), "(\n '\\rEPOCH %d, batch_id %d: loss: %f - acc: %f - f1_score %f' % (self.\n _epoch_id, batch_id, *model_results[1:]))\n", (32604, 32724), False, 'import sys\n'), ((33399, 33410), 'time.time', 'time.time', ([], {}), '()\n', (33408, 33410), False, 'import time\n'), ((34923, 34948), 'numpy.argmax', 'np.argmax', (['y_val'], {'axis': '(-1)'}), '(y_val, axis=-1)\n', (34932, 34948), True, 'import numpy as np\n'), ((35051, 35138), 'sys.stdout.write', 'sys.stdout.write', (["('\\rEPOCH %g, validation_batch_id %d' % (self._epoch_id, batch_id))"], {}), "('\\rEPOCH %g, validation_batch_id %d' % (self._epoch_id,\n batch_id))\n", (35067, 35138), False, 'import sys\n'), ((37283, 37315), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['batch_data'], {}), '(batch_data)\n', (37303, 37315), True, 'import tensorflow as tf\n'), ((5083, 5097), 'tensorflow.abs', 'tf.abs', (['y_pred'], {}), '(y_pred)\n', (5089, 5097), True, 'import tensorflow as tf\n'), ((5391, 5423), 'tensorflow.expand_dims', 'tf.expand_dims', (['sample_weight', '(1)'], {}), '(sample_weight, 1)\n', (5405, 5423), True, 'import tensorflow as tf\n'), ((23422, 23495), 'tensorflow.keras.backend.greater_equal', 'tf.keras.backend.greater_equal', (['confidence_max_layer', 'threshold_at_argmax'], {}), '(confidence_max_layer, threshold_at_argmax)\n', (23452, 23495), True, 'import tensorflow as tf\n'), ((26841, 26893), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (["self._parameters['dropout']"], {}), "(self._parameters['dropout'])\n", (26864, 26893), True, 'import 
tensorflow as tf\n'), ((27360, 27412), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (["self._parameters['dropout']"], {}), "(self._parameters['dropout'])\n", (27383, 27412), True, 'import tensorflow as tf\n'), ((24043, 24081), 'tensorflow.subtract', 'tf.subtract', (['argmax_layer', 'bg_label_tf'], {}), '(argmax_layer, bg_label_tf)\n', (24054, 24081), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
from airflow.www.decorators import gzipped
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
| [
"flask.request.args.get",
"airflow.hooks.hive_hooks.HiveCliHook",
"json.dumps",
"pandas.set_option",
"airflow.hooks.presto_hook.PrestoHook",
"flask_admin.expose",
"flask.Blueprint",
"airflow.hooks.hive_hooks.HiveMetastoreHook",
"airflow.hooks.mysql_hook.MySqlHook"
]
| [((1547, 1588), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(-1)'], {}), "('display.max_colwidth', -1)\n", (1560, 1588), True, 'import pandas as pd\n'), ((5719, 5861), 'flask.Blueprint', 'Blueprint', (['"""metastore_browser"""', '__name__'], {'template_folder': '"""templates"""', 'static_folder': '"""static"""', 'static_url_path': '"""/static/metastore_browser"""'}), "('metastore_browser', __name__, template_folder='templates',\n static_folder='static', static_url_path='/static/metastore_browser')\n", (5728, 5861), False, 'from flask import Blueprint, request\n'), ((1698, 1709), 'flask_admin.expose', 'expose', (['"""/"""'], {}), "('/')\n", (1704, 1709), False, 'from flask_admin import BaseView, expose\n'), ((2484, 2501), 'flask_admin.expose', 'expose', (['"""/table/"""'], {}), "('/table/')\n", (2490, 2501), False, 'from flask_admin import BaseView, expose\n'), ((2813, 2827), 'flask_admin.expose', 'expose', (['"""/db/"""'], {}), "('/db/')\n", (2819, 2827), False, 'from flask_admin import BaseView, expose\n'), ((3113, 3135), 'flask_admin.expose', 'expose', (['"""/partitions/"""'], {}), "('/partitions/')\n", (3119, 3135), False, 'from flask_admin import BaseView, expose\n'), ((3966, 3985), 'flask_admin.expose', 'expose', (['"""/objects/"""'], {}), "('/objects/')\n", (3972, 3985), False, 'from flask_admin import BaseView, expose\n'), ((4975, 4991), 'flask_admin.expose', 'expose', (['"""/data/"""'], {}), "('/data/')\n", (4981, 4991), False, 'from flask_admin import BaseView, expose\n'), ((5349, 5364), 'flask_admin.expose', 'expose', (['"""/ddl/"""'], {}), "('/ddl/')\n", (5355, 5364), False, 'from flask_admin import BaseView, expose\n'), ((2031, 2065), 'airflow.hooks.mysql_hook.MySqlHook', 'MySqlHook', (['METASTORE_MYSQL_CONN_ID'], {}), '(METASTORE_MYSQL_CONN_ID)\n', (2040, 2065), False, 'from airflow.hooks.mysql_hook import MySqlHook\n'), ((2544, 2569), 'flask.request.args.get', 'request.args.get', (['"""table"""'], {}), "('table')\n", (2560, 2569), False, 'from flask import Blueprint, request\n'), ((2582, 2618), 'airflow.hooks.hive_hooks.HiveMetastoreHook', 'HiveMetastoreHook', (['METASTORE_CONN_ID'], {}), '(METASTORE_CONN_ID)\n', (2599, 2618), False, 'from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook\n'), ((2859, 2881), 'flask.request.args.get', 'request.args.get', (['"""db"""'], {}), "('db')\n", (2875, 2881), False, 'from flask import Blueprint, request\n'), ((2894, 2930), 'airflow.hooks.hive_hooks.HiveMetastoreHook', 'HiveMetastoreHook', (['METASTORE_CONN_ID'], {}), '(METASTORE_CONN_ID)\n', (2911, 2930), False, 'from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook\n'), ((3732, 3766), 'airflow.hooks.mysql_hook.MySqlHook', 'MySqlHook', (['METASTORE_MYSQL_CONN_ID'], {}), '(METASTORE_MYSQL_CONN_ID)\n', (3741, 3766), False, 'from airflow.hooks.mysql_hook import MySqlHook\n'), ((4792, 4826), 'airflow.hooks.mysql_hook.MySqlHook', 'MySqlHook', (['METASTORE_MYSQL_CONN_ID'], {}), '(METASTORE_MYSQL_CONN_ID)\n', (4801, 4826), False, 'from airflow.hooks.mysql_hook import MySqlHook\n'), ((4942, 4955), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (4952, 4955), False, 'import json\n'), ((5028, 5053), 'flask.request.args.get', 'request.args.get', (['"""table"""'], {}), "('table')\n", (5044, 5053), False, 'from flask import Blueprint, request\n'), ((5136, 5162), 'airflow.hooks.presto_hook.PrestoHook', 'PrestoHook', (['PRESTO_CONN_ID'], {}), '(PRESTO_CONN_ID)\n', (5146, 5162), False, 'from airflow.hooks.presto_hook import PrestoHook\n'), 
((5400, 5425), 'flask.request.args.get', 'request.args.get', (['"""table"""'], {}), "('table')\n", (5416, 5425), False, 'from flask import Blueprint, request\n'), ((5501, 5530), 'airflow.hooks.hive_hooks.HiveCliHook', 'HiveCliHook', (['HIVE_CLI_CONN_ID'], {}), '(HIVE_CLI_CONN_ID)\n', (5512, 5530), False, 'from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook\n'), ((3186, 3211), 'flask.request.args.get', 'request.args.get', (['"""table"""'], {}), "('table')\n", (3202, 3211), False, 'from flask import Blueprint, request\n')] |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import stat
import tempfile
import threading
from typing import List
from sys import argv
import os
import requests
import shutil
import json
import yaml
import subprocess
from lib.composegenerator.v0.generate import createComposeConfigFromV0
from lib.composegenerator.v1.generate import createComposeConfigFromV1
from lib.appymlgenerator import convertComposeYMLToAppYML
from lib.validate import findAndValidateApps
from lib.metadata import getAppRegistry, getSimpleAppRegistry
from lib.entropy import deriveEntropy
# For an array of threads, join them and wait for them to finish
def joinThreads(threads: List[threading.Thread]):
for thread in threads:
thread.join()
# The directory with this script
scriptDir = os.path.dirname(os.path.realpath(__file__))
nodeRoot = os.path.join(scriptDir, "..", "..")
appsDir = os.path.join(nodeRoot, "apps")
appSystemDir = os.path.join(nodeRoot, "app-system")
sourcesList = os.path.join(appSystemDir, "sources.list")
appDataDir = os.path.join(nodeRoot, "app-data")
userFile = os.path.join(nodeRoot, "db", "user.json")
legacyScript = os.path.join(nodeRoot, "scripts", "app")
def runCompose(app: str, args: str):
compose(app, args)
# Returns every argument after the second one in sys.argv, joined into a string by spaces
def getArguments():
arguments = ""
for i in range(3, len(argv)):
arguments += argv[i] + " "
return arguments
def getAppYml(name):
url = 'https://raw.githubusercontent.com/runcitadel/compose-nonfree/main/apps/' + \
name + '/' + 'app.yml'
response = requests.get(url)
if response.status_code == 200:
return response.text
else:
return False
def getAppYmlPath(app):
return os.path.join(appsDir, app, 'app.yml')
def composeToAppYml(app):
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
appYml = os.path.join(appsDir, app, "app.yml")
# Read the compose file and parse it
with open(composeFile, "r") as f:
compose = yaml.safe_load(f)
registry = os.path.join(appsDir, "registry.json")
# Load the registry
with open(registry, "r") as f:
registryData = json.load(f)
converted = convertComposeYMLToAppYML(compose, app, registryData)
# Put converted into the app.yml after encoding it as YAML
with open(appYml, "w") as f:
f.write(yaml.dump(converted, sort_keys=False))
def update(verbose: bool = False):
apps = findAndValidateApps(appsDir)
# The compose generation process updates the registry, so we need to get it set up with the basics before that
registry = getAppRegistry(apps, appsDir)
with open(os.path.join(appsDir, "registry.json"), "w") as f:
json.dump(registry, f, indent=4, sort_keys=True)
print("Wrote registry to registry.json")
simpleRegistry = getSimpleAppRegistry(apps, appsDir)
with open(os.path.join(appSystemDir, "apps.json"), "w") as f:
json.dump(simpleRegistry, f, indent=4, sort_keys=True)
print("Wrote version information to apps.json")
# Loop through the apps and generate valid compose files from them, then put these into the app dir
for app in apps:
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
appYml = os.path.join(appsDir, app, "app.yml")
with open(composeFile, "w") as f:
appCompose = getApp(appYml, app)
if(appCompose):
f.write(yaml.dump(appCompose, sort_keys=False))
if verbose:
print("Wrote " + app + " to " + composeFile)
print("Generated configuration successfully")
def download(app: str = None):
if(app is None):
apps = findAndValidateApps(appsDir)
for app in apps:
data = getAppYml(app)
if data:
with open(getAppYmlPath(app), 'w') as f:
f.write(data)
else:
print("Warning: Could not download " + app)
else:
data = getAppYml(app)
if data:
with open(getAppYmlPath(app), 'w') as f:
f.write(data)
else:
print("Warning: Could not download " + app)
def getUserData():
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
return userData
def startInstalled():
    # If the user file doesn't exist, just do nothing
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
threads = []
for app in userData["installedApps"]:
print("Starting app {}...".format(app))
        # Start runCompose(app, "up --detach") asynchronously for every installed app, then wait for all threads to finish
thread = threading.Thread(target=runCompose, args=(app, "up --detach"))
thread.start()
threads.append(thread)
joinThreads(threads)
def stopInstalled():
    # If the user file doesn't exist, just do nothing
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
threads = []
for app in userData["installedApps"]:
print("Stopping app {}...".format(app))
        # Start runCompose(app, "rm --force --stop") asynchronously for every installed app, then wait for all threads to finish
thread = threading.Thread(
target=runCompose, args=(app, "rm --force --stop"))
thread.start()
threads.append(thread)
joinThreads(threads)
# Loads an app.yml and converts it to a docker-compose.yml
def getApp(appFile: str, appId: str):
with open(appFile, 'r') as f:
app = yaml.safe_load(f)
if not "metadata" in app:
raise Exception("Error: Could not find metadata in " + appFile)
app["metadata"]["id"] = appId
if('version' in app and str(app['version']) == "1"):
return createComposeConfigFromV1(app, nodeRoot)
else:
return createComposeConfigFromV0(app)
def compose(app, arguments):
# Runs a compose command in the app dir
# Before that, check if a docker-compose.yml exists in the app dir
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
commonComposeFile = os.path.join(appSystemDir, "docker-compose.common.yml")
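    # Export app-specific values (domain, hidden services, seeds, data dirs) as environment variables for docker compose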
os.environ["APP_DOMAIN"] = subprocess.check_output(
"hostname -s 2>/dev/null || echo 'umbrel'", shell=True).decode("utf-8") + ".local"
os.environ["APP_HIDDEN_SERVICE"] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
os.path.join(nodeRoot, "tor", "data", "app-{}/hostname".format(app))), shell=True).decode("utf-8")
os.environ["APP_SEED"] = deriveEntropy("app-{}-seed".format(app))
# Allow more app seeds, with random numbers from 1-5 assigned in a loop
for i in range(1, 6):
os.environ["APP_SEED_{}".format(i)] = deriveEntropy("app-{}-seed{}".format(app, i))
os.environ["APP_DATA_DIR"] = os.path.join(appDataDir, app)
os.environ["BITCOIN_DATA_DIR"] = os.path.join(nodeRoot, "bitcoin")
os.environ["LND_DATA_DIR"] = os.path.join(nodeRoot, "lnd")
# List all hidden services for an app and put their hostname in the environment
hiddenServices: List[str] = getAppHiddenServices(app)
for service in hiddenServices:
appHiddenServiceFile = os.path.join(
nodeRoot, "tor", "data", "app-{}-{}/hostname".format(app, service))
os.environ["APP_HIDDEN_SERVICE_{}".format(service.upper().replace("-", "_"))] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
appHiddenServiceFile), shell=True).decode("utf-8")
if not os.path.isfile(composeFile):
print("Error: Could not find docker-compose.yml in " + app)
exit(1)
os.system(
"docker compose --env-file '{}' --project-name '{}' --file '{}' --file '{}' {}".format(
os.path.join(nodeRoot, ".env"), app, commonComposeFile, composeFile, arguments))
def remove_readonly(func, path, _):
os.chmod(path, stat.S_IWRITE)
func(path)
def deleteData(app: str):
dataDir = os.path.join(appDataDir, app)
try:
shutil.rmtree(dataDir, onerror=remove_readonly)
except FileNotFoundError:
pass
def createDataDir(app: str):
dataDir = os.path.join(appDataDir, app)
appDir = os.path.join(appsDir, app)
if os.path.isdir(dataDir):
deleteData(app)
# Recursively copy everything from appDir to dataDir while excluding .gitignore
shutil.copytree(appDir, dataDir, symlinks=False,
ignore=shutil.ignore_patterns(".gitignore"))
# Chown and chmod dataDir to have the same owner and permissions as appDir
os.chown(dataDir, os.stat(appDir).st_uid, os.stat(appDir).st_gid)
os.chmod(dataDir, os.stat(appDir).st_mode)
def setInstalled(app: str):
userData = getUserData()
if not "installedApps" in userData:
userData["installedApps"] = []
userData["installedApps"].append(app)
userData["installedApps"] = list(set(userData["installedApps"]))
with open(userFile, "w") as f:
json.dump(userData, f)
def setRemoved(app: str):
userData = getUserData()
if not "installedApps" in userData:
return
userData["installedApps"] = list(set(userData["installedApps"]))
userData["installedApps"].remove(app)
with open(userFile, "w") as f:
json.dump(userData, f)
def getAppHiddenServices(app: str):
torDir = os.path.join(nodeRoot, "tor", "data")
# List all subdirectories of torDir which start with app-${APP}-
# but return them without the app-${APP}- prefix
results = []
for subdir in os.listdir(torDir):
if subdir.startswith("app-{}-".format(app)):
results.append(subdir[len("app-{}-".format(app)):])
return results
# Parse the sources.list repo file, which contains a list of sources in the format
# <git-url> <branch>
# For every line, clone the repo to a temporary dir and checkout the branch
# Then, check that repo's apps in the temporary dir/apps and, for every app,
# overwrite the current app dir with the contents of the temporary dir/apps/app
# Also, keep a list of apps from every repo, a repo later in the file may not overwrite an app from a repo earlier in the file
def updateRepos():
# Get the list of repos
repos = []
with open(sourcesList) as f:
repos = f.readlines()
# For each repo, clone the repo to a temporary dir, checkout the branch,
# and overwrite the current app dir with the contents of the temporary dir/apps/app
alreadyInstalled = []
for repo in repos:
repo = repo.strip()
if repo == "":
continue
# Split the repo into the git url and the branch
repo = repo.split(" ")
if len(repo) != 2:
print("Error: Invalid repo format in " + sourcesList)
exit(1)
gitUrl = repo[0]
branch = repo[1]
# Clone the repo to a temporary dir
tempDir = tempfile.mkdtemp()
print("Cloning the repository")
# Git clone with a depth of 1 to avoid cloning the entire repo
# Dont print anything to stdout, as we don't want to see the git clone output
subprocess.run("git clone --depth 1 {} {}".format(gitUrl, tempDir), shell=True, stdout=subprocess.DEVNULL)
# Overwrite the current app dir with the contents of the temporary dir/apps/app
for app in os.listdir(os.path.join(tempDir, "apps")):
# if the app is already installed, don't overwrite it
if app in alreadyInstalled:
continue
if os.path.isdir(os.path.join(appsDir, app)):
shutil.rmtree(os.path.join(appsDir, app), onerror=remove_readonly)
if os.path.isdir(os.path.join(tempDir, "apps", app)):
shutil.copytree(os.path.join(tempDir, "apps", app), os.path.join(appsDir, app),
symlinks=False, ignore=shutil.ignore_patterns(".gitignore"))
alreadyInstalled.append(app)
# Remove the temporary dir
shutil.rmtree(tempDir)
| [
"os.listdir",
"os.chmod",
"lib.metadata.getSimpleAppRegistry",
"os.path.isdir",
"lib.composegenerator.v1.generate.createComposeConfigFromV1",
"lib.validate.findAndValidateApps",
"subprocess.check_output",
"yaml.dump",
"shutil.ignore_patterns",
"lib.appymlgenerator.convertComposeYMLToAppYML",
"requests.get",
"os.path.isfile",
"tempfile.mkdtemp",
"lib.composegenerator.v0.generate.createComposeConfigFromV0",
"lib.metadata.getAppRegistry",
"os.stat",
"os.path.join",
"os.path.realpath",
"yaml.safe_load",
"shutil.rmtree",
"json.load",
"threading.Thread",
"json.dump"
]
| [((893, 928), 'os.path.join', 'os.path.join', (['scriptDir', '""".."""', '""".."""'], {}), "(scriptDir, '..', '..')\n", (905, 928), False, 'import os\n'), ((939, 969), 'os.path.join', 'os.path.join', (['nodeRoot', '"""apps"""'], {}), "(nodeRoot, 'apps')\n", (951, 969), False, 'import os\n'), ((985, 1021), 'os.path.join', 'os.path.join', (['nodeRoot', '"""app-system"""'], {}), "(nodeRoot, 'app-system')\n", (997, 1021), False, 'import os\n'), ((1036, 1078), 'os.path.join', 'os.path.join', (['appSystemDir', '"""sources.list"""'], {}), "(appSystemDir, 'sources.list')\n", (1048, 1078), False, 'import os\n'), ((1092, 1126), 'os.path.join', 'os.path.join', (['nodeRoot', '"""app-data"""'], {}), "(nodeRoot, 'app-data')\n", (1104, 1126), False, 'import os\n'), ((1138, 1179), 'os.path.join', 'os.path.join', (['nodeRoot', '"""db"""', '"""user.json"""'], {}), "(nodeRoot, 'db', 'user.json')\n", (1150, 1179), False, 'import os\n'), ((1195, 1235), 'os.path.join', 'os.path.join', (['nodeRoot', '"""scripts"""', '"""app"""'], {}), "(nodeRoot, 'scripts', 'app')\n", (1207, 1235), False, 'import os\n'), ((854, 880), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (870, 880), False, 'import os\n'), ((1686, 1703), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1698, 1703), False, 'import requests\n'), ((1837, 1874), 'os.path.join', 'os.path.join', (['appsDir', 'app', '"""app.yml"""'], {}), "(appsDir, app, 'app.yml')\n", (1849, 1874), False, 'import os\n'), ((1921, 1969), 'os.path.join', 'os.path.join', (['appsDir', 'app', '"""docker-compose.yml"""'], {}), "(appsDir, app, 'docker-compose.yml')\n", (1933, 1969), False, 'import os\n'), ((1983, 2020), 'os.path.join', 'os.path.join', (['appsDir', 'app', '"""app.yml"""'], {}), "(appsDir, app, 'app.yml')\n", (1995, 2020), False, 'import os\n'), ((2151, 2189), 'os.path.join', 'os.path.join', (['appsDir', '"""registry.json"""'], {}), "(appsDir, 'registry.json')\n", (2163, 2189), False, 'import os\n'), ((2301, 2354), 'lib.appymlgenerator.convertComposeYMLToAppYML', 'convertComposeYMLToAppYML', (['compose', 'app', 'registryData'], {}), '(compose, app, registryData)\n', (2326, 2354), False, 'from lib.appymlgenerator import convertComposeYMLToAppYML\n'), ((2554, 2582), 'lib.validate.findAndValidateApps', 'findAndValidateApps', (['appsDir'], {}), '(appsDir)\n', (2573, 2582), False, 'from lib.validate import findAndValidateApps\n'), ((2713, 2742), 'lib.metadata.getAppRegistry', 'getAppRegistry', (['apps', 'appsDir'], {}), '(apps, appsDir)\n', (2727, 2742), False, 'from lib.metadata import getAppRegistry, getSimpleAppRegistry\n'), ((2932, 2967), 'lib.metadata.getSimpleAppRegistry', 'getSimpleAppRegistry', (['apps', 'appsDir'], {}), '(apps, appsDir)\n', (2952, 2967), False, 'from lib.metadata import getAppRegistry, getSimpleAppRegistry\n'), ((4326, 4350), 'os.path.isfile', 'os.path.isfile', (['userFile'], {}), '(userFile)\n', (4340, 4350), False, 'import os\n'), ((4544, 4568), 'os.path.isfile', 'os.path.isfile', (['userFile'], {}), '(userFile)\n', (4558, 4568), False, 'import os\n'), ((5122, 5146), 'os.path.isfile', 'os.path.isfile', (['userFile'], {}), '(userFile)\n', (5136, 5146), False, 'import os\n'), ((6260, 6308), 'os.path.join', 'os.path.join', (['appsDir', 'app', '"""docker-compose.yml"""'], {}), "(appsDir, app, 'docker-compose.yml')\n", (6272, 6308), False, 'import os\n'), ((6333, 6388), 'os.path.join', 'os.path.join', (['appSystemDir', '"""docker-compose.common.yml"""'], {}), "(appSystemDir, 'docker-compose.common.yml')\n", 
(6345, 6388), False, 'import os\n'), ((7058, 7087), 'os.path.join', 'os.path.join', (['appDataDir', 'app'], {}), '(appDataDir, app)\n', (7070, 7087), False, 'import os\n'), ((7125, 7158), 'os.path.join', 'os.path.join', (['nodeRoot', '"""bitcoin"""'], {}), "(nodeRoot, 'bitcoin')\n", (7137, 7158), False, 'import os\n'), ((7192, 7221), 'os.path.join', 'os.path.join', (['nodeRoot', '"""lnd"""'], {}), "(nodeRoot, 'lnd')\n", (7204, 7221), False, 'import os\n'), ((8125, 8154), 'os.chmod', 'os.chmod', (['path', 'stat.S_IWRITE'], {}), '(path, stat.S_IWRITE)\n', (8133, 8154), False, 'import os\n'), ((8212, 8241), 'os.path.join', 'os.path.join', (['appDataDir', 'app'], {}), '(appDataDir, app)\n', (8224, 8241), False, 'import os\n'), ((8395, 8424), 'os.path.join', 'os.path.join', (['appDataDir', 'app'], {}), '(appDataDir, app)\n', (8407, 8424), False, 'import os\n'), ((8438, 8464), 'os.path.join', 'os.path.join', (['appsDir', 'app'], {}), '(appsDir, app)\n', (8450, 8464), False, 'import os\n'), ((8472, 8494), 'os.path.isdir', 'os.path.isdir', (['dataDir'], {}), '(dataDir)\n', (8485, 8494), False, 'import os\n'), ((9573, 9610), 'os.path.join', 'os.path.join', (['nodeRoot', '"""tor"""', '"""data"""'], {}), "(nodeRoot, 'tor', 'data')\n", (9585, 9610), False, 'import os\n'), ((9768, 9786), 'os.listdir', 'os.listdir', (['torDir'], {}), '(torDir)\n', (9778, 9786), False, 'import os\n'), ((2118, 2135), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2132, 2135), False, 'import yaml\n'), ((2272, 2284), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2281, 2284), False, 'import json\n'), ((2816, 2864), 'json.dump', 'json.dump', (['registry', 'f'], {'indent': '(4)', 'sort_keys': '(True)'}), '(registry, f, indent=4, sort_keys=True)\n', (2825, 2864), False, 'import json\n'), ((3042, 3096), 'json.dump', 'json.dump', (['simpleRegistry', 'f'], {'indent': '(4)', 'sort_keys': '(True)'}), '(simpleRegistry, f, indent=4, sort_keys=True)\n', (3051, 3096), False, 'import json\n'), ((3297, 3345), 'os.path.join', 'os.path.join', (['appsDir', 'app', '"""docker-compose.yml"""'], {}), "(appsDir, app, 'docker-compose.yml')\n", (3309, 3345), False, 'import os\n'), ((3363, 3400), 'os.path.join', 'os.path.join', (['appsDir', 'app', '"""app.yml"""'], {}), "(appsDir, app, 'app.yml')\n", (3375, 3400), False, 'import os\n'), ((3792, 3820), 'lib.validate.findAndValidateApps', 'findAndValidateApps', (['appsDir'], {}), '(appsDir)\n', (3811, 3820), False, 'from lib.validate import findAndValidateApps\n'), ((4884, 4946), 'threading.Thread', 'threading.Thread', ([], {'target': 'runCompose', 'args': "(app, 'up --detach')"}), "(target=runCompose, args=(app, 'up --detach'))\n", (4900, 4946), False, 'import threading\n'), ((5462, 5530), 'threading.Thread', 'threading.Thread', ([], {'target': 'runCompose', 'args': "(app, 'rm --force --stop')"}), "(target=runCompose, args=(app, 'rm --force --stop'))\n", (5478, 5530), False, 'import threading\n'), ((5771, 5788), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (5785, 5788), False, 'import yaml\n'), ((5999, 6039), 'lib.composegenerator.v1.generate.createComposeConfigFromV1', 'createComposeConfigFromV1', (['app', 'nodeRoot'], {}), '(app, nodeRoot)\n', (6024, 6039), False, 'from lib.composegenerator.v1.generate import createComposeConfigFromV1\n'), ((6065, 6095), 'lib.composegenerator.v0.generate.createComposeConfigFromV0', 'createComposeConfigFromV0', (['app'], {}), '(app)\n', (6090, 6095), False, 'from lib.composegenerator.v0.generate import createComposeConfigFromV0\n'), 
((7766, 7793), 'os.path.isfile', 'os.path.isfile', (['composeFile'], {}), '(composeFile)\n', (7780, 7793), False, 'import os\n'), ((8259, 8306), 'shutil.rmtree', 'shutil.rmtree', (['dataDir'], {'onerror': 'remove_readonly'}), '(dataDir, onerror=remove_readonly)\n', (8272, 8306), False, 'import shutil\n'), ((9210, 9232), 'json.dump', 'json.dump', (['userData', 'f'], {}), '(userData, f)\n', (9219, 9232), False, 'import json\n'), ((9499, 9521), 'json.dump', 'json.dump', (['userData', 'f'], {}), '(userData, f)\n', (9508, 9521), False, 'import json\n'), ((11112, 11130), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (11128, 11130), False, 'import tempfile\n'), ((12208, 12230), 'shutil.rmtree', 'shutil.rmtree', (['tempDir'], {}), '(tempDir)\n', (12221, 12230), False, 'import shutil\n'), ((2467, 2504), 'yaml.dump', 'yaml.dump', (['converted'], {'sort_keys': '(False)'}), '(converted, sort_keys=False)\n', (2476, 2504), False, 'import yaml\n'), ((2757, 2795), 'os.path.join', 'os.path.join', (['appsDir', '"""registry.json"""'], {}), "(appsDir, 'registry.json')\n", (2769, 2795), False, 'import os\n'), ((2982, 3021), 'os.path.join', 'os.path.join', (['appSystemDir', '"""apps.json"""'], {}), "(appSystemDir, 'apps.json')\n", (2994, 3021), False, 'import os\n'), ((4414, 4426), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4423, 4426), False, 'import json\n'), ((4632, 4644), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4641, 4644), False, 'import json\n'), ((5210, 5222), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5219, 5222), False, 'import json\n'), ((8002, 8032), 'os.path.join', 'os.path.join', (['nodeRoot', '""".env"""'], {}), "(nodeRoot, '.env')\n", (8014, 8032), False, 'import os\n'), ((8684, 8720), 'shutil.ignore_patterns', 'shutil.ignore_patterns', (['""".gitignore"""'], {}), "('.gitignore')\n", (8706, 8720), False, 'import shutil\n'), ((8823, 8838), 'os.stat', 'os.stat', (['appDir'], {}), '(appDir)\n', (8830, 8838), False, 'import os\n'), ((8847, 8862), 'os.stat', 'os.stat', (['appDir'], {}), '(appDir)\n', (8854, 8862), False, 'import os\n'), ((8893, 8908), 'os.stat', 'os.stat', (['appDir'], {}), '(appDir)\n', (8900, 8908), False, 'import os\n'), ((11561, 11590), 'os.path.join', 'os.path.join', (['tempDir', '"""apps"""'], {}), "(tempDir, 'apps')\n", (11573, 11590), False, 'import os\n'), ((6420, 6499), 'subprocess.check_output', 'subprocess.check_output', (['"""hostname -s 2>/dev/null || echo \'umbrel\'"""'], {'shell': '(True)'}), '("hostname -s 2>/dev/null || echo \'umbrel\'", shell=True)\n', (6443, 6499), False, 'import subprocess\n'), ((11753, 11779), 'os.path.join', 'os.path.join', (['appsDir', 'app'], {}), '(appsDir, app)\n', (11765, 11779), False, 'import os\n'), ((11894, 11928), 'os.path.join', 'os.path.join', (['tempDir', '"""apps"""', 'app'], {}), "(tempDir, 'apps', app)\n", (11906, 11928), False, 'import os\n'), ((3540, 3578), 'yaml.dump', 'yaml.dump', (['appCompose'], {'sort_keys': '(False)'}), '(appCompose, sort_keys=False)\n', (3549, 3578), False, 'import yaml\n'), ((11812, 11838), 'os.path.join', 'os.path.join', (['appsDir', 'app'], {}), '(appsDir, app)\n', (11824, 11838), False, 'import os\n'), ((11963, 11997), 'os.path.join', 'os.path.join', (['tempDir', '"""apps"""', 'app'], {}), "(tempDir, 'apps', app)\n", (11975, 11997), False, 'import os\n'), ((11999, 12025), 'os.path.join', 'os.path.join', (['appsDir', 'app'], {}), '(appsDir, app)\n', (12011, 12025), False, 'import os\n'), ((12082, 12118), 'shutil.ignore_patterns', 'shutil.ignore_patterns', 
(['""".gitignore"""'], {}), "('.gitignore')\n", (12104, 12118), False, 'import shutil\n')] |
# Based on local.py (c) 2012, <NAME> <<EMAIL>>
# Based on chroot.py (c) 2013, <NAME> <<EMAIL>>
# Based on func.py
# (c) 2014, <NAME> <<EMAIL>>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: <NAME> (@mscherer) <<EMAIL>>
name: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.
'''
import os
import base64
from ansible import errors
from ansible.plugins.connection import ConnectionBase
HAVE_SALTSTACK = False
try:
import salt.client as sc
HAVE_SALTSTACK = True
except ImportError:
pass
class Connection(ConnectionBase):
""" Salt-based connections """
has_pipelining = False
    # while the name of the product is salt, naming this module salt causes
# trouble with module import
transport = 'community.general.saltstack'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.host = self._play_context.remote_addr
def _connect(self):
if not HAVE_SALTSTACK:
raise errors.AnsibleError("saltstack is not installed")
self.client = sc.LocalClient()
self._connected = True
return self
def exec_command(self, cmd, sudoable=False, in_data=None):
""" run a command on the remote minion """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
self._display.vvv("EXEC %s" % cmd, host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
if self.host not in res:
raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
p = res[self.host]
return p['retcode'], p['stdout'], p['stderr']
@staticmethod
def _normalize_path(path, prefix):
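        # Anchor the given path under prefix so both relative and absolute inputs resolve inside it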
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
""" transfer a file from local to remote """
super(Connection, self).put_file(in_path, out_path)
out_path = self._normalize_path(out_path, '/')
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
with open(in_path, 'rb') as in_fh:
content = in_fh.read()
self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
# TODO test it
def fetch_file(self, in_path, out_path):
""" fetch a file from remote to local """
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._normalize_path(in_path, '/')
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
open(out_path, 'wb').write(content)
def close(self):
""" terminate the connection; nothing to do here """
pass
| [
"base64.b64encode",
"os.path.join",
"salt.client.LocalClient",
"os.path.normpath",
"ansible.errors.AnsibleError"
]
| [((1432, 1448), 'salt.client.LocalClient', 'sc.LocalClient', ([], {}), '()\n', (1446, 1448), True, 'import salt.client as sc\n'), ((2497, 2519), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (2513, 2519), False, 'import os\n'), ((2535, 2569), 'os.path.join', 'os.path.join', (['prefix', 'normpath[1:]'], {}), '(prefix, normpath[1:])\n', (2547, 2569), False, 'import os\n'), ((1359, 1408), 'ansible.errors.AnsibleError', 'errors.AnsibleError', (['"""saltstack is not installed"""'], {}), "('saltstack is not installed')\n", (1378, 1408), False, 'from ansible import errors\n'), ((1740, 1840), 'ansible.errors.AnsibleError', 'errors.AnsibleError', (['"""Internal Error: this module does not support optimized module pipelining"""'], {}), "(\n 'Internal Error: this module does not support optimized module pipelining')\n", (1759, 1840), False, 'from ansible import errors\n'), ((2126, 2251), 'ansible.errors.AnsibleError', 'errors.AnsibleError', (['("Minion %s didn\'t answer, check if salt-minion is running and the name is correct"\n % self.host)'], {}), '(\n "Minion %s didn\'t answer, check if salt-minion is running and the name is correct"\n % self.host)\n', (2145, 2251), False, 'from ansible import errors\n'), ((2446, 2477), 'os.path.join', 'os.path.join', (['os.path.sep', 'path'], {}), '(os.path.sep, path)\n', (2458, 2477), False, 'import os\n'), ((3008, 3033), 'base64.b64encode', 'base64.b64encode', (['content'], {}), '(content)\n', (3024, 3033), False, 'import base64\n')] |
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
from create.models import Flavor
from instance.models import Instance
from libvirt import libvirtError
from vrtManager.create import wvmCreate
from vrtManager import util
from create.forms import FlavorAddForm, NewVMForm
def create(request, host_id):
"""
Create new instance.
"""
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
errors = []
compute = Compute.objects.get(id=host_id)
flavors = Flavor.objects.filter().order_by('id')
try:
conn = wvmCreate(compute.hostname,
compute.login,
compute.password,
compute.type)
storages = sorted(conn.get_storages())
networks = sorted(conn.get_networks())
instances = conn.get_instances()
get_images = sorted(conn.get_storages_images())
mac_auto = util.randomMAC()
except libvirtError as err:
errors.append(err.message)
if not storages:
msg = _("You haven't defined have any storage pools")
errors.append(msg)
if not networks:
msg = _("You haven't defined have any network pools")
errors.append(msg)
if request.method == 'POST':
if 'create_flavor' in request.POST:
form = FlavorAddForm(request.POST)
if form.is_valid():
data = form.cleaned_data
create_flavor = Flavor(label=data['label'],
vcpu=data['vcpu'],
memory=data['memory'],
disk=data['disk'])
create_flavor.save()
return HttpResponseRedirect(request.get_full_path())
if 'delete_flavor' in request.POST:
flavor_id = request.POST.get('flavor', '')
delete_flavor = Flavor.objects.get(id=flavor_id)
delete_flavor.delete()
return HttpResponseRedirect(request.get_full_path())
if 'create' in request.POST:
volumes = {}
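            # Maps each disk image path to its volume type for the instance being created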
form = NewVMForm(request.POST)
if form.is_valid():
data = form.cleaned_data
if instances:
if data['name'] in instances:
msg = _("A virtual machine with this name already exists")
errors.append(msg)
if not errors:
if data['hdd_size']:
if not data['mac']:
msg = _("No Virtual Machine MAC has been entered")
errors.append(msg)
else:
try:
path = conn.create_volume(data['storage'], data['name'], data['hdd_size'])
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
elif data['template']:
templ_path = conn.get_volume_path(data['template'])
clone_path = conn.clone_from_template(data['name'], templ_path)
volumes[clone_path] = conn.get_volume_type(clone_path)
else:
if not data['images']:
msg = _("First you need to create or select an image")
errors.append(msg)
else:
for vol in data['images'].split(','):
try:
path = conn.get_volume_path(vol)
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
if not errors:
uuid = util.randomUUID()
try:
conn.create_instance(data['name'], data['memory'], data['vcpu'], data['host_model'],
uuid, volumes, data['networks'], data['virtio'], data['mac'])
create_instance = Instance(compute_id=host_id, name=data['name'], uuid=uuid)
create_instance.save()
return HttpResponseRedirect('/instance/%s/%s/' % (host_id, data['name']))
except libvirtError as msg_error:
if data['hdd_size']:
conn.delete_volume(volumes.keys()[0])
errors.append(msg_error.message)
conn.close()
return render_to_response('create.html', locals(), context_instance=RequestContext(request))
| [
"django.http.HttpResponseRedirect",
"vrtManager.util.randomMAC",
"servers.models.Compute.objects.get",
"django.utils.translation.ugettext_lazy",
"create.models.Flavor.objects.filter",
"vrtManager.create.wvmCreate",
"create.forms.NewVMForm",
"django.template.RequestContext",
"vrtManager.util.randomUUID",
"create.forms.FlavorAddForm",
"instance.models.Instance",
"create.models.Flavor.objects.get",
"create.models.Flavor"
]
| [((645, 676), 'servers.models.Compute.objects.get', 'Compute.objects.get', ([], {'id': 'host_id'}), '(id=host_id)\n', (664, 676), False, 'from servers.models import Compute\n'), ((583, 613), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/login"""'], {}), "('/login')\n", (603, 613), False, 'from django.http import HttpResponseRedirect\n'), ((755, 829), 'vrtManager.create.wvmCreate', 'wvmCreate', (['compute.hostname', 'compute.login', 'compute.password', 'compute.type'], {}), '(compute.hostname, compute.login, compute.password, compute.type)\n', (764, 829), False, 'from vrtManager.create import wvmCreate\n'), ((1116, 1132), 'vrtManager.util.randomMAC', 'util.randomMAC', ([], {}), '()\n', (1130, 1132), False, 'from vrtManager import util\n'), ((1236, 1283), 'django.utils.translation.ugettext_lazy', '_', (['"""You haven\'t defined have any storage pools"""'], {}), '("You haven\'t defined have any storage pools")\n', (1237, 1283), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1346, 1393), 'django.utils.translation.ugettext_lazy', '_', (['"""You haven\'t defined have any network pools"""'], {}), '("You haven\'t defined have any network pools")\n', (1347, 1393), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((691, 714), 'create.models.Flavor.objects.filter', 'Flavor.objects.filter', ([], {}), '()\n', (712, 714), False, 'from create.models import Flavor\n'), ((1518, 1545), 'create.forms.FlavorAddForm', 'FlavorAddForm', (['request.POST'], {}), '(request.POST)\n', (1531, 1545), False, 'from create.forms import FlavorAddForm, NewVMForm\n'), ((2090, 2122), 'create.models.Flavor.objects.get', 'Flavor.objects.get', ([], {'id': 'flavor_id'}), '(id=flavor_id)\n', (2108, 2122), False, 'from create.models import Flavor\n'), ((2304, 2327), 'create.forms.NewVMForm', 'NewVMForm', (['request.POST'], {}), '(request.POST)\n', (2313, 2327), False, 'from create.forms import FlavorAddForm, NewVMForm\n'), ((5049, 5072), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (5063, 5072), False, 'from django.template import RequestContext\n'), ((1651, 1744), 'create.models.Flavor', 'Flavor', ([], {'label': "data['label']", 'vcpu': "data['vcpu']", 'memory': "data['memory']", 'disk': "data['disk']"}), "(label=data['label'], vcpu=data['vcpu'], memory=data['memory'], disk=\n data['disk'])\n", (1657, 1744), False, 'from create.models import Flavor\n'), ((2511, 2563), 'django.utils.translation.ugettext_lazy', '_', (['"""A virtual machine with this name already exists"""'], {}), "('A virtual machine with this name already exists')\n", (2512, 2563), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4192, 4209), 'vrtManager.util.randomUUID', 'util.randomUUID', ([], {}), '()\n', (4207, 4209), False, 'from vrtManager import util\n'), ((2757, 2801), 'django.utils.translation.ugettext_lazy', '_', (['"""No Virtual Machine MAC has been entered"""'], {}), "('No Virtual Machine MAC has been entered')\n", (2758, 2801), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4509, 4567), 'instance.models.Instance', 'Instance', ([], {'compute_id': 'host_id', 'name': "data['name']", 'uuid': 'uuid'}), "(compute_id=host_id, name=data['name'], uuid=uuid)\n", (4517, 4567), False, 'from instance.models import Instance\n'), ((4654, 4720), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["('/instance/%s/%s/' % (host_id, data['name']))"], {}), "('/instance/%s/%s/' % (host_id, data['name']))\n", (4674, 
4720), False, 'from django.http import HttpResponseRedirect\n'), ((3614, 3662), 'django.utils.translation.ugettext_lazy', '_', (['"""First you need to create or select an image"""'], {}), "('First you need to create or select an image')\n", (3615, 3662), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import torch
__author__ = 'Andres'
def calc_gradient_penalty_bayes(discriminator, real_data, fake_data, gamma):
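    # WGAN-GP style penalty: evaluate the critic on random interpolates of real and fake data
    # and penalise the gradient norm for deviating from 1, scaled by gamma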
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = real_data.size()[0]
alpha = torch.rand(batch_size, 1, 1, 1)
alpha = alpha.expand(real_data.size()).to(device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True).to(device)
disc_interpolates = discriminator(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2) - 1) ** 2) * gamma
return gradient_penalty | [
"torch.cuda.is_available",
"torch.autograd.Variable",
"torch.rand"
]
| [((238, 269), 'torch.rand', 'torch.rand', (['batch_size', '(1)', '(1)', '(1)'], {}), '(batch_size, 1, 1, 1)\n', (248, 269), False, 'import torch\n'), ((150, 175), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (173, 175), False, 'import torch\n'), ((409, 466), 'torch.autograd.Variable', 'torch.autograd.Variable', (['interpolates'], {'requires_grad': '(True)'}), '(interpolates, requires_grad=True)\n', (432, 466), False, 'import torch\n')] |
import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
"Skipping the test as libnnapi_backend.so was not found")
@unittest.skipIf(TEST_WITH_ASAN, "Unresolved bug with ASAN")
class TestNnapiBackend(TestNNAPI):
def setUp(self):
super().setUp()
# Save default dtype
module = torch.nn.PReLU()
self.default_dtype = module.weight.dtype
# Change dtype to float32 (since a different unit test changed dtype to float64,
# which is not supported by the Android NNAPI delegate)
# Float32 should typically be the default in other files.
torch.set_default_dtype(torch.float32)
# Load nnapi delegate library
torch.ops.load_library(str(lib_path))
# Override
def call_lowering_to_nnapi(self, traced_module, args):
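        # Lower the traced module to the Android NNAPI delegate backend, bundling the example inputs in the compile spec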
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
# Argument input is a single Tensor
self.call_lowering_to_nnapi(traced, args)
# Argument input is a Tensor in a list
self.call_lowering_to_nnapi(traced, [args])
# Test exceptions for incorrect compile specs
    def test_compile_spec_sanity(self):
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype)
| [
"torch.jit.trace",
"os.path.exists",
"torch._C._jit_to_backend",
"pathlib.Path",
"unittest.skipIf",
"torch.set_default_dtype",
"os.path.realpath",
"torch.nn.PReLU",
"torch.tensor",
"sys.path.append"
]
| [((314, 347), 'sys.path.append', 'sys.path.append', (['pytorch_test_dir'], {}), '(pytorch_test_dir)\n', (329, 347), False, 'import sys\n'), ((1117, 1176), 'unittest.skipIf', 'unittest.skipIf', (['TEST_WITH_ASAN', '"""Unresolved bug with ASAN"""'], {}), "(TEST_WITH_ASAN, 'Unresolved bug with ASAN')\n", (1132, 1176), False, 'import unittest\n'), ((285, 311), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (301, 311), False, 'import os\n'), ((1304, 1320), 'torch.nn.PReLU', 'torch.nn.PReLU', ([], {}), '()\n', (1318, 1320), False, 'import torch\n'), ((1597, 1635), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float32'], {}), '(torch.float32)\n', (1620, 1635), False, 'import torch\n'), ((1864, 1926), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', (['"""nnapi"""', 'traced_module', 'compile_spec'], {}), "('nnapi', traced_module, compile_spec)\n", (1888, 1926), False, 'import torch\n'), ((2092, 2108), 'torch.nn.PReLU', 'torch.nn.PReLU', ([], {}), '()\n', (2106, 2108), False, 'import torch\n'), ((2126, 2155), 'torch.jit.trace', 'torch.jit.trace', (['module', 'args'], {}), '(module, args)\n', (2141, 2155), False, 'import torch\n'), ((2540, 2556), 'torch.nn.PReLU', 'torch.nn.PReLU', ([], {}), '()\n', (2554, 2556), False, 'import torch\n'), ((2574, 2603), 'torch.jit.trace', 'torch.jit.trace', (['module', 'args'], {}), '(module, args)\n', (2589, 2603), False, 'import torch\n'), ((5030, 5073), 'torch.set_default_dtype', 'torch.set_default_dtype', (['self.default_dtype'], {}), '(self.default_dtype)\n', (5053, 5073), False, 'import torch\n'), ((1015, 1039), 'os.path.exists', 'os.path.exists', (['lib_path'], {}), '(lib_path)\n', (1029, 1039), False, 'import os\n'), ((3183, 3238), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', (['"""nnapi"""', 'traced', 'compile_spec'], {}), "('nnapi', traced, compile_spec)\n", (3207, 3238), False, 'import torch\n'), ((3618, 3673), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', (['"""nnapi"""', 'traced', 'compile_spec'], {}), "('nnapi', traced, compile_spec)\n", (3642, 3673), False, 'import torch\n'), ((4092, 4147), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', (['"""nnapi"""', 'traced', 'compile_spec'], {}), "('nnapi', traced, compile_spec)\n", (4116, 4147), False, 'import torch\n'), ((4502, 4557), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', (['"""nnapi"""', 'traced', 'compile_spec'], {}), "('nnapi', traced, compile_spec)\n", (4526, 4557), False, 'import torch\n'), ((4858, 4913), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', (['"""nnapi"""', 'traced', 'compile_spec'], {}), "('nnapi', traced, compile_spec)\n", (4882, 4913), False, 'import torch\n'), ((884, 898), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (888, 898), False, 'from pathlib import Path\n'), ((2008, 2046), 'torch.tensor', 'torch.tensor', (['[[1.0, -1.0, 2.0, -2.0]]'], {}), '([[1.0, -1.0, 2.0, -2.0]])\n', (2020, 2046), False, 'import torch\n'), ((2456, 2494), 'torch.tensor', 'torch.tensor', (['[[1.0, -1.0, 2.0, -2.0]]'], {}), '([[1.0, -1.0, 2.0, -2.0]])\n', (2468, 2494), False, 'import torch\n')] |
from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox
from InftyDoubleSpinBox import InftyDoubleSpinBox
from PyQt5.QtCore import pyqtSignal, Qt
import helplib as hl
import numpy as np
class dataControlWidget(QGroupBox):
showErrorBars_changed = pyqtSignal(bool)
ignoreFirstPoint_changed = pyqtSignal(bool)
data_changed = pyqtSignal(bool, bool)
data_shift = pyqtSignal(np.float64)
load_fits = pyqtSignal(list)
load_view = pyqtSignal(str)
load_meta = pyqtSignal(str)
fit_on_startup = pyqtSignal()
SHOW_ERROR_BARS = "Show error bars"
SHOW_ERROR_BARS_NOT_LOADED = "Show error bars (could not be calculated)"
def __init__(self):
QWidget.__init__(self)
self.setTitle('Data Settings')
self.__lblEnergyShift = QLabel("Energy Shift:")
self.__dsbEnergyShift = InftyDoubleSpinBox()
self.__dsbEnergyShift.editingFinished.connect(self.__energyShiftChanged)
self.__dsbEnergyShift.setSingleStep(0.01)
self.__chkShowErrorBars = QCheckBox(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.stateChanged.connect(self.__chkShowErrorBars_changed)
self.__chkIgnoreFirstPoint = QCheckBox('Ignore first data point.')
self.__chkIgnoreFirstPoint.stateChanged.connect(self.__chkIgnoreFirstPoint_changed)
self.__mainLayout = QGridLayout()
self.setLayout(self.__mainLayout)
self.__mainLayout.setAlignment(Qt.AlignTop)
self.__mainLayout.addWidget(self.__lblEnergyShift, 0, 0)
self.__mainLayout.addWidget(self.__dsbEnergyShift, 0, 1)
self.__mainLayout.addWidget(self.__chkShowErrorBars, 1, 0, 1, 2)
self.__mainLayout.addWidget(self.__chkIgnoreFirstPoint, 2, 0, 1, 2)
self.__chkIgnoreFirstPoint.setVisible(False)
self.reset(False)
def reset(self, enable):
self.__data = None
self.__all_data = None
self.__stdErrors = None
self.__chkShowErrorBars.setCheckable(True)
self.__chkShowErrorBars.setChecked(False)
self.__chkShowErrorBars.setEnabled(False)
self.__chkIgnoreFirstPoint.setCheckable(True)
self.__chkIgnoreFirstPoint.setChecked(False)
self.__chkIgnoreFirstPoint.setEnabled(False)
self.setEnergyShift(0.0)
self.__prevShift = 0.0
self.setEnabled(enable)
def __chkShowErrorBars_changed(self, state):
self.__chkShowErrorBars.setCheckState(state)
self.showErrorBars_changed.emit(self.getShowErrorBars())
def __chkIgnoreFirstPoint_changed(self, state):
self.__chkIgnoreFirstPoint.setCheckState(state)
self.ignoreFirstPoint_changed.emit(self.getIgnoreFirstPoint())
def __energyShiftChanged(self):
self.cause_shift()
def cause_shift(self):
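        # Emit only the difference to the previously applied shift so listeners move the data incrementally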
energyShift = self.__dsbEnergyShift.value()
increment = energyShift - self.__prevShift
self.__prevShift = energyShift
self.data_shift.emit(increment)
self.data_changed.emit(self.getShowErrorBars(), self.getIgnoreFirstPoint())
# def setData(self, data):
# self.__data = data
def getData(self):
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__data[first_point:,]
def getEnergyShift(self):
return (self.__dsbEnergyShift.value())
def setEnergyShift(self, value):
#increment = self.__dsbEnergyShift.value() - value
increment = value - self.__dsbEnergyShift.value()
self.__dsbEnergyShift.setValue(value)
#self.__shiftData(increment)
#self.data_shift.emit(increment)
def __shiftData(self, increment):
try:
if self.__data is not None:
for set in self.__data:
set[0] += increment
except Exception as e:
print(e)
def getStdErrors(self):
if self.__stdErrors is not None:
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__stdErrors[first_point:]
else:
return None
def getMax_Energy(self):
if self.getData() is not None:
return self.getData()[-1][0]
else:
return None
def getMin_Energy(self):
if self.getData() is not None:
return self.getData()[0][0]
else:
return None
def getShowErrorBars(self):
return self.__chkShowErrorBars.isChecked()
def setShowErrorBars(self, value):
self.__chkShowErrorBars.setChecked(value)
def getIgnoreFirstPoint(self):
return self.__chkIgnoreFirstPoint.isChecked()
def setIgnoreFirstPoint(self, value):
self.__chkIgnoreFirstPoint.setChecked(value)
def hasStdErrors(self):
return self.__stdErrors is not None
def loadFile(self, fileName, id_string):
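        # Read the data, standard errors and embedded fit/view/data/meta strings, then restore the saved state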
self.__all_data, self.__stdErrors, (fit_strings, view_string, data_string, meta_string), id_found =\
hl.readFileForFitsDataAndStdErrorAndMetaData(fileName, id_string)
#we need a copy to not save any altered data!
self.__data = (self.__all_data[:, 0:2]).copy()
if len(self.__data) <= 1:
raise Exception("Not enough data in file!")
if self.hasStdErrors():
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS)
else:
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.setEnabled(self.hasStdErrors())
self.__chkShowErrorBars.setChecked(self.hasStdErrors())
self.__chkIgnoreFirstPoint.setEnabled(True)
self.data_changed.emit(self.hasStdErrors(), self.getIgnoreFirstPoint())
self.load_fits.emit(fit_strings)
self.load_view.emit(view_string)
self.load_meta.emit(meta_string)
self.load_from_data_string(data_string)
self.cause_shift()
self.fit_on_startup.emit()
return id_found
def load_from_data_string(self, data_string):
if data_string is not None:
split_string = data_string.split('\v')
for i in range(0, len(split_string)):
item = split_string[i].split('=')
if len(item) == 2:
if (item[0] == 'egs'):
self.setEnergyShift(np.float64(item[1]))
elif item[0] == 'seb':
if item[1] == '1' or item[1] == 'True':
self.setShowErrorBars(True)
elif item[1] == '0' or item[1] == 'False':
self.setShowErrorBars(False)
elif item[0] == 'ifd':
if item[1] == '1' or item[1] == 'True':
self.setIgnoreFirstPoint(True)
elif item[1] == '0' or item[1] == 'False':
self.setIgnoreFirstPoint(False)
def get_data_string(self):
return 'egs=' + str(self.getEnergyShift()) + '\vseb=' + str(self.getShowErrorBars()) +\
'\vifd=' + str(self.getIgnoreFirstPoint())
def saveFile(self, fileName, id_string, fit_strings, view_string, data_string, meta_string):
hl.saveFilewithMetaData(id_string, fileName, self.__all_data, (fit_strings, view_string, data_string, meta_string))
| [
"PyQt5.QtCore.pyqtSignal",
"InftyDoubleSpinBox.InftyDoubleSpinBox",
"helplib.saveFilewithMetaData",
"numpy.float64",
"PyQt5.QtWidgets.QWidget.__init__",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QLabel",
"helplib.readFileForFitsDataAndStdErrorAndMetaData",
"PyQt5.QtWidgets.QCheckBox"
]
| [((274, 290), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bool'], {}), '(bool)\n', (284, 290), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((322, 338), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bool'], {}), '(bool)\n', (332, 338), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((358, 380), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bool', 'bool'], {}), '(bool, bool)\n', (368, 380), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((398, 420), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['np.float64'], {}), '(np.float64)\n', (408, 420), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((437, 453), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['list'], {}), '(list)\n', (447, 453), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((470, 485), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str'], {}), '(str)\n', (480, 485), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((502, 517), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str'], {}), '(str)\n', (512, 517), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((539, 551), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (549, 551), False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((711, 733), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self'], {}), '(self)\n', (727, 733), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((807, 830), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Energy Shift:"""'], {}), "('Energy Shift:')\n", (813, 830), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((863, 883), 'InftyDoubleSpinBox.InftyDoubleSpinBox', 'InftyDoubleSpinBox', ([], {}), '()\n', (881, 883), False, 'from InftyDoubleSpinBox import InftyDoubleSpinBox\n'), ((1058, 1100), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['self.SHOW_ERROR_BARS_NOT_LOADED'], {}), '(self.SHOW_ERROR_BARS_NOT_LOADED)\n', (1067, 1100), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((1225, 1262), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""Ignore first data point."""'], {}), "('Ignore first data point.')\n", (1234, 1262), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((1392, 1405), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1403, 1405), False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((5107, 5172), 'helplib.readFileForFitsDataAndStdErrorAndMetaData', 'hl.readFileForFitsDataAndStdErrorAndMetaData', (['fileName', 'id_string'], {}), '(fileName, id_string)\n', (5151, 5172), True, 'import helplib as hl\n'), ((7390, 7509), 'helplib.saveFilewithMetaData', 'hl.saveFilewithMetaData', (['id_string', 'fileName', 'self.__all_data', '(fit_strings, view_string, data_string, meta_string)'], {}), '(id_string, fileName, self.__all_data, (fit_strings,\n view_string, data_string, meta_string))\n', (7413, 7509), True, 'import helplib as hl\n'), ((6493, 6512), 'numpy.float64', 'np.float64', (['item[1]'], {}), '(item[1])\n', (6503, 6512), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-06 04:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trr', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='trr',
name='subject_id',
field=models.PositiveIntegerField(null=True),
),
]
| [
"django.db.models.PositiveIntegerField"
]
| [((387, 425), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (414, 425), False, 'from django.db import migrations, models\n')] |
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run_utils import TestRun
from utils.installer import install_iotrace, check_if_installed
from utils.iotrace import IotracePlugin
from utils.misc import kill_all_io
from test_tools.fio.fio import Fio
def dut_prepare(reinstall: bool):
if not check_if_installed() or reinstall:
TestRun.LOGGER.info("Installing iotrace:")
install_iotrace()
else:
TestRun.LOGGER.info("iotrace is already installed by previous test")
# Call it after installing iotrace because we need iotrace
# to get valid paths
dut_cleanup()
fio = Fio()
if not fio.is_installed():
TestRun.LOGGER.info("Installing fio")
fio.install()
TestRun.LOGGER.info("Killing all IO")
kill_all_io()
def dut_cleanup():
iotrace: IotracePlugin = TestRun.plugins['iotrace']
TestRun.LOGGER.info("Stopping fuzzing")
TestRun.executor.run(f'{iotrace.working_dir}/standalone-linux-io-tracer/tests/security/fuzzy/fuzz.sh clean')
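    # Terminate any iotrace processes that are still running before cleaning up traces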
output = TestRun.executor.run('pgrep iotrace')
if output.stdout != "":
TestRun.executor.run(f'kill -9 {output.stdout}')
TestRun.LOGGER.info("Removing existing traces")
trace_repository_path: str = iotrace.get_trace_repository_path()
TestRun.executor.run_expect_success(f'rm -rf {trace_repository_path}/kernel')
| [
"core.test_run_utils.TestRun.LOGGER.info",
"utils.misc.kill_all_io",
"test_tools.fio.fio.Fio",
"core.test_run_utils.TestRun.executor.run",
"utils.installer.check_if_installed",
"core.test_run_utils.TestRun.executor.run_expect_success",
"utils.installer.install_iotrace"
]
| [((667, 672), 'test_tools.fio.fio.Fio', 'Fio', ([], {}), '()\n', (670, 672), False, 'from test_tools.fio.fio import Fio\n'), ((777, 814), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', (['"""Killing all IO"""'], {}), "('Killing all IO')\n", (796, 814), False, 'from core.test_run_utils import TestRun\n'), ((819, 832), 'utils.misc.kill_all_io', 'kill_all_io', ([], {}), '()\n', (830, 832), False, 'from utils.misc import kill_all_io\n'), ((915, 954), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', (['"""Stopping fuzzing"""'], {}), "('Stopping fuzzing')\n", (934, 954), False, 'from core.test_run_utils import TestRun\n'), ((959, 1077), 'core.test_run_utils.TestRun.executor.run', 'TestRun.executor.run', (['f"""{iotrace.working_dir}/standalone-linux-io-tracer/tests/security/fuzzy/fuzz.sh clean"""'], {}), "(\n f'{iotrace.working_dir}/standalone-linux-io-tracer/tests/security/fuzzy/fuzz.sh clean'\n )\n", (979, 1077), False, 'from core.test_run_utils import TestRun\n'), ((1082, 1119), 'core.test_run_utils.TestRun.executor.run', 'TestRun.executor.run', (['"""pgrep iotrace"""'], {}), "('pgrep iotrace')\n", (1102, 1119), False, 'from core.test_run_utils import TestRun\n'), ((1210, 1257), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', (['"""Removing existing traces"""'], {}), "('Removing existing traces')\n", (1229, 1257), False, 'from core.test_run_utils import TestRun\n'), ((1331, 1408), 'core.test_run_utils.TestRun.executor.run_expect_success', 'TestRun.executor.run_expect_success', (['f"""rm -rf {trace_repository_path}/kernel"""'], {}), "(f'rm -rf {trace_repository_path}/kernel')\n", (1366, 1408), False, 'from core.test_run_utils import TestRun\n'), ((393, 435), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', (['"""Installing iotrace:"""'], {}), "('Installing iotrace:')\n", (412, 435), False, 'from core.test_run_utils import TestRun\n'), ((444, 461), 'utils.installer.install_iotrace', 'install_iotrace', ([], {}), '()\n', (459, 461), False, 'from utils.installer import install_iotrace, check_if_installed\n'), ((480, 548), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', (['"""iotrace is already installed by previous test"""'], {}), "('iotrace is already installed by previous test')\n", (499, 548), False, 'from core.test_run_utils import TestRun\n'), ((712, 749), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', (['"""Installing fio"""'], {}), "('Installing fio')\n", (731, 749), False, 'from core.test_run_utils import TestRun\n'), ((1156, 1204), 'core.test_run_utils.TestRun.executor.run', 'TestRun.executor.run', (['f"""kill -9 {output.stdout}"""'], {}), "(f'kill -9 {output.stdout}')\n", (1176, 1204), False, 'from core.test_run_utils import TestRun\n'), ((350, 370), 'utils.installer.check_if_installed', 'check_if_installed', ([], {}), '()\n', (368, 370), False, 'from utils.installer import install_iotrace, check_if_installed\n')] |
from models import Song
from random import choice
def random_song(genre):
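    # Fetch all songs of the requested genre, pick one at random, and return it as a plain dict.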
results = Song.query().filter(Song.genre==genre).fetch()
print(results)
songs = choice(results)
random_song = {
"title": songs.song,
"album": songs.album,
"artist": songs.artist.lower(),
"genre": genre,
}
return random_song
| [
"models.Song.query",
"random.choice"
]
| [((167, 182), 'random.choice', 'choice', (['results'], {}), '(results)\n', (173, 182), False, 'from random import choice\n'), ((89, 101), 'models.Song.query', 'Song.query', ([], {}), '()\n', (99, 101), False, 'from models import Song\n')] |
#!/usr/bin/python
# custom_dialect.py
import csv
csv.register_dialect("hashes", delimiter="#")
f = open('items3.csv', 'w')
with f:
writer = csv.writer(f, dialect="hashes")
writer.writerow(("pencils", 2))
writer.writerow(("plates", 1))
writer.writerow(("books", 4))
| [
"csv.register_dialect",
"csv.writer"
]
| [((52, 97), 'csv.register_dialect', 'csv.register_dialect', (['"""hashes"""'], {'delimiter': '"""#"""'}), "('hashes', delimiter='#')\n", (72, 97), False, 'import csv\n'), ((150, 181), 'csv.writer', 'csv.writer', (['f'], {'dialect': '"""hashes"""'}), "(f, dialect='hashes')\n", (160, 181), False, 'import csv\n')] |
from typing import Optional
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, Email
from servicex.models import UserModel
class ProfileForm(FlaskForm):
name = StringField('Full Name', validators=[DataRequired(), Length(0, 120)])
email = StringField('Email', validators=[DataRequired(), Email()])
institution = StringField('Institution', validators=[DataRequired()])
experiment = SelectField('Experiment', validators=[DataRequired()],
choices=[("ATLAS", "ATLAS"), ("CMS", "CMS")],
default="ATLAS")
submit = SubmitField('Save Profile')
def __init__(self, user: Optional[UserModel] = None):
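        # If an existing user is supplied, pre-populate the form fields with their current profile values.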
super().__init__()
if user:
self.name.data = user.name
self.email.data = user.email
self.institution.data = user.institution
self.experiment.data = user.experiment
| [
"wtforms.validators.Length",
"wtforms.validators.Email",
"wtforms.validators.DataRequired",
"wtforms.SubmitField"
]
| [((681, 708), 'wtforms.SubmitField', 'SubmitField', (['"""Save Profile"""'], {}), "('Save Profile')\n", (692, 708), False, 'from wtforms import StringField, SelectField, SubmitField\n'), ((297, 311), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (309, 311), False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((313, 327), 'wtforms.validators.Length', 'Length', (['(0)', '(120)'], {}), '(0, 120)\n', (319, 327), False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((375, 389), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (387, 389), False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((391, 398), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (396, 398), False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((458, 472), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (470, 472), False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((530, 544), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (542, 544), False, 'from wtforms.validators import DataRequired, Length, Email\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os, sys
import tensorflow as tf
import tf_slim as slim
from tensorflow.python.tools import freeze_graph
sys.path.append('../../')
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.configs import cfgs
from libs.networks import build_whole_network
CKPT_PATH = '/home/yjr/PycharmProjects/Faster-RCNN_Tensorflow/output/trained_weights/FasterRCNN_20180517/voc_200000model.ckpt'
OUT_DIR = '../../output/Pbs'
PB_NAME = 'FasterRCNN_Res101_Pascal.pb'
def build_detection_graph():
# 1. preprocess img
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3],
                              name='input_img')  # is RGB, not BGR
raw_shape = tf.shape(img_plac)
raw_h, raw_w = tf.to_float(raw_shape[0]), tf.to_float(raw_shape[1])
img_batch = tf.cast(img_plac, tf.float32)
img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
length_limitation=cfgs.IMG_MAX_LENGTH)
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0) # [1, None, None, 3]
det_net = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
detected_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch=None)
xmin, ymin, xmax, ymax = detected_boxes[:, 0], detected_boxes[:, 1], \
detected_boxes[:, 2], detected_boxes[:, 3]
resized_shape = tf.shape(img_batch)
resized_h, resized_w = tf.to_float(resized_shape[1]), tf.to_float(resized_shape[2])
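    # Map box coordinates from the resized input back to the original image size.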
xmin = xmin * raw_w / resized_w
xmax = xmax * raw_w / resized_w
ymin = ymin * raw_h / resized_h
ymax = ymax * raw_h / resized_h
boxes = tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
dets = tf.concat([tf.reshape(detection_category, [-1, 1]),
tf.reshape(detection_scores, [-1, 1]),
boxes], axis=1, name='DetResults')
return dets
def export_frozenPB():
tf.reset_default_graph()
dets = build_detection_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
print("we have restred the weights from =====>>\n", CKPT_PATH)
saver.restore(sess, CKPT_PATH)
tf.train.write_graph(sess.graph_def, OUT_DIR, PB_NAME)
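        # freeze_graph folds the checkpoint weights into the GraphDef and keeps only the
        # DetResults output, producing a single self-contained .pb.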
freeze_graph.freeze_graph(input_graph=os.path.join(OUT_DIR, PB_NAME),
input_saver='',
input_binary=False,
input_checkpoint=CKPT_PATH,
output_node_names="DetResults",
restore_op_name="save/restore_all",
filename_tensor_name='save/Const:0',
output_graph=os.path.join(OUT_DIR, PB_NAME.replace('.pb', '_Frozen.pb')),
clear_devices=False,
initializer_nodes='')
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = ''
export_frozenPB()
| [
"data.io.image_preprocess.short_side_resize_for_inference_data",
"tensorflow.shape",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.to_float",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"tensorflow.Session",
"os.path.join",
"tensorflow.constant",
"libs.networks.build_whole_network.DetectionNetwork",
"tensorflow.train.write_graph",
"tensorflow.expand_dims",
"tensorflow.cast",
"sys.path.append",
"tensorflow.stack"
]
| [((203, 228), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (218, 228), False, 'import os, sys\n'), ((646, 717), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8', 'shape': '[None, None, 3]', 'name': '"""input_img"""'}), "(dtype=tf.uint8, shape=[None, None, 3], name='input_img')\n", (660, 717), True, 'import tensorflow as tf\n'), ((783, 801), 'tensorflow.shape', 'tf.shape', (['img_plac'], {}), '(img_plac)\n', (791, 801), True, 'import tensorflow as tf\n'), ((891, 920), 'tensorflow.cast', 'tf.cast', (['img_plac', 'tf.float32'], {}), '(img_plac, tf.float32)\n', (898, 920), True, 'import tensorflow as tf\n'), ((937, 1089), 'data.io.image_preprocess.short_side_resize_for_inference_data', 'short_side_resize_for_inference_data', ([], {'img_tensor': 'img_batch', 'target_shortside_len': 'cfgs.IMG_SHORT_SIDE_LEN', 'length_limitation': 'cfgs.IMG_MAX_LENGTH'}), '(img_tensor=img_batch,\n target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN, length_limitation=cfgs.\n IMG_MAX_LENGTH)\n', (973, 1089), False, 'from data.io.image_preprocess import short_side_resize_for_inference_data\n'), ((1260, 1293), 'tensorflow.expand_dims', 'tf.expand_dims', (['img_batch'], {'axis': '(0)'}), '(img_batch, axis=0)\n', (1274, 1293), True, 'import tensorflow as tf\n'), ((1331, 1423), 'libs.networks.build_whole_network.DetectionNetwork', 'build_whole_network.DetectionNetwork', ([], {'base_network_name': 'cfgs.NET_NAME', 'is_training': '(False)'}), '(base_network_name=cfgs.NET_NAME,\n is_training=False)\n', (1367, 1423), False, 'from libs.networks import build_whole_network\n'), ((1802, 1821), 'tensorflow.shape', 'tf.shape', (['img_batch'], {}), '(img_batch)\n', (1810, 1821), True, 'import tensorflow as tf\n'), ((2344, 2368), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2366, 2368), True, 'import tensorflow as tf\n'), ((2418, 2434), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2432, 2434), True, 'import tensorflow as tf\n'), ((821, 846), 'tensorflow.to_float', 'tf.to_float', (['raw_shape[0]'], {}), '(raw_shape[0])\n', (832, 846), True, 'import tensorflow as tf\n'), ((848, 873), 'tensorflow.to_float', 'tf.to_float', (['raw_shape[1]'], {}), '(raw_shape[1])\n', (859, 873), True, 'import tensorflow as tf\n'), ((1215, 1243), 'tensorflow.constant', 'tf.constant', (['cfgs.PIXEL_MEAN'], {}), '(cfgs.PIXEL_MEAN)\n', (1226, 1243), True, 'import tensorflow as tf\n'), ((1849, 1878), 'tensorflow.to_float', 'tf.to_float', (['resized_shape[1]'], {}), '(resized_shape[1])\n', (1860, 1878), True, 'import tensorflow as tf\n'), ((1880, 1909), 'tensorflow.to_float', 'tf.to_float', (['resized_shape[2]'], {}), '(resized_shape[2])\n', (1891, 1909), True, 'import tensorflow as tf\n'), ((2082, 2116), 'tensorflow.stack', 'tf.stack', (['[xmin, ymin, xmax, ymax]'], {}), '([xmin, ymin, xmax, ymax])\n', (2090, 2116), True, 'import tensorflow as tf\n'), ((2445, 2457), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2455, 2457), True, 'import tensorflow as tf\n'), ((2586, 2640), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['sess.graph_def', 'OUT_DIR', 'PB_NAME'], {}), '(sess.graph_def, OUT_DIR, PB_NAME)\n', (2606, 2640), True, 'import tensorflow as tf\n'), ((2140, 2179), 'tensorflow.reshape', 'tf.reshape', (['detection_category', '[-1, 1]'], {}), '(detection_category, [-1, 1])\n', (2150, 2179), True, 'import tensorflow as tf\n'), ((2202, 2239), 'tensorflow.reshape', 'tf.reshape', (['detection_scores', '[-1, 1]'], {}), '(detection_scores, 
[-1, 1])\n', (2212, 2239), True, 'import tensorflow as tf\n'), ((2687, 2717), 'os.path.join', 'os.path.join', (['OUT_DIR', 'PB_NAME'], {}), '(OUT_DIR, PB_NAME)\n', (2699, 2717), False, 'import os, sys\n')] |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from heat.engine.resources.openstack.neutron import net
from heat.engine.resources.openstack.neutron import port
from heat.engine.resources.openstack.neutron import subnet
def _copy_schema_immutable(schema):
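    # Deep-copy the schema and flag every non-updatable property as immutable.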
new_schema = copy.deepcopy(schema)
if not schema.update_allowed:
new_schema.immutable = True
return new_schema
class ImmutableNet(net.Net):
'''Ensure an existing net doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in net.Net.properties_schema.items()
}
class ImmutablePort(port.Port):
'''Ensure an existing port doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in port.Port.properties_schema.items()
}
class ImmutableSubnet(subnet.Subnet):
'''Ensure an existing subnet doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in subnet.Subnet.properties_schema.items()
}
def resource_mapping():
return {
'OS::Neutron::Net': ImmutableNet,
'OS::Neutron::Port': ImmutablePort,
'OS::Neutron::Subnet': ImmutableSubnet,
}
| [
"heat.engine.resources.openstack.neutron.port.Port.properties_schema.items",
"heat.engine.resources.openstack.neutron.net.Net.properties_schema.items",
"heat.engine.resources.openstack.neutron.subnet.Subnet.properties_schema.items",
"copy.deepcopy"
]
| [((816, 837), 'copy.deepcopy', 'copy.deepcopy', (['schema'], {}), '(schema)\n', (829, 837), False, 'import copy\n'), ((1094, 1127), 'heat.engine.resources.openstack.neutron.net.Net.properties_schema.items', 'net.Net.properties_schema.items', ([], {}), '()\n', (1125, 1127), False, 'from heat.engine.resources.openstack.neutron import net\n'), ((1302, 1337), 'heat.engine.resources.openstack.neutron.port.Port.properties_schema.items', 'port.Port.properties_schema.items', ([], {}), '()\n', (1335, 1337), False, 'from heat.engine.resources.openstack.neutron import port\n'), ((1520, 1559), 'heat.engine.resources.openstack.neutron.subnet.Subnet.properties_schema.items', 'subnet.Subnet.properties_schema.items', ([], {}), '()\n', (1557, 1559), False, 'from heat.engine.resources.openstack.neutron import subnet\n')] |
import os
import pytest
from modelkit.assets import errors
from tests.conftest import skip_unless
def _perform_driver_error_object_not_found(driver):
with pytest.raises(errors.ObjectDoesNotExistError):
driver.download_object("someasset", "somedestination")
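    # A failed download must not leave a partially written file behind.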
assert not os.path.isfile("somedestination")
def test_local_driver(local_assetsmanager):
local_driver = local_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(local_driver)
@skip_unless("ENABLE_GCS_TEST", "True")
def test_gcs_driver(gcs_assetsmanager):
gcs_driver = gcs_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(gcs_driver)
@skip_unless("ENABLE_S3_TEST", "True")
def test_s3_driver(s3_assetsmanager):
s3_driver = s3_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(s3_driver)
| [
"os.path.isfile",
"tests.conftest.skip_unless",
"pytest.raises"
]
| [((494, 532), 'tests.conftest.skip_unless', 'skip_unless', (['"""ENABLE_GCS_TEST"""', '"""True"""'], {}), "('ENABLE_GCS_TEST', 'True')\n", (505, 532), False, 'from tests.conftest import skip_unless\n'), ((693, 730), 'tests.conftest.skip_unless', 'skip_unless', (['"""ENABLE_S3_TEST"""', '"""True"""'], {}), "('ENABLE_S3_TEST', 'True')\n", (704, 730), False, 'from tests.conftest import skip_unless\n'), ((163, 208), 'pytest.raises', 'pytest.raises', (['errors.ObjectDoesNotExistError'], {}), '(errors.ObjectDoesNotExistError)\n', (176, 208), False, 'import pytest\n'), ((288, 321), 'os.path.isfile', 'os.path.isfile', (['"""somedestination"""'], {}), "('somedestination')\n", (302, 321), False, 'import os\n')] |
from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
# Create your tests here.
def test_detail_page(self):
""" Test to see if slug generated when saving a Page."""
# Create a user and save to the database
user = User.objects.create()
user.save()
# Create a page and save to the database
page = Page(title="My Detail Test Page", content="details_test", author=user)
page.save()
    # Check that the generated slug matches what we expect
slug = page.slug
response = self.client.get(f'/{slug}/')
self.assertEqual(response.status_code, 200)
info = self.client.get('/')
self.assertContains(info, 'makewiki', html=True)
def test_edit_page(self):
"""Test edit page."""
    # Create a user and a page to edit in the database
user = User.objects.create()
user.save()
page = Page.objects.create(title="My Test Page", content="edit_test", author=user)
page.save()
    # Post the new page data to the form view and check the response
post_data = {
'title': 'Who',
'content': 'Are you?',
'author': user.id,
}
response = self.client.post('/form/', data=post_data)
# Check if response is 200
self.assertEqual(response.status_code, 200)
    # Check that the number of pages passed to the template matches the number of pages in the database
end = self.client.get('/')
result = end.context['pages']
self.assertQuerysetEqual(result, ['<Page: My Test Page>', '<Page: Test>'], ordered=False)
def test_page_creation(self):
# Create user object and save it
user = User.objects.create()
user.save()
# Create a page
page = Page.objects.create(title="The Test Page", content="edit_test", author=user)
page.save()
post_data = {
'title': 'COVID19',
'content': 'Mass Testing is Underway',
'author': user.id
}
response = self.client.post('/form/', data = post_data)
self.assertEqual(response.status_code, 302)
page_object = Page.objects.get(title='COVID19')
self.assertEqual(page_object.content, 'Mass Testing is Underway') | [
"wiki.models.Page.objects.get",
"django.contrib.auth.models.User.objects.create",
"wiki.models.Page.objects.create",
"wiki.models.Page"
]
| [((280, 301), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {}), '()\n', (299, 301), False, 'from django.contrib.auth.models import User\n'), ((379, 449), 'wiki.models.Page', 'Page', ([], {'title': '"""My Detail Test Page"""', 'content': '"""details_test"""', 'author': 'user'}), "(title='My Detail Test Page', content='details_test', author=user)\n", (383, 449), False, 'from wiki.models import Page\n'), ((850, 871), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {}), '()\n', (869, 871), False, 'from django.contrib.auth.models import User\n'), ((900, 975), 'wiki.models.Page.objects.create', 'Page.objects.create', ([], {'title': '"""My Test Page"""', 'content': '"""edit_test"""', 'author': 'user'}), "(title='My Test Page', content='edit_test', author=user)\n", (919, 975), False, 'from wiki.models import Page\n'), ((1649, 1670), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {}), '()\n', (1668, 1670), False, 'from django.contrib.auth.models import User\n'), ((1720, 1796), 'wiki.models.Page.objects.create', 'Page.objects.create', ([], {'title': '"""The Test Page"""', 'content': '"""edit_test"""', 'author': 'user'}), "(title='The Test Page', content='edit_test', author=user)\n", (1739, 1796), False, 'from wiki.models import Page\n'), ((2059, 2092), 'wiki.models.Page.objects.get', 'Page.objects.get', ([], {'title': '"""COVID19"""'}), "(title='COVID19')\n", (2075, 2092), False, 'from wiki.models import Page\n')] |
from datetime import datetime
from django.core.exceptions import FieldError
from django.db.models import CharField, F, Q
from django.db.models.expressions import SimpleCol
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.query import Query
from django.db.models.sql.where import OR
from django.test import TestCase
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
class TestQuery(TestCase):
def test_simple_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
def test_complex_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
self.assertEqual(where.connector, OR)
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
lookup = where.children[1]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.rhs, 0)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
def test_multiple_fields(self):
query = Query(Item)
where = query.build_where(Q(modified__gt=F('created')))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertIsInstance(lookup.rhs, SimpleCol)
self.assertIsInstance(lookup.lhs, SimpleCol)
self.assertEqual(lookup.rhs.target, Item._meta.get_field('created'))
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
def test_transform(self):
query = Query(Author)
with register_lookup(CharField, Lower):
where = query.build_where(~Q(name__lower='foo'))
lookup = where.children[0]
self.assertIsInstance(lookup, Exact)
self.assertIsInstance(lookup.lhs, Lower)
self.assertIsInstance(lookup.lhs.lhs, SimpleCol)
self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field('name'))
def test_negated_nullable(self):
query = Query(Item)
where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
self.assertTrue(where.negated)
lookup = where.children[0]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
lookup = where.children[1]
self.assertIsInstance(lookup, IsNull)
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
def test_foreign_key(self):
query = Query(Item)
msg = 'Joined field references are not permitted in this query'
with self.assertRaisesMessage(FieldError, msg):
query.build_where(Q(creator__num__gt=2))
def test_foreign_key_f(self):
query = Query(Ranking)
with self.assertRaises(FieldError):
query.build_where(Q(rank__gt=F('author__num')))
def test_foreign_key_exclusive(self):
query = Query(ObjectC)
where = query.build_where(Q(objecta=None) | Q(objectb=None))
a_isnull = where.children[0]
self.assertIsInstance(a_isnull, RelatedIsNull)
self.assertIsInstance(a_isnull.lhs, SimpleCol)
self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field('objecta'))
b_isnull = where.children[1]
self.assertIsInstance(b_isnull, RelatedIsNull)
self.assertIsInstance(b_isnull.lhs, SimpleCol)
self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field('objectb'))
| [
"datetime.datetime",
"django.db.models.sql.query.Query",
"django.db.models.F",
"django.test.utils.register_lookup",
"django.db.models.Q"
]
| [((654, 667), 'django.db.models.sql.query.Query', 'Query', (['Author'], {}), '(Author)\n', (659, 667), False, 'from django.db.models.sql.query import Query\n'), ((968, 981), 'django.db.models.sql.query.Query', 'Query', (['Author'], {}), '(Author)\n', (973, 981), False, 'from django.db.models.sql.query import Query\n'), ((1545, 1556), 'django.db.models.sql.query.Query', 'Query', (['Item'], {}), '(Item)\n', (1550, 1556), False, 'from django.db.models.sql.query import Query\n'), ((2015, 2028), 'django.db.models.sql.query.Query', 'Query', (['Author'], {}), '(Author)\n', (2020, 2028), False, 'from django.db.models.sql.query import Query\n'), ((2458, 2469), 'django.db.models.sql.query.Query', 'Query', (['Item'], {}), '(Item)\n', (2463, 2469), False, 'from django.db.models.sql.query import Query\n'), ((2951, 2962), 'django.db.models.sql.query.Query', 'Query', (['Item'], {}), '(Item)\n', (2956, 2962), False, 'from django.db.models.sql.query import Query\n'), ((3195, 3209), 'django.db.models.sql.query.Query', 'Query', (['Ranking'], {}), '(Ranking)\n', (3200, 3209), False, 'from django.db.models.sql.query import Query\n'), ((3373, 3387), 'django.db.models.sql.query.Query', 'Query', (['ObjectC'], {}), '(ObjectC)\n', (3378, 3387), False, 'from django.db.models.sql.query import Query\n'), ((702, 714), 'django.db.models.Q', 'Q', ([], {'num__gt': '(2)'}), '(num__gt=2)\n', (703, 714), False, 'from django.db.models import CharField, F, Q\n'), ((2042, 2075), 'django.test.utils.register_lookup', 'register_lookup', (['CharField', 'Lower'], {}), '(CharField, Lower)\n', (2057, 2075), False, 'from django.test.utils import register_lookup\n'), ((1016, 1028), 'django.db.models.Q', 'Q', ([], {'num__gt': '(2)'}), '(num__gt=2)\n', (1017, 1028), False, 'from django.db.models import CharField, F, Q\n'), ((1031, 1043), 'django.db.models.Q', 'Q', ([], {'num__lt': '(0)'}), '(num__lt=0)\n', (1032, 1043), False, 'from django.db.models import CharField, F, Q\n'), ((3121, 3142), 'django.db.models.Q', 'Q', ([], {'creator__num__gt': '(2)'}), '(creator__num__gt=2)\n', (3122, 3142), False, 'from django.db.models import CharField, F, Q\n'), ((3422, 3437), 'django.db.models.Q', 'Q', ([], {'objecta': 'None'}), '(objecta=None)\n', (3423, 3437), False, 'from django.db.models import CharField, F, Q\n'), ((3440, 3455), 'django.db.models.Q', 'Q', ([], {'objectb': 'None'}), '(objectb=None)\n', (3441, 3455), False, 'from django.db.models import CharField, F, Q\n'), ((1606, 1618), 'django.db.models.F', 'F', (['"""created"""'], {}), "('created')\n", (1607, 1618), False, 'from django.db.models import CharField, F, Q\n'), ((2116, 2136), 'django.db.models.Q', 'Q', ([], {'name__lower': '"""foo"""'}), "(name__lower='foo')\n", (2117, 2136), False, 'from django.db.models import CharField, F, Q\n'), ((2520, 2540), 'datetime.datetime', 'datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (2528, 2540), False, 'from datetime import datetime\n'), ((3295, 3311), 'django.db.models.F', 'F', (['"""author__num"""'], {}), "('author__num')\n", (3296, 3311), False, 'from django.db.models import CharField, F, Q\n')] |
# This notebook implements a proof-of-principle for
# Multi-Agent Common Knowledge Reinforcement Learning (MACKRL)
# The entire notebook can be executed online, no need to download anything
# http://pytorch.org/
from itertools import chain
import torch
import torch.nn.functional as F
from torch.multiprocessing import Pool, set_start_method, freeze_support
try:
set_start_method('spawn')
except RuntimeError:
pass
from torch.nn import init
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plt
use_cuda = False
payoff_values = []
payoff_values.append(torch.tensor([ # payoff values
[5, 0, 0, 2, 0],
[0, 1, 2, 4, 2],
[0, 0, 0, 2, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
], dtype=torch.float32) * 0.2)
payoff_values.append(
torch.tensor([ # payoff values
[0, 0, 1, 0, 5],
[0, 0, 2, 0, 0],
[1, 2, 4, 2, 1],
[0, 0, 2, 0, 0],
[0, 0, 1, 0, 0],
], dtype=torch.float32) * 0.2)
n_agents = 2
n_actions = len(payoff_values[0])
n_states_dec = 5
n_states_joint = 3
n_mix_hidden = 3
p_observation = 0.5
p_ck_noise = [0.0]
# Number of gradient steps
t_max = 202
# We'll be using a high learning rate, since we have exact gradients
lr = 0.05 # DEBUG: 0.05 if exact gradients!
optim = 'adam'
# You can reduce this number if you are short on time. (Eg. n_trials = 20)
#n_trials = 100 # 30
n_trials = 20 #15 #100
std_val = 1.0
# These are the 3 settings we run: MACKRL, Joint-action-learner (always uses CK),
# Independent Actor-Critic (always uses decentralised actions selection)
labels = ["IAC", "JAL"]
p_vec = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
final_res = []
# # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for
# # joint action + 1 action for delegation to the independent agents.
# theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1)
# Produce marginalised policy: pi_pc[0] * pi^a * pi^b + p(u^ab)
def p_joint_all(pi_pc, pi_dec):
p_joint = pi_pc[1:].view(n_actions, n_actions).clone()
pi_a_pi_b = torch.ger(pi_dec[0], pi_dec[1])
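    # Outer product of the two decentralised policies: the joint distribution over independent action pairs.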
p_joint = pi_pc[0] * pi_a_pi_b + p_joint
return p_joint
def p_joint_all_noise_alt(pi_pcs, pi_dec, p_ck_noise, ck_state):
p_none = (1-p_ck_noise) ** 2 # both unnoised
p_both = (p_ck_noise) ** 2 # both noised
p_one = (1-p_ck_noise) * p_ck_noise # exactly one noised
p_marg_ag0_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag0_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag1_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_marg_ag1_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_joint_ck0 = pi_pcs[0][1:].view(n_actions, n_actions).clone()
p_joint_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone()
p_joint_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone()
p_d_ck0 = pi_pcs[0][0]
p_d_ck1 = pi_pcs[1][0]
p_d_ck2 = pi_pcs[2][0]
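    # Index 0 of each pair-controller policy is the probability of delegating to the
    # decentralised agents; the remaining entries cover the n_actions^2 joint actions.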
def make_joint(p1, p2, mode="interval"):
"""
1. Pick uniform random variable between [0,1]
2. Do multinomial sampling through contiguous, ordered bucketing for both p1, p2
"""
p1 = p1.clone().view(-1)
p2 = p2.clone().view(-1)
p_final = p1.clone().zero_()
if mode == "interval":
for i in range(p1.shape[0]):
# calculate overlap between the probability distributions
low1 = torch.sum(p1[:i])
high1 = low1 + p1[i]
low2 = torch.sum(p2[:i])
high2 = low2 + p2[i]
if low1 >= low2 and high2 > low1:
p_final[i] = torch.min(high1, high2) - low1
pass
elif low2 >= low1 and high1 > low2:
p_final[i] = torch.min(high1, high2) - low2
else:
p_final[i] = 0
return p_final.clone().view(n_actions, n_actions)
if ck_state == 0:
p_joint = p_joint_ck0 + p_d_ck0 * torch.ger(pi_dec[0], pi_dec[1])
return p_joint # always delegate
elif ck_state == 1:
p_joint = p_none * p_joint_ck1 + \
p_both * p_joint_ck2 + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
(p_one * p_d_ck1 * p_d_ck2
+ p_one * p_d_ck2 * p_d_ck1
+ p_both * p_d_ck2
+ p_none * p_d_ck1) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1])
return p_joint
elif ck_state == 2:
p_joint = p_none * p_joint_ck2 + \
p_both * p_joint_ck1 + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
(p_one * p_d_ck2 * p_d_ck1
+ p_one * p_d_ck1 * p_d_ck2
+ p_both * p_d_ck1
+ p_none * p_d_ck2) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1])
return p_joint
pass
def get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise=0):
if test:
beta = 100
else:
beta = 1
actions = []
pi_dec = []
# common_knowledge decides whether ck_state is informative
if common_knowledge == 0:
ck_state = 0
else:
ck_state = int(observations[0] + 1)
if p_ck_noise == 0:
pol_vals = theta_joint[ck_state, :].clone()
# logits get masked out for independent learner and joint-action-learner
# independent learner has a pair controller that always delegates
if run == 'JAL':
pol_vals[0] = -10 ** 10
elif run == 'IAC':
pol_vals[1:] = -10 ** 10
        # apply temperature: beta is large at test time, making the policies near-greedy
pi_pc = F.softmax(pol_vals * beta, -1)
        # calculate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pc, pi_dec
else:
pol_vals = theta_joint.clone()
pi_pcs = []
for i in range(n_states_joint):
if run == 'JAL':
pol_vals[i][0] = -10 ** 10
elif run == 'IAC':
pol_vals[i][1:] = -10 ** 10
            # apply temperature: beta is large at test time, making the policies near-greedy
pi_pcs.append(F.softmax(pol_vals[i] * beta, -1))
        # calculate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pcs, pi_dec, ck_state
def get_state(common_knowledge, obs_0, obs_1, matrix_id):
receives_obs = [obs_0, obs_1]
if common_knowledge == 1:
observations = np.repeat(matrix_id, 2)
else:
observations = np.ones((n_agents)) * 2 #
for ag in range(n_agents):
if receives_obs[ag]:
observations[ag] += matrix_id + 1
return common_knowledge, observations, matrix_id
# Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau)
def expected_return(p_common, p_observation, thetas, run, test, p_ck_noise=0):
thetas_dec = thetas["dec"]
theta_joint = thetas["joint"]
# Probability of CK
p_common_val = [1 - p_common, p_common]
# Probability of observation given no CK)
p_obs_val = [1 - p_observation, p_observation]
# Matrices are chosen 50 / 50
p_matrix = [0.5, 0.5]
# p_matrix = [1.0, 0.0] # DEBUG!
# Initialise expected return
ret_val = 0
for ck in [0, 1]:
for matrix_id in [0, 1]:
for obs_0 in [0, 1]:
for obs_1 in [0, 1]:
p_state = p_common_val[ck] * p_obs_val[obs_0] * p_obs_val[obs_1] * p_matrix[matrix_id]
common_knowledge, observations, matrix_id = get_state(ck, obs_0, obs_1, matrix_id)
# Get final probabilities for joint actions
if p_ck_noise==0:
pi_pc, pi_dec = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint)
p_joint_val = p_joint_all(pi_pc, pi_dec)
else:
pol_vals, pi_dec, ck_state = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise)
p_joint_val = p_joint_all_noise_alt(pol_vals, pi_dec, p_ck_noise, ck_state)
# Expected return is just the elementwise product of rewards and action probabilities
expected_ret = (p_joint_val * payoff_values[matrix_id]).sum()
# Add return from given state
ret_val = ret_val + p_state * expected_ret
return ret_val
def _proc(args):
p_common, p_observation, run, p_ck_noise, t_max, n_trials = args
results = []
for nt in range(n_trials):
print("Run: {} P_CK_NOISE: {} P_common: {} #Trial: {}".format(run, p_ck_noise, p_common, nt))
results_log = np.zeros((t_max // (t_max // 100),))
results_log_test = np.zeros((t_max // (t_max // 100),))
thetas = {}
thetas["dec"] = [init.normal_(torch.zeros(n_states_dec, n_actions, requires_grad=True), std=std_val) for i in
range(n_agents)]
thetas["joint"] = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True),
std=std_val)
params = chain(*[_v if isinstance(_v, (list, tuple)) else [_v] for _v in thetas.values()])
params = list(params)
if use_cuda:
for param in params:
param = param.to("cuda")
if optim == 'sgd':
optimizer = SGD(params, lr=lr)
else:
optimizer = Adam(params, lr=lr)
for i in range(t_max):
if run in ['MACKRL',
'JAL',
'IAC']:
loss = - expected_return(p_common, p_observation, thetas, run, False, p_ck_noise)
r_s = -loss.data.numpy()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % (t_max // 100) == 0:
if run in ['MACKRL',
'JAL',
'IAC']:
r_test = expected_return(p_common, p_observation, thetas, run, True, p_ck_noise)
results_log_test[i // (t_max // 100)] = r_test
results_log[i // (t_max // 100)] = r_s
results.append((results_log_test, results_log))
return results
def main():
use_mp = True
if use_mp:
pool = Pool(processes=2)
        # We'll be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = pool.map(_proc, [ (pc, p_observation, run, pnoise, t_max, n_trials) for pc in p_vec ])
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
pool.close()
pool.join()
else:
        # We'll be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = [_proc((pc, p_observation, run, pnoise, t_max, n_trials)) for pc in p_vec ]
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
import pickle
import uuid
import os
res_dict = {}
res_dict["final_res"] = final_res
res_dict["labels"] = labels
res_dict["p_ck_noise"] = p_ck_noise
res_dict["p_vec"] = p_vec
if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles")):
os.makedirs(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles"))
pickle.dump(res_dict, open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles",
"final_res_{}.p".format(uuid.uuid4().hex[:4])), "wb"))
plt.figure(figsize=(5, 5))
color = ['b', 'r','g', 'c', 'm', 'y', 'k','b', 'r','g', 'c', 'm', 'y', 'k']
titles = ['Test', 'Train Performance']
for pl in [0,1]:
ax = plt.subplot(1, 1, 1)
for i in range(len(labels)):
for pck, pcknoise in enumerate(p_ck_noise):
mean_vals = []
min_vals = []
max_vals = []
for j, p in enumerate( p_vec ):
vals = final_res[j][pl]
this_mean = np.mean( vals[i*len(p_ck_noise) + pck], 1)[-1]
std = np.std(vals[i], 1)[-1]/0.5
low = this_mean-std / (n_trials)**0.5
high = this_mean + std / (n_trials)**0.5
mean_vals.append( this_mean )
min_vals.append( low )
max_vals.append( high )
plt.plot(p_vec,
mean_vals,
color[(i*len(p_ck_noise) + pck) % len(color)],
label = "{} p_ck_noise: {}".format(labels[i], pcknoise))
plt.fill_between(p_vec,
min_vals,
max_vals,
facecolor=color[i],
alpha=0.3)
plt.xlabel('P(common knowledge)')
plt.ylabel('Expected Return')
plt.ylim([0.0, 1.01])
plt.xlim([-0.01, 1.01])
ax.set_facecolor((1.0, 1.0, 1.0))
ax.grid(color='k', linestyle='-', linewidth=1)
ax.set_title(titles[pl])
plt.legend()
plt.xticks([0, 0.5, 1])
plt.yticks([0.5, 0.75, 1])
plt.savefig("MACKRL {}.pdf".format(titles[pl]))
plt.show(block=False)
if __name__ == "__main__":
freeze_support()
main() | [
"torch.ger",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"torch.min",
"torch.multiprocessing.freeze_support",
"torch.sum",
"torch.nn.functional.softmax",
"numpy.repeat",
"matplotlib.pyplot.xlabel",
"torch.multiprocessing.Pool",
"numpy.stack",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"torch.multiprocessing.set_start_method",
"torch.optim.SGD",
"numpy.ones",
"matplotlib.pyplot.xticks",
"uuid.uuid4",
"numpy.std",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"torch.optim.Adam",
"torch.tensor",
"matplotlib.pyplot.figure",
"numpy.zeros",
"os.path.abspath",
"matplotlib.pyplot.subplot",
"torch.zeros"
]
| [((370, 395), 'torch.multiprocessing.set_start_method', 'set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (386, 395), False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((2125, 2156), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'pi_dec[1]'], {}), '(pi_dec[0], pi_dec[1])\n', (2134, 2156), False, 'import torch\n'), ((14535, 14561), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (14545, 14561), True, 'import matplotlib.pyplot as plt\n'), ((16329, 16345), 'torch.multiprocessing.freeze_support', 'freeze_support', ([], {}), '()\n', (16343, 16345), False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((599, 724), 'torch.tensor', 'torch.tensor', (['[[5, 0, 0, 2, 0], [0, 1, 2, 4, 2], [0, 0, 0, 2, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 0]]'], {'dtype': 'torch.float32'}), '([[5, 0, 0, 2, 0], [0, 1, 2, 4, 2], [0, 0, 0, 2, 0], [0, 0, 0, \n 1, 0], [0, 0, 0, 0, 0]], dtype=torch.float32)\n', (611, 724), False, 'import torch\n'), ((793, 918), 'torch.tensor', 'torch.tensor', (['[[0, 0, 1, 0, 5], [0, 0, 2, 0, 0], [1, 2, 4, 2, 1], [0, 0, 2, 0, 0], [0, 0,\n 1, 0, 0]]'], {'dtype': 'torch.float32'}), '([[0, 0, 1, 0, 5], [0, 0, 2, 0, 0], [1, 2, 4, 2, 1], [0, 0, 2, \n 0, 0], [0, 0, 1, 0, 0]], dtype=torch.float32)\n', (805, 918), False, 'import torch\n'), ((6659, 6689), 'torch.nn.functional.softmax', 'F.softmax', (['(pol_vals * beta)', '(-1)'], {}), '(pol_vals * beta, -1)\n', (6668, 6689), True, 'import torch.nn.functional as F\n'), ((7710, 7733), 'numpy.repeat', 'np.repeat', (['matrix_id', '(2)'], {}), '(matrix_id, 2)\n', (7719, 7733), True, 'import numpy as np\n'), ((9970, 10006), 'numpy.zeros', 'np.zeros', (['(t_max // (t_max // 100),)'], {}), '((t_max // (t_max // 100),))\n', (9978, 10006), True, 'import numpy as np\n'), ((10034, 10070), 'numpy.zeros', 'np.zeros', (['(t_max // (t_max // 100),)'], {}), '((t_max // (t_max // 100),))\n', (10042, 10070), True, 'import numpy as np\n'), ((11627, 11644), 'torch.multiprocessing.Pool', 'Pool', ([], {'processes': '(2)'}), '(processes=2)\n', (11631, 11644), False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((14720, 14740), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (14731, 14740), True, 'import matplotlib.pyplot as plt\n'), ((15859, 15892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""P(common knowledge)"""'], {}), "('P(common knowledge)')\n", (15869, 15892), True, 'import matplotlib.pyplot as plt\n'), ((15901, 15930), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Return"""'], {}), "('Expected Return')\n", (15911, 15930), True, 'import matplotlib.pyplot as plt\n'), ((15939, 15960), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.01]'], {}), '([0.0, 1.01])\n', (15947, 15960), True, 'import matplotlib.pyplot as plt\n'), ((15969, 15992), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.01, 1.01]'], {}), '([-0.01, 1.01])\n', (15977, 15992), True, 'import matplotlib.pyplot as plt\n'), ((16131, 16143), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16141, 16143), True, 'import matplotlib.pyplot as plt\n'), ((16152, 16175), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (16162, 16175), True, 'import matplotlib.pyplot as plt\n'), ((16184, 16210), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.5, 0.75, 1]'], {}), '([0.5, 0.75, 1])\n', (16194, 16210), True, 'import matplotlib.pyplot as plt\n'), ((16275, 16296), 
'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (16283, 16296), True, 'import matplotlib.pyplot as plt\n'), ((6829, 6875), 'torch.nn.functional.softmax', 'F.softmax', (['(thetas_dec[i][dec_state] * beta)', '(-1)'], {}), '(thetas_dec[i][dec_state] * beta, -1)\n', (6838, 6875), True, 'import torch.nn.functional as F\n'), ((7445, 7491), 'torch.nn.functional.softmax', 'F.softmax', (['(thetas_dec[i][dec_state] * beta)', '(-1)'], {}), '(thetas_dec[i][dec_state] * beta, -1)\n', (7454, 7491), True, 'import torch.nn.functional as F\n'), ((7767, 7784), 'numpy.ones', 'np.ones', (['n_agents'], {}), '(n_agents)\n', (7774, 7784), True, 'import numpy as np\n'), ((10291, 10358), 'torch.zeros', 'torch.zeros', (['n_states_joint', '(n_actions ** 2 + 1)'], {'requires_grad': '(True)'}), '(n_states_joint, n_actions ** 2 + 1, requires_grad=True)\n', (10302, 10358), False, 'import torch\n'), ((10690, 10708), 'torch.optim.SGD', 'SGD', (['params'], {'lr': 'lr'}), '(params, lr=lr)\n', (10693, 10708), False, 'from torch.optim import Adam, SGD\n'), ((10747, 10766), 'torch.optim.Adam', 'Adam', (['params'], {'lr': 'lr'}), '(params, lr=lr)\n', (10751, 10766), False, 'from torch.optim import Adam, SGD\n'), ((3537, 3554), 'torch.sum', 'torch.sum', (['p1[:i]'], {}), '(p1[:i])\n', (3546, 3554), False, 'import torch\n'), ((3615, 3632), 'torch.sum', 'torch.sum', (['p2[:i]'], {}), '(p2[:i])\n', (3624, 3632), False, 'import torch\n'), ((4110, 4141), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'pi_dec[1]'], {}), '(pi_dec[0], pi_dec[1])\n', (4119, 4141), False, 'import torch\n'), ((7267, 7300), 'torch.nn.functional.softmax', 'F.softmax', (['(pol_vals[i] * beta)', '(-1)'], {}), '(pol_vals[i] * beta, -1)\n', (7276, 7300), True, 'import torch.nn.functional as F\n'), ((10130, 10186), 'torch.zeros', 'torch.zeros', (['n_states_dec', 'n_actions'], {'requires_grad': '(True)'}), '(n_states_dec, n_actions, requires_grad=True)\n', (10141, 10186), False, 'import torch\n'), ((15643, 15717), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['p_vec', 'min_vals', 'max_vals'], {'facecolor': 'color[i]', 'alpha': '(0.3)'}), '(p_vec, min_vals, max_vals, facecolor=color[i], alpha=0.3)\n', (15659, 15717), True, 'import matplotlib.pyplot as plt\n'), ((4965, 5001), 'torch.ger', 'torch.ger', (['p_marg_ag0_ck1', 'pi_dec[1]'], {}), '(p_marg_ag0_ck1, pi_dec[1])\n', (4974, 5001), False, 'import torch\n'), ((14070, 14095), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (14085, 14095), False, 'import os\n'), ((14204, 14229), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (14219, 14229), False, 'import os\n'), ((14348, 14373), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (14363, 14373), False, 'import os\n'), ((3753, 3776), 'torch.min', 'torch.min', (['high1', 'high2'], {}), '(high1, high2)\n', (3762, 3776), False, 'import torch\n'), ((4872, 4908), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'p_marg_ag1_ck1'], {}), '(pi_dec[0], p_marg_ag1_ck1)\n', (4881, 4908), False, 'import torch\n'), ((5807, 5843), 'torch.ger', 'torch.ger', (['p_marg_ag0_ck2', 'pi_dec[1]'], {}), '(p_marg_ag0_ck2, pi_dec[1])\n', (5816, 5843), False, 'import torch\n'), ((12531, 12572), 'numpy.stack', 'np.stack', (['[r[0] for r in results]'], {'axis': '(1)'}), '([r[0] for r in results], axis=1)\n', (12539, 12572), True, 'import numpy as np\n'), ((12609, 12650), 'numpy.stack', 'np.stack', (['[r[1] for r in results]'], {'axis': '(1)'}), '([r[1] for r in results], axis=1)\n', 
(12617, 12650), True, 'import numpy as np\n'), ((13633, 13674), 'numpy.stack', 'np.stack', (['[r[0] for r in results]'], {'axis': '(1)'}), '([r[0] for r in results], axis=1)\n', (13641, 13674), True, 'import numpy as np\n'), ((13711, 13752), 'numpy.stack', 'np.stack', (['[r[1] for r in results]'], {'axis': '(1)'}), '([r[1] for r in results], axis=1)\n', (13719, 13752), True, 'import numpy as np\n'), ((3894, 3917), 'torch.min', 'torch.min', (['high1', 'high2'], {}), '(high1, high2)\n', (3903, 3917), False, 'import torch\n'), ((4779, 4815), 'torch.ger', 'torch.ger', (['p_marg_ag0_ck2', 'pi_dec[1]'], {}), '(p_marg_ag0_ck2, pi_dec[1])\n', (4788, 4815), False, 'import torch\n'), ((5714, 5750), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'p_marg_ag1_ck2'], {}), '(pi_dec[0], p_marg_ag1_ck2)\n', (5723, 5750), False, 'import torch\n'), ((14500, 14512), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14510, 14512), False, 'import uuid\n'), ((15122, 15140), 'numpy.std', 'np.std', (['vals[i]', '(1)'], {}), '(vals[i], 1)\n', (15128, 15140), True, 'import numpy as np\n'), ((4686, 4722), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'p_marg_ag1_ck2'], {}), '(pi_dec[0], p_marg_ag1_ck2)\n', (4695, 4722), False, 'import torch\n'), ((5621, 5657), 'torch.ger', 'torch.ger', (['p_marg_ag0_ck1', 'pi_dec[1]'], {}), '(p_marg_ag0_ck1, pi_dec[1])\n', (5630, 5657), False, 'import torch\n'), ((4598, 4629), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'pi_dec[1]'], {}), '(pi_dec[0], pi_dec[1])\n', (4607, 4629), False, 'import torch\n'), ((5528, 5564), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'p_marg_ag1_ck1'], {}), '(pi_dec[0], p_marg_ag1_ck1)\n', (5537, 5564), False, 'import torch\n'), ((5440, 5471), 'torch.ger', 'torch.ger', (['pi_dec[0]', 'pi_dec[1]'], {}), '(pi_dec[0], pi_dec[1])\n', (5449, 5471), False, 'import torch\n')] |
import inspect
def get_default_args(func):
"""Get default arguments of a function.
"""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
| [
"inspect.signature"
]
| [((113, 136), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (130, 136), False, 'import inspect\n')] |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
from IPython import embed
INF = 1e8
@HEADS.register_module
class EmbeddingNNmsHeadV2limited(nn.Module):
"""
Fully Convolutional One-Stage Object Detection head from [1]_.
The FCOS head does not use anchor boxes. Instead bounding boxes are
predicted at each pixel and a centerness measure is used to supress
low-quality predictions.
References:
.. [1] https://arxiv.org/abs/1904.01355
Example:
        >>> self = EmbeddingNNmsHeadV2limited(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, embedding_pred = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
embedding_convs_num=2,
strides=(4, 8, 16, 32, 64),
delta=2.0,
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(EmbeddingNNmsHeadV2limited, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.embedding_convs_num = embedding_convs_num
self.strides = strides
self.delta = delta
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.embedding_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.embedding_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fcos_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.embedding_cls = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
# Pull and Push loss
self.pull_loss = nn.MSELoss()
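        # The MSE term pulls embeddings of one instance towards their mean; the push term
        # in loss() separates instance means by at least self.delta.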
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.embedding_cls, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
reg_feat = x
embedding_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.fcos_cls(cls_feat)
for embedding_layer in self.embedding_convs:
embedding_feat = embedding_layer(embedding_feat)
embedding_pred = self.embedding_cls(embedding_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
return cls_score, bbox_pred, embedding_pred
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
embedding_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(embedding_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores and bbox_preds
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_embedding_preds = [
embedding_feat.permute(0, 2, 3, 1).reshape(-1, 1)
for embedding_feat in embedding_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_embedding_preds = torch.cat(flatten_embedding_preds)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
            avg_factor=num_pos + num_imgs)  # avoid division by zero when num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
pos_iou_scores = bbox_overlaps(pos_decoded_bbox_preds, pos_decoded_target_preds, is_aligned=True).clamp(min=1e-6)
max_scores, max_inds = flatten_cls_scores.sigmoid().max(1)
pos_embedding_preds = flatten_embedding_preds[pos_inds]
            # Instance-level operations: group positive samples by ground-truth object
dist_conf_mask_list = []
            # generate an instance index for every positive location
instance_counter = torch.zeros(num_pos, device=pos_points.device)
remove = torch.zeros(num_pos, device=pos_points.device)
obj_id = 0
# NOTE: get mask for each obj
for i in range(len(pos_decoded_target_preds)):
if remove[i] == 0:
current_bbox = pos_decoded_target_preds[i]
mask = ((pos_decoded_target_preds == current_bbox).sum(1)==4).nonzero()
instance_counter[mask] = obj_id
remove[mask] = 1
obj_id += 1
instance_counter = instance_counter.int()
obj_ids = torch.bincount(instance_counter).nonzero().int()
for obj_id in obj_ids:
dist_conf_mask_list.append((instance_counter==obj_id).float())
# Opt for each obj
objs_embedding_list = []
obj_embedding_means_list = []
obj_embedding_means_expand_list = []
for dist_conf_mask in dist_conf_mask_list:
obj_mask_inds = dist_conf_mask.nonzero().reshape(-1)
obj_embedding_preds = pos_embedding_preds[obj_mask_inds]
objs_embedding_list.append(obj_embedding_preds)
# mean value
embedding_mean = obj_embedding_preds.sum() / obj_embedding_preds.shape[0]
obj_embedding_means_list.append(embedding_mean)
obj_embedding_means_expand_list.append(torch.zeros_like(obj_embedding_preds).fill_(embedding_mean))
            # embed()  # leftover IPython debugging hook, disabled so the loss can run non-interactively
# pull loss
theta = 1
embedding_expand_means = torch.cat(obj_embedding_means_expand_list)
pull_embedding = torch.cat(objs_embedding_list)
pull_loss = theta * self.pull_loss(pull_embedding, embedding_expand_means)
# push loss
N_samples = len(dist_conf_mask_list)
push_loss = 0
for obj_j_embedding_mean in obj_embedding_means_list:
for obj_k_embedding_mean in obj_embedding_means_list:
if torch.equal(obj_j_embedding_mean, obj_k_embedding_mean):
continue
else:
push_dist = self.delta - torch.abs(obj_k_embedding_mean - obj_j_embedding_mean)
push_loss += torch.max(push_dist, torch.zeros(1, device=push_dist.device))
push_loss = push_loss / N_samples**2
# iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds)
else:
loss_bbox = pos_bbox_preds.sum()
push_loss = pos_bbox_preds.sum()
pull_loss = pos_bbox_preds.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
push_loss=push_loss,
pull_loss=pull_loss)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, points in zip(
cls_scores, bbox_preds, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
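        # offset by stride // 2 so each point sits at the centre of its feature-map cell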
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
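        # locations that match no GT box (min_area == INF) are labelled as background (0)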
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
| [
"torch.nn.MSELoss",
"mmdet.core.bbox_overlaps",
"torch.arange",
"torch.nn.ModuleList",
"IPython.embed",
"mmdet.core.multiclass_nms",
"torch.meshgrid",
"torch.zeros_like",
"torch.abs",
"mmdet.core.multi_apply",
"torch.equal",
"mmdet.core.distance2bbox",
"torch.cat",
"mmdet.core.force_fp32",
"torch.bincount",
"torch.stack",
"torch.nn.Conv2d",
"torch.zeros",
"mmcv.cnn.normal_init"
]
| [((5325, 5374), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds')"}), "(apply_to=('cls_scores', 'bbox_preds'))\n", (5335, 5374), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((11008, 11057), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds')"}), "(apply_to=('cls_scores', 'bbox_preds'))\n", (11018, 11057), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((2441, 2456), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2454, 2456), True, 'import torch.nn as nn\n'), ((2482, 2497), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2495, 2497), True, 'import torch.nn as nn\n'), ((2529, 2544), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2542, 2544), True, 'import torch.nn as nn\n'), ((3737, 3803), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', 'self.cls_out_channels', '(3)'], {'padding': '(1)'}), '(self.feat_channels, self.cls_out_channels, 3, padding=1)\n', (3746, 3803), True, 'import torch.nn as nn\n'), ((3841, 3887), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(4)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, 4, 3, padding=1)\n', (3850, 3887), True, 'import torch.nn as nn\n'), ((3917, 3963), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(1)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, 1, 3, padding=1)\n', (3926, 3963), True, 'import torch.nn as nn\n'), ((4093, 4105), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4103, 4105), True, 'import torch.nn as nn\n'), ((4338, 4389), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_cls'], {'std': '(0.01)', 'bias': 'bias_cls'}), '(self.fcos_cls, std=0.01, bias=bias_cls)\n', (4349, 4389), False, 'from mmcv.cnn import normal_init\n'), ((4398, 4434), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_reg'], {'std': '(0.01)'}), '(self.fcos_reg, std=0.01)\n', (4409, 4434), False, 'from mmcv.cnn import normal_init\n'), ((4443, 4484), 'mmcv.cnn.normal_init', 'normal_init', (['self.embedding_cls'], {'std': '(0.01)'}), '(self.embedding_cls, std=0.01)\n', (4454, 4484), False, 'from mmcv.cnn import normal_init\n'), ((4531, 4583), 'mmdet.core.multi_apply', 'multi_apply', (['self.forward_single', 'feats', 'self.scales'], {}), '(self.forward_single, feats, self.scales)\n', (4542, 4583), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((6596, 6625), 'torch.cat', 'torch.cat', (['flatten_cls_scores'], {}), '(flatten_cls_scores)\n', (6605, 6625), False, 'import torch\n'), ((6655, 6684), 'torch.cat', 'torch.cat', (['flatten_bbox_preds'], {}), '(flatten_bbox_preds)\n', (6664, 6684), False, 'import torch\n'), ((6719, 6753), 'torch.cat', 'torch.cat', (['flatten_embedding_preds'], {}), '(flatten_embedding_preds)\n', (6728, 6753), False, 'import torch\n'), ((6779, 6796), 'torch.cat', 'torch.cat', (['labels'], {}), '(labels)\n', (6788, 6796), False, 'import torch\n'), ((6828, 6851), 'torch.cat', 'torch.cat', (['bbox_targets'], {}), '(bbox_targets)\n', (6837, 6851), False, 'import torch\n'), ((13551, 13573), 'torch.cat', 'torch.cat', (['mlvl_bboxes'], {}), '(mlvl_bboxes)\n', (13560, 13573), False, 'import torch\n'), ((13680, 13702), 'torch.cat', 'torch.cat', (['mlvl_scores'], {}), '(mlvl_scores)\n', (13689, 13702), False, 'import torch\n'), ((13790, 13830), 'torch.cat', 'torch.cat', (['[padding, mlvl_scores]'], {'dim': '(1)'}), '([padding, 
mlvl_scores], dim=1)\n', (13799, 13830), False, 'import torch\n'), ((13864, 13950), 'mmdet.core.multiclass_nms', 'multiclass_nms', (['mlvl_bboxes', 'mlvl_scores', 'cfg.score_thr', 'cfg.nms', 'cfg.max_per_img'], {}), '(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.\n max_per_img)\n', (13878, 13950), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((14791, 14854), 'torch.arange', 'torch.arange', (['(0)', '(w * stride)', 'stride'], {'dtype': 'dtype', 'device': 'device'}), '(0, w * stride, stride, dtype=dtype, device=device)\n', (14803, 14854), False, 'import torch\n'), ((14886, 14949), 'torch.arange', 'torch.arange', (['(0)', '(h * stride)', 'stride'], {'dtype': 'dtype', 'device': 'device'}), '(0, h * stride, stride, dtype=dtype, device=device)\n', (14898, 14949), False, 'import torch\n'), ((14978, 15010), 'torch.meshgrid', 'torch.meshgrid', (['y_range', 'x_range'], {}), '(y_range, x_range)\n', (14992, 15010), False, 'import torch\n'), ((15598, 15639), 'torch.cat', 'torch.cat', (['expanded_regress_ranges'], {'dim': '(0)'}), '(expanded_regress_ranges, dim=0)\n', (15607, 15639), False, 'import torch\n'), ((15664, 15688), 'torch.cat', 'torch.cat', (['points'], {'dim': '(0)'}), '(points, dim=0)\n', (15673, 15688), False, 'import torch\n'), ((15782, 15915), 'mmdet.core.multi_apply', 'multi_apply', (['self.fcos_target_single', 'gt_bboxes_list', 'gt_labels_list'], {'points': 'concat_points', 'regress_ranges': 'concat_regress_ranges'}), '(self.fcos_target_single, gt_bboxes_list, gt_labels_list, points\n =concat_points, regress_ranges=concat_regress_ranges)\n', (15793, 15915), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((17787, 17830), 'torch.stack', 'torch.stack', (['(left, top, right, bottom)', '(-1)'], {}), '((left, top, right, bottom), -1)\n', (17798, 17830), False, 'import torch\n'), ((4180, 4209), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (4191, 4209), False, 'from mmcv.cnn import normal_init\n'), ((4255, 4284), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (4266, 4284), False, 'from mmcv.cnn import normal_init\n'), ((7473, 7514), 'mmdet.core.distance2bbox', 'distance2bbox', (['pos_points', 'pos_bbox_preds'], {}), '(pos_points, pos_bbox_preds)\n', (7486, 7514), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((7554, 7597), 'mmdet.core.distance2bbox', 'distance2bbox', (['pos_points', 'pos_bbox_targets'], {}), '(pos_points, pos_bbox_targets)\n', (7567, 7597), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((8073, 8119), 'torch.zeros', 'torch.zeros', (['num_pos'], {'device': 'pos_points.device'}), '(num_pos, device=pos_points.device)\n', (8084, 8119), False, 'import torch\n'), ((8141, 8187), 'torch.zeros', 'torch.zeros', (['num_pos'], {'device': 'pos_points.device'}), '(num_pos, device=pos_points.device)\n', (8152, 8187), False, 'import torch\n'), ((9733, 9775), 'torch.cat', 'torch.cat', (['obj_embedding_means_expand_list'], {}), '(obj_embedding_means_expand_list)\n', (9742, 9775), False, 'import torch\n'), ((9805, 9835), 'torch.cat', 'torch.cat', (['objs_embedding_list'], {}), '(objs_embedding_list)\n', (9814, 9835), False, 'import torch\n'), ((13397, 13450), 'mmdet.core.distance2bbox', 'distance2bbox', (['points', 'bbox_pred'], {'max_shape': 
'img_shape'}), '(points, bbox_pred, max_shape=img_shape)\n', (13410, 13450), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((9641, 9648), 'IPython.embed', 'embed', ([], {}), '()\n', (9646, 9648), False, 'from IPython import embed\n'), ((16476, 16524), 'torch.cat', 'torch.cat', (['[labels[i] for labels in labels_list]'], {}), '([labels[i] for labels in labels_list])\n', (16485, 16524), False, 'import torch\n'), ((16586, 16652), 'torch.cat', 'torch.cat', (['[bbox_targets[i] for bbox_targets in bbox_targets_list]'], {}), '([bbox_targets[i] for bbox_targets in bbox_targets_list])\n', (16595, 16652), False, 'import torch\n'), ((7680, 7765), 'mmdet.core.bbox_overlaps', 'bbox_overlaps', (['pos_decoded_bbox_preds', 'pos_decoded_target_preds'], {'is_aligned': '(True)'}), '(pos_decoded_bbox_preds, pos_decoded_target_preds, is_aligned=True\n )\n', (7693, 7765), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((10186, 10241), 'torch.equal', 'torch.equal', (['obj_j_embedding_mean', 'obj_k_embedding_mean'], {}), '(obj_j_embedding_mean, obj_k_embedding_mean)\n', (10197, 10241), False, 'import torch\n'), ((8729, 8761), 'torch.bincount', 'torch.bincount', (['instance_counter'], {}), '(instance_counter)\n', (8743, 8761), False, 'import torch\n'), ((9564, 9601), 'torch.zeros_like', 'torch.zeros_like', (['obj_embedding_preds'], {}), '(obj_embedding_preds)\n', (9580, 9601), False, 'import torch\n'), ((10351, 10405), 'torch.abs', 'torch.abs', (['(obj_k_embedding_mean - obj_j_embedding_mean)'], {}), '(obj_k_embedding_mean - obj_j_embedding_mean)\n', (10360, 10405), False, 'import torch\n'), ((10464, 10503), 'torch.zeros', 'torch.zeros', (['(1)'], {'device': 'push_dist.device'}), '(1, device=push_dist.device)\n', (10475, 10503), False, 'import torch\n')] |
from itertools import (
chain,
)
import logging
from azul import (
config,
require,
)
from azul.logging import (
configure_script_logging,
)
from azul.terra import (
TDRClient,
TDRSourceName,
)
log = logging.getLogger(__name__)
def main():
configure_script_logging(log)
tdr = TDRClient()
tdr.register_with_sam()
tdr_catalogs = (
catalog.name
for catalog in config.catalogs.values()
if catalog.plugins['repository'] == 'tdr'
)
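    # verify configured project, API access and BigQuery access for every distinct TDR source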
for source in set(chain(*map(config.tdr_sources, tdr_catalogs))):
source = TDRSourceName.parse(source)
api_project = tdr.lookup_source_project(source)
require(api_project == source.project,
'Actual Google project of TDR source differs from configured '
'one',
api_project, source)
tdr.check_api_access(source)
tdr.check_bigquery_access(source)
if __name__ == '__main__':
main()
| [
"logging.getLogger",
"azul.logging.configure_script_logging",
"azul.require",
"azul.terra.TDRClient",
"azul.terra.TDRSourceName.parse",
"azul.config.catalogs.values"
]
| [((226, 253), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (243, 253), False, 'import logging\n'), ((272, 301), 'azul.logging.configure_script_logging', 'configure_script_logging', (['log'], {}), '(log)\n', (296, 301), False, 'from azul.logging import configure_script_logging\n'), ((312, 323), 'azul.terra.TDRClient', 'TDRClient', ([], {}), '()\n', (321, 323), False, 'from azul.terra import TDRClient, TDRSourceName\n'), ((586, 613), 'azul.terra.TDRSourceName.parse', 'TDRSourceName.parse', (['source'], {}), '(source)\n', (605, 613), False, 'from azul.terra import TDRClient, TDRSourceName\n'), ((678, 812), 'azul.require', 'require', (['(api_project == source.project)', '"""Actual Google project of TDR source differs from configured one"""', 'api_project', 'source'], {}), "(api_project == source.project,\n 'Actual Google project of TDR source differs from configured one',\n api_project, source)\n", (685, 812), False, 'from azul import config, require\n'), ((418, 442), 'azul.config.catalogs.values', 'config.catalogs.values', ([], {}), '()\n', (440, 442), False, 'from azul import config, require\n')] |
"""Base for all Classes.
Base mainly includes the description fields
"""
import logging
from typing import Optional
from .log import Log # type: ignore
class BaseLog:
"""
    Set up base logging.
Use this as the base class for all your work. This adds a logging root.
"""
def __init__(self, log_root: Optional[Log] = None):
"""Set the Root Log."""
# since we have no log otherwise
self.log_root = log_root
self.log = (
log_root.log_class(self)
if log_root is not None
else logging.getLogger(__name__)
)
self.log.debug(f"{self=}")
| [
"logging.getLogger"
]
| [((563, 590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'import logging\n')] |
import subprocess
import re
programs = input('Separate the programs with a space: ').split()
secure_pattern = r'^[\w\d]+$'  # whole name must be alphanumeric/underscore
for program in programs:
if not re.match(secure_pattern, program):
print("Sorry we can't check that program")
continue
    process = subprocess.run(
['which', program], capture_output=True, text=True)
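    # 'which' exits with status 0 only when the binary exists on the PATH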
if process.returncode == 0:
print(f'The program "{program}" is installed')
print(f'The location of the binary is: {process.stdout}')
else:
print(f'Sorry the {program} is not installed')
print(process.stderr)
print('\n') | [
"subprocess.run",
"re.match"
]
| [((276, 342), 'subprocess.run', 'subprocess.run', (["['which', program]"], {'capture_output': '(True)', 'text': '(True)'}), "(['which', program], capture_output=True, text=True)\n", (290, 342), False, 'import subprocess\n'), ((157, 190), 're.match', 're.match', (['secure_pattern', 'program'], {}), '(secure_pattern, program)\n', (165, 190), False, 'import re\n')] |
#!/usr/bin/env python
""" Provides the primary interface into the library """
from __future__ import annotations
import asyncio
import logging
from typing import Callable, Optional, Union
from . import utils
from . import controllers
from .networking.connection import Connection
from .networking.types import SSDPResponse
from .networking.errors import ChannelUnavailableError
from .models.heos import HEOSEvent
from .models.system import AccountStatus
logger = logging.getLogger('pytheos')
class Pytheos:
""" Pytheos interface """
DEFAULT_PORT = 1255
@staticmethod
def check_channel_availability(channel: Connection):
""" Checks to make sure that the provided channel is available.
:param channel: Channel connection
:raises: ChannelUnavailableError
:return: None
"""
if not channel or not channel.connected:
raise ChannelUnavailableError()
@property
def log_level(self):
return logger.level
@log_level.setter
def log_level(self, value):
logger.setLevel(value)
@property
def connected(self):
return self._connected
@property
def signed_in(self):
return self._account_status == AccountStatus.SignedIn
@property
def username(self):
return self._account_username
def __init__(self, server: Union[str, SSDPResponse]=None, port: Optional[int]=DEFAULT_PORT):
""" Constructor
:param server: Server hostname or IP
:param port: Port number
"""
if isinstance(server, SSDPResponse):
server = utils.extract_host(server.location)
self.server: str = server
self.port: int = port
self._command_channel = Connection()
self._event_channel = Connection()
self._event_queue = asyncio.Queue()
self._event_task: Optional[asyncio.Task] = None
self._event_processor: Optional[asyncio.Task] = None
self._connected: bool = False
self._event_subscriptions: dict = {}
self._receive_events: bool = True
self._account_status: Optional[AccountStatus] = None
self._account_username: Optional[str] = None
self._players: list = []
self._groups: dict = {} # FIXME?: Not sure I like having this as a dict.
self._sources: dict = {} # FIXME?: Not sure I like having this as a dict.
self.api: Connection = self._command_channel
self._init_internal_event_handlers()
def __repr__(self):
return f'<Pytheos(server={self.server}, port={self.port})>'
def __enter__(self):
if not self._connected:
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._connected:
self.close()
async def connect(self, enable_event_connection: bool=True, refresh: bool=True) -> Pytheos:
""" Connect to our HEOS device.
:param enable_event_connection: Enables establishing an additional connection for system events
:param refresh: Determines if the system state should be automatically refreshed
:return: self
"""
logger.info(f'Connecting to {self.server}:{self.port}')
await self._command_channel.connect(self.server, self.port)
self._connected = True
self._receive_events = enable_event_connection
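        # optionally open a second, dedicated connection used only for asynchronous change events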
if self._receive_events:
await self._event_channel.connect(self.server, self.port, deduplicate=True)
await self.enable_event_reception(True)
loop = asyncio.get_running_loop()
self._event_task = loop.create_task(self._listen_for_events())
self._event_processor = loop.create_task(self._process_events())
if refresh:
await self.refresh()
return self
async def _set_register_for_change_events(self, value: bool):
""" Notifies HEOS that we want event messages on the event channel.
:param value: True or False
:return: None
"""
await self._event_channel.system.register_for_change_events(value)
def close(self):
""" Close the connection to our HEOS device
:return: None
"""
logger.info(f'Closing connection to {self.server}:{self.port}')
if self._event_task:
self._event_task.cancel()
if self._event_processor:
self._event_processor.cancel()
self._connected = False
def subscribe(self, event_name: str, callback: Callable):
""" Subscribe a callback function to a specific event
:param event_name: Event name
:param callback: Callback function
:return: None
"""
# FIXME: Change event_name to an enum
if self._event_subscriptions.get(event_name) is None:
self._event_subscriptions[event_name] = []
self._event_subscriptions[event_name].append(callback)
async def refresh(self):
""" Refreshes internal information from the HEOS system.
:return: None
"""
await self.check_account()
await self.get_players()
await self.get_groups()
await self.get_sources()
async def reboot(self):
""" Instructs the system to reboot.
:return: None
"""
await self.api.system.reboot()
async def check_account(self) -> tuple:
""" Checks if the system is logged into HEOS and returns the status and account name, if available.
:return: tuple
"""
self._account_status, self._account_username = await self.api.system.check_account()
return self._account_status, self._account_username
async def sign_in(self, username: str, password: str):
""" Signs the system into the HEOS service.
:param username: Username
:param password: Password
:return: None
"""
await self.api.system.sign_in(username, password)
async def sign_out(self):
""" Signs out from the HEOS service.
:return: None
"""
await self.api.system.sign_out()
async def get_players(self):
""" Retrieves a mapping of IDs to Players present in the HEOS system.
:return: list
"""
self._players = [controllers.Player(self, player) for player in await self.api.player.get_players()]
return self._players
async def get_group(self, group_id):
""" Retrieve a specific group by ID.
:param group_id: Group ID
:return: PytheosGroup
"""
groups = await self.get_groups()
return groups.get(group_id)
async def get_groups(self):
""" Retrieves a mapping of IDs to Groups present in the HEOS system.
:return: dict
"""
self._groups = {}
for group in await self.api.group.get_groups():
self._groups[group.group_id] = controllers.Group(self, group)
return self._groups
async def get_sources(self):
""" Retrieves a mapping of IDs to Sources present in the HEOS system.
:return:
"""
self._sources = {}
for source in await self.api.browse.get_music_sources():
self._sources[source.source_id] = controllers.Source(self, source)
return self._sources
def is_receiving_events(self):
""" Retrieves whether or not we're receiving events.
:return: bool
"""
return self._receive_events
async def enable_event_reception(self, value):
""" Enables or disables event reception.
:param value: True or False
:return: None
"""
self._receive_events = value
await self._set_register_for_change_events(value)
async def _listen_for_events(self):
""" Async task that reads messages from the event channel and adds them to our event queue for
later processing.
:return: None
"""
while True:
results = await self._event_channel.read_message()
if results:
event = HEOSEvent(results)
logger.debug(f"Received event: {event!r}")
await self._event_queue.put(event)
await asyncio.sleep(0.5)
async def _process_events(self):
""" Async task that processes events that originate from the event channel.
:return: None
"""
while True:
event = await self._event_queue.get()
if event:
logger.debug(f'Processing event: {event!r}')
await self._event_handler(event)
await asyncio.sleep(0.5)
async def _event_handler(self, event: HEOSEvent):
""" Internal event handler
:param event: HEOS Event
:return: None
"""
loop = asyncio.get_running_loop()
for callback in self._event_subscriptions.get(event.command, []):
logger.debug(f'Calling registered callback {callback} for event {event!r}')
loop.create_task(callback(event))
def _init_internal_event_handlers(self):
""" Initialize the internal event handlers
:return: None
"""
# FIXME: Meh, do something better with this.
internal_handler_map = {
# 'event/sources_changed': self._handle_sources_changed,
# 'event/players_changed': self._handle_players_changed,
# 'event/groups_changed': self._handle_groups_changed,
# 'event/player_state_changed': self._handle_player_state_changed,
# 'event/player_now_playing_changed': self._handle_now_playing_changed,
# 'event/player_now_playing_progress': self._handle_now_playing_progress,
# 'event/player_playback_error': self._handle_playback_error,
# 'event/player_queue_changed': self._handle_queue_changed,
# 'event/player_volume_changed': self._handle_volume_changed,
# 'event/repeat_mode_changed': self._handle_repeat_mode_changed,
# 'event/shuffle_mode_changed': self._handle_shuffle_mode_changed,
# 'event/group_volume_changed': self._handle_group_volume_changed,
# 'event/user_changed': self._handle_user_changed,
}
for event, callback in internal_handler_map.items():
self.subscribe(event, callback)
def _handle_sources_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_players_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_groups_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_player_state_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_progress(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_playback_error(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_queue_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_repeat_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_shuffle_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_group_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_user_changed(self, event: HEOSEvent):
raise NotImplementedError()
async def connect(host: Union[SSDPResponse, str], port: int=Pytheos.DEFAULT_PORT) -> Pytheos:
""" Connect to the provided host and return a context manager for use with the connection.
:param host: Host to connect to
:param port: Port to connect to
:raises: ValueError
:return: The Pytheos instance
"""
if isinstance(host, SSDPResponse):
host = utils.extract_host(host.location)
conn = Pytheos(host, port)
return await conn.connect()
| [
"logging.getLogger",
"asyncio.Queue",
"asyncio.get_running_loop",
"asyncio.sleep"
]
| [((467, 495), 'logging.getLogger', 'logging.getLogger', (['"""pytheos"""'], {}), "('pytheos')\n", (484, 495), False, 'import logging\n'), ((1828, 1843), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (1841, 1843), False, 'import asyncio\n'), ((8845, 8871), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (8869, 8871), False, 'import asyncio\n'), ((3585, 3611), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (3609, 3611), False, 'import asyncio\n'), ((8256, 8274), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (8269, 8274), False, 'import asyncio\n'), ((8653, 8671), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (8666, 8671), False, 'import asyncio\n')] |
import time
import torch
import warnings
import numpy as np
from tianshou.env import BaseVectorEnv
from tianshou.data import Batch, ReplayBuffer,\
ListReplayBuffer
from tianshou.utils import MovAvg
class Collector(object):
"""docstring for Collector"""
def __init__(self, policy, env, buffer=None, stat_size=100):
super().__init__()
self.env = env
self.env_num = 1
self.collect_step = 0
self.collect_episode = 0
self.collect_time = 0
if buffer is None:
self.buffer = ReplayBuffer(100)
else:
self.buffer = buffer
self.policy = policy
self.process_fn = policy.process_fn
self._multi_env = isinstance(env, BaseVectorEnv)
self._multi_buf = False # True if buf is a list
# need multiple cache buffers only if storing in one buffer
self._cached_buf = []
if self._multi_env:
self.env_num = len(env)
if isinstance(self.buffer, list):
assert len(self.buffer) == self.env_num, \
                'The number of data buffers does not match the number of ' \
                'input environments.'
self._multi_buf = True
elif isinstance(self.buffer, ReplayBuffer):
self._cached_buf = [
ListReplayBuffer() for _ in range(self.env_num)]
else:
raise TypeError('The buffer in data collector is invalid!')
self.reset_env()
self.reset_buffer()
# state over batch is either a list, an np.ndarray, or a torch.Tensor
self.state = None
self.step_speed = MovAvg(stat_size)
self.episode_speed = MovAvg(stat_size)
def reset_buffer(self):
if self._multi_buf:
for b in self.buffer:
b.reset()
else:
self.buffer.reset()
def get_env_num(self):
return self.env_num
def reset_env(self):
self._obs = self.env.reset()
self._act = self._rew = self._done = self._info = None
if self._multi_env:
self.reward = np.zeros(self.env_num)
self.length = np.zeros(self.env_num)
else:
self.reward, self.length = 0, 0
for b in self._cached_buf:
b.reset()
def seed(self, seed=None):
if hasattr(self.env, 'seed'):
return self.env.seed(seed)
def render(self, **kwargs):
if hasattr(self.env, 'render'):
return self.env.render(**kwargs)
def close(self):
if hasattr(self.env, 'close'):
self.env.close()
def _make_batch(self, data):
if isinstance(data, np.ndarray):
return data[None]
else:
return np.array([data])
def collect(self, n_step=0, n_episode=0, render=0):
warning_count = 0
if not self._multi_env:
n_episode = np.sum(n_episode)
start_time = time.time()
assert sum([(n_step != 0), (n_episode != 0)]) == 1, \
"One and only one collection number specification permitted!"
cur_step = 0
cur_episode = np.zeros(self.env_num) if self._multi_env else 0
reward_sum = 0
length_sum = 0
while True:
if warning_count >= 100000:
warnings.warn(
'There are already many steps in an episode. '
'You should add a time limitation to your environment!',
Warning)
if self._multi_env:
batch_data = Batch(
obs=self._obs, act=self._act, rew=self._rew,
done=self._done, obs_next=None, info=self._info)
else:
batch_data = Batch(
obs=self._make_batch(self._obs),
act=self._make_batch(self._act),
rew=self._make_batch(self._rew),
done=self._make_batch(self._done),
obs_next=None,
info=self._make_batch(self._info))
result = self.policy(batch_data, self.state)
self.state = result.state if hasattr(result, 'state') else None
if isinstance(result.act, torch.Tensor):
self._act = result.act.detach().cpu().numpy()
elif not isinstance(self._act, np.ndarray):
self._act = np.array(result.act)
else:
self._act = result.act
obs_next, self._rew, self._done, self._info = self.env.step(
self._act if self._multi_env else self._act[0])
if render > 0:
self.env.render()
time.sleep(render)
self.length += 1
self.reward += self._rew
if self._multi_env:
for i in range(self.env_num):
data = {
'obs': self._obs[i], 'act': self._act[i],
'rew': self._rew[i], 'done': self._done[i],
'obs_next': obs_next[i], 'info': self._info[i]}
if self._cached_buf:
warning_count += 1
self._cached_buf[i].add(**data)
elif self._multi_buf:
warning_count += 1
self.buffer[i].add(**data)
cur_step += 1
else:
warning_count += 1
self.buffer.add(**data)
cur_step += 1
if self._done[i]:
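                        # credit the finished episode unless this env has already met its per-env episode quota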
if n_step != 0 or np.isscalar(n_episode) or \
cur_episode[i] < n_episode[i]:
cur_episode[i] += 1
reward_sum += self.reward[i]
length_sum += self.length[i]
if self._cached_buf:
cur_step += len(self._cached_buf[i])
self.buffer.update(self._cached_buf[i])
self.reward[i], self.length[i] = 0, 0
if self._cached_buf:
self._cached_buf[i].reset()
if isinstance(self.state, list):
self.state[i] = None
elif self.state is not None:
if isinstance(self.state[i], dict):
self.state[i] = {}
else:
self.state[i] = self.state[i] * 0
if isinstance(self.state, torch.Tensor):
# remove ref count in pytorch (?)
self.state = self.state.detach()
if sum(self._done):
obs_next = self.env.reset(np.where(self._done)[0])
if n_episode != 0:
if isinstance(n_episode, list) and \
(cur_episode >= np.array(n_episode)).all() or \
np.isscalar(n_episode) and \
cur_episode.sum() >= n_episode:
break
else:
self.buffer.add(
self._obs, self._act[0], self._rew,
self._done, obs_next, self._info)
cur_step += 1
if self._done:
cur_episode += 1
reward_sum += self.reward
length_sum += self.length
self.reward, self.length = 0, 0
self.state = None
obs_next = self.env.reset()
if n_episode != 0 and cur_episode >= n_episode:
break
if n_step != 0 and cur_step >= n_step:
break
self._obs = obs_next
self._obs = obs_next
if self._multi_env:
cur_episode = sum(cur_episode)
duration = time.time() - start_time
self.step_speed.add(cur_step / duration)
self.episode_speed.add(cur_episode / duration)
self.collect_step += cur_step
self.collect_episode += cur_episode
self.collect_time += duration
if isinstance(n_episode, list):
n_episode = np.sum(n_episode)
else:
n_episode = max(cur_episode, 1)
return {
'n/ep': cur_episode,
'n/st': cur_step,
'v/st': self.step_speed.get(),
'v/ep': self.episode_speed.get(),
'rew': reward_sum / n_episode,
'len': length_sum / n_episode,
}
def sample(self, batch_size):
if self._multi_buf:
if batch_size > 0:
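                # sample each buffer with probability proportional to its current length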
lens = [len(b) for b in self.buffer]
total = sum(lens)
batch_index = np.random.choice(
total, batch_size, p=np.array(lens) / total)
else:
batch_index = np.array([])
batch_data = Batch()
for i, b in enumerate(self.buffer):
cur_batch = (batch_index == i).sum()
if batch_size and cur_batch or batch_size <= 0:
batch, indice = b.sample(cur_batch)
batch = self.process_fn(batch, b, indice)
batch_data.append(batch)
else:
batch_data, indice = self.buffer.sample(batch_size)
batch_data = self.process_fn(batch_data, self.buffer, indice)
return batch_data
| [
"tianshou.utils.MovAvg",
"numpy.isscalar",
"numpy.where",
"tianshou.data.ReplayBuffer",
"time.sleep",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"tianshou.data.Batch",
"tianshou.data.ListReplayBuffer",
"warnings.warn",
"time.time"
]
| [((1660, 1677), 'tianshou.utils.MovAvg', 'MovAvg', (['stat_size'], {}), '(stat_size)\n', (1666, 1677), False, 'from tianshou.utils import MovAvg\n'), ((1707, 1724), 'tianshou.utils.MovAvg', 'MovAvg', (['stat_size'], {}), '(stat_size)\n', (1713, 1724), False, 'from tianshou.utils import MovAvg\n'), ((2961, 2972), 'time.time', 'time.time', ([], {}), '()\n', (2970, 2972), False, 'import time\n'), ((550, 567), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(100)'], {}), '(100)\n', (562, 567), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((2124, 2146), 'numpy.zeros', 'np.zeros', (['self.env_num'], {}), '(self.env_num)\n', (2132, 2146), True, 'import numpy as np\n'), ((2173, 2195), 'numpy.zeros', 'np.zeros', (['self.env_num'], {}), '(self.env_num)\n', (2181, 2195), True, 'import numpy as np\n'), ((2766, 2782), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (2774, 2782), True, 'import numpy as np\n'), ((2922, 2939), 'numpy.sum', 'np.sum', (['n_episode'], {}), '(n_episode)\n', (2928, 2939), True, 'import numpy as np\n'), ((3152, 3174), 'numpy.zeros', 'np.zeros', (['self.env_num'], {}), '(self.env_num)\n', (3160, 3174), True, 'import numpy as np\n'), ((8048, 8059), 'time.time', 'time.time', ([], {}), '()\n', (8057, 8059), False, 'import time\n'), ((8361, 8378), 'numpy.sum', 'np.sum', (['n_episode'], {}), '(n_episode)\n', (8367, 8378), True, 'import numpy as np\n'), ((9082, 9089), 'tianshou.data.Batch', 'Batch', ([], {}), '()\n', (9087, 9089), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((3323, 3456), 'warnings.warn', 'warnings.warn', (['"""There are already many steps in an episode. You should add a time limitation to your environment!"""', 'Warning'], {}), "(\n 'There are already many steps in an episode. You should add a time limitation to your environment!'\n , Warning)\n", (3336, 3456), False, 'import warnings\n'), ((3572, 3675), 'tianshou.data.Batch', 'Batch', ([], {'obs': 'self._obs', 'act': 'self._act', 'rew': 'self._rew', 'done': 'self._done', 'obs_next': 'None', 'info': 'self._info'}), '(obs=self._obs, act=self._act, rew=self._rew, done=self._done,\n obs_next=None, info=self._info)\n', (3577, 3675), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((4695, 4713), 'time.sleep', 'time.sleep', (['render'], {}), '(render)\n', (4705, 4713), False, 'import time\n'), ((9044, 9056), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9052, 9056), True, 'import numpy as np\n'), ((4403, 4423), 'numpy.array', 'np.array', (['result.act'], {}), '(result.act)\n', (4411, 4423), True, 'import numpy as np\n'), ((1334, 1352), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (1350, 1352), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((5642, 5664), 'numpy.isscalar', 'np.isscalar', (['n_episode'], {}), '(n_episode)\n', (5653, 5664), True, 'import numpy as np\n'), ((6904, 6924), 'numpy.where', 'np.where', (['self._done'], {}), '(self._done)\n', (6912, 6924), True, 'import numpy as np\n'), ((7125, 7147), 'numpy.isscalar', 'np.isscalar', (['n_episode'], {}), '(n_episode)\n', (7136, 7147), True, 'import numpy as np\n'), ((8972, 8986), 'numpy.array', 'np.array', (['lens'], {}), '(lens)\n', (8980, 8986), True, 'import numpy as np\n'), ((7065, 7084), 'numpy.array', 'np.array', (['n_episode'], {}), '(n_episode)\n', (7073, 7084), True, 'import numpy as np\n')] |
from drink_partners.contrib.samples import partner_bar_legal
class TestSearchPartner:
async def test_should_return_bad_request_for_str_coordinates(
self,
client,
partner_search_with_str_coordinates_url
):
async with client.get(partner_search_with_str_coordinates_url) as response: # noqa
assert response.status == 400
response_json = await response.json()
assert response_json['error_code'] == 'bad_request'
assert response_json['error_message'] == (
'Invalid coordinate longitude:a latitude:a'
)
async def test_should_return_nearest_partner_for_coordinate(
self,
client,
partner_search_coordinates_url,
save_partners
):
async with client.get(partner_search_coordinates_url) as response: # noqa
assert response.status == 200
response_json = await response.json()
assert response_json == partner_bar_legal()
async def test_should_return_not_found_when_no_partner_covers_coordinate(
self,
client,
partner_search_coordinates_url
):
async with client.get(partner_search_coordinates_url) as response: # noqa
assert response.status == 404
response_json = await response.json()
assert response_json['error_code'] == 'not_found'
assert response_json['error_message'] == (
'Partners not found covering area for '
'latitude:-43.36556 longitude:-22.99669'
)
| [
"drink_partners.contrib.samples.partner_bar_legal"
]
| [((995, 1014), 'drink_partners.contrib.samples.partner_bar_legal', 'partner_bar_legal', ([], {}), '()\n', (1012, 1014), False, 'from drink_partners.contrib.samples import partner_bar_legal\n')] |
import multiprocessing
from typing import List, Optional
import numpy as np
from ..util import dill_for_apply
class ImageSequenceWriter:
def __init__(self, pattern, writer, *, max_index=None):
if type(pattern) is not str:
raise ValueError("Pattern must be string")
if pattern.format(1, index="1") == pattern.format(2, index="2"):
raise ValueError("Pattern must use {} or {index}")
self._pattern = pattern
self._writer = writer
self._max_index = max_index
self._index = 1
@property
def next_filename(self):
index = str(self._index)
if self._max_index:
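            # zero-pad the index to the width of the largest expected index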
index = "{:0{}d}".format(self._index, len(str(self._max_index)))
return self._pattern.format(self._index, index=index)
def _save(self, filename: str, image: np.ndarray):
self._writer(filename, image)
def save(self, image: np.ndarray):
self._save(self.next_filename, image)
self._index += 1
def finish(self):
pass
class MultiprocessingImageSequenceWriter(ImageSequenceWriter):
"""Image sequence writer that uses multiprocessing to save several images in
parallel.
This falls apart for large objects, as multiprocessing pickles them and pipes them
into the subprocesses.
"""
def __init__(self, *args, max_workers=None, max_waiting=None, **kwargs):
super().__init__(*args, **kwargs)
if max_workers is None:
max_workers = multiprocessing.cpu_count() - 1
ctx = multiprocessing.get_context("spawn")
self._pool = ctx.Pool(max_workers)
if max_waiting is not None:
# Semaphore's value is number of slots available for tasks to wait in
self._sem = ctx.Semaphore(
max_waiting
) # type: Optional[multiprocessing.synchronize.Semaphore]
else:
self._sem = None
self._results = [] # type: List[multiprocessing.pool.AsyncResult]
def __del__(self):
self.terminate()
def _save(self, filename: str, image: np.ndarray):
# Limit number of waiting tasks
if self._sem:
self._sem.acquire()
def callback(v):
assert self._sem is not None
self._sem.release()
else:
callback = None # type: ignore
args = (self._writer, (filename, image))
if dill_for_apply:
# Use dill instead of pickle, and make sure writer returns the filename
_writer = self._writer # Exclude self from capture to avoid dilling _pool
args = dill_for_apply(lambda f, i: _writer(f, i) or f, filename, image)
result = self._pool.apply_async(
*args, callback=callback, error_callback=callback,
)
self._results.append(result)
def terminate(self):
self._pool.terminate()
self._pool.join()
def finish(self, result_handler=None):
try:
# self._pool.close()
for result in self._results:
filename = result.get()
if result_handler is not None:
result_handler(filename)
self._pool.close()
except KeyboardInterrupt:
self._pool.terminate()
finally:
self._pool.join()
| [
"multiprocessing.get_context",
"multiprocessing.cpu_count"
]
| [((1543, 1579), 'multiprocessing.get_context', 'multiprocessing.get_context', (['"""spawn"""'], {}), "('spawn')\n", (1570, 1579), False, 'import multiprocessing\n'), ((1497, 1524), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1522, 1524), False, 'import multiprocessing\n')] |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Drawable import Drawable
def nodeAttributes():
"""return a list of valid attributes for Node"""
return Node._validAttributes.keys()
class Node(Drawable):
def id(self): return self._id
def __init__(self, id):
Drawable.__init__(self)
self._id = id
return
_validAttributes = {
"color" : None,
"fontcolor" : None,
"fontname" : None,
"fontsize" : None,
"height" : None,
"label" : None,
"layer" : None,
"shape" : None,
"shapefile" : None,
"style" : None,
"width" : None
}
# version
__id__ = "$Id$"
#
# End of file
| [
"Drawable.Drawable.__init__"
]
| [((618, 641), 'Drawable.Drawable.__init__', 'Drawable.__init__', (['self'], {}), '(self)\n', (635, 641), False, 'from Drawable import Drawable\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pprint
import random
import wx
from cairis.core.armid import *
from cairis.core.Borg import Borg
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
def riskColourCode(riskScore):
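  # map a 1-9+ risk score onto a light-to-dark red colour ramp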
if (riskScore <= 1):
return '#fef2ec'
elif (riskScore == 2):
return '#fcd9c8'
elif (riskScore == 3):
return '#f7ac91'
elif (riskScore == 4):
return '#f67e61'
elif (riskScore == 5):
return '#f2543d'
elif (riskScore == 6):
return '#e42626'
elif (riskScore == 7):
return '#b9051a'
elif (riskScore == 8):
return '#900014'
else:
return '#52000D'
class RiskScatterPanel(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent,RISKSCATTER_ID)
b = Borg()
self.dbProxy = b.dbProxy
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigCanvas(self, -1, self.fig)
self.axes = self.fig.add_subplot(111,xlabel='Severity',ylabel='Likelihood',autoscale_on=False)
self.axes.set_xticklabels(['Marginal','Critical','Catastrophic'])
self.axes.set_yticks([0,1,2,3,4,5])
self.toolbar = NavigationToolbar(self.canvas)
envs = self.dbProxy.getDimensionNames('environment')
self.envCombo = wx.ComboBox(self,RISKSCATTER_COMBOENVIRONMENT_ID,envs[0],choices=envs,size=(300,-1),style=wx.CB_DROPDOWN)
self.envCombo.Bind(wx.EVT_COMBOBOX,self.onEnvironmentChange)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.Add(self.envCombo,0, wx.EXPAND)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.vbox)
self.vbox.Fit(self)
self.drawScatter(envs[0])
def drawScatter(self,envName):
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlabel('Severity')
self.axes.set_ylabel('Likelihood')
self.axes.set_xbound(0,4)
self.axes.set_ybound(0,5)
xs,ys,cs = self.dbProxy.riskScatter(envName)
ccs = []
for c in cs:
ccs.append(riskColourCode(c))
if ((len(xs) > 0) and (len(ys) > 0)):
self.axes.scatter(xs,ys,c=ccs,marker='d')
self.canvas.draw()
def onEnvironmentChange(self,evt):
envName = self.envCombo.GetStringSelection()
self.drawScatter(envName)
def on_save_plot(self, event):
fileChoices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(self,message="Save risk scatter",defaultDir=os.getcwd(),defaultFile="scatter.png",wildcard=fileChoices,style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
| [
"matplotlib.backends.backend_wxagg.FigureCanvasWxAgg",
"matplotlib.use",
"cairis.core.Borg.Borg",
"matplotlib.figure.Figure",
"wx.ComboBox",
"wx.BoxSizer",
"os.getcwd",
"matplotlib.backends.backend_wxagg.NavigationToolbar2WxAgg",
"wx.Panel.__init__"
]
| [((932, 955), 'matplotlib.use', 'matplotlib.use', (['"""WXAgg"""'], {}), "('WXAgg')\n", (946, 955), False, 'import matplotlib\n'), ((1619, 1666), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self', 'parent', 'RISKSCATTER_ID'], {}), '(self, parent, RISKSCATTER_ID)\n', (1636, 1666), False, 'import wx\n'), ((1673, 1679), 'cairis.core.Borg.Borg', 'Borg', ([], {}), '()\n', (1677, 1679), False, 'from cairis.core.Borg import Borg\n'), ((1743, 1775), 'matplotlib.figure.Figure', 'Figure', (['(5.0, 4.0)'], {'dpi': 'self.dpi'}), '((5.0, 4.0), dpi=self.dpi)\n', (1749, 1775), False, 'from matplotlib.figure import Figure\n'), ((1794, 1823), 'matplotlib.backends.backend_wxagg.FigureCanvasWxAgg', 'FigCanvas', (['self', '(-1)', 'self.fig'], {}), '(self, -1, self.fig)\n', (1803, 1823), True, 'from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas, NavigationToolbar2WxAgg as NavigationToolbar\n'), ((2052, 2082), 'matplotlib.backends.backend_wxagg.NavigationToolbar2WxAgg', 'NavigationToolbar', (['self.canvas'], {}), '(self.canvas)\n', (2069, 2082), True, 'from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas, NavigationToolbar2WxAgg as NavigationToolbar\n'), ((2161, 2276), 'wx.ComboBox', 'wx.ComboBox', (['self', 'RISKSCATTER_COMBOENVIRONMENT_ID', 'envs[0]'], {'choices': 'envs', 'size': '(300, -1)', 'style': 'wx.CB_DROPDOWN'}), '(self, RISKSCATTER_COMBOENVIRONMENT_ID, envs[0], choices=envs,\n size=(300, -1), style=wx.CB_DROPDOWN)\n', (2172, 2276), False, 'import wx\n'), ((2353, 2377), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (2364, 2377), False, 'import wx\n'), ((3334, 3345), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3343, 3345), False, 'import os\n')] |
# Xlib.ext.xinput -- XInput extension module
#
# Copyright (C) 2012 Outpost Embedded, LLC
# <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
'''
A very incomplete implementation of the XInput extension.
'''
import sys
import array
import struct
# Python 2/3 compatibility.
from six import integer_types
from Xlib.protocol import rq
from Xlib import X
extname = 'XInputExtension'
PropertyDeleted = 0
PropertyCreated = 1
PropertyModified = 2
NotifyNormal = 0
NotifyGrab = 1
NotifyUngrab = 2
NotifyWhileGrabbed = 3
NotifyPassiveGrab = 4
NotifyPassiveUngrab = 5
NotifyAncestor = 0
NotifyVirtual = 1
NotifyInferior = 2
NotifyNonlinear = 3
NotifyNonlinearVirtual = 4
NotifyPointer = 5
NotifyPointerRoot = 6
NotifyDetailNone = 7
GrabtypeButton = 0
GrabtypeKeycode = 1
GrabtypeEnter = 2
GrabtypeFocusIn = 3
GrabtypeTouchBegin = 4
AnyModifier = (1 << 31)
AnyButton = 0
AnyKeycode = 0
AsyncDevice = 0
SyncDevice = 1
ReplayDevice = 2
AsyncPairedDevice = 3
AsyncPair = 4
SyncPair = 5
SlaveSwitch = 1
DeviceChange = 2
MasterAdded = (1 << 0)
MasterRemoved = (1 << 1)
SlaveAdded = (1 << 2)
SlaveRemoved = (1 << 3)
SlaveAttached = (1 << 4)
SlaveDetached = (1 << 5)
DeviceEnabled = (1 << 6)
DeviceDisabled = (1 << 7)
AddMaster = 1
RemoveMaster = 2
AttachSlave = 3
DetachSlave = 4
AttachToMaster = 1
Floating = 2
ModeRelative = 0
ModeAbsolute = 1
MasterPointer = 1
MasterKeyboard = 2
SlavePointer = 3
SlaveKeyboard = 4
FloatingSlave = 5
KeyClass = 0
ButtonClass = 1
ValuatorClass = 2
ScrollClass = 3
TouchClass = 8
KeyRepeat = (1 << 16)
AllDevices = 0
AllMasterDevices = 1
DeviceChanged = 1
KeyPress = 2
KeyRelease = 3
ButtonPress = 4
ButtonRelease = 5
Motion = 6
Enter = 7
Leave = 8
FocusIn = 9
FocusOut = 10
HierarchyChanged = 11
PropertyEvent = 12
RawKeyPress = 13
RawKeyRelease = 14
RawButtonPress = 15
RawButtonRelease = 16
RawMotion = 17
DeviceChangedMask = (1 << DeviceChanged)
KeyPressMask = (1 << KeyPress)
KeyReleaseMask = (1 << KeyRelease)
ButtonPressMask = (1 << ButtonPress)
ButtonReleaseMask = (1 << ButtonRelease)
MotionMask = (1 << Motion)
EnterMask = (1 << Enter)
LeaveMask = (1 << Leave)
FocusInMask = (1 << FocusIn)
FocusOutMask = (1 << FocusOut)
HierarchyChangedMask = (1 << HierarchyChanged)
PropertyEventMask = (1 << PropertyEvent)
RawKeyPressMask = (1 << RawKeyPress)
RawKeyReleaseMask = (1 << RawKeyRelease)
RawButtonPressMask = (1 << RawButtonPress)
RawButtonReleaseMask = (1 << RawButtonRelease)
RawMotionMask = (1 << RawMotion)
GrabModeSync = 0
GrabModeAsync = 1
GrabModeTouch = 2
DEVICEID = rq.Card16
DEVICE = rq.Card16
DEVICEUSE = rq.Card8
class FP1616(rq.Int32):
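    # 16.16 fixed-point value: 16 integral bits, 16 fractional bits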
def check_value(self, value):
return int(value * 65536.0)
def parse_value(self, value, display):
return float(value) / float(1 << 16)
class FP3232(rq.ValueField):
structcode = 'lL'
structvalues = 2
def check_value(self, value):
return value
def parse_value(self, value, display):
integral, frac = value
ret = float(integral)
# optimised math.ldexp(float(frac), -32)
ret += float(frac) * (1.0 / (1 << 32))
return ret
class XIQueryVersion(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(47),
rq.RequestLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
rq.Pad(20),
)
def query_version(self):
return XIQueryVersion(
display=self.display,
opcode=self.display.get_extension_major(extname),
major_version=2,
minor_version=0,
)
class Mask(rq.List):
def __init__(self, name):
rq.List.__init__(self, name, rq.Card32, pad=0)
def pack_value(self, val):
mask_seq = array.array(rq.struct_to_array_codes['L'])
if isinstance(val, integer_types):
# We need to build a "binary mask" that (as far as I can tell) is
# encoded in native byte order from end to end. The simple case is
# with a single unsigned 32-bit value, for which we construct an
# array with just one item. For values too big to fit inside 4
# bytes we build a longer array, being careful to maintain native
# byte order across the entire set of values.
if sys.byteorder == 'little':
def fun(val):
mask_seq.insert(0, val)
elif sys.byteorder == 'big':
fun = mask_seq.append
else:
raise AssertionError(sys.byteorder)
while val:
fun(val & 0xFFFFFFFF)
val = val >> 32
else:
mask_seq.extend(val)
return mask_seq.tostring(), len(mask_seq), None
EventMask = rq.Struct(
DEVICE('deviceid'),
rq.LengthOf('mask', 2),
Mask('mask'),
)
class XISelectEvents(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(46),
rq.RequestLength(),
rq.Window('window'),
rq.LengthOf('masks', 2),
rq.Pad(2),
rq.List('masks', EventMask),
)
def select_events(self, event_masks):
'''
select_events(event_masks)
event_masks:
Sequence of (deviceid, mask) pairs, where deviceid is a numerical device
ID, or AllDevices or AllMasterDevices, and mask is either an unsigned
integer or sequence of 32 bits unsigned values
'''
return XISelectEvents(
display=self.display,
opcode=self.display.get_extension_major(extname),
window=self,
masks=event_masks,
)
AnyInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Pad(2),
)
class ButtonMask(object):
def __init__(self, value, length):
self._value = value
self._length = length
def __len__(self):
return self._length
def __getitem__(self, key):
return self._value & (1 << key)
def __str__(self):
return repr(self)
def __repr__(self):
return '0b{value:0{width}b}'.format(value=self._value,
width=self._length)
class ButtonState(rq.ValueField):
structcode = None
def __init__(self, name):
rq.ValueField.__init__(self, name)
def parse_binary_value(self, data, display, length, fmt):
# Mask: bitfield of <length> button states.
mask_len = 4 * ((((length + 7) >> 3) + 3) >> 2)
mask_data = data[:mask_len]
mask_value = 0
for byte in reversed(struct.unpack('={0:d}B'.format(mask_len), mask_data)):
mask_value <<= 8
mask_value |= byte
data = data[mask_len:]
assert (mask_value & 1) == 0
return ButtonMask(mask_value >> 1, length), data
ButtonInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.LengthOf(('state', 'labels'), 2),
ButtonState('state'),
rq.List('labels', rq.Card32),
)
KeyInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.LengthOf('keycodes', 2),
rq.List('keycodes', rq.Card32),
)
ValuatorInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card16('number'),
rq.Card32('label'),
FP3232('min'),
FP3232('max'),
FP3232('value'),
rq.Card32('resolution'),
rq.Card8('mode'),
rq.Pad(3),
)
ScrollInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card16('number'),
rq.Card16('scroll_type'),
rq.Pad(2),
rq.Card32('flags'),
FP3232('increment'),
)
TouchInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card8('mode'),
rq.Card8('num_touches'),
)
INFO_CLASSES = {
KeyClass: KeyInfo,
ButtonClass: ButtonInfo,
ValuatorClass: ValuatorInfo,
ScrollClass: ScrollInfo,
TouchClass: TouchInfo,
}
class ClassInfoClass(object):
structcode = None
def parse_binary(self, data, display):
class_type, length = struct.unpack('=HH', data[:4])
class_struct = INFO_CLASSES.get(class_type, AnyInfo)
class_data, _ = class_struct.parse_binary(data, display)
data = data[length * 4:]
return class_data, data
ClassInfo = ClassInfoClass()
DeviceInfo = rq.Struct(
DEVICEID('deviceid'),
rq.Card16('use'),
rq.Card16('attachment'),
rq.LengthOf('classes', 2),
rq.LengthOf('name', 2),
rq.Bool('enabled'),
rq.Pad(1),
rq.String8('name', 4),
rq.List('classes', ClassInfo),
)
class XIQueryDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(48),
rq.RequestLength(),
DEVICEID('deviceid'),
rq.Pad(2),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.LengthOf('devices', 2),
rq.Pad(22),
rq.List('devices', DeviceInfo),
)
def query_device(self, deviceid):
return XIQueryDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
)
class XIGrabDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(51),
rq.RequestLength(),
rq.Window('grab_window'),
rq.Card32('time'),
rq.Cursor('cursor', (X.NONE, )),
DEVICEID('deviceid'),
rq.Set('grab_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Set('paired_device_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Bool('owner_events'),
rq.Pad(1),
rq.LengthOf('mask', 2),
Mask('mask'),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card8('status'),
rq.Pad(23),
)
def grab_device(self, deviceid, time, grab_mode, paired_device_mode, owner_events, event_mask):
return XIGrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
time=time,
cursor=X.NONE,
grab_mode=grab_mode,
paired_device_mode=paired_device_mode,
owner_events=owner_events,
mask=event_mask,
)
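# Illustrative usage sketch (an editor's assumption, not from the library's
# documentation). After init() registers this function as the window method
# 'xinput_grab_device', an active grab on a hypothetical device id 2 might
# look roughly like:
#
#     window.xinput_grab_device(2, X.CurrentTime,
#                               xinput.GrabModeAsync, xinput.GrabModeAsync,
#                               True, (1 << xinput.ButtonPress))
#
# The reply's 'status' field reports whether the grab succeeded.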
class XIUngrabDevice(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(52),
rq.RequestLength(),
rq.Card32('time'),
DEVICEID('deviceid'),
rq.Pad(2),
)
def ungrab_device(self, deviceid, time):
return XIUngrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
time=time,
deviceid=deviceid,
)
class XIPassiveGrabDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(54),
rq.RequestLength(),
rq.Card32('time'),
rq.Window('grab_window'),
rq.Cursor('cursor', (X.NONE, )),
rq.Card32('detail'),
DEVICEID('deviceid'),
rq.LengthOf('modifiers', 2),
rq.LengthOf('mask', 2),
rq.Set('grab_type', 1, (GrabtypeButton, GrabtypeKeycode, GrabtypeEnter,
GrabtypeFocusIn, GrabtypeTouchBegin)),
rq.Set('grab_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Set('paired_device_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Bool('owner_events'),
rq.Pad(2),
Mask('mask'),
rq.List('modifiers', rq.Card32),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.LengthOf('modifiers', 2),
rq.Pad(22),
rq.List('modifiers', rq.Card32),
)
def passive_grab_device(self, deviceid, time, detail,
grab_type, grab_mode, paired_device_mode,
owner_events, event_mask, modifiers):
return XIPassiveGrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
time=time,
cursor=X.NONE,
detail=detail,
grab_type=grab_type,
grab_mode=grab_mode,
paired_device_mode=paired_device_mode,
owner_events=owner_events,
mask=event_mask,
modifiers=modifiers,
)
def grab_keycode(self, deviceid, time, keycode,
grab_mode, paired_device_mode,
owner_events, event_mask, modifiers):
return passive_grab_device(self, deviceid, time, keycode,
GrabtypeKeycode,
grab_mode, paired_device_mode,
owner_events, event_mask, modifiers)
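# Illustrative usage sketch (an editor's assumption): grab_keycode is a thin
# wrapper that fills in GrabtypeKeycode as the grab type, so a passive grab of
# keycode 38 with no modifiers might look roughly like:
#
#     window.xinput_grab_keycode(xinput.AllDevices, X.CurrentTime, 38,
#                                xinput.GrabModeAsync, xinput.GrabModeAsync,
#                                True, (1 << xinput.KeyPress), [0])
#
# The reply lists any modifier combinations that could not be grabbed.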
class XIPassiveUngrabDevice(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(55),
rq.RequestLength(),
rq.Window('grab_window'),
rq.Card32('detail'),
DEVICEID('deviceid'),
rq.LengthOf('modifiers', 2),
rq.Set('grab_type', 1, (GrabtypeButton, GrabtypeKeycode,
GrabtypeEnter, GrabtypeFocusIn,
GrabtypeTouchBegin)),
rq.Pad(3),
rq.List('modifiers', rq.Card32),
)
def passive_ungrab_device(self, deviceid, detail, grab_type, modifiers):
return XIPassiveUngrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
detail=detail,
grab_type=grab_type,
modifiers=modifiers,
)
def ungrab_keycode(self, deviceid, keycode, modifiers):
return passive_ungrab_device(self, deviceid, keycode,
GrabtypeKeycode, modifiers)
HierarchyInfo = rq.Struct(
DEVICEID('deviceid'),
DEVICEID('attachment'),
DEVICEUSE('type'),
rq.Bool('enabled'),
rq.Pad(2),
rq.Card32('flags'),
)
HierarchyEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.Card32('flags'),
rq.LengthOf('info', 2),
rq.Pad(10),
rq.List('info', HierarchyInfo),
)
ModifierInfo = rq.Struct(
rq.Card32('base_mods'),
rq.Card32('latched_mods'),
rq.Card32('locked_mods'),
rq.Card32('effective_mods'),
)
GroupInfo = rq.Struct(
rq.Card8('base_group'),
rq.Card8('latched_group'),
rq.Card8('locked_group'),
rq.Card8('effective_group'),
)
DeviceEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.Card32('detail'),
rq.Window('root'),
rq.Window('event'),
rq.Window('child'),
FP1616('root_x'),
FP1616('root_y'),
FP1616('event_x'),
FP1616('event_y'),
rq.LengthOf('buttons', 2),
rq.Card16('valulators_len'),
DEVICEID('sourceid'),
rq.Pad(2),
rq.Card32('flags'),
rq.Object('mods', ModifierInfo),
rq.Object('groups', GroupInfo),
ButtonState('buttons'),
)
DeviceChangedEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.LengthOf('classes', 2),
DEVICEID('sourceid'),
rq.Card8('reason'),
rq.Pad(11),
rq.List('classes', ClassInfo),
)
def init(disp, info):
disp.extension_add_method('display', 'xinput_query_version', query_version)
disp.extension_add_method('window', 'xinput_select_events', select_events)
disp.extension_add_method('display', 'xinput_query_device', query_device)
disp.extension_add_method('window', 'xinput_grab_device', grab_device)
disp.extension_add_method('display', 'xinput_ungrab_device', ungrab_device)
disp.extension_add_method('window', 'xinput_grab_keycode', grab_keycode)
disp.extension_add_method('window', 'xinput_ungrab_keycode', ungrab_keycode)
if hasattr(disp,"ge_add_event_data"):
for device_event in (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion):
disp.ge_add_event_data(info.major_opcode, device_event, DeviceEventData)
        disp.ge_add_event_data(info.major_opcode, DeviceChanged, DeviceChangedEventData)
disp.ge_add_event_data(info.major_opcode, HierarchyChanged, HierarchyEventData)
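# End-to-end sketch (an editor's assumption, not part of the library): when a
# Display is created the extension loader calls init() above, after which the
# registered convenience methods become available, e.g.:
#
#     from Xlib import display
#     from Xlib.ext import xinput
#
#     d = display.Display()
#     print(d.xinput_query_version())
#     root = d.screen().root
#     root.xinput_select_events([(xinput.AllDevices, (1 << xinput.Motion))])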
| [
"Xlib.protocol.rq.String8",
"Xlib.protocol.rq.Bool",
"Xlib.protocol.rq.List.__init__",
"Xlib.protocol.rq.ReplyLength",
"Xlib.protocol.rq.Card8",
"Xlib.protocol.rq.Card32",
"Xlib.protocol.rq.Card16",
"Xlib.protocol.rq.RequestLength",
"Xlib.protocol.rq.ValueField.__init__",
"array.array",
"Xlib.protocol.rq.ReplyCode",
"Xlib.protocol.rq.Cursor",
"Xlib.protocol.rq.Window",
"struct.unpack",
"Xlib.protocol.rq.List",
"Xlib.protocol.rq.Pad",
"Xlib.protocol.rq.Set",
"Xlib.protocol.rq.Opcode",
"Xlib.protocol.rq.Object",
"Xlib.protocol.rq.LengthOf"
]
| [((5938, 5960), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""mask"""', '(2)'], {}), "('mask', 2)\n", (5949, 5960), False, 'from Xlib.protocol import rq\n'), ((6787, 6804), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""type"""'], {}), "('type')\n", (6796, 6804), False, 'from Xlib.protocol import rq\n'), ((6811, 6830), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""length"""'], {}), "('length')\n", (6820, 6830), False, 'from Xlib.protocol import rq\n'), ((6837, 6858), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sourceid"""'], {}), "('sourceid')\n", (6846, 6858), False, 'from Xlib.protocol import rq\n'), ((6865, 6874), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(2)'], {}), '(2)\n', (6871, 6874), False, 'from Xlib.protocol import rq\n'), ((8032, 8049), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""type"""'], {}), "('type')\n", (8041, 8049), False, 'from Xlib.protocol import rq\n'), ((8056, 8075), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""length"""'], {}), "('length')\n", (8065, 8075), False, 'from Xlib.protocol import rq\n'), ((8082, 8103), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sourceid"""'], {}), "('sourceid')\n", (8091, 8103), False, 'from Xlib.protocol import rq\n'), ((8110, 8145), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (["('state', 'labels')", '(2)'], {}), "(('state', 'labels'), 2)\n", (8121, 8145), False, 'from Xlib.protocol import rq\n'), ((8179, 8207), 'Xlib.protocol.rq.List', 'rq.List', (['"""labels"""', 'rq.Card32'], {}), "('labels', rq.Card32)\n", (8186, 8207), False, 'from Xlib.protocol import rq\n'), ((8241, 8258), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""type"""'], {}), "('type')\n", (8250, 8258), False, 'from Xlib.protocol import rq\n'), ((8265, 8284), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""length"""'], {}), "('length')\n", (8274, 8284), False, 'from Xlib.protocol import rq\n'), ((8291, 8312), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sourceid"""'], {}), "('sourceid')\n", (8300, 8312), False, 'from Xlib.protocol import rq\n'), ((8319, 8345), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""keycodes"""', '(2)'], {}), "('keycodes', 2)\n", (8330, 8345), False, 'from Xlib.protocol import rq\n'), ((8352, 8382), 'Xlib.protocol.rq.List', 'rq.List', (['"""keycodes"""', 'rq.Card32'], {}), "('keycodes', rq.Card32)\n", (8359, 8382), False, 'from Xlib.protocol import rq\n'), ((8421, 8438), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""type"""'], {}), "('type')\n", (8430, 8438), False, 'from Xlib.protocol import rq\n'), ((8445, 8464), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""length"""'], {}), "('length')\n", (8454, 8464), False, 'from Xlib.protocol import rq\n'), ((8471, 8492), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sourceid"""'], {}), "('sourceid')\n", (8480, 8492), False, 'from Xlib.protocol import rq\n'), ((8499, 8518), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""number"""'], {}), "('number')\n", (8508, 8518), False, 'from Xlib.protocol import rq\n'), ((8525, 8543), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""label"""'], {}), "('label')\n", (8534, 8543), False, 'from Xlib.protocol import rq\n'), ((8612, 8635), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""resolution"""'], {}), "('resolution')\n", (8621, 8635), False, 'from Xlib.protocol import rq\n'), ((8642, 8658), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""mode"""'], {}), "('mode')\n", (8650, 8658), False, 'from Xlib.protocol import rq\n'), ((8665, 8674), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(3)'], {}), '(3)\n', (8671, 8674), False, 'from Xlib.protocol import rq\n'), ((8711, 8728), 
'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""type"""'], {}), "('type')\n", (8720, 8728), False, 'from Xlib.protocol import rq\n'), ((8735, 8754), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""length"""'], {}), "('length')\n", (8744, 8754), False, 'from Xlib.protocol import rq\n'), ((8761, 8782), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sourceid"""'], {}), "('sourceid')\n", (8770, 8782), False, 'from Xlib.protocol import rq\n'), ((8789, 8808), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""number"""'], {}), "('number')\n", (8798, 8808), False, 'from Xlib.protocol import rq\n'), ((8815, 8839), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""scroll_type"""'], {}), "('scroll_type')\n", (8824, 8839), False, 'from Xlib.protocol import rq\n'), ((8846, 8855), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(2)'], {}), '(2)\n', (8852, 8855), False, 'from Xlib.protocol import rq\n'), ((8862, 8880), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""flags"""'], {}), "('flags')\n", (8871, 8880), False, 'from Xlib.protocol import rq\n'), ((8942, 8959), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""type"""'], {}), "('type')\n", (8951, 8959), False, 'from Xlib.protocol import rq\n'), ((8966, 8985), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""length"""'], {}), "('length')\n", (8975, 8985), False, 'from Xlib.protocol import rq\n'), ((8992, 9013), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sourceid"""'], {}), "('sourceid')\n", (9001, 9013), False, 'from Xlib.protocol import rq\n'), ((9020, 9036), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""mode"""'], {}), "('mode')\n", (9028, 9036), False, 'from Xlib.protocol import rq\n'), ((9043, 9066), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""num_touches"""'], {}), "('num_touches')\n", (9051, 9066), False, 'from Xlib.protocol import rq\n'), ((9691, 9707), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""use"""'], {}), "('use')\n", (9700, 9707), False, 'from Xlib.protocol import rq\n'), ((9714, 9737), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""attachment"""'], {}), "('attachment')\n", (9723, 9737), False, 'from Xlib.protocol import rq\n'), ((9744, 9769), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""classes"""', '(2)'], {}), "('classes', 2)\n", (9755, 9769), False, 'from Xlib.protocol import rq\n'), ((9776, 9798), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""name"""', '(2)'], {}), "('name', 2)\n", (9787, 9798), False, 'from Xlib.protocol import rq\n'), ((9805, 9823), 'Xlib.protocol.rq.Bool', 'rq.Bool', (['"""enabled"""'], {}), "('enabled')\n", (9812, 9823), False, 'from Xlib.protocol import rq\n'), ((9830, 9839), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(1)'], {}), '(1)\n', (9836, 9839), False, 'from Xlib.protocol import rq\n'), ((9846, 9867), 'Xlib.protocol.rq.String8', 'rq.String8', (['"""name"""', '(4)'], {}), "('name', 4)\n", (9856, 9867), False, 'from Xlib.protocol import rq\n'), ((9874, 9903), 'Xlib.protocol.rq.List', 'rq.List', (['"""classes"""', 'ClassInfo'], {}), "('classes', ClassInfo)\n", (9881, 9903), False, 'from Xlib.protocol import rq\n'), ((15501, 15519), 'Xlib.protocol.rq.Bool', 'rq.Bool', (['"""enabled"""'], {}), "('enabled')\n", (15508, 15519), False, 'from Xlib.protocol import rq\n'), ((15526, 15535), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(2)'], {}), '(2)\n', (15532, 15535), False, 'from Xlib.protocol import rq\n'), ((15542, 15560), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""flags"""'], {}), "('flags')\n", (15551, 15560), False, 'from Xlib.protocol import rq\n'), ((15634, 15651), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""time"""'], {}), 
"('time')\n", (15643, 15651), False, 'from Xlib.protocol import rq\n'), ((15658, 15676), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""flags"""'], {}), "('flags')\n", (15667, 15676), False, 'from Xlib.protocol import rq\n'), ((15683, 15705), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""info"""', '(2)'], {}), "('info', 2)\n", (15694, 15705), False, 'from Xlib.protocol import rq\n'), ((15712, 15722), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(10)'], {}), '(10)\n', (15718, 15722), False, 'from Xlib.protocol import rq\n'), ((15729, 15759), 'Xlib.protocol.rq.List', 'rq.List', (['"""info"""', 'HierarchyInfo'], {}), "('info', HierarchyInfo)\n", (15736, 15759), False, 'from Xlib.protocol import rq\n'), ((15798, 15820), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""base_mods"""'], {}), "('base_mods')\n", (15807, 15820), False, 'from Xlib.protocol import rq\n'), ((15827, 15852), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""latched_mods"""'], {}), "('latched_mods')\n", (15836, 15852), False, 'from Xlib.protocol import rq\n'), ((15859, 15883), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""locked_mods"""'], {}), "('locked_mods')\n", (15868, 15883), False, 'from Xlib.protocol import rq\n'), ((15890, 15917), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""effective_mods"""'], {}), "('effective_mods')\n", (15899, 15917), False, 'from Xlib.protocol import rq\n'), ((15953, 15975), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""base_group"""'], {}), "('base_group')\n", (15961, 15975), False, 'from Xlib.protocol import rq\n'), ((15982, 16007), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""latched_group"""'], {}), "('latched_group')\n", (15990, 16007), False, 'from Xlib.protocol import rq\n'), ((16014, 16038), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""locked_group"""'], {}), "('locked_group')\n", (16022, 16038), False, 'from Xlib.protocol import rq\n'), ((16045, 16072), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""effective_group"""'], {}), "('effective_group')\n", (16053, 16072), False, 'from Xlib.protocol import rq\n'), ((16141, 16158), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""time"""'], {}), "('time')\n", (16150, 16158), False, 'from Xlib.protocol import rq\n'), ((16165, 16184), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""detail"""'], {}), "('detail')\n", (16174, 16184), False, 'from Xlib.protocol import rq\n'), ((16191, 16208), 'Xlib.protocol.rq.Window', 'rq.Window', (['"""root"""'], {}), "('root')\n", (16200, 16208), False, 'from Xlib.protocol import rq\n'), ((16215, 16233), 'Xlib.protocol.rq.Window', 'rq.Window', (['"""event"""'], {}), "('event')\n", (16224, 16233), False, 'from Xlib.protocol import rq\n'), ((16240, 16258), 'Xlib.protocol.rq.Window', 'rq.Window', (['"""child"""'], {}), "('child')\n", (16249, 16258), False, 'from Xlib.protocol import rq\n'), ((16359, 16384), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""buttons"""', '(2)'], {}), "('buttons', 2)\n", (16370, 16384), False, 'from Xlib.protocol import rq\n'), ((16391, 16418), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""valulators_len"""'], {}), "('valulators_len')\n", (16400, 16418), False, 'from Xlib.protocol import rq\n'), ((16452, 16461), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(2)'], {}), '(2)\n', (16458, 16461), False, 'from Xlib.protocol import rq\n'), ((16468, 16486), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""flags"""'], {}), "('flags')\n", (16477, 16486), False, 'from Xlib.protocol import rq\n'), ((16493, 16524), 'Xlib.protocol.rq.Object', 'rq.Object', (['"""mods"""', 'ModifierInfo'], {}), "('mods', ModifierInfo)\n", (16502, 
16524), False, 'from Xlib.protocol import rq\n'), ((16531, 16561), 'Xlib.protocol.rq.Object', 'rq.Object', (['"""groups"""', 'GroupInfo'], {}), "('groups', GroupInfo)\n", (16540, 16561), False, 'from Xlib.protocol import rq\n'), ((16666, 16683), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""time"""'], {}), "('time')\n", (16675, 16683), False, 'from Xlib.protocol import rq\n'), ((16690, 16715), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""classes"""', '(2)'], {}), "('classes', 2)\n", (16701, 16715), False, 'from Xlib.protocol import rq\n'), ((16749, 16767), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""reason"""'], {}), "('reason')\n", (16757, 16767), False, 'from Xlib.protocol import rq\n'), ((16774, 16784), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(11)'], {}), '(11)\n', (16780, 16784), False, 'from Xlib.protocol import rq\n'), ((16791, 16820), 'Xlib.protocol.rq.List', 'rq.List', (['"""classes"""', 'ClassInfo'], {}), "('classes', ClassInfo)\n", (16798, 16820), False, 'from Xlib.protocol import rq\n'), ((4084, 4102), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""opcode"""'], {}), "('opcode')\n", (4092, 4102), False, 'from Xlib.protocol import rq\n'), ((4113, 4126), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', (['(47)'], {}), '(47)\n', (4122, 4126), False, 'from Xlib.protocol import rq\n'), ((4137, 4155), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ([], {}), '()\n', (4153, 4155), False, 'from Xlib.protocol import rq\n'), ((4166, 4192), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""major_version"""'], {}), "('major_version')\n", (4175, 4192), False, 'from Xlib.protocol import rq\n'), ((4203, 4229), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""minor_version"""'], {}), "('minor_version')\n", (4212, 4229), False, 'from Xlib.protocol import rq\n'), ((4276, 4290), 'Xlib.protocol.rq.ReplyCode', 'rq.ReplyCode', ([], {}), '()\n', (4288, 4290), False, 'from Xlib.protocol import rq\n'), ((4301, 4310), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(1)'], {}), '(1)\n', (4307, 4310), False, 'from Xlib.protocol import rq\n'), ((4321, 4349), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sequence_number"""'], {}), "('sequence_number')\n", (4330, 4349), False, 'from Xlib.protocol import rq\n'), ((4360, 4376), 'Xlib.protocol.rq.ReplyLength', 'rq.ReplyLength', ([], {}), '()\n', (4374, 4376), False, 'from Xlib.protocol import rq\n'), ((4387, 4413), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""major_version"""'], {}), "('major_version')\n", (4396, 4413), False, 'from Xlib.protocol import rq\n'), ((4424, 4450), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""minor_version"""'], {}), "('minor_version')\n", (4433, 4450), False, 'from Xlib.protocol import rq\n'), ((4461, 4471), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(20)'], {}), '(20)\n', (4467, 4471), False, 'from Xlib.protocol import rq\n'), ((4761, 4807), 'Xlib.protocol.rq.List.__init__', 'rq.List.__init__', (['self', 'name', 'rq.Card32'], {'pad': '(0)'}), '(self, name, rq.Card32, pad=0)\n', (4777, 4807), False, 'from Xlib.protocol import rq\n'), ((4864, 4906), 'array.array', 'array.array', (["rq.struct_to_array_codes['L']"], {}), "(rq.struct_to_array_codes['L'])\n", (4875, 4906), False, 'import array\n'), ((6059, 6077), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""opcode"""'], {}), "('opcode')\n", (6067, 6077), False, 'from Xlib.protocol import rq\n'), ((6088, 6101), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', (['(46)'], {}), '(46)\n', (6097, 6101), False, 'from Xlib.protocol import rq\n'), ((6112, 6130), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ([], {}), '()\n', 
(6128, 6130), False, 'from Xlib.protocol import rq\n'), ((6141, 6160), 'Xlib.protocol.rq.Window', 'rq.Window', (['"""window"""'], {}), "('window')\n", (6150, 6160), False, 'from Xlib.protocol import rq\n'), ((6171, 6194), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""masks"""', '(2)'], {}), "('masks', 2)\n", (6182, 6194), False, 'from Xlib.protocol import rq\n'), ((6205, 6214), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(2)'], {}), '(2)\n', (6211, 6214), False, 'from Xlib.protocol import rq\n'), ((6225, 6252), 'Xlib.protocol.rq.List', 'rq.List', (['"""masks"""', 'EventMask'], {}), "('masks', EventMask)\n", (6232, 6252), False, 'from Xlib.protocol import rq\n'), ((7454, 7488), 'Xlib.protocol.rq.ValueField.__init__', 'rq.ValueField.__init__', (['self', 'name'], {}), '(self, name)\n', (7476, 7488), False, 'from Xlib.protocol import rq\n'), ((9374, 9404), 'struct.unpack', 'struct.unpack', (['"""=HH"""', 'data[:4]'], {}), "('=HH', data[:4])\n", (9387, 9404), False, 'import struct\n'), ((9985, 10003), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""opcode"""'], {}), "('opcode')\n", (9993, 10003), False, 'from Xlib.protocol import rq\n'), ((10014, 10027), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', (['(48)'], {}), '(48)\n', (10023, 10027), False, 'from Xlib.protocol import rq\n'), ((10038, 10056), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ([], {}), '()\n', (10054, 10056), False, 'from Xlib.protocol import rq\n'), ((10098, 10107), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(2)'], {}), '(2)\n', (10104, 10107), False, 'from Xlib.protocol import rq\n'), ((10152, 10166), 'Xlib.protocol.rq.ReplyCode', 'rq.ReplyCode', ([], {}), '()\n', (10164, 10166), False, 'from Xlib.protocol import rq\n'), ((10177, 10186), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(1)'], {}), '(1)\n', (10183, 10186), False, 'from Xlib.protocol import rq\n'), ((10197, 10225), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sequence_number"""'], {}), "('sequence_number')\n", (10206, 10225), False, 'from Xlib.protocol import rq\n'), ((10236, 10252), 'Xlib.protocol.rq.ReplyLength', 'rq.ReplyLength', ([], {}), '()\n', (10250, 10252), False, 'from Xlib.protocol import rq\n'), ((10263, 10288), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""devices"""', '(2)'], {}), "('devices', 2)\n", (10274, 10288), False, 'from Xlib.protocol import rq\n'), ((10299, 10309), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(22)'], {}), '(22)\n', (10305, 10309), False, 'from Xlib.protocol import rq\n'), ((10320, 10350), 'Xlib.protocol.rq.List', 'rq.List', (['"""devices"""', 'DeviceInfo'], {}), "('devices', DeviceInfo)\n", (10327, 10350), False, 'from Xlib.protocol import rq\n'), ((10632, 10650), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""opcode"""'], {}), "('opcode')\n", (10640, 10650), False, 'from Xlib.protocol import rq\n'), ((10661, 10674), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', (['(51)'], {}), '(51)\n', (10670, 10674), False, 'from Xlib.protocol import rq\n'), ((10685, 10703), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ([], {}), '()\n', (10701, 10703), False, 'from Xlib.protocol import rq\n'), ((10714, 10738), 'Xlib.protocol.rq.Window', 'rq.Window', (['"""grab_window"""'], {}), "('grab_window')\n", (10723, 10738), False, 'from Xlib.protocol import rq\n'), ((10749, 10766), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""time"""'], {}), "('time')\n", (10758, 10766), False, 'from Xlib.protocol import rq\n'), ((10777, 10807), 'Xlib.protocol.rq.Cursor', 'rq.Cursor', (['"""cursor"""', '(X.NONE,)'], {}), "('cursor', (X.NONE,))\n", (10786, 10807), False, 'from 
Xlib.protocol import rq\n'), ((10850, 10903), 'Xlib.protocol.rq.Set', 'rq.Set', (['"""grab_mode"""', '(1)', '(GrabModeSync, GrabModeAsync)'], {}), "('grab_mode', 1, (GrabModeSync, GrabModeAsync))\n", (10856, 10903), False, 'from Xlib.protocol import rq\n'), ((10914, 10976), 'Xlib.protocol.rq.Set', 'rq.Set', (['"""paired_device_mode"""', '(1)', '(GrabModeSync, GrabModeAsync)'], {}), "('paired_device_mode', 1, (GrabModeSync, GrabModeAsync))\n", (10920, 10976), False, 'from Xlib.protocol import rq\n'), ((10987, 11010), 'Xlib.protocol.rq.Bool', 'rq.Bool', (['"""owner_events"""'], {}), "('owner_events')\n", (10994, 11010), False, 'from Xlib.protocol import rq\n'), ((11021, 11030), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(1)'], {}), '(1)\n', (11027, 11030), False, 'from Xlib.protocol import rq\n'), ((11041, 11063), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""mask"""', '(2)'], {}), "('mask', 2)\n", (11052, 11063), False, 'from Xlib.protocol import rq\n'), ((11131, 11145), 'Xlib.protocol.rq.ReplyCode', 'rq.ReplyCode', ([], {}), '()\n', (11143, 11145), False, 'from Xlib.protocol import rq\n'), ((11156, 11165), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(1)'], {}), '(1)\n', (11162, 11165), False, 'from Xlib.protocol import rq\n'), ((11176, 11204), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sequence_number"""'], {}), "('sequence_number')\n", (11185, 11204), False, 'from Xlib.protocol import rq\n'), ((11215, 11231), 'Xlib.protocol.rq.ReplyLength', 'rq.ReplyLength', ([], {}), '()\n', (11229, 11231), False, 'from Xlib.protocol import rq\n'), ((11242, 11260), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""status"""'], {}), "('status')\n", (11250, 11260), False, 'from Xlib.protocol import rq\n'), ((11271, 11281), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(23)'], {}), '(23)\n', (11277, 11281), False, 'from Xlib.protocol import rq\n'), ((11832, 11850), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""opcode"""'], {}), "('opcode')\n", (11840, 11850), False, 'from Xlib.protocol import rq\n'), ((11861, 11874), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', (['(52)'], {}), '(52)\n', (11870, 11874), False, 'from Xlib.protocol import rq\n'), ((11885, 11903), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ([], {}), '()\n', (11901, 11903), False, 'from Xlib.protocol import rq\n'), ((11914, 11931), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""time"""'], {}), "('time')\n", (11923, 11931), False, 'from Xlib.protocol import rq\n'), ((11973, 11982), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(2)'], {}), '(2)\n', (11979, 11982), False, 'from Xlib.protocol import rq\n'), ((12291, 12309), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""opcode"""'], {}), "('opcode')\n", (12299, 12309), False, 'from Xlib.protocol import rq\n'), ((12320, 12333), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', (['(54)'], {}), '(54)\n', (12329, 12333), False, 'from Xlib.protocol import rq\n'), ((12344, 12362), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ([], {}), '()\n', (12360, 12362), False, 'from Xlib.protocol import rq\n'), ((12373, 12390), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""time"""'], {}), "('time')\n", (12382, 12390), False, 'from Xlib.protocol import rq\n'), ((12401, 12425), 'Xlib.protocol.rq.Window', 'rq.Window', (['"""grab_window"""'], {}), "('grab_window')\n", (12410, 12425), False, 'from Xlib.protocol import rq\n'), ((12436, 12466), 'Xlib.protocol.rq.Cursor', 'rq.Cursor', (['"""cursor"""', '(X.NONE,)'], {}), "('cursor', (X.NONE,))\n", (12445, 12466), False, 'from Xlib.protocol import rq\n'), ((12478, 12497), 'Xlib.protocol.rq.Card32', 
'rq.Card32', (['"""detail"""'], {}), "('detail')\n", (12487, 12497), False, 'from Xlib.protocol import rq\n'), ((12539, 12566), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""modifiers"""', '(2)'], {}), "('modifiers', 2)\n", (12550, 12566), False, 'from Xlib.protocol import rq\n'), ((12577, 12599), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""mask"""', '(2)'], {}), "('mask', 2)\n", (12588, 12599), False, 'from Xlib.protocol import rq\n'), ((12610, 12723), 'Xlib.protocol.rq.Set', 'rq.Set', (['"""grab_type"""', '(1)', '(GrabtypeButton, GrabtypeKeycode, GrabtypeEnter, GrabtypeFocusIn,\n GrabtypeTouchBegin)'], {}), "('grab_type', 1, (GrabtypeButton, GrabtypeKeycode, GrabtypeEnter,\n GrabtypeFocusIn, GrabtypeTouchBegin))\n", (12616, 12723), False, 'from Xlib.protocol import rq\n'), ((12763, 12816), 'Xlib.protocol.rq.Set', 'rq.Set', (['"""grab_mode"""', '(1)', '(GrabModeSync, GrabModeAsync)'], {}), "('grab_mode', 1, (GrabModeSync, GrabModeAsync))\n", (12769, 12816), False, 'from Xlib.protocol import rq\n'), ((12827, 12889), 'Xlib.protocol.rq.Set', 'rq.Set', (['"""paired_device_mode"""', '(1)', '(GrabModeSync, GrabModeAsync)'], {}), "('paired_device_mode', 1, (GrabModeSync, GrabModeAsync))\n", (12833, 12889), False, 'from Xlib.protocol import rq\n'), ((12900, 12923), 'Xlib.protocol.rq.Bool', 'rq.Bool', (['"""owner_events"""'], {}), "('owner_events')\n", (12907, 12923), False, 'from Xlib.protocol import rq\n'), ((12934, 12943), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(2)'], {}), '(2)\n', (12940, 12943), False, 'from Xlib.protocol import rq\n'), ((12977, 13008), 'Xlib.protocol.rq.List', 'rq.List', (['"""modifiers"""', 'rq.Card32'], {}), "('modifiers', rq.Card32)\n", (12984, 13008), False, 'from Xlib.protocol import rq\n'), ((13053, 13067), 'Xlib.protocol.rq.ReplyCode', 'rq.ReplyCode', ([], {}), '()\n', (13065, 13067), False, 'from Xlib.protocol import rq\n'), ((13078, 13087), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(1)'], {}), '(1)\n', (13084, 13087), False, 'from Xlib.protocol import rq\n'), ((13098, 13126), 'Xlib.protocol.rq.Card16', 'rq.Card16', (['"""sequence_number"""'], {}), "('sequence_number')\n", (13107, 13126), False, 'from Xlib.protocol import rq\n'), ((13137, 13153), 'Xlib.protocol.rq.ReplyLength', 'rq.ReplyLength', ([], {}), '()\n', (13151, 13153), False, 'from Xlib.protocol import rq\n'), ((13164, 13191), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""modifiers"""', '(2)'], {}), "('modifiers', 2)\n", (13175, 13191), False, 'from Xlib.protocol import rq\n'), ((13202, 13212), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(22)'], {}), '(22)\n', (13208, 13212), False, 'from Xlib.protocol import rq\n'), ((13223, 13254), 'Xlib.protocol.rq.List', 'rq.List', (['"""modifiers"""', 'rq.Card32'], {}), "('modifiers', rq.Card32)\n", (13230, 13254), False, 'from Xlib.protocol import rq\n'), ((14393, 14411), 'Xlib.protocol.rq.Card8', 'rq.Card8', (['"""opcode"""'], {}), "('opcode')\n", (14401, 14411), False, 'from Xlib.protocol import rq\n'), ((14422, 14435), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', (['(55)'], {}), '(55)\n', (14431, 14435), False, 'from Xlib.protocol import rq\n'), ((14446, 14464), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ([], {}), '()\n', (14462, 14464), False, 'from Xlib.protocol import rq\n'), ((14475, 14499), 'Xlib.protocol.rq.Window', 'rq.Window', (['"""grab_window"""'], {}), "('grab_window')\n", (14484, 14499), False, 'from Xlib.protocol import rq\n'), ((14510, 14529), 'Xlib.protocol.rq.Card32', 'rq.Card32', (['"""detail"""'], {}), "('detail')\n", (14519, 14529), False, 
'from Xlib.protocol import rq\n'), ((14571, 14598), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', (['"""modifiers"""', '(2)'], {}), "('modifiers', 2)\n", (14582, 14598), False, 'from Xlib.protocol import rq\n'), ((14609, 14722), 'Xlib.protocol.rq.Set', 'rq.Set', (['"""grab_type"""', '(1)', '(GrabtypeButton, GrabtypeKeycode, GrabtypeEnter, GrabtypeFocusIn,\n GrabtypeTouchBegin)'], {}), "('grab_type', 1, (GrabtypeButton, GrabtypeKeycode, GrabtypeEnter,\n GrabtypeFocusIn, GrabtypeTouchBegin))\n", (14615, 14722), False, 'from Xlib.protocol import rq\n'), ((14795, 14804), 'Xlib.protocol.rq.Pad', 'rq.Pad', (['(3)'], {}), '(3)\n', (14801, 14804), False, 'from Xlib.protocol import rq\n'), ((14815, 14846), 'Xlib.protocol.rq.List', 'rq.List', (['"""modifiers"""', 'rq.Card32'], {}), "('modifiers', rq.Card32)\n", (14822, 14846), False, 'from Xlib.protocol import rq\n')] |
from typing import Union
from unittest import mock
import graphene
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.template.defaultfilters import slugify
from graphene.utils.str_converters import to_camel_case
from saleor.core.taxes import zero_money
from saleor.graphql.core.utils import snake_to_camel_case
from saleor.graphql.product.enums import AttributeTypeEnum, AttributeValueType
from saleor.graphql.product.filters import filter_attributes_by_product_types
from saleor.graphql.product.mutations.attributes import validate_value_is_unique
from saleor.graphql.product.types.attributes import resolve_attribute_value_type
from saleor.product import AttributeInputType
from saleor.product.error_codes import ProductErrorCode
from saleor.product.models import (
Attribute,
AttributeProduct,
AttributeValue,
AttributeVariant,
Category,
Collection,
Product,
ProductType,
ProductVariant,
)
from saleor.product.utils.attributes import associate_attribute_values_to_instance
from tests.api.utils import get_graphql_content
def test_validate_value_is_unique(color_attribute):
value = color_attribute.values.first()
# a new value but with existing slug should raise an error
with pytest.raises(ValidationError):
validate_value_is_unique(color_attribute, AttributeValue(slug=value.slug))
# a new value with a new slug should pass
validate_value_is_unique(
color_attribute, AttributeValue(slug="spanish-inquisition")
)
# value that already belongs to the attribute shouldn't be taken into account
validate_value_is_unique(color_attribute, value)
def test_get_single_attribute_by_pk(user_api_client, color_attribute_without_values):
attribute_gql_id = graphene.Node.to_global_id(
"Attribute", color_attribute_without_values.id
)
query = """
query($id: ID!) {
attribute(id: $id) {
id
slug
}
}
"""
content = get_graphql_content(
user_api_client.post_graphql(query, {"id": attribute_gql_id})
)
assert content["data"]["attribute"], "Should have found an attribute"
assert content["data"]["attribute"]["id"] == attribute_gql_id
assert content["data"]["attribute"]["slug"] == color_attribute_without_values.slug
QUERY_ATTRIBUTES = """
query {
attributes(first: 20) {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
"""
def test_attributes_query(user_api_client, product):
attributes = Attribute.objects
query = QUERY_ATTRIBUTES
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert attributes_data
assert len(attributes_data) == attributes.count()
def test_attributes_query_hidden_attribute(user_api_client, product, color_attribute):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.get_visible_to_user(
user_api_client.user
).count()
assert attribute_count == 1
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
def test_attributes_query_hidden_attribute_as_staff_user(
staff_api_client, product, color_attribute, permission_manage_products
):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.all().count()
# The user doesn't have the permission yet to manage products,
# the user shouldn't be able to see the hidden attributes
assert Attribute.objects.get_visible_to_user(staff_api_client.user).count() == 1
# The user should now be able to see the attributes
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES = """
{
products(first: 1) {
edges {
node {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
variants {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
}
}
}
}
}
"""
@pytest.mark.parametrize("is_staff", (False, True))
def test_resolve_attributes_with_hidden(
user_api_client,
product,
color_attribute,
size_attribute,
staff_user,
is_staff,
permission_manage_products,
):
"""Ensure non-staff users don't see hidden attributes, and staff users having
the 'manage product' permission can.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_attribute = color_attribute
variant_attribute = size_attribute
expected_product_attribute_count = product.attributes.count() - 1
expected_variant_attribute_count = variant.attributes.count() - 1
if is_staff:
api_client.user = staff_user
expected_product_attribute_count += 1
expected_variant_attribute_count += 1
staff_user.user_permissions.add(permission_manage_products)
# Hide one product and variant attribute from the storefront
for attribute in (product_attribute, variant_attribute):
attribute.visible_in_storefront = False
attribute.save(update_fields=["visible_in_storefront"])
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
assert len(product["attributes"]) == expected_product_attribute_count
assert len(product["variants"][0]["attributes"]) == expected_variant_attribute_count
def test_resolve_attribute_values(user_api_client, product, staff_user):
"""Ensure the attribute values are properly resolved."""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product_attribute_values = list(
product.attributes.first().values.values_list("slug", flat=True)
)
variant_attribute_values = list(
variant.attributes.first().values.values_list("slug", flat=True)
)
assert len(product_attribute_values) == 1
assert len(variant_attribute_values) == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == len(product_attribute_values)
assert len(variant_attributes) == len(variant_attribute_values)
assert product_attributes[0]["attribute"]["slug"] == "color"
assert product_attributes[0]["values"][0]["slug"] == product_attribute_values[0]
assert product_attributes[0]["value"]["slug"] == product_attribute_values[0]
assert variant_attributes[0]["attribute"]["slug"] == "size"
assert variant_attributes[0]["values"][0]["slug"] == variant_attribute_values[0]
assert variant_attributes[0]["value"]["slug"] == variant_attribute_values[0]
def test_resolve_attribute_values_non_assigned_to_node(
user_api_client, product, staff_user
):
"""Ensure the attribute values are properly resolved when an attribute is part
of the product type but not of the node (product/variant), thus no values should be
resolved.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_type = product.product_type
# Create dummy attributes
unassigned_product_attribute = Attribute.objects.create(name="P", slug="product")
unassigned_variant_attribute = Attribute.objects.create(name="V", slug="variant")
# Create a value for each dummy attribute to ensure they are not returned
# by the product or variant as they are not associated to them
AttributeValue.objects.bulk_create(
[
AttributeValue(slug="a", name="A", attribute=unassigned_product_attribute),
AttributeValue(slug="b", name="B", attribute=unassigned_product_attribute),
]
)
# Assign the dummy attributes to the product type and push them at the top
# through a sort_order=0 as the other attributes have sort_order=null
AttributeProduct.objects.create(
attribute=unassigned_product_attribute, product_type=product_type, sort_order=0
)
AttributeVariant.objects.create(
attribute=unassigned_variant_attribute, product_type=product_type, sort_order=0
)
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == 2, "Non-assigned attr from the PT may be missing"
assert len(variant_attributes) == 2, "Non-assigned attr from the PT may be missing"
assert product_attributes[0]["attribute"]["slug"] == "product"
assert product_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
assert variant_attributes[0]["attribute"]["slug"] == "variant"
assert variant_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
def test_attributes_filter_by_product_type_with_empty_value():
"""Ensure passing an empty or null value is ignored and the queryset is simply
returned without any modification.
"""
qs = Attribute.objects.all()
assert filter_attributes_by_product_types(qs, "...", "") is qs
assert filter_attributes_by_product_types(qs, "...", None) is qs
def test_attributes_filter_by_product_type_with_unsupported_field():
"""Ensure using an unknown field to filter attributes by raises a NotImplemented
exception.
"""
qs = Attribute.objects.all()
with pytest.raises(NotImplementedError) as exc:
filter_attributes_by_product_types(qs, "in_space", "a-value")
assert exc.value.args == ("Filtering by in_space is unsupported",)
def test_attributes_filter_by_non_existing_category_id():
"""Ensure using a non-existing category ID returns an empty query set."""
category_id = graphene.Node.to_global_id("Category", -1)
mocked_qs = mock.MagicMock()
qs = filter_attributes_by_product_types(mocked_qs, "in_category", category_id)
assert qs == mocked_qs.none.return_value
@pytest.mark.parametrize("test_deprecated_filter", [True, False])
@pytest.mark.parametrize("tested_field", ["inCategory", "inCollection"])
def test_attributes_in_collection_query(
user_api_client,
product_type,
category,
collection,
collection_with_products,
test_deprecated_filter,
tested_field,
):
if "Collection" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Collection", collection.pk)
elif "Category" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Category", category.pk)
else:
raise AssertionError(tested_field)
expected_qs = Attribute.objects.filter(
Q(attributeproduct__product_type_id=product_type.pk)
| Q(attributevariant__product_type_id=product_type.pk)
)
# Create another product type and attribute that shouldn't get matched
other_category = Category.objects.create(name="Other Category", slug="other-cat")
other_attribute = Attribute.objects.create(name="Other", slug="other")
other_product_type = ProductType.objects.create(
name="Other type", has_variants=True, is_shipping_required=True
)
other_product_type.product_attributes.add(other_attribute)
other_product = Product.objects.create(
name=f"Another Product",
product_type=other_product_type,
category=other_category,
price=zero_money(),
is_published=True,
)
# Create another collection with products but shouldn't get matched
# as we don't look for this other collection
other_collection = Collection.objects.create(
name="Other Collection",
slug="other-collection",
is_published=True,
description="Description",
)
other_collection.products.add(other_product)
query = """
query($nodeID: ID!) {
attributes(first: 20, %(filter_input)s) {
edges {
node {
id
name
slug
}
}
}
}
"""
if test_deprecated_filter:
query = query % {"filter_input": f"{tested_field}: $nodeID"}
else:
query = query % {"filter_input": "filter: { %s: $nodeID }" % tested_field}
variables = {"nodeID": filtered_by_node_id}
content = get_graphql_content(user_api_client.post_graphql(query, variables))
attributes_data = content["data"]["attributes"]["edges"]
flat_attributes_data = [attr["node"]["slug"] for attr in attributes_data]
expected_flat_attributes_data = list(expected_qs.values_list("slug", flat=True))
assert flat_attributes_data == expected_flat_attributes_data
CREATE_ATTRIBUTES_QUERY = """
mutation createAttribute($name: String!, $values: [AttributeValueCreateInput]) {
attributeCreate(input: {name: $name, values: $values}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_create_attribute_and_attribute_values(
staff_api_client, permission_manage_products
):
query = CREATE_ATTRIBUTES_QUERY
attribute_name = "<NAME>"
name = "Value name"
variables = {"name": attribute_name, "values": [{"name": name}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert not content["data"]["attributeCreate"]["errors"]
data = content["data"]["attributeCreate"]
# Check if the attribute was correctly created
assert data["attribute"]["name"] == attribute_name
assert data["attribute"]["slug"] == slugify(
attribute_name
), "The default slug should be the slugified name"
assert (
data["attribute"]["productTypes"]["edges"] == []
), "The attribute should not have been assigned to a product type"
# Check if the attribute values were correctly created
assert len(data["attribute"]["values"]) == 1
assert data["attribute"]["values"][0]["name"] == name
assert data["attribute"]["values"][0]["slug"] == slugify(name)
@pytest.mark.parametrize(
"input_slug, expected_slug, expected_error",
(
("my-slug", "my-slug", []),
(None, "my-name", []),
(
"",
None,
[{"field": "slug", "message": "The attribute's slug cannot be blank."}],
),
),
)
def test_create_attribute_with_given_slug(
staff_api_client,
permission_manage_products,
input_slug,
expected_slug,
expected_error,
):
staff_api_client.user.user_permissions.add(permission_manage_products)
query = """
mutation createAttribute(
$name: String!, $slug: String) {
attributeCreate(input: {name: $name, slug: $slug}) {
errors {
field
message
}
attribute {
slug
}
}
}
"""
attribute_name = "My Name"
variables = {"name": attribute_name, "slug": input_slug}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))
# Check if the error is as expected: null or something else
assert content["data"]["attributeCreate"]["errors"] == expected_error
# Check if the slug was correctly set if no error was expected
    if not expected_error:
assert content["data"]["attributeCreate"]["attribute"]["slug"] == expected_slug
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_create_attribute_and_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
permission_manage_products,
product_type,
):
query = CREATE_ATTRIBUTES_QUERY
variables = {"name": "Example name", "values": [{"name": name_1}, {"name": name_2}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeCreate"]["errors"]
assert errors
assert errors[0]["field"] == "values"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeCreate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
UPDATE_ATTRIBUTE_QUERY = """
mutation updateAttribute(
$id: ID!, $name: String!, $addValues: [AttributeValueCreateInput]!,
$removeValues: [ID]!) {
attributeUpdate(
id: $id,
input: {
name: $name, addValues: $addValues,
removeValues: $removeValues}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_update_attribute_name(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "<NAME>"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"name": name, "id": node_id, "addValues": [], "removeValues": []}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert data["attribute"]["name"] == name == attribute.name
assert data["attribute"]["productTypes"]["edges"] == []
def test_update_attribute_remove_and_add_values(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "<NAME>"
attribute_value_name = "Red Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
attribute_value_id = attribute.values.first().id
value_id = graphene.Node.to_global_id("AttributeValue", attribute_value_id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [value_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert not data["errors"]
assert data["attribute"]["name"] == name == attribute.name
assert not attribute.values.filter(pk=attribute_value_id).exists()
assert attribute.values.filter(name=attribute_value_name).exists()
def test_update_empty_attribute_and_add_values(
staff_api_client, color_attribute_without_values, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute_without_values
name = "<NAME>"
attribute_value_name = "Yellow Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
attribute.refresh_from_db()
assert attribute.values.count() == 1
assert attribute.values.filter(name=attribute_value_name).exists()
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_update_attribute_and_add_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
color_attribute,
permission_manage_products,
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": "Example name",
"id": node_id,
"removeValues": [],
"addValues": [{"name": name_1}, {"name": name_2}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "addValues"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
def test_update_attribute_and_remove_others_attribute_value(
staff_api_client, color_attribute, size_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
size_attribute = size_attribute.values.first()
attr_id = graphene.Node.to_global_id("AttributeValue", size_attribute.pk)
variables = {
"name": "Example name",
"id": node_id,
"slug": "example-slug",
"addValues": [],
"removeValues": [attr_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "removeValues"
err_msg = "Value %s does not belong to this attribute." % str(size_attribute)
assert errors[0]["message"] == err_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == ProductErrorCode.INVALID.name
def test_delete_attribute(
staff_api_client, color_attribute, permission_manage_products, product_type
):
attribute = color_attribute
query = """
mutation deleteAttribute($id: ID!) {
attributeDelete(id: $id) {
errors {
field
message
}
attribute {
id
}
}
}
"""
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeDelete"]
assert data["attribute"]["id"] == variables["id"]
with pytest.raises(attribute._meta.model.DoesNotExist):
attribute.refresh_from_db()
CREATE_ATTRIBUTE_VALUE_QUERY = """
mutation createAttributeValue(
$attributeId: ID!, $name: String!) {
attributeValueCreate(
attribute: $attributeId, input: {name: $name}) {
productErrors {
field
message
code
}
attribute {
values {
name
}
}
attributeValue {
name
type
slug
}
}
}
"""
def test_create_attribute_value(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
name = "<NAME>"
variables = {"name": name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert not data["productErrors"]
attr_data = data["attributeValue"]
assert attr_data["name"] == name
assert attr_data["slug"] == slugify(name)
assert attr_data["type"] == "STRING"
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_create_attribute_value_not_unique_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
def test_create_attribute_value_capitalized_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name.upper(), "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
UPDATE_ATTRIBUTE_VALUE_QUERY = """
mutation updateChoice(
$id: ID!, $name: String!) {
attributeValueUpdate(
id: $id, input: {name: $name}) {
errors {
field
message
}
attributeValue {
name
slug
}
attribute {
values {
name
}
}
}
}
"""
def test_update_attribute_value(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
name = "Crimson name"
variables = {"name": name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
value.refresh_from_db()
assert data["attributeValue"]["name"] == name == value.name
assert data["attributeValue"]["slug"] == slugify(name)
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_update_attribute_value_name_not_unique(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
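    # Create a second value, then try to rename it to the existing pink value's name.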
value = pink_attribute_value.attribute.values.create(
name="<NAME>", slug="example-name", value="#RED"
)
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"name": pink_attribute_value.name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
assert data["errors"]
assert data["errors"][0]["message"]
assert data["errors"][0]["field"] == "name"
def test_delete_attribute_value(
staff_api_client, color_attribute, pink_attribute_value, permission_manage_products
):
value = color_attribute.values.get(name="Red")
query = """
mutation updateChoice($id: ID!) {
attributeValueDelete(id: $id) {
attributeValue {
name
slug
}
}
}
"""
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"id": node_id}
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
with pytest.raises(value._meta.model.DoesNotExist):
value.refresh_from_db()
@pytest.mark.parametrize(
"raw_value, expected_type",
[
("#0000", AttributeValueType.COLOR),
("#FF69B4", AttributeValueType.COLOR),
("rgb(255, 0, 0)", AttributeValueType.COLOR),
("hsl(0, 100%, 50%)", AttributeValueType.COLOR),
("hsla(120, 60%, 70%, 0.3)", AttributeValueType.COLOR),
("rgba(100%, 255, 0, 0)", AttributeValueType.COLOR),
("http://example.com", AttributeValueType.URL),
("https://example.com", AttributeValueType.URL),
("ftp://example.com", AttributeValueType.URL),
("example.com", AttributeValueType.STRING),
("Foo", AttributeValueType.STRING),
("linear-gradient(red, yellow)", AttributeValueType.GRADIENT),
("radial-gradient(#0000, yellow)", AttributeValueType.GRADIENT),
],
)
def test_resolve_attribute_value_type(raw_value, expected_type):
assert resolve_attribute_value_type(raw_value) == expected_type
def test_resolve_assigned_attribute_without_values(api_client, product_type, product):
"""Ensure the attributes assigned to a product type are resolved even if
the product doesn't provide any value for it or is not directly associated to it.
"""
# Retrieve the product's variant
variant = product.variants.get()
# Remove all attributes and values from the product and its variant
product.attributesrelated.clear()
variant.attributesrelated.clear()
# Retrieve the product and variant's attributes
products = get_graphql_content(
api_client.post_graphql(
"""
{
products(first: 10) {
edges {
node {
attributes {
attribute {
slug
}
values {
name
}
}
variants {
attributes {
attribute {
slug
}
values {
name
}
}
}
}
}
}
}
"""
)
)["data"]["products"]["edges"]
# Ensure we are only working on one product and variant, the ones we are testing
assert len(products) == 1
assert len(products[0]["node"]["variants"]) == 1
# Retrieve the nodes data
product = products[0]["node"]
variant = product["variants"][0]
    # Ensure the product attribute resolves with no values
assert len(product["attributes"]) == 1
assert product["attributes"][0]["attribute"]["slug"] == "color"
assert product["attributes"][0]["values"] == []
    # Ensure the variant attribute resolves with no values
assert variant["attributes"][0]["attribute"]["slug"] == "size"
assert variant["attributes"][0]["values"] == []
ASSIGN_ATTR_QUERY = """
mutation assign($productTypeId: ID!, $operations: [AttributeAssignInput]!) {
attributeAssign(productTypeId: $productTypeId, operations: $operations) {
errors {
field
message
}
productType {
id
productAttributes {
id
}
variantAttributes {
id
}
}
}
}
"""
def test_assign_attributes_to_product_type(
staff_api_client, permission_manage_products, attribute_list
):
product_type = ProductType.objects.create(name="Default Type", has_variants=True)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = []
variables = {"productTypeId": product_type_global_id, "operations": operations}
product_attributes_ids = {attr.pk for attr in attribute_list[:2]}
variant_attributes_ids = {attr.pk for attr in attribute_list[2:]}
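    # Build one AttributeAssignInput per attribute: first two as PRODUCT, rest as VARIANT.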
for attr_id in product_attributes_ids:
operations.append(
{"type": "PRODUCT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
for attr_id in variant_attributes_ids:
operations.append(
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeAssign"]
assert not content["errors"], "Should have succeeded"
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == len(
product_attributes_ids
)
assert len(content["productType"]["variantAttributes"]) == len(
variant_attributes_ids
)
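    # Convert the returned global IDs back to database PKs before comparing with the inputs.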
found_product_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["productAttributes"]
}
found_variant_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["variantAttributes"]
}
assert found_product_attrs_ids == product_attributes_ids
assert found_variant_attrs_ids == variant_attributes_ids
def test_assign_variant_attribute_to_product_type_with_disabled_variants(
staff_api_client,
permission_manage_products,
product_type_without_variant,
color_attribute_without_values,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute as a variant attribute when
the product type doesn't support variants"""
product_type = product_type_without_variant
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Variants are disabled in this product type.",
}
]
def test_assign_variant_attribute_having_unsupported_input_type(
staff_api_client, permission_manage_products, product_type, size_attribute
):
"""The assignAttribute mutation should raise an error when trying
to use an attribute as a variant attribute when
the attribute's input type doesn't support variants"""
attribute = size_attribute
attribute.input_type = AttributeInputType.MULTISELECT
attribute.save(update_fields=["input_type"])
product_type.variant_attributes.clear()
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": (
"Attributes having for input types ['multiselect'] cannot be assigned "
"as variant attributes"
),
}
]
@pytest.mark.parametrize(
"product_type_attribute_type, gql_attribute_type",
(
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.VARIANT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.VARIANT),
),
)
def test_assign_attribute_to_product_type_having_already_that_attribute(
staff_api_client,
permission_manage_products,
color_attribute_without_values,
product_type_attribute_type,
gql_attribute_type,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute already contained in the product type."""
product_type = ProductType.objects.create(name="Type")
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
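    # Pre-assign the attribute under the parametrized type so the mutation sees a duplicate.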
if product_type_attribute_type == AttributeTypeEnum.PRODUCT:
product_type.product_attributes.add(attribute)
elif product_type_attribute_type == AttributeTypeEnum.VARIANT:
product_type.variant_attributes.add(attribute)
else:
raise ValueError(f"Unknown: {product_type}")
query = ASSIGN_ATTR_QUERY
operations = [
{
"type": gql_attribute_type.value,
"id": graphene.Node.to_global_id("Attribute", attribute.pk),
}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Color (color) have already been assigned to this product type.",
}
]
UNASSIGN_ATTR_QUERY = """
mutation unAssignAttribute(
$productTypeId: ID!, $attributeIds: [ID]!
) {
attributeUnassign(productTypeId: $productTypeId, attributeIds: $attributeIds) {
errors {
field
message
}
productType {
id
variantAttributes {
id
}
productAttributes {
id
}
}
}
}
"""
def test_unassign_attributes_from_product_type(
staff_api_client, permission_manage_products, attribute_list
):
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
variant_attribute, *product_attributes = attribute_list
product_type.product_attributes.add(*product_attributes)
product_type.variant_attributes.add(variant_attribute)
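    # product_attributes[0] is unassigned below; product_attributes[1] should remain assigned.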
remaining_attribute_global_id = graphene.Node.to_global_id(
"Attribute", product_attributes[1].pk
)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", product_attributes[0].pk)
],
}
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 1
assert len(content["productType"]["variantAttributes"]) == 1
assert (
content["productType"]["productAttributes"][0]["id"]
== remaining_attribute_global_id
)
def test_unassign_attributes_not_in_product_type(
staff_api_client, permission_manage_products, color_attribute_without_values
):
"""The unAssignAttribute mutation should not raise any error when trying
to remove an attribute that is not/no longer in the product type."""
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", color_attribute_without_values.pk)
],
}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 0
assert len(content["productType"]["variantAttributes"]) == 0
def test_retrieve_product_attributes_input_type(
staff_api_client, product, permission_manage_products
):
query = """
{
products(first: 10) {
edges {
node {
attributes {
values {
type
inputType
}
}
}
}
}
}
"""
found_products = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["products"]["edges"]
assert len(found_products) == 1
for gql_attr in found_products[0]["node"]["attributes"]:
assert len(gql_attr["values"]) == 1
assert gql_attr["values"][0]["type"] == "STRING"
assert gql_attr["values"][0]["inputType"] == "DROPDOWN"
@pytest.mark.parametrize(
"attribute, expected_value",
(
("filterable_in_storefront", True),
("filterable_in_dashboard", True),
("visible_in_storefront", True),
("available_in_grid", True),
("value_required", False),
("storefront_search_position", 0),
),
)
def test_retrieving_the_restricted_attributes_restricted(
staff_api_client,
color_attribute,
permission_manage_products,
attribute,
expected_value,
):
"""Checks if the attributes are restricted and if their default value
is the expected one."""
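    # The GraphQL schema exposes camelCase fields, so convert the parametrized snake_case name.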
attribute = to_camel_case(attribute)
query = (
"""
{
attributes(first: 10) {
edges {
node {
%s
}
}
}
}
"""
% attribute
)
found_attributes = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["attributes"]["edges"]
assert len(found_attributes) == 1
assert found_attributes[0]["node"][attribute] == expected_value
ATTRIBUTES_RESORT_QUERY = """
mutation ProductTypeReorderAttributes(
$productTypeId: ID!
$moves: [ReorderInput]!
$type: AttributeTypeEnum!
) {
productTypeReorderAttributes(
productTypeId: $productTypeId
moves: $moves
type: $type
) {
productType {
id
variantAttributes {
id
slug
}
productAttributes {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_attributes_within_product_type_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid product type (invalid ID)."""
product_type_id = graphene.Node.to_global_id("ProductType", -1)
attribute_id = graphene.Node.to_global_id("Attribute", -1)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "productTypeId",
"message": f"Couldn't resolve to a product type: {product_type_id}",
}
]
def test_sort_attributes_within_product_type_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder an attribute not associated to the given product type."""
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
@pytest.mark.parametrize(
"attribute_type, relation_field, backref_field",
(
("VARIANT", "variant_attributes", "attributevariant"),
("PRODUCT", "product_attributes", "attributeproduct"),
),
)
def test_sort_attributes_within_product_type(
staff_api_client,
attribute_list,
permission_manage_products,
attribute_type,
relation_field,
backref_field,
):
attributes = attribute_list
assert len(attributes) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
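    # Resolve the parametrized M2M relation and its sorted accessor dynamically.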
m2m_attributes = getattr(product_type, relation_field)
m2m_attributes.set(attributes)
sort_method = getattr(m2m_attributes, f"{relation_field}_sorted")
attributes = list(sort_method())
assert len(attributes) == 3
variables = {
"type": attribute_type,
"productTypeId": product_type_id,
"moves": [
{
"id": graphene.Node.to_global_id("Attribute", attributes[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("Attribute", attributes[2].pk),
"sortOrder": -1,
},
],
}
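    # Moving attributes[0] down by one and attributes[2] up by one gives the order [1, 2, 0].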
expected_order = [attributes[1].pk, attributes[2].pk, attributes[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTES_RESORT_QUERY, variables)
)["data"]["productTypeReorderAttributes"]
assert not content["errors"]
assert (
content["productType"]["id"] == product_type_id
), "Did not return the correct product type"
gql_attributes = content["productType"][snake_to_camel_case(relation_field)]
assert len(gql_attributes) == len(expected_order)
for attr, expected_pk in zip(gql_attributes, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "Attribute"
assert int(gql_attr_id) == expected_pk
ATTRIBUTE_VALUES_RESORT_QUERY = """
mutation attributeReorderValues($attributeId: ID!, $moves: [ReorderInput]!) {
attributeReorderValues(attributeId: $attributeId, moves: $moves) {
attribute {
id
values {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_values_within_attribute_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid attribute (invalid ID)."""
attribute_id = graphene.Node.to_global_id("Attribute", -1)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "attributeId",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
def test_sort_values_within_attribute_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder a value not associated to the given attribute."""
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"type": "VARIANT",
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute value: {value_id}",
}
]
def test_sort_values_within_attribute(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
AttributeValue.objects.create(attribute=attribute, name="Green", slug="green")
values = list(attribute.values.all())
assert len(values) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
m2m_values = attribute.values
m2m_values.set(values)
assert values == sorted(
values, key=lambda o: o.sort_order if o.sort_order is not None else o.pk
), "The values are not properly ordered"
variables = {
"attributeId": attribute_id,
"moves": [
{
"id": graphene.Node.to_global_id("AttributeValue", values[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("AttributeValue", values[2].pk),
"sortOrder": -1,
},
],
}
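    # Moving values[0] down by one and values[2] up by one gives the order [1, 2, 0].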
expected_order = [values[1].pk, values[2].pk, values[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTE_VALUES_RESORT_QUERY, variables)
)["data"]["attributeReorderValues"]
assert not content["errors"]
assert content["attribute"]["id"] == attribute_id
gql_values = content["attribute"]["values"]
assert len(gql_values) == len(expected_order)
actual_order = []
for attr, expected_pk in zip(gql_values, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "AttributeValue"
actual_order.append(int(gql_attr_id))
assert actual_order == expected_order
ATTRIBUTES_FILTER_QUERY = """
query($filters: AttributeFilterInput!) {
attributes(first: 10, filter: $filters) {
edges {
node {
name
slug
}
}
}
}
"""
def test_search_attributes(api_client, color_attribute, size_attribute):
variables = {"filters": {"search": "color"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "color"
def test_filter_attributes_if_filterable_in_dashboard(
api_client, color_attribute, size_attribute
):
color_attribute.filterable_in_dashboard = False
color_attribute.save(update_fields=["filterable_in_dashboard"])
variables = {"filters": {"filterableInDashboard": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_if_available_in_grid(
api_client, color_attribute, size_attribute
):
color_attribute.available_in_grid = False
color_attribute.save(update_fields=["available_in_grid"])
variables = {"filters": {"availableInGrid": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_by_global_id_list(api_client, attribute_list):
global_ids = [
graphene.Node.to_global_id("Attribute", attribute.pk)
for attribute in attribute_list[:2]
]
variables = {"filters": {"ids": global_ids}}
expected_slugs = sorted([attribute_list[0].slug, attribute_list[1].slug])
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
received_slugs = sorted(
[attributes[0]["node"]["slug"], attributes[1]["node"]["slug"]]
)
assert received_slugs == expected_slugs
ATTRIBUTES_SORT_QUERY = """
query($sortBy: AttributeSortingInput) {
attributes(first: 10, sortBy: $sortBy) {
edges {
node {
slug
}
}
}
}
"""
def test_sort_attributes_by_slug(api_client):
Attribute.objects.bulk_create(
[
Attribute(name="MyAttribute", slug="b"),
Attribute(name="MyAttribute", slug="a"),
]
)
variables = {"sortBy": {"field": "SLUG", "direction": "ASC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "a"
assert attributes[1]["node"]["slug"] == "b"
@pytest.mark.parametrize(
"sort_field, m2m_model",
(
("DASHBOARD_VARIANT_POSITION", AttributeVariant),
("DASHBOARD_PRODUCT_POSITION", AttributeProduct),
),
)
def test_sort_attributes_by_position_in_product_type(
api_client,
color_attribute,
size_attribute,
sort_field: str,
m2m_model: Union[AttributeVariant, AttributeProduct],
):
"""Sorts attributes for dashboard custom ordering inside a given product type."""
product_type = ProductType.objects.create(name="My Product Type")
m2m_model.objects.create(
product_type=product_type, attribute=color_attribute, sort_order=0
)
m2m_model.objects.create(
product_type=product_type, attribute=size_attribute, sort_order=1
)
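    # DESC on the dashboard position returns "size" (sort_order=1) before "color" (sort_order=0).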
variables = {"sortBy": {"field": sort_field, "direction": "DESC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "size"
assert attributes[1]["node"]["slug"] == "color"
def test_sort_attributes_by_default_sorting(api_client):
"""Don't provide any sorting, this should sort by name by default."""
Attribute.objects.bulk_create(
[Attribute(name="A", slug="b"), Attribute(name="B", slug="a")]
)
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, {})
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "b"
assert attributes[1]["node"]["slug"] == "a"
@pytest.mark.parametrize("is_variant", (True, False))
def test_attributes_of_products_are_sorted(
staff_api_client, product, color_attribute, is_variant
):
"""Ensures the attributes of products and variants are sorted."""
variant = product.variants.first()
if is_variant:
query = """
query($id: ID!) {
productVariant(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
else:
query = """
query($id: ID!) {
product(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
# Create a dummy attribute with a higher ID
# This will allow us to make sure it is always the last attribute
    # when sorted by ID. Thus, we are sure the test is not passing by accident.
other_attribute = Attribute.objects.create(name="Other", slug="other")
# Add the attribute to the product type
if is_variant:
product.product_type.variant_attributes.set([color_attribute, other_attribute])
else:
product.product_type.product_attributes.set([color_attribute, other_attribute])
# Retrieve the M2M object for the attribute vs the product type
if is_variant:
m2m_rel_other_attr = other_attribute.attributevariant.last()
else:
m2m_rel_other_attr = other_attribute.attributeproduct.last()
    # Push the last attribute to the top and leave the others' sort_order at None
m2m_rel_other_attr.sort_order = 0
m2m_rel_other_attr.save(update_fields=["sort_order"])
# Assign attributes to the product
node = variant if is_variant else product # type: Union[Product, ProductVariant]
node.attributesrelated.clear()
associate_attribute_values_to_instance(
node, color_attribute, color_attribute.values.first()
)
    # other_attribute (sort_order=0) comes first; color_attribute (sort_order=None) follows
expected_order = [other_attribute.pk, color_attribute.pk]
# Make the node ID
if is_variant:
node_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
else:
node_id = graphene.Node.to_global_id("Product", product.pk)
# Retrieve the attributes
data = get_graphql_content(staff_api_client.post_graphql(query, {"id": node_id}))[
"data"
]
attributes = data["productVariant" if is_variant else "product"]["attributes"]
actual_order = [
int(graphene.Node.from_global_id(attr["attribute"]["id"])[1])
for attr in attributes
]
# Compare the received data against our expectations
assert actual_order == expected_order
| [
"saleor.product.models.AttributeProduct.objects.create",
"saleor.core.taxes.zero_money",
"saleor.product.models.Attribute.objects.get_visible_to_user",
"saleor.product.models.Attribute.objects.create",
"saleor.product.models.Attribute.objects.all",
"saleor.product.models.Collection.objects.create",
"saleor.graphql.product.types.attributes.resolve_attribute_value_type",
"saleor.graphql.product.filters.filter_attributes_by_product_types",
"saleor.graphql.core.utils.snake_to_camel_case",
"saleor.product.models.AttributeValue",
"saleor.product.models.AttributeVariant.objects.create",
"django.db.models.Q",
"saleor.product.models.ProductType.objects.create",
"graphene.Node.to_global_id",
"saleor.graphql.product.mutations.attributes.validate_value_is_unique",
"saleor.product.models.AttributeValue.objects.create",
"unittest.mock.MagicMock",
"saleor.product.models.Category.objects.create",
"django.template.defaultfilters.slugify",
"pytest.raises",
"graphene.utils.str_converters.to_camel_case",
"pytest.mark.parametrize",
"tests.api.utils.get_graphql_content",
"saleor.product.models.Attribute",
"graphene.Node.from_global_id"
]
| [((5264, 5314), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_staff"""', '(False, True)'], {}), "('is_staff', (False, True))\n", (5287, 5314), False, 'import pytest\n'), ((11645, 11709), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_deprecated_filter"""', '[True, False]'], {}), "('test_deprecated_filter', [True, False])\n", (11668, 11709), False, 'import pytest\n'), ((11711, 11782), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tested_field"""', "['inCategory', 'inCollection']"], {}), "('tested_field', ['inCategory', 'inCollection'])\n", (11734, 11782), False, 'import pytest\n'), ((16226, 16441), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_slug, expected_slug, expected_error"""', '((\'my-slug\', \'my-slug\', []), (None, \'my-name\', []), (\'\', None, [{\'field\':\n \'slug\', \'message\': "The attribute\'s slug cannot be blank."}]))'], {}), '(\'input_slug, expected_slug, expected_error\', ((\n \'my-slug\', \'my-slug\', []), (None, \'my-name\', []), (\'\', None, [{\'field\':\n \'slug\', \'message\': "The attribute\'s slug cannot be blank."}])))\n', (16249, 16441), False, 'import pytest\n'), ((17577, 17832), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name_1, name_2, error_msg, error_code"""', "(('Red color', 'Red color', 'Provided values are not unique.',\n ProductErrorCode.UNIQUE), ('Red color', 'red color',\n 'Provided values are not unique.', ProductErrorCode.UNIQUE))"], {}), "('name_1, name_2, error_msg, error_code', ((\n 'Red color', 'Red color', 'Provided values are not unique.',\n ProductErrorCode.UNIQUE), ('Red color', 'red color',\n 'Provided values are not unique.', ProductErrorCode.UNIQUE)))\n", (17600, 17832), False, 'import pytest\n'), ((22133, 22388), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name_1, name_2, error_msg, error_code"""', "(('Red color', 'Red color', 'Provided values are not unique.',\n ProductErrorCode.UNIQUE), ('Red color', 'red color',\n 'Provided values are not unique.', ProductErrorCode.UNIQUE))"], {}), "('name_1, name_2, error_msg, error_code', ((\n 'Red color', 'Red color', 'Provided values are not unique.',\n ProductErrorCode.UNIQUE), ('Red color', 'red color',\n 'Provided values are not unique.', ProductErrorCode.UNIQUE)))\n", (22156, 22388), False, 'import pytest\n'), ((30896, 31626), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raw_value, expected_type"""', "[('#0000', AttributeValueType.COLOR), ('#FF69B4', AttributeValueType.COLOR),\n ('rgb(255, 0, 0)', AttributeValueType.COLOR), ('hsl(0, 100%, 50%)',\n AttributeValueType.COLOR), ('hsla(120, 60%, 70%, 0.3)',\n AttributeValueType.COLOR), ('rgba(100%, 255, 0, 0)', AttributeValueType\n .COLOR), ('http://example.com', AttributeValueType.URL), (\n 'https://example.com', AttributeValueType.URL), ('ftp://example.com',\n AttributeValueType.URL), ('example.com', AttributeValueType.STRING), (\n 'Foo', AttributeValueType.STRING), ('linear-gradient(red, yellow)',\n AttributeValueType.GRADIENT), ('radial-gradient(#0000, yellow)',\n AttributeValueType.GRADIENT)]"], {}), "('raw_value, expected_type', [('#0000',\n AttributeValueType.COLOR), ('#FF69B4', AttributeValueType.COLOR), (\n 'rgb(255, 0, 0)', AttributeValueType.COLOR), ('hsl(0, 100%, 50%)',\n AttributeValueType.COLOR), ('hsla(120, 60%, 70%, 0.3)',\n AttributeValueType.COLOR), ('rgba(100%, 255, 0, 0)', AttributeValueType\n .COLOR), ('http://example.com', AttributeValueType.URL), (\n 'https://example.com', AttributeValueType.URL), 
('ftp://example.com',\n AttributeValueType.URL), ('example.com', AttributeValueType.STRING), (\n 'Foo', AttributeValueType.STRING), ('linear-gradient(red, yellow)',\n AttributeValueType.GRADIENT), ('radial-gradient(#0000, yellow)',\n AttributeValueType.GRADIENT)])\n", (30919, 31626), False, 'import pytest\n'), ((38507, 38826), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""product_type_attribute_type, gql_attribute_type"""', '((AttributeTypeEnum.PRODUCT, AttributeTypeEnum.VARIANT), (AttributeTypeEnum\n .VARIANT, AttributeTypeEnum.PRODUCT), (AttributeTypeEnum.PRODUCT,\n AttributeTypeEnum.PRODUCT), (AttributeTypeEnum.VARIANT,\n AttributeTypeEnum.VARIANT))'], {}), "('product_type_attribute_type, gql_attribute_type',\n ((AttributeTypeEnum.PRODUCT, AttributeTypeEnum.VARIANT), (\n AttributeTypeEnum.VARIANT, AttributeTypeEnum.PRODUCT), (\n AttributeTypeEnum.PRODUCT, AttributeTypeEnum.PRODUCT), (\n AttributeTypeEnum.VARIANT, AttributeTypeEnum.VARIANT)))\n", (38530, 38826), False, 'import pytest\n'), ((44097, 44361), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attribute, expected_value"""', "(('filterable_in_storefront', True), ('filterable_in_dashboard', True), (\n 'visible_in_storefront', True), ('available_in_grid', True), (\n 'value_required', False), ('storefront_search_position', 0))"], {}), "('attribute, expected_value', ((\n 'filterable_in_storefront', True), ('filterable_in_dashboard', True), (\n 'visible_in_storefront', True), ('available_in_grid', True), (\n 'value_required', False), ('storefront_search_position', 0)))\n", (44120, 44361), False, 'import pytest\n'), ((47616, 47809), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attribute_type, relation_field, backref_field"""', "(('VARIANT', 'variant_attributes', 'attributevariant'), ('PRODUCT',\n 'product_attributes', 'attributeproduct'))"], {}), "('attribute_type, relation_field, backref_field', ((\n 'VARIANT', 'variant_attributes', 'attributevariant'), ('PRODUCT',\n 'product_attributes', 'attributeproduct')))\n", (47639, 47809), False, 'import pytest\n'), ((56560, 56720), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort_field, m2m_model"""', "(('DASHBOARD_VARIANT_POSITION', AttributeVariant), (\n 'DASHBOARD_PRODUCT_POSITION', AttributeProduct))"], {}), "('sort_field, m2m_model', ((\n 'DASHBOARD_VARIANT_POSITION', AttributeVariant), (\n 'DASHBOARD_PRODUCT_POSITION', AttributeProduct)))\n", (56583, 56720), False, 'import pytest\n'), ((58178, 58230), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_variant"""', '(True, False)'], {}), "('is_variant', (True, False))\n", (58201, 58230), False, 'import pytest\n'), ((1643, 1691), 'saleor.graphql.product.mutations.attributes.validate_value_is_unique', 'validate_value_is_unique', (['color_attribute', 'value'], {}), '(color_attribute, value)\n', (1667, 1691), False, 'from saleor.graphql.product.mutations.attributes import validate_value_is_unique\n'), ((1803, 1877), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'color_attribute_without_values.id'], {}), "('Attribute', color_attribute_without_values.id)\n", (1829, 1877), False, 'import graphene\n'), ((2911, 2940), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (2930, 2940), False, 'from tests.api.utils import get_graphql_content\n'), ((3546, 3575), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (3565, 3575), False, 'from tests.api.utils 
import get_graphql_content\n'), ((4466, 4495), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (4485, 4495), False, 'from tests.api.utils import get_graphql_content\n'), ((8737, 8787), 'saleor.product.models.Attribute.objects.create', 'Attribute.objects.create', ([], {'name': '"""P"""', 'slug': '"""product"""'}), "(name='P', slug='product')\n", (8761, 8787), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((8823, 8873), 'saleor.product.models.Attribute.objects.create', 'Attribute.objects.create', ([], {'name': '"""V"""', 'slug': '"""variant"""'}), "(name='V', slug='variant')\n", (8847, 8873), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((9420, 9536), 'saleor.product.models.AttributeProduct.objects.create', 'AttributeProduct.objects.create', ([], {'attribute': 'unassigned_product_attribute', 'product_type': 'product_type', 'sort_order': '(0)'}), '(attribute=unassigned_product_attribute,\n product_type=product_type, sort_order=0)\n', (9451, 9536), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((9551, 9667), 'saleor.product.models.AttributeVariant.objects.create', 'AttributeVariant.objects.create', ([], {'attribute': 'unassigned_variant_attribute', 'product_type': 'product_type', 'sort_order': '(0)'}), '(attribute=unassigned_variant_attribute,\n product_type=product_type, sort_order=0)\n', (9582, 9667), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((10712, 10735), 'saleor.product.models.Attribute.objects.all', 'Attribute.objects.all', ([], {}), '()\n', (10733, 10735), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((11062, 11085), 'saleor.product.models.Attribute.objects.all', 'Attribute.objects.all', ([], {}), '()\n', (11083, 11085), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((11438, 11480), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Category"""', '(-1)'], {}), "('Category', -1)\n", (11464, 11480), False, 'import graphene\n'), ((11497, 11513), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (11511, 11513), False, 'from unittest import mock\n'), ((11523, 11596), 'saleor.graphql.product.filters.filter_attributes_by_product_types', 'filter_attributes_by_product_types', (['mocked_qs', '"""in_category"""', 'category_id'], {}), "(mocked_qs, 'in_category', category_id)\n", (11557, 11596), False, 'from saleor.graphql.product.filters import filter_attributes_by_product_types\n'), ((12538, 12602), 'saleor.product.models.Category.objects.create', 'Category.objects.create', ([], {'name': '"""Other Category"""', 'slug': '"""other-cat"""'}), "(name='Other Category', slug='other-cat')\n", (12561, 12602), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((12625, 12677), 
'saleor.product.models.Attribute.objects.create', 'Attribute.objects.create', ([], {'name': '"""Other"""', 'slug': '"""other"""'}), "(name='Other', slug='other')\n", (12649, 12677), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((12703, 12798), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', ([], {'name': '"""Other type"""', 'has_variants': '(True)', 'is_shipping_required': '(True)'}), "(name='Other type', has_variants=True,\n is_shipping_required=True)\n", (12729, 12798), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((13229, 13354), 'saleor.product.models.Collection.objects.create', 'Collection.objects.create', ([], {'name': '"""Other Collection"""', 'slug': '"""other-collection"""', 'is_published': '(True)', 'description': '"""Description"""'}), "(name='Other Collection', slug='other-collection',\n is_published=True, description='Description')\n", (13254, 13354), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((15478, 15507), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (15497, 15507), False, 'from tests.api.utils import get_graphql_content\n'), ((18415, 18444), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (18434, 18444), False, 'from tests.api.utils import get_graphql_content\n'), ((19751, 19804), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (19777, 19804), False, 'import graphene\n'), ((20021, 20050), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (20040, 20050), False, 'from tests.api.utils import get_graphql_content\n'), ((20512, 20565), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (20538, 20565), False, 'import graphene\n'), ((20634, 20698), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', 'attribute_value_id'], {}), "('AttributeValue', attribute_value_id)\n", (20660, 20698), False, 'import graphene\n'), ((20992, 21021), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (21011, 21021), False, 'from tests.api.utils import get_graphql_content\n'), ((21627, 21680), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (21653, 21680), False, 'import graphene\n'), ((21956, 21985), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (21975, 21985), False, 'from tests.api.utils import get_graphql_content\n'), ((22801, 22854), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (22827, 22854), False, 'import graphene\n'), ((23154, 23183), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (23173, 23183), False, 'from tests.api.utils import get_graphql_content\n'), ((23709, 23762), 'graphene.Node.to_global_id', 
'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (23735, 23762), False, 'import graphene\n'), ((23828, 23891), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', 'size_attribute.pk'], {}), "('AttributeValue', size_attribute.pk)\n", (23854, 23891), False, 'import graphene\n'), ((24196, 24225), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (24215, 24225), False, 'from tests.api.utils import get_graphql_content\n'), ((25031, 25084), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (25057, 25084), False, 'import graphene\n'), ((25250, 25279), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (25269, 25279), False, 'from tests.api.utils import get_graphql_content\n'), ((26145, 26198), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (26171, 26198), False, 'import graphene\n'), ((26412, 26441), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (26431, 26441), False, 'from tests.api.utils import get_graphql_content\n'), ((26982, 27035), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (27008, 27035), False, 'import graphene\n'), ((27282, 27311), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (27301, 27311), False, 'from tests.api.utils import get_graphql_content\n'), ((27748, 27801), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (27774, 27801), False, 'import graphene\n'), ((28056, 28085), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (28075, 28085), False, 'from tests.api.utils import get_graphql_content\n'), ((28897, 28951), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', 'value.id'], {}), "('AttributeValue', value.id)\n", (28923, 28951), False, 'import graphene\n'), ((29157, 29186), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (29176, 29186), False, 'from tests.api.utils import get_graphql_content\n'), ((29766, 29820), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', 'value.id'], {}), "('AttributeValue', value.id)\n", (29792, 29820), False, 'import graphene\n'), ((30021, 30050), 'tests.api.utils.get_graphql_content', 'get_graphql_content', (['response'], {}), '(response)\n', (30040, 30050), False, 'from tests.api.utils import get_graphql_content\n'), ((30610, 30664), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', 'value.id'], {}), "('AttributeValue', value.id)\n", (30636, 30664), False, 'import graphene\n'), ((34321, 34387), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', ([], {'name': '"""Default Type"""', 'has_variants': '(True)'}), "(name='Default Type', has_variants=True)\n", (34347, 34387), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((34417, 34475), 'graphene.Node.to_global_id', 
'graphene.Node.to_global_id', (['"""ProductType"""', 'product_type.pk'], {}), "('ProductType', product_type.pk)\n", (34443, 34475), False, 'import graphene\n'), ((36631, 36689), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductType"""', 'product_type.pk'], {}), "('ProductType', product_type.pk)\n", (36657, 36689), False, 'import graphene\n'), ((37828, 37886), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductType"""', 'product_type.pk'], {}), "('ProductType', product_type.pk)\n", (37854, 37886), False, 'import graphene\n'), ((39237, 39276), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', ([], {'name': '"""Type"""'}), "(name='Type')\n", (39263, 39276), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((39429, 39487), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductType"""', 'product_type.pk'], {}), "('ProductType', product_type.pk)\n", (39455, 39487), False, 'import graphene\n'), ((40959, 40998), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', ([], {'name': '"""Type"""'}), "(name='Type')\n", (40985, 40998), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((41028, 41086), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductType"""', 'product_type.pk'], {}), "('ProductType', product_type.pk)\n", (41054, 41086), False, 'import graphene\n'), ((41305, 41370), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'product_attributes[1].pk'], {}), "('Attribute', product_attributes[1].pk)\n", (41331, 41370), False, 'import graphene\n'), ((42531, 42570), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', ([], {'name': '"""Type"""'}), "(name='Type')\n", (42557, 42570), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((42600, 42658), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductType"""', 'product_type.pk'], {}), "('ProductType', product_type.pk)\n", (42626, 42658), False, 'import graphene\n'), ((44703, 44727), 'graphene.utils.str_converters.to_camel_case', 'to_camel_case', (['attribute'], {}), '(attribute)\n', (44716, 44727), False, 'from graphene.utils.str_converters import to_camel_case\n'), ((45980, 46025), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductType"""', '(-1)'], {}), "('ProductType', -1)\n", (46006, 46025), False, 'import graphene\n'), ((46045, 46088), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', '(-1)'], {}), "('Attribute', -1)\n", (46071, 46088), False, 'import graphene\n'), ((46868, 46913), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', ([], {'name': '"""Dummy Type"""'}), "(name='Dummy Type')\n", (46894, 46913), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((46936, 46994), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductType"""', 'product_type.id'], {}), "('ProductType', product_type.id)\n", (46962, 46994), False, 'import graphene\n'), ((47015, 
47074), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'color_attribute.id'], {}), "('Attribute', color_attribute.id)\n", (47041, 47074), False, 'import graphene\n'), ((48177, 48222), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', ([], {'name': '"""Dummy Type"""'}), "(name='Dummy Type')\n", (48203, 48222), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((48245, 48303), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductType"""', 'product_type.id'], {}), "('ProductType', product_type.id)\n", (48271, 48303), False, 'import graphene\n'), ((50257, 50300), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', '(-1)'], {}), "('Attribute', -1)\n", (50283, 50300), False, 'import graphene\n'), ((50316, 50364), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', '(-1)'], {}), "('AttributeValue', -1)\n", (50342, 50364), False, 'import graphene\n'), ((51111, 51170), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'color_attribute.id'], {}), "('Attribute', color_attribute.id)\n", (51137, 51170), False, 'import graphene\n'), ((51186, 51234), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', '(-1)'], {}), "('AttributeValue', -1)\n", (51212, 51234), False, 'import graphene\n'), ((51937, 52015), 'saleor.product.models.AttributeValue.objects.create', 'AttributeValue.objects.create', ([], {'attribute': 'attribute', 'name': '"""Green"""', 'slug': '"""green"""'}), "(attribute=attribute, name='Green', slug='green')\n", (51966, 52015), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((52182, 52235), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.id'], {}), "('Attribute', attribute.id)\n", (52208, 52235), False, 'import graphene\n'), ((57044, 57094), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', ([], {'name': '"""My Product Type"""'}), "(name='My Product Type')\n", (57070, 57094), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((59198, 59250), 'saleor.product.models.Attribute.objects.create', 'Attribute.objects.create', ([], {'name': '"""Other"""', 'slug': '"""other"""'}), "(name='Other', slug='other')\n", (59222, 59250), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((1290, 1320), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (1303, 1320), False, 'import pytest\n'), ((1507, 1549), 'saleor.product.models.AttributeValue', 'AttributeValue', ([], {'slug': '"""spanish-inquisition"""'}), "(slug='spanish-inquisition')\n", (1521, 1549), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((10748, 10797), 'saleor.graphql.product.filters.filter_attributes_by_product_types', 'filter_attributes_by_product_types', (['qs', '"""..."""', '""""""'], {}), "(qs, '...', '')\n", (10782, 10797), False, 
'from saleor.graphql.product.filters import filter_attributes_by_product_types\n'), ((10815, 10866), 'saleor.graphql.product.filters.filter_attributes_by_product_types', 'filter_attributes_by_product_types', (['qs', '"""..."""', 'None'], {}), "(qs, '...', None)\n", (10849, 10866), False, 'from saleor.graphql.product.filters import filter_attributes_by_product_types\n'), ((11096, 11130), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (11109, 11130), False, 'import pytest\n'), ((11147, 11208), 'saleor.graphql.product.filters.filter_attributes_by_product_types', 'filter_attributes_by_product_types', (['qs', '"""in_space"""', '"""a-value"""'], {}), "(qs, 'in_space', 'a-value')\n", (11181, 11208), False, 'from saleor.graphql.product.filters import filter_attributes_by_product_types\n'), ((12039, 12094), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Collection"""', 'collection.pk'], {}), "('Collection', collection.pk)\n", (12065, 12094), False, 'import graphene\n'), ((15761, 15784), 'django.template.defaultfilters.slugify', 'slugify', (['attribute_name'], {}), '(attribute_name)\n', (15768, 15784), False, 'from django.template.defaultfilters import slugify\n'), ((16209, 16222), 'django.template.defaultfilters.slugify', 'slugify', (['name'], {}), '(name)\n', (16216, 16222), False, 'from django.template.defaultfilters import slugify\n'), ((25389, 25438), 'pytest.raises', 'pytest.raises', (['attribute._meta.model.DoesNotExist'], {}), '(attribute._meta.model.DoesNotExist)\n', (25402, 25438), False, 'import pytest\n'), ((26639, 26652), 'django.template.defaultfilters.slugify', 'slugify', (['name'], {}), '(name)\n', (26646, 26652), False, 'from django.template.defaultfilters import slugify\n'), ((29375, 29388), 'django.template.defaultfilters.slugify', 'slugify', (['name'], {}), '(name)\n', (29382, 29388), False, 'from django.template.defaultfilters import slugify\n'), ((30814, 30859), 'pytest.raises', 'pytest.raises', (['value._meta.model.DoesNotExist'], {}), '(value._meta.model.DoesNotExist)\n', (30827, 30859), False, 'import pytest\n'), ((31781, 31820), 'saleor.graphql.product.types.attributes.resolve_attribute_value_type', 'resolve_attribute_value_type', (['raw_value'], {}), '(raw_value)\n', (31809, 31820), False, 'from saleor.graphql.product.types.attributes import resolve_attribute_value_type\n'), ((49384, 49419), 'saleor.graphql.core.utils.snake_to_camel_case', 'snake_to_camel_case', (['relation_field'], {}), '(relation_field)\n', (49403, 49419), False, 'from saleor.graphql.core.utils import snake_to_camel_case\n'), ((49574, 49614), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', (["attr['id']"], {}), "(attr['id'])\n", (49602, 49614), False, 'import graphene\n'), ((53359, 53399), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', (["attr['id']"], {}), "(attr['id'])\n", (53387, 53399), False, 'import graphene\n'), ((55233, 55286), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.pk'], {}), "('Attribute', attribute.pk)\n", (55259, 55286), False, 'import graphene\n'), ((60373, 60429), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""ProductVariant"""', 'variant.pk'], {}), "('ProductVariant', variant.pk)\n", (60399, 60429), False, 'import graphene\n'), ((60458, 60507), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Product"""', 'product.pk'], {}), "('Product', product.pk)\n", (60484, 60507), False, 'import graphene\n'), ((1372, 
1403), 'saleor.product.models.AttributeValue', 'AttributeValue', ([], {'slug': 'value.slug'}), '(slug=value.slug)\n', (1386, 1403), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((3366, 3425), 'saleor.product.models.Attribute.objects.get_visible_to_user', 'Attribute.objects.get_visible_to_user', (['user_api_client.user'], {}), '(user_api_client.user)\n', (3403, 3425), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((4020, 4043), 'saleor.product.models.Attribute.objects.all', 'Attribute.objects.all', ([], {}), '()\n', (4041, 4043), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((9082, 9156), 'saleor.product.models.AttributeValue', 'AttributeValue', ([], {'slug': '"""a"""', 'name': '"""A"""', 'attribute': 'unassigned_product_attribute'}), "(slug='a', name='A', attribute=unassigned_product_attribute)\n", (9096, 9156), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((9170, 9244), 'saleor.product.models.AttributeValue', 'AttributeValue', ([], {'slug': '"""b"""', 'name': '"""B"""', 'attribute': 'unassigned_product_attribute'}), "(slug='b', name='B', attribute=unassigned_product_attribute)\n", (9184, 9244), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((12162, 12213), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Category"""', 'category.pk'], {}), "('Category', category.pk)\n", (12188, 12213), False, 'import graphene\n'), ((12319, 12371), 'django.db.models.Q', 'Q', ([], {'attributeproduct__product_type_id': 'product_type.pk'}), '(attributeproduct__product_type_id=product_type.pk)\n', (12320, 12371), False, 'from django.db.models import Q\n'), ((12382, 12434), 'django.db.models.Q', 'Q', ([], {'attributevariant__product_type_id': 'product_type.pk'}), '(attributevariant__product_type_id=product_type.pk)\n', (12383, 12434), False, 'from django.db.models import Q\n'), ((13037, 13049), 'saleor.core.taxes.zero_money', 'zero_money', ([], {}), '()\n', (13047, 13049), False, 'from saleor.core.taxes import zero_money\n'), ((36774, 36827), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.pk'], {}), "('Attribute', attribute.pk)\n", (36800, 36827), False, 'import graphene\n'), ((37971, 38024), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.pk'], {}), "('Attribute', attribute.pk)\n", (37997, 38024), False, 'import graphene\n'), ((39918, 39971), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attribute.pk'], {}), "('Attribute', attribute.pk)\n", (39944, 39971), False, 'import graphene\n'), ((41523, 41588), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'product_attributes[0].pk'], {}), "('Attribute', product_attributes[0].pk)\n", (41549, 41588), False, 'import graphene\n'), ((42797, 42871), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'color_attribute_without_values.pk'], {}), "('Attribute', 
color_attribute_without_values.pk)\n", (42823, 42871), False, 'import graphene\n'), ((56109, 56148), 'saleor.product.models.Attribute', 'Attribute', ([], {'name': '"""MyAttribute"""', 'slug': '"""b"""'}), "(name='MyAttribute', slug='b')\n", (56118, 56148), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((56162, 56201), 'saleor.product.models.Attribute', 'Attribute', ([], {'name': '"""MyAttribute"""', 'slug': '"""a"""'}), "(name='MyAttribute', slug='a')\n", (56171, 56201), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((57843, 57872), 'saleor.product.models.Attribute', 'Attribute', ([], {'name': '"""A"""', 'slug': '"""b"""'}), "(name='A', slug='b')\n", (57852, 57872), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((57874, 57903), 'saleor.product.models.Attribute', 'Attribute', ([], {'name': '"""B"""', 'slug': '"""a"""'}), "(name='B', slug='a')\n", (57883, 57903), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((4193, 4253), 'saleor.product.models.Attribute.objects.get_visible_to_user', 'Attribute.objects.get_visible_to_user', (['staff_api_client.user'], {}), '(staff_api_client.user)\n', (4230, 4253), False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((34861, 34909), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attr_id'], {}), "('Attribute', attr_id)\n", (34887, 34909), False, 'import graphene\n'), ((35030, 35078), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attr_id'], {}), "('Attribute', attr_id)\n", (35056, 35078), False, 'import graphene\n'), ((35659, 35699), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', (["attr['id']"], {}), "(attr['id'])\n", (35687, 35699), False, 'import graphene\n'), ((35818, 35858), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', (["attr['id']"], {}), "(attr['id'])\n", (35846, 35858), False, 'import graphene\n'), ((48687, 48744), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attributes[0].pk'], {}), "('Attribute', attributes[0].pk)\n", (48713, 48744), False, 'import graphene\n'), ((48830, 48887), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""Attribute"""', 'attributes[2].pk'], {}), "('Attribute', attributes[2].pk)\n", (48856, 48887), False, 'import graphene\n'), ((52564, 52622), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', 'values[0].pk'], {}), "('AttributeValue', values[0].pk)\n", (52590, 52622), False, 'import graphene\n'), ((52708, 52766), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', (['"""AttributeValue"""', 'values[2].pk'], {}), "('AttributeValue', values[2].pk)\n", (52734, 52766), False, 'import graphene\n'), ((60763, 60816), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', (["attr['attribute']['id']"], {}), "(attr['attribute']['id'])\n", (60791, 60816), False, 'import graphene\n')] |
from pyinstrument import Profiler
p = Profiler(use_signal=False)
p.start()
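# func() recurses num levels deep (900 in the call below), giving the profiler a very deep call stack to record.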
def func(num):
if num == 0:
return
b = 0
for x in range(1,100000):
b += x
return func(num - 1)
func(900)
p.stop()
print(p.output_text())
with open('overflow_out.html', 'w') as f:
f.write(p.output_html())
| [
"pyinstrument.Profiler"
]
| [((39, 65), 'pyinstrument.Profiler', 'Profiler', ([], {'use_signal': '(False)'}), '(use_signal=False)\n', (47, 65), False, 'from pyinstrument import Profiler\n')] |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
from __future__ import print_function
from __future__ import division
import argparse
import sys
import struct
import re
import hashlib
try:
from elftools.elf.elffile import ELFFile
from elftools.elf.constants import SH_FLAGS
from elftools.elf.enums import ENUM_RELOC_TYPE_ARM
from elftools.elf.enums import ENUM_RELOC_TYPE_AARCH64
from elftools.elf.sections import SymbolTableSection
from elftools.elf.relocation import RelocationSection
except ImportError:
print("""
***
Can't find elftools module. Probably it is not installed on your system.
You can install this module with
$ apt install python3-pyelftools
if you are using Ubuntu. Or try to search for "pyelftools" or "elftools" in
your package manager if you are using some other distribution.
***
""")
raise
small_page_size = 4 * 1024
elffile_symbols = None
tee_pageable_bin = None
tee_pager_bin = None
tee_embdata_bin = None
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def round_up(n, m):
if n == 0:
return 0
else:
return (((n - 1) // m) + 1) * m
def get_arch_id(elffile):
e_machine = elffile.header['e_machine']
if e_machine == 'EM_ARM':
return 0
if e_machine == 'EM_AARCH64':
return 1
eprint('Unknown e_machine "%s"' % e_machine)
sys.exit(1)
def get_name(obj):
# Symbol or section .name might be a byte array or a string, we want a
# string
try:
name = obj.name.decode()
except (UnicodeDecodeError, AttributeError):
name = obj.name
return name
def get_symbol(elffile, name):
global elffile_symbols
global lsyms_def
if elffile_symbols is None:
elffile_symbols = dict()
lsyms_def = dict()
symbol_tables = [s for s in elffile.iter_sections()
if isinstance(s, SymbolTableSection)]
for section in symbol_tables:
for symbol in section.iter_symbols():
symbol_name = get_name(symbol)
if symbol['st_info']['bind'] == 'STB_GLOBAL':
elffile_symbols[symbol_name] = symbol
elif symbol['st_info']['bind'] == 'STB_LOCAL':
if symbol_name not in elffile_symbols.keys():
elffile_symbols[symbol_name] = symbol
if symbol_name not in lsyms_def.keys():
lsyms_def[symbol_name] = 1
else:
lsyms_def[symbol_name] += 1
if name in lsyms_def.keys() and lsyms_def[name] > 1:
eprint("Multiple definitions of local symbol %s" % name)
sys.exit(1)
if name not in elffile_symbols.keys():
eprint("Cannot find symbol %s" % name)
sys.exit(1)
return elffile_symbols[name]
def get_sections(elffile, pad_to, dump_names):
last_end = 0
bin_data = bytearray()
for section in elffile.iter_sections():
section_name = get_name(section)
if (section['sh_type'] == 'SHT_NOBITS' or
not (section['sh_flags'] & SH_FLAGS.SHF_ALLOC) or
not dump_names.match(section_name)):
continue
if last_end == 0:
bin_data = section.data()
else:
if section['sh_addr'] > last_end:
bin_data += bytearray(section['sh_addr'] - last_end)
bin_data += section.data()
last_end = section['sh_addr'] + section['sh_size']
if pad_to > last_end:
bin_data += bytearray(pad_to - last_end)
last_end = pad_to
return bin_data
def get_pageable_bin(elffile):
global tee_pageable_bin
if tee_pageable_bin is None:
pad_to = 0
dump_names = re.compile(r'^\..*_(pageable|init)$')
tee_pageable_bin = get_sections(elffile, pad_to, dump_names)
return tee_pageable_bin
def get_pager_bin(elffile):
global tee_pager_bin
if tee_pager_bin is None:
pad_to = get_symbol(elffile, '__data_end')['st_value']
dump_names = re.compile(
r'^\.(text|rodata|got|data|ARM\.exidx|ARM\.extab)$')
tee_pager_bin = get_sections(elffile, pad_to, dump_names)
return tee_pager_bin
def get_reloc_bin(elffile):
if get_arch_id(elffile) == 0:
exp_rel_type = ENUM_RELOC_TYPE_ARM['R_ARM_RELATIVE']
else:
exp_rel_type = ENUM_RELOC_TYPE_AARCH64['R_AARCH64_RELATIVE']
link_address = get_symbol(elffile, '__text_start')['st_value']
addrs = []
for section in elffile.iter_sections():
if not isinstance(section, RelocationSection):
continue
for rel in section.iter_relocations():
if rel['r_info_type'] == 0:
continue
if rel['r_info_type'] != exp_rel_type:
eprint("Unexpected relocation type 0x%x" %
rel['r_info_type'])
sys.exit(1)
addrs.append(rel['r_offset'] - link_address)
addrs.sort()
data = bytearray()
for a in addrs:
data += struct.pack('<I', a)
    # The relocations have been reduced to the relative type only: the addend
    # is stored at the relocation address (r_offset) and the loader increases
    # it by load_offset. The addresses (r_offset) are sorted. The format is
    # then:
# uint32_t: relocation #1
# uint32_t: relocation #2
# ...
# uint32_t: relocation #n
return data
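# Illustrative sketch only (not used by this script): a consumer of the table
# built above could recover the sorted relocation offsets with, e.g.,
#
#   offsets = [off for (off,) in struct.iter_unpack('<I', reloc_table)]
#
# where 'reloc_table' stands for the bytearray returned by get_reloc_bin().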
def get_hashes_bin(elffile):
pageable_bin = get_pageable_bin(elffile)
if len(pageable_bin) % small_page_size != 0:
        eprint("pageable size not a multiple of 4K: "
               "{}".format(len(pageable_bin)))
sys.exit(1)
data = bytearray()
for n in range(0, len(pageable_bin), small_page_size):
page = pageable_bin[n:n + small_page_size]
data += hashlib.sha256(page).digest()
return data
def get_embdata_bin(elffile):
global tee_embdata_bin
if tee_embdata_bin is None:
hashes_bin = get_hashes_bin(elffile)
reloc_bin = get_reloc_bin(elffile)
num_entries = 2
hash_offs = 2 * 4 + num_entries * (2 * 4)
hash_pad = round_up(len(hashes_bin), 8) - len(hashes_bin)
reloc_offs = hash_offs + len(hashes_bin) + hash_pad
reloc_pad = round_up(len(reloc_bin), 8) - len(reloc_bin)
total_len = reloc_offs + len(reloc_bin) + reloc_pad
tee_embdata_bin = struct.pack('<IIIIII', total_len, num_entries,
hash_offs, len(hashes_bin),
reloc_offs, len(reloc_bin))
tee_embdata_bin += hashes_bin + bytearray(hash_pad)
tee_embdata_bin += reloc_bin + bytearray(reloc_pad)
# The embedded data region is designed to be easy to extend when
# needed, it's formatted as:
# +---------------------------------------------------------+
# | uint32_t: Length of entire area including this field |
# +---------------------------------------------------------+
# | uint32_t: Number of entries "2" |
# +---------------------------------------------------------+
# | uint32_t: Offset of hashes from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of hashes |
# +---------------------------------------------------------+
# | uint32_t: Offset of relocations from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of relocations |
# +---------------------------------------------------------+
# | Data of hashes + eventual padding |
# +---------------------------------------------------------+
# | Data of relocations + eventual padding |
# +---------------------------------------------------------+
return tee_embdata_bin
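# Illustrative sketch only (not part of this script's flow): the fixed-size
# header packed above could be read back with, e.g.,
#
#   (total_len, num_entries, hash_offs, hash_len,
#    reloc_offs, reloc_len) = struct.unpack_from('<IIIIII', embdata)
#
# where 'embdata' stands for the bytes returned by get_embdata_bin().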
def output_pager_bin(elffile, outf):
outf.write(get_pager_bin(elffile))
def output_pageable_bin(elffile, outf):
outf.write(get_pageable_bin(elffile))
def get_init_load_addr(elffile):
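    # Split the 64-bit address of the _start symbol into high and low 32-bit words.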
init_load_addr = get_symbol(elffile, '_start')['st_value']
init_load_addr_hi = init_load_addr >> 32
init_load_addr_lo = init_load_addr & 0xffffffff
return init_load_addr_hi, init_load_addr_lo
def output_header_v1(elffile, outf):
arch_id = get_arch_id(elffile)
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(pager_bin)
paged_area_size = len(pageable_bin)
init_mem_usage = (get_symbol(elffile, '__get_tee_init_end')['st_value'] -
get_symbol(elffile, '__text_start')['st_value'] +
len(embdata_bin))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
len(embdata_bin))
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 1
flags = 0
outf.write(struct.pack('<IBBHIIIII', magic, version, arch_id, flags,
init_size, init_load_addr[0], init_load_addr[1],
init_mem_usage, paged_size))
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
outf.write(pageable_bin[init_bin_size:])
def output_header_v2(elffile, outf):
arch_id = get_arch_id(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(get_pager_bin(elffile))
paged_area_size = len(get_pageable_bin(elffile))
embdata_bin_size = len(get_embdata_bin(elffile))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
embdata_bin_size)
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 2
flags = 0
nb_images = 1 if paged_size == 0 else 2
outf.write(struct.pack('<IBBHI', magic, version, arch_id, flags,
nb_images))
outf.write(struct.pack('<IIII', init_load_addr[0], init_load_addr[1],
0, init_size))
if nb_images == 2:
outf.write(struct.pack('<IIII', 0xffffffff, 0xffffffff, 1, paged_size))
def output_pager_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
def output_pageable_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
outf.write(get_pageable_bin(elffile)[init_bin_size:])
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True, type=argparse.FileType('rb'),
help='The input tee.elf')
parser.add_argument('--out_tee_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee.bin')
parser.add_argument('--out_tee_pager_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager.bin')
parser.add_argument('--out_tee_pageable_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable.bin')
parser.add_argument('--out_header_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_header_v2.bin')
parser.add_argument('--out_pager_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager_v2.bin')
parser.add_argument('--out_pageable_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable_v2.bin')
return parser.parse_args()
def main():
args = get_args()
elffile = ELFFile(args.input)
if args.out_tee_bin:
output_header_v1(elffile, args.out_tee_bin)
if args.out_tee_pager_bin:
output_pager_bin(elffile, args.out_tee_pager_bin)
if args.out_tee_pageable_bin:
output_pageable_bin(elffile, args.out_tee_pageable_bin)
if args.out_header_v2:
output_header_v2(elffile, args.out_header_v2)
if args.out_pager_v2:
output_pager_v2(elffile, args.out_pager_v2)
if args.out_pageable_v2:
output_pageable_v2(elffile, args.out_pageable_v2)
if __name__ == "__main__":
main()
| [
"argparse.FileType",
"hashlib.sha256",
"argparse.ArgumentParser",
"re.compile",
"elftools.elf.elffile.ELFFile",
"struct.pack",
"sys.exit"
]
| [((1428, 1439), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1436, 1439), False, 'import sys\n'), ((11109, 11134), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11132, 11134), False, 'import argparse\n'), ((12419, 12438), 'elftools.elf.elffile.ELFFile', 'ELFFile', (['args.input'], {}), '(args.input)\n', (12426, 12438), False, 'from elftools.elf.elffile import ELFFile\n'), ((2758, 2769), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2766, 2769), False, 'import sys\n'), ((2868, 2879), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2876, 2879), False, 'import sys\n'), ((3833, 3870), 're.compile', 're.compile', (['"""^\\\\..*_(pageable|init)$"""'], {}), "('^\\\\..*_(pageable|init)$')\n", (3843, 3870), False, 'import re\n'), ((4137, 4202), 're.compile', 're.compile', (['"""^\\\\.(text|rodata|got|data|ARM\\\\.exidx|ARM\\\\.extab)$"""'], {}), "('^\\\\.(text|rodata|got|data|ARM\\\\.exidx|ARM\\\\.extab)$')\n", (4147, 4202), False, 'import re\n'), ((5141, 5161), 'struct.pack', 'struct.pack', (['"""<I"""', 'a'], {}), "('<I', a)\n", (5152, 5161), False, 'import struct\n'), ((5747, 5758), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5755, 5758), False, 'import sys\n'), ((9287, 9425), 'struct.pack', 'struct.pack', (['"""<IBBHIIIII"""', 'magic', 'version', 'arch_id', 'flags', 'init_size', 'init_load_addr[0]', 'init_load_addr[1]', 'init_mem_usage', 'paged_size'], {}), "('<IBBHIIIII', magic, version, arch_id, flags, init_size,\n init_load_addr[0], init_load_addr[1], init_mem_usage, paged_size)\n", (9298, 9425), False, 'import struct\n'), ((10268, 10332), 'struct.pack', 'struct.pack', (['"""<IBBHI"""', 'magic', 'version', 'arch_id', 'flags', 'nb_images'], {}), "('<IBBHI', magic, version, arch_id, flags, nb_images)\n", (10279, 10332), False, 'import struct\n'), ((10376, 10448), 'struct.pack', 'struct.pack', (['"""<IIII"""', 'init_load_addr[0]', 'init_load_addr[1]', '(0)', 'init_size'], {}), "('<IIII', init_load_addr[0], init_load_addr[1], 0, init_size)\n", (10387, 10448), False, 'import struct\n'), ((10519, 10578), 'struct.pack', 'struct.pack', (['"""<IIII"""', '(4294967295)', '(4294967295)', '(1)', 'paged_size'], {}), "('<IIII', 4294967295, 4294967295, 1, paged_size)\n", (10530, 10578), False, 'import struct\n'), ((11215, 11238), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (11232, 11238), False, 'import argparse\n'), ((11377, 11400), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (11394, 11400), False, 'import argparse\n'), ((11546, 11569), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (11563, 11569), False, 'import argparse\n'), ((11724, 11747), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (11741, 11747), False, 'import argparse\n'), ((11898, 11921), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (11915, 11921), False, 'import argparse\n'), ((12072, 12095), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (12089, 12095), False, 'import argparse\n'), ((12248, 12271), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (12265, 12271), False, 'import argparse\n'), ((4995, 5006), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5003, 5006), False, 'import sys\n'), ((5909, 5929), 'hashlib.sha256', 'hashlib.sha256', (['page'], {}), '(page)\n', (5923, 5929), False, 'import hashlib\n')] |
from django.contrib import admin
#from .models import *
from . import models
# Register your models here.
admin.site.register(models.ClimbModel)
| [
"django.contrib.admin.site.register"
]
| [((108, 146), 'django.contrib.admin.site.register', 'admin.site.register', (['models.ClimbModel'], {}), '(models.ClimbModel)\n', (127, 146), False, 'from django.contrib import admin\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-4.0
import os
import cv2
from collections import namedtuple
import imageio
from PIL import Image
from random import randrange
import numpy as np
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
import torch
import matplotlib
matplotlib.use('Agg') # Required for gif animations
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as image
import matplotlib.patches as patches
from multimodal_affinities.visualization.vis_handler import VisHandler
from multimodal_affinities.visualization.image_utils import resize_image
from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple
class PlotsProducer:
def __init__(self, document, output_path):
# Load background image
self.image_path = document.image_path
self.img = plt.imread(self.image_path)
self.img_opencv = cv2.imread(self.image_path)
dpi = 120
mpl.rcParams['figure.dpi'] = dpi
height = self.img.shape[0]
width = self.img.shape[1]
self.figsize = width / float(dpi), height / float(dpi) # Fig size in inches
self.document = document
self.output_path = output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
def plot_word_boxes_on_image(self):
set_of_words = [[word] for word in self.document.get_words()] # list of singleton word lists
fig, ax = plt.subplots(1, figsize=self.figsize)
monochrome_colors_list = ['#5a5d8f' for _ in self.document.get_words()]
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='',
entity_sets=set_of_words,
colors_list=monochrome_colors_list)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_word_boxes.png'))
plt.close(fig)
def save_phrase_detection_results(self):
set_of_phrases = [[phrase] for phrase in self.document.get_phrases()] # list of singleton phrase lists
fig, ax = plt.subplots(1, figsize=self.figsize)
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='Phrase Detection', entity_sets=set_of_phrases)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_phrase_detection.png'))
plt.close(fig)
def save_clustering_results(self, with_title=True, colors_list=None):
set_of_clusters = [cluster.words for cluster in self.document.get_clusters()] # list of list of words (clusters)
self._save_set_of_clusters(set_of_clusters, with_title, colors_list)
def save_clustering_labels(self, clustering_labels, colors_list=None):
cluster_ids = np.unique(np.array(clustering_labels))
cluster_id_to_cluster_idx = {cluster_id: idx for idx, cluster_id in enumerate(cluster_ids)}
# Converts from list of labels to list of list of words (clusters)
set_of_clusters = [[] for _ in range(len(cluster_ids))]
for word_idx, word in enumerate(self.document.get_words()):
cluster_id = clustering_labels[word_idx]
if cluster_id == -1: # Ignore non-clustered words
continue
cluster_idx = cluster_id_to_cluster_idx[cluster_id]
set_of_clusters[cluster_idx].append(word)
        self._save_set_of_clusters(set_of_clusters, colors_list=colors_list)
def _save_set_of_clusters(self, set_of_clusters, with_title=True, colors_list=None):
"""
:param document:
:param set_of_clusters: list of list of words (clusters)
:return:
"""
output_img = self._draw_entity_bounding_boxes_opencv(bg_img=self.img_opencv,
entity_sets=set_of_clusters,
colors_list=colors_list)
cv2.imwrite(os.path.join(self.output_path, self.document.basename + '_clustering.png'), output_img)
@staticmethod
def _draw_entity_bounding_boxes_opencv(bg_img, entity_sets, colors_list=None):
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
output_img = bg_img.copy()
alpha = 0.8
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
                # draw a filled rectangle over the entity's bounding box in the cluster color
rgb_color = rgb_hex_to_tuple(face_color)
cv2.rectangle(output_img, (int(x), int(y)), (int(x + width), int(y + height)),
(rgb_color[2], rgb_color[1], rgb_color[0]), cv2.FILLED)
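        # Alpha-blend the filled rectangles with the original image so the boxes appear translucent.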
output_img = cv2.addWeighted(output_img, alpha, bg_img, 1 - alpha, 0)
return output_img
@staticmethod
def _draw_entity_bounding_boxes(fig, ax, bg_img, title, entity_sets, colors_list=None):
ax.set_title(title)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
plt.imshow(bg_img)
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
rect = patches.Rectangle((x, y), width, height,
linewidth=2,
edgecolor=edge_color,
facecolor=face_color,
alpha=0.4)
ax.add_patch(rect)
@staticmethod
def plot_pca_embedding_space_for_clusters(document, output_path,
embedding_property='embedding',
title=''):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or getattr(words[0], embedding_property) is None:
return
if embedding_property == 'unprojected_embedding':
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
plot_title = embedding_property
if plot_title != '':
plot_title += ': ' + title
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=1, alpha=0.8)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
@staticmethod
def _find_k_furthest_words_per_cluster(document, embeddings_2d, k=3):
""" Greedy approximation algorithm for finding k furthest neighbour words per cluster.
k is expected to be relatively small (< 100)
"""
words = document.get_words()
word_to_embedding_2d_idx = {word: idx for idx, word in enumerate(words)}
clusters = document.get_clusters()
solution_per_cluster = {}
ClusterSolution = namedtuple('ClusterSolution', ['word_indices', 'words'])
for cluster in clusters:
# Generate cluster pairwise distances matrix
all_cluster_embeddings_indices = [word_to_embedding_2d_idx[word] for word in cluster.words]
all_cluster_embeddings = np.take(embeddings_2d, all_cluster_embeddings_indices, axis=0)
pairwise_distances = pdist(all_cluster_embeddings, metric='euclidean')
distances_matrix = squareform(pairwise_distances)
# Total distance from selected set so far
distances_accumulator = np.zeros(len(cluster.words))
# Sample first point
random_index = randrange(len(cluster.words))
# Indices of selected points
selected_points = [random_index]
# How many points we need to add
points_to_calc_count = min(k - 1, len(words) - 1)
for _ in range(points_to_calc_count):
last_point_selected = selected_points[-1]
# Update accumulator with distance collected from last point
distances_accumulator += distances_matrix[last_point_selected]
# Eliminate last point selected from distance matrix & accumulator
                distances_matrix[:, last_point_selected] = 0
                distances_matrix[last_point_selected, :] = 0
                distances_accumulator[last_point_selected] = 0
                furthest_point_from_set = np.argmax(distances_accumulator, axis=0)
                selected_points.append(furthest_point_from_set)
selected_words = [cluster.words[point] for point in selected_points]
selected_word_indices = [word_to_embedding_2d_idx[word] for word in selected_words]
solution_per_cluster[cluster] = ClusterSolution(word_indices=selected_word_indices, words=selected_words)
return solution_per_cluster
@staticmethod
def _extract_crops_per_cluster_solution(document, solution_per_cluster):
"""
Extracts crops for each selected word in k-furthest neighbours solution
:param document:
:param solution_per_cluster: Solution of k-furthest neighbours
:return:
"""
word_indices_to_crops = {}
for cluster, cluster_solution in solution_per_cluster.items():
for word_index, word in zip(cluster_solution.word_indices, cluster_solution.words):
bbox = word.get_bbox() # left, top, width, height
y_min = int(round(bbox[1] * document.height))
y_max = int(round((bbox[1] + bbox[3]) * document.height))
x_min = int(round(bbox[0] * document.width))
x_max = int(round((bbox[0] + bbox[2]) * document.width))
image_of_crop = document.image[max(0, y_min):min(y_max, document.height),
max(0, x_min):min(x_max, document.width), :]
pil_image = Image.fromarray(image_of_crop[...,::-1]) # BGR to RGB
pil_image = pil_image.convert('RGB')
word_indices_to_crops[word_index] = pil_image
return word_indices_to_crops
@staticmethod
def _space_out_crops(indices_to_crops, words, x_list, y_list, dist_from_pt=0.01, height=0.02):
"""
Calculates the positions and dimensions of crop images on the embedding space plot.
        Makes sure crops don't overlap each other.
This method assumes a small number of crops (< 1000) and performs a naive linear comparison for each crop.
:param indices_to_crops: dict of word index (by order in doc) to PIL crop
:param words: List of words
:param x_list: List of corresponding pt x positions
:param y_list: List of corresponding pt y positions
:param dist_from_pt: How far in (x-y) coords the crop should be placed from the plot
:param height: Height of the crop, in figure axes dimensions (note: for normalized pca space: -1 to 1)
        :return: indices_to_extents: dict of word index to extents describing position and dimensions of each crop.
Crops are shifted so they don't cover each other,
"""
indices_to_extents = {}
MatplotExtent = namedtuple('matplot_extent', ['left', 'right', 'bottom', 'top'])
is_extent_x_intersect = lambda e1, e2: not (e1.right < e2.left or e1.left > e2.right)
is_extent_y_intersect = lambda e1, e2: not (e1.top > e2.bottom or e1.bottom < e2.top)
is_extent_intersect = lambda e1, e2: is_extent_x_intersect(e1, e2) and is_extent_y_intersect(e1, e2)
min_x, max_x = min(x_list), max(x_list)
min_y, max_y = min(y_list), max(y_list)
height = (max_y - min_y) * height
dist_from_pt = min(max_y - min_y, max_x - min_x) * dist_from_pt
for point_index, crop in indices_to_crops.items():
word_aspect_ratio = words[point_index].geometry.width / words[point_index].geometry.height
axis_ratio = (max_x-min_x) / (max_y-min_y) / 2
width = height * word_aspect_ratio * axis_ratio
left, right = x_list[point_index] + dist_from_pt, x_list[point_index] + dist_from_pt + width
bottom, top = y_list[point_index] + dist_from_pt + height, y_list[point_index] + dist_from_pt
overlap = True
while overlap:
overlap = False
extent = MatplotExtent(left, right, bottom, top)
for other_crop_extent in indices_to_extents.values():
other_left, other_right, other_bottom, other_top = other_crop_extent
spaceout_margin = dist_from_pt / 2
if is_extent_intersect(extent, other_crop_extent):
overlap = True
# shift below
if other_bottom <= top <= other_top:
top = other_bottom + spaceout_margin
bottom = top + height
else: # shift above
bottom = other_top - spaceout_margin
top = bottom - height
continue
indices_to_extents[point_index] = extent
return indices_to_extents
def plot_clusters_and_embedding_space_with_crops(self, document, output_path, crops_per_cluster=3,
embedding_properties=['embedding', 'unprojected_embedding'],
unprojected_caption=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or \
all([getattr(words[0], embedding_property) is None for embedding_property in embedding_properties]):
return
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
# Initially empty, the first embedding property we process will set those for all figures
selected_word_crops_per_cluster = None
indices_to_crops = None
for embedding_property in embedding_properties:
if embedding_property == 'unprojected_embedding': # Can't handle tuples, concat them
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
if crops_per_cluster > 0:
if selected_word_crops_per_cluster is None and indices_to_crops is None: # Calculate per first attribute
selected_word_crops_per_cluster = PlotsProducer._find_k_furthest_words_per_cluster(document, embeddings_2d, k=crops_per_cluster)
indices_to_crops = PlotsProducer._extract_crops_per_cluster_solution(document, selected_word_crops_per_cluster)
indices_to_extents = PlotsProducer._space_out_crops(indices_to_crops, words,
x_list, y_list, dist_from_pt=0.02, height=0.04)
# Plot crop images
for point_index, crop in indices_to_crops.items():
extent = indices_to_extents[point_index]
rect = patches.Rectangle((extent.left, extent.top), extent.right-extent.left, extent.bottom-extent.top,
linewidth=0.5,
edgecolor="black",
facecolor="none",
zorder=5)
ax.imshow(crop, aspect='auto', alpha=0.65, extent=extent, zorder=4)
ax.add_patch(rect)
# Plot points
if embedding_property == 'unprojected_embedding':
plot_title = 'Initial unprojected embeddings, pre training (PCA)'
else:
if unprojected_caption is None:
plot_title = 'Projected embeddings, post training (PCA)'
else:
plot_title = unprojected_caption
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
# Finally plot clusters on original image
self.save_clustering_results(with_title=False, colors_list=colors_palette)
return colors_palette
@staticmethod
def animate_pca_embedding_space_for_clusters(document, output_path, embeddings_history, colors_palette=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or embeddings_history is None or len(embeddings_history) == 0:
return
if colors_palette is None:
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
scatter_data = []
for state_idx, embeddings_state in enumerate(embeddings_history):
epoch = state_idx + 1
normalized_embeddings_dict = embeddings_state['normalized']
unnormalized_embeddings_dict = embeddings_state['unnormalized']
if len(normalized_embeddings_dict) > 0:
normalized_embeddings = [normalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = normalized_embeddings
elif len(unnormalized_embeddings_dict) > 0:
unnormalized_embeddings = [unnormalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = unnormalized_embeddings
else:
return
embeddings_array = np.array(chosen_embedding).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
push_pull_ratio = embeddings_state['push_pull_ratio']
scatter_data.append((epoch, x_list, y_list, push_pull_ratio))
min_x = min(min(scatter_data, key=lambda entry: min(entry[1]))[1])
max_x = max(max(scatter_data, key=lambda entry: max(entry[1]))[1])
min_y = min(min(scatter_data, key=lambda entry: min(entry[2]))[2])
max_y = max(max(scatter_data, key=lambda entry: max(entry[2]))[2])
padding_factor = 0.1
min_x -= (max_x - min_x) * padding_factor
max_x += (max_x - min_x) * padding_factor
min_y -= (max_y - min_y) * padding_factor
max_y += (max_y - min_y) * padding_factor
frames = []
for epoch, x_list, y_list, push_pull_ratio in scatter_data:
fig, ax = plt.subplots(1)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
plot_title = 'Projected embeddings at epoch #' + str(epoch) + ' (PCA)'
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
            # Used to return the plot as an image array
fig.tight_layout()
fig.canvas.draw() # draw the canvas, cache the renderer
output_frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
output_frame = output_frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))
frames.append(output_frame)
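        # Write the per-epoch frames out as an animated GIF at 2 frames per second.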
imageio.mimsave(os.path.join(output_path, document.basename + '_embeddings_history.gif'), frames, fps=2)
| [
"numpy.array",
"matplotlib.pyplot.imshow",
"os.path.exists",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.close",
"cv2.addWeighted",
"numpy.take",
"matplotlib.pyplot.scatter",
"collections.namedtuple",
"scipy.spatial.distance.squareform",
"matplotlib.use",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.tick_params",
"numpy.argmax",
"multimodal_affinities.visualization.colors_util.rgb_hex_to_tuple",
"matplotlib.pyplot.title",
"cv2.imread",
"torch.cat",
"PIL.Image.fromarray",
"matplotlib.patches.Rectangle",
"os.makedirs",
"matplotlib.pyplot.imread",
"multimodal_affinities.visualization.vis_handler.VisHandler.generate_darker_palette",
"os.path.join",
"matplotlib.pyplot.subplots"
]
| [((370, 391), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (384, 391), False, 'import matplotlib\n'), ((939, 966), 'matplotlib.pyplot.imread', 'plt.imread', (['self.image_path'], {}), '(self.image_path)\n', (949, 966), True, 'import matplotlib.pyplot as plt\n'), ((993, 1020), 'cv2.imread', 'cv2.imread', (['self.image_path'], {}), '(self.image_path)\n', (1003, 1020), False, 'import cv2\n'), ((1551, 1588), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'self.figsize'}), '(1, figsize=self.figsize)\n', (1563, 1588), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2056), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2051, 2056), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2270), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'self.figsize'}), '(1, figsize=self.figsize)\n', (2245, 2270), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2564), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2559, 2564), True, 'import matplotlib.pyplot as plt\n'), ((4550, 4597), 'multimodal_affinities.visualization.vis_handler.VisHandler.generate_darker_palette', 'VisHandler.generate_darker_palette', (['colors_list'], {}), '(colors_list)\n', (4584, 4597), False, 'from multimodal_affinities.visualization.vis_handler import VisHandler\n'), ((5398, 5454), 'cv2.addWeighted', 'cv2.addWeighted', (['output_img', 'alpha', 'bg_img', '(1 - alpha)', '(0)'], {}), '(output_img, alpha, bg_img, 1 - alpha, 0)\n', (5413, 5454), False, 'import cv2\n'), ((5630, 5762), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""off"""', 'right': '"""off"""', 'left': '"""off"""', 'labelleft': '"""off"""'}), "(axis='both', which='both', bottom='off', top='off',\n labelbottom='off', right='off', left='off', labelleft='off')\n", (5645, 5762), True, 'import matplotlib.pyplot as plt\n'), ((5816, 5834), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bg_img'], {}), '(bg_img)\n', (5826, 5834), True, 'import matplotlib.pyplot as plt\n'), ((6081, 6128), 'multimodal_affinities.visualization.vis_handler.VisHandler.generate_darker_palette', 'VisHandler.generate_darker_palette', (['colors_list'], {}), '(colors_list)\n', (6115, 6128), False, 'from multimodal_affinities.visualization.vis_handler import VisHandler\n'), ((8798, 8813), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (8810, 8813), True, 'import matplotlib.pyplot as plt\n'), ((8930, 8951), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (8939, 8951), True, 'import matplotlib.pyplot as plt\n'), ((8960, 9013), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_list', 'y_list'], {'c': 'colors', 's': '(1)', 'alpha': '(0.8)'}), '(x_list, y_list, c=colors, s=1, alpha=0.8)\n', (8971, 9013), True, 'import matplotlib.pyplot as plt\n'), ((9156, 9170), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9165, 9170), True, 'import matplotlib.pyplot as plt\n'), ((9649, 9705), 'collections.namedtuple', 'namedtuple', (['"""ClusterSolution"""', "['word_indices', 'words']"], {}), "('ClusterSolution', ['word_indices', 'words'])\n", (9659, 9705), False, 'from collections import namedtuple\n'), ((13820, 13884), 'collections.namedtuple', 'namedtuple', (['"""matplot_extent"""', "['left', 'right', 'bottom', 'top']"], {}), "('matplot_extent', ['left', 'right', 'bottom', 'top'])\n", (13830, 13884), False, 'from collections 
import namedtuple\n'), ((1324, 1351), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (1338, 1351), False, 'import os\n'), ((1365, 1389), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (1376, 1389), False, 'import os\n'), ((1958, 2032), 'os.path.join', 'os.path.join', (['self.output_path', "(self.document.basename + '_word_boxes.png')"], {}), "(self.output_path, self.document.basename + '_word_boxes.png')\n", (1970, 2032), False, 'import os\n'), ((2460, 2545), 'os.path.join', 'os.path.join', (['self.output_path', "(self.document.basename + '_phrase_detection.png')"], {}), "(self.output_path, self.document.basename + '_phrase_detection.png'\n )\n", (2472, 2545), False, 'import os\n'), ((2947, 2974), 'numpy.array', 'np.array', (['clustering_labels'], {}), '(clustering_labels)\n', (2955, 2974), True, 'import numpy as np\n'), ((4113, 4187), 'os.path.join', 'os.path.join', (['self.output_path', "(self.document.basename + '_clustering.png')"], {}), "(self.output_path, self.document.basename + '_clustering.png')\n", (4125, 4187), False, 'import os\n'), ((7415, 7442), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (7429, 7442), False, 'import os\n'), ((7456, 7480), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (7467, 7480), False, 'import os\n'), ((9062, 9150), 'os.path.join', 'os.path.join', (['output_path', "(document.basename + '_' + embedding_property + '_pca.png')"], {}), "(output_path, document.basename + '_' + embedding_property +\n '_pca.png')\n", (9074, 9150), False, 'import os\n'), ((9937, 9999), 'numpy.take', 'np.take', (['embeddings_2d', 'all_cluster_embeddings_indices'], {'axis': '(0)'}), '(embeddings_2d, all_cluster_embeddings_indices, axis=0)\n', (9944, 9999), True, 'import numpy as np\n'), ((10033, 10082), 'scipy.spatial.distance.pdist', 'pdist', (['all_cluster_embeddings'], {'metric': '"""euclidean"""'}), "(all_cluster_embeddings, metric='euclidean')\n", (10038, 10082), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((10114, 10144), 'scipy.spatial.distance.squareform', 'squareform', (['pairwise_distances'], {}), '(pairwise_distances)\n', (10124, 10144), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((16469, 16496), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (16483, 16496), False, 'import os\n'), ((16510, 16534), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (16521, 16534), False, 'import os\n'), ((18261, 18276), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (18273, 18276), True, 'import matplotlib.pyplot as plt\n'), ((19986, 20007), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (19995, 20007), True, 'import matplotlib.pyplot as plt\n'), ((20020, 20123), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_list', 'y_list'], {'c': 'colors', 's': '(18)', 'alpha': '(1.0)', 'edgecolors': '"""black"""', 'linewidth': '(1.0)', 'zorder': '(3)'}), "(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black',\n linewidth=1.0, zorder=3)\n", (20031, 20123), True, 'import matplotlib.pyplot as plt\n'), ((20132, 20264), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""off"""', 'right': '"""off"""', 'left': '"""off"""', 'labelleft': '"""off"""'}), "(axis='both', which='both', bottom='off', top='off',\n 
labelbottom='off', right='off', left='off', labelleft='off')\n", (20147, 20264), True, 'import matplotlib.pyplot as plt\n'), ((20563, 20577), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (20572, 20577), True, 'import matplotlib.pyplot as plt\n'), ((21189, 21216), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (21203, 21216), False, 'import os\n'), ((21230, 21254), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (21241, 21254), False, 'import os\n'), ((23723, 23738), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (23735, 23738), True, 'import matplotlib.pyplot as plt\n'), ((23910, 23931), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (23919, 23931), True, 'import matplotlib.pyplot as plt\n'), ((23945, 24048), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_list', 'y_list'], {'c': 'colors', 's': '(18)', 'alpha': '(1.0)', 'edgecolors': '"""black"""', 'linewidth': '(1.0)', 'zorder': '(3)'}), "(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black',\n linewidth=1.0, zorder=3)\n", (23956, 24048), True, 'import matplotlib.pyplot as plt\n'), ((24057, 24189), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""off"""', 'right': '"""off"""', 'left': '"""off"""', 'labelleft': '"""off"""'}), "(axis='both', which='both', bottom='off', top='off',\n labelbottom='off', right='off', left='off', labelleft='off')\n", (24072, 24189), True, 'import matplotlib.pyplot as plt\n'), ((24730, 24802), 'os.path.join', 'os.path.join', (['output_path', "(document.basename + '_embeddings_history.gif')"], {}), "(output_path, document.basename + '_embeddings_history.gif')\n", (24742, 24802), False, 'import os\n'), ((5166, 5194), 'multimodal_affinities.visualization.colors_util.rgb_hex_to_tuple', 'rgb_hex_to_tuple', (['face_color'], {}), '(face_color)\n', (5182, 5194), False, 'from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple\n'), ((6571, 6683), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x, y)', 'width', 'height'], {'linewidth': '(2)', 'edgecolor': 'edge_color', 'facecolor': 'face_color', 'alpha': '(0.4)'}), '((x, y), width, height, linewidth=2, edgecolor=edge_color,\n facecolor=face_color, alpha=0.4)\n', (6588, 6683), True, 'import matplotlib.patches as patches\n'), ((7815, 7873), 'torch.cat', 'torch.cat', (["word.unprojected_embedding['embeddings']"], {'dim': '(1)'}), "(word.unprojected_embedding['embeddings'], dim=1)\n", (7824, 7873), False, 'import torch\n'), ((8480, 8500), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (8488, 8500), True, 'import numpy as np\n'), ((8560, 8590), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_pca_comp'}), '(n_components=num_pca_comp)\n', (8563, 8590), False, 'from sklearn.decomposition import PCA\n'), ((11053, 11093), 'numpy.argmax', 'np.argmax', (['distances_accumulator'], {'axis': '(0)'}), '(distances_accumulator, axis=0)\n', (11062, 11093), True, 'import numpy as np\n'), ((12540, 12581), 'PIL.Image.fromarray', 'Image.fromarray', (['image_of_crop[..., ::-1]'], {}), '(image_of_crop[..., ::-1])\n', (12555, 12581), False, 'from PIL import Image\n'), ((20465, 20553), 'os.path.join', 'os.path.join', (['output_path', "(document.basename + '_' + embedding_property + '_pca.png')"], {}), "(output_path, document.basename + '_' + embedding_property +\n '_pca.png')\n", 
(20477, 20553), False, 'import os\n'), ((17558, 17616), 'torch.cat', 'torch.cat', (["word.unprojected_embedding['embeddings']"], {'dim': '(1)'}), "(word.unprojected_embedding['embeddings'], dim=1)\n", (17567, 17616), False, 'import torch\n'), ((17923, 17943), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (17931, 17943), True, 'import numpy as np\n'), ((18011, 18041), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_pca_comp'}), '(n_components=num_pca_comp)\n', (18014, 18041), False, 'from sklearn.decomposition import PCA\n'), ((19119, 19291), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(extent.left, extent.top)', '(extent.right - extent.left)', '(extent.bottom - extent.top)'], {'linewidth': '(0.5)', 'edgecolor': '"""black"""', 'facecolor': '"""none"""', 'zorder': '(5)'}), "((extent.left, extent.top), extent.right - extent.left, \n extent.bottom - extent.top, linewidth=0.5, edgecolor='black', facecolor\n ='none', zorder=5)\n", (19136, 19291), True, 'import matplotlib.patches as patches\n'), ((22621, 22647), 'numpy.array', 'np.array', (['chosen_embedding'], {}), '(chosen_embedding)\n', (22629, 22647), True, 'import numpy as np\n'), ((22715, 22745), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_pca_comp'}), '(n_components=num_pca_comp)\n', (22718, 22745), False, 'from sklearn.decomposition import PCA\n')] |
import re
import lxml.html
from openstates.utils import LXMLMixin
from billy.scrape.legislators import LegislatorScraper, Legislator
class DELegislatorScraper(LegislatorScraper,LXMLMixin):
jurisdiction = 'de'
def scrape(self, chamber, term):
url = {
'upper': 'http://legis.delaware.gov/legislature.nsf/sen?openview',
'lower': 'http://legis.delaware.gov/Legislature.nsf/Reps?openview',
}[chamber]
doc = self.lxmlize(url)
if chamber == "upper":
#for the senate, it's the same table
#but the html is hard-coded in js.
table_js = doc.xpath('.//script')[-1].text_content()
table = None
for line in table_js.split("\n"):
if line.strip().startswith("var") and "sen=" in line:
table = line.replace("var","")
table = table.replace('sen="<','<')
table = table.replace('>";','>')
break
assert table is not None, "Senate table could not be found"
table = lxml.html.fromstring(table)
table.make_links_absolute(url)
trs = table.xpath('//tr')
else:
#same table for the house, but kindly in actual html
trs = doc.xpath('//tr')
base_url = "http://legis.delaware.gov"
for tr in trs:
name_and_url = tr.xpath('.//a')[0]
bio_url = name_and_url.attrib["href"]
bio_url = bio_url.replace("JavaScript:window.top.location.href=","")
bio_url = bio_url.replace('"','')
name = name_and_url.text_content()
if name.strip() == "." or name.strip() == "":
continue
if name.strip().lower().startswith("vacant"):
continue
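            # Normalize runs of whitespace inside the name to single spaces.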
re_spaces=re.compile(r'\s{1,5}')
name = ' '.join(re_spaces.split(name))
district = tr.xpath('.//td')[2].text_content()
district = district.replace("District:","").strip()
leg = self.scrape_bio(term, chamber, district, name, bio_url)
leg.add_source(bio_url, page="legislator detail page")
leg.add_source(url, page="legislator list page")
self.save_legislator(leg)
def scrape_bio(self, term, chamber, district, name, url):
# this opens the committee section without having to do another request
url += '&TableRow=1.5.5'
frame_doc = self.lxmlize(url)
actual_url = frame_doc.xpath("//frame[@name='right']/@src")[0]
doc = self.lxmlize(actual_url)
# party is in one of these
party = doc.xpath('//div[@id="page_header"]')[0].text.strip()[-3:]
if '(D)' in party:
party = 'Democratic'
elif '(R)' in party:
party = 'Republican'
else:
raise AssertionError("No party found for {name}".format(name=name))
leg = Legislator(term, chamber, district, name, party=party)
photo_url = doc.xpath('//img[contains(@src, "jpg")]/@src')
if photo_url:
leg['photo_url'] = photo_url[0]
contact_info = self.scrape_contact_info(doc)
leg.update(contact_info)
return leg
def scrape_contact_info(self, doc):
# Email
email = doc.xpath(".//a[contains(@href,'mailto')]")
email = email[0].text_content().strip()
leg_email = None
dist_email = None
try:
emails = email.split(";")
except AttributeError:
pass
else:
for e in emails:
e = e.strip()
if e:
if "state.de.us" in e:
leg_email = e
else:
dist_email = e
# Offices
leg_office = dict(name="Capitol Office", type="capitol",
phone=None, fax=None, email=leg_email, address=None)
dist_office = dict(name="Outside Office", type="capitol",
phone=None,fax=None, email=dist_email, address=None)
#this is enormously painful, DE.
office_list = doc.xpath("//tr")
for office in office_list:
title_td = 0
#in some trs the photo is the first td
if len(office.xpath("./td/img")) > 0:
title_td = 1
try:
title_text = office.xpath("./td")[title_td].text_content().lower()
content = office.xpath("./td")[title_td+1].text_content()
except IndexError:
continue
leg_office = self.add_contact("legislative",
title_text,content,leg_office)
dist_office = self.add_contact("outside",
title_text,content,dist_office)
offices = [o for o in [leg_office,dist_office] if o["address"]]
assert len(offices) > 0, "No offices with addresses found "\
"make sure we're not losing any data."
return {"offices":offices}
def add_contact(self,office_type,
title_text,content,office):
#office type is the name of the office
#either "legislative" or "outside"
if "{} office".format(office_type) in title_text:
office["address"] = content.strip()
if "{} phone".format(office_type) in title_text:
phones = content.lower().split("\n")
if len(phones) == 1:
phone = self.clean_phone(phones[0])
if phone:
office["phone"] = phone
else:
for line in phones:
if "phone" in line:
phone = self.clean_phone(line)
if phone:
office["phone"] = phone
elif "fax" in line:
phone = self.clean_phone(line)
if phone:
office["fax"] = phone
return office
def clean_phone(self,phone):
if not phone.strip():
return
        if not re.search(r"\d", phone):
            return
        if ":" not in phone:
return phone
return phone.split(":")[1].strip()
| [
"re.search",
"billy.scrape.legislators.Legislator",
"re.compile"
]
| [((2963, 3017), 'billy.scrape.legislators.Legislator', 'Legislator', (['term', 'chamber', 'district', 'name'], {'party': 'party'}), '(term, chamber, district, name, party=party)\n', (2973, 3017), False, 'from billy.scrape.legislators import LegislatorScraper, Legislator\n'), ((1858, 1880), 're.compile', 're.compile', (['"""\\\\s{1,5}"""'], {}), "('\\\\s{1,5}')\n", (1868, 1880), False, 'import re\n'), ((6154, 6177), 're.search', 're.search', (['"""\\\\d"""', 'phone'], {}), "('\\\\d', phone)\n", (6163, 6177), False, 'import re\n')] |
import os
import numpy as np
save_stem='extra_vis_friday_harbor'
data_dir='../../data/sdk_new_100'
resolution=100
cre=False
source_acronyms=['VISal','VISam','VISl','VISp','VISpl','VISpm',
'VISli','VISpor','VISrl','VISa']
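# 10 logarithmically spaced regularization strengths, from 1e3 to 1e12.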
lambda_list = np.logspace(3,12,10)
scale_lambda=True
min_vox=0
# save_file_name='visual_output.hdf5'
#source_coverage=0.90
source_coverage=0.95
#source_shell = 1
source_shell=None
save_dir=os.path.join('../../data/connectivities',save_stem)
experiments_fn=None
target_acronyms=source_acronyms
solver=os.path.abspath('../smoothness_c/solve')
cmdfile=os.path.join(save_dir,'model_fitting_cmds')
selected_fit_cmds=os.path.join(save_dir,'model_fitting_after_selection_cmds')
save_mtx=True
cross_val_matrices=True
cross_val=5
fit_gaussian=False
select_one_lambda=False
if select_one_lambda:
lambda_fn='lambda_opt'
else:
lambda_fn='lambda_ipsi_contra_opt'
laplacian='free'
shuffle_seed=666
max_injection_volume=0.7
| [
"os.path.abspath",
"numpy.logspace",
"os.path.join"
]
| [((253, 275), 'numpy.logspace', 'np.logspace', (['(3)', '(12)', '(10)'], {}), '(3, 12, 10)\n', (264, 275), True, 'import numpy as np\n'), ((428, 480), 'os.path.join', 'os.path.join', (['"""../../data/connectivities"""', 'save_stem'], {}), "('../../data/connectivities', save_stem)\n", (440, 480), False, 'import os\n'), ((539, 579), 'os.path.abspath', 'os.path.abspath', (['"""../smoothness_c/solve"""'], {}), "('../smoothness_c/solve')\n", (554, 579), False, 'import os\n'), ((588, 632), 'os.path.join', 'os.path.join', (['save_dir', '"""model_fitting_cmds"""'], {}), "(save_dir, 'model_fitting_cmds')\n", (600, 632), False, 'import os\n'), ((650, 710), 'os.path.join', 'os.path.join', (['save_dir', '"""model_fitting_after_selection_cmds"""'], {}), "(save_dir, 'model_fitting_after_selection_cmds')\n", (662, 710), False, 'import os\n')] |
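For reference, the regularization grid defined above is ten values spaced evenly in log10 between 1e3 and 1e12; a quick check with plain numpy (no project code needed):

import numpy as np

lambda_list = np.logspace(3, 12, 10)
# -> [1.e+03 1.e+04 1.e+05 1.e+06 1.e+07 1.e+08 1.e+09 1.e+10 1.e+11 1.e+12]
print(lambda_list)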
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 09:42:39 2020
@author: niklas
"""
from mossepy.mosse_tracker import MOSSE
# choose position of object in first frame
# that should be done by mouse click
objPos = [256, 256]
# choose tracker type
tracker = MOSSE()
# initialize object position in first frame
tracker.setObjPos(objPos)
# start tracking
tracker.trackImg() | [
"mossepy.mosse_tracker.MOSSE"
]
| [((283, 290), 'mossepy.mosse_tracker.MOSSE', 'MOSSE', ([], {}), '()\n', (288, 290), False, 'from mossepy.mosse_tracker import MOSSE\n')] |
#!/usr/bin/env python
#coding:utf-8
import os
import RPi.GPIO as GPIO #
import json
from time import sleep #
from twython import Twython
f=open("tw_config.json",'r')
config=json.load(f)
f.close()
CONSUMER_KEY =config['consumer_key']
CONSUMER_SECRET =config['consumer_secret']
ACCESS_TOKEN =config['access_token']
ACCESS_SECRET =config['access_secret']
dist=config['dist']
def on_positive_edge(channel):
#time stamp
timestamp = 'date +%F_%H:%M:%S'
current_time=os.popen(timestamp).readline().strip()
# get CPU temperature
cmd = '/opt/vc/bin/vcgencmd measure_temp'
line = os.popen(cmd).readline().strip()
temp = line.split('=')[1].split("'")[0]
direct_message='CPU:'+temp+'deg @'+current_time+' : by Python script'
global ledstate
if channel == trigger_input:
ledstate = not ledstate
GPIO.output(25, ledstate)
api.send_direct_message(text=direct_message ,screen_name=dist)
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)
trigger_input=21
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.OUT)
GPIO.setup(trigger_input, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(trigger_input, GPIO.RISING, callback=on_positive_edge, bouncetime=1000)
ledstate = GPIO.LOW
try:
while True:
sleep(0.01)
except KeyboardInterrupt: #
pass
GPIO.cleanup() #
| [
"RPi.GPIO.cleanup",
"RPi.GPIO.add_event_detect",
"twython.Twython",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"time.sleep",
"os.popen",
"json.load",
"RPi.GPIO.setmode"
]
| [((175, 187), 'json.load', 'json.load', (['f'], {}), '(f)\n', (184, 187), False, 'import json\n'), ((954, 1021), 'twython.Twython', 'Twython', (['CONSUMER_KEY', 'CONSUMER_SECRET', 'ACCESS_TOKEN', 'ACCESS_SECRET'], {}), '(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET)\n', (961, 1021), False, 'from twython import Twython\n'), ((1038, 1060), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (1050, 1060), True, 'import RPi.GPIO as GPIO\n'), ((1061, 1085), 'RPi.GPIO.setup', 'GPIO.setup', (['(25)', 'GPIO.OUT'], {}), '(25, GPIO.OUT)\n', (1071, 1085), True, 'import RPi.GPIO as GPIO\n'), ((1086, 1146), 'RPi.GPIO.setup', 'GPIO.setup', (['trigger_input', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(trigger_input, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (1096, 1146), True, 'import RPi.GPIO as GPIO\n'), ((1148, 1245), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['trigger_input', 'GPIO.RISING'], {'callback': 'on_positive_edge', 'bouncetime': '(1000)'}), '(trigger_input, GPIO.RISING, callback=on_positive_edge,\n bouncetime=1000)\n', (1169, 1245), True, 'import RPi.GPIO as GPIO\n'), ((1344, 1358), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (1356, 1358), True, 'import RPi.GPIO as GPIO\n'), ((849, 874), 'RPi.GPIO.output', 'GPIO.output', (['(25)', 'ledstate'], {}), '(25, ledstate)\n', (860, 874), True, 'import RPi.GPIO as GPIO\n'), ((1293, 1304), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (1298, 1304), False, 'from time import sleep\n'), ((477, 496), 'os.popen', 'os.popen', (['timestamp'], {}), '(timestamp)\n', (485, 496), False, 'import os\n'), ((601, 614), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (609, 614), False, 'import os\n')] |
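The temperature parsing in on_positive_edge is terse; this standalone sketch shows what the split chain does to a typical vcgencmd line (the sample string is illustrative, and no Raspberry Pi is needed to run it):

line = "temp=48.3'C"                        # typical `vcgencmd measure_temp` output
temp = line.split('=')[1].split("'")[0]     # keep what sits between '=' and the quote
print(temp)                                 # -> 48.3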
""" Unit tests for ``wheezy.templates.utils``.
"""
import unittest
class FindAllBalancedTestCase(unittest.TestCase):
""" Test the ``find_all_balanced``.
"""
def test_start_out(self):
""" The start index is out of range.
"""
from wheezy.template.utils import find_all_balanced
assert 10 == find_all_balanced('test', 10)
def test_start_separator(self):
""" If text doesn't start with ``([`` return.
"""
from wheezy.template.utils import find_all_balanced
assert 0 == find_all_balanced('test([', 0)
assert 3 == find_all_balanced('test([', 3)
def test_not_balanced(self):
""" Separators are not balanced.
"""
from wheezy.template.utils import find_all_balanced
assert 4 == find_all_balanced('test(a, b', 4)
assert 4 == find_all_balanced('test[a, b()', 4)
def test_balanced(self):
""" Separators are balanced.
"""
from wheezy.template.utils import find_all_balanced
assert 10 == find_all_balanced('test(a, b)', 4)
assert 13 == find_all_balanced('test(a, b)[0]', 4)
assert 12 == find_all_balanced('test(a, b())', 4)
assert 17 == find_all_balanced('test(a, b())[0]()', 4)
class FindBalancedTestCase(unittest.TestCase):
""" Test the ``find_balanced``.
"""
def test_start_out(self):
""" The start index is out of range.
"""
from wheezy.template.utils import find_balanced
assert 10 == find_balanced('test', 10)
def test_start_separator(self):
""" If text doesn't start with ``start_sep`` return.
"""
from wheezy.template.utils import find_balanced
assert 0 == find_balanced('test(', 0)
assert 3 == find_balanced('test(', 3)
def test_not_balanced(self):
""" Separators are not balanced.
"""
from wheezy.template.utils import find_balanced
assert 4 == find_balanced('test(a, b', 4)
assert 4 == find_balanced('test(a, b()', 4)
def test_balanced(self):
""" Separators are balanced.
"""
from wheezy.template.utils import find_balanced
assert 10 == find_balanced('test(a, b)', 4)
assert 12 == find_balanced('test(a, b())', 4)
| [
"wheezy.template.utils.find_all_balanced",
"wheezy.template.utils.find_balanced"
]
| [((338, 367), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test"""', '(10)'], {}), "('test', 10)\n", (355, 367), False, 'from wheezy.template.utils import find_all_balanced\n'), ((551, 581), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test(["""', '(0)'], {}), "('test([', 0)\n", (568, 581), False, 'from wheezy.template.utils import find_all_balanced\n'), ((602, 632), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test(["""', '(3)'], {}), "('test([', 3)\n", (619, 632), False, 'from wheezy.template.utils import find_all_balanced\n'), ((800, 833), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test(a, b"""', '(4)'], {}), "('test(a, b', 4)\n", (817, 833), False, 'from wheezy.template.utils import find_all_balanced\n'), ((854, 889), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test[a, b()"""', '(4)'], {}), "('test[a, b()', 4)\n", (871, 889), False, 'from wheezy.template.utils import find_all_balanced\n'), ((1050, 1084), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test(a, b)"""', '(4)'], {}), "('test(a, b)', 4)\n", (1067, 1084), False, 'from wheezy.template.utils import find_all_balanced\n'), ((1106, 1143), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test(a, b)[0]"""', '(4)'], {}), "('test(a, b)[0]', 4)\n", (1123, 1143), False, 'from wheezy.template.utils import find_all_balanced\n'), ((1165, 1201), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test(a, b())"""', '(4)'], {}), "('test(a, b())', 4)\n", (1182, 1201), False, 'from wheezy.template.utils import find_all_balanced\n'), ((1223, 1264), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', (['"""test(a, b())[0]()"""', '(4)'], {}), "('test(a, b())[0]()', 4)\n", (1240, 1264), False, 'from wheezy.template.utils import find_all_balanced\n'), ((1523, 1548), 'wheezy.template.utils.find_balanced', 'find_balanced', (['"""test"""', '(10)'], {}), "('test', 10)\n", (1536, 1548), False, 'from wheezy.template.utils import find_balanced\n'), ((1735, 1760), 'wheezy.template.utils.find_balanced', 'find_balanced', (['"""test("""', '(0)'], {}), "('test(', 0)\n", (1748, 1760), False, 'from wheezy.template.utils import find_balanced\n'), ((1781, 1806), 'wheezy.template.utils.find_balanced', 'find_balanced', (['"""test("""', '(3)'], {}), "('test(', 3)\n", (1794, 1806), False, 'from wheezy.template.utils import find_balanced\n'), ((1970, 1999), 'wheezy.template.utils.find_balanced', 'find_balanced', (['"""test(a, b"""', '(4)'], {}), "('test(a, b', 4)\n", (1983, 1999), False, 'from wheezy.template.utils import find_balanced\n'), ((2020, 2051), 'wheezy.template.utils.find_balanced', 'find_balanced', (['"""test(a, b()"""', '(4)'], {}), "('test(a, b()', 4)\n", (2033, 2051), False, 'from wheezy.template.utils import find_balanced\n'), ((2208, 2238), 'wheezy.template.utils.find_balanced', 'find_balanced', (['"""test(a, b)"""', '(4)'], {}), "('test(a, b)', 4)\n", (2221, 2238), False, 'from wheezy.template.utils import find_balanced\n'), ((2260, 2292), 'wheezy.template.utils.find_balanced', 'find_balanced', (['"""test(a, b())"""', '(4)'], {}), "('test(a, b())', 4)\n", (2273, 2292), False, 'from wheezy.template.utils import find_balanced\n')] |
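Spelled out on one concrete case, the assertions above say that find_all_balanced(text, start) returns the index just past the last balanced (...) or [...] group chained from start, or start itself when no balanced group follows:

from wheezy.template.utils import find_all_balanced

text = 'test(a, b)[0]'
# '(' sits at index 4; the chained balanced groups are (a, b) and [0],
# and the final ']' is at index 12, so the scan returns 13 (one past it)
assert find_all_balanced(text, 4) == 13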
import json
from os import path
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
from sqlalchemy.orm.exc import NoResultFound
from database import session, Tweet, Hashtag, User
consumer_key = "0qFf4T2xPWVIycLmAwk3rDQ55"
consumer_secret = "<KEY>"
access_token = "<KEY>"
acces_token_secret = "<KEY>"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, acces_token_secret)
def save_tweets():
directory = _get_dir_absolute_path()
filepath = path.join(directory, "tweets.json")
listener = DatabaseListener(number_tweets_to_save = 1000, filepath=filepath)
stream = Stream(auth, listener)
languages = ("en",)
try:
stream.sample(languages = languages)
except KeyboardInterrupt:
listener.file.close()
class DatabaseListener(StreamListener):
def __init__(self, number_tweets_to_save, filepath = None):
self._final_count = number_tweets_to_save
self._current_count = 0
if filepath is None:
filepath = "tweets.txt"
self.file = open(filepath,"w")
#Slightly dangerous due to circular references>>
def __del__(self):
self.file.close()
def on_data(self, raw_data):
data = json.loads(raw_data)
json.dump(raw_data, self.file)
self.file.write("\n")
if "in_reply_to_status_id" in data:
return self.on_status(data)
def on_status(self, data):
        # this method is defined in this file
save_to_database(data)
self._current_count += 1
print("status count: {}".format(self._current_count))
if self._current_count >= self._final_count:
return False
def create_user_helper(user_data):
#alias to shorten calls
u = user_data
    user = User(uid = u["id_str"],
name = u["name"],
screen_name = u["screen_name"],
created_at = u["created_at"],
description = u.get("description"),
followers_count = u["followers_count"],
statuses_count = u["statuses_count"],
favourites_count = u["favourites_count"],
listed_count = u["listed_count"],
geo_enabled = u["geo_enabled"],
lang = u.get("lang"))
return user
def create_tweet_helper(tweet_data, user):
#alias for shorten calls
t = tweet_data
retweet = True if t["text"][:3] == "RT " else False
coordinates = json.dumps(t["coordinates"])
tweet = Tweet(tid=t["id_str"],
tweet=t["text"],
user=user,
coordinates=coordinates,
created_at = t["created_at"],
favorite_count = t["favorite_count"],
in_reply_to_screen_name = t["in_reply_to_screen_name"],
in_reply_to_status_id = t["in_reply_to_status_id"],
in_reply_to_user_id = t["in_reply_to_user_id"],
lang = t.get("lang"),
quoted_status_id = t.get("quoted_status_id"),
retweet_count = t["retweet_count"],
source = t["source"],
is_retweet = retweet)
return tweet
def save_to_database(data):
try:
user = session.query(User).filter_by(id=str(data["user"]["id"])).one()
except NoResultFound:
user = create_user_helper(data["user"])
session.add(user)
hashtag_results = []
hashtags = data["entities"]["hashtags"]
for hashtag in hashtags:
hashtag = hashtag["text"].lower()
        try:
            hashtag_obj = session.query(Hashtag).filter_by(text = hashtag).one()
        except NoResultFound:
            hashtag_obj = Hashtag(text = hashtag)
            session.add(hashtag_obj)
hashtag_results.append(hashtag_obj)
tweet = create_tweet_helper(data, user)
for hashtag in hashtag_results:
tweet.hashtags.append(hashtag)
session.add(tweet)
session.commit()
| [
"json.loads",
"database.Hashtag",
"tweepy.Stream",
"json.dumps",
"os.path.join",
"database.session.add",
"database.session.query",
"database.session.commit",
"json.dump",
"tweepy.OAuthHandler"
]
| [((345, 388), 'tweepy.OAuthHandler', 'OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (357, 388), False, 'from tweepy import OAuthHandler, Stream\n'), ((521, 556), 'os.path.join', 'path.join', (['directory', '"""tweets.json"""'], {}), "(directory, 'tweets.json')\n", (530, 556), False, 'from os import path\n'), ((661, 683), 'tweepy.Stream', 'Stream', (['auth', 'listener'], {}), '(auth, listener)\n', (667, 683), False, 'from tweepy import OAuthHandler, Stream\n'), ((2501, 2529), 'json.dumps', 'json.dumps', (["t['coordinates']"], {}), "(t['coordinates'])\n", (2511, 2529), False, 'import json\n'), ((1262, 1282), 'json.loads', 'json.loads', (['raw_data'], {}), '(raw_data)\n', (1272, 1282), False, 'import json\n'), ((1291, 1321), 'json.dump', 'json.dump', (['raw_data', 'self.file'], {}), '(raw_data, self.file)\n', (1300, 1321), False, 'import json\n'), ((4034, 4052), 'database.session.add', 'session.add', (['tweet'], {}), '(tweet)\n', (4045, 4052), False, 'from database import session, Tweet, Hashtag, User\n'), ((4061, 4077), 'database.session.commit', 'session.commit', ([], {}), '()\n', (4075, 4077), False, 'from database import session, Tweet, Hashtag, User\n'), ((3445, 3462), 'database.session.add', 'session.add', (['user'], {}), '(user)\n', (3456, 3462), False, 'from database import session, Tweet, Hashtag, User\n'), ((3778, 3799), 'database.Hashtag', 'Hashtag', ([], {'text': 'hashtag'}), '(text=hashtag)\n', (3785, 3799), False, 'from database import session, Tweet, Hashtag, User\n'), ((3814, 3838), 'database.session.add', 'session.add', (['hashtag_obj'], {}), '(hashtag_obj)\n', (3825, 3838), False, 'from database import session, Tweet, Hashtag, User\n'), ((3299, 3318), 'database.session.query', 'session.query', (['User'], {}), '(User)\n', (3312, 3318), False, 'from database import session, Tweet, Hashtag, User\n'), ((3641, 3663), 'database.session.query', 'session.query', (['Hashtag'], {}), '(Hashtag)\n', (3654, 3663), False, 'from database import session, Tweet, Hashtag, User\n')] |
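The query-then-create pattern used in save_to_database is worth isolating; the helper below is a generic SQLAlchemy sketch of the same get-or-create idea, not part of the module above:

from sqlalchemy.orm.exc import NoResultFound

def get_or_create(session, model, **kwargs):
    # look the row up first; create and stage a new one only if the lookup fails
    try:
        return session.query(model).filter_by(**kwargs).one()
    except NoResultFound:
        instance = model(**kwargs)
        session.add(instance)
        return instance

# usage mirroring the hashtag handling above:
# hashtag_obj = get_or_create(session, Hashtag, text=hashtag)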
from flask import render_template, jsonify
from app import app
import random
@app.route('/')
@app.route('/index')
def index():
# Feature flags init goes here!
#
# noinspection PyDictCreation
flags = {
"welcome_text": "welcome to my python FF tutorial!"
}
# Flag goes here!
#
flags["alternate_homescreen"] = False
return render_template(
'index.html',
**flags,
title='Home'
)
@app.route('/map')
def map():
return render_template('map.html', title='Map')
@app.route('/map/refresh', methods=['POST'])
def map_refresh():
points = [(random.uniform(48.8434100, 48.8634100),
random.uniform(2.3388000, 2.3588000))
for _ in range(random.randint(2, 9))]
return jsonify({'points': points})
@app.route('/contact')
def contact():
return render_template('contact.html', title='Contact')
| [
"flask.render_template",
"random.uniform",
"app.app.route",
"random.randint",
"flask.jsonify"
]
| [((81, 95), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (90, 95), False, 'from app import app\n'), ((97, 116), 'app.app.route', 'app.route', (['"""/index"""'], {}), "('/index')\n", (106, 116), False, 'from app import app\n'), ((461, 478), 'app.app.route', 'app.route', (['"""/map"""'], {}), "('/map')\n", (470, 478), False, 'from app import app\n'), ((545, 588), 'app.app.route', 'app.route', (['"""/map/refresh"""'], {'methods': "['POST']"}), "('/map/refresh', methods=['POST'])\n", (554, 588), False, 'from app import app\n'), ((810, 831), 'app.app.route', 'app.route', (['"""/contact"""'], {}), "('/contact')\n", (819, 831), False, 'from app import app\n'), ((375, 427), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Home"""'}), "('index.html', **flags, title='Home')\n", (390, 427), False, 'from flask import render_template, jsonify\n'), ((501, 541), 'flask.render_template', 'render_template', (['"""map.html"""'], {'title': '"""Map"""'}), "('map.html', title='Map')\n", (516, 541), False, 'from flask import render_template, jsonify\n'), ((779, 806), 'flask.jsonify', 'jsonify', (["{'points': points}"], {}), "({'points': points})\n", (786, 806), False, 'from flask import render_template, jsonify\n'), ((858, 906), 'flask.render_template', 'render_template', (['"""contact.html"""'], {'title': '"""Contact"""'}), "('contact.html', title='Contact')\n", (873, 906), False, 'from flask import render_template, jsonify\n'), ((623, 657), 'random.uniform', 'random.uniform', (['(48.84341)', '(48.86341)'], {}), '(48.84341, 48.86341)\n', (637, 657), False, 'import random\n'), ((678, 708), 'random.uniform', 'random.uniform', (['(2.3388)', '(2.3588)'], {}), '(2.3388, 2.3588)\n', (692, 708), False, 'import random\n'), ((745, 765), 'random.randint', 'random.randint', (['(2)', '(9)'], {}), '(2, 9)\n', (759, 765), False, 'import random\n')] |
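A quick way to exercise the JSON endpoint above without running a server is Flask's built-in test client; this assumes the app object created in the app package is importable exactly as the module itself imports it:

from app import app

with app.test_client() as client:
    resp = client.post('/map/refresh')
    print(resp.get_json())   # e.g. {'points': [[48.85..., 2.34...], ...]}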
# encoding: utf-8
'''
@author: yangsen
@license:
@contact:
@software:
@file: numpy_mat.py
@time: 18-8-25 9:56 PM
@desc:
'''
import numpy as np
a = np.arange(9).reshape(3,3)
# rows
a[1]
a[[1,2]]
a[np.array([1,2])]
# columns
a[:,1]
a[:,[1,2]]
a[:,np.array([1,2])] | [
"numpy.array",
"numpy.arange"
]
| [((194, 210), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (202, 210), True, 'import numpy as np\n'), ((146, 158), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (155, 158), True, 'import numpy as np\n'), ((238, 254), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (246, 254), True, 'import numpy as np\n')] |
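The same selections on concrete values, with the expected results in the comments:

import numpy as np

a = np.arange(9).reshape(3, 3)   # [[0 1 2], [3 4 5], [6 7 8]]
print(a[1])           # row 1           -> [3 4 5]
print(a[[1, 2]])      # rows 1 and 2    -> [[3 4 5], [6 7 8]]
print(a[:, 1])        # column 1        -> [1 4 7]
print(a[:, [1, 2]])   # columns 1 and 2 -> [[1 2], [4 5], [7 8]]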
import copy
from typing import Callable, Dict, List, Optional
import torch
import torch.nn as nn
import torch.optim as optim
from ai_traineree import DEVICE
from ai_traineree.agents import AgentBase
from ai_traineree.agents.agent_utils import soft_update
from ai_traineree.buffers import NStepBuffer, PERBuffer
from ai_traineree.buffers.buffer_factory import BufferFactory
from ai_traineree.loggers import DataLogger
from ai_traineree.networks.heads import RainbowNet
from ai_traineree.types import ActionType, AgentState, BufferState, DoneType, NetworkState, ObsType, RewardType
from ai_traineree.types.dataspace import DataSpace
from ai_traineree.utils import to_numbers_seq, to_tensor
class RainbowAgent(AgentBase):
"""Rainbow agent as described in [1].
    Rainbow is a DQN agent with several improvements that were suggested before 2017.
    As mentioned by the authors, it is not an exhaustive set of improvements, but all changes are in
    relatively separate areas, so combining them makes sense. These improvements are:
* Priority Experience Replay
* Multi-step
* Double Q net
* Dueling nets
* NoisyNet
* CategoricalNet for Q estimate
Consider this class as a particular version of the DQN agent.
[1] "Rainbow: Combining Improvements in Deep Reinforcement Learning" by Hessel et al. (DeepMind team)
https://arxiv.org/abs/1710.02298
"""
model = "Rainbow"
def __init__(
self,
obs_space: DataSpace,
action_space: DataSpace,
state_transform: Optional[Callable]=None,
reward_transform: Optional[Callable]=None,
**kwargs
):
"""
        A wrapper over the DQN, thus the majority of the logic is in the DQNAgent.
Special treatment is required because the Rainbow agent uses categorical nets
which operate on probability distributions. Each action is taken as the estimate
from such distributions.
Parameters:
obs_space (DataSpace): Dataspace describing the input.
action_space (DataSpace): Dataspace describing the output.
state_transform (optional func):
reward_transform (optional func):
Keyword parameters:
pre_network_fn (function that takes input_shape and returns network):
Used to preprocess state before it is used in the value- and advantage-function in the dueling nets.
hidden_layers (tuple of ints): Shape of the hidden layers in fully connected network. Default: (100, 100).
lr (default: 1e-3): Learning rate value.
gamma (float): Discount factor. Default: 0.99.
tau (float): Soft-copy factor. Default: 0.002.
update_freq (int): Number of steps between each learning step. Default 1.
batch_size (int): Number of samples to use at each learning step. Default: 80.
buffer_size (int): Number of most recent samples to keep in memory for learning. Default: 1e5.
warm_up (int): Number of samples to observe before starting any learning step. Default: 0.
number_updates (int): How many times to use learning step in the learning phase. Default: 1.
max_grad_norm (float): Maximum norm of the gradient used in learning. Default: 10.
using_double_q (bool): Whether to use Double Q Learning network. Default: True.
n_steps (int): Number of lookahead steps when estimating reward. See :ref:`NStepBuffer`. Default: 3.
v_min (float): Lower bound for distributional value V. Default: -10.
v_max (float): Upper bound for distributional value V. Default: 10.
num_atoms (int): Number of atoms (discrete states) in the value V distribution. Default: 21.
"""
super().__init__(**kwargs)
self.device = self._register_param(kwargs, "device", DEVICE, update=True)
self.obs_space = obs_space
self.action_space = action_space
self._config['obs_space'] = self.obs_space
self._config['action_space'] = self.action_space
self.action_size = action_space.to_feature()
self.lr = float(self._register_param(kwargs, 'lr', 3e-4))
self.gamma = float(self._register_param(kwargs, 'gamma', 0.99))
self.tau = float(self._register_param(kwargs, 'tau', 0.002))
self.update_freq = int(self._register_param(kwargs, 'update_freq', 1))
self.batch_size = int(self._register_param(kwargs, 'batch_size', 80, update=True))
self.buffer_size = int(self._register_param(kwargs, 'buffer_size', int(1e5), update=True))
self.warm_up = int(self._register_param(kwargs, 'warm_up', 0))
self.number_updates = int(self._register_param(kwargs, 'number_updates', 1))
self.max_grad_norm = float(self._register_param(kwargs, 'max_grad_norm', 10))
self.iteration: int = 0
self.using_double_q = bool(self._register_param(kwargs, "using_double_q", True))
self.state_transform = state_transform if state_transform is not None else lambda x: x
self.reward_transform = reward_transform if reward_transform is not None else lambda x: x
v_min = float(self._register_param(kwargs, "v_min", -10))
v_max = float(self._register_param(kwargs, "v_max", 10))
self.num_atoms = int(self._register_param(kwargs, "num_atoms", 21, drop=True))
self.z_atoms = torch.linspace(v_min, v_max, self.num_atoms, device=self.device)
self.z_delta = self.z_atoms[1] - self.z_atoms[0]
self.buffer = PERBuffer(**kwargs)
self.__batch_indices = torch.arange(self.batch_size, device=self.device)
self.n_steps = int(self._register_param(kwargs, "n_steps", 3))
self.n_buffer = NStepBuffer(n_steps=self.n_steps, gamma=self.gamma)
# Note that in case a pre_network is provided, e.g. a shared net that extracts pixels values,
# it should be explicitly passed in kwargs
kwargs["hidden_layers"] = to_numbers_seq(self._register_param(kwargs, "hidden_layers", (100, 100)))
self.net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.target_net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
self.dist_probs = None
self._loss = float('nan')
@property
def loss(self):
return {'loss': self._loss}
@loss.setter
def loss(self, value):
if isinstance(value, dict):
value = value['loss']
self._loss = value
def step(self, obs: ObsType, action: ActionType, reward: RewardType, next_obs: ObsType, done: DoneType) -> None:
"""Letting the agent to take a step.
On some steps the agent will initiate learning step. This is dependent on
the `update_freq` value.
Parameters:
obs (ObservationType): Observation.
action (int): Discrete action associated with observation.
reward (float): Reward obtained for taking action at state.
next_obs (ObservationType): Observation in a state where the action took.
done: (bool) Whether in terminal (end of episode) state.
"""
assert isinstance(action, int), "Rainbow expects discrete action (int)"
self.iteration += 1
t_obs = to_tensor(self.state_transform(obs)).float().to("cpu")
t_next_obs = to_tensor(self.state_transform(next_obs)).float().to("cpu")
reward = self.reward_transform(reward)
# Delay adding to buffer to account for n_steps (particularly the reward)
self.n_buffer.add(
state=t_obs.numpy(), action=[int(action)], reward=[reward], done=[done], next_state=t_next_obs.numpy()
)
if not self.n_buffer.available:
return
self.buffer.add(**self.n_buffer.get().get_dict())
if self.iteration < self.warm_up:
return
if len(self.buffer) >= self.batch_size and (self.iteration % self.update_freq) == 0:
for _ in range(self.number_updates):
self.learn(self.buffer.sample())
# Update networks only once - sync local & target
soft_update(self.target_net, self.net, self.tau)
def act(self, obs: ObsType, eps: float = 0.) -> int:
"""
Returns actions for given state as per current policy.
Parameters:
            obs: Current available observation from the environment.
            eps: Epsilon value in the epsilon-greedy policy.
"""
# Epsilon-greedy action selection
if self._rng.random() < eps:
# TODO: Update with action_space.sample() once implemented
assert len(self.action_space.shape) == 1, "Only 1D is supported right now"
return self._rng.randint(self.action_space.low, self.action_space.high)
t_obs = to_tensor(self.state_transform(obs)).float().unsqueeze(0).to(self.device)
self.dist_probs = self.net.act(t_obs)
q_values = (self.dist_probs * self.z_atoms).sum(-1)
return int(q_values.argmax(-1)) # Action maximizes state-action value Q(s, a)
def learn(self, experiences: Dict[str, List]) -> None:
"""
Parameters:
experiences: Contains all experiences for the agent. Typically sampled from the memory buffer.
Five keys are expected, i.e. `state`, `action`, `reward`, `next_state`, `done`.
Each key contains a array and all arrays have to have the same length.
"""
rewards = to_tensor(experiences['reward']).float().to(self.device)
dones = to_tensor(experiences['done']).type(torch.int).to(self.device)
states = to_tensor(experiences['state']).float().to(self.device)
next_states = to_tensor(experiences['next_state']).float().to(self.device)
actions = to_tensor(experiences['action']).type(torch.long).to(self.device)
assert rewards.shape == dones.shape == (self.batch_size, 1)
assert states.shape == next_states.shape == (self.batch_size,) + self.obs_space.shape
assert actions.shape == (self.batch_size, 1) # Discrete domain
with torch.no_grad():
prob_next = self.target_net.act(next_states)
q_next = (prob_next * self.z_atoms).sum(-1) * self.z_delta
if self.using_double_q:
duel_prob_next = self.net.act(next_states)
a_next = torch.argmax((duel_prob_next * self.z_atoms).sum(-1), dim=-1)
else:
a_next = torch.argmax(q_next, dim=-1)
prob_next = prob_next[self.__batch_indices, a_next, :]
m = self.net.dist_projection(rewards, 1 - dones, self.gamma ** self.n_steps, prob_next)
assert m.shape == (self.batch_size, self.num_atoms)
log_prob = self.net(states, log_prob=True)
assert log_prob.shape == (self.batch_size,) + self.action_size + (self.num_atoms,)
log_prob = log_prob[self.__batch_indices, actions.squeeze(), :]
assert log_prob.shape == m.shape == (self.batch_size, self.num_atoms)
# Cross-entropy loss error and the loss is batch mean
error = -torch.sum(m * log_prob, 1)
assert error.shape == (self.batch_size,)
loss = error.mean()
assert loss >= 0
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)
self.optimizer.step()
self._loss = float(loss.item())
if hasattr(self.buffer, 'priority_update'):
assert (~torch.isnan(error)).any()
self.buffer.priority_update(experiences['index'], error.detach().cpu().numpy())
# Update networks - sync local & target
soft_update(self.target_net, self.net, self.tau)
def state_dict(self) -> Dict[str, dict]:
"""Returns agent's state dictionary.
Returns:
State dicrionary for internal networks.
"""
return {"net": self.net.state_dict(), "target_net": self.target_net.state_dict()}
def log_metrics(self, data_logger: DataLogger, step: int, full_log: bool=False):
data_logger.log_value("loss/agent", self._loss, step)
if full_log and self.dist_probs is not None:
assert len(self.action_space.shape) == 1, "Only 1D actions currently supported"
action_size = self.action_size[0]
for action_idx in range(action_size):
dist = self.dist_probs[0, action_idx]
data_logger.log_value(f'dist/expected_{action_idx}', (dist*self.z_atoms).sum().item(), step)
data_logger.add_histogram(
f'dist/Q_{action_idx}', min=self.z_atoms[0], max=self.z_atoms[-1], num=len(self.z_atoms),
sum=dist.sum(), sum_squares=dist.pow(2).sum(), bucket_limits=self.z_atoms+self.z_delta,
bucket_counts=dist, global_step=step
)
# This method, `log_metrics`, isn't executed on every iteration but just in case we delay plotting weights.
# It simply might be quite costly. Thread wisely.
if full_log:
for idx, layer in enumerate(self.net.value_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"value_net/layer_weights_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"value_net/layer_bias_{idx}", layer.bias.cpu(), step)
for idx, layer in enumerate(self.net.advantage_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"advantage_net/layer_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"advantage_net/layer_bias_{idx}", layer.bias.cpu(), step)
def get_state(self) -> AgentState:
"""Provides agent's internal state."""
return AgentState(
model=self.model,
obs_space=self.obs_space,
action_space=self.action_space,
config=self._config,
buffer=copy.deepcopy(self.buffer.get_state()),
network=copy.deepcopy(self.get_network_state()),
)
def get_network_state(self) -> NetworkState:
return NetworkState(net=dict(net=self.net.state_dict(), target_net=self.target_net.state_dict()))
@staticmethod
def from_state(state: AgentState) -> AgentBase:
config = copy.copy(state.config)
config.update({'obs_space': state.obs_space, 'action_space': state.action_space})
agent = RainbowAgent(**config)
if state.network is not None:
agent.set_network(state.network)
if state.buffer is not None:
agent.set_buffer(state.buffer)
return agent
def set_network(self, network_state: NetworkState) -> None:
self.net.load_state_dict(network_state.net['net'])
self.target_net.load_state_dict(network_state.net['target_net'])
def set_buffer(self, buffer_state: BufferState) -> None:
self.buffer = BufferFactory.from_state(buffer_state)
def save_state(self, path: str) -> None:
"""Saves agent's state into a file.
Parameters:
path: String path where to write the state.
"""
agent_state = self.get_state()
torch.save(agent_state, path)
def load_state(self, path: str) -> None:
"""Loads state from a file under provided path.
Parameters:
path: String path indicating where the state is stored.
"""
agent_state = torch.load(path)
self._config = agent_state.get('config', {})
self.__dict__.update(**self._config)
self.net.load_state_dict(agent_state['net'])
self.target_net.load_state_dict(agent_state['target_net'])
def save_buffer(self, path: str) -> None:
"""Saves data from the buffer into a file under provided path.
Parameters:
path: String path where to write the buffer.
"""
import json
dump = self.buffer.dump_buffer(serialize=True)
with open(path, 'w') as f:
json.dump(dump, f)
def load_buffer(self, path: str) -> None:
"""Loads data into the buffer from provided file path.
Parameters:
path: String path indicating where the buffer is stored.
"""
import json
with open(path, 'r') as f:
buffer_dump = json.load(f)
self.buffer.load_buffer(buffer_dump)
def __eq__(self, o: object) -> bool:
return super().__eq__(o) \
and isinstance(o, type(self)) \
and self._config == o._config \
and self.buffer == o.buffer \
and self.get_network_state() == o.get_network_state()
| [
"ai_traineree.networks.heads.RainbowNet",
"ai_traineree.utils.to_tensor",
"torch.load",
"json.dump",
"ai_traineree.agents.agent_utils.soft_update",
"json.load",
"torch.sum",
"ai_traineree.buffers.PERBuffer",
"torch.linspace",
"torch.save",
"torch.no_grad",
"copy.copy",
"ai_traineree.buffers.buffer_factory.BufferFactory.from_state",
"torch.isnan",
"ai_traineree.buffers.NStepBuffer",
"torch.arange",
"torch.argmax"
]
| [((5392, 5456), 'torch.linspace', 'torch.linspace', (['v_min', 'v_max', 'self.num_atoms'], {'device': 'self.device'}), '(v_min, v_max, self.num_atoms, device=self.device)\n', (5406, 5456), False, 'import torch\n'), ((5537, 5556), 'ai_traineree.buffers.PERBuffer', 'PERBuffer', ([], {}), '(**kwargs)\n', (5546, 5556), False, 'from ai_traineree.buffers import NStepBuffer, PERBuffer\n'), ((5588, 5637), 'torch.arange', 'torch.arange', (['self.batch_size'], {'device': 'self.device'}), '(self.batch_size, device=self.device)\n', (5600, 5637), False, 'import torch\n'), ((5734, 5785), 'ai_traineree.buffers.NStepBuffer', 'NStepBuffer', ([], {'n_steps': 'self.n_steps', 'gamma': 'self.gamma'}), '(n_steps=self.n_steps, gamma=self.gamma)\n', (5745, 5785), False, 'from ai_traineree.buffers import NStepBuffer, PERBuffer\n'), ((6067, 6153), 'ai_traineree.networks.heads.RainbowNet', 'RainbowNet', (['obs_space.shape', 'self.action_size'], {'num_atoms': 'self.num_atoms'}), '(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **\n kwargs)\n', (6077, 6153), False, 'from ai_traineree.networks.heads import RainbowNet\n'), ((6175, 6261), 'ai_traineree.networks.heads.RainbowNet', 'RainbowNet', (['obs_space.shape', 'self.action_size'], {'num_atoms': 'self.num_atoms'}), '(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **\n kwargs)\n', (6185, 6261), False, 'from ai_traineree.networks.heads import RainbowNet\n'), ((11824, 11872), 'ai_traineree.agents.agent_utils.soft_update', 'soft_update', (['self.target_net', 'self.net', 'self.tau'], {}), '(self.target_net, self.net, self.tau)\n', (11835, 11872), False, 'from ai_traineree.agents.agent_utils import soft_update\n'), ((14651, 14674), 'copy.copy', 'copy.copy', (['state.config'], {}), '(state.config)\n', (14660, 14674), False, 'import copy\n'), ((15269, 15307), 'ai_traineree.buffers.buffer_factory.BufferFactory.from_state', 'BufferFactory.from_state', (['buffer_state'], {}), '(buffer_state)\n', (15293, 15307), False, 'from ai_traineree.buffers.buffer_factory import BufferFactory\n'), ((15535, 15564), 'torch.save', 'torch.save', (['agent_state', 'path'], {}), '(agent_state, path)\n', (15545, 15564), False, 'import torch\n'), ((15791, 15807), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (15801, 15807), False, 'import torch\n'), ((8255, 8303), 'ai_traineree.agents.agent_utils.soft_update', 'soft_update', (['self.target_net', 'self.net', 'self.tau'], {}), '(self.target_net, self.net, self.tau)\n', (8266, 8303), False, 'from ai_traineree.agents.agent_utils import soft_update\n'), ((10243, 10258), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10256, 10258), False, 'import torch\n'), ((11240, 11266), 'torch.sum', 'torch.sum', (['(m * log_prob)', '(1)'], {}), '(m * log_prob, 1)\n', (11249, 11266), False, 'import torch\n'), ((16358, 16376), 'json.dump', 'json.dump', (['dump', 'f'], {}), '(dump, f)\n', (16367, 16376), False, 'import json\n'), ((16671, 16683), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16680, 16683), False, 'import json\n'), ((10613, 10641), 'torch.argmax', 'torch.argmax', (['q_next'], {'dim': '(-1)'}), '(q_next, dim=-1)\n', (10625, 10641), False, 'import torch\n'), ((9619, 9651), 'ai_traineree.utils.to_tensor', 'to_tensor', (["experiences['reward']"], {}), "(experiences['reward'])\n", (9628, 9651), False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((9692, 9722), 'ai_traineree.utils.to_tensor', 'to_tensor', (["experiences['done']"], {}), "(experiences['done'])\n", (9701, 9722), False, 'from 
ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((9772, 9803), 'ai_traineree.utils.to_tensor', 'to_tensor', (["experiences['state']"], {}), "(experiences['state'])\n", (9781, 9803), False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((9850, 9886), 'ai_traineree.utils.to_tensor', 'to_tensor', (["experiences['next_state']"], {}), "(experiences['next_state'])\n", (9859, 9886), False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((9929, 9961), 'ai_traineree.utils.to_tensor', 'to_tensor', (["experiences['action']"], {}), "(experiences['action'])\n", (9938, 9961), False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((11649, 11667), 'torch.isnan', 'torch.isnan', (['error'], {}), '(error)\n', (11660, 11667), False, 'import torch\n')] |
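Both act and learn above turn a categorical distribution over atoms into a scalar Q-value via (probs * z_atoms).sum(-1); the toy example below, plain PyTorch with made-up numbers and independent of the agent class, shows that this is simply an expectation over the fixed support:

import torch

v_min, v_max, num_atoms = -10.0, 10.0, 21
z_atoms = torch.linspace(v_min, v_max, num_atoms)   # fixed support, as in __init__

probs = torch.zeros(num_atoms)   # made-up distribution for a single action
probs[15] = 0.7              # atom at z = +5
probs[16] = 0.3              # atom at z = +6

q_value = (probs * z_atoms).sum(-1)   # expectation over the support
print(q_value.item())                 # ~5.3  (0.7 * 5 + 0.3 * 6)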
import pytest
from pathlib import Path
from blendtorch import btt
BLENDDIR = Path(__file__).parent/'blender'
class MyEnv(btt.env.OpenAIRemoteEnv):
def __init__(self, background=True, **kwargs):
super().__init__(version='1.0.0')
self.launch(scene=BLENDDIR/'env.blend', script=BLENDDIR /
'env.blend.py', background=background, **kwargs)
# For Blender 2.9 if we pass scene='', the tests below fail since
        # _env_post_step() is not called. It's currently unclear why this happens.
def _run_remote_env(background):
env = MyEnv(background=background)
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2 # 1 is already set by reset()
obs, reward, done, info = env.step(0.6)
assert obs == pytest.approx(0.6)
assert reward == 1.
assert not done
assert info['count'] == 3
for _ in range(8):
obs, reward, done, info = env.step(0.6)
assert done
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2
env.close()
@pytest.mark.background
def test_remote_env():
_run_remote_env(background=True)
def test_remote_env_ui():
_run_remote_env(background=False)
| [
"pytest.approx",
"pathlib.Path"
]
| [((78, 92), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (82, 92), False, 'from pathlib import Path\n'), ((714, 732), 'pytest.approx', 'pytest.approx', (['(0.1)'], {}), '(0.1)\n', (727, 732), False, 'import pytest\n'), ((900, 918), 'pytest.approx', 'pytest.approx', (['(0.6)'], {}), '(0.6)\n', (913, 918), False, 'import pytest\n'), ((1186, 1204), 'pytest.approx', 'pytest.approx', (['(0.1)'], {}), '(0.1)\n', (1199, 1204), False, 'import pytest\n')] |
from typing import Union, Iterable, List
import numpy as np
import pandas as pd
from ..models._transformer import _ArrayTransformer, _MultiArrayTransformer
class _DataFrameTransformer(_ArrayTransformer):
'''`_ArrayTransformer` wrapper for `pandas.DataFrame`.
'''
def __init__(self):
super().__init__()
def fit(self, X : pd.DataFrame, axis : Union[int, Iterable[int]] = 0):
if not isinstance(X, pd.DataFrame):
raise ValueError('This interface is for `pandas.DataFrame` only')
if isinstance(axis, list):
axis = axis[0]
# Set sample and feature index
if axis == 0:
self.index_samples = X.index
self.index_features = X.columns
elif axis == 1:
self.index_samples = X.columns
self.index_features = X.index
else:
raise ValueError('axis must be either 0 or 1')
# Fit the data
try:
super().fit(X=X.values, axis=axis)
except AttributeError:
err_msg = 'weights must be of type {:}.'.format(repr(pd.DataFrame))
raise TypeError(err_msg)
return self
def transform(self, X : pd.DataFrame) -> np.ndarray:
try:
return super().transform(X.values)
except AttributeError:
err_msg = 'weights must be of type {:}.'.format(repr(pd.DataFrame))
raise TypeError(err_msg)
def fit_transform(self, X : pd.DataFrame, axis : int = 0) -> np.ndarray:
return self.fit(X=X, axis=axis).transform(X)
def transform_weights(self, weights : pd.DataFrame) -> np.ndarray:
try:
return super().transform_weights(weights.values)
except AttributeError:
return super().transform_weights(weights)
def back_transform(self, X : np.ndarray) -> pd.DataFrame:
df = super().back_transform(X)
return pd.DataFrame(
df,
index=self.index_samples,
columns=self.index_features
)
def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
eofs = super().back_transform_eofs(X)
return pd.DataFrame(
eofs,
index=self.index_features,
columns=range(1, eofs.shape[-1] + 1)
)
def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
pcs = super().back_transform_pcs(X)
return pd.DataFrame(
pcs,
index=self.index_samples,
columns=range(1, pcs.shape[-1] + 1)
)
class _MultiDataFrameTransformer(_MultiArrayTransformer):
'Transform multiple 2D ``pd.DataFrame`` to a single 2D ``np.ndarry``.'
def __init__(self):
super().__init__()
def fit(self, X : Union[pd.DataFrame, List[pd.DataFrame]], axis : Union[int, Iterable[int]] = 0):
X = self._convert2list(X)
self.tfs = [_DataFrameTransformer().fit(x, axis=axis) for x in X]
if len(set([tf.n_valid_samples for tf in self.tfs])) > 1:
err_msg = 'All individual arrays must have same number of samples.'
raise ValueError(err_msg)
self.idx_array_sep = np.cumsum([tf.n_valid_features for tf in self.tfs])
self.axis_samples = self.tfs[0].axis_samples
return self
def transform(self, X : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
return super().transform(X=X)
def transform_weights(self, weights : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
return super().transform_weights(weights=weights)
def fit_transform(
self, X : Union[pd.DataFrame, List[pd.DataFrame]],
axis : Union[int, Iterable[int]] = 0
) -> np.ndarray:
return self.fit(X=X, axis=axis).transform(X)
def back_transform(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform(X=X)
def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform_eofs(X=X)
def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform_pcs(X=X)
| [
"pandas.DataFrame",
"numpy.cumsum"
]
| [((1911, 1982), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'index': 'self.index_samples', 'columns': 'self.index_features'}), '(df, index=self.index_samples, columns=self.index_features)\n', (1923, 1982), True, 'import pandas as pd\n'), ((3153, 3204), 'numpy.cumsum', 'np.cumsum', (['[tf.n_valid_features for tf in self.tfs]'], {}), '([tf.n_valid_features for tf in self.tfs])\n', (3162, 3204), True, 'import numpy as np\n')] |
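The transformer above is mostly bookkeeping of pandas labels around a numpy core; the minimal pattern it relies on, shown standalone with a tiny made-up frame:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6).reshape(3, 2),
                  index=['s1', 's2', 's3'], columns=['f1', 'f2'])

index_samples, index_features = df.index, df.columns   # remembered at fit time
values = df.values                                      # what the numpy model sees

# back-transform: re-attach the stored labels to a plain array of the same shape
restored = pd.DataFrame(values, index=index_samples, columns=index_features)
print(restored.equals(df))   # True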
from PIL import Image
from PIL import ImageTk
import tkinter as tki
from tkinter import Toplevel, Scale
import threading
import datetime
import cv2
import os
import time
import platform
class TelloUI:
"""Wrapper class to enable the GUI."""
def __init__(self,tello,outputpath):
"""
        Initialize all the elements of the GUI, supported by Tkinter.
        :param tello: class that interacts with the Tello drone.
Raises:
RuntimeError: If the Tello rejects the attempt to enter command mode.
"""
self.tello = tello # videostream device
        self.outputPath = outputpath # the path where pictures created by clicking the takeSnapshot button are saved
self.frame = None # frame read from h264decoder and used for pose recognition
self.thread = None # thread of the Tkinter mainloop
self.stopEvent = None
# control variables
self.distance = 0.1 # default distance for 'move' cmd
self.degree = 30 # default degree for 'cw' or 'ccw' cmd
# if the flag is TRUE,the auto-takeoff thread will stop waiting for the response from tello
self.quit_waiting_flag = False
# initialize the root window and image panel
self.root = tki.Tk()
self.panel = None
# create buttons
self.btn_snapshot = tki.Button(self.root, text="Snapshot!",
command=self.takeSnapshot)
self.btn_snapshot.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_pause = tki.Button(self.root, text="Pause", relief="raised", command=self.pauseVideo)
self.btn_pause.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_landing = tki.Button(
self.root, text="Open Command Panel", relief="raised", command=self.openCmdWindow)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
# start a thread that constantly pools the video sensor for
# the most recently read frame
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop, args=())
self.thread.start()
# set a callback to handle when the window is closed
self.root.wm_title("TELLO Controller")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
# the sending_command will send command to tello every 5 seconds
self.sending_command_thread = threading.Thread(target = self._sendingCommand)
def videoLoop(self):
"""
The mainloop thread of Tkinter
Raises:
RuntimeError: To get around a RunTime error that Tkinter throws due to threading.
"""
try:
            # start the thread that gets the GUI image and draws the skeleton
time.sleep(0.5)
self.sending_command_thread.start()
while not self.stopEvent.is_set():
system = platform.system()
# read the frame for GUI show
self.frame = self.tello.read()
if self.frame is None or self.frame.size == 0:
continue
# transfer the format from frame to image
image = Image.fromarray(self.frame)
                # we found a compatibility problem between Tkinter, PIL and macOS, and it will
                # sometimes result in a very long period spent in the "ImageTk.PhotoImage" function,
                # so for macOS we start a new thread to execute the _updateGUIImage function.
if system =="Windows" or system =="Linux":
self._updateGUIImage(image)
else:
thread_tmp = threading.Thread(target=self._updateGUIImage,args=(image,))
thread_tmp.start()
time.sleep(0.03)
except RuntimeError as e:
print("[INFO] caught a RuntimeError")
def _updateGUIImage(self,image):
"""
        Main operation to initialize the image object and update the GUI panel
"""
image = ImageTk.PhotoImage(image)
# if the panel none ,we need to initial it
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
# otherwise, simply update the panel
else:
self.panel.configure(image=image)
self.panel.image = image
def _sendingCommand(self):
"""
        start a while loop that sends 'command' to tello every 5 seconds
"""
while True:
self.tello.send_command('command')
time.sleep(5)
def _setQuitWaitingFlag(self):
"""
        set the variable to TRUE; it will stop the computer waiting for a response from tello
"""
self.quit_waiting_flag = True
def openCmdWindow(self):
"""
        open the command window and initialize all the buttons and text
"""
panel = Toplevel(self.root)
panel.wm_title("Command Panel")
# create text input entry
text0 = tki.Label(panel,
text='This Controller map keyboard inputs to Tello control commands\n'
'Adjust the trackbar to reset distance and degree parameter',
font='Helvetica 10 bold'
)
text0.pack(side='top')
text1 = tki.Label(panel, text=
'W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\n'
'S - Move Tello Down\t\t\tArrow Down - Move Tello Backward\n'
'A - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\n'
'D - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right',
justify="left")
text1.pack(side="top")
self.btn_landing = tki.Button(
panel, text="Land", relief="raised", command=self.telloLanding)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_takeoff = tki.Button(
panel, text="Takeoff", relief="raised", command=self.telloTakeOff)
self.btn_takeoff.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
# binding arrow keys to drone control
self.tmp_f = tki.Frame(panel, width=100, height=2)
self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)
self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)
self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)
self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)
self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)
self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)
self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)
self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)
self.tmp_f.pack(side="bottom")
self.tmp_f.focus_set()
self.btn_landing = tki.Button(
panel, text="Flip", relief="raised", command=self.openFlipWindow)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01, digits=3, label='Distance(m)',
resolution=0.01)
self.distance_bar.set(0.2)
self.distance_bar.pack(side="left")
self.btn_distance = tki.Button(panel, text="Reset Distance", relief="raised",
command=self.updateDistancebar,
)
self.btn_distance.pack(side="left", fill="both",
expand="yes", padx=10, pady=5)
self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree')
self.degree_bar.set(30)
self.degree_bar.pack(side="right")
self.btn_distance = tki.Button(panel, text="Reset Degree", relief="raised", command=self.updateDegreebar)
self.btn_distance.pack(side="right", fill="both",
expand="yes", padx=10, pady=5)
def openFlipWindow(self):
"""
        open the flip window and initialize all the buttons and text
"""
panel = Toplevel(self.root)
panel.wm_title("Gesture Recognition")
self.btn_flipl = tki.Button(
panel, text="Flip Left", relief="raised", command=self.telloFlip_l)
self.btn_flipl.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipr = tki.Button(
panel, text="Flip Right", relief="raised", command=self.telloFlip_r)
self.btn_flipr.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipf = tki.Button(
panel, text="Flip Forward", relief="raised", command=self.telloFlip_f)
self.btn_flipf.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipb = tki.Button(
panel, text="Flip Backward", relief="raised", command=self.telloFlip_b)
self.btn_flipb.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
def takeSnapshot(self):
"""
save the current frame of the video as a jpg file and put it into outputpath
"""
# grab the current timestamp and use it to construct the filename
ts = datetime.datetime.now()
filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
p = os.path.sep.join((self.outputPath, filename))
# save the file
cv2.imwrite(p, cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR))
print("[INFO] saved {}".format(filename))
def pauseVideo(self):
"""
        Toggle the freeze/unfreeze of the video
"""
if self.btn_pause.config('relief')[-1] == 'sunken':
self.btn_pause.config(relief="raised")
self.tello.video_freeze(False)
else:
self.btn_pause.config(relief="sunken")
self.tello.video_freeze(True)
def telloTakeOff(self):
return self.tello.takeoff()
def telloLanding(self):
return self.tello.land()
def telloFlip_l(self):
return self.tello.flip('l')
def telloFlip_r(self):
return self.tello.flip('r')
def telloFlip_f(self):
return self.tello.flip('f')
def telloFlip_b(self):
return self.tello.flip('b')
def telloCW(self, degree):
return self.tello.rotate_cw(degree)
def telloCCW(self, degree):
return self.tello.rotate_ccw(degree)
def telloMoveForward(self, distance):
return self.tello.move_forward(distance)
def telloMoveBackward(self, distance):
return self.tello.move_backward(distance)
def telloMoveLeft(self, distance):
return self.tello.move_left(distance)
def telloMoveRight(self, distance):
return self.tello.move_right(distance)
def telloUp(self, dist):
return self.tello.move_up(dist)
def telloDown(self, dist):
return self.tello.move_down(dist)
def updateTrackBar(self):
self.my_tello_hand.setThr(self.hand_thr_bar.get())
def updateDistancebar(self):
self.distance = self.distance_bar.get()
print ('reset distance to %.1f' % self.distance)
def updateDegreebar(self):
self.degree = self.degree_bar.get()
print ('reset distance to %d' % self.degree)
def on_keypress_w(self, event):
print ("up %d m" % self.distance)
self.telloUp(self.distance)
def on_keypress_s(self, event):
print ("down %d m" % self.distance)
self.telloDown(self.distance)
def on_keypress_a(self, event):
print ("ccw %d degree" % self.degree)
self.tello.rotate_ccw(self.degree)
def on_keypress_d(self, event):
print ("cw %d m" % self.degree)
self.tello.rotate_cw(self.degree)
def on_keypress_up(self, event):
print ("forward %d m" % self.distance)
self.telloMoveForward(self.distance)
def on_keypress_down(self, event):
print ("backward %d m" % self.distance)
self.telloMoveBackward(self.distance)
def on_keypress_left(self, event):
print ("left %d m" % self.distance)
self.telloMoveLeft(self.distance)
def on_keypress_right(self, event):
print ("right %d m" % self.distance)
self.telloMoveRight(self.distance)
def on_keypress_enter(self, event):
if self.frame is not None:
self.registerFace()
self.tmp_f.focus_set()
def onClose(self):
"""
set the stop event, cleanup the camera, and allow the rest of
the quit process to continue
"""
print("[INFO] closing...")
self.stopEvent.set()
del self.tello
self.root.quit()
| [
"PIL.Image.fromarray",
"tkinter.Toplevel",
"tkinter.Button",
"time.sleep",
"threading.Event",
"tkinter.Scale",
"datetime.datetime.now",
"tkinter.Tk",
"os.path.sep.join",
"platform.system",
"tkinter.Label",
"cv2.cvtColor",
"threading.Thread",
"tkinter.Frame",
"PIL.ImageTk.PhotoImage"
]
| [((1269, 1277), 'tkinter.Tk', 'tki.Tk', ([], {}), '()\n', (1275, 1277), True, 'import tkinter as tki\n'), ((1358, 1424), 'tkinter.Button', 'tki.Button', (['self.root'], {'text': '"""Snapshot!"""', 'command': 'self.takeSnapshot'}), "(self.root, text='Snapshot!', command=self.takeSnapshot)\n", (1368, 1424), True, 'import tkinter as tki\n'), ((1611, 1688), 'tkinter.Button', 'tki.Button', (['self.root'], {'text': '"""Pause"""', 'relief': '"""raised"""', 'command': 'self.pauseVideo'}), "(self.root, text='Pause', relief='raised', command=self.pauseVideo)\n", (1621, 1688), True, 'import tkinter as tki\n'), ((1832, 1930), 'tkinter.Button', 'tki.Button', (['self.root'], {'text': '"""Open Command Panel"""', 'relief': '"""raised"""', 'command': 'self.openCmdWindow'}), "(self.root, text='Open Command Panel', relief='raised', command=\n self.openCmdWindow)\n", (1842, 1930), True, 'import tkinter as tki\n'), ((2199, 2216), 'threading.Event', 'threading.Event', ([], {}), '()\n', (2214, 2216), False, 'import threading\n'), ((2239, 2287), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.videoLoop', 'args': '()'}), '(target=self.videoLoop, args=())\n', (2255, 2287), False, 'import threading\n'), ((2601, 2646), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._sendingCommand'}), '(target=self._sendingCommand)\n', (2617, 2646), False, 'import threading\n'), ((4324, 4349), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['image'], {}), '(image)\n', (4342, 4349), False, 'from PIL import ImageTk\n'), ((5314, 5333), 'tkinter.Toplevel', 'Toplevel', (['self.root'], {}), '(self.root)\n', (5322, 5333), False, 'from tkinter import Toplevel, Scale\n'), ((5425, 5610), 'tkinter.Label', 'tki.Label', (['panel'], {'text': '"""This Controller map keyboard inputs to Tello control commands\nAdjust the trackbar to reset distance and degree parameter"""', 'font': '"""Helvetica 10 bold"""'}), '(panel, text=\n """This Controller map keyboard inputs to Tello control commands\nAdjust the trackbar to reset distance and degree parameter"""\n , font=\'Helvetica 10 bold\')\n', (5434, 5610), True, 'import tkinter as tki\n'), ((5759, 6041), 'tkinter.Label', 'tki.Label', (['panel'], {'text': '"""W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\nS - Move Tello Down\t\t\tArrow Down - Move Tello Backward\nA - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\nD - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right"""', 'justify': '"""left"""'}), '(panel, text=\n """W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\nS - Move Tello Down\t\t\tArrow Down - Move Tello Backward\nA - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\nD - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right"""\n , justify=\'left\')\n', (5768, 6041), True, 'import tkinter as tki\n'), ((6239, 6313), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Land"""', 'relief': '"""raised"""', 'command': 'self.telloLanding'}), "(panel, text='Land', relief='raised', command=self.telloLanding)\n", (6249, 6313), True, 'import tkinter as tki\n'), ((6474, 6551), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Takeoff"""', 'relief': '"""raised"""', 'command': 'self.telloTakeOff'}), "(panel, text='Takeoff', relief='raised', command=self.telloTakeOff)\n", (6484, 6551), True, 'import tkinter as tki\n'), ((6752, 6789), 'tkinter.Frame', 'tki.Frame', (['panel'], {'width': '(100)', 'height': '(2)'}), '(panel, width=100, height=2)\n', (6761, 6789), True, 'import tkinter as tki\n'), ((7390, 7466), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Flip"""', 'relief': '"""raised"""', 'command': 'self.openFlipWindow'}), "(panel, text='Flip', relief='raised', command=self.openFlipWindow)\n", (7400, 7466), True, 'import tkinter as tki\n'), ((7628, 7730), 'tkinter.Scale', 'Scale', (['panel'], {'from_': '(0.02)', 'to': '(5)', 'tickinterval': '(0.01)', 'digits': '(3)', 'label': '"""Distance(m)"""', 'resolution': '(0.01)'}), "(panel, from_=0.02, to=5, tickinterval=0.01, digits=3, label=\n 'Distance(m)', resolution=0.01)\n", (7633, 7730), False, 'from tkinter import Toplevel, Scale\n'), ((7868, 7962), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Reset Distance"""', 'relief': '"""raised"""', 'command': 'self.updateDistancebar'}), "(panel, text='Reset Distance', relief='raised', command=self.\n updateDistancebar)\n", (7878, 7962), True, 'import tkinter as tki\n'), ((8184, 8246), 'tkinter.Scale', 'Scale', (['panel'], {'from_': '(1)', 'to': '(360)', 'tickinterval': '(10)', 'label': '"""Degree"""'}), "(panel, from_=1, to=360, tickinterval=10, label='Degree')\n", (8189, 8246), False, 'from tkinter import Toplevel, Scale\n'), ((8351, 8441), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Reset Degree"""', 'relief': '"""raised"""', 'command': 'self.updateDegreebar'}), "(panel, text='Reset Degree', relief='raised', command=self.\n updateDegreebar)\n", (8361, 8441), True, 'import tkinter as tki\n'), ((8702, 8721), 'tkinter.Toplevel', 'Toplevel', (['self.root'], {}), '(self.root)\n', (8710, 8721), False, 'from tkinter import Toplevel, Scale\n'), ((8794, 8872), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Flip Left"""', 'relief': '"""raised"""', 'command': 'self.telloFlip_l'}), "(panel, text='Flip Left', relief='raised', command=self.telloFlip_l)\n", (8804, 8872), True, 'import tkinter as tki\n'), ((9027, 9106), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Flip Right"""', 'relief': '"""raised"""', 'command': 'self.telloFlip_r'}), "(panel, text='Flip Right', relief='raised', command=self.telloFlip_r)\n", (9037, 9106), True, 'import tkinter as tki\n'), ((9261, 9347), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Flip Forward"""', 'relief': '"""raised"""', 'command': 'self.telloFlip_f'}), "(panel, text='Flip Forward', relief='raised', command=self.\n telloFlip_f)\n", (9271, 9347), True, 'import tkinter as tki\n'), ((9497, 9584), 'tkinter.Button', 'tki.Button', (['panel'], {'text': '"""Flip Backward"""', 'relief': '"""raised"""', 'command': 'self.telloFlip_b'}), "(panel, text='Flip Backward', relief='raised', command=self.\n telloFlip_b)\n", (9507, 9584), True, 'import tkinter as tki\n'), ((9941, 9964), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9962, 9964), False, 'import datetime\n'), ((10047, 10092), 'os.path.sep.join', 'os.path.sep.join', (['(self.outputPath, filename)'], {}), '((self.outputPath, filename))\n', (10063, 10092), False, 'import os\n'), ((2942, 2957), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2952, 2957), False, 'import time\n'), ((4457, 4479), 'tkinter.Label', 'tki.Label', ([], {'image': 'image'}), '(image=image)\n', (4466, 4479), True, 'import tkinter as tki\n'), ((4951, 4964), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4961, 4964), False, 'import time\n'), ((10141, 10184), 'cv2.cvtColor', 'cv2.cvtColor', (['self.frame', 'cv2.COLOR_RGB2BGR'], {}), '(self.frame, cv2.COLOR_RGB2BGR)\n', (10153, 10184), False, 'import cv2\n'), ((3094, 3111), 'platform.system', 'platform.system', ([], {}), '()\n', (3109, 3111), False, 'import platform\n'), ((3395, 3422), 'PIL.Image.fromarray', 'Image.fromarray', (['self.frame'], {}), '(self.frame)\n', (3410, 3422), False, 'from PIL import Image\n'), ((3872, 3932), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._updateGUIImage', 'args': '(image,)'}), '(target=self._updateGUIImage, args=(image,))\n', (3888, 3932), False, 'import threading\n'), ((3991, 4007), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (4001, 4007), False, 'import time\n')]
# Neural Networks Demystified
# Part 1: Data + Architecture
#
# Supporting code for short YouTube series on artificial neural networks.
#
# <NAME>
# @stephencwelch
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100 | [
"numpy.array",
"numpy.amax"
]
| [((247, 295), 'numpy.array', 'np.array', (['([3, 5], [5, 1], [10, 2])'], {'dtype': 'float'}), '(([3, 5], [5, 1], [10, 2]), dtype=float)\n', (255, 295), True, 'import numpy as np\n'), ((297, 338), 'numpy.array', 'np.array', (['([75], [82], [93])'], {'dtype': 'float'}), '(([75], [82], [93]), dtype=float)\n', (305, 338), True, 'import numpy as np\n'), ((358, 376), 'numpy.amax', 'np.amax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (365, 376), True, 'import numpy as np\n')] |
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def menubuttonclass(context, appname):
if appname == context['request'].resolver_match.func.view_class.__module__.split(".")[0]:
return "btn-primary"
else:
return "btn-default"
| [
"django.template.Library"
]
| [((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')] |
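A minimal parsing sketch follows (not part of the dataset itself, and based on an assumption about the row layout): each tuple in the last field of a row appears to begin with a (start, end) character span into that row's source snippet, followed by the dotted API name and the call expression text as it appears in the source. The snippet, offsets, and entry below are hypothetical and built for illustration, not copied from any row.
# Sketch only: demonstrates slicing a code string by an extract-style span.
# The field meanings beyond the leading (start, end) pair are an assumption.
code = (
    "import numpy as np\n"
    "X = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)\n"
)
# Hypothetical entry; offsets are computed here so the example stays self-consistent.
call_text = "np.array(([3, 5], [5, 1], [10, 2]), dtype=float)"
start = code.index(call_text)
entry = ((start, start + len(call_text)), "numpy.array", "np.array")
span, dotted_name, call_expr = entry
snippet = code[span[0]:span[1]]        # slice the source by the recorded span
assert snippet.startswith(call_expr)   # the span starts at the call expression
print(f"{dotted_name} -> {snippet}")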