code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
# admin_tools/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^$', views.admin_home_view, name='admin_home',),
re_path(r'^data_cleanup/$', views.data_cleanup_view, name='data_cleanup'),
re_path(r'^data_cleanup_organization_analysis/$',
views.data_cleanup_organization_analysis_view, name='data_cleanup_organization_analysis'),
re_path(r'^data_cleanup_organization_list_analysis/$',
views.data_cleanup_organization_list_analysis_view, name='data_cleanup_organization_list_analysis'),
re_path(r'^data_cleanup_position_list_analysis/$',
views.data_cleanup_position_list_analysis_view, name='data_cleanup_position_list_analysis'),
re_path(r'^data_cleanup_voter_hanging_data_process/$',
views.data_cleanup_voter_hanging_data_process_view, name='data_cleanup_voter_hanging_data_process'),
re_path(r'^data_cleanup_voter_list_analysis/$',
views.data_cleanup_voter_list_analysis_view, name='data_cleanup_voter_list_analysis'),
re_path(r'^data_voter_statistics/$', views.data_voter_statistics_view, name='data_voter_statistics'),
re_path(r'^import_sample_data/$', views.import_sample_data_view, name='import_sample_data'),
re_path(r'^statistics/$', views.statistics_summary_view, name='statistics_summary'),
re_path(r'^sync_dashboard/$', views.sync_data_with_master_servers_view, name='sync_dashboard'),
]
| [
"django.conf.urls.re_path"
]
| [((164, 219), 'django.conf.urls.re_path', 're_path', (['"""^$"""', 'views.admin_home_view'], {'name': '"""admin_home"""'}), "('^$', views.admin_home_view, name='admin_home')\n", (171, 219), False, 'from django.conf.urls import re_path\n'), ((227, 299), 'django.conf.urls.re_path', 're_path', (['"""^data_cleanup/$"""', 'views.data_cleanup_view'], {'name': '"""data_cleanup"""'}), "('^data_cleanup/$', views.data_cleanup_view, name='data_cleanup')\n", (234, 299), False, 'from django.conf.urls import re_path\n'), ((306, 454), 'django.conf.urls.re_path', 're_path', (['"""^data_cleanup_organization_analysis/$"""', 'views.data_cleanup_organization_analysis_view'], {'name': '"""data_cleanup_organization_analysis"""'}), "('^data_cleanup_organization_analysis/$', views.\n data_cleanup_organization_analysis_view, name=\n 'data_cleanup_organization_analysis')\n", (313, 454), False, 'from django.conf.urls import re_path\n'), ((459, 622), 'django.conf.urls.re_path', 're_path', (['"""^data_cleanup_organization_list_analysis/$"""', 'views.data_cleanup_organization_list_analysis_view'], {'name': '"""data_cleanup_organization_list_analysis"""'}), "('^data_cleanup_organization_list_analysis/$', views.\n data_cleanup_organization_list_analysis_view, name=\n 'data_cleanup_organization_list_analysis')\n", (466, 622), False, 'from django.conf.urls import re_path\n'), ((627, 778), 'django.conf.urls.re_path', 're_path', (['"""^data_cleanup_position_list_analysis/$"""', 'views.data_cleanup_position_list_analysis_view'], {'name': '"""data_cleanup_position_list_analysis"""'}), "('^data_cleanup_position_list_analysis/$', views.\n data_cleanup_position_list_analysis_view, name=\n 'data_cleanup_position_list_analysis')\n", (634, 778), False, 'from django.conf.urls import re_path\n'), ((783, 946), 'django.conf.urls.re_path', 're_path', (['"""^data_cleanup_voter_hanging_data_process/$"""', 'views.data_cleanup_voter_hanging_data_process_view'], {'name': '"""data_cleanup_voter_hanging_data_process"""'}), "('^data_cleanup_voter_hanging_data_process/$', views.\n data_cleanup_voter_hanging_data_process_view, name=\n 'data_cleanup_voter_hanging_data_process')\n", (790, 946), False, 'from django.conf.urls import re_path\n'), ((951, 1093), 'django.conf.urls.re_path', 're_path', (['"""^data_cleanup_voter_list_analysis/$"""', 'views.data_cleanup_voter_list_analysis_view'], {'name': '"""data_cleanup_voter_list_analysis"""'}), "('^data_cleanup_voter_list_analysis/$', views.\n data_cleanup_voter_list_analysis_view, name=\n 'data_cleanup_voter_list_analysis')\n", (958, 1093), False, 'from django.conf.urls import re_path\n'), ((1098, 1202), 'django.conf.urls.re_path', 're_path', (['"""^data_voter_statistics/$"""', 'views.data_voter_statistics_view'], {'name': '"""data_voter_statistics"""'}), "('^data_voter_statistics/$', views.data_voter_statistics_view, name=\n 'data_voter_statistics')\n", (1105, 1202), False, 'from django.conf.urls import re_path\n'), ((1204, 1299), 'django.conf.urls.re_path', 're_path', (['"""^import_sample_data/$"""', 'views.import_sample_data_view'], {'name': '"""import_sample_data"""'}), "('^import_sample_data/$', views.import_sample_data_view, name=\n 'import_sample_data')\n", (1211, 1299), False, 'from django.conf.urls import re_path\n'), ((1301, 1388), 'django.conf.urls.re_path', 're_path', (['"""^statistics/$"""', 'views.statistics_summary_view'], {'name': '"""statistics_summary"""'}), "('^statistics/$', views.statistics_summary_view, name=\n 'statistics_summary')\n", (1308, 1388), False, 'from django.conf.urls import re_path\n'), ((1390, 1488), 'django.conf.urls.re_path', 're_path', (['"""^sync_dashboard/$"""', 'views.sync_data_with_master_servers_view'], {'name': '"""sync_dashboard"""'}), "('^sync_dashboard/$', views.sync_data_with_master_servers_view, name\n ='sync_dashboard')\n", (1397, 1488), False, 'from django.conf.urls import re_path\n')] |
from .zero import zero
from main_module._unittester import UnitTester
test = UnitTester(__name__)
del UnitTester | [
"main_module._unittester.UnitTester"
]
| [((78, 98), 'main_module._unittester.UnitTester', 'UnitTester', (['__name__'], {}), '(__name__)\n', (88, 98), False, 'from main_module._unittester import UnitTester\n')] |
import numpy as np
import numpy.random as npr
import scipy.optimize as spo
import tomo_challenge.metrics as tcm
# custom data type, could be replaced with/tie in to tree.py class
# cut_vals is (nfeat, nbins - 1) numpy array, float
# tree_ids is ((nbins,) * nfeat) numpy array, int
TreePars = namedtuple('TreePars', ['cut_vals', 'tree_ids'])
# should maybe put this function in a class so we can call TreePars.to_array
def treepars_to_array(treepars):
"""
Flattens cut_vals and tree_ids for optimizer
"""
cuts = np.flatten(treepars.cut_vals)
ids = np.flatten(treepars.tree_ids)
arr = np.concatenate((cuts, ids))
return(arr)
# should maybe put this function in a class so we can call TreePars.from_array
def array_to_treepars(arr):
"""
Converts optimizer format of 1D array back into namedtuple of arrays
"""
flat_cuts = arr[type(arr) == float]
flat_ids = arr[type(arr) == int]
nbins = len(np.unique(flat_ids))
nfeat = len(flat_cuts) / (nbins - 1)
# maybe do some assert checks with these just in case types have problems
# cuts = arr[0:nfeat*(nbins-1)].reshape((nfeat, nbins-1))
# ids = arr[feat*(nbins-1):].reshape((nbins,) * nfeat)
cuts = flat_cuts.reshape((nfeat, nbins-1))
ids = flat_ids.reshape((nbins,) * nfeat)
treepars = TreePars(cuts, ids)
return(treepars)
def get_cuts(galaxies, ival_treepars=None, nbins=3):
"""
Obtains simplest possible bin definitions: cuts in the space of observables given number of bins
Parameters
----------
galaxies: numpy.ndarray, float
observables (magnitudes and/or colors and/or errors) to serve as features for set of galaxies
shape(galaxies) = (ngals, nfeat)
ival_treepars: namedtuple, numpy.ndarray, float and int, optional
initial values for decision tree parameters
shape(ivals.cut_vals) = (nfeat, (nbins - 1))
shape(tree_ids) = ((nbins,) * nfeat)
nbins: int, optional
number of bins for which to obtain cuts
Returns
-------
assignments: numpy.ndarray, int
bin assignment for each galaxy
shape(assignments) = (ngals, 1)
Notes
-----
`sort_gals` does the heavy lifting.
`eval_metric` will call one of the metrics from [tomo_challenge](https://github.com/LSSTDESC/tomo_challenge/blob/master/tomo_challenge/metrics.py).
The original idea for a general, non-cut-based optimizer was to have parameters equal to the (ngals) length array of ints representing the bin assignments, but that's not necessary for the simple cut-and-sweep barber and would probably break `spo.minimize`.
"""
(ngals, nfeat) = np.shape(galaxies)
if ival_treepars is None:
cut_ivals = np.quantile(galaxies, np.linspace(0., 1., nbins), axis=1)
assert(len(np.flatten(ivals)) == nbins**nfeat)
# need structure and way of making dumb version of these
tree_ids = npr.random_integers(0, nbins, nbins**nfeat)
assert(len(np.unique(tree_ids)) == nbins)
tree_ids.reshape((nfeat, nbins))
ival_treepars = TreePars(cut_ivals, tree_ids)
ivals = treepars_to_array(ival_treepars)
opt_res = spo.minimize(eval_metric, ivals, args=galaxies)
treepars = array_to_treepars(opt_res.x)
assignments = sort_gals(galaxies, treepars)
return(assignments)
def sort_gals(galaxies, tree_pars):
"""
Divides available galaxies into subsets according to a given decision tree on their observables
Parameters
----------
galaxies: nfeature x n_gal array
tree: tree object
Notes
-----
could be based on bisect, or maybe a sklearn object?
"""
pass
def eval_metric(arr, galaxies):
"""
Just calls a metric from tomo_challenge wrapped for the `spo.minimize` API
Notes
-----
Replace `tcm.metric` with actual call to one of the tomo_challenge metrics
Actually, there's a problem in that the current tomo_challenge metrics require the true redshifts...
"""
treepars = array_to_treepars(arr)
assignments = sort_gals(galaxies, treepars)
metval = tcm.metric(assignments)
return metval
| [
"numpy.unique",
"tomo_challenge.metrics.metric",
"numpy.random.random_integers",
"scipy.optimize.minimize",
"numpy.linspace",
"numpy.concatenate",
"numpy.flatten",
"numpy.shape"
]
| [((529, 558), 'numpy.flatten', 'np.flatten', (['treepars.cut_vals'], {}), '(treepars.cut_vals)\n', (539, 558), True, 'import numpy as np\n'), ((569, 598), 'numpy.flatten', 'np.flatten', (['treepars.tree_ids'], {}), '(treepars.tree_ids)\n', (579, 598), True, 'import numpy as np\n'), ((609, 636), 'numpy.concatenate', 'np.concatenate', (['(cuts, ids)'], {}), '((cuts, ids))\n', (623, 636), True, 'import numpy as np\n'), ((2661, 2679), 'numpy.shape', 'np.shape', (['galaxies'], {}), '(galaxies)\n', (2669, 2679), True, 'import numpy as np\n'), ((3179, 3226), 'scipy.optimize.minimize', 'spo.minimize', (['eval_metric', 'ivals'], {'args': 'galaxies'}), '(eval_metric, ivals, args=galaxies)\n', (3191, 3226), True, 'import scipy.optimize as spo\n'), ((4109, 4132), 'tomo_challenge.metrics.metric', 'tcm.metric', (['assignments'], {}), '(assignments)\n', (4119, 4132), True, 'import tomo_challenge.metrics as tcm\n'), ((943, 962), 'numpy.unique', 'np.unique', (['flat_ids'], {}), '(flat_ids)\n', (952, 962), True, 'import numpy as np\n'), ((2929, 2974), 'numpy.random.random_integers', 'npr.random_integers', (['(0)', 'nbins', '(nbins ** nfeat)'], {}), '(0, nbins, nbins ** nfeat)\n', (2948, 2974), True, 'import numpy.random as npr\n'), ((2753, 2781), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'nbins'], {}), '(0.0, 1.0, nbins)\n', (2764, 2781), True, 'import numpy as np\n'), ((2808, 2825), 'numpy.flatten', 'np.flatten', (['ivals'], {}), '(ivals)\n', (2818, 2825), True, 'import numpy as np\n'), ((2992, 3011), 'numpy.unique', 'np.unique', (['tree_ids'], {}), '(tree_ids)\n', (3001, 3011), True, 'import numpy as np\n')] |
import glob
import numpy as np
X = np.empty((0, 193))
y = np.empty((0, 10))
groups = np.empty((0, 1))
npz_files = glob.glob('./urban_sound_?.npz')
for fn in npz_files:
print(fn)
data = np.load(fn)
X = np.append(X, data['X'], axis=0)
y = np.append(y, data['y'], axis=0)
groups = np.append(groups, data['groups'], axis=0)
print(groups[groups>0])
print(X.shape, y.shape)
for r in y:
if np.sum(r) > 1.5:
print(r)
np.savez('urban_sound', X=X, y=y, groups=groups)
| [
"numpy.savez",
"numpy.append",
"numpy.sum",
"numpy.empty",
"numpy.load",
"glob.glob"
]
| [((36, 54), 'numpy.empty', 'np.empty', (['(0, 193)'], {}), '((0, 193))\n', (44, 54), True, 'import numpy as np\n'), ((59, 76), 'numpy.empty', 'np.empty', (['(0, 10)'], {}), '((0, 10))\n', (67, 76), True, 'import numpy as np\n'), ((86, 102), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (94, 102), True, 'import numpy as np\n'), ((115, 147), 'glob.glob', 'glob.glob', (['"""./urban_sound_?.npz"""'], {}), "('./urban_sound_?.npz')\n", (124, 147), False, 'import glob\n'), ((444, 492), 'numpy.savez', 'np.savez', (['"""urban_sound"""'], {'X': 'X', 'y': 'y', 'groups': 'groups'}), "('urban_sound', X=X, y=y, groups=groups)\n", (452, 492), True, 'import numpy as np\n'), ((194, 205), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (201, 205), True, 'import numpy as np\n'), ((214, 245), 'numpy.append', 'np.append', (['X', "data['X']"], {'axis': '(0)'}), "(X, data['X'], axis=0)\n", (223, 245), True, 'import numpy as np\n'), ((254, 285), 'numpy.append', 'np.append', (['y', "data['y']"], {'axis': '(0)'}), "(y, data['y'], axis=0)\n", (263, 285), True, 'import numpy as np\n'), ((299, 340), 'numpy.append', 'np.append', (['groups', "data['groups']"], {'axis': '(0)'}), "(groups, data['groups'], axis=0)\n", (308, 340), True, 'import numpy as np\n'), ((410, 419), 'numpy.sum', 'np.sum', (['r'], {}), '(r)\n', (416, 419), True, 'import numpy as np\n')] |
import os
import errno
import sys
def mock_directory_tree(tree):
tree = dict([(os.path.join(*key), value) \
for key, value in tree.iteritems()])
def listdir(path):
try:
names = tree[path]
except KeyError:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
if names is None:
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
return names
def isfile(path):
try:
item = tree[path]
except KeyError:
return False
return item is None
def isdir(path):
try:
item = tree[path]
except KeyError:
return False
return item is not None
return listdir, isfile, isdir
class PreserveOs(object):
def setUp(self):
super(PreserveOs, self).setUp()
self.__listdir = os.listdir
self.__isfile = os.path.isfile
self.__isdir = os.path.isdir
def tearDown(self):
os.path.isdir = self.__isdir
os.path.isfile = self.__isfile
os.listdir = self.__listdir
super(PreserveOs, self).tearDown()
def full_test_tree(self):
tree = {('.',): ('__init__.py', 'test_first.py', 'test_second.py',
'test_sub_first', 't_sub_first', 'test_sub_third'),
('.', '__init__.py'): None,
('.', 'test_first.py'): None,
('.', 'test_second.py'): None,
('.', 'test_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 'test_sub_first', '__init__.py'): None,
('.', 'test_sub_first', 'test_sub_first.py'): None,
('.', 't_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 't_sub_first', '__init__.py'): None,
('.', 't_sub_first', 'test_sub_first.py'): None,
('.', 'test_sub_second'): ('test_sub_first.py',),
('.', 'test_sub_second', 'test_sub_first.py'): None,
('.', 'test_sub_third'): ('__init__.py', 'test_sub_first.py',
'test_sub_second'),
('.', 'test_sub_third', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second'): \
('__init__.py', 'test_sub_first.py', 't_sub_second.py'),
('.', 'test_sub_third', 'test_sub_second', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_second',
'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second',
't_sub_second.py'): None}
os.listdir, os.path.isfile, os.path.isdir = mock_directory_tree(tree)
self.expected_content = {'first': 'test_first',
'second': 'test_second',
'sub_first': 'test_sub_first',
'sub_first.sub_first': \
'test_sub_first.test_sub_first',
'sub_third': 'test_sub_third',
'sub_third.sub_first': \
'test_sub_third.test_sub_first',
'sub_third.sub_second': \
'test_sub_third.test_sub_second',
'sub_third.sub_second.sub_first': \
'test_sub_third.test_sub_second.' \
'test_sub_first'}
class ImportTrash(object):
def setUp(self):
self.modules_trash = []
self.meta_path_trash = []
def tearDown(self):
for item in self.meta_path_trash:
if item in sys.meta_path:
sys.meta_path.remove(item)
for name in self.modules_trash:
if name in sys.modules:
del sys.modules[name]
| [
"os.strerror",
"os.path.join",
"sys.meta_path.remove"
]
| [((84, 102), 'os.path.join', 'os.path.join', (['*key'], {}), '(*key)\n', (96, 102), False, 'import os\n'), ((406, 432), 'os.strerror', 'os.strerror', (['errno.ENOTDIR'], {}), '(errno.ENOTDIR)\n', (417, 432), False, 'import os\n'), ((3873, 3899), 'sys.meta_path.remove', 'sys.meta_path.remove', (['item'], {}), '(item)\n', (3893, 3899), False, 'import sys\n'), ((305, 330), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (316, 330), False, 'import os\n')] |
import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
import argparse
from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser
from .common_setup import *
commandname = 'createsiteusers'
class CreateSiteUsersParserTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser_under_test, manager, mock_command = initialize_test_pieces(commandname)
CreateSiteUsersParser.create_site_user_parser(manager, mock_command)
def test_create_site_users_parser_users_file(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')) as open_file:
mock_args = [commandname, "users.csv"]
args = self.parser_under_test.parse_args(mock_args)
open_file.assert_called_with('users.csv', 'r', -1, None, None)
def test_create_site_user_parser_missing_arguments(self):
mock_args = [commandname]
with self.assertRaises(SystemExit):
args = self.parser_under_test.parse_args(mock_args)
def test_create_site_user_parser_role(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')):
mock_args = [commandname, "users.csv", '--site', 'site-name']
args = self.parser_under_test.parse_args(mock_args)
assert args.site == 'site-name', args
| [
"tabcmd.parsers.create_site_users_parser.CreateSiteUsersParser.create_site_user_parser",
"mock.mock_open"
]
| [((445, 513), 'tabcmd.parsers.create_site_users_parser.CreateSiteUsersParser.create_site_user_parser', 'CreateSiteUsersParser.create_site_user_parser', (['manager', 'mock_command'], {}), '(manager, mock_command)\n', (490, 513), False, 'from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser\n'), ((612, 644), 'mock.mock_open', 'mock.mock_open', ([], {'read_data': '"""test"""'}), "(read_data='test')\n", (626, 644), False, 'import mock\n'), ((1146, 1178), 'mock.mock_open', 'mock.mock_open', ([], {'read_data': '"""test"""'}), "(read_data='test')\n", (1160, 1178), False, 'import mock\n')] |
import os
from setuptools import setup, find_packages
import versioneer
if __name__ == "__main__":
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
meta = {}
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, 'gammy', '_meta.py')) as fp:
exec(fp.read(), meta)
setup(
name = "gammy",
version = versioneer.get_version(),
author = meta["__author__"],
author_email = meta["__contact__"],
description = "Generalized additive models with a Bayesian twist",
url = "https://github.com/malmgrek/Gammy",
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
install_requires = [
"attrs",
"bayespy",
"h5py",
"matplotlib",
"numpy",
"scipy"
],
extras_require = {
"dev": [
"versioneer",
"pytest",
"hypothesis",
],
},
keywords = [
"Statistical modeling",
"Bayesian statistics",
"Machine learning",
],
classifiers = [
"Programming Language :: Python :: 3 :: Only",
"Development Status :: 1 - Planning",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: {0}".format(meta["__license__"]),
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
long_description = read('README.md'),
long_description_content_type = "text/markdown",
) | [
"setuptools.find_packages",
"os.path.join",
"versioneer.get_version",
"os.path.dirname",
"os.path.abspath",
"versioneer.get_cmdclass"
]
| [((244, 269), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (259, 269), False, 'import os\n'), ((285, 328), 'os.path.join', 'os.path.join', (['base_dir', '"""gammy"""', '"""_meta.py"""'], {}), "(base_dir, 'gammy', '_meta.py')\n", (297, 328), False, 'import os\n'), ((435, 459), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (457, 459), False, 'import versioneer\n'), ((707, 732), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (730, 732), False, 'import versioneer\n'), ((757, 772), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (770, 772), False, 'from setuptools import setup, find_packages\n'), ((156, 181), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (171, 181), False, 'import os\n')] |
# python 3.7
"""Predicts the scene category, attribute."""
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from .base_predictor import BasePredictor
from .scene_wideresnet import resnet18
__all__ = ['ScenePredictor']
NUM_CATEGORIES = 365
NUM_ATTRIBUTES = 102
FEATURE_DIM = 512
class ScenePredictor(BasePredictor):
"""Defines the predictor class for scene analysis."""
def __init__(self):
super().__init__('scene')
def build(self):
self.net = resnet18(num_classes=NUM_CATEGORIES)
def load(self):
# Load category labels.
self.check_attr('category_anno_path')
self.category_name_to_idx = {}
self.category_idx_to_name = {}
with open(self.category_anno_path, 'r') as f:
for line in f:
name, idx = line.strip().split(' ')
name = name[3:].replace('/', '__')
idx = int(idx)
self.category_name_to_idx[name] = idx
self.category_idx_to_name[idx] = name
assert len(self.category_name_to_idx) == NUM_CATEGORIES
assert len(self.category_idx_to_name) == NUM_CATEGORIES
# Load attribute labels.
self.check_attr('attribute_anno_path')
self.attribute_name_to_idx = {}
self.attribute_idx_to_name = {}
with open(self.attribute_anno_path, 'r') as f:
for idx, line in enumerate(f):
name = line.strip().replace(' ', '_')
self.attribute_name_to_idx[name] = idx
self.attribute_idx_to_name[idx] = name
assert len(self.attribute_name_to_idx) == NUM_ATTRIBUTES
assert len(self.attribute_idx_to_name) == NUM_ATTRIBUTES
# Transform for input images.
self.transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Load pre-trained weights for category prediction.
checkpoint = torch.load(self.weight_path,
map_location=lambda storage, loc: storage)
state_dict = {k.replace('module.', ''): v
for k, v in checkpoint['state_dict'].items()}
self.net.load_state_dict(state_dict)
fc_weight = list(self.net.parameters())[-2].data.numpy()
fc_weight[fc_weight < 0] = 0
# Load additional weights for attribute prediction.
self.check_attr('attribute_additional_weight_path')
self.attribute_weight = np.load(self.attribute_additional_weight_path)
assert self.attribute_weight.shape == (NUM_ATTRIBUTES, FEATURE_DIM)
def _predict(self, images):
if not isinstance(images, np.ndarray):
raise ValueError(f'Images should be with type `numpy.ndarray`!')
if images.dtype != np.uint8:
raise ValueError(f'Images should be with dtype `numpy.uint8`!')
if not (len(images.shape) == 4 and
0 < images.shape[0] <= self.batch_size and
images.shape[3] == self.image_channels):
raise ValueError(f'Images should be with shape [batch_size, height '
f'width, channel], where `batch_size` no larger than '
f'{self.batch_size}, and `channel` equals to '
f'{self.image_channels}!\n'
f'But {images.shape} received!')
xs = [self.transform(Image.fromarray(img)).unsqueeze(0) for img in images]
xs = torch.cat(xs, dim=0).to(self.run_device)
logits, features = self.net(xs)
category_scores = self.get_value(F.softmax(logits, dim=1))
features = self.get_value(features).squeeze(axis=(2, 3))
attribute_scores = features.dot(self.attribute_weight.T)
assert (len(category_scores.shape) == 2 and
category_scores.shape[1] == NUM_CATEGORIES)
assert (len(attribute_scores.shape) == 2 and
attribute_scores.shape[1] == NUM_ATTRIBUTES)
results = {
'category': category_scores,
'attribute': attribute_scores,
}
if self.use_cuda:
torch.cuda.empty_cache()
return results
def predict(self, images, **kwargs):
return self.batch_run(images, self._predict)
| [
"torch.nn.functional.softmax",
"PIL.Image.fromarray",
"torch.load",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"numpy.load",
"torch.cuda.empty_cache",
"torch.cat"
]
| [((1996, 2067), 'torch.load', 'torch.load', (['self.weight_path'], {'map_location': '(lambda storage, loc: storage)'}), '(self.weight_path, map_location=lambda storage, loc: storage)\n', (2006, 2067), False, 'import torch\n'), ((2492, 2538), 'numpy.load', 'np.load', (['self.attribute_additional_weight_path'], {}), '(self.attribute_additional_weight_path)\n', (2499, 2538), True, 'import numpy as np\n'), ((3561, 3585), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (3570, 3585), True, 'import torch.nn.functional as F\n'), ((4063, 4087), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4085, 4087), False, 'import torch\n'), ((1772, 1801), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (1789, 1801), True, 'import torchvision.transforms as transforms\n'), ((1812, 1833), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1831, 1833), True, 'import torchvision.transforms as transforms\n'), ((1844, 1910), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1864, 1910), True, 'import torchvision.transforms as transforms\n'), ((3443, 3463), 'torch.cat', 'torch.cat', (['xs'], {'dim': '(0)'}), '(xs, dim=0)\n', (3452, 3463), False, 'import torch\n'), ((3379, 3399), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3394, 3399), False, 'from PIL import Image\n')] |
from collections import deque
def solution(N, bus_stop):
answer = [[1300 for _ in range(N)] for _ in range(N)]
bus_stop = [(x-1, y-1) for x,y in bus_stop]
q = deque(bus_stop)
for x,y in bus_stop:
answer[x][y] = 0
while q:
x, y = q.popleft()
for nx, ny in ((x-1, y), (x+1, y), (x, y+1), (x, y-1)):
if (
0 <= nx < N and 0 <= ny < N
and answer[nx][ny] > answer[x][y]
):
answer[nx][ny] = answer[x][y] + 1
q.append((nx, ny))
return answer
if __name__ == '__main__':
print(solution(
3, [[1,2],[3,3]],
)) | [
"collections.deque"
]
| [((172, 187), 'collections.deque', 'deque', (['bus_stop'], {}), '(bus_stop)\n', (177, 187), False, 'from collections import deque\n')] |
import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit.acm.off_policy import AcMOffPolicy
from rltoolkit.algorithms import DDPG
from rltoolkit.algorithms.ddpg.models import Actor, Critic
class DDPG_AcM(AcMOffPolicy, DDPG):
def __init__(
self, unbiased_update: bool = False, custom_loss: float = 0.0,
custom_loss_target: float = 0.0, custom_loss_lr: float = 0.0001,
refill_buffer: bool = False,
lagrangian_custom_loss: bool = False, separate_custom_loss: bool = False,
cw_cl_targets: list = None, custom_loss_target_decay: int = None,
custom_loss_target_dfactor: float = None,
*args, **kwargs,
):
f"""DDPG with AcM class
Args:
unbiased_update (bool, optional): Use next_obs as action for update.
Defaults to { False }.
refill_buffer (bool, optional): if buffer should be refilled with new observations, when its full
Defaults to {False}
"""
super().__init__(*args, **kwargs)
self.unbiased_update = unbiased_update
self.actor = Actor(
self.ob_dim, ac_lim=self.actor_ac_lim, ac_dim=self.actor_output_dim
)
if not self.acm_critic:
self.critic = Critic(self.ob_dim, ac_dim=self.actor_output_dim)
self.custom_loss = custom_loss
custom_loss_scaled = np.log(np.exp(custom_loss) - 1)
self.custom_loss_param = torch.tensor(custom_loss_scaled) if not separate_custom_loss else torch.Tensor([custom_loss_scaled] * self.actor_output_dim)
self.custom_loss_param.requires_grad = lagrangian_custom_loss
self.custom_loss_target = custom_loss_target
self.cw_cl_targets = cw_cl_targets
if lagrangian_custom_loss and cw_cl_targets:
self.custom_loss_target = cw_cl_targets
self.lagrangian_custom_loss = lagrangian_custom_loss
self.custom_loss_lr = custom_loss_lr
self.separate_custom_loss = separate_custom_loss
self.custom_loss_optimizer = self.opt([self.custom_loss_param], lr=custom_loss_lr)
self.refill_buffer = refill_buffer
self.custom_loss_target_decay = custom_loss_target_decay
self.custom_loss_target_dfactor = custom_loss_target_dfactor
if self.custom_loss:
self.loss["ddpg"] = 0.0
self.loss["dist"] = 0.0
if lagrangian_custom_loss:
if self.separate_custom_loss:
self.distances = []
for i in range(self.actor_output_dim):
self.loss[f"custom_loss_param/{i}"] = 0.0
else:
self.loss["custom_loss_param"] = 0.0
new_hparams = {
"hparams/unbiased_update": self.unbiased_update,
"hparams/custom_loss": self.custom_loss,
"hparams/lagrangian_cl": self.lagrangian_custom_loss,
"hparams/custom_loss_target_decay": self.custom_loss_target_decay,
"hparams/custom_loss_target_dfactor": self.custom_loss_target_dfactor,
}
if self.lagrangian_custom_loss:
if self.cw_cl_targets is None:
new_hparams["hparams/cl_target"] = self.custom_loss_target
new_hparams["hparams/cl_lr"] = self.custom_loss_lr
self.hparams_acm.update(new_hparams)
self.hparams.update(self.hparams_acm)
def noise_action(self, obs, act_noise, deterministic=False):
action, _ = self._actor.act(obs, deterministic)
noise = act_noise * torch.randn(self.actor_output_dim, device=self.device)
action += noise * self.actor_ac_lim
action = np.clip(
action.cpu(), -1.1 * self.actor_ac_lim.cpu(), 1.1 * self.actor_ac_lim.cpu()
)
action = action.to(self.device)
if self.denormalize_actor_out:
action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
return action
def custom_loss_target_decay_condition(self):
return(
self.custom_loss_target_decay is not None
and self.custom_loss_target_dfactor is not None
and self.iterations > 0
and self.stats_logger.frames % self.custom_loss_target_decay == 0
)
def acm_update_condition(self):
return (
self.iteration > 0
and self.acm_epochs > 0
and self.stats_logger.frames % self.acm_update_freq == 0
)
def make_unbiased_update(self):
if self.update_condition():
for _ in range(self.grad_steps):
batch = self.replay_buffer.sample_batch(
self.update_batch_size, self.device
)
obs, next_obs, _, reward, done, acm_action = batch
self.update(
obs=obs,
next_obs=next_obs,
action=next_obs,
reward=reward,
done=done,
acm_action=acm_action,
)
def make_update(self):
if self.unbiased_update:
self.make_unbiased_update()
else:
super().make_update()
if self.custom_loss_target_decay_condition():
self.custom_loss_target *= self.custom_loss_target_dfactor
print(f"CUSTOM LOSS TARTGET DECAY, CURRENT VALUE {self.custom_loss_target}")
if self.acm_update_condition():
if self.acm_update_batches:
self.update_acm_batches(self.acm_update_batches)
else:
self.update_acm(self.acm_epochs)
def collect_params_dict(self):
params_dict = super().collect_params_dict()
params_dict["acm"] = self.acm.state_dict()
return params_dict
def apply_params_dict(self, params_dict):
super().apply_params_dict(params_dict)
self.acm.load_state_dict(params_dict["acm"])
def save_model(self, save_path=None):
save_path = DDPG.save_model(self, save_path)
torch.save(self.acm.state_dict(), save_path + "_acm_model.pt")
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
next_action, _ = self.actor_targ(next_obs)
next_action = self.replay_buffer.denormalize(next_action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([next_obs, next_action], axis=1)
next_action = self.acm(acm_obs)
q_target = self.critic_targ(next_obs, next_action)
qfunc_target = reward + self.gamma * (1 - done) * q_target
return qfunc_target
def add_custom_loss(self, loss, action, denorm_action, next_obs):
if self.custom_loss:
self.loss["ddpg"] = loss.item()
if self.norm_closs:
next_obs = self.replay_buffer.normalize(next_obs, force=True)
else:
action = denorm_action
if not self.separate_custom_loss:
loss_dist = F.mse_loss(action, self.cut_obs(next_obs))
self.loss["dist"] = loss_dist.item()
if self.lagrangian_custom_loss:
loss += F.softplus(self.custom_loss_param) * (loss_dist - self.custom_loss_target)
else:
loss += self.custom_loss * loss_dist
if self.custom_loss_target_decay is not None:
self.loss["custom_loss_target"] = self.custom_loss_target
else:
distances = torch.mean(F.mse_loss(action, self.cut_obs(next_obs), reduction='none'), dim=0)
if self.cw_cl_targets is None:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - self.custom_loss_target))
else:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - torch.Tensor(self.custom_loss_target)))
self.loss["dist"] = distances.detach()
if self.debug_mode:
for j in range(distances.shape[0]):
self.loss[f"dist/cw/{j}"] = distances[j]
return loss
def compute_pi_loss(self, obs, next_obs):
action, _ = self._actor(obs)
denorm_action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([obs, denorm_action], axis=1)
critic_action = self.acm(acm_obs)
else:
critic_action = denorm_action
loss = -self._critic(obs, critic_action).mean()
return self.add_custom_loss(loss, action, denorm_action, next_obs)
def update_custom_loss_param_loss(self):
if not self.lagrangian_custom_loss:
return
dist_loss = self.loss["dist"]
if self.cw_cl_targets is None:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - self.custom_loss_target)
else:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - torch.Tensor(self.custom_loss_target))
if self.separate_custom_loss:
for i in range(len(loss)):
self.loss[f"custom_loss_param/{i}"] = loss[i].item()
self.loss["dist"] = torch.mean(self.loss["dist"]).item()
loss = torch.sum(loss)
else:
self.loss["custom_loss_param"] = loss.item()
self.custom_loss_optimizer.zero_grad()
loss.backward()
self.custom_loss_optimizer.step()
def copy_offline_dataset(self, dataset, size):
"""copies the provided offlineRL dataset into the replay buffer.
for the moment assumes D4RL dataset format (a dictionary)
and copies elements one-by-one
"""
i = 0
traj = 0
while i < size:
traj += 1
done = torch.tensor(dataset['timeouts'][i] or dataset['terminals'][i])
obs = torch.tensor(dataset['observations'][i])
prev_idx = self.replay_buffer.add_obs(obs)
i += 1
ep_len = 0
while(not done and i < size):
nextobs = torch.tensor(dataset['observations'][i])
rew = torch.tensor( dataset['rewards'][i] )
done = torch.tensor( dataset['timeouts'][i] or dataset['terminals'][i] )
action = torch.tensor( dataset['actions'][i] )
end = torch.tensor( dataset['terminals'][i] )
next_idx = self.replay_buffer.add_obs(nextobs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, nextobs, rew, done, end
)
self.replay_buffer.add_acm_action(action)
prev_idx = next_idx
i += 1
ep_len += 1
print(f"copied offline dataset with {i} samples, contains {traj} trajectories")
#sets the internal variables according to the provided offline dataset
self.acm_pre_train_samples = i
self.buffer_size = i
self.max_frames = i
self.iterations = i / self.steps_per_epoch
#updates std/dev/min/max parameters of the dataset
self.update_obs_mean_std(self.replay_buffer)
def collect_batch_and_train(self, steps_per_epoch: int, *args, **kwargs):
"""SPP variant of rollouts and collect samples if there is enough samples
in replay buffer use existing samples to perform actor/critic update
otherwise generate new samples till steps_per_epoch number of steps
will be added to the replay buffer
Args:
steps_per_epoch (int): number of samples to collect and train
*args, **kwargs: arguments for make_update
"""
collected = 0
while collected < steps_per_epoch:
# important part,
# when the replay buffer is filled stop generating new frames, just use the existing buffer
# such that the number of used experience in learning is counted correctly
if (self.stats_logger.frames >= self.buffer_size - self.acm_pre_train_samples) and not self.refill_buffer:
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
continue
self.stats_logger.rollouts += 1
obs = self.env.reset()
# end - end of the episode from perspective of the simulation
# done - end of the episode from perspective of the model
end = False
obs = self.process_obs(obs)
prev_idx = self.replay_buffer.add_obs(obs)
ep_len = 0
while not end:
obs = self.replay_buffer.normalize(obs)
if (self.stats_logger.frames > self.acm_pre_train_samples) and (self.stats_logger.frames <= self.acm_pre_train_samples + self.random_frames):
action = self.initial_act(obs)
else:
action = self.noise_action(obs, self.act_noise)
action_proc = self.process_action(action, obs)
prev_obs = obs
obs, rew, done, _ = self.env.step(action_proc)
ep_len += 1
end = True if ep_len == self.max_ep_len else done
done = False if ep_len == self.max_ep_len else done
obs = self.process_obs(obs)
if self.next_obs_diff is not None:
obs = self.compute_next_obs_diff(prev_obs, obs)
next_idx = self.replay_buffer.add_obs(obs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, action, rew, done, end
)
prev_idx = next_idx
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
acm_action: torch.Tensor,
):
"""DDPG update step
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
acm_action (torch.Tensor): tensor of acm actions
"""
for param in self.acm.parameters():
param.requires_grad = False
if self.acm_critic:
action = acm_action
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-function by one step
y_q = self._critic(obs, action)
loss_q = F.mse_loss(y_q, y)
self.loss["critic"] = loss_q.item()
self.critic_optimizer.zero_grad()
loss_q.backward()
self.critic_optimizer.step()
# Update policy by one step
self._critic.eval()
loss = self.compute_pi_loss(obs, next_obs)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
#update temperature of Lagrangian optimization obj
self.update_custom_loss_param_loss()
# Update target networks
self.update_target_nets()
self._critic.train()
for param in self.acm.parameters():
param.requires_grad = True
def add_tensorboard_logs(self, buffer, done):
super().add_tensorboard_logs(buffer, done)
if self.lagrangian_custom_loss:
self.tensorboard_writer.log_custom_loss_param(
self.iteration, self.custom_loss_param)
if __name__ == "__main__":
#with torch.cuda.device(0):
model = DDPG_AcM(
# unbiased_update=True,
# custom_loss=True,
# acm_update_batches=50,
# denormalize_actor_out=True,
env_name="Pendulum-v0",
buffer_size=50000,
act_noise=0.05,
iterations=100,
gamma=0.99,
steps_per_epoch=200,
stats_freq=5,
test_episodes=3,
custom_loss=1,
lagrangian_custom_loss=False,
# tensorboard_dir="logs_ddpg",
# tensorboard_comment="",
acm_update_freq=200,
acm_epochs=1,
acm_pre_train_epochs=10,
acm_pre_train_samples=10000,
use_gpu=False,
render=False,
)
model.pre_train()
model.train()
| [
"torch.nn.functional.mse_loss",
"rltoolkit.algorithms.ddpg.models.Critic",
"rltoolkit.algorithms.ddpg.models.Actor",
"torch.mean",
"torch.Tensor",
"numpy.exp",
"torch.tensor",
"torch.nn.functional.softplus",
"torch.sum",
"rltoolkit.algorithms.DDPG.save_model",
"torch.no_grad",
"torch.randn",
"torch.cat"
]
| [((1132, 1206), 'rltoolkit.algorithms.ddpg.models.Actor', 'Actor', (['self.ob_dim'], {'ac_lim': 'self.actor_ac_lim', 'ac_dim': 'self.actor_output_dim'}), '(self.ob_dim, ac_lim=self.actor_ac_lim, ac_dim=self.actor_output_dim)\n', (1137, 1206), False, 'from rltoolkit.algorithms.ddpg.models import Actor, Critic\n'), ((6017, 6049), 'rltoolkit.algorithms.DDPG.save_model', 'DDPG.save_model', (['self', 'save_path'], {}), '(self, save_path)\n', (6032, 6049), False, 'from rltoolkit.algorithms import DDPG\n'), ((15520, 15538), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_q', 'y'], {}), '(y_q, y)\n', (15530, 15538), True, 'from torch.nn import functional as F\n'), ((1287, 1336), 'rltoolkit.algorithms.ddpg.models.Critic', 'Critic', (['self.ob_dim'], {'ac_dim': 'self.actor_output_dim'}), '(self.ob_dim, ac_dim=self.actor_output_dim)\n', (1293, 1336), False, 'from rltoolkit.algorithms.ddpg.models import Actor, Critic\n'), ((1471, 1503), 'torch.tensor', 'torch.tensor', (['custom_loss_scaled'], {}), '(custom_loss_scaled)\n', (1483, 1503), False, 'import torch\n'), ((1537, 1595), 'torch.Tensor', 'torch.Tensor', (['([custom_loss_scaled] * self.actor_output_dim)'], {}), '([custom_loss_scaled] * self.actor_output_dim)\n', (1549, 1595), False, 'import torch\n'), ((3569, 3623), 'torch.randn', 'torch.randn', (['self.actor_output_dim'], {'device': 'self.device'}), '(self.actor_output_dim, device=self.device)\n', (3580, 3623), False, 'import torch\n'), ((6559, 6574), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6572, 6574), False, 'import torch\n'), ((8910, 8949), 'torch.cat', 'torch.cat', (['[obs, denorm_action]'], {'axis': '(1)'}), '([obs, denorm_action], axis=1)\n', (8919, 8949), False, 'import torch\n'), ((9826, 9841), 'torch.sum', 'torch.sum', (['loss'], {}), '(loss)\n', (9835, 9841), False, 'import torch\n'), ((10383, 10446), 'torch.tensor', 'torch.tensor', (["(dataset['timeouts'][i] or dataset['terminals'][i])"], {}), "(dataset['timeouts'][i] or dataset['terminals'][i])\n", (10395, 10446), False, 'import torch\n'), ((10465, 10505), 'torch.tensor', 'torch.tensor', (["dataset['observations'][i]"], {}), "(dataset['observations'][i])\n", (10477, 10505), False, 'import torch\n'), ((1413, 1432), 'numpy.exp', 'np.exp', (['custom_loss'], {}), '(custom_loss)\n', (1419, 1432), True, 'import numpy as np\n'), ((6776, 6818), 'torch.cat', 'torch.cat', (['[next_obs, next_action]'], {'axis': '(1)'}), '([next_obs, next_action], axis=1)\n', (6785, 6818), False, 'import torch\n'), ((10672, 10712), 'torch.tensor', 'torch.tensor', (["dataset['observations'][i]"], {}), "(dataset['observations'][i])\n", (10684, 10712), False, 'import torch\n'), ((10748, 10783), 'torch.tensor', 'torch.tensor', (["dataset['rewards'][i]"], {}), "(dataset['rewards'][i])\n", (10760, 10783), False, 'import torch\n'), ((10809, 10872), 'torch.tensor', 'torch.tensor', (["(dataset['timeouts'][i] or dataset['terminals'][i])"], {}), "(dataset['timeouts'][i] or dataset['terminals'][i])\n", (10821, 10872), False, 'import torch\n'), ((10900, 10935), 'torch.tensor', 'torch.tensor', (["dataset['actions'][i]"], {}), "(dataset['actions'][i])\n", (10912, 10935), False, 'import torch\n'), ((10976, 11013), 'torch.tensor', 'torch.tensor', (["dataset['terminals'][i]"], {}), "(dataset['terminals'][i])\n", (10988, 11013), False, 'import torch\n'), ((9394, 9428), 'torch.nn.functional.softplus', 'F.softplus', (['self.custom_loss_param'], {}), '(self.custom_loss_param)\n', (9404, 9428), True, 'from torch.nn import functional as F\n'), ((9503, 9537), 'torch.nn.functional.softplus', 'F.softplus', (['self.custom_loss_param'], {}), '(self.custom_loss_param)\n', (9513, 9537), True, 'from torch.nn import functional as F\n'), ((9553, 9590), 'torch.Tensor', 'torch.Tensor', (['self.custom_loss_target'], {}), '(self.custom_loss_target)\n', (9565, 9590), False, 'import torch\n'), ((9770, 9799), 'torch.mean', 'torch.mean', (["self.loss['dist']"], {}), "(self.loss['dist'])\n", (9780, 9799), False, 'import torch\n'), ((7592, 7626), 'torch.nn.functional.softplus', 'F.softplus', (['self.custom_loss_param'], {}), '(self.custom_loss_param)\n', (7602, 7626), True, 'from torch.nn import functional as F\n'), ((8135, 8169), 'torch.nn.functional.softplus', 'F.softplus', (['self.custom_loss_param'], {}), '(self.custom_loss_param)\n', (8145, 8169), True, 'from torch.nn import functional as F\n'), ((8291, 8325), 'torch.nn.functional.softplus', 'F.softplus', (['self.custom_loss_param'], {}), '(self.custom_loss_param)\n', (8301, 8325), True, 'from torch.nn import functional as F\n'), ((8341, 8378), 'torch.Tensor', 'torch.Tensor', (['self.custom_loss_target'], {}), '(self.custom_loss_target)\n', (8353, 8378), False, 'import torch\n')] |
import io
import sys
from textnn.utils import ProgressIterator
#inspired by https://stackoverflow.com/a/34738440
def capture_sysout(cmd):
capturedOutput = io.StringIO() # Create StringIO object
sys.stdout = capturedOutput # and redirect stdout.
cmd() # Call function.
sys.stdout = sys.__stdout__ # Reset redirect.
return capturedOutput.getvalue() # Now works as before.
def test_progress_iterator():
def progress_generator():
sum(ProgressIterator([1, 2, 3], interval=0, description=""))
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
def test_progress_iterator_with_statement():
def progress_generator():
with ProgressIterator([1,2,3], interval=0, description="") as it:
sum(it)
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
| [
"io.StringIO",
"textnn.utils.ProgressIterator"
]
| [((161, 174), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (172, 174), False, 'import io\n'), ((575, 630), 'textnn.utils.ProgressIterator', 'ProgressIterator', (['[1, 2, 3]'], {'interval': '(0)', 'description': '""""""'}), "([1, 2, 3], interval=0, description='')\n", (591, 630), False, 'from textnn.utils import ProgressIterator\n'), ((1239, 1294), 'textnn.utils.ProgressIterator', 'ProgressIterator', (['[1, 2, 3]'], {'interval': '(0)', 'description': '""""""'}), "([1, 2, 3], interval=0, description='')\n", (1255, 1294), False, 'from textnn.utils import ProgressIterator\n')] |
# -*- coding: utf-8 -*-
"""Highlevel wrapper of the VISA Library.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import random
from collections import OrderedDict
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from pyvisa import constants, highlevel, rname
from pyvisa.constants import StatusCode
from pyvisa.typing import VISAEventContext, VISARMSession, VISASession
from pyvisa.util import LibraryPath
from . import sessions
from .common import logger
class PyVisaLibrary(highlevel.VisaLibraryBase):
"""A pure Python backend for PyVISA.
The object is basically a dispatcher with some common functions implemented.
When a new resource object is requested to pyvisa, the library creates a
Session object (that knows how to perform low-level communication operations)
associated with a session handle (a number, usually refered just as session).
A call to a library function is handled by PyVisaLibrary if it involves a
resource agnostic function or dispatched to the correct session object
(obtained from the session id).
Importantly, the user is unaware of this. PyVisaLibrary behaves for
the user just as NIVisaLibrary.
"""
#: Live session object identified by a randon session ID
sessions: Dict[int, sessions.Session]
# Try to import packages implementing lower level functionality.
try:
from .serial import SerialSession
logger.debug("SerialSession was correctly imported.")
except Exception as e:
logger.debug("SerialSession was not imported %s." % e)
try:
from .usb import USBRawSession, USBSession
logger.debug("USBSession and USBRawSession were correctly imported.")
except Exception as e:
logger.debug("USBSession and USBRawSession were not imported %s." % e)
try:
from .tcpip import TCPIPInstrSession, TCPIPSocketSession
logger.debug("TCPIPSession was correctly imported.")
except Exception as e:
logger.debug("TCPIPSession was not imported %s." % e)
try:
from .gpib import GPIBSession
logger.debug("GPIBSession was correctly imported.")
except Exception as e:
logger.debug("GPIBSession was not imported %s." % e)
@staticmethod
def get_library_paths() -> Iterable[LibraryPath]:
"""List a dummy library path to allow to create the library."""
return (LibraryPath("py"),)
@staticmethod
def get_debug_info() -> Dict[str, Union[str, List[str], Dict[str, str]]]:
"""Return a list of lines with backend info."""
from . import __version__
d: OrderedDict[str, Union[str, List[str], Dict[str, str]]] = OrderedDict()
d["Version"] = "%s" % __version__
for key, val in sessions.Session.iter_valid_session_classes():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = "Available " + val.get_low_level_info()
for key, issue in sessions.Session.iter_session_classes_issues():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = issue.split("\n")
return d
def _init(self) -> None:
"""Custom initialization code."""
# Map session handle to session object.
self.sessions = {}
def _register(self, obj: object) -> VISASession:
"""Creates a random but unique session handle for a session object.
Register it in the sessions dictionary and return the value.
"""
session = None
while session is None or session in self.sessions:
session = random.randint(1000000, 9999999)
self.sessions[session] = obj
return session
def open(
self,
session: VISARMSession,
resource_name: str,
access_mode: constants.AccessModes = constants.AccessModes.no_lock,
open_timeout: int = constants.VI_TMO_IMMEDIATE,
) -> Tuple[VISASession, StatusCode]:
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
Parameters
----------
session : VISARMSession
Resource Manager session (should always be a session returned from
open_default_resource_manager()).
resource_name : str
Unique symbolic name of a resource.
access_mode : constants.AccessModes, optional
Specifies the mode by which the resource is to be accessed.
open_timeout : int
Specifies the maximum time period (in milliseconds) that this
operation waits before returning an error. constants.VI_TMO_IMMEDIATE
and constants.VI_TMO_INFINITE are used as min and max.
Returns
-------
VISASession
Unique logical identifier reference to a session
StatusCode
Return value of the library call.
"""
try:
open_timeout = int(open_timeout)
except ValueError:
raise ValueError(
"open_timeout (%r) must be an integer (or compatible type)"
% open_timeout
)
try:
parsed = rname.parse_resource_name(resource_name)
except rname.InvalidResourceName:
return (
VISASession(0),
self.handle_return_value(None, StatusCode.error_invalid_resource_name),
)
cls = sessions.Session.get_session_class(
parsed.interface_type_const, parsed.resource_class
)
sess = cls(session, resource_name, parsed, open_timeout)
return self._register(sess), StatusCode.success
def clear(self, session: VISASession) -> StatusCode:
"""Clears a device.
Corresponds to viClear function of the VISA library.
Parameters
----------
session : typin.VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.clear())
def flush(
self, session: VISASession, mask: constants.BufferOperation
) -> StatusCode:
"""Flush the specified buffers.
The buffers can be associated with formatted I/O operations and/or
serial communication.
Corresponds to viFlush function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mask : constants.BufferOperation
Specifies the action to be taken with flushing the buffer.
The values can be combined using the | operator. However multiple
operations on a single buffer cannot be combined.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.flush(mask))
def gpib_command(
self, session: VISASession, command_byte: bytes
) -> Tuple[int, StatusCode]:
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
command_byte : bytes
Data to write.
Returns
-------
int
Number of written bytes
StatusCode
Return value of the library call.
"""
try:
written, st = self.sessions[session].gpib_command(command_byte)
return written, self.handle_return_value(session, st)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
def assert_trigger(
self, session: VISASession, protocol: constants.TriggerProtocol
) -> StatusCode:
"""Assert software or hardware trigger.
Corresponds to viAssertTrigger function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
protocol : constants.TriggerProtocol
Trigger protocol to use during assertion.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].assert_trigger(protocol)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_send_ifc(self, session: VISASession) -> StatusCode:
"""Pulse the interface clear line (IFC) for at least 100 microseconds.
Corresponds to viGpibSendIFC function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_send_ifc()
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_ren(
self, session: VISASession, mode: constants.RENLineOperation
) -> StatusCode:
"""Controls the state of the GPIB Remote Enable (REN) interface line.
Optionally the remote/local state of the device can also be set.
Corresponds to viGpibControlREN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.RENLineOperation
State of the REN line and optionally the device remote/local state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_ren(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_atn(
self, session: VISASession, mode: constants.ATNLineOperation
) -> StatusCode:
"""Specifies the state of the ATN line and the local active controller state.
Corresponds to viGpibControlATN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.ATNLineOperation
State of the ATN line and optionally the local active controller state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_atn(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_pass_control(
self, session: VISASession, primary_address: int, secondary_address: int
) -> StatusCode:
"""Tell a GPIB device to become controller in charge (CIC).
Corresponds to viGpibPassControl function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
primary_address : int
Primary address of the GPIB device to which you want to pass control.
secondary_address : int
Secondary address of the targeted GPIB device.
If the targeted device does not have a secondary address, this parameter
should contain the value Constants.VI_NO_SEC_ADDR.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].gpib_pass_control(
primary_address, secondary_address
),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def read_stb(self, session: VISASession) -> Tuple[int, StatusCode]:
"""Reads a status byte of the service request.
Corresponds to viReadSTB function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
int
Service request status byte
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
stb, status_code = sess.read_stb()
return stb, self.handle_return_value(session, status_code)
def close(
self, session: Union[VISASession, VISAEventContext, VISARMSession]
) -> StatusCode:
"""Closes the specified session, event, or find list.
Corresponds to viClose function of the VISA library.
Parameters
---------
session : Union[VISASession, VISAEventContext, VISARMSession]
Unique logical identifier to a session, event, resource manager.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
# The RM session directly references the library.
if sess is not self:
return self.handle_return_value(session, sess.close())
else:
return self.handle_return_value(session, StatusCode.success)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def open_default_resource_manager(self) -> Tuple[VISARMSession, StatusCode]:
"""This function returns a session to the Default Resource Manager resource.
Corresponds to viOpenDefaultRM function of the VISA library.
Returns
-------
VISARMSession
Unique logical identifier to a Default Resource Manager session
StatusCode
Return value of the library call.
"""
return (
cast(VISARMSession, self._register(self)),
self.handle_return_value(None, StatusCode.success),
)
def list_resources(
self, session: VISARMSession, query: str = "?*::INSTR"
) -> Tuple[str, ...]:
"""Return a tuple of all connected devices matching query.
Parameters
----------
session : VISARMSession
Unique logical identifier to the resource manager session.
query : str
Regular expression used to match devices.
Returns
-------
Tuple[str, ...]
Resource names of all the connected devices matching the query.
"""
# For each session type, ask for the list of connected resources and
# merge them into a single list.
# HINT: the cast should not be necessary here
resources: List[str] = []
for key, st in sessions.Session.iter_valid_session_classes():
resources += st.list_resources()
return rname.filter(resources, query)
def read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data from device or interface synchronously.
Corresponds to viRead function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Date read
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the read method of the session object.
try:
data, status_code = self.sessions[session].read(count)
except KeyError:
return (
b"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
return data, self.handle_return_value(session, status_code)
def write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Write data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
Number of bytes actually transferred
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the write method of the session object.
try:
written, status_code = self.sessions[session].write(data)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
return written, self.handle_return_value(session, status_code)
def buffer_read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data through the use of a formatted I/O read buffer.
The data can be read from a device or an interface.
Corresponds to viBufRead function of the VISA library.
Parameters
----------
session : VISASession\
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Data read
StatusCode
Return value of the library call.
"""
return self.read(session, count)
def buffer_write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Writes data to a formatted I/O write buffer synchronously.
Corresponds to viBufWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
number of written bytes
StatusCode
return value of the library call.
"""
return self.write(session, data)
def get_attribute(
self,
session: Union[VISASession, VISAEventContext, VISARMSession],
attribute: Union[constants.ResourceAttribute, constants.EventAttribute],
) -> Tuple[Any, StatusCode]:
"""Retrieves the state of an attribute.
Corresponds to viGetAttribute function of the VISA library.
Parameters
----------
session : Union[VISASession, VISAEventContext]
Unique logical identifier to a session, event, or find list.
attribute : Union[constants.ResourceAttribute, constants.EventAttribute]
Resource or event attribute for which the state query is made.
Returns
-------
Any
State of the queried attribute for a specified resource
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
None,
self.handle_return_value(session, StatusCode.error_invalid_object),
)
state, status_code = sess.get_attribute(
cast(constants.ResourceAttribute, attribute)
)
return state, self.handle_return_value(session, status_code)
def set_attribute(
self,
session: VISASession,
attribute: constants.ResourceAttribute,
attribute_state: Any,
) -> StatusCode:
"""Set the state of an attribute.
Corresponds to viSetAttribute function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
attribute : constants.ResourceAttribute
Attribute for which the state is to be modified.
attribute_state : Any
The state of the attribute to be set for the specified object.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].set_attribute(attribute, attribute_state),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def lock(
self,
session: VISASession,
lock_type: constants.Lock,
timeout: int,
requested_key: Optional[str] = None,
) -> Tuple[str, StatusCode]:
"""Establishes an access mode to the specified resources.
Corresponds to viLock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
lock_type : constants.Lock
Specifies the type of lock requested.
timeout : int
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
requested_key : Optional[str], optional
Requested locking key in the case of a shared lock. For an exclusive
lock it should be None.
Returns
-------
str
Key that can then be passed to other sessions to share the lock, or
None for an exclusive lock.
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
key, status_code = sess.lock(lock_type, timeout, requested_key)
return key, self.handle_return_value(session, status_code)
def unlock(self, session: VISASession) -> StatusCode:
"""Relinquish a lock for the specified resource.
Corresponds to viUnlock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.unlock())
def disable_event(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Disable notification for an event type(s) via the specified mechanism(s).
Corresponds to viDisableEvent function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Event type.
mechanism : constants.EventMechanism
Event handling mechanisms to be disabled.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
def discard_events(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Discard event occurrences for a given type and mechanisms in a session.
Corresponds to viDiscardEvents function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
event_type : constans.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be discarded.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
| [
"pyvisa.typing.VISASession",
"collections.OrderedDict",
"pyvisa.rname.filter",
"pyvisa.rname.parse_resource_name",
"pyvisa.util.LibraryPath",
"typing.cast",
"random.randint"
]
| [((2771, 2784), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2782, 2784), False, 'from collections import OrderedDict\n'), ((15966, 15996), 'pyvisa.rname.filter', 'rname.filter', (['resources', 'query'], {}), '(resources, query)\n', (15978, 15996), False, 'from pyvisa import constants, highlevel, rname\n'), ((2494, 2511), 'pyvisa.util.LibraryPath', 'LibraryPath', (['"""py"""'], {}), "('py')\n", (2505, 2511), False, 'from pyvisa.util import LibraryPath\n'), ((3693, 3725), 'random.randint', 'random.randint', (['(1000000)', '(9999999)'], {}), '(1000000, 9999999)\n', (3707, 3725), False, 'import random\n'), ((5275, 5315), 'pyvisa.rname.parse_resource_name', 'rname.parse_resource_name', (['resource_name'], {}), '(resource_name)\n', (5300, 5315), False, 'from pyvisa import constants, highlevel, rname\n'), ((20228, 20272), 'typing.cast', 'cast', (['constants.ResourceAttribute', 'attribute'], {}), '(constants.ResourceAttribute, attribute)\n', (20232, 20272), False, 'from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast\n'), ((5395, 5409), 'pyvisa.typing.VISASession', 'VISASession', (['(0)'], {}), '(0)\n', (5406, 5409), False, 'from pyvisa.typing import VISAEventContext, VISARMSession, VISASession\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import os
import math
from PIL import Image, ImageDraw, ImageFont
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import get_output_dir
def vis_training(cur_iter):
prefix = ''
if cfg.WEBLY.MINING:
prefix = 'mining_'
if not (cfg.WSL.DEBUG or
(cfg.WSL.SAMPLE and cur_iter % cfg.WSL.SAMPLE_ITER == 0)):
return
output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
sample_dir = os.path.join(output_dir, 'webly_sample')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
for gpu_id in range(cfg.NUM_GPUS):
data_ids = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data_ids'))
ims = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data'))
labels_oh = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, 'labels_oh'))
im_score = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'cls_prob'))
roi_score = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred'))
# roi_score_softmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, prefix + 'rois_pred_softmax'))
rois = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, prefix + 'rois'))
# anchor_argmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, 'anchor_argmax'))
preffix = 'iter_' + str(cur_iter) + '_gpu_' + str(gpu_id)
save_im(labels_oh, im_score, ims, cfg.PIXEL_MEANS, preffix, sample_dir)
save_rois(labels_oh, im_score, roi_score, ims, rois, cfg.PIXEL_MEANS,
preffix, '', sample_dir)
# continue
if cfg.WEBLY.ENTROPY:
pass
else:
continue
class_weight = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_class_weight'))
rois_pred_hatE = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_hatE'))
rois_pred_E = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_E'))
y_logN__logy = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_y_logN__logy'))
save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
cfg.PIXEL_MEANS, preffix, '', sample_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy)
def save_im(labels_oh, im_score, ims, pixel_means, prefix, output_dir):
batch_size, num_classes = im_score.shape
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '.png')
cv2.imwrite(file_name, im)
def save_rois(labels_oh, im_score, roi_score, ims, rois, pixel_means, prefix,
suffix, output_dir):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
has_bg = False
num_rois_this = min(500, num_rois)
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
if has_bg:
continue
has_bg = True
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
argsort = np.argsort(-np.abs(roi_score[:, c]))
argsort = argsort[:num_rois_this]
argsort = argsort[::-1]
if im_score[b][c] < 0.1:
scale_p = 1.0
else:
scale_p = 1.0 / roi_score[:, c].max()
for n in range(num_rois_this):
roi = rois[argsort[n]]
if roi[0] != b:
continue
if roi_score[argsort[n]][c] * scale_p < 0.4:
thickness = 3
else:
thickness = 6
jet = gray2jet(roi_score[argsort[n]][c] * scale_p)
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_' +
suffix + '.png')
cv2.imwrite(file_name, im_S)
continue
num_anchors = anchor_argmax.shape[0]
for n in range(num_rois):
roi = rois[n]
if roi[0] != b:
continue
for a in range(num_anchors):
if anchor_argmax[a][n] == 1.0:
break
jet = gray2jet(1.0 * a / num_anchors)
cv2.rectangle(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_A_' +
suffix + '.png')
cv2.imwrite(file_name, im_A)
def save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
pixel_means, prefix, suffix, output_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
rois_pred_E_sum = np.sum(rois_pred_E, axis=0).reshape(1, -1)
E_sum_norm = np.true_divide(rois_pred_E_sum, y_logN__logy)
E_sum_norm = np.where(E_sum_norm > 1., 1., E_sum_norm)
E_class_weight = 1 - E_sum_norm
for b in range(batch_size):
for c in range(num_classes):
if labels_oh[b][c] == 0.0 and im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
im_hatE = im.copy()
im_E = im.copy()
_NUM = 10
argsort_roi = np.argsort(roi_score[:, c])[::-1]
argsort_hatE = np.argsort(rois_pred_hatE[:, c])[::-1]
argsort_E = np.argsort(rois_pred_E[:, c])[::-1]
if len(argsort_roi) >= _NUM:
_NUM = 10
else:
_NUM = len(argsort_roi)
argsort_roi = argsort_roi[:_NUM][::-1]
argsort_hatE = argsort_hatE[:_NUM][::-1]
argsort_E = argsort_E[:_NUM][::-1]
argsort_hatE = argsort_roi
argsort_E = argsort_roi
scale_p = 1.0 / roi_score[:, c].max()
scale_p = 1.0
for n in range(_NUM):
roi = rois[argsort_roi[n]]
hatE_roi = rois[argsort_hatE[n]]
E_roi = rois[argsort_E[n]]
if roi[0] != b:
continue
# draw roi
jet = gray2jet(roi_score[argsort_roi[n]][c] * scale_p)
bgr = jet
rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
text = "{:.4f}".format(roi_score[argsort_roi[n]][c])
im_S = putText_with_TNR(im_S, int(roi[1]), int(roi[2]), 15,
jet, rgb, text)
if hatE_roi[0] != b:
continue
# draw rois_pred_hatE
# jet = gray2jet(rois_pred_hatE[argsort_hatE[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_hatE, (hatE_roi[1], hatE_roi[2]),
(hatE_roi[3], hatE_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text hat_E
text = "{:.4f}".format(rois_pred_hatE[argsort_hatE[n]][c])
im_hatE = putText_with_TNR(im_hatE, int(hatE_roi[1]),
int(hatE_roi[2]), 15, jet, rgb,
text)
if E_roi[0] != b:
continue
# draw rois_pred_E
# jet = gray2jet(rois_pred_E[argsort_E[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_E, (E_roi[1], E_roi[2]), (E_roi[3], E_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text E
text = "{:.4f}".format(rois_pred_E[argsort_E[n]][c])
im_E = putText_with_TNR(im_E, int(E_roi[1]), int(E_roi[2]), 15,
jet, rgb, text)
# write im_score
text = "{:.4f}".format(im_score[b][c])
im_S = putText_with_TNR(im_S, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(class_weight[b][c])
im_hatE = putText_with_TNR(im_hatE, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(E_class_weight[b][c])
im_E = putText_with_TNR(im_E, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
file_name_roi = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_roi' +
suffix + '.png')
cv2.imwrite(file_name_roi, im_S)
file_name_hatE = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) +
'_hatE' + suffix + '.png')
cv2.imwrite(file_name_hatE, im_hatE)
file_name_E = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_E' +
suffix + '.png')
cv2.imwrite(file_name_E, im_E)
def dump_proto_files(model, output_dir):
"""Save prototxt descriptions of the training network and parameter
initialization network."""
with open(os.path.join(output_dir, model.net.Proto().name), 'w') as fid:
fid.write(str(model.net.Proto()))
with open(os.path.join(output_dir,
model.param_init_net.Proto().name), 'w') as fid:
fid.write(str(model.param_init_net.Proto()))
def gray2jet(f):
# plot short rainbow RGB
a = f / 0.25 # invert and group
X = math.floor(a) # this is the integer part
Y = math.floor(255 * (a - X)) # fractional part from 0 to 255
Z = math.floor(128 * (a - X)) # fractional part from 0 to 128
if X == 0:
r = 0
g = Y
b = 128 - Z
elif X == 1:
r = Y
g = 255
b = 0
elif X == 2:
r = 255
g = 255 - Z
b = 0
elif X == 3:
r = 255
g = 128 - Z
b = 0
elif X == 4:
r = 255
g = 0
b = 0
# opencv is bgr, not rgb
return (b, g, r)
def putText_with_TNR(img, x, y, size, fontColor, bgColor, string):
thickness = 2
font_scale = 1.1
font = cv2.FONT_HERSHEY_SIMPLEX
s = cv2.getTextSize(string, font, font_scale, thickness)
cv2.rectangle(
img,
(x + thickness, y + thickness),
(x + thickness + s[0][0] + 2, y + thickness + s[0][1] + 2),
# (0, 140, 255),
fontColor,
cv2.FILLED,
lineType=cv2.LINE_AA)
position = (x + thickness + 1, y + thickness + s[0][1] + 1)
cv2.putText(img, string, position, font, font_scale, (255, 255, 255),
thickness, cv2.LINE_AA)
return img
# from OpenCV to PIL
font = "/home/chenzhiwei/Documents/myFonts/timesnewroman.ttf"
img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
font = ImageFont.truetype(font, size)
position = (x + 3, y - 2)
draw = ImageDraw.Draw(img_PIL)
offsetx, offsety = font.getoffset(string)
width, height = font.getsize(string)
draw.rectangle((offsetx + x + 2, offsety + y - 3, offsetx + x + width + 3,
offsety + y + height - 3),
fill=bgColor)
draw.text(position, string, font=font, fill=fontColor)
# back to OpenCV type
img_OpenCV = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
return img_OpenCV
| [
"cv2.rectangle",
"os.path.exists",
"cv2.imwrite",
"numpy.abs",
"os.makedirs",
"math.floor",
"numpy.where",
"detectron.core.config.get_output_dir",
"os.path.join",
"PIL.ImageFont.truetype",
"numpy.asarray",
"cv2.putText",
"numpy.argsort",
"numpy.sum",
"PIL.ImageDraw.Draw",
"numpy.true_divide",
"cv2.cvtColor",
"cv2.getTextSize"
]
| [((602, 651), 'detectron.core.config.get_output_dir', 'get_output_dir', (['cfg.TRAIN.DATASETS'], {'training': '(True)'}), '(cfg.TRAIN.DATASETS, training=True)\n', (616, 651), False, 'from detectron.core.config import get_output_dir\n'), ((669, 709), 'os.path.join', 'os.path.join', (['output_dir', '"""webly_sample"""'], {}), "(output_dir, 'webly_sample')\n", (681, 709), False, 'import os\n'), ((5949, 5994), 'numpy.true_divide', 'np.true_divide', (['rois_pred_E_sum', 'y_logN__logy'], {}), '(rois_pred_E_sum, y_logN__logy)\n', (5963, 5994), True, 'import numpy as np\n'), ((6012, 6055), 'numpy.where', 'np.where', (['(E_sum_norm > 1.0)', '(1.0)', 'E_sum_norm'], {}), '(E_sum_norm > 1.0, 1.0, E_sum_norm)\n', (6020, 6055), True, 'import numpy as np\n'), ((11348, 11361), 'math.floor', 'math.floor', (['a'], {}), '(a)\n', (11358, 11361), False, 'import math\n'), ((11398, 11423), 'math.floor', 'math.floor', (['(255 * (a - X))'], {}), '(255 * (a - X))\n', (11408, 11423), False, 'import math\n'), ((11465, 11490), 'math.floor', 'math.floor', (['(128 * (a - X))'], {}), '(128 * (a - X))\n', (11475, 11490), False, 'import math\n'), ((12046, 12098), 'cv2.getTextSize', 'cv2.getTextSize', (['string', 'font', 'font_scale', 'thickness'], {}), '(string, font, font_scale, thickness)\n', (12061, 12098), False, 'import cv2\n'), ((12104, 12268), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x + thickness, y + thickness)', '(x + thickness + s[0][0] + 2, y + thickness + s[0][1] + 2)', 'fontColor', 'cv2.FILLED'], {'lineType': 'cv2.LINE_AA'}), '(img, (x + thickness, y + thickness), (x + thickness + s[0][0] +\n 2, y + thickness + s[0][1] + 2), fontColor, cv2.FILLED, lineType=cv2.\n LINE_AA)\n', (12117, 12268), False, 'import cv2\n'), ((12403, 12500), 'cv2.putText', 'cv2.putText', (['img', 'string', 'position', 'font', 'font_scale', '(255, 255, 255)', 'thickness', 'cv2.LINE_AA'], {}), '(img, string, position, font, font_scale, (255, 255, 255),\n thickness, cv2.LINE_AA)\n', (12414, 12500), False, 'import cv2\n'), ((12700, 12730), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font', 'size'], {}), '(font, size)\n', (12718, 12730), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((12772, 12795), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_PIL'], {}), '(img_PIL)\n', (12786, 12795), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((721, 747), 'os.path.exists', 'os.path.exists', (['sample_dir'], {}), '(sample_dir)\n', (735, 747), False, 'import os\n'), ((757, 780), 'os.makedirs', 'os.makedirs', (['sample_dir'], {}), '(sample_dir)\n', (768, 780), False, 'import os\n'), ((12651, 12687), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (12663, 12687), False, 'import cv2\n'), ((13157, 13176), 'numpy.asarray', 'np.asarray', (['img_PIL'], {}), '(img_PIL)\n', (13167, 13176), True, 'import numpy as np\n'), ((3212, 3238), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'im'], {}), '(file_name, im)\n', (3223, 3238), False, 'import cv2\n'), ((4900, 4928), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'im_S'], {}), '(file_name, im_S)\n', (4911, 4928), False, 'import cv2\n'), ((5553, 5581), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'im_A'], {}), '(file_name, im_A)\n', (5564, 5581), False, 'import cv2\n'), ((5889, 5916), 'numpy.sum', 'np.sum', (['rois_pred_E'], {'axis': '(0)'}), '(rois_pred_E, axis=0)\n', (5895, 5916), True, 'import numpy as np\n'), ((10387, 10419), 'cv2.imwrite', 'cv2.imwrite', (['file_name_roi', 'im_S'], {}), '(file_name_roi, im_S)\n', (10398, 10419), False, 'import cv2\n'), ((10590, 10626), 'cv2.imwrite', 'cv2.imwrite', (['file_name_hatE', 'im_hatE'], {}), '(file_name_hatE, im_hatE)\n', (10601, 10626), False, 'import cv2\n'), ((10791, 10821), 'cv2.imwrite', 'cv2.imwrite', (['file_name_E', 'im_E'], {}), '(file_name_E, im_E)\n', (10802, 10821), False, 'import cv2\n'), ((4668, 4739), 'cv2.rectangle', 'cv2.rectangle', (['im_S', '(roi[1], roi[2])', '(roi[3], roi[4])', 'jet', 'thickness'], {}), '(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness)\n', (4681, 4739), False, 'import cv2\n'), ((5327, 5390), 'cv2.rectangle', 'cv2.rectangle', (['im_A', '(roi[1], roi[2])', '(roi[3], roi[4])', 'jet', '(1)'], {}), '(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1)\n', (5340, 5390), False, 'import cv2\n'), ((6603, 6630), 'numpy.argsort', 'np.argsort', (['roi_score[:, c]'], {}), '(roi_score[:, c])\n', (6613, 6630), True, 'import numpy as np\n'), ((6664, 6696), 'numpy.argsort', 'np.argsort', (['rois_pred_hatE[:, c]'], {}), '(rois_pred_hatE[:, c])\n', (6674, 6696), True, 'import numpy as np\n'), ((6727, 6756), 'numpy.argsort', 'np.argsort', (['rois_pred_E[:, c]'], {}), '(rois_pred_E[:, c])\n', (6737, 6756), True, 'import numpy as np\n'), ((7640, 7730), 'cv2.rectangle', 'cv2.rectangle', (['im_S', '(roi[1], roi[2])', '(roi[3], roi[4])', 'bgr', '(2)'], {'lineType': 'cv2.LINE_AA'}), '(im_S, (roi[1], roi[2]), (roi[3], roi[4]), bgr, 2, lineType=\n cv2.LINE_AA)\n', (7653, 7730), False, 'import cv2\n'), ((8326, 8439), 'cv2.rectangle', 'cv2.rectangle', (['im_hatE', '(hatE_roi[1], hatE_roi[2])', '(hatE_roi[3], hatE_roi[4])', 'bgr', '(2)'], {'lineType': 'cv2.LINE_AA'}), '(im_hatE, (hatE_roi[1], hatE_roi[2]), (hatE_roi[3], hatE_roi[4\n ]), bgr, 2, lineType=cv2.LINE_AA)\n', (8339, 8439), False, 'import cv2\n'), ((9153, 9250), 'cv2.rectangle', 'cv2.rectangle', (['im_E', '(E_roi[1], E_roi[2])', '(E_roi[3], E_roi[4])', 'bgr', '(2)'], {'lineType': 'cv2.LINE_AA'}), '(im_E, (E_roi[1], E_roi[2]), (E_roi[3], E_roi[4]), bgr, 2,\n lineType=cv2.LINE_AA)\n', (9166, 9250), False, 'import cv2\n'), ((4045, 4068), 'numpy.abs', 'np.abs', (['roi_score[:, c]'], {}), '(roi_score[:, c])\n', (4051, 4068), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
Execute salt convenience routines
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import collections
import logging
import time
import sys
import multiprocessing
# Import salt libs
import salt.exceptions
import salt.loader
import salt.minion
import salt.utils
import salt.utils.args
import salt.utils.event
from salt.client import mixins
from salt.output import display_output
from salt.utils.error import raise_error
from salt.utils.event import tagify
import salt.ext.six as six
log = logging.getLogger(__name__)
class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
'''
The interface used by the :command:`salt-run` CLI tool on the Salt Master
It executes :ref:`runner modules <all-salt.runners>` which run on the Salt
Master.
Importing and using ``RunnerClient`` must be done on the same machine as
the Salt Master and it must be done using the same user that the Salt
Master is running as.
Salt's :conf_master:`external_auth` can be used to authenticate calls. The
eauth user must be authorized to execute runner modules: (``@runner``).
Only the :py:meth:`master_call` below supports eauth.
'''
client = 'runner'
tag_prefix = 'run'
def __init__(self, opts):
self.opts = opts
self.functions = salt.loader.runner(opts) # Must be self.functions for mixin to work correctly :-/
self.returners = salt.loader.returners(opts, self.functions)
self.outputters = salt.loader.outputters(opts)
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
def cmd(self, fun, arg, pub_data=None, kwarg=None):
'''
Execute a runner function
.. code-block:: python
>>> opts = salt.config.master_config('/etc/salt/master')
>>> runner = salt.runner.RunnerClient(opts)
>>> runner.cmd('jobs.list_jobs', [])
{
'20131219215650131543': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:56:50.131543',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
'20131219215921857715': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:59:21.857715',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
}
'''
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
'kwarg must be formatted as a dictionary'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
'pub_data must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(arg)
def _append_kwarg(arglist, kwarg):
'''
Append the kwarg dict to the arglist
'''
kwarg['__kwarg__'] = True
arglist.append(kwarg)
if kwarg:
try:
if isinstance(arglist[-1], dict) \
and '__kwarg__' in arglist[-1]:
for key, val in six.iteritems(kwarg):
if key in arglist[-1]:
log.warning(
'Overriding keyword argument {0!r}'.format(key)
)
arglist[-1][key] = val
else:
# No kwargs yet present in arglist
_append_kwarg(arglist, kwarg)
except IndexError:
# arglist is empty, just append
_append_kwarg(arglist, kwarg)
self._verify_fun(fun)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
jid = self.returners[fstr]()
log.debug('Runner starting with jid {0}'.format(jid))
self.event.fire_event({'runner_job': fun}, tagify([jid, 'new'], 'job'))
target = RunnerClient._thread_return
data = {'fun': fun, 'jid': jid, 'args': args, 'kwargs': kwargs}
args = (self, self.opts, data)
ret = jid
if self.opts.get('async', False):
process = multiprocessing.Process(
target=target, args=args
)
process.start()
else:
ret = target(*args)
return ret
@classmethod
def _thread_return(cls, instance, opts, data):
'''
The multiprocessing process calls back here
to stream returns
'''
# Runners modules runtime injection:
# - the progress event system with the correct jid
# - Provide JID if the runner wants to access it directly
done = {}
progress = salt.utils.event.get_runner_event(opts, data['jid']).fire_progress
for func_name, func in instance.functions.items():
if func.__module__ in done:
continue
mod = sys.modules[func.__module__]
mod.__jid__ = data['jid']
mod.__progress__ = progress
done[func.__module__] = mod
ret = instance.functions[data['fun']](*data['args'], **data['kwargs'])
# Sleep for just a moment to let any progress events return
time.sleep(0.1)
ret_load = {'return': ret, 'fun': data['fun'], 'fun_args': data['args']}
# Don't use the invoking processes' event socket because it could be closed down by the time we arrive here.
# Create another, for safety's sake.
salt.utils.event.MasterEvent(opts['sock_dir']).fire_event(ret_load, tagify([data['jid'], 'return'], 'runner'))
try:
fstr = '{0}.save_runner_load'.format(opts['master_job_cache'])
instance.returners[fstr](data['jid'], ret_load)
except KeyError:
log.debug(
'The specified returner used for the master job cache '
'"{0}" does not have a save_runner_load function! The results '
'of this runner execution will not be stored.'.format(
opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
if opts.get('async', False):
return data['jid']
else:
return ret
def master_call(self, **kwargs):
'''
Execute a runner function through the master network interface (eauth).
'''
load = kwargs
load['cmd'] = 'runner'
sreq = salt.transport.Channel.factory(self.opts,
crypt='clear',
usage='master_call')
ret = sreq.send(load)
if isinstance(ret, collections.Mapping):
if 'error' in ret:
raise_error(**ret['error'])
return ret
def _reformat_low(self, low):
'''
Format the low data for RunnerClient()'s master_call() function
The master_call function here has a different function signature than
on WheelClient. So extract all the eauth keys and the fun key and
assume everything else is a kwarg to pass along to the runner function
to be called.
'''
auth_creds = dict([(i, low.pop(i)) for i in [
'username', 'password', 'eauth', 'token', 'client',
] if i in low])
reformatted_low = {'fun': low.pop('fun')}
reformatted_low.update(auth_creds)
reformatted_low['kwarg'] = low
return reformatted_low
def cmd_async(self, low):
'''
Execute a runner function asynchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_async({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': '<PASSWORD>',
'eauth': 'pam',
})
'''
reformatted_low = self._reformat_low(low)
return self.master_call(**reformatted_low)
def cmd_sync(self, low, timeout=None):
'''
Execute a runner function synchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_sync({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': '<PASSWORD>',
'eauth': 'pam',
})
'''
sevent = salt.utils.event.get_event('master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts)
reformatted_low = self._reformat_low(low)
job = self.master_call(**reformatted_low)
ret_tag = tagify('ret', base=job['tag'])
timelimit = time.time() + (timeout or 300)
while True:
ret = sevent.get_event(full=True)
if ret is None:
if time.time() > timelimit:
raise salt.exceptions.SaltClientTimeout(
"RunnerClient job '{0}' timed out".format(job['jid']),
jid=job['jid'])
else:
continue
if ret['tag'] == ret_tag:
return ret['data']['return']
class Runner(RunnerClient):
'''
Execute the salt runner interface
'''
def print_docs(self):
'''
Print out the documentation!
'''
arg = self.opts.get('fun', None)
docs = super(Runner, self).get_docs(arg)
for fun in sorted(docs):
display_output('{0}:'.format(fun), 'text', self.opts)
print(docs[fun])
def run(self):
'''
Execute the runner sequence
'''
ret = {}
if self.opts.get('doc', False):
self.print_docs()
else:
try:
# Run the runner!
jid = super(Runner, self).cmd(
self.opts['fun'], self.opts['arg'], self.opts)
if self.opts.get('async', False):
log.info('Running in async mode. Results of this execution may '
'be collected by attaching to the master event bus or '
'by examing the master job cache, if configured.')
sys.exit(0)
rets = self.get_runner_returns(jid)
else:
rets = [jid]
# Gather the returns
for ret in rets:
if not self.opts.get('quiet', False):
if isinstance(ret, dict) and 'outputter' in ret and ret['outputter'] is not None:
print(self.outputters[ret['outputter']](ret['data']))
else:
salt.output.display_output(ret, '', self.opts)
except salt.exceptions.SaltException as exc:
ret = str(exc)
print(ret)
return ret
log.debug('Runner return: {0}'.format(ret))
return ret
def get_runner_returns(self, jid, timeout=None):
'''
Gather the return data from the event system, break hard when timeout
is reached.
'''
if timeout is None:
timeout = self.opts['timeout'] * 2
timeout_at = time.time() + timeout
last_progress_timestamp = time.time()
while True:
raw = self.event.get_event(timeout, full=True)
time.sleep(0.1)
# If we saw no events in the event bus timeout
# OR
# we have reached the total timeout
# AND
# have not seen any progress events for the length of the timeout.
if raw is None and (time.time() > timeout_at and
time.time() - last_progress_timestamp > timeout):
# Timeout reached
break
try:
if not raw['tag'].split('/')[1] == 'runner' and raw['tag'].split('/')[2] == jid:
continue
elif raw['tag'].split('/')[3] == 'progress' and raw['tag'].split('/')[2] == jid:
last_progress_timestamp = time.time()
yield {'data': raw['data']['data'], 'outputter': raw['data']['outputter']}
elif raw['tag'].split('/')[3] == 'return' and raw['tag'].split('/')[2] == jid:
yield raw['data']['return']
break
# Handle a findjob that might have been kicked off under the covers
elif raw['data']['fun'] == 'saltutil.findjob':
timeout_at = timeout_at + 10
continue
except (IndexError, KeyError):
continue
| [
"logging.getLogger",
"salt.utils.event.tagify",
"multiprocessing.Process",
"time.sleep",
"salt.ext.six.iteritems",
"salt.utils.error.raise_error",
"sys.exit",
"time.time"
]
| [((572, 599), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (589, 599), False, 'import logging\n'), ((5758, 5773), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5768, 5773), False, 'import time\n'), ((9665, 9695), 'salt.utils.event.tagify', 'tagify', (['"""ret"""'], {'base': "job['tag']"}), "('ret', base=job['tag'])\n", (9671, 9695), False, 'from salt.utils.event import tagify\n'), ((12351, 12362), 'time.time', 'time.time', ([], {}), '()\n', (12360, 12362), False, 'import time\n'), ((4429, 4456), 'salt.utils.event.tagify', 'tagify', (["[jid, 'new']", '"""job"""'], {}), "([jid, 'new'], 'job')\n", (4435, 4456), False, 'from salt.utils.event import tagify\n'), ((4696, 4745), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'target', 'args': 'args'}), '(target=target, args=args)\n', (4719, 4745), False, 'import multiprocessing\n'), ((6093, 6134), 'salt.utils.event.tagify', 'tagify', (["[data['jid'], 'return']", '"""runner"""'], {}), "([data['jid'], 'return'], 'runner')\n", (6099, 6134), False, 'from salt.utils.event import tagify\n'), ((9717, 9728), 'time.time', 'time.time', ([], {}), '()\n', (9726, 9728), False, 'import time\n'), ((12295, 12306), 'time.time', 'time.time', ([], {}), '()\n', (12304, 12306), False, 'import time\n'), ((12455, 12470), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (12465, 12470), False, 'import time\n'), ((7404, 7431), 'salt.utils.error.raise_error', 'raise_error', ([], {}), "(**ret['error'])\n", (7415, 7431), False, 'from salt.utils.error import raise_error\n'), ((3543, 3563), 'salt.ext.six.iteritems', 'six.iteritems', (['kwarg'], {}), '(kwarg)\n', (3556, 3563), True, 'import salt.ext.six as six\n'), ((9861, 9872), 'time.time', 'time.time', ([], {}), '()\n', (9870, 9872), False, 'import time\n'), ((11256, 11267), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (11264, 11267), False, 'import sys\n'), ((12724, 12735), 'time.time', 'time.time', ([], {}), '()\n', (12733, 12735), False, 'import time\n'), ((13177, 13188), 'time.time', 'time.time', ([], {}), '()\n', (13186, 13188), False, 'import time\n'), ((12785, 12796), 'time.time', 'time.time', ([], {}), '()\n', (12794, 12796), False, 'import time\n')] |
# For usage of lark with PyInstaller. See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html
import os
def get_hook_dirs():
return [os.path.dirname(__file__)] | [
"os.path.dirname"
]
| [((160, 185), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (175, 185), False, 'import os\n')] |
import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import DEFAULT_CLEVEL, DEFAULT_CODEC, DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.crop import dataset_crop
@click.command()
@click.argument("input_paths", nargs=-1) # , help='input path'
@click.option("--output_path", "-o") # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when ommited.")
@click.option(
"--quantile",
"-q",
default=0.99,
type=float,
help="Quantile parameter for lower bound of brightness for thresholding.",
show_default=True,
)
@click.option(
"--reference-channel",
"-rc",
default=None,
help="Reference channel to estimate cropping. If no provided it picks the first one.",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option("--chunks", "-chk", default=None, help="Dataset chunks dimensions, e.g. (1, 126, 512, 512).")
@click.option(
"--codec",
"-z",
default=DEFAULT_CODEC,
help="Compression codec: zstd for ’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’ ",
show_default=True,
)
@click.option("--clevel", "-l", type=int, default=DEFAULT_CLEVEL, help="Compression level", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
"--workers",
"-wk",
default=-4,
help="Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| ",
show_default=True,
) #
@click.option("--check", "-ck", default=True, help="Checking integrity of written file.", show_default=True) #
def crop(
input_paths,
output_path,
channels,
quantile,
reference_channel,
store,
chunks,
codec,
clevel,
overwrite,
workers,
check,
):
input_dataset, input_paths = glob_datasets(input_paths)
output_path = _get_output_path(input_paths[0], output_path, "_crop")
channels = _parse_channels(input_dataset, channels)
if reference_channel is None:
reference_channel = input_dataset.channels()[0]
chunks = _parse_chunks(chunks)
with asection(
f"Cropping from: {input_paths} to {output_path} for channels: {channels}, "
f"using channel {reference_channel} as a reference."
):
dataset_crop(
input_dataset,
output_path,
channels=channels,
reference_channel=reference_channel,
quantile=quantile,
store=store,
chunks=chunks,
compression=codec,
compression_level=clevel,
overwrite=overwrite,
workers=workers,
check=check,
)
input_dataset.close()
aprint("Done!")
| [
"click.argument",
"dexp.cli.parsing._parse_chunks",
"arbol.arbol.aprint",
"click.option",
"dexp.datasets.open_dataset.glob_datasets",
"dexp.datasets.operations.crop.dataset_crop",
"arbol.arbol.asection",
"dexp.cli.parsing._get_output_path",
"click.command",
"dexp.cli.parsing._parse_channels"
]
| [((319, 334), 'click.command', 'click.command', ([], {}), '()\n', (332, 334), False, 'import click\n'), ((336, 375), 'click.argument', 'click.argument', (['"""input_paths"""'], {'nargs': '(-1)'}), "('input_paths', nargs=-1)\n", (350, 375), False, 'import click\n'), ((401, 436), 'click.option', 'click.option', (['"""--output_path"""', '"""-o"""'], {}), "('--output_path', '-o')\n", (413, 436), False, 'import click\n'), ((462, 566), 'click.option', 'click.option', (['"""--channels"""', '"""-c"""'], {'default': 'None', 'help': '"""List of channels, all channels when ommited."""'}), "('--channels', '-c', default=None, help=\n 'List of channels, all channels when ommited.')\n", (474, 566), False, 'import click\n'), ((563, 724), 'click.option', 'click.option', (['"""--quantile"""', '"""-q"""'], {'default': '(0.99)', 'type': 'float', 'help': '"""Quantile parameter for lower bound of brightness for thresholding."""', 'show_default': '(True)'}), "('--quantile', '-q', default=0.99, type=float, help=\n 'Quantile parameter for lower bound of brightness for thresholding.',\n show_default=True)\n", (575, 724), False, 'import click\n'), ((744, 897), 'click.option', 'click.option', (['"""--reference-channel"""', '"""-rc"""'], {'default': 'None', 'help': '"""Reference channel to estimate cropping. If no provided it picks the first one."""'}), "('--reference-channel', '-rc', default=None, help=\n 'Reference channel to estimate cropping. If no provided it picks the first one.'\n )\n", (756, 897), False, 'import click\n'), ((908, 1029), 'click.option', 'click.option', (['"""--store"""', '"""-st"""'], {'default': 'DEFAULT_STORE', 'help': '"""Zarr store: ‘dir’, ‘ndir’, or ‘zip’"""', 'show_default': '(True)'}), "('--store', '-st', default=DEFAULT_STORE, help=\n 'Zarr store: ‘dir’, ‘ndir’, or ‘zip’', show_default=True)\n", (920, 1029), False, 'import click\n'), ((1026, 1137), 'click.option', 'click.option', (['"""--chunks"""', '"""-chk"""'], {'default': 'None', 'help': '"""Dataset chunks dimensions, e.g. (1, 126, 512, 512)."""'}), "('--chunks', '-chk', default=None, help=\n 'Dataset chunks dimensions, e.g. (1, 126, 512, 512).')\n", (1038, 1137), False, 'import click\n'), ((1134, 1301), 'click.option', 'click.option', (['"""--codec"""', '"""-z"""'], {'default': 'DEFAULT_CODEC', 'help': '"""Compression codec: zstd for ’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’ """', 'show_default': '(True)'}), "('--codec', '-z', default=DEFAULT_CODEC, help=\n 'Compression codec: zstd for ’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’ '\n , show_default=True)\n", (1146, 1301), False, 'import click\n'), ((1316, 1430), 'click.option', 'click.option', (['"""--clevel"""', '"""-l"""'], {'type': 'int', 'default': 'DEFAULT_CLEVEL', 'help': '"""Compression level"""', 'show_default': '(True)'}), "('--clevel', '-l', type=int, default=DEFAULT_CLEVEL, help=\n 'Compression level', show_default=True)\n", (1328, 1430), False, 'import click\n'), ((1427, 1533), 'click.option', 'click.option', (['"""--overwrite"""', '"""-w"""'], {'is_flag': '(True)', 'help': '"""Forces overwrite of target"""', 'show_default': '(True)'}), "('--overwrite', '-w', is_flag=True, help=\n 'Forces overwrite of target', show_default=True)\n", (1439, 1533), False, 'import click\n'), ((1530, 1704), 'click.option', 'click.option', (['"""--workers"""', '"""-wk"""'], {'default': '(-4)', 'help': '"""Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| """', 'show_default': '(True)'}), "('--workers', '-wk', default=-4, help=\n 'Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| '\n , show_default=True)\n", (1542, 1704), False, 'import click\n'), ((1722, 1834), 'click.option', 'click.option', (['"""--check"""', '"""-ck"""'], {'default': '(True)', 'help': '"""Checking integrity of written file."""', 'show_default': '(True)'}), "('--check', '-ck', default=True, help=\n 'Checking integrity of written file.', show_default=True)\n", (1734, 1834), False, 'import click\n'), ((2050, 2076), 'dexp.datasets.open_dataset.glob_datasets', 'glob_datasets', (['input_paths'], {}), '(input_paths)\n', (2063, 2076), False, 'from dexp.datasets.open_dataset import glob_datasets\n'), ((2095, 2149), 'dexp.cli.parsing._get_output_path', '_get_output_path', (['input_paths[0]', 'output_path', '"""_crop"""'], {}), "(input_paths[0], output_path, '_crop')\n", (2111, 2149), False, 'from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks\n'), ((2165, 2205), 'dexp.cli.parsing._parse_channels', '_parse_channels', (['input_dataset', 'channels'], {}), '(input_dataset, channels)\n', (2180, 2205), False, 'from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks\n'), ((2309, 2330), 'dexp.cli.parsing._parse_chunks', '_parse_chunks', (['chunks'], {}), '(chunks)\n', (2322, 2330), False, 'from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks\n'), ((2341, 2485), 'arbol.arbol.asection', 'asection', (['f"""Cropping from: {input_paths} to {output_path} for channels: {channels}, using channel {reference_channel} as a reference."""'], {}), "(\n f'Cropping from: {input_paths} to {output_path} for channels: {channels}, using channel {reference_channel} as a reference.'\n )\n", (2349, 2485), False, 'from arbol.arbol import aprint, asection\n'), ((2512, 2764), 'dexp.datasets.operations.crop.dataset_crop', 'dataset_crop', (['input_dataset', 'output_path'], {'channels': 'channels', 'reference_channel': 'reference_channel', 'quantile': 'quantile', 'store': 'store', 'chunks': 'chunks', 'compression': 'codec', 'compression_level': 'clevel', 'overwrite': 'overwrite', 'workers': 'workers', 'check': 'check'}), '(input_dataset, output_path, channels=channels,\n reference_channel=reference_channel, quantile=quantile, store=store,\n chunks=chunks, compression=codec, compression_level=clevel, overwrite=\n overwrite, workers=workers, check=check)\n', (2524, 2764), False, 'from dexp.datasets.operations.crop import dataset_crop\n'), ((2946, 2961), 'arbol.arbol.aprint', 'aprint', (['"""Done!"""'], {}), "('Done!')\n", (2952, 2961), False, 'from arbol.arbol import aprint, asection\n')] |
import sys
from matplotlib import image as mpimg
import numpy as np
import os
DIPHA_CONST = 8067171840
DIPHA_IMAGE_TYPE_CONST = 1
DIM = 3
input_dir = os.path.join(os.getcwd(), sys.argv[1])
dipha_output_filename = sys.argv[2]
vert_filename = sys.argv[3]
input_filenames = [name
for name in os.listdir(input_dir)
if (os.path.isfile(input_dir + '/' + name)) and (name != ".DS_Store")]
input_filenames.sort()
image = mpimg.imread(os.path.join(input_dir, input_filenames[0]))
nx, ny = image.shape
del image
nz = len(input_filenames)
print(nx, ny, nz)
#sys.exit()
im_cube = np.zeros([nx, ny, nz])
i = 0
for name in input_filenames:
sys.stdout.flush()
print(i, name)
fileName = input_dir + "/" + name
im_cube[:, :, i] = mpimg.imread(fileName)
i = i + 1
print('writing dipha output...')
with open(dipha_output_filename, 'wb') as output_file:
# this is needed to verify you are giving dipha a dipha file
np.int64(DIPHA_CONST).tofile(output_file)
# this tells dipha that we are giving an image as input
np.int64(DIPHA_IMAGE_TYPE_CONST).tofile(output_file)
# number of points
np.int64(nx * ny * nz).tofile(output_file)
# dimension
np.int64(DIM).tofile(output_file)
# pixels in each dimension
np.int64(nx).tofile(output_file)
np.int64(ny).tofile(output_file)
np.int64(nz).tofile(output_file)
# pixel values
for k in range(nz):
sys.stdout.flush()
print('dipha - working on image', k)
for j in range(ny):
for i in range(nx):
val = int(-im_cube[i, j, k]*255)
'''
if val != 0 and val != -1:
print('val check:', val)
'''
np.float64(val).tofile(output_file)
output_file.close()
print('writing vert file')
with open(vert_filename, 'w') as vert_file:
for k in range(nz):
sys.stdout.flush()
print('verts - working on image', k)
for j in range(ny):
for i in range(nx):
vert_file.write(str(i) + ' ' + str(j) + ' ' + str(k) + ' ' + str(int(-im_cube[i, j, k] * 255)) + '\n')
vert_file.close()
print(nx, ny, nz)
| [
"os.listdir",
"numpy.int64",
"numpy.float64",
"matplotlib.image.imread",
"os.path.join",
"os.getcwd",
"os.path.isfile",
"numpy.zeros",
"sys.stdout.flush"
]
| [((638, 660), 'numpy.zeros', 'np.zeros', (['[nx, ny, nz]'], {}), '([nx, ny, nz])\n', (646, 660), True, 'import numpy as np\n'), ((174, 185), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (183, 185), False, 'import os\n'), ((488, 531), 'os.path.join', 'os.path.join', (['input_dir', 'input_filenames[0]'], {}), '(input_dir, input_filenames[0])\n', (500, 531), False, 'import os\n'), ((705, 723), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (721, 723), False, 'import sys\n'), ((807, 829), 'matplotlib.image.imread', 'mpimg.imread', (['fileName'], {}), '(fileName)\n', (819, 829), True, 'from matplotlib import image as mpimg\n'), ((325, 346), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (335, 346), False, 'import os\n'), ((1499, 1517), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1515, 1517), False, 'import sys\n'), ((1995, 2013), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2011, 2013), False, 'import sys\n'), ((371, 409), 'os.path.isfile', 'os.path.isfile', (["(input_dir + '/' + name)"], {}), "(input_dir + '/' + name)\n", (385, 409), False, 'import os\n'), ((1010, 1031), 'numpy.int64', 'np.int64', (['DIPHA_CONST'], {}), '(DIPHA_CONST)\n', (1018, 1031), True, 'import numpy as np\n'), ((1118, 1150), 'numpy.int64', 'np.int64', (['DIPHA_IMAGE_TYPE_CONST'], {}), '(DIPHA_IMAGE_TYPE_CONST)\n', (1126, 1150), True, 'import numpy as np\n'), ((1200, 1222), 'numpy.int64', 'np.int64', (['(nx * ny * nz)'], {}), '(nx * ny * nz)\n', (1208, 1222), True, 'import numpy as np\n'), ((1265, 1278), 'numpy.int64', 'np.int64', (['DIM'], {}), '(DIM)\n', (1273, 1278), True, 'import numpy as np\n'), ((1336, 1348), 'numpy.int64', 'np.int64', (['nx'], {}), '(nx)\n', (1344, 1348), True, 'import numpy as np\n'), ((1374, 1386), 'numpy.int64', 'np.int64', (['ny'], {}), '(ny)\n', (1382, 1386), True, 'import numpy as np\n'), ((1412, 1424), 'numpy.int64', 'np.int64', (['nz'], {}), '(nz)\n', (1420, 1424), True, 'import numpy as np\n'), ((1825, 1840), 'numpy.float64', 'np.float64', (['val'], {}), '(val)\n', (1835, 1840), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
def normal(mu,sigma,x): #normal distribution
return 1/(math.pi*2)**0.5/sigma*np.exp(-(x-mu)**2/2/sigma**2)
def eval(x):
return normal(-4,1,x) + normal(4,1,x)
#return 0.3*np.exp(-0.2*x**2)+0.7*np.exp(-0.2*(x-10)**2)
def ref(x_star,x): #normal distribution
return normal(x,10,x_star)
N = [100,500,1000,5000]
fig = plt.figure()
for i in range(4):
X = np.array([])
x = 0.1 #initialize x0 to be 0.1
for j in range(N[i]):
u = np.random.rand()
x_star = np.random.normal(x,10)
A = min(1,eval(x_star)/eval(x)) #*q(x,x_star)/p(x)/q(x_star,x))
if u < A:
x = x_star
X=np.hstack((X,x))
ax = fig.add_subplot(2,2,i+1)
ax.hist(X,bins=100,density=True)
x = np.linspace(-10,20,5000)
#ax.plot(x,eval(x)/2.7) #2.7 approximates the normalizing constant
ax.plot(x,eval(x)/2) #2 approximates the normalizing constant
ax.set_ylim(0,0.35)
ax.text(-9,0.25,'I=%d'%N[i])
fig.suptitle('Metropolis_Hastings for MCMC(Normal)')
#fig.suptitle('Metropolis_Hastings for MCMC(Exp.)')
plt.savefig('MetropolisNormal.png',dpi=100)
#plt.savefig('MetropolisExp.png',dpi=100)
plt.show()
| [
"numpy.random.normal",
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"numpy.hstack",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.show"
]
| [((396, 408), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (406, 408), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1170), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""MetropolisNormal.png"""'], {'dpi': '(100)'}), "('MetropolisNormal.png', dpi=100)\n", (1137, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1212, 1222), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1220, 1222), True, 'import matplotlib.pyplot as plt\n'), ((436, 448), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (444, 448), True, 'import numpy as np\n'), ((801, 827), 'numpy.linspace', 'np.linspace', (['(-10)', '(20)', '(5000)'], {}), '(-10, 20, 5000)\n', (812, 827), True, 'import numpy as np\n'), ((145, 184), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / 2 / sigma ** 2)'], {}), '(-(x - mu) ** 2 / 2 / sigma ** 2)\n', (151, 184), True, 'import numpy as np\n'), ((524, 540), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (538, 540), True, 'import numpy as np\n'), ((558, 581), 'numpy.random.normal', 'np.random.normal', (['x', '(10)'], {}), '(x, 10)\n', (574, 581), True, 'import numpy as np\n'), ((704, 721), 'numpy.hstack', 'np.hstack', (['(X, x)'], {}), '((X, x))\n', (713, 721), True, 'import numpy as np\n')] |
import os
from typing import Any, Dict, List, Optional
import carla
from core.simulators.carla_simulator import CarlaSimulator
from core.simulators.carla_data_provider import CarlaDataProvider
from .srunner.scenarios.route_scenario import RouteScenario, SCENARIO_CLASS_DICT
from .srunner.scenariomanager.scenario_manager import ScenarioManager
class CarlaScenarioSimulator(CarlaSimulator):
"""
Carla simualtor used to run scenarios.
The simulator loads configs of provided scenario, and create hero actor, npc vehicles, walkers, world map
according to it. The sensors and running status are set as common Carla simulator.
When created, it will set up Carla client due to arguments, set simulator basic configurations used all around
its lifetime, and set some default running configurations.
If no traffic manager port is provided, it will find random free port in system.
:Arguments:
- cfg (Dict): Config Dict.
- client (carla.Client, optional): Already established Carla client. Defaults to None.
- host (str, optional): TCP host Carla client link to. Defaults to 'localhost'.
- port (int, optional): TCP port Carla client link to. Defaults to 9000.
- tm_port (int, optional): Traffic manager port Carla client link to. Defaults to None.
- timeout (float, optional): Carla client link timeout. Defaults to 10.0.
:Interfaces:
init, get_state, get_sensor_data, get_navigation, get_information, apply_control, run_step, clean_up
:Properties:
- town_name (str): Current town name.
- hero_player (carla.Actor): hero actor in simulation.
- collided (bool): Whether collided in current episode.
- end_distance (float): Distance to target in current frame.
- end_timeout (float): Timeout for entire route provided by planner.
- total_diatance (float): Dictance for entire route provided by planner.
- scenario_manager (Any): Scenario Manager instance used to get running state.
"""
config = dict(
town='Town01',
weather='random',
sync_mode=True,
delta_seconds=0.1,
no_rendering=False,
auto_pilot=False,
n_vehicles=0,
n_pedestrians=0,
disable_two_wheels=False,
col_threshold=400,
resolution=1.0,
waypoint_num=20,
obs=list(),
planner=dict(),
aug=None,
verbose=True,
debug=False,
)
def __init__(
self,
cfg: Dict,
client: Optional[carla.Client] = None,
host: str = 'localhost',
port: int = 9000,
tm_port: int = 9050,
timeout: float = 10.0,
**kwargs
) -> None:
"""
Init Carla scenario simulator.
"""
super().__init__(cfg, client, host, port, tm_port, timeout)
self._resolution = self._cfg.resolution
self._scenario = None
self._start_scenario = False
self._manager = ScenarioManager(self._debug, self._sync_mode, self._client_timeout)
self._criteria_status = dict()
def init(self, config: Any) -> None:
"""
Init simulator episode with provided args.
This method takes an scneario configuration instance to set up scenarios in Carla server. the scenario could be
a single scenario, or a route scenario together with several scenarios during navigating the route. A scneario
manager is used to manager and check the running status and tick scenarios. A local planner is set to trace the
route to generate target waypoint and road options in each tick. It will set world, map, vehicles, pedestrians
dut to provided args and default configs, and reset running status. If no collision happens when creating
actors, the init will end and return.
:Arguments:
- config (Any): Scenario configuration instance, containing information about the scenarios.
"""
self._scenario_config = config
self.clean_up()
self._set_town(config.town)
self._set_weather(self._weather)
self._blueprints = self._world.get_blueprint_library()
while True:
self.clean_up()
CarlaDataProvider.set_client(self._client)
CarlaDataProvider.set_world(self._world)
CarlaDataProvider.set_traffic_manager_port(self._tm.get_port())
if CarlaDataProvider.get_map().name != config.town and CarlaDataProvider.get_map().name != "OpenDriveMap":
print("WARNING: The CARLA server uses the wrong map: {}".format(CarlaDataProvider.get_map().name))
print("WARNING: This scenario requires to use map: {}".format(config.town))
print("[SIMULATOR] Preparing scenario: " + config.name)
config.n_vehicles = self._n_vehicles
config.disable_two_wheels = self._disable_two_wheels
if "RouteScenario" in config.name:
self._scenario = RouteScenario(
world=self._world, config=config, debug_mode=self._debug, resolution=self._resolution
)
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_route(CarlaDataProvider.get_hero_vehicle_route(), clean=True)
self._total_distance = self._planner.distance_to_goal
self._end_timeout = self._scenario.route_timeout
else:
# select scenario
if config.type in SCENARIO_CLASS_DICT:
scenario_class = SCENARIO_CLASS_DICT[config.type]
ego_vehicles = []
for vehicle in config.ego_vehicles:
ego_vehicles.append(
CarlaDataProvider.request_new_actor(
vehicle.model,
vehicle.transform,
vehicle.rolename,
True,
color=vehicle.color,
actor_category=vehicle.category
)
)
self._scenario = scenario_class(
world=self._world, ego_vehicles=ego_vehicles, config=config, debug_mode=self._debug
)
else:
raise RuntimeError("Scenario '{}' not support!".format(config.type))
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_destination(config.route.data[0], config.route.data[1], clean=True)
self._total_distance = self._planner.distance_to_goal
self._spawn_pedestrians()
if self._ready():
if self._debug:
self._count_actors()
break
def run_step(self) -> None:
"""
Run one step simulation.
This will tick Carla world and scenarios, update informations for all sensors and measurement.
"""
if not self._start_scenario:
self._manager.start_scenario()
self._start_scenario = True
self._tick += 1
world_snapshot = self._world.get_snapshot()
timestamp = world_snapshot.timestamp
self._timestamp = timestamp.elapsed_seconds
self._manager.tick_scenario(timestamp)
if self._planner is not None:
self._planner.run_step()
self._collided = self._collision_sensor.collided
self._traffic_light_helper.tick()
if self._bev_wrapper is not None:
if CarlaDataProvider._hero_vehicle_route is not None:
self._bev_wrapper.tick()
def get_criteria(self) -> List:
"""
Get criteria status list of scenario in current frame. Criteria related with hero actor is encounted.
:Returns:
List: Criteria list of scenario.
"""
criterion_list = self._manager.analyze_tick()
for name, actor_id, result, actual_value, expected_value in criterion_list:
if actor_id == self._hero_actor.id:
self._criteria_status.update({name: [result, actual_value, expected_value]})
return self._criteria_status
def end_scenario(self) -> None:
"""
End current scenario. Must be called before ending an episode.
"""
if self._start_scenario:
self._manager.end_scenario()
self._start_scenario = False
def clean_up(self) -> None:
"""
Destroy all actors and sensors in current world. Clear all messages saved in simulator and data provider,
and clean up running scenarios. This will NOT destroy theCarla client, so simulator can use same carla
client to start next episode.
"""
if self._manager is not None:
self._manager.clean_up()
self._criteria_status.clear()
super().clean_up()
@property
def scenario_manager(self) -> Any:
return self._manager
| [
"core.simulators.carla_data_provider.CarlaDataProvider.request_new_actor",
"core.simulators.carla_data_provider.CarlaDataProvider.get_map",
"core.simulators.carla_data_provider.CarlaDataProvider.set_world",
"core.simulators.carla_data_provider.CarlaDataProvider.get_hero_vehicle_route",
"core.simulators.carla_data_provider.CarlaDataProvider.set_client"
]
| [((4290, 4332), 'core.simulators.carla_data_provider.CarlaDataProvider.set_client', 'CarlaDataProvider.set_client', (['self._client'], {}), '(self._client)\n', (4318, 4332), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((4345, 4385), 'core.simulators.carla_data_provider.CarlaDataProvider.set_world', 'CarlaDataProvider.set_world', (['self._world'], {}), '(self._world)\n', (4372, 4385), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((5403, 5445), 'core.simulators.carla_data_provider.CarlaDataProvider.get_hero_vehicle_route', 'CarlaDataProvider.get_hero_vehicle_route', ([], {}), '()\n', (5443, 5445), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((4478, 4505), 'core.simulators.carla_data_provider.CarlaDataProvider.get_map', 'CarlaDataProvider.get_map', ([], {}), '()\n', (4503, 4505), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((4530, 4557), 'core.simulators.carla_data_provider.CarlaDataProvider.get_map', 'CarlaDataProvider.get_map', ([], {}), '()\n', (4555, 4557), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((4662, 4689), 'core.simulators.carla_data_provider.CarlaDataProvider.get_map', 'CarlaDataProvider.get_map', ([], {}), '()\n', (4687, 4689), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((5939, 6095), 'core.simulators.carla_data_provider.CarlaDataProvider.request_new_actor', 'CarlaDataProvider.request_new_actor', (['vehicle.model', 'vehicle.transform', 'vehicle.rolename', '(True)'], {'color': 'vehicle.color', 'actor_category': 'vehicle.category'}), '(vehicle.model, vehicle.transform,\n vehicle.rolename, True, color=vehicle.color, actor_category=vehicle.\n category)\n', (5974, 6095), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n')] |
import myproject
myproject.logs(show_level='debug')
myproject.mymod.do_something()
| [
"myproject.mymod.do_something",
"myproject.logs"
]
| [((18, 52), 'myproject.logs', 'myproject.logs', ([], {'show_level': '"""debug"""'}), "(show_level='debug')\n", (32, 52), False, 'import myproject\n'), ((55, 85), 'myproject.mymod.do_something', 'myproject.mymod.do_something', ([], {}), '()\n', (83, 85), False, 'import myproject\n')] |
# -*- coding: utf-8 -*-
#
# Helper Script for Mass-Invitation of Participant Organisations
#
# RLPPTM Template Version 1.0
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/RLPPTM/tools/mis.py
#
import os
import sys
from core import s3_format_datetime
from templates.RLPPTM.config import SCHOOLS
from templates.RLPPTM.helpers import InviteUserOrg
# Batch limit (set to False to disable)
BATCH_LIMIT = 250
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
log = None
def info(msg):
sys.stderr.write("%s" % msg)
if log:
log.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
if log:
log.write("%s\n" % msg)
# Load models for tables
otable = s3db.org_organisation
gtable = s3db.org_group
mtable = s3db.org_group_membership
utable = s3db.auth_user
oltable = s3db.org_organisation_user
pltable = s3db.pr_person_user
ctable = s3db.pr_contact
timestmp = s3_format_datetime(dtfmt="%Y%m%d%H%M%S")
LOGFILE = os.path.join(request.folder, "private", "mis_%s.log" % timestmp)
# -----------------------------------------------------------------------------
# Invite organisations
#
if not failed:
try:
with open(LOGFILE, "w", encoding="utf-8") as logfile:
log = logfile
join = [mtable.on((mtable.organisation_id == otable.id) & \
(mtable.deleted == False)),
gtable.on((gtable.id == mtable.group_id) & \
(gtable.name == SCHOOLS) & \
(gtable.deleted == False)),
]
query = (otable.deleted == False)
organisations = db(query).select(otable.id,
otable.pe_id,
otable.name,
join = join,
orderby = otable.id,
)
total = len(organisations)
infoln("Total: %s Organisations" % total)
infoln("")
skipped = sent = failures = 0
invite_org = InviteUserOrg.invite_account
for organisation in organisations:
info("%s..." % organisation.name)
# Get all accounts that are linked to this org
organisation_id = organisation.id
join = oltable.on((oltable.user_id == utable.id) & \
(oltable.deleted == False))
left = pltable.on((pltable.user_id == utable.id) & \
(pltable.deleted == False))
query = (oltable.organisation_id == organisation_id)
rows = db(query).select(utable.id,
utable.email,
utable.registration_key,
pltable.pe_id,
join = join,
left = left,
)
if rows:
# There are already accounts linked to this organisation
invited, registered = [], []
for row in rows:
username = row.auth_user.email
if row.pr_person_user.pe_id:
registered.append(username)
else:
invited.append(username)
if registered:
infoln("already registered (%s)." % ", ".join(registered))
else:
infoln("already invited (%s)." % ", ".join(invited))
skipped += 1
continue
# Find email address
query = (ctable.pe_id == organisation.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
contact = db(query).select(ctable.value,
orderby = ctable.priority,
limitby = (0, 1),
).first()
if contact:
email = contact.value
info("(%s)..." % email)
else:
infoln("no email address.")
skipped += 1
continue
error = invite_org(organisation, email, account=None)
if not error:
sent += 1
infoln("invited.")
db.commit()
else:
failures += 1
infoln("invitation failed (%s)." % error)
if BATCH_LIMIT and sent >= BATCH_LIMIT:
infoln("Batch limit (%s) reached" % BATCH_LIMIT)
skipped = total - (sent + failures)
break
infoln("")
infoln("%s invitations sent" % sent)
infoln("%s invitations failed" % failures)
infoln("%s organisations skipped" % skipped)
log = None
except IOError:
infoln("...failed (could not create logfile)")
failed = True
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("PROCESS FAILED - Action rolled back.")
else:
db.commit()
infoln("PROCESS SUCCESSFUL.")
| [
"sys.stderr.write",
"os.path.join",
"core.s3_format_datetime"
]
| [((1031, 1071), 'core.s3_format_datetime', 's3_format_datetime', ([], {'dtfmt': '"""%Y%m%d%H%M%S"""'}), "(dtfmt='%Y%m%d%H%M%S')\n", (1049, 1071), False, 'from core import s3_format_datetime\n'), ((1082, 1146), 'os.path.join', 'os.path.join', (['request.folder', '"""private"""', "('mis_%s.log' % timestmp)"], {}), "(request.folder, 'private', 'mis_%s.log' % timestmp)\n", (1094, 1146), False, 'import os\n'), ((620, 648), 'sys.stderr.write', 'sys.stderr.write', (["('%s' % msg)"], {}), "('%s' % msg)\n", (636, 648), False, 'import sys\n'), ((712, 742), 'sys.stderr.write', 'sys.stderr.write', (["('%s\\n' % msg)"], {}), "('%s\\n' % msg)\n", (728, 742), False, 'import sys\n')] |
import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from paprika_sync.core.models import PaprikaAccount
from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer
from paprika_sync.core.utils import log_start_end
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Import all recipes from file to specified PaprikaAccount'
def add_arguments(self, parser):
parser.add_argument(
'file',
help='Path to json file containing list of all recipes',
)
parser.add_argument(
'--categories-file',
help='Path to json file containing list of all categories',
)
parser.add_argument(
'paprika_account_id',
type=int,
help='ID of PaprikaAccount to import recipes to',
)
parser.add_argument(
'-r', '--remove',
action='store_true',
help="Removes all of account's existing recipes before importing",
)
@log_start_end(logger)
def handle(self, *args, **options):
recipes_file = options['file']
categories_file = options['categories_file']
pa_id = options['paprika_account_id']
wipe_account = options['remove']
logger.info('Starting import for PaprikaAccount id %s from %s, wipe_account=%s', pa_id, recipes_file, wipe_account)
pa = PaprikaAccount.objects.get(id=pa_id)
with open(recipes_file, 'rt') as fin:
recipes = json.load(fin)
logger.info('Found %s recipes to import to %s', len(recipes), pa)
categories = []
if categories_file:
with open(categories_file, 'rt') as fin:
categories = json.load(fin)
logger.info('Found %s categories to import to %s', len(categories), pa)
with transaction.atomic():
if wipe_account:
pa.recipes.all().delete()
pa.categories.all().delete()
for category in categories:
category['paprika_account'] = pa.id
cs = CategorySerializer(data=category)
if cs.is_valid():
cs.save()
else:
logger.warning('Failed to import category %s (%s) due to errors: %s', category['uid'], category['name'], cs.errors)
for recipe in recipes:
# Remove categories if we're not bothering to import them
if not categories:
recipe['categories'] = []
recipe['paprika_account'] = pa.id
rs = RecipeSerializer(data=recipe)
if rs.is_valid():
rs.save()
else:
logger.warning('Failed to import recipe %s (%s) due to errors: %s', recipe['uid'], recipe['name'], rs.errors)
# recipe_field_names = set([f.name for f in Recipe._meta.fields])
# Recipe.objects.create(
# paprika_account=pa,
# **{k: v for k, v in recipe.items() if k in recipe_field_names},
# )
logger.info('Finished recipe import successfully')
# transaction.set_rollback(True)
| [
"logging.getLogger",
"django.db.transaction.atomic",
"paprika_sync.core.utils.log_start_end",
"paprika_sync.core.models.PaprikaAccount.objects.get",
"paprika_sync.core.serializers.RecipeSerializer",
"paprika_sync.core.serializers.CategorySerializer",
"json.load"
]
| [((307, 334), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (324, 334), False, 'import logging\n'), ((1089, 1110), 'paprika_sync.core.utils.log_start_end', 'log_start_end', (['logger'], {}), '(logger)\n', (1102, 1110), False, 'from paprika_sync.core.utils import log_start_end\n'), ((1469, 1505), 'paprika_sync.core.models.PaprikaAccount.objects.get', 'PaprikaAccount.objects.get', ([], {'id': 'pa_id'}), '(id=pa_id)\n', (1495, 1505), False, 'from paprika_sync.core.models import PaprikaAccount\n'), ((1574, 1588), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1583, 1588), False, 'import json\n'), ((1912, 1932), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1930, 1932), False, 'from django.db import transaction\n'), ((1799, 1813), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1808, 1813), False, 'import json\n'), ((2164, 2197), 'paprika_sync.core.serializers.CategorySerializer', 'CategorySerializer', ([], {'data': 'category'}), '(data=category)\n', (2182, 2197), False, 'from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer\n'), ((2683, 2712), 'paprika_sync.core.serializers.RecipeSerializer', 'RecipeSerializer', ([], {'data': 'recipe'}), '(data=recipe)\n', (2699, 2712), False, 'from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer\n')] |
from dataclasses import dataclass, field
@dataclass
class FooTest:
class Meta:
name = "fooTest"
value: str = field(
init=False,
default="Hello"
)
@dataclass
class Root:
class Meta:
name = "root"
foo_test: str = field(
init=False,
default="Hello",
metadata={
"name": "fooTest",
"type": "Element",
"required": True,
}
)
| [
"dataclasses.field"
]
| [((128, 162), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '"""Hello"""'}), "(init=False, default='Hello')\n", (133, 162), False, 'from dataclasses import dataclass, field\n'), ((269, 374), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '"""Hello"""', 'metadata': "{'name': 'fooTest', 'type': 'Element', 'required': True}"}), "(init=False, default='Hello', metadata={'name': 'fooTest', 'type':\n 'Element', 'required': True})\n", (274, 374), False, 'from dataclasses import dataclass, field\n')] |
import json
import os
from collections import OrderedDict
from copy import deepcopy
import SimpleITK as sitk
from batchgenerators.augmentations.utils import resize_segmentation # resize_softmax_output
from skimage.transform import resize
from torch.optim import lr_scheduler
from torch import nn
import numpy as np
import torch
from scipy.ndimage import binary_fill_holes
'''
This code is not intended to be looked at by anyone. It is messy. It is undocumented.
And the entire training pipeline is missing.
'''
max_num_filters_3d = 320
max_num_filters_2d = 480
join = os.path.join
def load_json(file):
with open(file, 'r') as f:
a = json.load(f)
return a
def resize_image(image, old_spacing, new_spacing, order=3, cval=0):
new_shape = (int(np.round(old_spacing[0]/new_spacing[0]*float(image.shape[0]))),
int(np.round(old_spacing[1]/new_spacing[1]*float(image.shape[1]))),
int(np.round(old_spacing[2]/new_spacing[2]*float(image.shape[2]))))
if any([i != j for i, j in zip(image.shape, new_shape)]):
res = resize(image, new_shape, order=order, mode='edge', cval=cval)
else:
res = image
return res
class ConvDropoutNormNonlin(nn.Module):
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = nn.LeakyReLU(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
def pad_nd_image(image, new_shape=None, mode="edge", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None):
if kwargs is None:
kwargs = {}
if new_shape is not None:
old_shape = np.array(image.shape[-len(new_shape):])
else:
assert shape_must_be_divisible_by is not None
assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray))
new_shape = image.shape[-len(shape_must_be_divisible_by):]
old_shape = new_shape
num_axes_nopad = len(image.shape) - len(new_shape)
new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))]
if not isinstance(new_shape, np.ndarray):
new_shape = np.array(new_shape)
if shape_must_be_divisible_by is not None:
if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)):
shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape)
else:
assert len(shape_must_be_divisible_by) == len(new_shape)
for i in range(len(new_shape)):
if new_shape[i] % shape_must_be_divisible_by[i] == 0:
new_shape[i] -= shape_must_be_divisible_by[i]
new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))])
difference = new_shape - old_shape
pad_below = difference // 2
pad_above = difference // 2 + difference % 2
pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)])
res = np.pad(image, pad_list, mode, **kwargs)
if not return_slicer:
return res
else:
pad_list = np.array(pad_list)
pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1]
slicer = list(slice(*i) for i in pad_list)
return res, slicer
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
self.input_shape_must_be_divisible_by = None
self.conv_op = None
super(NeuralNetwork, self).__init__()
self.inference_apply_nonlin = lambda x:x
def predict_3D(self, x, do_mirroring, num_repeats=1, use_train_mode=False, batch_size=1, mirror_axes=(2, 3, 4),
tiled=False, tile_in_z=True, step=2, patch_size=None, regions_class_order=None, use_gaussian=False,
pad_border_mode="edge", pad_kwargs=None):
"""
:param x: (c, x, y , z)
:param do_mirroring:
:param num_repeats:
:param use_train_mode:
:param batch_size:
:param mirror_axes:
:param tiled:
:param tile_in_z:
:param step:
:param patch_size:
:param regions_class_order:
:param use_gaussian:
:return:
"""
current_mode = self.training
if use_train_mode is not None and use_train_mode:
self.train()
elif use_train_mode is not None and not use_train_mode:
self.eval()
else:
pass
assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
if self.conv_op == nn.Conv3d:
if tiled:
res = self._internal_predict_3D_3Dconv_tiled(x, num_repeats, batch_size, tile_in_z, step, do_mirroring,
mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_3Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
elif self.conv_op == nn.Conv2d:
if tiled:
res = self._internal_predict_3D_2Dconv_tiled(x, do_mirroring, num_repeats, batch_size, mirror_axes,
step, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_2Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
if use_train_mode is not None:
self.train(current_mode)
return res
def _internal_maybe_mirror_and_pred_3D(self, x, num_repeats, mirror_axes, do_mirroring=True):
with torch.no_grad():
a = torch.zeros(x.shape).float()
if self.get_device() == "cpu":
a = a.cpu()
else:
a = a.cuda(self.get_device())
if do_mirroring:
mirror_idx = 8
else:
mirror_idx = 1
all_preds = []
for i in range(num_repeats):
for m in range(mirror_idx):
data_for_net = np.array(x)
do_stuff = False
if m == 0:
do_stuff = True
pass
if m == 1 and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, ::-1]
if do_stuff:
_ = a.data.copy_(torch.from_numpy(np.copy(data_for_net)))
p = self.inference_apply_nonlin(self(a))
p = p.data.cpu().numpy()
if m == 0:
pass
if m == 1 and (4 in mirror_axes):
p = p[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
p = p[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
p = p[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, ::-1, ::-1]
all_preds.append(p)
return np.vstack(all_preds)
def _internal_predict_3D_3Dconv(self, x, do_mirroring, num_repeats, min_size=None, BATCH_SIZE=None,
mirror_axes=(2, 3, 4), regions_class_order=None, pad_border_mode="edge",
pad_kwargs=None):
with torch.no_grad():
x, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by)
#x, old_shape = pad_patient_3D_incl_c(x, self.input_shape_must_be_divisible_by, min_size)
new_shp = x.shape
data = np.zeros(tuple([1] + list(new_shp)), dtype=np.float32)
data[0] = x
if BATCH_SIZE is not None:
data = np.vstack([data] * BATCH_SIZE)
stacked = self._internal_maybe_mirror_and_pred_3D(data, num_repeats, mirror_axes, do_mirroring)
slicer = [slice(0, stacked.shape[i]) for i in range(len(stacked.shape) - (len(slicer) - 1))] + slicer[1:]
stacked = stacked[slicer]
uncertainty = stacked.var(0)
bayesian_predictions = stacked
softmax_pred = stacked.mean(0)
if regions_class_order is None:
predicted_segmentation = softmax_pred.argmax(0)
else:
predicted_segmentation_shp = softmax_pred[0].shape
predicted_segmentation = np.zeros(predicted_segmentation_shp)
for i, c in enumerate(regions_class_order):
predicted_segmentation[softmax_pred[i] > 0.5] = c
return predicted_segmentation, bayesian_predictions, softmax_pred, uncertainty
def softmax_helper(x):
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
class InitWeights_He(object):
def __init__(self, neg_slope=1e-2):
self.neg_slope = neg_slope
def __call__(self, module):
if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
module.weight = nn.init.kaiming_normal_(module.weight, a=1e-2)
if module.bias is not None:
module.bias = nn.init.constant_(module.bias, 0)
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None):
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([ConvDropoutNormNonlin(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[ConvDropoutNormNonlin(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
def soft_dice(net_output, gt, smooth=1., smooth_in_nom=1.):
axes = tuple(range(2, len(net_output.size())))
intersect = sum_tensor(net_output * gt, axes, keepdim=False)
denom = sum_tensor(net_output + gt, axes, keepdim=False)
result = (- ((2 * intersect + smooth_in_nom) / (denom + smooth))).mean()
return result
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(ax)
return input
class Generic_UNet_Cotraining(SegmentationNetwork):
def __init__(self, input_channels, base_num_features, num_classes, num_conv_per_stage=2, num_downscale=4,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False):
"""
Have fun lookint at that one. This is my go-to model. I crammed the cotraining code in there somehow, so yeah.
What a mess.
You know what's the best part? No documentation. What a great piece of code.
:param input_channels:
:param base_num_features:
:param num_classes:
:param num_conv_per_stage:
:param num_downscale:
:param feat_map_mul_on_downscale:
:param conv_op:
:param conv_kwargs:
:param norm_op:
:param norm_op_kwargs:
:param dropout_op:
:param dropout_op_kwargs:
:param nonlin:
:param nonlin_kwargs:
:param deep_supervision:
:param dropout_in_localization:
:param final_nonlin:
:param weightInitializer:
:param pool_op_kernel_sizes:
:param upscale_logits:
:param convolutional_pooling:
:param convolutional_upsampling:
"""
super(Generic_UNet_Cotraining, self).__init__()
assert isinstance(num_classes, (list, tuple)), "for cotraining, num_classes must be list or tuple of int"
self.num_classes = num_classes
self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p':0.5, 'inplace':True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
if pool_op_kernel_sizes is None:
if conv_op == nn.Conv2d:
pool_op_kernel_sizes = [(2, 2)] * num_downscale
elif conv_op == nn.Conv3d:
pool_op_kernel_sizes = [(2, 2, 2)] * num_downscale
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.final_nonlin = final_nonlin
assert num_conv_per_stage > 1, "this implementation does not support only one conv per stage"
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.do_ds = deep_supervision
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
output_features = base_num_features
input_features = input_channels
for d in range(num_downscale):
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d-1]
else:
first_stride = None
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(np.round(output_features * feat_map_mul_on_downscale))
if self.conv_op == nn.Conv3d:
output_features = min(output_features, max_num_filters_3d)
else:
output_features = min(output_features, max_num_filters_2d)
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)))
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
for u in range(num_downscale):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_downscale-1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(nn.Upsample(scale_factor=pool_op_kernel_sizes[-(u+1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u+1)], pool_op_kernel_sizes[-(u+1)], bias=False))
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)
))
for ds in range(len(self.conv_blocks_localization)):
self.seg_outputs.append(nn.ModuleList([conv_op(self.conv_blocks_localization[ds][-1].output_channels, i, 1, 1, 0, 1, 1, False) for i in num_classes]))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_downscale - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(nn.Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl+1]]), mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
self.apply(self.weightInitializer)
self.test_return_output = 0
self.inference = False
def train(self, mode=True):
super(Generic_UNet_Cotraining, self).train(mode)
def eval(self):
super(Generic_UNet_Cotraining, self).eval()
def infer(self, infer):
self.train(False)
self.inference = infer
def forward(self, x):
#input_var = x
skips = []
seg_outputs = []
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
if not self.inference:
seg_outputs.append([self.final_nonlin(self.seg_outputs[u][i](x[(x.shape[0]//len(self.num_classes) * i): (x.shape[0]//len(self.num_classes) * (i+1))])) for i in range(len(self.num_classes))])
else:
seg_outputs.append(self.final_nonlin(self.seg_outputs[u][self.test_return_output](x)))
if self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
class NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE(object):
def __init__(self):
self.preprocessed_data_directory = None
# set through arguments from init
self.experiment_name = "baseline_inspired_by_decathlon 2_regions_cotraining brats dec sd ce"
self.experiment_description = "NetworkTrainerBraTS2018Baseline 2_regions_cotraining brats dec sd ce"
self.output_folder = 'model/params'
self.dataset_directory = None
self.device = 0
self.fold = 0
self.preprocessed_data_directory = None
self.gt_niftis_folder = None
# set in self.initialize()
self.network = None
self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \
self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = None # loaded automatically from plans_file
self.basic_generator_patch_size = self.data_aug_params = self.plans = None
self.was_initialized = False
self.also_val_in_tr_mode = False
self.dataset = None
self.inference_apply_nonlin = nn.Sigmoid()
def initialize(self, training=True):
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.output_folder = os.path.join(self.output_folder, "fold%d" % self.fold)
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.process_plans_file()
if training:
raise NotImplementedError
self.initialize_network_optimizer_and_scheduler()
self.network.inference_apply_nonlin = self.inference_apply_nonlin
self.was_initialized = True
def initialize_network_optimizer_and_scheduler(self):
net_numpool = max(self.net_pool_per_axis)
net_pool_kernel_sizes = []
for s in range(1, net_numpool+1):
this_pool_kernel_sizes = [1, 1, 1]
if self.net_pool_per_axis[0] >= s:
this_pool_kernel_sizes[0] = 2
if self.net_pool_per_axis[1] >= s:
this_pool_kernel_sizes[1] = 2
if len(self.patch_size)>2:
if self.net_pool_per_axis[2] >= s:
this_pool_kernel_sizes[2] = 2
else:
this_pool_kernel_sizes = this_pool_kernel_sizes[:-1]
net_pool_kernel_sizes.append(tuple(this_pool_kernel_sizes))
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.02, 'track_running_stats':False}
dropout_op_kwargs = {'p':0, 'inplace':True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
self.network = Generic_UNet_Cotraining(self.num_input_channels, self.base_num_features, self.num_classes, 2, net_numpool, 2,
conv_op, conv_kwargs, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, False, False, lambda x:x, InitWeights_He(1e-2),
net_pool_kernel_sizes, True, False, False)
self.optimizer = None
self.lr_scheduler = None
self.network.set_device(self.device)
def process_plans_file(self):
self.batch_size = 2
self.net_pool_per_axis = [4, 4, 4]
self.patch_size = (128, 128, 128)
self.intensity_properties = None
self.normalization_schemes = ["nonCT"] * 4
self.base_num_features = 30
self.num_input_channels = 4
self.do_dummy_2D_aug = False
self.use_mask_for_norm = True
self.only_keep_largest_connected_component = {(0, ): False}
if len(self.patch_size) == 2:
self.threeD = False
elif len(self.patch_size) == 3:
self.threeD = True
else:
raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size))
self.regions = ((1, 2, 3, 4), (2, 3, 4), (2,))
self.regions_class_order = (1, 3, 2)
self.batch_size = 2
self.base_num_features = 30
self.num_classes = (3, 3)
def predict_preprocessed_data_return_softmax(self, data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian):
return self.network.predict_3D(data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian=use_gaussian)[2]
def load_best_checkpoint(self, train=True):
self.load_checkpoint(os.path.join(self.output_folder, "model_best.model"), train=train)
def load_checkpoint(self, fname, train=True):
print("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize()
saved_model = torch.load(fname)
new_state_dict = OrderedDict()
for k, value in saved_model['state_dict'].items():
key = k
new_state_dict[key] = value
self.network.load_state_dict(new_state_dict)
self.epoch = saved_model['epoch']
if train:
optimizer_state_dict = saved_model['optimizer_state_dict']
if optimizer_state_dict is not None:
self.optimizer.load_state_dict(optimizer_state_dict)
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])
if len(saved_model['plot_stuff']) < 9:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob = saved_model['plot_stuff']
self.all_val_eval_metrics_dc_per_sample_std = []
else:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob, self.all_val_eval_metrics_dc_per_sample_std = saved_model['plot_stuff']
self.network.set_device(self.device)
def resize_softmax_output(softmax_output, new_shape, order=3):
'''
Resizes softmax output. Resizes each channel in c separately and fuses results back together
:param softmax_output: c x x x y x z
:param new_shape: x x y x z
:param order:
:return:
'''
tpe = softmax_output.dtype
new_shp = [softmax_output.shape[0]] + list(new_shape)
result = np.zeros(new_shp, dtype=softmax_output.dtype)
for i in range(softmax_output.shape[0]):
result[i] = resize(softmax_output[i].astype(float), new_shape, order, "constant", 0, True)
return result.astype(tpe)
def save_segmentation_nifti_softmax(softmax_output, dct, out_fname, order=3, region_class_order=None):
'''
segmentation must have the same spacing as the original nifti (for now). segmentation may have been cropped out
of the original image
:param segmentation:
:param dct:
:param out_fname:
:return:
'''
old_size = dct.get('size_before_cropping')
bbox = dct.get('brain_bbox')
if bbox is not None:
seg_old_size = np.zeros([softmax_output.shape[0]] + list(old_size))
for c in range(3):
bbox[c][1] = np.min((bbox[c][0] + softmax_output.shape[c+1], old_size[c]))
seg_old_size[:, bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1],
bbox[2][0]:bbox[2][1]] = softmax_output
else:
seg_old_size = softmax_output
segmentation = resize_softmax_output(seg_old_size, np.array(dct['size'])[[2, 1, 0]], order=order)
if region_class_order is None:
segmentation = segmentation.argmax(0)
else:
seg_old_spacing_final = np.zeros(segmentation.shape[1:])
for i, c in enumerate(region_class_order):
seg_old_spacing_final[segmentation[i] > 0.5] = c
segmentation = seg_old_spacing_final
return segmentation.astype(np.uint8)
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
def maybe_mkdir_p(directory):
splits = directory.split("/")[1:]
for i in range(0, len(splits)):
if not os.path.isdir(os.path.join("/", *splits[:i+1])):
os.mkdir(os.path.join("/", *splits[:i+1]))
def convert_labels_back(seg):
new_seg = np.zeros(seg.shape, dtype=seg.dtype)
new_seg[seg == 1] = 2
new_seg[seg == 2] = 4
new_seg[seg == 3] = 1
return new_seg
def preprocess_image(itk_image, is_seg=False, spacing_target=(1, 0.5, 0.5), brain_mask=None, cval=0):
"""
brain mask must be a numpy array that has the same shape as itk_image's pixel array. This function is not ideal but
gets the job done
:param itk_image:
:param is_seg:
:param spacing_target:
:param brain_mask:
:return:
"""
spacing = np.array(itk_image.GetSpacing())[[2, 1, 0]]
image = sitk.GetArrayFromImage(itk_image).astype(float)
if not is_seg:
if brain_mask is None:
brain_mask = (image!=image[0,0,0]).astype(float)
if np.any([[i!=j] for i, j in zip(spacing, spacing_target)]):
image = resize_image(image, spacing, spacing_target, 3, cval).astype(np.float32)
brain_mask = resize_image(brain_mask.astype(float), spacing, spacing_target, order=0).astype(int)
image[brain_mask==0] = 0
#subtract mean, divide by std. use heuristic masking
image[brain_mask!=0] -= image[brain_mask!=0].mean()
image[brain_mask!=0] /= image[brain_mask!=0].std()
else:
new_shape = (int(np.round(spacing[0] / spacing_target[0] * float(image.shape[0]))),
int(np.round(spacing[1] / spacing_target[1] * float(image.shape[1]))),
int(np.round(spacing[2] / spacing_target[2] * float(image.shape[2]))))
image = resize_segmentation(image, new_shape, 1, cval)
return image
def create_brain_masks(data):
"""
data must be (b, c, x, y, z), brain mask is hole filled binary mask where all sequences are 0 (this is a heuristic
to recover a brain mask form brain extracted mri sequences, not an actual brain ectraction)
:param data:
:return:
"""
shp = list(data.shape)
brain_mask = np.zeros(shp, dtype=np.float32)
for b in range(data.shape[0]):
for c in range(data.shape[1]):
this_mask = data[b, c] != 0
this_mask = binary_fill_holes(this_mask)
brain_mask[b, c] = this_mask
return brain_mask
def extract_brain_region(image, segmentation, outside_value=0):
brain_voxels = np.where(segmentation != outside_value)
minZidx = int(np.min(brain_voxels[0]))
maxZidx = int(np.max(brain_voxels[0]))
minXidx = int(np.min(brain_voxels[1]))
maxXidx = int(np.max(brain_voxels[1]))
minYidx = int(np.min(brain_voxels[2]))
maxYidx = int(np.max(brain_voxels[2]))
# resize images
resizer = (slice(minZidx, maxZidx), slice(minXidx, maxXidx), slice(minYidx, maxYidx))
return image[resizer], [[minZidx, maxZidx], [minXidx, maxXidx], [minYidx, maxYidx]]
def load_and_preprocess(t1_file, t1km_file, t2_file, flair_file, seg_file=None, bet_file=None, encode_bet_mask_in_seg=False, label_conversion_fn=None):
images = {}
# t1
images["T1"] = sitk.ReadImage(t1_file)
# t1km
images["T1KM"] = sitk.ReadImage(t1km_file)
properties_dict = {
"spacing": images["T1"].GetSpacing(),
"direction": images["T1"].GetDirection(),
"size": images["T1"].GetSize(),
"origin": images["T1"].GetOrigin()
}
# t2
images["T2"] = sitk.ReadImage(t2_file)
# flair
images["FLAIR"] = sitk.ReadImage(flair_file)
if seg_file is not None:
images['seg'] = sitk.ReadImage(seg_file)
if bet_file is not None:
images['bet_mask'] = sitk.ReadImage(bet_file)
else:
t1_npy = sitk.GetArrayFromImage(images["T1"])
mask = create_brain_masks(t1_npy[None])[0].astype(int)
mask = sitk.GetImageFromArray(mask)
mask.CopyInformation(images["T1"])
images['bet_mask'] = mask
try:
images["t1km_sub"] = images["T1KM"] - images["T1"]
except RuntimeError:
tmp1 = sitk.GetArrayFromImage(images["T1KM"])
tmp2 = sitk.GetArrayFromImage(images["T1"])
res = tmp1 - tmp2
res_itk = sitk.GetImageFromArray(res)
res_itk.CopyInformation(images["T1"])
images["t1km_sub"] = res_itk
for k in ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub"]:
images[k] = sitk.Mask(images[k], images['bet_mask'], 0)
bet_numpy = sitk.GetArrayFromImage(images['bet_mask'])
for k in images.keys():
is_seg = (k == "seg") | (k == "bet_mask")
if is_seg:
cval = -1
else:
cval = 0
images[k] = preprocess_image(images[k], is_seg=is_seg,
spacing_target=(1., 1., 1.), brain_mask=np.copy(bet_numpy), cval=cval)
properties_dict['size_before_cropping'] = images["T1"].shape
mask = np.copy(images['bet_mask'])
for k in images.keys():
images[k], bbox = extract_brain_region(images[k], mask, False)
properties_dict['brain_bbox'] = bbox
if (label_conversion_fn is not None) and ("seg" in images.keys()):
images["seg"] = label_conversion_fn(images["seg"])
use_these = ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub", 'seg']
if (not encode_bet_mask_in_seg) or ("seg" not in images.keys()):
use_these.append("bet_mask")
else:
images["seg"][images["bet_mask"] <= 0] = -1
imgs = []
for seq in use_these:
if seq not in images.keys():
imgs.append(np.zeros(images["T1"].shape)[None])
else:
imgs.append(images[seq][None])
all_data = np.vstack(imgs)
return all_data, properties_dict
def segment(t1_file, t1ce_file, t2_file, flair_file, netLoc):
"""
Segments the passed files
"""
trainer = NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE()
trainer.initialize(False)
all_data, dct = load_and_preprocess(t1_file, t1ce_file, t2_file, flair_file, None, None,
True, None)
all_softmax = []
for fold in range(5):
trainer.output_folder = join(netLoc, "%d" % fold)
trainer.load_best_checkpoint(False)
trainer.network.infer(True)
trainer.network.test_return_output = 0
softmax = trainer.predict_preprocessed_data_return_softmax(all_data[:4], True, 1, False, 1, (2, 3, 4), False,
None, None, trainer.patch_size, True)
all_softmax.append(softmax[None])
softmax_consolidated = np.vstack(all_softmax).mean(0)
output = save_segmentation_nifti_softmax(softmax_consolidated, dct,
"tumor_isen2018_class.nii.gz", 1,
trainer.regions_class_order)
return output
| [
"numpy.prod",
"torch.nn.init.constant_",
"torch.exp",
"numpy.array",
"copy.deepcopy",
"torch.nn.Sigmoid",
"os.listdir",
"numpy.where",
"torch.nn.ModuleList",
"torch.nn.init.kaiming_normal_",
"SimpleITK.GetArrayFromImage",
"batchgenerators.augmentations.utils.resize_segmentation",
"numpy.max",
"os.path.isdir",
"numpy.vstack",
"os.mkdir",
"numpy.min",
"numpy.round",
"collections.OrderedDict",
"torch.nn.LeakyReLU",
"scipy.ndimage.binary_fill_holes",
"SimpleITK.ReadImage",
"SimpleITK.Mask",
"torch.nn.Upsample",
"skimage.transform.resize",
"torch.cat",
"numpy.copy",
"numpy.unique",
"SimpleITK.GetImageFromArray",
"torch.load",
"os.path.join",
"numpy.zeros",
"json.load",
"torch.no_grad",
"numpy.pad",
"torch.zeros"
]
| [((4533, 4572), 'numpy.pad', 'np.pad', (['image', 'pad_list', 'mode'], {}), '(image, pad_list, mode, **kwargs)\n', (4539, 4572), True, 'import numpy as np\n'), ((13081, 13101), 'torch.exp', 'torch.exp', (['(x - x_max)'], {}), '(x - x_max)\n', (13090, 13101), False, 'import torch\n'), ((16372, 16387), 'numpy.unique', 'np.unique', (['axes'], {}), '(axes)\n', (16381, 16387), True, 'import numpy as np\n'), ((34691, 34736), 'numpy.zeros', 'np.zeros', (['new_shp'], {'dtype': 'softmax_output.dtype'}), '(new_shp, dtype=softmax_output.dtype)\n', (34699, 34736), True, 'import numpy as np\n'), ((36878, 36914), 'numpy.zeros', 'np.zeros', (['seg.shape'], {'dtype': 'seg.dtype'}), '(seg.shape, dtype=seg.dtype)\n', (36886, 36914), True, 'import numpy as np\n'), ((38796, 38827), 'numpy.zeros', 'np.zeros', (['shp'], {'dtype': 'np.float32'}), '(shp, dtype=np.float32)\n', (38804, 38827), True, 'import numpy as np\n'), ((39143, 39182), 'numpy.where', 'np.where', (['(segmentation != outside_value)'], {}), '(segmentation != outside_value)\n', (39151, 39182), True, 'import numpy as np\n'), ((39838, 39861), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['t1_file'], {}), '(t1_file)\n', (39852, 39861), True, 'import SimpleITK as sitk\n'), ((39894, 39919), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['t1km_file'], {}), '(t1km_file)\n', (39908, 39919), True, 'import SimpleITK as sitk\n'), ((40159, 40182), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['t2_file'], {}), '(t2_file)\n', (40173, 40182), True, 'import SimpleITK as sitk\n'), ((40218, 40244), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['flair_file'], {}), '(flair_file)\n', (40232, 40244), True, 'import SimpleITK as sitk\n'), ((41149, 41191), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (["images['bet_mask']"], {}), "(images['bet_mask'])\n", (41171, 41191), True, 'import SimpleITK as sitk\n'), ((41595, 41622), 'numpy.copy', 'np.copy', (["images['bet_mask']"], {}), "(images['bet_mask'])\n", (41602, 41622), True, 'import numpy as np\n'), ((42338, 42353), 'numpy.vstack', 'np.vstack', (['imgs'], {}), '(imgs)\n', (42347, 42353), True, 'import numpy as np\n'), ((649, 661), 'json.load', 'json.load', (['f'], {}), '(f)\n', (658, 661), False, 'import json\n'), ((1076, 1137), 'skimage.transform.resize', 'resize', (['image', 'new_shape'], {'order': 'order', 'mode': '"""edge"""', 'cval': 'cval'}), "(image, new_shape, order=order, mode='edge', cval=cval)\n", (1082, 1137), False, 'from skimage.transform import resize\n'), ((2762, 2796), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '(**self.nonlin_kwargs)\n', (2774, 2796), False, 'from torch import nn\n'), ((3665, 3684), 'numpy.array', 'np.array', (['new_shape'], {}), '(new_shape)\n', (3673, 3684), True, 'import numpy as np\n'), ((4647, 4665), 'numpy.array', 'np.array', (['pad_list'], {}), '(pad_list)\n', (4655, 4665), True, 'import numpy as np\n'), ((11276, 11296), 'numpy.vstack', 'np.vstack', (['all_preds'], {}), '(all_preds)\n', (11285, 11296), True, 'import numpy as np\n'), ((18495, 18527), 'numpy.prod', 'np.prod', (['pool_op_kernel_sizes', '(0)'], {}), '(pool_op_kernel_sizes, 0)\n', (18502, 18527), True, 'import numpy as np\n'), ((25719, 25763), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.conv_blocks_localization'], {}), '(self.conv_blocks_localization)\n', (25732, 25763), False, 'from torch import nn\n'), ((25799, 25838), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.conv_blocks_context'], {}), '(self.conv_blocks_context)\n', (25812, 25838), False, 'from torch import nn\n'), ((25857, 25879), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.td'], {}), '(self.td)\n', (25870, 25879), False, 'from torch import nn\n'), ((25898, 25920), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.tu'], {}), '(self.tu)\n', (25911, 25920), False, 'from torch import nn\n'), ((25948, 25979), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.seg_outputs'], {}), '(self.seg_outputs)\n', (25961, 25979), False, 'from torch import nn\n'), ((28785, 28797), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (28795, 28797), False, 'from torch import nn\n'), ((28960, 29014), 'os.path.join', 'os.path.join', (['self.output_folder', "('fold%d' % self.fold)"], {}), "(self.output_folder, 'fold%d' % self.fold)\n", (28972, 29014), False, 'import os\n'), ((32843, 32860), 'torch.load', 'torch.load', (['fname'], {}), '(fname)\n', (32853, 32860), False, 'import torch\n'), ((32886, 32899), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (32897, 32899), False, 'from collections import OrderedDict\n'), ((35973, 36005), 'numpy.zeros', 'np.zeros', (['segmentation.shape[1:]'], {}), '(segmentation.shape[1:])\n', (35981, 36005), True, 'import numpy as np\n'), ((38395, 38441), 'batchgenerators.augmentations.utils.resize_segmentation', 'resize_segmentation', (['image', 'new_shape', '(1)', 'cval'], {}), '(image, new_shape, 1, cval)\n', (38414, 38441), False, 'from batchgenerators.augmentations.utils import resize_segmentation\n'), ((39201, 39224), 'numpy.min', 'np.min', (['brain_voxels[0]'], {}), '(brain_voxels[0])\n', (39207, 39224), True, 'import numpy as np\n'), ((39244, 39267), 'numpy.max', 'np.max', (['brain_voxels[0]'], {}), '(brain_voxels[0])\n', (39250, 39267), True, 'import numpy as np\n'), ((39287, 39310), 'numpy.min', 'np.min', (['brain_voxels[1]'], {}), '(brain_voxels[1])\n', (39293, 39310), True, 'import numpy as np\n'), ((39330, 39353), 'numpy.max', 'np.max', (['brain_voxels[1]'], {}), '(brain_voxels[1])\n', (39336, 39353), True, 'import numpy as np\n'), ((39373, 39396), 'numpy.min', 'np.min', (['brain_voxels[2]'], {}), '(brain_voxels[2])\n', (39379, 39396), True, 'import numpy as np\n'), ((39416, 39439), 'numpy.max', 'np.max', (['brain_voxels[2]'], {}), '(brain_voxels[2])\n', (39422, 39439), True, 'import numpy as np\n'), ((40299, 40323), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['seg_file'], {}), '(seg_file)\n', (40313, 40323), True, 'import SimpleITK as sitk\n'), ((40383, 40407), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['bet_file'], {}), '(bet_file)\n', (40397, 40407), True, 'import SimpleITK as sitk\n'), ((40435, 40471), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (["images['T1']"], {}), "(images['T1'])\n", (40457, 40471), True, 'import SimpleITK as sitk\n'), ((40550, 40578), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['mask'], {}), '(mask)\n', (40572, 40578), True, 'import SimpleITK as sitk\n'), ((41088, 41131), 'SimpleITK.Mask', 'sitk.Mask', (['images[k]', "images['bet_mask']", '(0)'], {}), "(images[k], images['bet_mask'], 0)\n", (41097, 41131), True, 'import SimpleITK as sitk\n'), ((4691, 4710), 'numpy.array', 'np.array', (['res.shape'], {}), '(res.shape)\n', (4699, 4710), True, 'import numpy as np\n'), ((8137, 8152), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8150, 8152), False, 'import torch\n'), ((11579, 11594), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11592, 11594), False, 'import torch\n'), ((13484, 13530), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['module.weight'], {'a': '(0.01)'}), '(module.weight, a=0.01)\n', (13507, 13530), False, 'from torch import nn\n'), ((14958, 14979), 'copy.deepcopy', 'deepcopy', (['conv_kwargs'], {}), '(conv_kwargs)\n', (14966, 14979), False, 'from copy import deepcopy\n'), ((26050, 26088), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.upscale_logits_ops'], {}), '(self.upscale_logits_ops)\n', (26063, 26088), False, 'from torch import nn\n'), ((26949, 26987), 'torch.cat', 'torch.cat', (['(x, skips[-(u + 1)])'], {'dim': '(1)'}), '((x, skips[-(u + 1)]), dim=1)\n', (26958, 26987), False, 'import torch\n'), ((28855, 28888), 'os.path.isdir', 'os.path.isdir', (['self.output_folder'], {}), '(self.output_folder)\n', (28868, 28888), False, 'import os\n'), ((28902, 28930), 'os.mkdir', 'os.mkdir', (['self.output_folder'], {}), '(self.output_folder)\n', (28910, 28930), False, 'import os\n'), ((29030, 29063), 'os.path.isdir', 'os.path.isdir', (['self.output_folder'], {}), '(self.output_folder)\n', (29043, 29063), False, 'import os\n'), ((29077, 29105), 'os.mkdir', 'os.mkdir', (['self.output_folder'], {}), '(self.output_folder)\n', (29085, 29105), False, 'import os\n'), ((32576, 32628), 'os.path.join', 'os.path.join', (['self.output_folder', '"""model_best.model"""'], {}), "(self.output_folder, 'model_best.model')\n", (32588, 32628), False, 'import os\n'), ((35484, 35547), 'numpy.min', 'np.min', (['(bbox[c][0] + softmax_output.shape[c + 1], old_size[c])'], {}), '((bbox[c][0] + softmax_output.shape[c + 1], old_size[c]))\n', (35490, 35547), True, 'import numpy as np\n'), ((35802, 35823), 'numpy.array', 'np.array', (["dct['size']"], {}), "(dct['size'])\n", (35810, 35823), True, 'import numpy as np\n'), ((36385, 36403), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (36395, 36403), False, 'import os\n'), ((37448, 37481), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['itk_image'], {}), '(itk_image)\n', (37470, 37481), True, 'import SimpleITK as sitk\n'), ((38966, 38994), 'scipy.ndimage.binary_fill_holes', 'binary_fill_holes', (['this_mask'], {}), '(this_mask)\n', (38983, 38994), False, 'from scipy.ndimage import binary_fill_holes\n'), ((40765, 40803), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (["images['T1KM']"], {}), "(images['T1KM'])\n", (40787, 40803), True, 'import SimpleITK as sitk\n'), ((40819, 40855), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (["images['T1']"], {}), "(images['T1'])\n", (40841, 40855), True, 'import SimpleITK as sitk\n'), ((40900, 40927), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['res'], {}), '(res)\n', (40922, 40927), True, 'import SimpleITK as sitk\n'), ((43280, 43302), 'numpy.vstack', 'np.vstack', (['all_softmax'], {}), '(all_softmax)\n', (43289, 43302), True, 'import numpy as np\n'), ((12016, 12046), 'numpy.vstack', 'np.vstack', (['([data] * BATCH_SIZE)'], {}), '([data] * BATCH_SIZE)\n', (12025, 12046), True, 'import numpy as np\n'), ((12675, 12711), 'numpy.zeros', 'np.zeros', (['predicted_segmentation_shp'], {}), '(predicted_segmentation_shp)\n', (12683, 12711), True, 'import numpy as np\n'), ((13601, 13634), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.bias', '(0)'], {}), '(module.bias, 0)\n', (13618, 13634), False, 'from torch import nn\n'), ((21721, 21774), 'numpy.round', 'np.round', (['(output_features * feat_map_mul_on_downscale)'], {}), '(output_features * feat_map_mul_on_downscale)\n', (21729, 21774), True, 'import numpy as np\n'), ((25196, 25227), 'numpy.vstack', 'np.vstack', (['pool_op_kernel_sizes'], {}), '(pool_op_kernel_sizes)\n', (25205, 25227), True, 'import numpy as np\n'), ((36742, 36776), 'os.path.join', 'os.path.join', (['"""/"""', '*splits[:i + 1]'], {}), "('/', *splits[:i + 1])\n", (36754, 36776), False, 'import os\n'), ((36798, 36832), 'os.path.join', 'os.path.join', (['"""/"""', '*splits[:i + 1]'], {}), "('/', *splits[:i + 1])\n", (36810, 36832), False, 'import os\n'), ((41486, 41504), 'numpy.copy', 'np.copy', (['bet_numpy'], {}), '(bet_numpy)\n', (41493, 41504), True, 'import numpy as np\n'), ((8170, 8190), 'torch.zeros', 'torch.zeros', (['x.shape'], {}), '(x.shape)\n', (8181, 8190), False, 'import torch\n'), ((8591, 8602), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (8599, 8602), True, 'import numpy as np\n'), ((23961, 24037), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'pool_op_kernel_sizes[-(u + 1)]', 'mode': 'upsample_mode'}), '(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode)\n', (23972, 24037), False, 'from torch import nn\n'), ((36422, 36445), 'os.path.join', 'os.path.join', (['folder', 'i'], {}), '(folder, i)\n', (36434, 36445), False, 'import os\n'), ((42230, 42258), 'numpy.zeros', 'np.zeros', (["images['T1'].shape"], {}), "(images['T1'].shape)\n", (42238, 42258), True, 'import numpy as np\n'), ((10110, 10131), 'numpy.copy', 'np.copy', (['data_for_net'], {}), '(data_for_net)\n', (10117, 10131), True, 'import numpy as np\n')] |
import os
import shutil
import tempfile
import zipfile
def archive_write(archivepath, data, filename, compression, compressionlevel):
"""
Create a file named filename in the archive and write data to it
:param archivepath: The path to the zip-archive
:type archivepath: str
:param data: The data to be written to the file
:type data: str
:param filename: The filename for the newly created file
:type filename: str
:param compression: The desired compression for the zip-archive
:type compression: int
:param compressionlevel: The desired compression level for the zip-archive
:type compressionlevel: int
:return: void
"""
archive = zipfile.ZipFile(archivepath, mode='a',
compression=compression,
compresslevel=compressionlevel)
archive.writestr(filename, data)
archive.close()
def create_archive(archivepath, filedict, compression, compressionlevel):
"""
Write filedict to zip-archive data subdirectory. Will check wether archive
at archivepath exists before writing. If file exists will raise a
FileExistsError.
:param archivepath: the path to the file
:param filedict: dictionary containing the filepath, filename key-value
pairs
:param compression: desired compression methods (see zipfile documentation)
:param compressionlevel: compression level (see zipfile documentation)
:return: void
"""
if os.path.isfile(archivepath):
raise FileExistsError("Specified file already exists")
else:
archive = zipfile.ZipFile(archivepath, mode='x',
compression=compression,
compresslevel=compressionlevel)
for filepath, filename in filedict.items():
archive.write(filepath, arcname="data/" + filename)
archive.close()
def extract_archdata(archivepath, filename, destination):
"""
Extract a file from a archive and write it to the destination. If the
destination path already exists extract_archdata will not overwrite but
will throw a "FileExists" error.
:param archivepath: The path to the archive containing the file
:type archivepath: str
:param filename: The archive name of the desired file.
:type filename: str
:param destination: The path at which the extracted file is to be placed.
:type destination: str
:return: void
:rtype: None
"""
# check if destination path already exists
if os.path.exists(destination):
raise FileExistsError("The specified destination is already in use")
archive = zipfile.ZipFile(archivepath, mode='r')
with tempfile.TemporaryDirectory() as tmpdir:
archive.extract(filename, path=tmpdir)
# create directories for the destination
os.makedirs(os.path.dirname(destination), exist_ok=True)
shutil.copy(os.path.abspath(tmpdir + "/" + filename), destination)
def read_bin(archivepath, filelist):
"""
Read a list of files from an archive and return the file data as a
dictionary of filename, data key-value pairs.
:param archivepath: the path to the archive
:param filelist: list of filenames to read
:return: dictionary with filename, data key-value pairs
:rtype: dict
"""
datadict = dict()
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
for filename in filelist:
try:
file = archive.open(filename)
datadict[filename] = file.read().decode()
file.close()
except KeyError:
datadict[filename] = None
archive.close()
return datadict
def read_diff_log(archivepath):
"""
Read the diff-log.csv from a given archive file.
:param archivepath: The path to the zip-archive
:type archivepath: str
:return: The diff-log.csv contents in ascii string form.
:rtype: str
"""
arch = zipfile.ZipFile(archivepath, mode='r')
diff_log_file = arch.open("diff-log.csv")
diff_log_bin = diff_log_file.read()
diff_log = diff_log_bin.decode()
diff_log_file.close()
arch.close()
return diff_log
def zip_extract(archivepath, filelist, extractpath):
"""
Extract a list of files to a specific location
:param archivepath: the path to the zip-archive
:param filelist: list of member filenames to extract
:param extractpath: path for the extracted files
:return: void
"""
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
archive.extractall(path=extractpath, members=filelist)
archive.close()
| [
"os.path.exists",
"tempfile.TemporaryDirectory",
"zipfile.ZipFile",
"os.path.isfile",
"os.path.dirname",
"os.path.abspath"
]
| [((696, 795), 'zipfile.ZipFile', 'zipfile.ZipFile', (['archivepath'], {'mode': '"""a"""', 'compression': 'compression', 'compresslevel': 'compressionlevel'}), "(archivepath, mode='a', compression=compression,\n compresslevel=compressionlevel)\n", (711, 795), False, 'import zipfile\n'), ((1491, 1518), 'os.path.isfile', 'os.path.isfile', (['archivepath'], {}), '(archivepath)\n', (1505, 1518), False, 'import os\n'), ((2547, 2574), 'os.path.exists', 'os.path.exists', (['destination'], {}), '(destination)\n', (2561, 2574), False, 'import os\n'), ((2667, 2705), 'zipfile.ZipFile', 'zipfile.ZipFile', (['archivepath'], {'mode': '"""r"""'}), "(archivepath, mode='r')\n", (2682, 2705), False, 'import zipfile\n'), ((3372, 3399), 'os.path.isfile', 'os.path.isfile', (['archivepath'], {}), '(archivepath)\n', (3386, 3399), False, 'import os\n'), ((4073, 4111), 'zipfile.ZipFile', 'zipfile.ZipFile', (['archivepath'], {'mode': '"""r"""'}), "(archivepath, mode='r')\n", (4088, 4111), False, 'import zipfile\n'), ((4608, 4635), 'os.path.isfile', 'os.path.isfile', (['archivepath'], {}), '(archivepath)\n', (4622, 4635), False, 'import os\n'), ((1611, 1710), 'zipfile.ZipFile', 'zipfile.ZipFile', (['archivepath'], {'mode': '"""x"""', 'compression': 'compression', 'compresslevel': 'compressionlevel'}), "(archivepath, mode='x', compression=compression,\n compresslevel=compressionlevel)\n", (1626, 1710), False, 'import zipfile\n'), ((2715, 2744), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2742, 2744), False, 'import tempfile\n'), ((3419, 3457), 'zipfile.ZipFile', 'zipfile.ZipFile', (['archivepath'], {'mode': '"""r"""'}), "(archivepath, mode='r')\n", (3434, 3457), False, 'import zipfile\n'), ((4655, 4693), 'zipfile.ZipFile', 'zipfile.ZipFile', (['archivepath'], {'mode': '"""r"""'}), "(archivepath, mode='r')\n", (4670, 4693), False, 'import zipfile\n'), ((2873, 2901), 'os.path.dirname', 'os.path.dirname', (['destination'], {}), '(destination)\n', (2888, 2901), False, 'import os\n'), ((2939, 2979), 'os.path.abspath', 'os.path.abspath', (["(tmpdir + '/' + filename)"], {}), "(tmpdir + '/' + filename)\n", (2954, 2979), False, 'import os\n')] |
import argparse
import sys
from cliquet.scripts import cliquet
from pyramid.scripts import pserve
from pyramid.paster import bootstrap
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Kinto commands")
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
help='init/start/migrate')
parser_init = subparsers.add_parser('init')
parser_init.set_defaults(which='init')
parser_init.add_argument('--config_file', required=False,
help='Config file may be passed as argument')
parser_migrate = subparsers.add_parser('migrate')
parser_migrate.set_defaults(which='migrate')
parser_start = subparsers.add_parser('start')
parser_start.set_defaults(which='start')
args = vars(parser.parse_args())
if args['which'] == 'init':
if(args['config_file'] is None):
env = bootstrap('config/kinto.ini')
else:
config_file = format(args['config_file'])
env = bootstrap(config_file)
elif args['which'] == 'migrate':
env = bootstrap('config/kinto.ini')
cliquet.init_schema(env)
elif args['which'] == 'start':
pserve_argv = ['pserve', 'config/kinto.ini', '--reload']
pserve.main(pserve_argv)
if __name__ == "__main__":
main()
| [
"cliquet.scripts.cliquet.init_schema",
"pyramid.paster.bootstrap",
"argparse.ArgumentParser",
"pyramid.scripts.pserve.main"
]
| [((269, 322), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Kinto commands"""'}), "(description='Kinto commands')\n", (292, 322), False, 'import argparse\n'), ((1148, 1177), 'pyramid.paster.bootstrap', 'bootstrap', (['"""config/kinto.ini"""'], {}), "('config/kinto.ini')\n", (1157, 1177), False, 'from pyramid.paster import bootstrap\n'), ((1288, 1310), 'pyramid.paster.bootstrap', 'bootstrap', (['config_file'], {}), '(config_file)\n', (1297, 1310), False, 'from pyramid.paster import bootstrap\n'), ((1374, 1403), 'pyramid.paster.bootstrap', 'bootstrap', (['"""config/kinto.ini"""'], {}), "('config/kinto.ini')\n", (1383, 1403), False, 'from pyramid.paster import bootstrap\n'), ((1420, 1444), 'cliquet.scripts.cliquet.init_schema', 'cliquet.init_schema', (['env'], {}), '(env)\n', (1439, 1444), False, 'from cliquet.scripts import cliquet\n'), ((1573, 1597), 'pyramid.scripts.pserve.main', 'pserve.main', (['pserve_argv'], {}), '(pserve_argv)\n', (1584, 1597), False, 'from pyramid.scripts import pserve\n')] |
from django.contrib import admin
from django.contrib.auth.models import User
from .models import Vegetable, Harvest, Transaction, Merchandise, MerchandisePrice
from .models import PurchasedItem, UserProfile, VegetablePrice, StockedVegetable
from .models import MerchandisePhotos
admin.site.register(Vegetable)
admin.site.register(StockedVegetable)
admin.site.register(Harvest)
admin.site.register(VegetablePrice)
admin.site.register(PurchasedItem)
admin.site.register(Transaction)
admin.site.register(UserProfile)
admin.site.register(Merchandise)
admin.site.register(MerchandisePrice)
admin.site.register(MerchandisePhotos)
| [
"django.contrib.admin.site.register"
]
| [((280, 310), 'django.contrib.admin.site.register', 'admin.site.register', (['Vegetable'], {}), '(Vegetable)\n', (299, 310), False, 'from django.contrib import admin\n'), ((311, 348), 'django.contrib.admin.site.register', 'admin.site.register', (['StockedVegetable'], {}), '(StockedVegetable)\n', (330, 348), False, 'from django.contrib import admin\n'), ((349, 377), 'django.contrib.admin.site.register', 'admin.site.register', (['Harvest'], {}), '(Harvest)\n', (368, 377), False, 'from django.contrib import admin\n'), ((378, 413), 'django.contrib.admin.site.register', 'admin.site.register', (['VegetablePrice'], {}), '(VegetablePrice)\n', (397, 413), False, 'from django.contrib import admin\n'), ((414, 448), 'django.contrib.admin.site.register', 'admin.site.register', (['PurchasedItem'], {}), '(PurchasedItem)\n', (433, 448), False, 'from django.contrib import admin\n'), ((449, 481), 'django.contrib.admin.site.register', 'admin.site.register', (['Transaction'], {}), '(Transaction)\n', (468, 481), False, 'from django.contrib import admin\n'), ((482, 514), 'django.contrib.admin.site.register', 'admin.site.register', (['UserProfile'], {}), '(UserProfile)\n', (501, 514), False, 'from django.contrib import admin\n'), ((515, 547), 'django.contrib.admin.site.register', 'admin.site.register', (['Merchandise'], {}), '(Merchandise)\n', (534, 547), False, 'from django.contrib import admin\n'), ((548, 585), 'django.contrib.admin.site.register', 'admin.site.register', (['MerchandisePrice'], {}), '(MerchandisePrice)\n', (567, 585), False, 'from django.contrib import admin\n'), ((586, 624), 'django.contrib.admin.site.register', 'admin.site.register', (['MerchandisePhotos'], {}), '(MerchandisePhotos)\n', (605, 624), False, 'from django.contrib import admin\n')] |
'''
Summary: Program that implements a routing deamon based on the
RIP version 2 protocol from RFC2453.
Usage: python3 Router.py <router_config_file>
Configuration File:
The user supplies a router configuration file of the format:
[Settings]
router-id = <router_number>
input-ports = <input> [, <input>, ...]
outputs = <output>-<metric>-<destination_router>
[, <output>-<metric>-<destination_router>, ...]
where,
router_number: ID of router between 1 - 64000.
input: port number between 1024 - 64000.
output: port number between 1024 - 6400,
not equal to any inputs.
metric: metric of output between 1 - 16.
destination_router: ID of destination router.
Description:
This program implements a basic RIPv2 routing protocol from RFC2453
for routing computations in computer networks. It takes a configuration
file as shown above and sets up a router with a new socket for each
input-port.
The RIPv2 protocol uses a routing table to keep track of all reachable
routers on the network along with their metric/cost and the direct
next hop router ID along the route to that destination router. However,
it can only send messages to the direct neighbours specified in outputs.
The protocol uses the Bellman-Ford distance vector algorithm to compute
the lowest cost route to each router in the network. If the metric is
16 or greater, the router is considered unreachable.
The routing table initially starts with a single route entry (RTE) for
itself with a metric of zero. The routing table is periodically
transmitted too each of its direct output ports via an unsolicited
response message as defined in RFC2453 section 3.9.2 and 4. This is
performed on a separate thread so it does not interfere with other
operations
The receives messages from other routers by using the python select()
function which blocks until a message is ready to be read. Once a
message is received the header and contents are validated.
If the message is valid each RTE is processed according to RFC2453
section 3.9.2.
If a new router is found the RTE is added
to the routing table, adding the cost to the metric for the output
the message was received on.
If the RTE already exists, but the metric is smaller, the metric
is updated to the lower metric.
If the lower metric is from a different next hop router, change the
next hop.
If nothing has changed, restart the timeout timer.
If RTE metric >= max metric of 16, mark the entry for
garbage collection and update the metric in the table.
If any change has occurred in the routing table as a result of a
received message, a triggered update (RFC2453 section 3.10.1) is sent
to all outputs with the updated entries. Triggered updates are sent with
a random delay between 1 - 5 seconds to prevent synchronized updates.
Request messages are not implemented in this program.
Timers (all timers are on separate threads) (RFC2453 section 3.8):
Update timer - Periodic unsolicited response message sent to all
outputs. The period is adjusted each time to a random value
between 0.8 * BASE_TIMER and 1.2 * BASE_TIMER to prevent
synchronized updates.
Timeout - used to check the routing table for RTEs which have
have not been updated within the ROUTE_TIMEOUT interval. If
a router has not been heard from within this time, then set the
metric to the max metric of 16 and start the garbage collection
timer.
Garbage timer - used to check the routing table for RTEs set
for garbage collection. If the timeout >= DELETE_TIMEOUT,
mark the RTE for deletion.
Garbage Collection - used to check the routing table for RTEs
marked for deletion, and removes those entries from the table.
'''
import configparser
import select
import socket
import sys
import time
import threading
import struct
import datetime
from random import randint, randrange
DEBUG = False
HOST = '127.0.0.1' # localhost
BASE_TIMER = 5
MAX_METRIC = 16
ROUTE_TIMEOUT = BASE_TIMER * 6
DELETE_TIMEOUT = BASE_TIMER * 4
AF_INET = 2
# ===========================================================================
# TRANSITIONS
class Transistion():
'''Class Representing a transition between states.'''
def __init__(self, to_state):
self.to_state = to_state
def execute(self):
'''Run the transition functions'''
pass
# ===========================================================================
# STATES
class State():
'''Class Representing a generic state'''
def __init__(self, fsm):
self.fsm = fsm
def enter(self):
'''Execute functions for entering a state'''
pass
def execute(self):
'''Execute functions while in state'''
pass
def exit(self):
'''Execute functions for leaving a state'''
pass
class StartUp(State):
'''Class Representing the Start up state which reads the configuration file
'''
def __init__(self, fsm):
super(StartUp, self).__init__(fsm)
def execute(self):
'''Execute the configuration functions'''
print_message("Loading Configuration File: '"
+ self.fsm.router.config_file + "'")
config = configparser.ConfigParser()
config.read(self.fsm.router.config_file)
self.get_router_id(config)
self.setup_inputs(config)
self.get_outputs(config)
self.setup_routing_table()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
'''Print complete message'''
print_message("Router Setup Complete.")
def get_router_id(self, config):
'''Read the router id number from the configuration file'''
if 1 <= int(config['Settings']['router-id']) <= 64000:
self.fsm.router.router_settings['id'] = \
int(config['Settings']['router-id'])
else:
raise Exception('Invalid Router ID Number')
def get_outputs(self, config):
'''Return a dictionary of outputs containing port, cost and destination
router id from the Configuration file'''
outputs = config['Settings']['outputs'].split(', ')
outputs = [i.split('-') for i in outputs]
self.fsm.router.router_settings['outputs'] = {}
existing_ports = []
for output in outputs:
is_valid_port = 1024 <= int(output[0]) <= 64000 and not \
int(output[0]) in existing_ports
is_valid_cost = 1 <= int(output[1]) < 16
is_valid_id = 1 <= int(output[2]) <= 64000
if is_valid_port and is_valid_cost and is_valid_id:
existing_ports.append(int(output[0]))
self.fsm.router.router_settings['outputs'][int(output[2])] = \
{'metric': int(output[1]),
'port': int(output[0])}
else:
raise Exception('Invalid Outputs')
def setup_inputs(self, config):
'''Create input sockets from the inputs specified in the config file'''
# get inputs from configuration file
ports = config['Settings']['input-ports'].split(', ')
inputs = []
for port in ports:
if 1024 <= int(port) <= 64000 and not int(port) in inputs:
inputs.append(int(port))
else:
raise Exception('Invalid Port Number')
self.fsm.router.router_settings['inputs'] = {}
# create socket for each input port
for port in inputs:
try:
self.fsm.router.router_settings['inputs'][port] = \
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print_message('Socket ' + str(port) + ' Created.')
except socket.error as msg:
print('Failed to create socket. Message: ' + str(msg))
sys.exit()
# bind port to socket
try:
self.fsm.router.router_settings['inputs'][port].bind(
(HOST, port))
print_message('Socket ' + str(port) + ' Bind Complete.')
except socket.error as msg:
print('Failed to create socket. Message ' + str(msg))
sys.exit()
def setup_routing_table(self):
'''Setup routing table with the outputs specified in the config file'''
self.fsm.router.routing_table[self.fsm.router.router_settings['id']] = \
RIPRouteEntry(address=self.fsm.router.router_settings['id'],
nexthop=0,
metric=0,
imported=True)
class Waiting(State):
'''
Class representing the waiting state of the FSM where the router waits
for messages to be received on its input sockets. When a message is
received the state changes to the ReadMeassage state.
'''
def __init__(self, fsm):
super(Waiting, self).__init__(fsm)
def enter(self):
'''Display State entry message'''
print_message("Entering idle state...")
def execute(self):
'''Waits for input sockets to be readable and then changes the state
to process the received message.'''
readable = select.select(
self.fsm.router.router_settings['inputs'].values(), [], [])
if readable[0]:
self.fsm.router.readable_ports = readable[0]
self.fsm.to_transition("toReadMessage")
def exit(self):
'''Display State exit message'''
print_message("Message Received")
class ReadMessage(State):
'''Class representing the state for reading messages received on the input
sockets'''
def __init__(self, fsm):
super(ReadMessage, self).__init__(fsm)
def enter(self):
print_message("Reading Messages...")
def execute(self):
for port in self.fsm.router.readable_ports:
packet = RIPPacket(port.recvfrom(1024)[0])
self.fsm.router.update_routing_table(packet)
if self.fsm.router.route_change:
self.fsm.router.trigger_update()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
print_message("Messages Read.")
# ===========================================================================
# FINITE STATE MACHINE
class RouterFSM():
'''Class representing the Router finite state machine'''
def __init__(self, rip_router):
self.router = rip_router
self.states = {}
self.transitions = {}
self.cur_state = None
self.trans = None
def add_transistion(self, trans_name, transition):
'''Add a new transition to the FSM'''
self.transitions[trans_name] = transition
def add_state(self, state_name, state):
'''Add a new state to the FSM'''
self.states[state_name] = state
def set_state(self, state_name):
'''Set the current state of the FSM'''
self.cur_state = self.states[state_name]
def to_transition(self, to_trans):
'''Set the current transition of the FSM'''
self.trans = self.transitions[to_trans]
def execute(self):
'''Run the FSM'''
if self.trans:
self.cur_state.exit()
self.trans.execute()
self.set_state(self.trans.to_state)
self.cur_state.enter()
self.trans = None
self.cur_state.execute()
# ===========================================================================
# IMPLEMENTATION
class RIPPacket:
'''Class representing a RIP packet containing a header and body as defined
in RFC2453 RIPv2 section 4.'''
def __init__(self, data=None, header=None, rtes=None):
if data:
self._init_from_network(data)
elif header and rtes:
self._init_from_host(header, rtes)
else:
raise ValueError
def __repr__(self):
return "RIPPacket: Command {}, Ver. {}, number of RTEs {}.". \
format(self.header.cmd, self.header.ver, len(self.rtes))
def _init_from_network(self, data):
'''Init for RIPPacket if data is from the network'''
# Packet Validation
datalen = len(data)
if datalen < RIPHeader.SIZE:
raise FormatException
malformed_rtes = (datalen - RIPHeader.SIZE) % RIPRouteEntry.SIZE
if malformed_rtes:
raise FormatException
# Convert bytes in packet to header and RTE data
num_rtes = int((datalen - RIPHeader.SIZE) / RIPRouteEntry.SIZE)
self.header = RIPHeader(data[0:RIPHeader.SIZE])
self.rtes = []
rte_start = RIPHeader.SIZE
rte_end = RIPHeader.SIZE + RIPRouteEntry.SIZE
# Loop over data packet to obtain each RTE
for i in range(num_rtes):
self.rtes.append(RIPRouteEntry(rawdata=data[rte_start:rte_end],
src_id=self.header.src))
rte_start += RIPRouteEntry.SIZE
rte_end += RIPRouteEntry.SIZE
def _init_from_host(self, header, rtes):
'''Init for imported data'''
if header.ver != 2:
raise ValueError("Only Version 2 is supported.")
self.header = header
self.rtes = rtes
def serialize(self):
'''Return the byte sting representing this packet for network
transmission'''
packed = self.header.serialize()
for rte in self.rtes:
packed += rte.serialize()
return packed
class RIPHeader:
'''Class representing the header of a RIP packet'''
FORMAT = "!BBH"
SIZE = struct.calcsize(FORMAT)
TYPE_RESPONSE = 2
VERSION = 2
def __init__(self, rawdata=None, router_id=None):
self.packed = None
if rawdata:
self._init_from_network(rawdata)
elif router_id:
self._init_from_host(router_id)
else:
raise ValueError
def __repr__(self):
return "RIP Header (cmd = {}, ver = {}, src = {})".format(self.cmd,
self.ver,
self.src)
def _init_from_network(self, rawdata):
'''init for data from network'''
header = struct.unpack(self.FORMAT, rawdata)
self.cmd = header[0]
self.ver = header[1]
self.src = header[2]
def _init_from_host(self, router_id):
'''Init for data from host'''
self.cmd = self.TYPE_RESPONSE
self.ver = self.VERSION
self.src = router_id
def serialize(self):
'''Return the byte sting representing this header for network
transmission'''
return struct.pack(self.FORMAT, self.cmd, self.ver, self.src)
class RIPRouteEntry:
'''Class representing a single RIP route entry (RTE)'''
FORMAT = "!HHIII"
SIZE = struct.calcsize(FORMAT)
MIN_METRIC = 0
MAX_METRIC = 16
def __init__(self, rawdata=None, src_id=None, address=None,
nexthop=None, metric=None, imported=False):
self.changed = False
self.imported = imported
self.init_timeout()
if rawdata and src_id != None:
self._init_from_network(rawdata, src_id)
elif address and nexthop != None and metric != None:
self._init_from_host(address, nexthop, metric)
else:
raise ValueError
def __repr__(self):
template = "|{:^11}|{:^10}|{:^11}|{:^15}|{:^10}|{:^13}|"
# Check that timeout is set
if self.timeout == None:
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
str(self.timeout))
else:
timeout = (datetime.datetime.now() - self.timeout).total_seconds()
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
round(timeout, 1))
def _init_from_host(self, address, nexthop, metric):
'''Init for data from host'''
self.afi = AF_INET
self.tag = 0 # not used
self.addr = address
self.nexthop = nexthop
self.metric = metric
def _init_from_network(self, rawdata, src_id):
'''Init for data received from network'''
rte = struct.unpack(self.FORMAT, rawdata)
self.afi = rte[0]
self.tag = rte[1]
self.addr = rte[2]
self.set_nexthop(rte[3])
self.metric = rte[4]
if self.nexthop == 0:
self.nexthop = src_id
# Validation
if not self.MIN_METRIC <= self.metric <= self.MAX_METRIC:
raise FormatException
def init_timeout(self):
'''Initialize the timeout property'''
if self.imported:
self.timeout = None
else:
self.timeout = datetime.datetime.now()
self.garbage = False
self.marked_for_delection = False
def __eq__(self, other):
if self.afi == other.afi and \
self.addr == other.addr and \
self.tag == other.tag and \
self.nexthop == other.nexthop and \
self.metric == other.metric:
return True
else:
return False
def set_nexthop(self, nexthop):
'''Set the nexthop property'''
self.nexthop = nexthop
def serialize(self):
'''Pack entries into typical RIPv2 packet format for sending over the
network. '''
return struct.pack(self.FORMAT, self.afi, self.tag, self.addr,
self.nexthop, self.metric)
class FormatException(Exception):
'''Class representing the Format Exception'''
def __init__(self, message=""):
self.message = message
class Router:
'''Class representing a single router'''
def __init__(self, config_file):
self.fsm = RouterFSM(self)
self.config_file = config_file
# Dictionary of router settings, including router-id, inputs and
# outputs
self.router_settings = {}
self.readable_ports = []
# Dictionary of routing table
self.routing_table = {}
self.route_change = False
# STATES
self.fsm.add_state("StartUp", StartUp(self.fsm))
self.fsm.add_state("Waiting", Waiting(self.fsm))
self.fsm.add_state("ReadMessage", ReadMessage(self.fsm))
# TRANSITIONS
self.fsm.add_transistion("toWaiting", Transistion("Waiting"))
self.fsm.add_transistion("toReadMessage", Transistion("ReadMessage"))
self.fsm.set_state("StartUp")
def execute(self):
'''Run the router's finite state machine'''
self.fsm.execute()
def update_routing_table(self, packet):
'''Update Routing table if new route info exist'''
for rte in packet.rtes:
# ignore RTEs of self
if rte.addr != self.fsm.router.router_settings['id']:
bestroute = self.routing_table.get(rte.addr)
# set nexthop to source router and calculate metric
rte.set_nexthop(packet.header.src)
rte.metric = min(rte.metric +
self.router_settings['outputs'][
packet.header.src]['metric'],
RIPRouteEntry.MAX_METRIC)
# Route dosn't yet exist
if not bestroute:
# ignore RTEs with a metric of MAX_METRIC
if rte.metric == RIPRouteEntry.MAX_METRIC:
return
# Add new RTE to routing table
rte.changed = True
self.route_change = True
self.routing_table[rte.addr] = rte
print_message("RTE added for Router: " + str(rte.addr))
return
else:
# Route already exists
if rte.nexthop == bestroute.nexthop:
if bestroute.metric != rte.metric:
if bestroute.metric != RIPRouteEntry.MAX_METRIC \
and rte.metric >= RIPRouteEntry.MAX_METRIC:
# mark for garbage collection
bestroute.metric = RIPRouteEntry.MAX_METRIC
bestroute.garbage = True
bestroute.changed = True
self.route_change = True
else:
self.update_route(bestroute, rte)
# Route still exists with same values
elif not bestroute.garbage:
bestroute.init_timeout()
# Lower metric on existing route
elif rte.metric < bestroute.metric:
self.update_route(bestroute, rte)
def update_route(self, bestroute, rte):
'''Update an existing route entry with new route info'''
bestroute.init_timeout()
bestroute.garbage = False
bestroute.changed = True
bestroute.metric = rte.metric
bestroute.nexthop = rte.nexthop
self.route_change = True
print_message("RTE for Router: " + str(rte.addr) +
" updated with metric=" + str(rte.metric) +
", nexthop=" + str(rte.nexthop) + ".")
def print_routing_table(self):
'''Print the routing table to the terminal'''
line = "+-----------+----------+-----------+---------------+----------+-------------+"
print(line)
print(
"| Routing Table (Router "
+ str(self.router_settings['id']) + ") |")
print(line)
print(
"|Router ID | Metric | NextHop | ChangedFlag | Garbage | Timeout(s) |")
print(line)
print(self.routing_table[self.router_settings['id']])
print(
"+===========+==========+===========+===============+==========+=============+")
for entry in self.routing_table:
if entry != self.router_settings['id']:
print(self.routing_table[entry])
print(line)
print('\n')
def trigger_update(self):
'''Send Routing update for only the routes which have changed'''
changed_rtes = []
print_message("Sending Trigger update.")
for rte in self.routing_table.values():
if rte.changed:
changed_rtes.append(rte)
rte.changed = False
self.route_change = False
# send update with random delay between 1 and 5 seconds
delay = randint(1, 5)
threading.Timer(delay, self.update, [changed_rtes])
def update(self, entries):
'''Send a message to all output ports'''
if self.router_settings != {}:
sock = list(self.router_settings['inputs'].values())[1]
local_header = RIPHeader(router_id=self.router_settings['id'])
for output in self.router_settings['outputs']:
# Split horizon
# Remove RTES for which nexthop == output
split_horizon_entries = []
for entry in entries:
if entry.nexthop != output:
split_horizon_entries.append(entry)
else:
# Poison reverse
# Create new entry to get around some funky referencing
# When doing poisoned_entry = entry
poisoned_entry = RIPRouteEntry(rawdata=None,
src_id=None, address=entry.addr,
nexthop=entry.nexthop, metric= RIPRouteEntry.MAX_METRIC,
imported=entry.imported)
split_horizon_entries.append(poisoned_entry)
# comment out to disable split horizon
packet = RIPPacket(
header=local_header, rtes=split_horizon_entries)
# Uncomment to disable split horizon
# packet = RIPPacket(header=local_header, rtes=entries)
sock.sendto(packet.serialize(),
(HOST,
self.router_settings['outputs'][output]["port"]))
print_message("Message Sent To Router: " + str(output))
def check_timeout(self):
'''Check the current timeout value for each RTE in the routing table.
If the time difference with now is greater than ROUTE_TIMEOUT, then
set the metric to 16 and start the garbage collection timer.'''
print_message("Checking timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.timeout != None and \
(datetime.datetime.now() - rte.timeout).total_seconds() \
>= ROUTE_TIMEOUT:
rte.garbage = True
rte.changed = True
self.route_change = True
rte.metric = RIPRouteEntry.MAX_METRIC
rte.timeout = datetime.datetime.now()
self.print_routing_table()
print_message("Router: " + str(rte.addr) + " timed out.")
def garbage_timer(self):
'''Check the status of the garbage property of each RTE. If true, and
the timeout value difference with now is greater than DELETE_TIMEOUT,
mark it for deletion'''
print_message("Checking garbage timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.garbage:
if (datetime.datetime.now() - rte.timeout).total_seconds() \
>= DELETE_TIMEOUT:
rte.marked_for_delection = True
def garbage_collection(self):
'''Check the routing table for RTE's that are marked for deletion and
remove them.'''
print_message("Collecting Garbage...")
if self.routing_table != {}:
delete_routes = []
for rte in self.routing_table.values():
if rte.marked_for_delection:
delete_routes.append(rte.addr)
print_message("Router: " + str(rte.addr) + " has been " +
"removed from the routing table.")
for entry in delete_routes:
del self.routing_table[entry]
self.print_routing_table()
def timer(self, function, param=None):
'''Start a periodic timer which calls a specified function'''
if param != None:
function(list(param.values()))
period = BASE_TIMER * randrange(8, 12, 1) / 10
else:
period = BASE_TIMER
function()
threading.Timer(period, self.timer, [function, param]).start()
def start_timers(self):
'''Start the timers on separate threads'''
self.timer(self.update, param=self.routing_table)
self.timer(self.check_timeout)
self.timer(self.garbage_timer)
self.timer(self.garbage_collection)
def main_loop(self):
'''Start the main loop for the program.'''
while True:
self.execute()
# RUN THE PROGRAM
def print_message(message):
'''Print the given message with the current time before it'''
if DEBUG:
print("[" + time.strftime("%H:%M:%S") + "]: " + message)
def main():
'''Main function to run the program.'''
if __name__ == "__main__":
router = Router(str(sys.argv[-1]))
router.start_timers()
router.main_loop()
main()
| [
"struct.calcsize",
"configparser.ConfigParser",
"socket.socket",
"random.randrange",
"threading.Timer",
"time.strftime",
"struct.pack",
"datetime.datetime.now",
"struct.unpack",
"sys.exit",
"random.randint"
]
| [((15093, 15116), 'struct.calcsize', 'struct.calcsize', (['FORMAT'], {}), '(FORMAT)\n', (15108, 15116), False, 'import struct\n'), ((16427, 16450), 'struct.calcsize', 'struct.calcsize', (['FORMAT'], {}), '(FORMAT)\n', (16442, 16450), False, 'import struct\n'), ((6336, 6363), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (6361, 6363), False, 'import configparser\n'), ((15792, 15827), 'struct.unpack', 'struct.unpack', (['self.FORMAT', 'rawdata'], {}), '(self.FORMAT, rawdata)\n', (15805, 15827), False, 'import struct\n'), ((16246, 16300), 'struct.pack', 'struct.pack', (['self.FORMAT', 'self.cmd', 'self.ver', 'self.src'], {}), '(self.FORMAT, self.cmd, self.ver, self.src)\n', (16257, 16300), False, 'import struct\n'), ((18000, 18035), 'struct.unpack', 'struct.unpack', (['self.FORMAT', 'rawdata'], {}), '(self.FORMAT, rawdata)\n', (18013, 18035), False, 'import struct\n'), ((19227, 19314), 'struct.pack', 'struct.pack', (['self.FORMAT', 'self.afi', 'self.tag', 'self.addr', 'self.nexthop', 'self.metric'], {}), '(self.FORMAT, self.afi, self.tag, self.addr, self.nexthop, self.\n metric)\n', (19238, 19314), False, 'import struct\n'), ((24673, 24686), 'random.randint', 'randint', (['(1)', '(5)'], {}), '(1, 5)\n', (24680, 24686), False, 'from random import randint, randrange\n'), ((24696, 24747), 'threading.Timer', 'threading.Timer', (['delay', 'self.update', '[changed_rtes]'], {}), '(delay, self.update, [changed_rtes])\n', (24711, 24747), False, 'import threading\n'), ((18561, 18584), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (18582, 18584), False, 'import datetime\n'), ((8835, 8883), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (8848, 8883), False, 'import socket\n'), ((29020, 29074), 'threading.Timer', 'threading.Timer', (['period', 'self.timer', '[function, param]'], {}), '(period, self.timer, [function, param])\n', (29035, 29074), False, 'import threading\n'), ((9082, 9092), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9090, 9092), False, 'import sys\n'), ((9459, 9469), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9467, 9469), False, 'import sys\n'), ((27251, 27274), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (27272, 27274), False, 'import datetime\n'), ((28912, 28931), 'random.randrange', 'randrange', (['(8)', '(12)', '(1)'], {}), '(8, 12, 1)\n', (28921, 28931), False, 'from random import randint, randrange\n'), ((17379, 17402), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17400, 17402), False, 'import datetime\n'), ((29638, 29663), 'time.strftime', 'time.strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (29651, 29663), False, 'import time\n'), ((26936, 26959), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26957, 26959), False, 'import datetime\n'), ((27830, 27853), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (27851, 27853), False, 'import datetime\n')] |
from django.db import models
class SiteSettings(models.Model):
site_name = models.CharField(max_length=200 , verbose_name='Site Name')
site_url = models.CharField(max_length=200 , verbose_name='Site URL')
site_address = models.CharField(max_length=300 , verbose_name='Site Address')
site_phone = models.CharField(max_length=100 , null=True , blank=True , verbose_name='Site Phone')
site_fax = models.CharField(max_length=200 , null=True , blank=True , verbose_name='Site Fax')
site_email = models.EmailField(max_length=200 , null=True , blank=True , verbose_name='Site Email')
about_us_text = models.TextField(verbose_name='About Us Text')
site_copy_right = models.TextField(verbose_name='Copyright Text')
site_logo = models.ImageField(upload_to='images/site-setting/' , verbose_name='Site Logo')
is_main_setting = models.BooleanField(verbose_name='Site Main Settings')
def __str__(self) -> str:
super(SiteSettings , self).__str__()
return self.site_name
class Meta:
verbose_name = 'Site Setting'
verbose_name_plural = 'Site Settings'
class FooterLinkBox(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
def __str__(self) -> str:
super(FooterLinkBox , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link Setting'
verbose_name_plural = 'Footer Link Settings'
class FooterLink(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
url = models.URLField(max_length=500 , verbose_name='Links')
footer_link_box = models.ForeignKey(to=FooterLinkBox , verbose_name='Category' , on_delete=models.CASCADE)
def __str__(self) -> str:
super(FooterLink , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link'
verbose_name_plural = 'Footer Links'
class Slider(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
description = models.TextField(verbose_name='Slider Description')
url_title = models.CharField(max_length=200 , verbose_name='URL Title')
url = models.URLField(max_length=200 , verbose_name='URL Address')
image = models.ImageField(upload_to='images/sliders' , verbose_name='Slider Image')
is_active = models.BooleanField(default=False , verbose_name='Active / Inactive')
def __str__(self) -> str:
super(Slider , self).__str__()
return self.title
class Meta:
verbose_name = 'Slider'
verbose_name_plural = 'Sliders' | [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.URLField",
"django.db.models.CharField"
]
| [((83, 141), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Site Name"""'}), "(max_length=200, verbose_name='Site Name')\n", (99, 141), False, 'from django.db import models\n'), ((158, 215), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Site URL"""'}), "(max_length=200, verbose_name='Site URL')\n", (174, 215), False, 'from django.db import models\n'), ((236, 297), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'verbose_name': '"""Site Address"""'}), "(max_length=300, verbose_name='Site Address')\n", (252, 297), False, 'from django.db import models\n'), ((316, 403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)', 'verbose_name': '"""Site Phone"""'}), "(max_length=100, null=True, blank=True, verbose_name=\n 'Site Phone')\n", (332, 403), False, 'from django.db import models\n'), ((417, 502), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)', 'verbose_name': '"""Site Fax"""'}), "(max_length=200, null=True, blank=True, verbose_name='Site Fax'\n )\n", (433, 502), False, 'from django.db import models\n'), ((518, 606), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)', 'verbose_name': '"""Site Email"""'}), "(max_length=200, null=True, blank=True, verbose_name=\n 'Site Email')\n", (535, 606), False, 'from django.db import models\n'), ((625, 671), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""About Us Text"""'}), "(verbose_name='About Us Text')\n", (641, 671), False, 'from django.db import models\n'), ((694, 741), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Copyright Text"""'}), "(verbose_name='Copyright Text')\n", (710, 741), False, 'from django.db import models\n'), ((758, 835), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/site-setting/"""', 'verbose_name': '"""Site Logo"""'}), "(upload_to='images/site-setting/', verbose_name='Site Logo')\n", (775, 835), False, 'from django.db import models\n'), ((859, 913), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""Site Main Settings"""'}), "(verbose_name='Site Main Settings')\n", (878, 913), False, 'from django.db import models\n'), ((1213, 1267), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Title"""'}), "(max_length=200, verbose_name='Title')\n", (1229, 1267), False, 'from django.db import models\n'), ((1577, 1631), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Title"""'}), "(max_length=200, verbose_name='Title')\n", (1593, 1631), False, 'from django.db import models\n'), ((1643, 1696), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(500)', 'verbose_name': '"""Links"""'}), "(max_length=500, verbose_name='Links')\n", (1658, 1696), False, 'from django.db import models\n'), ((1720, 1811), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'FooterLinkBox', 'verbose_name': '"""Category"""', 'on_delete': 'models.CASCADE'}), "(to=FooterLinkBox, verbose_name='Category', on_delete=\n models.CASCADE)\n", (1737, 1811), False, 'from django.db import models\n'), ((2101, 2155), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Title"""'}), "(max_length=200, verbose_name='Title')\n", (2117, 2155), False, 'from django.db import models\n'), ((2175, 2226), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Slider Description"""'}), "(verbose_name='Slider Description')\n", (2191, 2226), False, 'from django.db import models\n'), ((2243, 2301), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""URL Title"""'}), "(max_length=200, verbose_name='URL Title')\n", (2259, 2301), False, 'from django.db import models\n'), ((2313, 2372), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(200)', 'verbose_name': '"""URL Address"""'}), "(max_length=200, verbose_name='URL Address')\n", (2328, 2372), False, 'from django.db import models\n'), ((2386, 2460), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/sliders"""', 'verbose_name': '"""Slider Image"""'}), "(upload_to='images/sliders', verbose_name='Slider Image')\n", (2403, 2460), False, 'from django.db import models\n'), ((2478, 2546), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Active / Inactive"""'}), "(default=False, verbose_name='Active / Inactive')\n", (2497, 2546), False, 'from django.db import models\n')] |
# -*- coding: utf-8 -*-
# vispy: gallery 10
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import sys
import numpy as np
from vispy import app, gloo, visuals
from vispy.visuals.filters import Clipper, ColorFilter
from vispy.visuals.shaders import MultiProgram
from vispy.visuals.collections import PointCollection
from vispy.visuals.transforms import STTransform
from vispy.scene import SceneCanvas
from vispy.scene.visuals import create_visual_node
class LineVisual(visuals.Visual):
"""Example of a very simple GL-line visual.
This shows the minimal set of methods that need to be reimplemented to
make a new visual class.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
vcode = """
attribute vec2 a_pos;
void main() {
gl_Position = $transform(vec4(a_pos, 0., 1.));
gl_PointSize = 10.;
}
"""
fcode = """
void main() {
gl_FragColor = $color;
}
"""
visuals.Visual.__init__(self, vcode=vcode, fcode=fcode)
self.pos_buf = gloo.VertexBuffer()
# The Visual superclass contains a MultiProgram, which is an object
# that behaves like a normal shader program (you can assign shader
# code, upload values, set template variables, etc.) but internally
# manages multiple ModularProgram instances, one per view.
# The MultiProgram is accessed via the `shared_program` property, so
# the following modifications to the program will be applied to all
# views:
self.shared_program['a_pos'] = self.pos_buf
self.shared_program.frag['color'] = color
self._need_upload = False
# Visual keeps track of draw mode, index buffer, and GL state. These
# are shared between all views.
self._draw_mode = 'line_strip'
self.set_gl_state('translucent', depth_test=False)
if pos is not None:
self.set_data(pos)
def set_data(self, pos):
self._pos = pos
self._need_upload = True
def _prepare_transforms(self, view=None):
view.view_program.vert['transform'] = view.transforms.get_transform()
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._need_upload:
# Note that pos_buf is shared between all views, so we have no need
# to use the *view* argument in this example. This will be true
# for most visuals.
self.pos_buf.set_data(self._pos)
self._need_upload = False
class PointVisual(LineVisual):
"""Another simple visual class.
Due to the simplicity of these example classes, it was only necessary to
subclass from LineVisual and set the draw mode to 'points'. A more
fully-featured PointVisual class might not follow this approach.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
LineVisual.__init__(self, pos, color)
self._draw_mode = 'points'
class PlotLineVisual(visuals.CompoundVisual):
"""An example compound visual that draws lines and points.
To the user, the compound visual behaves exactly like a normal visual--it
has a transform system, draw() and bounds() methods, etc. Internally, the
compound visual automatically manages proxying these transforms and methods
to its sub-visuals.
"""
def __init__(self, pos=None, line_color=(1, 1, 1, 1),
point_color=(1, 1, 1, 1)):
self._line = LineVisual(pos, color=line_color)
self._point = PointVisual(pos, color=point_color)
visuals.CompoundVisual.__init__(self, [self._line, self._point])
class PointCollectionVisual(visuals.Visual):
"""Thin wrapper around a point collection.
Note: This is currently broken!
"""
def __init__(self):
prog = MultiProgram(vcode='', fcode='')
self.points = PointCollection("agg", color="shared", program=prog)
visuals.Visual.__init__(self, program=prog)
def _prepare_draw(self, view):
if self.points._need_update:
self.points._update()
self._draw_mode = self.points._mode
self._index_buffer = self.points._indices_buffer
def append(self, *args, **kwargs):
self.points.append(*args, **kwargs)
def _prepare_transforms(self, view=None):
pass
@property
def color(self):
return self.points['color']
@color.setter
def color(self, c):
self.points['color'] = c
class PanZoomTransform(STTransform):
def __init__(self, canvas=None, aspect=None, **kwargs):
self._aspect = aspect
self.attach(canvas)
STTransform.__init__(self, **kwargs)
def attach(self, canvas):
""" Attach this tranform to a canvas """
self._canvas = canvas
canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
canvas.events.mouse_move.connect(self.on_mouse_move)
def on_mouse_move(self, event):
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
self.move(dxy)
elif button == 2:
center = event.press_event.pos
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
def on_mouse_wheel(self, event):
self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos)
canvas = app.Canvas(keys='interactive', size=(900, 600), show=True,
title="Visual Canvas")
pos = np.random.normal(size=(1000, 2), loc=0, scale=50).astype('float32')
pos[0] = [0, 0]
# Make a line visual
line = LineVisual(pos=pos)
line.transforms.canvas = canvas
line.transform = STTransform(scale=(2, 1), translate=(20, 20))
panzoom = PanZoomTransform(canvas)
line.transforms.scene_transform = panzoom
panzoom.changed.connect(lambda ev: canvas.update())
# Attach color filter to all views (current and future) of the visual
line.attach(ColorFilter((1, 1, 0.5, 0.7)))
# Attach a clipper just to this view. The Clipper filter requires a
# transform that maps from the framebuffer coordinate system to the
# clipping coordinates.
tr = line.transforms.get_transform('framebuffer', 'canvas')
line.attach(Clipper((20, 20, 260, 260), transform=tr), view=line)
# Make a view of the line that will draw its shadow
shadow = line.view()
shadow.transforms.canvas = canvas
shadow.transform = STTransform(scale=(2, 1), translate=(25, 25))
shadow.transforms.scene_transform = panzoom
shadow.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow)
tr = shadow.transforms.get_transform('framebuffer', 'canvas')
shadow.attach(Clipper((20, 20, 260, 260), transform=tr), view=shadow)
# And make a second view of the line with different clipping bounds
view = line.view()
view.transforms.canvas = canvas
view.transform = STTransform(scale=(2, 0.5), translate=(450, 150))
tr = view.transforms.get_transform('framebuffer', 'canvas')
view.attach(Clipper((320, 20, 260, 260), transform=tr), view=view)
# Make a compound visual
plot = PlotLineVisual(pos, (0.5, 1, 0.5, 0.2), (0.5, 1, 1, 0.3))
plot.transforms.canvas = canvas
plot.transform = STTransform(translate=(80, 450), scale=(1.5, 1))
tr = plot.transforms.get_transform('framebuffer', 'canvas')
plot.attach(Clipper((20, 320, 260, 260), transform=tr), view=plot)
# And make a view on the compound
view2 = plot.view()
view2.transforms.canvas = canvas
view2.transform = STTransform(scale=(1.5, 1), translate=(450, 400))
tr = view2.transforms.get_transform('framebuffer', 'canvas')
view2.attach(Clipper((320, 320, 260, 260), transform=tr), view=view2)
# And a shadow for the view
shadow2 = plot.view()
shadow2.transforms.canvas = canvas
shadow2.transform = STTransform(scale=(1.5, 1), translate=(455, 405))
shadow2.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow2)
tr = shadow2.transforms.get_transform('framebuffer', 'canvas')
shadow2.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
# Example of a collection visual
collection = PointCollectionVisual()
collection.transforms.canvas = canvas
collection.transform = STTransform(translate=(750, 150))
collection.append(np.random.normal(loc=0, scale=20, size=(10000, 3)),
itemsize=5000)
collection.color = (1, 0.5, 0.5, 1), (0.5, 0.5, 1, 1)
shadow3 = collection.view()
shadow3.transforms.canvas = canvas
shadow3.transform = STTransform(scale=(1, 1), translate=(752, 152))
shadow3.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow3)
# tr = shadow3.transforms.get_transform('framebuffer', 'canvas')
# shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
order = [shadow, line, view, plot, shadow2, view2, shadow3, collection]
@canvas.connect
def on_draw(event):
canvas.context.clear((0.3, 0.3, 0.3, 1.0))
for v in order:
v.draw()
def on_resize(event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, canvas.physical_size[0], canvas.physical_size[1])
canvas.context.set_viewport(*vp)
for v in order:
v.transforms.configure(canvas=canvas, viewport=vp)
canvas.events.resize.connect(on_resize)
on_resize(None)
Line = create_visual_node(LineVisual)
canvas2 = SceneCanvas(keys='interactive', title='Scene Canvas', show=True)
v = canvas2.central_widget.add_view(margin=10)
v.border_color = (1, 1, 1, 1)
v.bgcolor = (0.3, 0.3, 0.3, 1)
v.camera = 'panzoom'
line2 = Line(pos, parent=v.scene)
def mouse(ev):
print(ev)
v.events.mouse_press.connect(mouse)
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
| [
"vispy.app.Canvas",
"numpy.random.normal",
"vispy.gloo.VertexBuffer",
"vispy.visuals.CompoundVisual.__init__",
"vispy.visuals.filters.Clipper",
"vispy.visuals.Visual.__init__",
"vispy.scene.SceneCanvas",
"vispy.scene.visuals.create_visual_node",
"numpy.exp",
"vispy.visuals.shaders.MultiProgram",
"numpy.array",
"vispy.visuals.filters.ColorFilter",
"vispy.visuals.transforms.STTransform.__init__",
"vispy.app.run",
"vispy.visuals.collections.PointCollection",
"vispy.visuals.transforms.STTransform"
]
| [((5805, 5891), 'vispy.app.Canvas', 'app.Canvas', ([], {'keys': '"""interactive"""', 'size': '(900, 600)', 'show': '(True)', 'title': '"""Visual Canvas"""'}), "(keys='interactive', size=(900, 600), show=True, title=\n 'Visual Canvas')\n", (5815, 5891), False, 'from vispy import app, gloo, visuals\n'), ((6096, 6141), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'scale': '(2, 1)', 'translate': '(20, 20)'}), '(scale=(2, 1), translate=(20, 20))\n', (6107, 6141), False, 'from vispy.visuals.transforms import STTransform\n'), ((6800, 6845), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'scale': '(2, 1)', 'translate': '(25, 25)'}), '(scale=(2, 1), translate=(25, 25))\n', (6811, 6845), False, 'from vispy.visuals.transforms import STTransform\n'), ((7215, 7264), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'scale': '(2, 0.5)', 'translate': '(450, 150)'}), '(scale=(2, 0.5), translate=(450, 150))\n', (7226, 7264), False, 'from vispy.visuals.transforms import STTransform\n'), ((7532, 7580), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'translate': '(80, 450)', 'scale': '(1.5, 1)'}), '(translate=(80, 450), scale=(1.5, 1))\n', (7543, 7580), False, 'from vispy.visuals.transforms import STTransform\n'), ((7815, 7864), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'scale': '(1.5, 1)', 'translate': '(450, 400)'}), '(scale=(1.5, 1), translate=(450, 400))\n', (7826, 7864), False, 'from vispy.visuals.transforms import STTransform\n'), ((8102, 8151), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'scale': '(1.5, 1)', 'translate': '(455, 405)'}), '(scale=(1.5, 1), translate=(455, 405))\n', (8113, 8151), False, 'from vispy.visuals.transforms import STTransform\n'), ((8479, 8512), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'translate': '(750, 150)'}), '(translate=(750, 150))\n', (8490, 8512), False, 'from vispy.visuals.transforms import STTransform\n'), ((8755, 8802), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'scale': '(1, 1)', 'translate': '(752, 152)'}), '(scale=(1, 1), translate=(752, 152))\n', (8766, 8802), False, 'from vispy.visuals.transforms import STTransform\n'), ((9537, 9567), 'vispy.scene.visuals.create_visual_node', 'create_visual_node', (['LineVisual'], {}), '(LineVisual)\n', (9555, 9567), False, 'from vispy.scene.visuals import create_visual_node\n'), ((9578, 9642), 'vispy.scene.SceneCanvas', 'SceneCanvas', ([], {'keys': '"""interactive"""', 'title': '"""Scene Canvas"""', 'show': '(True)'}), "(keys='interactive', title='Scene Canvas', show=True)\n", (9589, 9642), False, 'from vispy.scene import SceneCanvas\n'), ((6354, 6383), 'vispy.visuals.filters.ColorFilter', 'ColorFilter', (['(1, 1, 0.5, 0.7)'], {}), '((1, 1, 0.5, 0.7))\n', (6365, 6383), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((6619, 6660), 'vispy.visuals.filters.Clipper', 'Clipper', (['(20, 20, 260, 260)'], {'transform': 'tr'}), '((20, 20, 260, 260), transform=tr)\n', (6626, 6660), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((6904, 6931), 'vispy.visuals.filters.ColorFilter', 'ColorFilter', (['(0, 0, 0, 0.6)'], {}), '((0, 0, 0, 0.6))\n', (6915, 6931), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((7022, 7063), 'vispy.visuals.filters.Clipper', 'Clipper', (['(20, 20, 260, 260)'], {'transform': 'tr'}), '((20, 20, 260, 260), transform=tr)\n', (7029, 7063), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((7337, 7379), 'vispy.visuals.filters.Clipper', 'Clipper', (['(320, 20, 260, 260)'], {'transform': 'tr'}), '((320, 20, 260, 260), transform=tr)\n', (7344, 7379), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((7653, 7695), 'vispy.visuals.filters.Clipper', 'Clipper', (['(20, 320, 260, 260)'], {'transform': 'tr'}), '((20, 320, 260, 260), transform=tr)\n', (7660, 7695), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((7939, 7982), 'vispy.visuals.filters.Clipper', 'Clipper', (['(320, 320, 260, 260)'], {'transform': 'tr'}), '((320, 320, 260, 260), transform=tr)\n', (7946, 7982), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((8167, 8194), 'vispy.visuals.filters.ColorFilter', 'ColorFilter', (['(0, 0, 0, 0.6)'], {}), '((0, 0, 0, 0.6))\n', (8178, 8194), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((8288, 8331), 'vispy.visuals.filters.Clipper', 'Clipper', (['(320, 320, 260, 260)'], {'transform': 'tr'}), '((320, 320, 260, 260), transform=tr)\n', (8295, 8331), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((8531, 8581), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(20)', 'size': '(10000, 3)'}), '(loc=0, scale=20, size=(10000, 3))\n', (8547, 8581), True, 'import numpy as np\n'), ((8818, 8845), 'vispy.visuals.filters.ColorFilter', 'ColorFilter', (['(0, 0, 0, 0.6)'], {}), '((0, 0, 0, 0.6))\n', (8829, 8845), False, 'from vispy.visuals.filters import Clipper, ColorFilter\n'), ((1099, 1154), 'vispy.visuals.Visual.__init__', 'visuals.Visual.__init__', (['self'], {'vcode': 'vcode', 'fcode': 'fcode'}), '(self, vcode=vcode, fcode=fcode)\n', (1122, 1154), False, 'from vispy import app, gloo, visuals\n'), ((1179, 1198), 'vispy.gloo.VertexBuffer', 'gloo.VertexBuffer', ([], {}), '()\n', (1196, 1198), False, 'from vispy import app, gloo, visuals\n'), ((3816, 3880), 'vispy.visuals.CompoundVisual.__init__', 'visuals.CompoundVisual.__init__', (['self', '[self._line, self._point]'], {}), '(self, [self._line, self._point])\n', (3847, 3880), False, 'from vispy import app, gloo, visuals\n'), ((4060, 4092), 'vispy.visuals.shaders.MultiProgram', 'MultiProgram', ([], {'vcode': '""""""', 'fcode': '""""""'}), "(vcode='', fcode='')\n", (4072, 4092), False, 'from vispy.visuals.shaders import MultiProgram\n'), ((4115, 4167), 'vispy.visuals.collections.PointCollection', 'PointCollection', (['"""agg"""'], {'color': '"""shared"""', 'program': 'prog'}), "('agg', color='shared', program=prog)\n", (4130, 4167), False, 'from vispy.visuals.collections import PointCollection\n'), ((4176, 4219), 'vispy.visuals.Visual.__init__', 'visuals.Visual.__init__', (['self'], {'program': 'prog'}), '(self, program=prog)\n', (4199, 4219), False, 'from vispy import app, gloo, visuals\n'), ((4885, 4921), 'vispy.visuals.transforms.STTransform.__init__', 'STTransform.__init__', (['self'], {}), '(self, **kwargs)\n', (4905, 4921), False, 'from vispy.visuals.transforms import STTransform\n'), ((5914, 5963), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000, 2)', 'loc': '(0)', 'scale': '(50)'}), '(size=(1000, 2), loc=0, scale=50)\n', (5930, 5963), True, 'import numpy as np\n'), ((9945, 9954), 'vispy.app.run', 'app.run', ([], {}), '()\n', (9952, 9954), False, 'from vispy import app, gloo, visuals\n'), ((5746, 5781), 'numpy.exp', 'np.exp', (['(event.delta * (0.01, -0.01))'], {}), '(event.delta * (0.01, -0.01))\n', (5752, 5781), True, 'import numpy as np\n'), ((5528, 5555), 'numpy.exp', 'np.exp', (['(dxy * (0.01, -0.01))'], {}), '(dxy * (0.01, -0.01))\n', (5534, 5555), True, 'import numpy as np\n'), ((5663, 5679), 'numpy.array', 'np.array', (['[s, s]'], {}), '([s, s])\n', (5671, 5679), True, 'import numpy as np\n')] |
from unittest import TestCase
from datetime import datetime
import pyarrow as pa
import numpy as np
import pandas as pd
from h1st.schema import SchemaInferrer
class SchemaInferrerTestCase(TestCase):
def test_infer_python(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema(1), pa.int64())
self.assertEqual(inferrer.infer_schema(1.1), pa.float64())
self.assertEqual(inferrer.infer_schema({
'test1': 1,
'test2': "hello",
'test3': b"hello",
'today': datetime.now(),
}), {
'type': dict,
'fields': {
'test1': pa.int64(),
'test2': pa.string(),
'test3': pa.binary(),
'today': pa.date64(),
}
})
self.assertEqual(inferrer.infer_schema((
1, 2, 3
)), pa.list_(pa.int64()))
self.assertEqual(inferrer.infer_schema((
1.2, 1.3, 1.4
)), pa.list_(pa.float64()))
table = pa.Table.from_arrays(
[pa.array([1, 2, 3]), pa.array(["a", "b", "c"])],
['c1', 'c2']
)
self.assertEqual(inferrer.infer_schema(table), table.schema)
def test_infer_numpy(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema(np.random.random((100, 28, 28))), {
'type': np.ndarray,
'item': pa.float64(),
'shape': (None, 28, 28)
})
self.assertEqual(inferrer.infer_schema(np.array(["1", "2", "3"])), {
'type': np.ndarray,
'item': pa.string()
})
def test_infer_dataframe(self):
inferrer = SchemaInferrer()
df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': ['a', 'b', 'c'],
'f3': [0.1, 0.2, 0.9]
})
self.assertEqual(inferrer.infer_schema(df), {
'type': pd.DataFrame,
'fields': {
'f1': pa.int64(),
'f2': pa.string(),
'f3': pa.float64()
}
})
df = pd.DataFrame({
'Timestamp': [1.1, 2.2, 3.1],
'CarSpeed': [0.1, 0.2, 0.9],
'Gx': [0.1, 0.2, 0.9],
'Gy': [0.1, 0.2, 0.9],
'Label': ['1', '0', '1']
})
self.assertEqual(inferrer.infer_schema(df), {
'type': pd.DataFrame,
'fields': {
'Timestamp': pa.float64(),
'CarSpeed': pa.float64(),
'Gx': pa.float64(),
'Gy': pa.float64(),
'Label': pa.string(),
}
})
self.assertEqual(inferrer.infer_schema(pd.Series([1, 2, 3])), {
'type': pd.Series,
'item': pa.int64()
})
def test_infer_dict(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema({
'test': 123,
}), {
'type': dict,
'fields': {
'test': pa.int64(),
}
})
self.assertEqual(inferrer.infer_schema({
'test': 123,
'indices': [1, 2, 3]
}), {
'type': dict,
'fields': {
'test': pa.int64(),
'indices': pa.list_(pa.int64())
}
})
self.assertEqual(inferrer.infer_schema({
'results': pd.DataFrame({
'CarSpeed': [0, 1, 2],
'Label': ['a', 'b', 'c']
})
}), {
'type': dict,
'fields': {
'results': {
'type': pd.DataFrame,
'fields': {
'CarSpeed': pa.int64(),
'Label': pa.string(),
}
}
}
})
def test_infer_list(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema([
{'test': 123},
{'test': 345},
]), {
'type': list,
'item': {
'type': dict,
'fields': {
'test': pa.int64()
}
}
})
| [
"pandas.Series",
"pyarrow.date64",
"pyarrow.string",
"numpy.random.random",
"pyarrow.binary",
"h1st.schema.SchemaInferrer",
"numpy.array",
"datetime.datetime.now",
"pyarrow.int64",
"pandas.DataFrame",
"pyarrow.array",
"pyarrow.float64"
]
| [((253, 269), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ([], {}), '()\n', (267, 269), False, 'from h1st.schema import SchemaInferrer\n'), ((1286, 1302), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ([], {}), '()\n', (1300, 1302), False, 'from h1st.schema import SchemaInferrer\n'), ((1708, 1724), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ([], {}), '()\n', (1722, 1724), False, 'from h1st.schema import SchemaInferrer\n'), ((1738, 1815), 'pandas.DataFrame', 'pd.DataFrame', (["{'f1': [1, 2, 3], 'f2': ['a', 'b', 'c'], 'f3': [0.1, 0.2, 0.9]}"], {}), "({'f1': [1, 2, 3], 'f2': ['a', 'b', 'c'], 'f3': [0.1, 0.2, 0.9]})\n", (1750, 1815), True, 'import pandas as pd\n'), ((2118, 2267), 'pandas.DataFrame', 'pd.DataFrame', (["{'Timestamp': [1.1, 2.2, 3.1], 'CarSpeed': [0.1, 0.2, 0.9], 'Gx': [0.1, 0.2,\n 0.9], 'Gy': [0.1, 0.2, 0.9], 'Label': ['1', '0', '1']}"], {}), "({'Timestamp': [1.1, 2.2, 3.1], 'CarSpeed': [0.1, 0.2, 0.9],\n 'Gx': [0.1, 0.2, 0.9], 'Gy': [0.1, 0.2, 0.9], 'Label': ['1', '0', '1']})\n", (2130, 2267), True, 'import pandas as pd\n'), ((2864, 2880), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ([], {}), '()\n', (2878, 2880), False, 'from h1st.schema import SchemaInferrer\n'), ((3921, 3937), 'h1st.schema.SchemaInferrer', 'SchemaInferrer', ([], {}), '()\n', (3935, 3937), False, 'from h1st.schema import SchemaInferrer\n'), ((322, 332), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (330, 332), True, 'import pyarrow as pa\n'), ((387, 399), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (397, 399), True, 'import pyarrow as pa\n'), ((904, 914), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (912, 914), True, 'import pyarrow as pa\n'), ((1014, 1026), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (1024, 1026), True, 'import pyarrow as pa\n'), ((1081, 1100), 'pyarrow.array', 'pa.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1089, 1100), True, 'import pyarrow as pa\n'), ((1102, 1127), 'pyarrow.array', 'pa.array', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (1110, 1127), True, 'import pyarrow as pa\n'), ((1350, 1381), 'numpy.random.random', 'np.random.random', (['(100, 28, 28)'], {}), '((100, 28, 28))\n', (1366, 1381), True, 'import numpy as np\n'), ((1438, 1450), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (1448, 1450), True, 'import pyarrow as pa\n'), ((1547, 1572), 'numpy.array', 'np.array', (["['1', '2', '3']"], {}), "(['1', '2', '3'])\n", (1555, 1572), True, 'import numpy as np\n'), ((1629, 1640), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1638, 1640), True, 'import pyarrow as pa\n'), ((2715, 2735), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2724, 2735), True, 'import pandas as pd\n'), ((2791, 2801), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2799, 2801), True, 'import pyarrow as pa\n'), ((557, 571), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (569, 571), False, 'from datetime import datetime\n'), ((662, 672), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (670, 672), True, 'import pyarrow as pa\n'), ((699, 710), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (708, 710), True, 'import pyarrow as pa\n'), ((737, 748), 'pyarrow.binary', 'pa.binary', ([], {}), '()\n', (746, 748), True, 'import pyarrow as pa\n'), ((775, 786), 'pyarrow.date64', 'pa.date64', ([], {}), '()\n', (784, 786), True, 'import pyarrow as pa\n'), ((1997, 2007), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2005, 2007), True, 'import pyarrow as pa\n'), ((2031, 2042), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (2040, 2042), True, 'import pyarrow as pa\n'), ((2066, 2078), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2076, 2078), True, 'import pyarrow as pa\n'), ((2476, 2488), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2486, 2488), True, 'import pyarrow as pa\n'), ((2518, 2530), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2528, 2530), True, 'import pyarrow as pa\n'), ((2554, 2566), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2564, 2566), True, 'import pyarrow as pa\n'), ((2590, 2602), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2600, 2602), True, 'import pyarrow as pa\n'), ((2629, 2640), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (2638, 2640), True, 'import pyarrow as pa\n'), ((3043, 3053), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (3051, 3053), True, 'import pyarrow as pa\n'), ((3276, 3286), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (3284, 3286), True, 'import pyarrow as pa\n'), ((3434, 3497), 'pandas.DataFrame', 'pd.DataFrame', (["{'CarSpeed': [0, 1, 2], 'Label': ['a', 'b', 'c']}"], {}), "({'CarSpeed': [0, 1, 2], 'Label': ['a', 'b', 'c']})\n", (3446, 3497), True, 'import pandas as pd\n'), ((3324, 3334), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (3332, 3334), True, 'import pyarrow as pa\n'), ((4189, 4199), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (4197, 4199), True, 'import pyarrow as pa\n'), ((3747, 3757), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (3755, 3757), True, 'import pyarrow as pa\n'), ((3792, 3803), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (3801, 3803), True, 'import pyarrow as pa\n')] |
"""Auxiliary methods."""
import os
import json
from errno import EEXIST
import numpy as np
import seaborn as sns
import cPickle as pickle
import matplotlib.pyplot as plt
sns.set()
DEFAULT_LOG_DIR = 'log'
ATOB_WEIGHTS_FILE = 'atob_weights.h5'
D_WEIGHTS_FILE = 'd_weights.h5'
class MyDict(dict):
"""
Dictionary that allows to access elements with dot notation.
ex:
>> d = MyDict({'key': 'val'})
>> d.key
'val'
>> d.key2 = 'val2'
>> d
{'key2': 'val2', 'key': 'val'}
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
def convert_to_rgb(img, is_binary=False):
"""Given an image, make sure it has 3 channels and that it is between 0 and 1."""
if len(img.shape) != 3:
raise Exception("""Image must have 3 dimensions (channels x height x width). """
"""Given {0}""".format(len(img.shape)))
img_ch, _, _ = img.shape
if img_ch != 3 and img_ch != 1:
raise Exception("""Unsupported number of channels. """
"""Must be 1 or 3, given {0}.""".format(img_ch))
imgp = img
if img_ch == 1:
imgp = np.repeat(img, 3, axis=0)
if not is_binary:
imgp = imgp * 127.5 + 127.5
imgp /= 255.
return np.clip(imgp.transpose((1, 2, 0)), 0, 1)
def compose_imgs(a, b, is_a_binary=True, is_b_binary=False):
"""Place a and b side by side to be plotted."""
ap = convert_to_rgb(a, is_binary=is_a_binary)
bp = convert_to_rgb(b, is_binary=is_b_binary)
if ap.shape != bp.shape:
raise Exception("""A and B must have the same size. """
"""{0} != {1}""".format(ap.shape, bp.shape))
# ap.shape and bp.shape must have the same size here
h, w, ch = ap.shape
composed = np.zeros((h, 2*w, ch))
composed[:, :w, :] = ap
composed[:, w:, :] = bp
return composed
def get_log_dir(log_dir, expt_name):
"""Compose the log_dir with the experiment name."""
if log_dir is None:
raise Exception('log_dir can not be None.')
if expt_name is not None:
return os.path.join(log_dir, expt_name)
return log_dir
def mkdir(mypath):
"""Create a directory if it does not exist."""
try:
os.makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(mypath):
pass
else:
raise
def create_expt_dir(params):
"""Create the experiment directory and return it."""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
# Create directories if they do not exist
mkdir(params.log_dir)
mkdir(expt_dir)
# Save the parameters
json.dump(params, open(os.path.join(expt_dir, 'params.json'), 'wb'),
indent=4, sort_keys=True)
return expt_dir
def plot_loss(loss, label, filename, log_dir):
"""Plot a loss function and save it in a file."""
plt.figure(figsize=(5, 4))
plt.plot(loss, label=label)
plt.legend()
plt.savefig(os.path.join(log_dir, filename))
plt.clf()
def log(losses, atob, it_val, N=4, log_dir=DEFAULT_LOG_DIR, expt_name=None,
is_a_binary=True, is_b_binary=False):
"""Log losses and atob results."""
log_dir = get_log_dir(log_dir, expt_name)
# Save the losses for further inspection
pickle.dump(losses, open(os.path.join(log_dir, 'losses.pkl'), 'wb'))
###########################################################################
# PLOT THE LOSSES #
###########################################################################
plot_loss(losses['d'], 'discriminator', 'd_loss.png', log_dir)
plot_loss(losses['d_val'], 'discriminator validation', 'd_val_loss.png', log_dir)
plot_loss(losses['p2p'], 'Pix2Pix', 'p2p_loss.png', log_dir)
plot_loss(losses['p2p_val'], 'Pix2Pix validation', 'p2p_val_loss.png', log_dir)
###########################################################################
# PLOT THE A->B RESULTS #
###########################################################################
plt.figure(figsize=(10, 6))
for i in range(N*N):
a, _ = next(it_val)
bp = atob.predict(a)
img = compose_imgs(a[0], bp[0], is_a_binary=is_a_binary, is_b_binary=is_b_binary)
plt.subplot(N, N, i+1)
plt.imshow(img)
plt.axis('off')
plt.savefig(os.path.join(log_dir, 'atob.png'))
plt.clf()
# Make sure all the figures are closed.
plt.close('all')
def save_weights(models, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Save the weights of the models into a file."""
log_dir = get_log_dir(log_dir, expt_name)
models.atob.save_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE), overwrite=True)
models.d.save_weights(os.path.join(log_dir, D_WEIGHTS_FILE), overwrite=True)
def load_weights(atob, d, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights into the corresponding models."""
log_dir = get_log_dir(log_dir, expt_name)
atob.load_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE))
d.load_weights(os.path.join(log_dir, D_WEIGHTS_FILE))
def load_weights_of(m, weights_file, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights of the model m."""
log_dir = get_log_dir(log_dir, expt_name)
m.load_weights(os.path.join(log_dir, weights_file))
def load_losses(log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the losses of the given experiment."""
log_dir = get_log_dir(log_dir, expt_name)
losses = pickle.load(open(os.path.join(log_dir, 'losses.pkl'), 'rb'))
return losses
def load_params(params):
"""
Load the parameters of an experiment and return them.
The params passed as argument will be merged with the new params dict.
If there is a conflict with a key, the params passed as argument prevails.
"""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
expt_params = json.load(open(os.path.join(expt_dir, 'params.json'), 'rb'))
# Update the loaded parameters with the current parameters. This will
# override conflicting keys as expected.
expt_params.update(params)
return expt_params
| [
"matplotlib.pyplot.imshow",
"seaborn.set",
"numpy.repeat",
"os.makedirs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.zeros",
"os.path.isdir",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend"
]
| [((172, 181), 'seaborn.set', 'sns.set', ([], {}), '()\n', (179, 181), True, 'import seaborn as sns\n'), ((1798, 1822), 'numpy.zeros', 'np.zeros', (['(h, 2 * w, ch)'], {}), '((h, 2 * w, ch))\n', (1806, 1822), True, 'import numpy as np\n'), ((2920, 2946), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (2930, 2946), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2978), 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {'label': 'label'}), '(loss, label=label)\n', (2959, 2978), True, 'import matplotlib.pyplot as plt\n'), ((2983, 2995), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2993, 2995), True, 'import matplotlib.pyplot as plt\n'), ((3049, 3058), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3056, 3058), True, 'import matplotlib.pyplot as plt\n'), ((4176, 4203), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4186, 4203), True, 'import matplotlib.pyplot as plt\n'), ((4513, 4522), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4520, 4522), True, 'import matplotlib.pyplot as plt\n'), ((4572, 4588), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4581, 4588), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1189), 'numpy.repeat', 'np.repeat', (['img', '(3)'], {'axis': '(0)'}), '(img, 3, axis=0)\n', (1173, 1189), True, 'import numpy as np\n'), ((2115, 2147), 'os.path.join', 'os.path.join', (['log_dir', 'expt_name'], {}), '(log_dir, expt_name)\n', (2127, 2147), False, 'import os\n'), ((2256, 2275), 'os.makedirs', 'os.makedirs', (['mypath'], {}), '(mypath)\n', (2267, 2275), False, 'import os\n'), ((3012, 3043), 'os.path.join', 'os.path.join', (['log_dir', 'filename'], {}), '(log_dir, filename)\n', (3024, 3043), False, 'import os\n'), ((4386, 4410), 'matplotlib.pyplot.subplot', 'plt.subplot', (['N', 'N', '(i + 1)'], {}), '(N, N, i + 1)\n', (4397, 4410), True, 'import matplotlib.pyplot as plt\n'), ((4417, 4432), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4427, 4432), True, 'import matplotlib.pyplot as plt\n'), ((4441, 4456), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4449, 4456), True, 'import matplotlib.pyplot as plt\n'), ((4474, 4507), 'os.path.join', 'os.path.join', (['log_dir', '"""atob.png"""'], {}), "(log_dir, 'atob.png')\n", (4486, 4507), False, 'import os\n'), ((4788, 4828), 'os.path.join', 'os.path.join', (['log_dir', 'ATOB_WEIGHTS_FILE'], {}), '(log_dir, ATOB_WEIGHTS_FILE)\n', (4800, 4828), False, 'import os\n'), ((4872, 4909), 'os.path.join', 'os.path.join', (['log_dir', 'D_WEIGHTS_FILE'], {}), '(log_dir, D_WEIGHTS_FILE)\n', (4884, 4909), False, 'import os\n'), ((5124, 5164), 'os.path.join', 'os.path.join', (['log_dir', 'ATOB_WEIGHTS_FILE'], {}), '(log_dir, ATOB_WEIGHTS_FILE)\n', (5136, 5164), False, 'import os\n'), ((5185, 5222), 'os.path.join', 'os.path.join', (['log_dir', 'D_WEIGHTS_FILE'], {}), '(log_dir, D_WEIGHTS_FILE)\n', (5197, 5222), False, 'import os\n'), ((5414, 5449), 'os.path.join', 'os.path.join', (['log_dir', 'weights_file'], {}), '(log_dir, weights_file)\n', (5426, 5449), False, 'import os\n'), ((2706, 2743), 'os.path.join', 'os.path.join', (['expt_dir', '"""params.json"""'], {}), "(expt_dir, 'params.json')\n", (2718, 2743), False, 'import os\n'), ((3343, 3378), 'os.path.join', 'os.path.join', (['log_dir', '"""losses.pkl"""'], {}), "(log_dir, 'losses.pkl')\n", (3355, 3378), False, 'import os\n'), ((5638, 5673), 'os.path.join', 'os.path.join', (['log_dir', '"""losses.pkl"""'], {}), "(log_dir, 'losses.pkl')\n", (5650, 5673), False, 'import os\n'), ((6051, 6088), 'os.path.join', 'os.path.join', (['expt_dir', '"""params.json"""'], {}), "(expt_dir, 'params.json')\n", (6063, 6088), False, 'import os\n'), ((2338, 2359), 'os.path.isdir', 'os.path.isdir', (['mypath'], {}), '(mypath)\n', (2351, 2359), False, 'import os\n')] |
from github import Github
def parseGithubURL(url):
splitURL = url.split('/')
owner = splitURL[3]
repo = splitURL[4]
return {
"owner": owner,
"repo": repo
}
def fetchRepoFiles(owner, repo):
files = []
g = Github('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')
repo = g.get_repo(f'{owner}/{repo}')
contents = repo.get_contents('')
while contents:
file_content = contents.pop(0)
if file_content.type == 'dir':
contents.extend(repo.get_contents(file_content.path))
else:
files.append(file_content.path)
return files
# parsedUrl = parseGithubURL('https://github.com/CakeCrusher/restock_emailer')
# filePaths = fetchRepoFiles(parsedUrl['owner'], parsedUrl['repo'])
# files = [path.split('/')[-1] for path in filePaths]
# print(files)
| [
"github.Github"
]
| [((249, 299), 'github.Github', 'Github', (['"""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"""'], {}), "('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')\n", (255, 299), False, 'from github import Github\n')] |
import types
import django.test.testcases
from django.conf import settings
from facetools.models import TestUser
from facetools.common import _create_signed_request
from facetools.test import TestUserNotLoaded
from facetools.signals import sync_facebook_test_user, setup_facebook_test_client
from facetools.common import _get_facetools_test_fixture_name
class FacebookTestCaseMixin(object):
"""
TestCase which makes it possible to test views when the FacebookMiddleware
and SyncFacebookUser middlewares are activated. Must use the Client
attached to this object (i.e. self.client).
"""
facebook_test_user = None
def set_client_signed_request(self, facebook_id, access_token):
"""
Allow code to configure the test client so it has a signed request
of the specified test user for each request
"""
setup_facebook_test_client.send(sender=None, client=self.client, signed_request=_create_signed_request(
settings.FACEBOOK_APPLICATION_SECRET_KEY, facebook_id, oauth_token=access_token))
def _pre_setup(self):
if self.facebook_test_user:
if type(self.facebook_test_user) not in [str, unicode]:
raise Exception("facebook_test_user variable must be a string (found a %s)" % type(self.facebook_test_user))
app_name = get_app_name_from_test_case(type(self).__module__)
facetools_fixture_name = _get_facetools_test_fixture_name(app_name)
if not hasattr(self, 'fixtures'):
self.fixtures = []
if facetools_fixture_name not in self.fixtures:
self.fixtures.append(facetools_fixture_name)
super(FacebookTestCaseMixin, self)._pre_setup()
# Make sure anybody that needs to sync their models loaded from fixtures
# has a chance to do so now that the refreshed user test data is available.
try:
for test_user in TestUser.objects.all():
sync_facebook_test_user.send(sender=None, test_user=test_user)
self.test_user = TestUser.objects.get(name=self.facebook_test_user)
self.set_client_signed_request(self.test_user.facebook_id, self.test_user.access_token)
except TestUser.DoesNotExist:
raise TestUserNotLoaded("Test user %s hasn't been loaded via the %s fixture (did you run sync_facebook_test_users?)" %
(self.facebook_test_user, facetools_fixture_name))
else:
super(FacebookTestCaseMixin, self)._pre_setup()
def get_app_name_from_test_case(module_path_string):
"""
Gets thet Django app from the __class__ attribute of a TestCase in a Django app.
class_string should look something like this: 'facetools_tests.tests.test_test_module'
"""
packages = module_path_string.split(".")
try:
tests_location = packages.index("tests")
except ValueError:
raise ValueError("Couldn't find tests module in %s (are you running this test from tests.py or a tests package in your Django app?)" % module_path_string)
if tests_location == 0:
raise ValueError("Facetools doesn't support Django app's with a name of 'tests', or it failed to find the Django app name out of %s" % module_path_string)
app_name = packages[tests_location - 1]
if app_name not in settings.INSTALLED_APPS:
raise ValueError("Facetools didn't find %s among INSTALLED_APPS. (app name pulled from %s)" % (app_name, module_path_string))
return app_name
# -----------------------------------------------------------------------------
# Test Cases
# -----------------------------------------------------------------------------
class FacebookTransactionTestCase(FacebookTestCaseMixin, django.test.testcases.TransactionTestCase):
def _pre_setup(self):
super(FacebookTransactionTestCase, self)._pre_setup()
class FacebookTestCase(FacebookTestCaseMixin, django.test.testcases.TestCase):
def _pre_setup(self):
super(FacebookTestCase, self)._pre_setup()
if 'LiveServerTestCase' in dir(django.test.testcases):
class FacebookLiveServerTestCase(FacebookTestCaseMixin, django.test.testcases.LiveServerTestCase):
def _pre_setup(self):
super(FacebookLiveServerTestCase, self)._pre_setup()
| [
"facetools.models.TestUser.objects.get",
"facetools.common._get_facetools_test_fixture_name",
"facetools.test.TestUserNotLoaded",
"facetools.signals.sync_facebook_test_user.send",
"facetools.common._create_signed_request",
"facetools.models.TestUser.objects.all"
]
| [((1433, 1475), 'facetools.common._get_facetools_test_fixture_name', '_get_facetools_test_fixture_name', (['app_name'], {}), '(app_name)\n', (1465, 1475), False, 'from facetools.common import _get_facetools_test_fixture_name\n'), ((947, 1054), 'facetools.common._create_signed_request', '_create_signed_request', (['settings.FACEBOOK_APPLICATION_SECRET_KEY', 'facebook_id'], {'oauth_token': 'access_token'}), '(settings.FACEBOOK_APPLICATION_SECRET_KEY,\n facebook_id, oauth_token=access_token)\n', (969, 1054), False, 'from facetools.common import _create_signed_request\n'), ((1963, 1985), 'facetools.models.TestUser.objects.all', 'TestUser.objects.all', ([], {}), '()\n', (1983, 1985), False, 'from facetools.models import TestUser\n'), ((2103, 2153), 'facetools.models.TestUser.objects.get', 'TestUser.objects.get', ([], {'name': 'self.facebook_test_user'}), '(name=self.facebook_test_user)\n', (2123, 2153), False, 'from facetools.models import TestUser\n'), ((2007, 2069), 'facetools.signals.sync_facebook_test_user.send', 'sync_facebook_test_user.send', ([], {'sender': 'None', 'test_user': 'test_user'}), '(sender=None, test_user=test_user)\n', (2035, 2069), False, 'from facetools.signals import sync_facebook_test_user, setup_facebook_test_client\n'), ((2322, 2495), 'facetools.test.TestUserNotLoaded', 'TestUserNotLoaded', (['("Test user %s hasn\'t been loaded via the %s fixture (did you run sync_facebook_test_users?)"\n % (self.facebook_test_user, facetools_fixture_name))'], {}), '(\n "Test user %s hasn\'t been loaded via the %s fixture (did you run sync_facebook_test_users?)"\n % (self.facebook_test_user, facetools_fixture_name))\n', (2339, 2495), False, 'from facetools.test import TestUserNotLoaded\n')] |
# ---------------------------------
# Prepare the data etc.
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y is the target values, and test_x is the test data
# stored in pandas DataFrames and Series (numpy arrays also used)
train = pd.read_csv('../input/sample-data/train_preprocessed.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed.csv')
# As time-series data assume a period variable is set that changes with time
train_x['period'] = np.arange(0, len(train_x)) // (len(train_x) // 4)
train_x['period'] = np.clip(train_x['period'], 0, 3)
test_x['period'] = 4
# -----------------------------------
# Hold-out method for time-series data
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Here for within the training data period 3 is used for validation and periods 0 to 2 are used for training
is_tr = train_x['period'] < 3
is_va = train_x['period'] == 3
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# -----------------------------------
# Cross validation for time-series data (use method that follows time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] < va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# (For reference) Using TimeSeriesSplit() function is difficult as only the order of the data can be used
from sklearn.model_selection import TimeSeriesSplit
tss = TimeSeriesSplit(n_splits=4)
for tr_idx, va_idx in tss.split(train_x):
tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
# -----------------------------------
# Cross validation for time-series data (method to simply partition by time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [0, 1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] != va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
| [
"numpy.clip",
"pandas.read_csv",
"sklearn.model_selection.TimeSeriesSplit"
]
| [((302, 360), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sample-data/train_preprocessed.csv"""'], {}), "('../input/sample-data/train_preprocessed.csv')\n", (313, 360), True, 'import pandas as pd\n'), ((437, 494), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sample-data/test_preprocessed.csv"""'], {}), "('../input/sample-data/test_preprocessed.csv')\n", (448, 494), True, 'import pandas as pd\n'), ((663, 695), 'numpy.clip', 'np.clip', (["train_x['period']", '(0)', '(3)'], {}), "(train_x['period'], 0, 3)\n", (670, 695), True, 'import numpy as np\n'), ((1956, 1983), 'sklearn.model_selection.TimeSeriesSplit', 'TimeSeriesSplit', ([], {'n_splits': '(4)'}), '(n_splits=4)\n', (1971, 1983), False, 'from sklearn.model_selection import TimeSeriesSplit\n')] |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = 'SKXXXX'
api_key_secret = 'your_api_key_secret'
client = Client(api_key_sid, api_key_secret)
did_delete = client.video\
.compositionHooks('HKXXXX')\
.delete()
if(did_delete):
print('Composition removed')
| [
"twilio.rest.Client"
]
| [((234, 269), 'twilio.rest.Client', 'Client', (['api_key_sid', 'api_key_secret'], {}), '(api_key_sid, api_key_secret)\n', (240, 269), False, 'from twilio.rest import Client\n')] |
from time import sleep
debug_mode = False
time_to_exit = False
exiting = False
exit_code = 0
def get_debug_mode():
return debug_mode
def trigger_exit(_exit_code):
global time_to_exit, exit_code
exit_code = _exit_code
time_to_exit = True
sleep(0.1)
| [
"time.sleep"
]
| [((263, 273), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (268, 273), False, 'from time import sleep\n')] |
from __future__ import absoulte_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from data import data_utils
data = data_utils
class SequenceWrapperTest(tf.test.TestCase):
def testDefaultTimesteps(self):
seq = data.SequenceWrapper()
t1 = seq.add_timestep()
_ = seq.add_timestep()
self.assertEqual(len(seq), 2)
self.assertEqual(t1.weight, 0.0)
self.assertEqual(t1.label, 0)
self.assertEqual(t1.token, 0)
def testSettersAndGetters(self):
ts = data.SequenceWrapper().add_timestep()
ts.set_token(3)
ts.set_label(4)
ts.set_weight(2.0)
self.assertEqual(ts.token, 3)
self.assertEqual(ts.label, 4)
self.assertEqual(ts.weight, 2.0)
def testTimestepIteration(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(0)
seq.add_timestep().set_token(1)
seq.add_timestep().set_token(2)
for i, ts in enumerate(seq):
self.assertEqual(ts.token, i)
def testFillsSequenceExampleCorrectly(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(1).set_label(2).set_weight(3.0)
seq.add_timestep().set_token(10).set_label(20).set_weight(30.0)
seq_ex = seq.seq
fl = seq_ex.feature_lists.feature_list
fl_token = fl[data.SequenceWrapper.F_TOKEN_ID].feature
fl_label = fl[data.SequenceWrapper.F_LABEL].feature
fl_weight = fl[data.SequenceWrapper.F_WEIGHT].feature
_ = [self.assertEqual(len(f), 2) for f in [fl_token, fl_label, fl_weight]]
self.assertAllEqual([f.int64_list.value[0] for f in fl_token], [1, 10])
self.assertAllEqual([f.int64_list.value[0] for f in fl_label], [2, 20])
self.assertAllEqual([f.float_list.value[0] for f in fl_weight], [3.0, 30.0])
class DataUtilsTest(tf.test.TestCase):
def testSplitByPunct(self):
output = data.split_by_punct(
"hello! world, i've been\nwaiting\tfor\ryou for.a long time"
)
expected = [
"hello",
"world",
"i",
"ve",
"been",
"waiting",
"for",
"you",
"for",
"a",
"long",
"time",
]
self.assertListEqual(output, expected)
def _buildDummySequence(self):
seq = data.SequenceWrapper()
for i in range(10):
seq.add_timestep().set_token(i)
return seq
def testBuildLMSeq(self):
seq = self._buildDummySequence()
lm_seq = data.build_lm_sequence(seq)
for i, ts in enumerate(lm_seq):
# For end of sequence, the token and label should be same, and weight
# should be 0.0.
if i == len(lm_seq) - 1:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i)
self.assertEqual(ts.weight, 0.0)
else:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i + 1)
self.assertEqual(ts.weight, 1.0)
def testBuildSAESeq(self):
seq = self._buildDummySequence()
sa_seq = data.build_seq_ae_sequence(seq)
self.assertEqual(len(sa_seq), len(seq) * 2 - 1)
# Tokens should be sequence twice, minus the EOS token at the end
for i, ts in enumerate(sa_seq):
self.assertEqual(ts.token, seq[i % 10].token)
# Weights should be len-1 0.0's and len 1.0's.
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].weight, 0.0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].weight, 1.0)
# Labels should be len-1 0's, and then the sequence
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].label, 0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].label, seq[i - (len(seq) - 1)].token)
def testBuildLabelSeq(self):
seq = self._buildDummySequence()
eos_id = len(seq) - 1
label_seq = data.build_labeled_sequence(seq, True)
for i, ts in enumerate(label_seq[:-1]):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testBuildBidirLabelSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
label_seq = data.build_labeled_sequence(bidir_seq, True)
for (i, ts), j in zip(enumerate(label_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testReverseSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
for i, ts in enumerate(reversed(reverse_seq[:-1])):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = reverse_seq[-1]
eos_id = len(seq) - 1
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testBidirSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
for (i, ts), j in zip(enumerate(bidir_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = bidir_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testLabelGain(self):
seq = self._buildDummySequence()
label_seq = data.build_labeled_sequence(seq, True, label_gain=True)
for i, ts in enumerate(label_seq):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 1)
self.assertNear(ts.weight, float(i) / (len(seq) - 1), 1e-3)
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow.test.main"
]
| [((6909, 6923), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (6921, 6923), True, 'import tensorflow as tf\n')] |
from collections import OrderedDict
from random import Random
from typing import Set
from .._types import Dataset, Split, LabelIndices
from .._util import per_label
from ._RandomSplitter import RandomSplitter
from ._Splitter import Splitter
class StratifiedSplitter(Splitter):
"""
TODO
"""
def __init__(self, percentage: float, labels: LabelIndices, random: Random = Random()):
self._percentage = percentage
self._labels = labels
self._random = random
def __str__(self) -> str:
return f"strat-{self._percentage}"
def __call__(self, dataset: Dataset) -> Split:
subsets_per_label = per_label(dataset)
sub_splits = {
label: RandomSplitter(int(len(subsets_per_label[label]) * self._percentage), self._random)(subsets_per_label[label])
for label in self._labels.keys()
}
result = OrderedDict(), OrderedDict()
for filename, label in dataset.items():
result_index = 0 if filename in sub_splits[label][0] else 1
result[result_index][filename] = label
return result
| [
"random.Random",
"collections.OrderedDict"
]
| [((386, 394), 'random.Random', 'Random', ([], {}), '()\n', (392, 394), False, 'from random import Random\n'), ((894, 907), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (905, 907), False, 'from collections import OrderedDict\n'), ((909, 922), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (920, 922), False, 'from collections import OrderedDict\n')] |
""" Module containing the RetryingClient wrapper class. """
from time import sleep
def _ensure_tuple_argument(argument_name, argument_value):
"""
Helper function to ensure the given arguments are tuples of Exceptions (or
subclasses), or can at least be converted to such.
Args:
argument_name: str, name of the argument we're checking, only used for
raising meaningful exceptions.
argument: any, the argument itself.
Returns:
tuple[Exception]: A tuple with the elements from the argument if they are
valid.
Exceptions:
ValueError: If the argument was not None, tuple or Iterable.
ValueError: If any of the elements of the argument is not a subclass of
Exception.
"""
# Ensure the argument is a tuple, set or list.
if argument_value is None:
return tuple()
elif not isinstance(argument_value, (tuple, set, list)):
raise ValueError("%s must be either a tuple, a set or a list." % argument_name)
# Convert the argument before checking contents.
argument_tuple = tuple(argument_value)
# Check that all the elements are actually inherited from Exception.
# (Catchable)
if not all([issubclass(arg, Exception) for arg in argument_tuple]):
raise ValueError(
"%s is only allowed to contain elements that are subclasses of "
"Exception." % argument_name
)
return argument_tuple
class RetryingClient(object):
"""
Client that allows retrying calls for the other clients.
"""
def __init__(
self, client, attempts=2, retry_delay=0, retry_for=None, do_not_retry_for=None
):
"""
Constructor for RetryingClient.
Args:
client: Client|PooledClient|HashClient, inner client to use for
performing actual work.
attempts: optional int, how many times to attempt an action before
failing. Must be 1 or above. Defaults to 2.
retry_delay: optional int|float, how many seconds to sleep between
each attempt.
Defaults to 0.
retry_for: optional None|tuple|set|list, what exceptions to
allow retries for. Will allow retries for all exceptions if None.
Example:
`(MemcacheClientError, MemcacheUnexpectedCloseError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
do_not_retry_for: optional None|tuple|set|list, what
exceptions should be retried. Will not block retries for any
Exception if None.
Example:
`(IOError, MemcacheIllegalInputError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
Exceptions:
ValueError: If `attempts` is not 1 or above.
ValueError: If `retry_for` or `do_not_retry_for` is not None, tuple or
Iterable.
ValueError: If any of the elements of `retry_for` or
`do_not_retry_for` is not a subclass of Exception.
ValueError: If there is any overlap between `retry_for` and
`do_not_retry_for`.
"""
if attempts < 1:
raise ValueError(
"`attempts` argument must be at least 1. "
"Otherwise no attempts are made."
)
self._client = client
self._attempts = attempts
self._retry_delay = retry_delay
self._retry_for = _ensure_tuple_argument("retry_for", retry_for)
self._do_not_retry_for = _ensure_tuple_argument(
"do_not_retry_for", do_not_retry_for
)
# Verify no overlap in the go/no-go exception collections.
for exc_class in self._retry_for:
if exc_class in self._do_not_retry_for:
raise ValueError(
'Exception class "%s" was present in both `retry_for` '
"and `do_not_retry_for`. Any exception class is only "
"allowed in a single argument." % repr(exc_class)
)
# Take dir from the client to speed up future checks.
self._client_dir = dir(self._client)
def _retry(self, name, func, *args, **kwargs):
"""
Workhorse function, handles retry logic.
Args:
name: str, Name of the function called.
func: callable, the function to retry.
*args: args, array arguments to pass to the function.
**kwargs: kwargs, keyword arguments to pass to the function.
"""
for attempt in range(self._attempts):
try:
result = func(*args, **kwargs)
return result
except Exception as exc:
# Raise the exception to caller if either is met:
# - We've used the last attempt.
# - self._retry_for is set, and we do not match.
# - self._do_not_retry_for is set, and we do match.
# - name is not actually a member of the client class.
if (
attempt >= self._attempts - 1
or (self._retry_for and not isinstance(exc, self._retry_for))
or (
self._do_not_retry_for
and isinstance(exc, self._do_not_retry_for)
)
or name not in self._client_dir
):
raise exc
# Sleep and try again.
sleep(self._retry_delay)
# This is the real magic soup of the class, we catch anything that isn't
# strictly defined for ourselves and pass it on to whatever client we've
# been given.
def __getattr__(self, name):
return lambda *args, **kwargs: self._retry(
name, self._client.__getattribute__(name), *args, **kwargs
)
# We implement these explicitly because they're "magic" functions and won't
# get passed on by __getattr__.
def __dir__(self):
return self._client_dir
# These magics are copied from the base client.
def __setitem__(self, key, value):
self.set(key, value, noreply=True)
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError
return value
def __delitem__(self, key):
self.delete(key, noreply=True)
| [
"time.sleep"
]
| [((5561, 5585), 'time.sleep', 'sleep', (['self._retry_delay'], {}), '(self._retry_delay)\n', (5566, 5585), False, 'from time import sleep\n')] |
# Generated by Django 2.2.2 on 2019-08-25 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classroom', '0024_auto_20190825_1723'),
]
operations = [
migrations.AddField(
model_name='myfile',
name='file',
field=models.CharField(blank=True, max_length=100),
),
]
| [
"django.db.models.CharField"
]
| [((334, 378), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)'}), '(blank=True, max_length=100)\n', (350, 378), False, 'from django.db import migrations, models\n')] |
# -*- encoding: utf-8 -*-
from flask import request
from lazyblacksmith.utils.request import is_xhr
import logging
logger = logging.getLogger('lb.ajax')
def is_not_ajax():
"""
Return True if request is not ajax
This function is used in @cache annotation
to not cache direct call (http 403)
"""
return not is_xhr(request)
| [
"logging.getLogger",
"lazyblacksmith.utils.request.is_xhr"
]
| [((132, 160), 'logging.getLogger', 'logging.getLogger', (['"""lb.ajax"""'], {}), "('lb.ajax')\n", (149, 160), False, 'import logging\n'), ((348, 363), 'lazyblacksmith.utils.request.is_xhr', 'is_xhr', (['request'], {}), '(request)\n', (354, 363), False, 'from lazyblacksmith.utils.request import is_xhr\n')] |
import os
class Traces:
def __init__(self, positive = set(), negative = set()):
self.positive = positive
self.negative = negative
"""
IG: at the moment we are adding a trace only if it ends up in an event.
should we be more restrictive, e.g. consider xxx, the same as xxxxxxxxxx (where x is an empty event '')
recent suggestion (from the meeting): ignore empty events altogether and don't consider them as events at all (neither for
execution, nor for learning)
"""
def _should_add(self, trace, i):
prefixTrace = trace[:i]
if not prefixTrace[-1] == '':
return True
else:
return False
def _get_prefixes(self, trace, up_to_limit = None):
if up_to_limit is None:
up_to_limit = len(trace)
all_prefixes = set()
for i in range(1, up_to_limit+1):
if self._should_add(trace, i):
all_prefixes.add(trace[:i])
return all_prefixes
def symbol_to_trace(self,symbols):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(letters, numbers))
traces = list()
for symbol in symbols:
traces.append(dictionary.get(symbol))
return tuple(traces)
def trace_to_symbol(self,traces):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
symbols = list()
for trace in traces:
symbols.append(dictionary.get(trace))
return tuple(traces)
def rm_trace_to_symbol(self,rm_file):
file = rm_file
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
with open(file) as f:
content = f.readlines()
lines = []
for line in content:
end = 0
begin = 1 #initialize values based on what won't enter the loops; initial values irrelevant
number = 0 #random, had to initialize
if line != content[0]:
number = str()
check = 0
count=0
for character in line:
if ((check==1) & (character=="'")): #looks for second quotation
check = 10 #end search
end = count-1
elif (character == "'"): #looks for first quotation
check = 1
begin = count+1
elif (check==1):
number += character
count = count+1
symbol = dictionary.get(int(number))
#symbol = symbol + '&!n'
line = list(line) #necessary for use of pop,insert
if end==begin+1:
line.pop(end)
line.pop(begin)
line.insert(begin,symbol)
elif end==begin:
line.pop(begin)
line.insert(begin,symbol)
lines.append(line)
with open(rm_file, 'w') as f:
for line in lines:
for item in line:
f.write(str(item))
def fix_rmfiles(self,rmfile):
file = rmfile
with open(file) as f:
content = f.readlines()
final_state = str()
for line in content:
if line != content[0]:
brackets = 0
commas = 0
state = str()
next_state = str()
for character in line:
if (character == "(") & (brackets == 0):
brackets = 1
elif brackets == 1:
if character == "(":
brackets = 2
elif brackets == 2:
if character == "1":
final_state = next_state
print(final_state)
if ((commas == 0) & (brackets == 1)):
if character == ",":
commas = 1
else:
state += character
elif ((commas == 1) & (brackets == 1)):
if character == ",":
commas = 2
else:
next_state += character
# with open(rmfile, 'w') as f:
# for line in content:
# for item in line:
# f.write(str(item))
# f.write("\n")
# writethis = "(" + str(final_state) + "," + str(final_state) + ",'True',ConstantRewardFunction(0))"
# f.write(writethis)
"""
when adding a trace, it additionally adds all prefixes as negative traces
"""
def add_trace(self, trace, reward, learned):
trace = tuple(trace)
if reward > 0:
self.positive.add(trace)
# | is a set union operator
#if learned==0:
self.negative |= self._get_prefixes(trace, len(trace)-1)
else:
#if learned == 0:
self.negative |= self._get_prefixes(trace)
# else:
# self.negative.add(trace)
def export_traces(self, filename):
parent_path = os.path.dirname(filename)
os.makedirs(parent_path,exist_ok=True)
with open(filename, "w") as output_file:
output_file.write("POSITIVE:")
for trace in self.positive:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
output_file.write("\nNEGATIVE:")
for trace in self.negative:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
def __repr__(self):
return repr(self.positive) + "\n\n" + repr(self.negative)
| [
"os.path.dirname",
"os.makedirs"
]
| [((5457, 5482), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (5472, 5482), False, 'import os\n'), ((5491, 5530), 'os.makedirs', 'os.makedirs', (['parent_path'], {'exist_ok': '(True)'}), '(parent_path, exist_ok=True)\n', (5502, 5530), False, 'import os\n')] |
import django
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
if django.VERSION[:2] > (1, 9):
from django.views.i18n import JavaScriptCatalog
else:
from django.views.i18n import javascript_catalog
from django_comments_xtd import LatestCommentFeed
from django_comments_xtd.views import XtdCommentListView
from comp import views
admin.autodiscover()
urlpatterns = [
url(r'^$', views.HomepageView.as_view(), name='homepage'),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
url(r'^articles/', include('comp.articles.urls')),
url(r'^quotes/', include('comp.quotes.urls')),
url(r'^comments/', include('django_comments_xtd.urls')),
url(r'^comments/$', XtdCommentListView.as_view(
content_types=["articles.article", "quotes.quote"],
paginate_by=10, page_range=5),
name='comments-xtd-list'),
url(r'^feeds/comments/$', LatestCommentFeed(), name='comments-feed'),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
if django.VERSION[:2] > (1, 9):
urlpatterns.append(
url(r'^jsi18n/$', JavaScriptCatalog.as_view(),
name='javascript-catalog')
)
else:
js_info_dict = {
'packages': ('django_comments_xtd',)
}
urlpatterns.append(
url(r'^jsi18n/$', javascript_catalog, js_info_dict,
name='javascript-catalog')
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^rosetta/', include('rosetta.urls'))]
| [
"django.conf.urls.url",
"comp.views.HomepageView.as_view",
"django_comments_xtd.LatestCommentFeed",
"django.conf.urls.include",
"django.views.i18n.JavaScriptCatalog.as_view",
"django_comments_xtd.views.XtdCommentListView.as_view",
"django.contrib.staticfiles.urls.staticfiles_urlpatterns",
"django.contrib.admin.autodiscover"
]
| [((472, 492), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (490, 492), False, 'from django.contrib import admin\n'), ((1626, 1651), 'django.contrib.staticfiles.urls.staticfiles_urlpatterns', 'staticfiles_urlpatterns', ([], {}), '()\n', (1649, 1651), False, 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n'), ((526, 554), 'comp.views.HomepageView.as_view', 'views.HomepageView.as_view', ([], {}), '()\n', (552, 554), False, 'from comp import views\n'), ((593, 625), 'django.conf.urls.include', 'include', (['"""django.conf.urls.i18n"""'], {}), "('django.conf.urls.i18n')\n", (600, 625), False, 'from django.conf.urls import include, url\n'), ((648, 672), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (655, 672), False, 'from django.conf.urls import include, url\n'), ((698, 727), 'django.conf.urls.include', 'include', (['"""comp.articles.urls"""'], {}), "('comp.articles.urls')\n", (705, 727), False, 'from django.conf.urls import include, url\n'), ((751, 778), 'django.conf.urls.include', 'include', (['"""comp.quotes.urls"""'], {}), "('comp.quotes.urls')\n", (758, 778), False, 'from django.conf.urls import include, url\n'), ((804, 839), 'django.conf.urls.include', 'include', (['"""django_comments_xtd.urls"""'], {}), "('django_comments_xtd.urls')\n", (811, 839), False, 'from django.conf.urls import include, url\n'), ((866, 978), 'django_comments_xtd.views.XtdCommentListView.as_view', 'XtdCommentListView.as_view', ([], {'content_types': "['articles.article', 'quotes.quote']", 'paginate_by': '(10)', 'page_range': '(5)'}), "(content_types=['articles.article',\n 'quotes.quote'], paginate_by=10, page_range=5)\n", (892, 978), False, 'from django_comments_xtd.views import XtdCommentListView\n'), ((1058, 1077), 'django_comments_xtd.LatestCommentFeed', 'LatestCommentFeed', ([], {}), '()\n', (1075, 1077), False, 'from django_comments_xtd import LatestCommentFeed\n'), ((1129, 1187), 'django.conf.urls.include', 'include', (['"""rest_framework.urls"""'], {'namespace': '"""rest_framework"""'}), "('rest_framework.urls', namespace='rest_framework')\n", (1136, 1187), False, 'from django.conf.urls import include, url\n'), ((1490, 1567), 'django.conf.urls.url', 'url', (['"""^jsi18n/$"""', 'javascript_catalog', 'js_info_dict'], {'name': '"""javascript-catalog"""'}), "('^jsi18n/$', javascript_catalog, js_info_dict, name='javascript-catalog')\n", (1493, 1567), False, 'from django.conf.urls import include, url\n'), ((1306, 1333), 'django.views.i18n.JavaScriptCatalog.as_view', 'JavaScriptCatalog.as_view', ([], {}), '()\n', (1331, 1333), False, 'from django.views.i18n import JavaScriptCatalog\n'), ((1732, 1755), 'django.conf.urls.include', 'include', (['"""rosetta.urls"""'], {}), "('rosetta.urls')\n", (1739, 1755), False, 'from django.conf.urls import include, url\n')] |
import pytest
from katana.dynamic_bitset import DynamicBitset
__all__ = []
SIZE = 50
@pytest.fixture
def dbs():
return DynamicBitset(SIZE)
def test_set(dbs):
dbs[10] = 1
assert dbs[10]
def test_set_invalid_type(dbs):
try:
dbs[2.3] = 0
assert False
except TypeError:
pass
def test_set_invalid_index_low(dbs):
try:
dbs[-1] = 1
assert False
except IndexError:
pass
def test_set_invalid_index_high(dbs):
try:
dbs[SIZE] = 1
assert False
except IndexError:
pass
def test_reset(dbs):
dbs[10] = 1
dbs.reset()
assert not dbs[10]
assert len(dbs) == SIZE
def test_reset_index(dbs):
dbs[10] = 1
dbs[10] = 0
assert not dbs[10]
def test_reset_begin_end(dbs):
dbs[10] = 1
dbs[15] = 1
dbs[12:17] = 0
assert dbs[10]
assert not dbs[15]
def test_reset_begin_end_invalid_step(dbs):
try:
dbs[12:17:22] = 0
assert False
except ValueError:
pass
def test_reset_none_end(dbs):
dbs[10] = 1
dbs[15] = 1
dbs[:12] = 0
assert not dbs[10]
assert dbs[15]
def test_resize(dbs):
dbs.resize(20)
assert len(dbs) == 20
dbs[8] = 1
dbs.resize(20)
assert len(dbs) == 20
assert dbs[8]
dbs.resize(70)
assert len(dbs) == 70
assert dbs[8]
assert dbs.count() == 1
def test_clear(dbs):
dbs[10] = 1
dbs.clear()
assert len(dbs) == 0
dbs.resize(20)
assert len(dbs) == 20
assert not dbs[10]
def test_count(dbs):
dbs[10] = 1
assert dbs.count() == 1
| [
"katana.dynamic_bitset.DynamicBitset"
]
| [((128, 147), 'katana.dynamic_bitset.DynamicBitset', 'DynamicBitset', (['SIZE'], {}), '(SIZE)\n', (141, 147), False, 'from katana.dynamic_bitset import DynamicBitset\n')] |
import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A': [('B', 100), ('C', 20)],
'C': [('D', 20)], 'D': [('B', 20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n == n2:
return d
def cost(n, goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors,
heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"astar.find_path"
]
| [((1010, 1025), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1023, 1025), False, 'import unittest\n'), ((719, 841), 'astar.find_path', 'astar.find_path', (['"""A"""', '"""B"""'], {'neighbors_fnct': 'neighbors', 'heuristic_cost_estimate_fnct': 'cost', 'distance_between_fnct': 'distance'}), "('A', 'B', neighbors_fnct=neighbors,\n heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance)\n", (734, 841), False, 'import astar\n')] |
#!/bin/env python3
import csv
def intersect(list1,list2):
list3 = [ value for value in list1 if value in list2]
return list3
def category(list1,effects):
cat = 'Good'
good = 0
bad = 0
for ing in list1:
if effects[ing]=='Good':
good += 1
else:
bad += 1
if bad==0:
return 'Potion'
elif good==0:
return 'Poison'
else:
return 'Downside'
effects = {}
ingredients = {}
print("Formulating formulas")
with open('ingredients.csv') as csvfile:
aff = csv.reader(csvfile, delimiter=',')
for row in aff:
if row[0] not in effects.keys():
effects[row[0]] = row[1]
with open('skyrim-ingredients.csv', newline='') as csvfile:
ingre = csv.reader(csvfile, delimiter=',')
for row in ingre:
if row[0] not in ingredients.keys():
ingredients[row[0]] = [row[1],row[2],row[3],row[4]]
multieffects = {}
for ce in effects:
curing = []
for ing in ingredients:
if ce in ingredients[ing]:
curing.append(ing)
for k,curi in enumerate(curing):
for i in range(k+1,len(curing)):
cureff = intersect(ingredients[curi],ingredients[curing[i]])
cureff.sort()
if len(cureff)>1:
if curi>curing[i]:
curname = curing[i] + ':' + curi
else:
curname = curi + ':' + curing[i]
multieffects[curname] = cureff
finallist = {}
for me in multieffects:
curing = me.split(":")
for ing in ingredients:
if ing!=curing[0] and ing!=curing[1]:
eff1 = intersect(ingredients[curing[0]],ingredients[ing])
eff2 = intersect(ingredients[curing[1]],ingredients[ing])
if len(eff1)>0 or len(eff2)>0:
tmpname = [ val for val in curing ]
tmpname.append(ing)
tmpname.sort()
finalname = ":".join(tmpname)
finallist[finalname] = list(set(multieffects[me] + eff1 + eff2))
finallist[finalname].sort()
with open('formulas.csv',mode='w') as formula_file:
formula_writer = csv.writer(formula_file, delimiter=',')
formula_writer.writerow(['Category','Ingredient 1','Ingredient 2','Ingredient 3','Effect 1','Effect 2','Effect 3','Effect 4','Effect 5'])
for fl in finallist:
formula_writer.writerow([category(finallist[fl],effects)] + fl.split(":") + finallist[fl])
for fl in multieffects:
formula_writer.writerow([category(multieffects[fl],effects)] + fl.split(":") + [''] + multieffects[fl])
| [
"csv.writer",
"csv.reader"
]
| [((550, 584), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (560, 584), False, 'import csv\n'), ((755, 789), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (765, 789), False, 'import csv\n'), ((2176, 2215), 'csv.writer', 'csv.writer', (['formula_file'], {'delimiter': '""","""'}), "(formula_file, delimiter=',')\n", (2186, 2215), False, 'import csv\n')] |
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_api_client.configuration import Configuration
class UserAdditionalProperties(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"member_of_groups": "list[str]",
"authentication": "AuthenticationData",
"is_external_user": "bool",
}
attribute_map = {
"member_of_groups": "memberOfGroups",
"authentication": "authentication",
"is_external_user": "isExternalUser",
}
def __init__(
self,
member_of_groups=None,
authentication=None,
is_external_user=None,
_configuration=None,
): # noqa: E501
"""UserAdditionalProperties - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._member_of_groups = None
self._authentication = None
self._is_external_user = None
self.discriminator = None
if member_of_groups is not None:
self.member_of_groups = member_of_groups
if authentication is not None:
self.authentication = authentication
if is_external_user is not None:
self.is_external_user = is_external_user
@property
def member_of_groups(self):
"""Gets the member_of_groups of this UserAdditionalProperties. # noqa: E501
List of role names # noqa: E501
:return: The member_of_groups of this UserAdditionalProperties. # noqa: E501
:rtype: list[str]
"""
return self._member_of_groups
@member_of_groups.setter
def member_of_groups(self, member_of_groups):
"""Sets the member_of_groups of this UserAdditionalProperties.
List of role names # noqa: E501
:param member_of_groups: The member_of_groups of this UserAdditionalProperties. # noqa: E501
:type: list[str]
"""
self._member_of_groups = member_of_groups
@property
def authentication(self):
"""Gets the authentication of this UserAdditionalProperties. # noqa: E501
user authentication # noqa: E501
:return: The authentication of this UserAdditionalProperties. # noqa: E501
:rtype: AuthenticationData
"""
return self._authentication
@authentication.setter
def authentication(self, authentication):
"""Sets the authentication of this UserAdditionalProperties.
user authentication # noqa: E501
:param authentication: The authentication of this UserAdditionalProperties. # noqa: E501
:type: AuthenticationData
"""
self._authentication = authentication
@property
def is_external_user(self):
"""Gets the is_external_user of this UserAdditionalProperties. # noqa: E501
:return: The is_external_user of this UserAdditionalProperties. # noqa: E501
:rtype: bool
"""
return self._is_external_user
@is_external_user.setter
def is_external_user(self, is_external_user):
"""Sets the is_external_user of this UserAdditionalProperties.
:param is_external_user: The is_external_user of this UserAdditionalProperties. # noqa: E501
:type: bool
"""
self._is_external_user = is_external_user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(UserAdditionalProperties, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserAdditionalProperties):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UserAdditionalProperties):
return True
return self.to_dict() != other.to_dict()
| [
"clients.ctm_api_client.configuration.Configuration",
"six.iteritems"
]
| [((4101, 4134), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (4114, 4134), False, 'import six\n'), ((1428, 1443), 'clients.ctm_api_client.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1441, 1443), False, 'from clients.ctm_api_client.configuration import Configuration\n')] |
# -*- coding: utf-8 -*-
"""
Pytorch models
__author__ = 'Jamie (<EMAIL>)'
__copyright__ = 'No copyright. Just copyleft!'
"""
# pylint: disable=no-member
# pylint: disable=invalid-name
###########
# imports #
###########
import torch
import torch.nn as nn
from embedder import Embedder
from pos_models import PosTagger, FnnTagger, CnnTagger # pylint: disable=unused-import
#############
# Ner Class #
#############
class Ner(nn.Module):
"""
named entity recognizer pytorch model
"""
def __init__(self, embedder, encoder, decoder):
"""
* embedder (Embedder)
[sentence_len, context_len] => [sentence_len, context_len, embed_dim]
* encoder (nn.Module)
[sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
* decoder (nn.Module)
[sentence_len, hidden_dim] => [sentence_len, n_tags],
"""
super().__init__()
self.embedder = embedder
self.encoder = encoder
self.decoder = decoder
assert isinstance(embedder, Embedder)
assert isinstance(encoder, nn.Module)
assert isinstance(decoder, nn.Module)
def forward(self, sentence, gazet, pos, words): #pylint: disable=arguments-differ
# [sentence_len, context_len] => [sentence_len, context_len, embed_dim]
sentence_embed = self.embedder(sentence, gazet, pos, words)
# [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
hidden = self.encoder(sentence_embed)
# [sentence_len, hidden_dim] => [sentence_len, n_tags]
predicted_tags = self.decoder(hidden)
return predicted_tags
def save(self, path):
"""
모델을 저장하는 메소드
:param path: 경로
"""
if torch.cuda.is_available():
self.cpu()
torch.save(self, str(path))
if torch.cuda.is_available():
self.cuda()
@classmethod
def load(cls, path):
"""
저장된 모델을 로드하는 메소드
:param path: 경로
:return: 모델 클래스 객체
"""
model = torch.load(str(path))
if torch.cuda.is_available():
model.cuda()
return model
#################
# Encoder Class #
#################
class Fnn5(nn.Module):
"""
2-Layer Full-Connected Neural Networks
"""
def __init__(self, context_len=21, in_dim=50, hidden_dim=500):
super(Fnn5, self).__init__()
self.context_len = context_len
self.hidden_dim = hidden_dim
self.out_dim = hidden_dim
self.net = nn.Sequential(
nn.Linear(context_len*in_dim, hidden_dim),
)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, in_dim]
Return:
x: [sentence_len, out_dim]
"""
sentence_len = x.size(0)
x = x.view(sentence_len, -1) # [sentence_len, context_len x in_dim]
x = self.net(x) # [setence_len, out_dim]
return x
class Cnn7(nn.Module):
"""
ConvNet kernels=[2,3,4,5] + Fully-Connected
"""
def __init__(self, in_dim=50, hidden_dim=500):
"""
"""
super(Cnn7, self).__init__()
self.in_dim = in_dim
self.hidden_dim = hidden_dim
self.out_dim = in_dim * 4
self.conv2 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 4
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 2
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 1
)
self.conv3 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=3), # 1
)
self.conv4 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 4
nn.ReLU(),
nn.Conv1d(in_dim, in_dim, kernel_size=4), # 1
)
self.conv5 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=1), # 1
)
def forward(self, x): #pylint: disable=arguments-differ
"""
Args:
x: [sentence_length, context_len, in_dim]
Return:
x: [sentence_length, in_dim * 4]
"""
# [sentence_length, in_dim, context_len]
x = x.transpose(1, 2)
conv2 = self.conv2(x).squeeze(-1) # [sentence_len, in_dim]
conv3 = self.conv3(x).squeeze(-1) # [sentence_len, in_dim]
conv4 = self.conv4(x).squeeze(-1) # [sentence_len, in_dim]
conv5 = self.conv5(x).squeeze(-1) # [sentence_len, in_dim]
# [sentence_len, in_dim * 4]
out = torch.cat([conv2, conv3, conv4, conv5], dim=1)
return out
class Cnn8(nn.Module):
"""
9-layer Conv NN + Batch Norm + Residual
"""
def __init__(self, context_len=21, in_dim=64, hidden_dim=None):
super(Cnn8, self).__init__()
self.context_len = context_len
# conv block 64
self.conv_block1_1 = self.conv_block(in_dim, 2, False)
self.conv_block1_2_1 = self.conv_block(in_dim, 1, False)
self.conv_block1_2_2 = self.conv_block(in_dim, 1, True)
self.pool1 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 128
self.conv_block2_1 = self.conv_block(in_dim*2, 2, False)
self.conv_block2_2_1 = self.conv_block(in_dim*2, 1, False)
self.conv_block2_2_2 = self.conv_block(in_dim*2, 1, True)
self.pool2 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 256
self.conv_block3_1 = self.conv_block(in_dim*4, 2, False)
self.conv_block3_2_1 = self.conv_block(in_dim*4, 1, False)
self.conv_block3_2_2 = self.conv_block(in_dim*4, 1, True)
self.pool3 = nn.MaxPool1d(kernel_size=2)
# conv block 512
self.conv_block4_1 = self.conv_block(in_dim*8, 2, False)
self.conv_block4_2_1 = self.conv_block(in_dim*8, 1, False)
self.conv_block4_2_2 = self.conv_block(in_dim*8, 1, True)
self.pool4 = nn.MaxPool1d(kernel_size=3)
self.out_dim = in_dim*16
@classmethod
def conv_block(cls, in_dim=64, depth=2, double=True):
"""
Args:
[batch_size, dim, length]
Return:
[batch_size, dim*2, length] if double=True
[batch_size, dim, length] if double=False
"""
out_dim = in_dim
layers = []
for i in range(depth):
if double:
if i == depth - 1:
out_dim = in_dim * 2
layers.append(nn.Conv1d(in_dim, out_dim, kernel_size=3, padding=1))
layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
def forward(self, sentence):#pylint: disable=arguments-differ
"""
Args:
sentence: [sentence_len, context_len, embed_dim]
Return:
logit: [batch_size, out_dim]
"""
# [sentence_len, embed_dim, context_len]
x = sentence.transpose(1, 2)
# conv block 64
x = self.conv_block1_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_2(x) # [batch, in_dim*2, 21]
x = self.pool1(x) # [batch, in_dim*2, 11]
# conv block 128
x = self.conv_block2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_2(x) # [batch, in_dim*4, 11]
x = self.pool2(x) # [batch, in_dim*4, 6]
# conv block 256
x = self.conv_block3_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_2(x) # [batch, in_dim*8, 6]
x = self.pool3(x) # [batch, in_dim*8, 3]
# conv block 512
x = self.conv_block4_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_2(x) # [batch, in_dim*16, 3]
x = self.pool4(x) # [batch_size, in_dim*16, 1]
x = x.squeeze(-1) # [batch, in_dim*16]
return x
class RnnEncoder(nn.Module):
"""
RNN Encoder Module
"""
def __init__(self, context_len=21, in_dim=1024, out_dim=1024,
num_layers=2, cell='gru'):
super(RnnEncoder, self).__init__()
self.hidden_dim = out_dim // 2
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
if cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, input_size]
Return:
x: [sentence_len, hidden_size]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sequence_len, context_len, input_size]
# =>[sentence_len, context_len, hidden_size x 2]
x, _ = self.rnn(x)
# [sequence_len, hidden_size x 2]
x = x[:, 10, :]
return x
#################
# Decoder Class #
#################
class FCDecoder(nn.Module):
"""
Fully-Connected Decoder
"""
def __init__(self, in_dim, hidden_dim, n_tags):
super(FCDecoder, self).__init__()
self.net = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_dim, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
return self.net(x)
class RnnDecoder(nn.Module):
"""
RNN-based Decoder
"""
def __init__(self, in_dim=1024, hidden_dim=512, n_tags=11,
num_layers=2, cell='gru'):
super(RnnDecoder, self).__init__()
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
if cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
self.out = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(hidden_dim * 2, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sentence_len, batch=1, input_size]
x = x.unsqueeze(1)
# x: [sentence_len, batch=1, hidden_size x 2]
# h_n: [num_layers * 2, batch=1, hidden_size]
# c_n: [num_layers * 2, batch=1, hidden_size]
x, _ = self.rnn(x)
# [sequence_len, hidden_size x 2]
x = x.squeeze(1)
# [sequence_len, n_tags]
x = self.out(x)
return x
| [
"torch.nn.MaxPool1d",
"torch.nn.ReLU",
"torch.nn.Dropout",
"sru.SRU",
"torch.nn.Sequential",
"torch.nn.LSTM",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.cat",
"torch.nn.GRU"
]
| [((1774, 1799), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1797, 1799), False, 'import torch\n'), ((1871, 1896), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1894, 1896), False, 'import torch\n'), ((2117, 2142), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2140, 2142), False, 'import torch\n'), ((6207, 6253), 'torch.cat', 'torch.cat', (['[conv2, conv3, conv4, conv5]'], {'dim': '(1)'}), '([conv2, conv3, conv4, conv5], dim=1)\n', (6216, 6253), False, 'import torch\n'), ((6742, 6796), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'padding': '(1)', 'ceil_mode': '(True)'}), '(kernel_size=2, padding=1, ceil_mode=True)\n', (6754, 6796), True, 'import torch.nn as nn\n'), ((7042, 7096), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'padding': '(1)', 'ceil_mode': '(True)'}), '(kernel_size=2, padding=1, ceil_mode=True)\n', (7054, 7096), True, 'import torch.nn as nn\n'), ((7342, 7369), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (7354, 7369), True, 'import torch.nn as nn\n'), ((7615, 7642), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(3)'}), '(kernel_size=3)\n', (7627, 7642), True, 'import torch.nn as nn\n'), ((8312, 8334), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (8325, 8334), True, 'import torch.nn as nn\n'), ((2590, 2633), 'torch.nn.Linear', 'nn.Linear', (['(context_len * in_dim)', 'hidden_dim'], {}), '(context_len * in_dim, hidden_dim)\n', (2599, 2633), True, 'import torch.nn as nn\n'), ((3373, 3413), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(2)'}), '(in_dim, in_dim, kernel_size=2)\n', (3382, 3413), True, 'import torch.nn as nn\n'), ((3435, 3444), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3442, 3444), True, 'import torch.nn as nn\n'), ((3458, 3501), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (3470, 3501), True, 'import torch.nn as nn\n'), ((3523, 3563), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(2)'}), '(in_dim, in_dim, kernel_size=2)\n', (3532, 3563), True, 'import torch.nn as nn\n'), ((3584, 3593), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3591, 3593), True, 'import torch.nn as nn\n'), ((3607, 3650), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (3619, 3650), True, 'import torch.nn as nn\n'), ((3671, 3711), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(2)'}), '(in_dim, in_dim, kernel_size=2)\n', (3680, 3711), True, 'import torch.nn as nn\n'), ((3732, 3741), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3739, 3741), True, 'import torch.nn as nn\n'), ((3755, 3798), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (3767, 3798), True, 'import torch.nn as nn\n'), ((3819, 3859), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(2)'}), '(in_dim, in_dim, kernel_size=2)\n', (3828, 3859), True, 'import torch.nn as nn\n'), ((3927, 3978), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_dim, in_dim, kernel_size=3, padding=1)\n', (3936, 3978), True, 'import torch.nn as nn\n'), ((4000, 4009), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4007, 4009), True, 'import torch.nn as nn\n'), ((4023, 4066), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (4035, 4066), True, 'import torch.nn as nn\n'), ((4088, 4139), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_dim, in_dim, kernel_size=3, padding=1)\n', (4097, 4139), True, 'import torch.nn as nn\n'), ((4161, 4170), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4168, 4170), True, 'import torch.nn as nn\n'), ((4184, 4227), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (4196, 4227), True, 'import torch.nn as nn\n'), ((4248, 4299), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_dim, in_dim, kernel_size=3, padding=1)\n', (4257, 4299), True, 'import torch.nn as nn\n'), ((4320, 4329), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4327, 4329), True, 'import torch.nn as nn\n'), ((4343, 4386), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (4355, 4386), True, 'import torch.nn as nn\n'), ((4407, 4447), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(3)'}), '(in_dim, in_dim, kernel_size=3)\n', (4416, 4447), True, 'import torch.nn as nn\n'), ((4516, 4567), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(4)', 'padding': '(1)'}), '(in_dim, in_dim, kernel_size=4, padding=1)\n', (4525, 4567), True, 'import torch.nn as nn\n'), ((4589, 4598), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4596, 4598), True, 'import torch.nn as nn\n'), ((4612, 4655), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (4624, 4655), True, 'import torch.nn as nn\n'), ((4677, 4728), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(4)', 'padding': '(1)'}), '(in_dim, in_dim, kernel_size=4, padding=1)\n', (4686, 4728), True, 'import torch.nn as nn\n'), ((4749, 4758), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4756, 4758), True, 'import torch.nn as nn\n'), ((4772, 4815), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (4784, 4815), True, 'import torch.nn as nn\n'), ((4836, 4887), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(4)', 'padding': '(1)'}), '(in_dim, in_dim, kernel_size=4, padding=1)\n', (4845, 4887), True, 'import torch.nn as nn\n'), ((4908, 4917), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4915, 4917), True, 'import torch.nn as nn\n'), ((4932, 4972), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(4)'}), '(in_dim, in_dim, kernel_size=4)\n', (4941, 4972), True, 'import torch.nn as nn\n'), ((5040, 5091), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(5)', 'padding': '(2)'}), '(in_dim, in_dim, kernel_size=5, padding=2)\n', (5049, 5091), True, 'import torch.nn as nn\n'), ((5113, 5122), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5120, 5122), True, 'import torch.nn as nn\n'), ((5136, 5179), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (5148, 5179), True, 'import torch.nn as nn\n'), ((5201, 5252), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(5)', 'padding': '(2)'}), '(in_dim, in_dim, kernel_size=5, padding=2)\n', (5210, 5252), True, 'import torch.nn as nn\n'), ((5274, 5283), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5281, 5283), True, 'import torch.nn as nn\n'), ((5297, 5340), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (5309, 5340), True, 'import torch.nn as nn\n'), ((5361, 5412), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(5)', 'padding': '(2)'}), '(in_dim, in_dim, kernel_size=5, padding=2)\n', (5370, 5412), True, 'import torch.nn as nn\n'), ((5433, 5442), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5440, 5442), True, 'import torch.nn as nn\n'), ((5456, 5499), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, ceil_mode=True)\n', (5468, 5499), True, 'import torch.nn as nn\n'), ((5520, 5571), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'in_dim'], {'kernel_size': '(5)', 'padding': '(1)'}), '(in_dim, in_dim, kernel_size=5, padding=1)\n', (5529, 5571), True, 'import torch.nn as nn\n'), ((10063, 10178), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'in_dim', 'hidden_size': 'self.hidden_dim', 'num_layers': 'num_layers', 'dropout': '(0.5)', 'bidirectional': '(True)'}), '(input_size=in_dim, hidden_size=self.hidden_dim, num_layers=\n num_layers, dropout=0.5, bidirectional=True)\n', (10069, 10178), True, 'import torch.nn as nn\n'), ((10306, 10422), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'in_dim', 'hidden_size': 'self.hidden_dim', 'num_layers': 'num_layers', 'dropout': '(0.5)', 'bidirectional': '(True)'}), '(input_size=in_dim, hidden_size=self.hidden_dim, num_layers=\n num_layers, dropout=0.5, bidirectional=True)\n', (10313, 10422), True, 'import torch.nn as nn\n'), ((11713, 11722), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11720, 11722), True, 'import torch.nn as nn\n'), ((11736, 11748), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (11746, 11748), True, 'import torch.nn as nn\n'), ((11762, 11787), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'n_tags'], {}), '(in_dim, n_tags)\n', (11771, 11787), True, 'import torch.nn as nn\n'), ((12233, 12342), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'in_dim', 'hidden_size': 'hidden_dim', 'num_layers': 'num_layers', 'dropout': '(0.5)', 'bidirectional': '(True)'}), '(input_size=in_dim, hidden_size=hidden_dim, num_layers=num_layers,\n dropout=0.5, bidirectional=True)\n', (12239, 12342), True, 'import torch.nn as nn\n'), ((12471, 12581), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'in_dim', 'hidden_size': 'hidden_dim', 'num_layers': 'num_layers', 'dropout': '(0.5)', 'bidirectional': '(True)'}), '(input_size=in_dim, hidden_size=hidden_dim, num_layers=num_layers,\n dropout=0.5, bidirectional=True)\n', (12478, 12581), True, 'import torch.nn as nn\n'), ((12974, 12983), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12981, 12983), True, 'import torch.nn as nn\n'), ((12997, 13009), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (13007, 13009), True, 'import torch.nn as nn\n'), ((13023, 13056), 'torch.nn.Linear', 'nn.Linear', (['(hidden_dim * 2)', 'n_tags'], {}), '(hidden_dim * 2, n_tags)\n', (13032, 13056), True, 'import torch.nn as nn\n'), ((8155, 8207), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_dim', 'out_dim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_dim, out_dim, kernel_size=3, padding=1)\n', (8164, 8207), True, 'import torch.nn as nn\n'), ((8235, 8258), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_dim'], {}), '(out_dim)\n', (8249, 8258), True, 'import torch.nn as nn\n'), ((8286, 8295), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8293, 8295), True, 'import torch.nn as nn\n'), ((10583, 10694), 'sru.SRU', 'SRU', ([], {'input_size': 'in_dim', 'hidden_size': 'self.hidden_dim', 'num_layers': 'num_layers', 'dropout': '(0.5)', 'bidirectional': '(True)'}), '(input_size=in_dim, hidden_size=self.hidden_dim, num_layers=num_layers,\n dropout=0.5, bidirectional=True)\n', (10586, 10694), False, 'from sru import SRU\n'), ((12743, 12849), 'sru.SRU', 'SRU', ([], {'input_size': 'in_dim', 'hidden_size': 'hidden_dim', 'num_layers': 'num_layers', 'dropout': '(0.5)', 'bidirectional': '(True)'}), '(input_size=in_dim, hidden_size=hidden_dim, num_layers=num_layers,\n dropout=0.5, bidirectional=True)\n', (12746, 12849), False, 'from sru import SRU\n')] |
# coding: utf-8
import os.path
try:
from setuptools import setup
extras = dict(zip_safe=False, test_suite='nose.collector', tests_require=['nose'])
except ImportError:
from distutils.core import setup
extras = {}
import apscheduler
here = os.path.dirname(__file__)
readme_path = os.path.join(here, 'README.rst')
readme = open(readme_path).read()
setup(
name='APScheduler',
version=apscheduler.release,
description='In-process task scheduler with Cron-like capabilities',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='http://pypi.python.org/pypi/APScheduler/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3'
],
keywords='scheduling cron',
license='MIT',
packages=('apscheduler', 'apscheduler.jobstores', 'apscheduler.triggers', 'apscheduler.triggers.cron'),
)
| [
"distutils.core.setup"
]
| [((367, 1185), 'distutils.core.setup', 'setup', ([], {'name': '"""APScheduler"""', 'version': 'apscheduler.release', 'description': '"""In-process task scheduler with Cron-like capabilities"""', 'long_description': 'readme', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""http://pypi.python.org/pypi/APScheduler/"""', 'classifiers': "['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3']", 'keywords': '"""scheduling cron"""', 'license': '"""MIT"""', 'packages': "('apscheduler', 'apscheduler.jobstores', 'apscheduler.triggers',\n 'apscheduler.triggers.cron')"}), "(name='APScheduler', version=apscheduler.release, description=\n 'In-process task scheduler with Cron-like capabilities',\n long_description=readme, author='<NAME>', author_email='<EMAIL>', url=\n 'http://pypi.python.org/pypi/APScheduler/', classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3'], keywords='scheduling cron',\n license='MIT', packages=('apscheduler', 'apscheduler.jobstores',\n 'apscheduler.triggers', 'apscheduler.triggers.cron'))\n", (372, 1185), False, 'from distutils.core import setup\n')] |
import os
import sys
import random
def get_next_wallpaper(curr_path):
lst_dir = os.listdir()
rand_index = random.randint(0, len(lst_dir) - 1)
return lst_dir[rand_index]
def get_wall_dir():
return "/Users/MYOUNG/Pictures/mmt"
def main():
script = "osascript -e 'tell application \"Finder\" to set desktop picture to POSIX file '"
path = get_wall_dir()
file = get_next_wallpaper(path)
# print("FILE = ", file)
script = script + path + "/" + file
# print("SCRIPT = ", script)
os.system(script)
main()
| [
"os.system",
"os.listdir"
]
| [((86, 98), 'os.listdir', 'os.listdir', ([], {}), '()\n', (96, 98), False, 'import os\n'), ((514, 531), 'os.system', 'os.system', (['script'], {}), '(script)\n', (523, 531), False, 'import os\n')] |
"""Automated CI tools to run with Nox"""
import nox
from nox import Session
locations = "src", "noxfile.py", "docs/conf.py"
nox.options.sessions = "lint", "tests"
@nox.session(python="3.9")
def tests(session: Session) -> None:
"""Run tests with nox"""
session.run("poetry", "install", external=True)
session.run("pytest", "--cov")
@nox.session(python="3.9")
def lint(session: Session) -> None:
"""Run linting with nox"""
session.install(
"flake8",
"flake8-annotations",
"flake8-bandit",
"flake8-black",
"flake8-bugbear",
"flake8-docstrings",
"flake8-import-order",
)
args = session.posargs or locations
session.run("flake8", *args)
@nox.session(python="3.9")
def black(session: Session) -> None:
"""Run black with nox"""
session.install("black")
args = session.posargs or locations
session.run("black", *args, "--line-length=120")
@nox.session(python="3.9")
def pytype(session: Session) -> None:
"""Run the static type checker."""
args = session.posargs or ["--disable=import-error", *locations]
session.install("pytype")
session.run("pytype", *args)
package = "hypermodern_python"
@nox.session(python=["3.9"])
def typeguard(session: Session) -> None:
"""Run typeguard for type checking with nox"""
args = session.posargs or ["-m", "not e2e"]
session.run("poetry", "install", "--no-dev", external=True)
session.install("pytest", "pytest-mock", "typeguard")
session.run("pytest", f"--typeguard-packages={package}", *args)
@nox.session(python="3.9")
def docs(session: Session) -> None:
"""Build the documentation."""
session.run("poetry", "install", "--no-dev", external=True)
session.install("sphinx", "sphinx-autodoc-typehints")
session.run("sphinx-build", "docs", "docs/_build")
@nox.session(python="3.9")
def coverage(session: Session) -> None:
"""Upload coverage data."""
session.install("coverage[toml]", "codecov")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
| [
"nox.session"
]
| [((167, 192), 'nox.session', 'nox.session', ([], {'python': '"""3.9"""'}), "(python='3.9')\n", (178, 192), False, 'import nox\n'), ((349, 374), 'nox.session', 'nox.session', ([], {'python': '"""3.9"""'}), "(python='3.9')\n", (360, 374), False, 'import nox\n'), ((728, 753), 'nox.session', 'nox.session', ([], {'python': '"""3.9"""'}), "(python='3.9')\n", (739, 753), False, 'import nox\n'), ((945, 970), 'nox.session', 'nox.session', ([], {'python': '"""3.9"""'}), "(python='3.9')\n", (956, 970), False, 'import nox\n'), ((1216, 1243), 'nox.session', 'nox.session', ([], {'python': "['3.9']"}), "(python=['3.9'])\n", (1227, 1243), False, 'import nox\n'), ((1577, 1602), 'nox.session', 'nox.session', ([], {'python': '"""3.9"""'}), "(python='3.9')\n", (1588, 1602), False, 'import nox\n'), ((1854, 1879), 'nox.session', 'nox.session', ([], {'python': '"""3.9"""'}), "(python='3.9')\n", (1865, 1879), False, 'import nox\n')] |
__version__ = "2.1.1"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
import absl.logging
absl.logging.set_verbosity('info')
absl.logging.set_stderrthreshold('info')
absl.logging._warn_preinit_stderr = False
except:
pass
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Files and general utilities
from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH_PRETRAINED_BERT_CACHE,
cached_path, add_start_docstrings, add_end_docstrings,
WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, CONFIG_NAME,
is_tf_available, is_torch_available)
# Tokenizers
from .tokenization_utils import (PreTrainedTokenizer)
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_auto import AutoConfig
from .configuration_bert import BertConfig, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_openai import OpenAIGPTConfig, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_transfo_xl import TransfoXLConfig, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_gpt2 import GPT2Config, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlnet import XLNetConfig, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
# Modeling
if is_torch_available():
from .modeling_utils import (PreTrainedModel, prune_layer, Conv1D)
from .modeling_auto import (AutoModel, AutoModelForSequenceClassification, AutoModelForQuestionAnswering,
AutoModelWithLMHead)
from .modeling_bert import (BertPreTrainedModel, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_openai import (OpenAIGPTPreTrainedModel, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_ctrl import (CTRLPreTrainedModel, CTRLModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel,
XLNetForSequenceClassification, XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple, XLNetForQuestionAnswering,
load_tf_weights_in_xlnet, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlm import (XLMPreTrainedModel , XLMModel,
XLMWithLMHeadModel, XLMForSequenceClassification,
XLMForQuestionAnswering, XLMForQuestionAnsweringSimple,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_roberta import (RobertaForMaskedLM, RobertaModel,
RobertaForSequenceClassification, RobertaForMultipleChoice,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_distilbert import (DistilBertForMaskedLM, DistilBertModel,
DistilBertForSequenceClassification, DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_albert import AlbertForSequenceClassification
# Optimization
from .optimization import (AdamW, ConstantLRSchedule, WarmupConstantSchedule, WarmupCosineSchedule,
WarmupCosineWithHardRestartsSchedule, WarmupLinearSchedule)
if not is_tf_available() and not is_torch_available():
logger.warning("Neither PyTorch nor TensorFlow >= 2.0 have been found."
"Models won't be available and only tokenizers, configuration"
"and file/data utilities can be used.")
| [
"logging.getLogger"
]
| [((494, 521), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (511, 521), False, 'import logging\n')] |
#!/usr/bin/env python
"""
Info: This script loads the model trained in the cnn-asl.py script and enables the user to use it for classifying unseen ASL letters. It also visualizes the feature map of the last convolutional layer of the network to enable the user to get an insight into exactly which parts of the original image that the model is paying attention to when classifying the image.
Parameters:
(optional) model_name: str <name-of-the-model-to-load>, default = "saved_model.json"
(optional) train_data: str <name-of-training-data>, default = "asl_alphabet_train_subset"
(optional) unseen_image: str <name-of-unseen-image>, default = "unseen_img_test1.png"
Usage:
$ python use-model.py
Output:
- unseen_image_superimposed_heatmap.png: superimposed heatmap on unseen image.
- unseen_image_prediction.txt: model prediction of unseen image.
"""
### DEPENDENCIES ###
# Core libraries
import os
import sys
sys.path.append(os.path.join(".."))
# Matplotlib, numpy, OpenCV
import matplotlib.pyplot as plt
import numpy as np
import cv2
# TensorFlow
import tensorflow as tf
from tensorflow.keras.preprocessing.image import (load_img, img_to_array)
from tensorflow.keras.applications.resnet import preprocess_input
from tensorflow.keras.models import model_from_json
from tensorflow.keras import backend as K
# argparse
import argparse
### MAIN FUNCTION ###
def main():
### ARGPARSE ###
# Initialize ArgumentParser class
ap = argparse.ArgumentParser()
# Argument 1: Model name
ap.add_argument("-m", "--model_name",
type = str,
required = False, # the argument is not required
help = "Name of the model",
default = "saved_model.json") # default name
# Argument 2: Training data
ap.add_argument("-t", "--train_data",
type = str,
required = False, # the argument is not required
help = "Name of training data folder",
default = "asl_alphabet_train_subset") # default is a subset of the training dataset
# Argument 3: Input image
ap.add_argument("-u", "--unseen_image",
type = str,
required = False, # the argument is not required
help = "Name of the image the model should classify",
default = "unseen_img_test1.png") # default unseen image provided in the unseen_images folder
# Parse arguments
args = vars(ap.parse_args())
# Save input parameters
model_name = args["model_name"]
train_data = os.path.join("..", "data", "subset_asl_sign_language", args["train_data"])
unseen_image = args["unseen_image"]
# Create output directory if it does not already exist
if not os.path.exists(os.path.join("..", "output")):
os.mkdir(os.path.join("..", "output"))
# Start message
print("\n[INFO] Initializing...")
# Instantiate the class
classifier = Loaded_model_classifier(train_data, unseen_image)
# Create list of label names from the directory names in the training data folder
labels = classifier.list_labels()
# Load the model
print(f"\n[INFO] Loading the CNN model, {model_name}, from 'output' directory...")
model = classifier.load_model(model_name)
# Classify input image
print(f"\n[INFO] Using the model to predict the class of {unseen_image}...")
label = classifier.classify_unseen_image(labels, model)
# Visualize feature map of network for input image
print(f"\n[INFO] Visualizing the feature map of the last convolutional layer of the network...")
classifier.visualize_feature_map(model)
# User message
print(f"\n[INFO] Done! The {unseen_image} has been classified as {label} and the feature map of the last convolutional layer of the network has been visualized and saved as {unseen_image}_superimposed_heatmap.png in 'output' directory\n")
# Creating classifier class
class Loaded_model_classifier:
def __init__(self, train_data, unseen_image):
# Receive inputs: train data and input image
self.train_data = train_data
self.unseen_image = unseen_image
def list_labels(self):
"""
This method defines the label names by listing the names of the folders within training directory without listing hidden files. It sorts the names alphabetically.
"""
# Create empty list
labels = []
# For every name in training directory
for name in os.listdir(self.train_data):
# If it does not start with . (which hidden files do)
if not name.startswith('.'):
labels.append(name)
# Sort labels alphabetically
labels = sorted(labels)
return labels
def load_model(self, model_name):
"""
This method loads the model and the model weights that are saved in the output directory.
"""
# Load JSON-file and create model
model_path = os.path.join("..", "output", model_name)
json_model = open(model_path, "r")
# Read file
loaded_file = json_model.read()
# Create model
loaded_model = model_from_json(loaded_file)
# Load weights into new model
loaded_model.load_weights(os.path.join("..", "output", "model_weights.h5"))
# Compile model
loaded_model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return loaded_model
def classify_unseen_image(self, labels, model):
"""
This method takes an unseen image, performs some preprocessing to prepare it for the model, and predicts the class of the image using the model.
"""
# Define path
img_path = os.path.join("..", "data", "unseen_images", self.unseen_image)
# Load unseen image
image = load_img(img_path, target_size=(224, 224)) # using the same size as the images the model has been trained on
# Convert the image to a numpy array
image = img_to_array(image)
# Reshape the image, because the model expects a tensor of rank 4. The image goes from being 3-dimensional to 4-dimensional: (1, 224, 224, 3)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
# Prepare the image for the ResNet50 model
image = preprocess_input(image)
# Predict the class of the image
prediction = np.argmax(model.predict(image))
# Convert labels to be a dictionary which is needed to extract the label that corresponds to the prediction
labels = dict(zip(labels, range(len(labels))))
# Define function that finds the key (letter) that corresponds to the predicted value
def find_key(dictionary, value):
return {k for k, v in dictionary.items() if v == value}
# Extract letter that corresponds to the predicted value from the label dictionary
label = find_key(labels, prediction)
# Print the predicted class to the terminal
print(f"\nThe model predicts {self.unseen_image} to be the letter {label}")
# Save prediction as txt-file to output directory
with open(os.path.join("..", "output", f"{self.unseen_image}_prediction.txt"), "w") as f:
f.write(f"The predicted class of the {self.unseen_image} made by the model is {label}")
return label
def visualize_feature_map(self, model):
"""
This method visualizes the feature map of the last convolutional layer of the network.
"""
# Define path
img_path = os.path.join("..", "data", "unseen_images", self.unseen_image)
# Load image with dimensions corresponding to training images
img = load_img(img_path, target_size=(224, 224))
# Convert image to array
x = img_to_array(img)
# Convert to rank 4 tensor
x = np.expand_dims(x, axis=0)
# Preprocess to be in line with ResNet50 data
x = preprocess_input(x)
# Create activation heatmap for final layer. This is done by taking advantage of how the model learns through gradient descent. We use the gradients that have been learned through training, and we go the opposite way (rather than minimizing we are maximizing). Essentially, we make use of the gradients in the final layer to highlight which regions are particularly informative when predicting a given class.
with tf.GradientTape() as tape:
# Take the last convolutional layer in the network
last_conv_layer = model.get_layer('conv5_block3_out')
# Create a model that maps the input image to the activations of the last convolutional layer as well as the output predictions
iterate = tf.keras.models.Model([model.inputs],
[model.output, last_conv_layer.output])
# Compute the gradient of the top predicted class for the input image with respect to the activations of the last conv layer
# Take the gradients from the last layer
model_out, last_conv_layer = iterate(x)
# Find the class that has been predicted by the model
class_out = model_out[:, np.argmax(model_out[0])]
# Extract gradient of the output neuron of the last convolutional layer
grads = tape.gradient(class_out,
last_conv_layer)
# Vector of mean intensity of the gradient over a specific feature map channel
pooled_grads = K.mean(grads, axis=(0, 1, 2))
# Multiply each channel in the feature map array by "how important this channel is" with regard to the top predicted class. Then sum all the channels to obtain the heatmap class activation
heatmap = tf.reduce_mean(tf.multiply(pooled_grads, last_conv_layer), axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
heatmap = heatmap.reshape((7,7))
plt.matshow(heatmap)
# Load unseen image with OpenCV
img = cv2.imread(img_path)
# Make heatmap semi-transparent
intensity = 0.5
# Resize the heatmap to be the original dimensions of the input
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
# Apply colormap
heatmap = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)
# Multiply heatmap by intensity and 'add' this on top of the original image
superimposed = (heatmap * intensity) + img
# Save the superimposed image to output directory
cv2.imwrite(os.path.join("..", "output", f"{self.unseen_image}_superimposed_heatmap.png"), superimposed)
# User message
print(f"\n[INFO] The feature map has now been visualized and superimposed on {self.unseen_image}. Find image as {self.unseen_image}_superimposed_heatmap.png in 'output' directory...")
# Define behaviour when called from command line
if __name__=="__main__":
main() | [
"numpy.uint8",
"tensorflow.multiply",
"tensorflow.GradientTape",
"os.listdir",
"tensorflow.keras.backend.mean",
"argparse.ArgumentParser",
"numpy.max",
"tensorflow.keras.models.Model",
"numpy.maximum",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.applications.resnet.preprocess_input",
"numpy.argmax",
"cv2.resize",
"matplotlib.pyplot.matshow",
"cv2.imread",
"tensorflow.keras.models.model_from_json",
"os.path.join",
"numpy.expand_dims"
]
| [((960, 978), 'os.path.join', 'os.path.join', (['""".."""'], {}), "('..')\n", (972, 978), False, 'import os\n'), ((1486, 1511), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1509, 1511), False, 'import argparse\n'), ((2663, 2737), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""subset_asl_sign_language"""', "args['train_data']"], {}), "('..', 'data', 'subset_asl_sign_language', args['train_data'])\n", (2675, 2737), False, 'import os\n'), ((4653, 4680), 'os.listdir', 'os.listdir', (['self.train_data'], {}), '(self.train_data)\n', (4663, 4680), False, 'import os\n'), ((5190, 5230), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', 'model_name'], {}), "('..', 'output', model_name)\n", (5202, 5230), False, 'import os\n'), ((5390, 5418), 'tensorflow.keras.models.model_from_json', 'model_from_json', (['loaded_file'], {}), '(loaded_file)\n', (5405, 5418), False, 'from tensorflow.keras.models import model_from_json\n'), ((6039, 6101), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""unseen_images"""', 'self.unseen_image'], {}), "('..', 'data', 'unseen_images', self.unseen_image)\n", (6051, 6101), False, 'import os\n'), ((6159, 6201), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (6167, 6201), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((6342, 6361), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (6354, 6361), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((6690, 6713), 'tensorflow.keras.applications.resnet.preprocess_input', 'preprocess_input', (['image'], {}), '(image)\n', (6706, 6713), False, 'from tensorflow.keras.applications.resnet import preprocess_input\n'), ((8070, 8132), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""unseen_images"""', 'self.unseen_image'], {}), "('..', 'data', 'unseen_images', self.unseen_image)\n", (8082, 8132), False, 'import os\n'), ((8230, 8272), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (8238, 8272), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((8331, 8348), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (8343, 8348), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((8409, 8434), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (8423, 8434), True, 'import numpy as np\n'), ((8515, 8534), 'tensorflow.keras.applications.resnet.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (8531, 8534), False, 'from tensorflow.keras.applications.resnet import preprocess_input\n'), ((2864, 2892), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""'], {}), "('..', 'output')\n", (2876, 2892), False, 'import os\n'), ((2912, 2940), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""'], {}), "('..', 'output')\n", (2924, 2940), False, 'import os\n'), ((5496, 5544), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', '"""model_weights.h5"""'], {}), "('..', 'output', 'model_weights.h5')\n", (5508, 5544), False, 'import os\n'), ((8983, 9000), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8998, 9000), True, 'import tensorflow as tf\n'), ((9335, 9412), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (['[model.inputs]', '[model.output, last_conv_layer.output]'], {}), '([model.inputs], [model.output, last_conv_layer.output])\n', (9356, 9412), True, 'import tensorflow as tf\n'), ((10201, 10230), 'tensorflow.keras.backend.mean', 'K.mean', (['grads'], {'axis': '(0, 1, 2)'}), '(grads, axis=(0, 1, 2))\n', (10207, 10230), True, 'from tensorflow.keras import backend as K\n'), ((10557, 10579), 'numpy.maximum', 'np.maximum', (['heatmap', '(0)'], {}), '(heatmap, 0)\n', (10567, 10579), True, 'import numpy as np\n'), ((10603, 10618), 'numpy.max', 'np.max', (['heatmap'], {}), '(heatmap)\n', (10609, 10618), True, 'import numpy as np\n'), ((10676, 10696), 'matplotlib.pyplot.matshow', 'plt.matshow', (['heatmap'], {}), '(heatmap)\n', (10687, 10696), True, 'import matplotlib.pyplot as plt\n'), ((10776, 10796), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (10786, 10796), False, 'import cv2\n'), ((10995, 11044), 'cv2.resize', 'cv2.resize', (['heatmap', '(img.shape[1], img.shape[0])'], {}), '(heatmap, (img.shape[1], img.shape[0]))\n', (11005, 11044), False, 'import cv2\n'), ((7609, 7676), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', 'f"""{self.unseen_image}_prediction.txt"""'], {}), "('..', 'output', f'{self.unseen_image}_prediction.txt')\n", (7621, 7676), False, 'import os\n'), ((10482, 10524), 'tensorflow.multiply', 'tf.multiply', (['pooled_grads', 'last_conv_layer'], {}), '(pooled_grads, last_conv_layer)\n', (10493, 10524), True, 'import tensorflow as tf\n'), ((11127, 11150), 'numpy.uint8', 'np.uint8', (['(255 * heatmap)'], {}), '(255 * heatmap)\n', (11135, 11150), True, 'import numpy as np\n'), ((11423, 11500), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', 'f"""{self.unseen_image}_superimposed_heatmap.png"""'], {}), "('..', 'output', f'{self.unseen_image}_superimposed_heatmap.png')\n", (11435, 11500), False, 'import os\n'), ((9838, 9861), 'numpy.argmax', 'np.argmax', (['model_out[0]'], {}), '(model_out[0])\n', (9847, 9861), True, 'import numpy as np\n')] |
from Algorithmia import ADK
# API calls will begin at the apply() method, with the request body passed as 'input'
# For more details, see algorithmia.com/developers/algorithm-development/languages
def apply(input):
# If your apply function uses state that's loaded into memory via load, you can pass that loaded state to your apply
# function by defining an additional "globals" parameter in your apply function; but it's optional!
return "hello {}".format(str(input))
# This turns your library code into an algorithm that can run on the platform.
# If you intend to use loading operations, remember to pass a `load` function as a second variable.
algorithm = ADK(apply)
# The 'init()' function actually starts the algorithm, you can follow along in the source code
# to see how everything works.
algorithm.init("Algorithmia")
| [
"Algorithmia.ADK"
]
| [((677, 687), 'Algorithmia.ADK', 'ADK', (['apply'], {}), '(apply)\n', (680, 687), False, 'from Algorithmia import ADK\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Callable, Optional, List, Tuple
import pandas as pd
from autogluon.tabular import TabularPredictor as AutogluonTabularPredictor
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.dataset.util import to_pandas
from gluonts.model.estimator import Estimator
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from .predictor import (
TabularPredictor,
mean_abs_scaling,
get_features_dataframe,
)
logger = logging.getLogger(__name__)
class TabularEstimator(Estimator):
"""An estimator that trains an Autogluon Tabular model for time series
forecasting.
Additional keyword arguments to the constructor, other than the ones documented
below, will be passed on to Autogluon Tabular's ``fit`` method used for training
the model.
Parameters
----------
freq
Frequency of the data to handle
prediction_length
Prediction length
lag_indices
List of indices of the lagged observations to use as features. If
None, this will be set automatically based on the frequency.
time_features
List of time features to be used. If None, this will be set automatically
based on the frequency.
scaling
Function to be used to scale time series. This should take a pd.Series object
as input, and return a scaled pd.Series and the scale (float). By default,
this divides a series by the mean of its absolute value.
batch_size
Batch size of the resulting predictor; this is just used at prediction
time, and does not affect training in any way.
disable_auto_regression
Whether to forecefully disable auto-regression in the model. If ``True``,
this will remove any lag index which is smaller than ``prediction_length``.
This will make predictions more efficient, but may impact their accuracy.
quantiles_to_predict
Whether to forecast in quantile way. If assigned with quantile values,
this will train model using quantile prediction model. If None, then the model
will be trained in a regular way.
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
lag_indices: Optional[List[int]] = None,
time_features: Optional[List[TimeFeature]] = None,
scaling: Callable[
[pd.Series], Tuple[pd.Series, float]
] = mean_abs_scaling,
batch_size: Optional[int] = 32,
disable_auto_regression: bool = False,
last_k_for_val: Optional[int] = None,
quantiles_to_predict: Optional[List[float]] = None,
eval_metric: str = "mean_absolute_error",
**kwargs,
) -> None:
super().__init__()
self.freq = freq
self.prediction_length = prediction_length
self.lag_indices = (
lag_indices
if lag_indices is not None
else get_lags_for_frequency(self.freq)
)
self.time_features = (
time_features
if time_features is not None
else time_features_from_frequency_str(self.freq)
)
self.batch_size = batch_size
self.disable_auto_regression = disable_auto_regression
self.scaling = scaling
self.last_k_for_val = last_k_for_val
self.eval_metric = eval_metric
self.quantiles_to_predict = quantiles_to_predict
if self.disable_auto_regression:
self.lag_indices = [
lag_idx
for lag_idx in self.lag_indices
if lag_idx >= self.prediction_length
]
default_kwargs = {
"time_limit": 60,
# "excluded_model_types": ["KNN", "XT", "RF"],
"presets": [
"high_quality_fast_inference_only_refit",
"optimize_for_deployment",
],
"auto_stack": True,
}
self.kwargs = {**default_kwargs, **kwargs}
def train(
self,
training_data: Dataset,
validation_data: Optional[Dataset] = None,
) -> TabularPredictor:
kwargs_override = {}
dfs = [
get_features_dataframe(
series=self.scaling(to_pandas(entry))[0],
time_features=self.time_features,
lag_indices=self.lag_indices,
)
for entry in training_data
]
if validation_data is not None or self.last_k_for_val is not None:
kwargs_override["auto_stack"] = False
logger.warning(
"Auto Stacking is turned off "
"as validation dataset is provided before input into Tabular Predictor."
)
if validation_data is not None:
logger.log(20, "Validation dataset is directly provided.")
validation_dfs = [
get_features_dataframe(
series=self.scaling(to_pandas(entry))[0],
time_features=self.time_features,
lag_indices=self.lag_indices,
)
for entry in validation_data
]
train_df = pd.concat(dfs)
val_df = pd.concat(validation_dfs)
elif self.last_k_for_val is not None:
logger.log(
20,
f"last_k_for_val is provided, choosing last {self.last_k_for_val} of each time series as validation set.",
)
train_dfs = [
tmp_df.iloc[: -self.last_k_for_val, :] for tmp_df in dfs
]
validation_dfs = [
tmp_df.iloc[-self.last_k_for_val :, :] for tmp_df in dfs
]
train_df = pd.concat(train_dfs)
val_df = pd.concat(validation_dfs)
else:
logger.log(
20,
"No validation dataset is provided, will let TabularPredictor do the splitting automatically,"
"Note that this might break the time order of time series data.",
)
train_df = pd.concat(dfs)
val_df = None
if self.quantiles_to_predict is not None:
ag_model = AutogluonTabularPredictor(
label="target",
problem_type="quantile",
quantile_levels=self.quantiles_to_predict,
).fit(
train_df,
tuning_data=val_df,
**{**self.kwargs, **kwargs_override},
)
else:
ag_model = AutogluonTabularPredictor(
label="target",
problem_type="regression",
eval_metric=self.eval_metric,
).fit(
train_df,
tuning_data=val_df,
**{**self.kwargs, **kwargs_override},
)
return TabularPredictor(
ag_model=ag_model,
freq=self.freq,
prediction_length=self.prediction_length,
time_features=self.time_features,
lag_indices=self.lag_indices,
scaling=self.scaling,
batch_size=self.batch_size,
quantiles_to_predict=self.quantiles_to_predict,
)
| [
"logging.getLogger",
"autogluon.tabular.TabularPredictor",
"gluonts.time_feature.time_features_from_frequency_str",
"gluonts.time_feature.get_lags_for_frequency",
"gluonts.dataset.util.to_pandas",
"pandas.concat",
"gluonts.core.component.validated"
]
| [((1145, 1172), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1162, 1172), False, 'import logging\n'), ((2824, 2835), 'gluonts.core.component.validated', 'validated', ([], {}), '()\n', (2833, 2835), False, 'from gluonts.core.component import validated\n'), ((3622, 3655), 'gluonts.time_feature.get_lags_for_frequency', 'get_lags_for_frequency', (['self.freq'], {}), '(self.freq)\n', (3644, 3655), False, 'from gluonts.time_feature import TimeFeature, get_lags_for_frequency, time_features_from_frequency_str\n'), ((3781, 3824), 'gluonts.time_feature.time_features_from_frequency_str', 'time_features_from_frequency_str', (['self.freq'], {}), '(self.freq)\n', (3813, 3824), False, 'from gluonts.time_feature import TimeFeature, get_lags_for_frequency, time_features_from_frequency_str\n'), ((5864, 5878), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (5873, 5878), True, 'import pandas as pd\n'), ((5900, 5925), 'pandas.concat', 'pd.concat', (['validation_dfs'], {}), '(validation_dfs)\n', (5909, 5925), True, 'import pandas as pd\n'), ((6407, 6427), 'pandas.concat', 'pd.concat', (['train_dfs'], {}), '(train_dfs)\n', (6416, 6427), True, 'import pandas as pd\n'), ((6449, 6474), 'pandas.concat', 'pd.concat', (['validation_dfs'], {}), '(validation_dfs)\n', (6458, 6474), True, 'import pandas as pd\n'), ((6763, 6777), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (6772, 6777), True, 'import pandas as pd\n'), ((6878, 6991), 'autogluon.tabular.TabularPredictor', 'AutogluonTabularPredictor', ([], {'label': '"""target"""', 'problem_type': '"""quantile"""', 'quantile_levels': 'self.quantiles_to_predict'}), "(label='target', problem_type='quantile',\n quantile_levels=self.quantiles_to_predict)\n", (6903, 6991), True, 'from autogluon.tabular import TabularPredictor as AutogluonTabularPredictor\n'), ((7223, 7325), 'autogluon.tabular.TabularPredictor', 'AutogluonTabularPredictor', ([], {'label': '"""target"""', 'problem_type': '"""regression"""', 'eval_metric': 'self.eval_metric'}), "(label='target', problem_type='regression',\n eval_metric=self.eval_metric)\n", (7248, 7325), True, 'from autogluon.tabular import TabularPredictor as AutogluonTabularPredictor\n'), ((4931, 4947), 'gluonts.dataset.util.to_pandas', 'to_pandas', (['entry'], {}), '(entry)\n', (4940, 4947), False, 'from gluonts.dataset.util import to_pandas\n'), ((5638, 5654), 'gluonts.dataset.util.to_pandas', 'to_pandas', (['entry'], {}), '(entry)\n', (5647, 5654), False, 'from gluonts.dataset.util import to_pandas\n')] |
from XDR_iocs import *
import pytest
from freezegun import freeze_time
Client.severity = 'INFO'
client = Client({'url': 'test'})
def d_sort(in_dict):
return sorted(in_dict.items())
class TestGetHeaders:
@freeze_time('2020-06-01T00:00:00Z')
def test_sanity(self, mocker):
"""
Given:
- API key
- API key ID
Then:
- Verify headers created correct.
"""
params = {
"apikey_id": "7",
"apikey": "<KEY>" # noqa: E501
}
headers = {
'Authorization': 'da94963b561e3c95899d843b1284cecf410606e9e809be528ec1cf03880c6e9e',
'x-iocs-source': 'xsoar',
'x-xdr-auth-id': '7',
'x-xdr-nonce': '1111111111111111111111111111111111111111111111111111111111111111',
'x-xdr-timestamp': '1590969600000'
}
mocker.patch('secrets.choice', return_value='1')
output = get_headers(params)
assert output == headers, f'get_headers({params})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(headers)}'
def test_empty_case(self):
"""
Given:
Empty params
Then:
get_headers will not raise error
"""
get_headers({})
class TestHttpRequest:
class Res:
content = 'error'.encode()
def __init__(self, code):
self.status_code = code
@staticmethod
def json():
return {}
XDR_SERVER_ERROR = 500
INVALID_CREDS = 401
LICENSE_ERROR = 402
PERMISSION_ERROR = 403
OK = 200
data_test_http_request_error_codes = [
(OK, {}),
(XDR_SERVER_ERROR, 'XDR internal server error.\t(error)'),
(INVALID_CREDS, 'Unauthorized access. An issue occurred during authentication. This can indicate an incorrect key, id, or other invalid authentication parameters.\t(error)'), # noqa: E501
(LICENSE_ERROR, 'Unauthorized access. User does not have the required license type to run this API.\t(error)'),
(PERMISSION_ERROR, 'Unauthorized access. The provided API key does not have the required RBAC permissions to run this API.\t(error)') # noqa: E501
]
@pytest.mark.parametrize('res, expected_output', data_test_http_request_error_codes)
def test_http_request_error_codes(self, res, expected_output, mocker):
"""
Given:
- Status code
When:
- http_request returns this status code.
Then:
- Verify error/success format.
"""
mocker.patch('requests.post', return_value=self.Res(res))
try:
output = client.http_request('', {})
except DemistoException as error:
output = str(error)
assert output == expected_output, f'status code {res}\n\treturns: {output}\n\tinstead: {expected_output}'
class TestGetRequestsKwargs:
def test_with_file(self, mocker):
"""
Given:
- file to upload
Then:
- Verify output format.
"""
def override_open(open_path, *_other):
return open_path
mocker.patch('builtins.open', side_effect=override_open)
path = '/Users/some_user/some_dir/some_file.file'
output = get_requests_kwargs(file_path=path)
expected_output = {'files': [('file', ('iocs.json', path, 'application/json'))]}
assert output == expected_output, f'get_requests_kwargs(file_path={path})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
def test_with_json(self):
"""
Given:
- simple json
Then:
- the json ready to send
"""
_json = {'test': 'test'}
output = get_requests_kwargs(_json=_json)
expected_output = {'data': '{"request_data": {"test": "test"}}'}
assert output == expected_output, f'get_requests_kwargs(_json={_json})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
class TestPrepareCommands:
def test_prepare_get_changes(self):
"""
Given:
- get changes command
Then:
- Verify url and json format.
"""
ts = int(datetime.now(timezone.utc).timestamp() * 1000)
url_suffix, _json = prepare_get_changes(ts)
assert url_suffix == 'get_changes', f'prepare_get_changes\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: get_changes' # noqa: E501
assert _json == {'last_update_ts': ts}
def test_prepare_enable_iocs(self):
"""
Given:
- enable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_enable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'enable_iocs', f'prepare_enable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: enable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
def test_prepare_disable_iocs(self):
"""
Given:
- disable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_disable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'disable_iocs', f'prepare_disable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: disable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
class TestCreateFile:
path = 'test_data/sync_file_test.json'
data_test_create_file_sync = [
('Domain_iocs', 'Domain_sync_file'),
('IP_iocs', 'IP_sync_file'),
('File_iocs', 'File_sync_file')
]
data_test_create_file_iocs_to_keep = [
('Domain_iocs', 'Domain_iocs_to_keep_file'),
('IP_iocs', 'IP_iocs_to_keep_file'),
('File_iocs', 'File_iocs_to_keep_file')
]
def setup(self):
# creates the file
with open(TestCreateFile.path, 'w') as _file:
_file.write('')
def teardown(self):
# removes the file when done
os.remove(TestCreateFile.path)
@staticmethod
def get_file(path):
with open(path, 'r') as _file:
return _file.read()
@staticmethod
def get_all_iocs(go_over, extension):
iocs = []
total = 0
data = []
for in_iocs, out_iocs in go_over:
ioc = json.loads(TestCreateFile.get_file(f'test_data/{in_iocs}.json'))
iocs.extend(ioc['iocs'])
total += ioc['total']
data.append(TestCreateFile.get_file(f'test_data/{out_iocs}.{extension}'))
all_iocs = {'iocs': iocs, 'total': total}
all_data = ''.join(data)
return all_iocs, all_data
def test_create_file_sync_without_iocs(self, mocker):
"""
Given:
- Sync command
When:
- there is no iocs
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_sync with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_sync)
def test_create_file_sync(self, in_iocs, out_iocs, mocker):
"""
Given:
- Sync command
When:
- iocs type is a specific type.
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(self.get_file(f'test_data/{in_iocs}.json'))) # noqa: E501
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_sync with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
def test_create_file_sync_all_types(self, mocker):
"""
Given:
- Sync command
When:
- iocs as all types
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
data_test_create_file_with_empty_indicators = [
{},
{'value': '11.11.11.11'},
{'indicator_type': 'IP'}
]
@pytest.mark.parametrize('defective_indicator', data_test_create_file_with_empty_indicators)
def test_create_file_sync_with_empty_indicators(self, defective_indicator, mocker):
"""
Given:
- Sync command
When:
- a part iocs dont have all required data
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
all_iocs['iocs'].append(defective_indicator)
all_iocs['total'] += 1
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
warnings = mocker.patch.object(demisto, 'debug')
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
error_msg = warnings.call_args.args[0]
assert error_msg.startswith("unexpected IOC format in key: '"), f"create_file_sync empty message\n\tstarts: {error_msg}\n\tinstead: unexpected IOC format in key: '" # noqa: E501
assert error_msg.endswith(f"', {str(defective_indicator)}"), f"create_file_sync empty message\n\tends: {error_msg}\n\tinstead: ', {str(defective_indicator)}" # noqa: E501
def test_create_file_iocs_to_keep_without_iocs(self, mocker):
"""
Given:
- iocs to keep command
When:
- there is no iocs
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_iocs_to_keep with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_iocs_to_keep)
def test_create_file_iocs_to_keep(self, in_iocs, out_iocs, mocker):
"""
Given:
- iocs to keep command
When:
- iocs type is a specific type.
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(
self.get_file(f'test_data/{in_iocs}.json')))
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_iocs_to_keep with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}' # noqa: E501
def test_create_file_iocs_to_keep_all_types(self, mocker):
"""
Given:
- iocs to keep command
When:
- iocs as all types
Then:
- Verify iocs to keep file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_iocs_to_keep, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_iocs_to_keep with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
class TestDemistoIOCToXDR:
data_test_demisto_expiration_to_xdr = [
(None, -1),
('', -1),
('0001-01-01T00:00:00Z', -1),
('2020-06-03T00:00:00Z', 1591142400000)
]
@pytest.mark.parametrize('demisto_expiration, xdr_expiration', data_test_demisto_expiration_to_xdr)
def test_demisto_expiration_to_xdr(self, demisto_expiration, xdr_expiration):
"""
Given:
- demisto indicator expiration
Then:
- Verify XDR expiration.
"""
output = demisto_expiration_to_xdr(demisto_expiration)
assert xdr_expiration == output, f'demisto_expiration_to_xdr({demisto_expiration})\n\treturns: {output}\n\tinstead: {xdr_expiration}' # noqa: E501
data_test_demisto_reliability_to_xdr = [
(None, 'F'),
('A - Completely reliable', 'A'),
('B - Usually reliable', 'B'),
('C - Fairly reliable', 'C'),
('D - Not usually reliable', 'D'),
('E - Unreliable', 'E'),
('F - Reliability cannot be judged', 'F')
]
@pytest.mark.parametrize('demisto_reliability, xdr_reliability', data_test_demisto_reliability_to_xdr)
def test_demisto_reliability_to_xdr(self, demisto_reliability, xdr_reliability):
"""
Given:
- demisto indicator reliability
Then:
- Verify XDR reliability.
"""
output = demisto_reliability_to_xdr(demisto_reliability)
assert output == xdr_reliability, f'demisto_reliability_to_xdr({demisto_reliability})\n\treturns: {output}\n\tinstead: {xdr_reliability}' # noqa: E501
data_test_demisto_types_to_xdr = [
('File', 'HASH'),
('IP', 'IP'),
('Domain', 'DOMAIN_NAME')
]
@pytest.mark.parametrize('demisto_type, xdr_type', data_test_demisto_types_to_xdr)
def test_demisto_types_to_xdr(self, demisto_type, xdr_type):
"""
Given:
- demisto indicator type
Then:
- Verify XDR type.
"""
output = demisto_types_to_xdr(demisto_type)
assert output == xdr_type, f'demisto_reliability_to_xdr({demisto_type})\n\treturns: {output}\n\tinstead: {xdr_type}'
data_test_demisto_vendors_to_xdr = [
(
{'moduleID': {'sourceBrand': 'test', 'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'moduleID', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'sourceBrand': 'test', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 0}},
{'vendor_name': 'moduleID', 'reputation': 'UNKNOWN', 'reliability': 'A'}
)
]
@pytest.mark.parametrize('demisto_vendor, xdr_vendor', data_test_demisto_vendors_to_xdr)
def test_demisto_vendors_to_xdr(self, demisto_vendor, xdr_vendor):
"""
Given:
- demisto indicator vendors reports.
Then:
- Verify XDR vendors format.
"""
output = demisto_vendors_to_xdr(demisto_vendor)[0]
assert output == xdr_vendor, f'demisto_vendors_to_xdr({demisto_vendor})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_vendor)}' # noqa: E501
data_test_demisto_ioc_to_xdr = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 100, 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO', 'type': '100'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'expiration': '2020-06-03T00:00:00Z'},
{'expiration_date': 1591142400000, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentTimeLine', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'test'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}, {'type': 'IndicatorCommentRegular', 'content': 'this is the comment'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'this is the comment'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'aggregatedReliability': 'A - Completely reliable'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'reliability': 'A'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'CustomFields': {'threattypes': {'threatcategory': 'Malware'}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'class': 'Malware'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'moduleToFeedMap': {'module': {'sourceBrand': 'test', 'score': 2}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'vendors': [{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}]} # noqa: E501
)
]
@pytest.mark.parametrize('demisto_ioc, xdr_ioc', data_test_demisto_ioc_to_xdr)
def test_demisto_ioc_to_xdr(self, demisto_ioc, xdr_ioc):
"""
Given:
- demisto indicator.
Then:
- Verify XDR indicator format.
"""
output = demisto_ioc_to_xdr(demisto_ioc)
assert output == xdr_ioc, f'demisto_ioc_to_xdr({demisto_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_ioc)}' # noqa: E501
def test_empty_demisto_ioc_to_xdr(self, mocker):
warnings = mocker.patch.object(demisto, 'debug')
output = demisto_ioc_to_xdr({})
assert output == {}, 'demisto_ioc_to_xdr({})\n\treturns: ' + str(d_sort(output)) + '\n\tinstead: {}'
assert warnings.call_args.args[0] == "unexpected IOC format in key: 'value', {}"
class TestXDRIOCToDemisto:
data_test_xdr_expiration_to_demisto = [
(-1, 'Never'),
(1591142400000, '2020-06-03T00:00:00Z'),
(1592142400000, '2020-06-14T13:46:40Z')
]
@pytest.mark.parametrize('xdr_expiration, demisto_expiration', data_test_xdr_expiration_to_demisto)
def test_xdr_expiration_to_demisto(self, xdr_expiration, demisto_expiration):
"""
Given:
- expiration in XDR format.
Then:
- expiration in demisto format.
"""
output = xdr_expiration_to_demisto(xdr_expiration)
assert output == demisto_expiration, f'xdr_expiration_to_demisto({xdr_expiration})\n\treturns: {output}\n\tinstead: {demisto_expiration}' # noqa: E501
data_test_xdr_ioc_to_demisto = [
(
{
'RULE_ID': 863, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801230, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'HASH',
'RULE_INDICATOR': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e',
'type': 'File',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'disabled'
}
}
),
(
{
'RULE_ID': 861, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.com', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'test.com',
'type': 'Domain',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'disabled'
}
}
),
(
{
'RULE_ID': 862, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'ENABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.co.il',
'REPUTATION': 'SUSPICIOUS', 'RELIABILITY': 'A',
'VENDORS': [{'vendor_name': 'Cortex XDR - IOC', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}],
'KLASS': None,
'IS_DEFAULT_TTL': False, 'RULE_TTL': -1, 'MARKED_DELETED': 0
},
{
'value': 'test.co.il',
'type': 'Domain',
'score': 2,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'enabled'
}
}
)
]
@pytest.mark.parametrize('xdr_ioc, demisto_ioc', data_test_xdr_ioc_to_demisto)
def test_xdr_ioc_to_demisto(self, xdr_ioc, demisto_ioc, mocker):
"""
Given:
- IOC in XDR format.
Then:
- IOC in demisto format.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
output = xdr_ioc_to_demisto(xdr_ioc)
del output['rawJSON']
assert output == demisto_ioc, f'xdr_ioc_to_demisto({xdr_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(demisto_ioc)}' # noqa: E501
class TestCommands:
# test commands full flow
class TestIOCSCommand:
def test_iocs_command_with_enable(self, mocker):
"""
Given:
- enable command
Then:
- Verify enable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='xdr-iocs-enable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('XDR_iocs.Client.http_request', return_value={})
outputs = mocker.patch('XDR_iocs.return_outputs')
enable_ioc = mocker.patch('XDR_iocs.prepare_enable_iocs', side_effect=prepare_enable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 enabled.', f'enable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 enabled.' # noqa: E501
assert enable_ioc.call_count == 1, 'enable command not called'
def test_iocs_command_with_disable(self, mocker):
"""
Given:
- disable command
Then:
- Verify disable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='xdr-iocs-disable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('XDR_iocs.Client.http_request', return_value={})
outputs = mocker.patch('XDR_iocs.return_outputs')
disable_ioc = mocker.patch('XDR_iocs.prepare_disable_iocs', side_effect=prepare_disable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 disabled.', f'disable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 disabled.' # noqa: E501
assert disable_ioc.call_count == 1, 'disable command not called'
def test_sync(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, data = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', returnvalue=iocs)
mocker.patch('XDR_iocs.return_outputs')
sync(client)
assert http_request.call_args.args[0] == 'sync_tim_iocs', 'sync command url changed'
@freeze_time('2020-06-03T02:00:00Z')
def test_iocs_to_keep(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, data = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_iocs_to_keep, 'txt')
mocker.patch.object(demisto, 'searchIndicators', returnvalue=iocs)
mocker.patch('XDR_iocs.return_outputs')
iocs_to_keep(client)
assert http_request.call_args.args[0] == 'iocs_to_keep', 'iocs_to_keep command url changed'
def test_tim_insert_jsons(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'time': '2020-06-03T00:00:00Z'})
iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
tim_insert_jsons(client)
assert http_request.call_args.kwargs['url_suffix'] == 'tim_insert_jsons/', 'tim_insert_jsons command url changed'
def test_get_changes(self, mocker):
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
mocker.patch.object(demisto, 'createIndicators')
mocker.patch.object(demisto, 'searchIndicators', return_value={})
xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
mocker.patch.object(Client, 'http_request', return_value=xdr_res)
get_changes(client)
xdr_ioc_to_timeline(list(map(lambda x: str(x[0].get('RULE_INDICATOR')), TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))) # noqa: E501
class TestParams:
tags_test = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tlp_color': ''},
'Cortex XDR',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tag': 'tag1'},
'tag1',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'feedTags': 'tag2', 'tlp_color': 'AMBER'},
'tag2',
'AMBER'
)
]
@pytest.mark.parametrize('demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color', tags_test)
def test_feed_tags_and_tlp_color(self, demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color, mocker):
"""
Given:
- IOC in XDR format.
Then:
- IOC in demisto format.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
mocker.patch.object(demisto, 'params', return_value=param_value)
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
mocker.patch.object(demisto, 'searchIndicators', return_value={})
outputs = mocker.patch.object(demisto, 'createIndicators')
Client.tag = demisto.params().get('feedTags', demisto.params().get('tag', Client.tag))
Client.tlp_color = demisto.params().get('tlp_color')
client = Client({'url': 'yana'})
xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
mocker.patch.object(Client, 'http_request', return_value=xdr_res)
get_changes(client)
output = outputs.call_args.args[0]
assert output[0]['fields']['tags'] == expected_tags
assert output[0]['fields'].get('trafficlightprotocol') == expected_tlp_color
| [
"pytest.mark.parametrize",
"freezegun.freeze_time"
]
| [((218, 253), 'freezegun.freeze_time', 'freeze_time', (['"""2020-06-01T00:00:00Z"""'], {}), "('2020-06-01T00:00:00Z')\n", (229, 253), False, 'from freezegun import freeze_time\n'), ((2243, 2330), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""res, expected_output"""', 'data_test_http_request_error_codes'], {}), "('res, expected_output',\n data_test_http_request_error_codes)\n", (2266, 2330), False, 'import pytest\n'), ((7399, 7471), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_iocs, out_iocs"""', 'data_test_create_file_sync'], {}), "('in_iocs, out_iocs', data_test_create_file_sync)\n", (7422, 7471), False, 'import pytest\n'), ((8937, 9032), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""defective_indicator"""', 'data_test_create_file_with_empty_indicators'], {}), "('defective_indicator',\n data_test_create_file_with_empty_indicators)\n", (8960, 9032), False, 'import pytest\n'), ((10880, 10965), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_iocs, out_iocs"""', 'data_test_create_file_iocs_to_keep'], {}), "('in_iocs, out_iocs', data_test_create_file_iocs_to_keep\n )\n", (10903, 10965), False, 'import pytest\n'), ((12595, 12697), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demisto_expiration, xdr_expiration"""', 'data_test_demisto_expiration_to_xdr'], {}), "('demisto_expiration, xdr_expiration',\n data_test_demisto_expiration_to_xdr)\n", (12618, 12697), False, 'import pytest\n'), ((13470, 13575), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demisto_reliability, xdr_reliability"""', 'data_test_demisto_reliability_to_xdr'], {}), "('demisto_reliability, xdr_reliability',\n data_test_demisto_reliability_to_xdr)\n", (13493, 13575), False, 'import pytest\n'), ((14169, 14254), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demisto_type, xdr_type"""', 'data_test_demisto_types_to_xdr'], {}), "('demisto_type, xdr_type',\n data_test_demisto_types_to_xdr)\n", (14192, 14254), False, 'import pytest\n'), ((15441, 15532), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demisto_vendor, xdr_vendor"""', 'data_test_demisto_vendors_to_xdr'], {}), "('demisto_vendor, xdr_vendor',\n data_test_demisto_vendors_to_xdr)\n", (15464, 15532), False, 'import pytest\n'), ((18998, 19075), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demisto_ioc, xdr_ioc"""', 'data_test_demisto_ioc_to_xdr'], {}), "('demisto_ioc, xdr_ioc', data_test_demisto_ioc_to_xdr)\n", (19021, 19075), False, 'import pytest\n'), ((20033, 20135), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""xdr_expiration, demisto_expiration"""', 'data_test_xdr_expiration_to_demisto'], {}), "('xdr_expiration, demisto_expiration',\n data_test_xdr_expiration_to_demisto)\n", (20056, 20135), False, 'import pytest\n'), ((23758, 23835), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""xdr_ioc, demisto_ioc"""', 'data_test_xdr_ioc_to_demisto'], {}), "('xdr_ioc, demisto_ioc', data_test_xdr_ioc_to_demisto)\n", (23781, 23835), False, 'import pytest\n'), ((26797, 26832), 'freezegun.freeze_time', 'freeze_time', (['"""2020-06-03T02:00:00Z"""'], {}), "('2020-06-03T02:00:00Z')\n", (26808, 26832), False, 'from freezegun import freeze_time\n'), ((29524, 29639), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color"""', 'tags_test'], {}), "(\n 'demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color',\n tags_test)\n", (29547, 29639), False, 'import pytest\n')] |
from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal
from math import ceil, floor, log2
from typing import Union
import torch
from ppq.core import RoundingPolicy
def ppq_numerical_round(value: float,
policy: RoundingPolicy=RoundingPolicy.ROUND_HALF_EVEN) -> int:
"""
reference: https://en.wikipedia.org/wiki/Rounding
decimal defination:
- decimal.ROUND_CEILING (towards Infinity)
- decimal.ROUND_DOWN (towards zero)
- decimal.ROUND_FLOOR (towards -Infinity)
- decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
- decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
- decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
- decimal.ROUND_UP (away from zero)
- decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5; otherwise towards zero)
Args:
value (float): [description]
policy (RoundingPolicy, optional): [description]. Defaults to RoundingPolicy.ROUND_HALF_EVEN.
Raises:
ValueError: [description]
Returns:
int: [description]
"""
assert isinstance(value, float), 'numerical round only takes effect on float number.'
if policy == RoundingPolicy.ROUND_HALF_EVEN:
return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_EVEN))
elif policy == RoundingPolicy.ROUND_HALF_UP:
if value > 0: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_UP))
else: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_DOWN))
elif policy == RoundingPolicy.ROUND_HALF_DOWN:
if value > 0: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_DOWN))
else: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_UP))
elif policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_DOWN)
elif policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_UP)
elif policy == RoundingPolicy.ROUND_TO_NEAR_INT:
if value > 0: return floor(value + 0.5)
else: return ceil(value - 0.5)
elif policy == RoundingPolicy.ROUND_UP:
return ceil(value)
else:
raise ValueError('Unexpected rounding policy found.')
def ppq_tensor_round(value: torch.Tensor,
policy:RoundingPolicy=RoundingPolicy.ROUND_HALF_EVEN) -> torch.Tensor:
"""
reference: https://en.wikipedia.org/wiki/Rounding
Args:
value (torch.Tensor): [description]
policy (RoundingPolicy, optional): [description]. Defaults to RoundingPolicy.ROUND_HALF_EVEN.
Raises:
ValueError: [description]
Returns:
torch.Tensor: [description]
"""
assert isinstance(value, torch.Tensor), 'tensor round only takes effect on torch tensor.'
if policy == RoundingPolicy.ROUND_HALF_EVEN:
# default rounding policy of torch is ROUND_TO_NEAR_EVEN
# try this: print(torch.Tensor([1.5, 2.5, 3.5, 4.5]).round())
# However it may generate unexpected results due to version difference.
return value.round()
elif policy == RoundingPolicy.ROUND_UP:
return value.ceil()
elif policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
return torch.sign(value) * torch.ceil(value.abs() - 0.5)
elif policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
return torch.sign(value) * torch.floor(value.abs() + 0.5)
elif policy == RoundingPolicy.ROUND_HALF_DOWN:
return torch.ceil(value - 0.5)
elif policy == RoundingPolicy.ROUND_HALF_UP:
return torch.floor(value + 0.5)
elif policy == RoundingPolicy.ROUND_TO_NEAR_INT:
raise NotImplementedError(f'Torch Tensor can not use this rounding policy({policy}) try ROUND_HALF_EVEN instead.')
else:
raise ValueError('Unexpected rounding policy found.')
def ppq_round_to_power_of_2(value: Union[float, int],
policy: RoundingPolicy=RoundingPolicy.ROUND_UP) -> float:
if value == 0: return 0
sign = 1 if value >= 0 else -1
assert isinstance(value, float) or isinstance(value, int), \
'power-of-2 round only takes effect on float or int.'
return sign * float(pow(2, ppq_numerical_round(log2(sign * value), policy=policy)))
| [
"math.ceil",
"math.floor",
"torch.floor",
"math.log2",
"torch.sign",
"torch.ceil",
"decimal.Decimal"
]
| [((1397, 1411), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (1404, 1411), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((1425, 1435), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (1432, 1435), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((3472, 3489), 'torch.sign', 'torch.sign', (['value'], {}), '(value)\n', (3482, 3489), False, 'import torch\n'), ((4434, 4452), 'math.log2', 'log2', (['(sign * value)'], {}), '(sign * value)\n', (4438, 4452), False, 'from math import ceil, floor, log2\n'), ((3597, 3614), 'torch.sign', 'torch.sign', (['value'], {}), '(value)\n', (3607, 3614), False, 'import torch\n'), ((3714, 3737), 'torch.ceil', 'torch.ceil', (['(value - 0.5)'], {}), '(value - 0.5)\n', (3724, 3737), False, 'import torch\n'), ((1546, 1560), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (1553, 1560), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((1574, 1584), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (1581, 1584), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((1636, 1650), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (1643, 1650), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((1664, 1674), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (1671, 1674), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((3802, 3826), 'torch.floor', 'torch.floor', (['(value + 0.5)'], {}), '(value + 0.5)\n', (3813, 3826), False, 'import torch\n'), ((1787, 1801), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (1794, 1801), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((1815, 1825), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (1822, 1825), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((1879, 1893), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (1886, 1893), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((1907, 1917), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (1914, 1917), False, 'from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal\n'), ((2291, 2309), 'math.floor', 'floor', (['(value + 0.5)'], {}), '(value + 0.5)\n', (2296, 2309), False, 'from math import ceil, floor, log2\n'), ((2331, 2348), 'math.ceil', 'ceil', (['(value - 0.5)'], {}), '(value - 0.5)\n', (2335, 2348), False, 'from math import ceil, floor, log2\n'), ((2408, 2419), 'math.ceil', 'ceil', (['value'], {}), '(value)\n', (2412, 2419), False, 'from math import ceil, floor, log2\n')] |
from django.conf.urls import patterns, url
from roomsensor import views
urlpatterns = patterns('',
url(r'^$', views.index, name='roomsensor'),
# ex: /roomsensor/name/
url(r'^(?P<roomsensor_name>\w+)/$', views.display, name='roomsensor_display'),
url(r'^(?P<roomsensor_name>\w+)/read/$', views.read, name='roomsensor_read'),
# JSON data for graph creation
url(r'^(?P<roomsensor_name>\w+)/rawdata/(?P<datapoints>\d+)/(?P<compression_factor>\d+)/$', views.rawdata, name='roomsensor_rawdata'),
) | [
"django.conf.urls.url"
]
| [((105, 146), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""roomsensor"""'}), "('^$', views.index, name='roomsensor')\n", (108, 146), False, 'from django.conf.urls import patterns, url\n'), ((182, 259), 'django.conf.urls.url', 'url', (['"""^(?P<roomsensor_name>\\\\w+)/$"""', 'views.display'], {'name': '"""roomsensor_display"""'}), "('^(?P<roomsensor_name>\\\\w+)/$', views.display, name='roomsensor_display')\n", (185, 259), False, 'from django.conf.urls import patterns, url\n'), ((265, 341), 'django.conf.urls.url', 'url', (['"""^(?P<roomsensor_name>\\\\w+)/read/$"""', 'views.read'], {'name': '"""roomsensor_read"""'}), "('^(?P<roomsensor_name>\\\\w+)/read/$', views.read, name='roomsensor_read')\n", (268, 341), False, 'from django.conf.urls import patterns, url\n'), ((383, 523), 'django.conf.urls.url', 'url', (['"""^(?P<roomsensor_name>\\\\w+)/rawdata/(?P<datapoints>\\\\d+)/(?P<compression_factor>\\\\d+)/$"""', 'views.rawdata'], {'name': '"""roomsensor_rawdata"""'}), "('^(?P<roomsensor_name>\\\\w+)/rawdata/(?P<datapoints>\\\\d+)/(?P<compression_factor>\\\\d+)/$'\n , views.rawdata, name='roomsensor_rawdata')\n", (386, 523), False, 'from django.conf.urls import patterns, url\n')] |
import numpy as np
from collections import defaultdict, Counter
import random
import json
from tqdm import tqdm
def transX(dataset):
rel2id = json.load(open(dataset + '/relation2ids'))
ent2id = json.load(open(dataset + '/ent2ids'))
with open('../Fast-TransX/' + dataset + '_base/entity2id.txt', 'w') as g1:
num_ents = len(ent2id.keys())
g1.write(str(num_ents) + '\n')
for k, v in ent2id.items():
g1.write(k + '\t' + str(v) + '\n')
with open('../Fast-TransX/' + dataset + '_base/relation2id.txt', 'w') as g1:
num_rels = len(rel2id.keys())
g1.write(str(num_rels) + '\n')
for k, v in rel2id.items():
g1.write(k + '\t' + str(v) + '\n')
file_name = dataset + '/path_graph'
train_triples = []
with open(file_name) as f:
lines = f.readlines()
for line in tqdm(lines):
e1 = line.split('\t')[0]
e2 = line.rstrip().split('\t')[2]
rel = line.split('\t')[1]
train_triples.append([e1,rel,e2])
train_triples.append([e2,rel+'_inv',e1])
with open('../Fast-TransX/' + dataset + '_base/train2id.txt', 'w') as g3:
num_triples = len(train_triples)
g3.write(str(num_triples) + '\n')
for triple in train_triples:
e1, rel, e2 = triple
g3.write(str(ent2id[e1]) + '\t' + str(ent2id[e2]) + '\t' + str(rel2id[rel]) + '\n')
if __name__ == '__main__':
transX('Wiki') | [
"tqdm.tqdm"
]
| [((874, 885), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (878, 885), False, 'from tqdm import tqdm\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["Click>=6.0", "suds2==0.7.1"]
setup_requirements = [
# TODO(ovnicraft): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name="runa",
version="0.2.10",
description="Librería para uso de WS del Bus Gubernamental de Ecuador",
long_description=readme + "\n\n" + history,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/ovnicraft/runa",
packages=find_packages(include=["runa"]),
entry_points={"console_scripts": ["runa=runa.cli:main"]},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="runa webservices ecuador bgs",
classifiers=[
"Development Status :: 3 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
test_suite="tests",
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| [
"setuptools.find_packages"
]
| [((769, 800), 'setuptools.find_packages', 'find_packages', ([], {'include': "['runa']"}), "(include=['runa'])\n", (782, 800), False, 'from setuptools import setup, find_packages\n')] |
import time
import board
import displayio
import busio
from analogio import AnalogIn
import neopixel
import adafruit_adt7410
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text.label import Label
from adafruit_button import Button
import adafruit_touchscreen
from adafruit_pyportal import PyPortal
# ------------- Inputs and Outputs Setup ------------- #
# init. the temperature sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
adt = adafruit_adt7410.ADT7410(i2c_bus, address=0x48)
adt.high_resolution = True
# init. the light sensor
light_sensor = AnalogIn(board.LIGHT)
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=1)
WHITE = 0xffffff
RED = 0xff0000
YELLOW = 0xffff00
GREEN = 0x00ff00
BLUE = 0x0000ff
PURPLE = 0xff00ff
BLACK = 0x000000
# ---------- Sound Effects ------------- #
soundDemo = '/sounds/sound.wav'
soundBeep = '/sounds/beep.wav'
soundTab = '/sounds/tab.wav'
# ------------- Other Helper Functions------------- #
# Helper for cycling through a number set of 1 to x.
def numberUP(num, max_val):
num += 1
if num <= max_val:
return num
else:
return 1
# ------------- Screen Setup ------------- #
pyportal = PyPortal()
display = board.DISPLAY
display.rotation = 270
# Backlight function
# Value between 0 and 1 where 0 is OFF, 0.5 is 50% and 1 is 100% brightness.
def set_backlight(val):
val = max(0, min(1.0, val))
board.DISPLAY.auto_brightness = False
board.DISPLAY.brightness = val
# Set the Backlight
set_backlight(0.3)
# Touchscreen setup
# ------Rotate 270:
screen_width = 240
screen_height = 320
ts = adafruit_touchscreen.Touchscreen(board.TOUCH_YD, board.TOUCH_YU,
board.TOUCH_XR, board.TOUCH_XL,
calibration=((5200, 59000),
(5800, 57000)),
size=(screen_width, screen_height))
# ------------- Display Groups ------------- #
splash = displayio.Group(max_size=15) # The Main Display Group
view1 = displayio.Group(max_size=15) # Group for View 1 objects
view2 = displayio.Group(max_size=15) # Group for View 2 objects
view3 = displayio.Group(max_size=15) # Group for View 3 objects
def hideLayer(hide_target):
try:
splash.remove(hide_target)
except ValueError:
pass
def showLayer(show_target):
try:
time.sleep(0.1)
splash.append(show_target)
except ValueError:
pass
# ------------- Setup for Images ------------- #
# Display an image until the loop starts
pyportal.set_background('/images/loading.bmp')
bg_group = displayio.Group(max_size=1)
splash.append(bg_group)
icon_group = displayio.Group(max_size=1)
icon_group.x = 180
icon_group.y = 120
icon_group.scale = 1
view2.append(icon_group)
# This will handel switching Images and Icons
def set_image(group, filename):
"""Set the image file for a given goup for display.
This is most useful for Icons or image slideshows.
:param group: The chosen group
:param filename: The filename of the chosen image
"""
print("Set image to ", filename)
if group:
group.pop()
if not filename:
return # we're done, no icon desired
image_file = open(filename, "rb")
image = displayio.OnDiskBitmap(image_file)
try:
image_sprite = displayio.TileGrid(image, pixel_shader=displayio.ColorConverter())
except TypeError:
image_sprite = displayio.TileGrid(image, pixel_shader=displayio.ColorConverter(),
position=(0, 0))
group.append(image_sprite)
set_image(bg_group, "/images/BGimage.bmp")
# ---------- Text Boxes ------------- #
# Set the font and preload letters
font = bitmap_font.load_font("/fonts/Helvetica-Bold-16.bdf")
font.load_glyphs(b'abcdefghjiklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890- ()')
# Default Label styling:
TABS_X = 5
TABS_Y = 50
# Text Label Objects
feed1_label = Label(font, text="Text Wondow 1", color=0xE39300, max_glyphs=200)
feed1_label.x = TABS_X
feed1_label.y = TABS_Y
view1.append(feed1_label)
feed2_label = Label(font, text="Text Wondow 2", color=0xFFFFFF, max_glyphs=200)
feed2_label.x = TABS_X
feed2_label.y = TABS_Y
view2.append(feed2_label)
sensors_label = Label(font, text="Data View", color=0x03AD31, max_glyphs=200)
sensors_label.x = TABS_X
sensors_label.y = TABS_Y
view3.append(sensors_label)
sensor_data = Label(font, text="Data View", color=0x03AD31, max_glyphs=100)
sensor_data.x = TABS_X+15
sensor_data.y = 170
view3.append(sensor_data)
text_hight = Label(font, text="M", color=0x03AD31, max_glyphs=10)
# return a reformatted string with word wrapping using PyPortal.wrap_nicely
def text_box(target, top, string, max_chars):
text = pyportal.wrap_nicely(string, max_chars)
new_text = ""
test = ""
for w in text:
new_text += '\n'+w
test += 'M\n'
text_hight.text = test # Odd things happen without this
glyph_box = text_hight.bounding_box
target.text = "" # Odd things happen without this
target.y = int(glyph_box[3]/2)+top
target.text = new_text
# ---------- Display Buttons ------------- #
# Default button styling:
BUTTON_HEIGHT = 40
BUTTON_WIDTH = 80
# We want three buttons across the top of the screen
TAPS_HEIGHT = 40
TAPS_WIDTH = int(screen_width/3)
TAPS_Y = 0
# We want two big buttons at the bottom of the screen
BIG_BUTTON_HEIGHT = int(screen_height/3.2)
BIG_BUTTON_WIDTH = int(screen_width/2)
BIG_BUTTON_Y = int(screen_height-BIG_BUTTON_HEIGHT)
# This group will make it easy for us to read a button press later.
buttons = []
# Main User Interface Buttons
button_view1 = Button(x=0, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View1", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view1) # adding this button to the buttons group
button_view2 = Button(x=TAPS_WIDTH, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View2", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view2) # adding this button to the buttons group
button_view3 = Button(x=TAPS_WIDTH*2, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View3", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view3) # adding this button to the buttons group
button_switch = Button(x=0, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Switch", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_switch) # adding this button to the buttons group
button_2 = Button(x=BIG_BUTTON_WIDTH, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Button", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_2) # adding this button to the buttons group
# Add all of the main buttons to the spalsh Group
for b in buttons:
splash.append(b.group)
# Make a button to change the icon image on view2
button_icon = Button(x=150, y=60,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Icon", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_icon) # adding this button to the buttons group
# Add this button to view2 Group
view2.append(button_icon.group)
# Make a button to play a sound on view2
button_sound = Button(x=150, y=170,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Sound", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_sound) # adding this button to the buttons group
# Add this button to view2 Group
view3.append(button_sound.group)
#pylint: disable=global-statement
def switch_view(what_view):
global view_live
if what_view == 1:
hideLayer(view2)
hideLayer(view3)
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
view_live = 1
print("View1 On")
elif what_view == 2:
# global icon
hideLayer(view1)
hideLayer(view3)
button_view1.selected = True
button_view2.selected = False
button_view3.selected = True
showLayer(view2)
view_live = 2
print("View2 On")
else:
hideLayer(view1)
hideLayer(view2)
button_view1.selected = True
button_view2.selected = True
button_view3.selected = False
showLayer(view3)
view_live = 3
print("View3 On")
#pylint: enable=global-statement
# Set veriables and startup states
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
hideLayer(view2)
hideLayer(view3)
view_live = 1
icon = 1
icon_name = "Ruby"
button_mode = 1
switch_state = 0
button_switch.label = "OFF"
button_switch.selected = True
# Update out Labels with display text.
text_box(feed1_label, TABS_Y,
"The text on this screen is wrapped so that all of it fits nicely into a \
text box that is ### x ###.", 30)
text_box(feed1_label, TABS_Y,
'The text on this screen is wrapped so that all of it fits nicely into a \
text box that is {} x {}.'
.format(feed1_label.bounding_box[2], feed1_label.bounding_box[3]*2), 30)
text_box(feed2_label, TABS_Y, 'Tap on the Icon button to meet a new friend.', 18)
text_box(sensors_label, TABS_Y,
"This screen can display sensor readings and tap Sound to play a WAV file.", 28)
board.DISPLAY.show(splash)
# ------------- Code Loop ------------- #
while True:
touch = ts.touch_point
light = light_sensor.value
tempC = round(adt.temperature)
tempF = tempC * 1.8 + 32
sensor_data.text = 'Touch: {}\nLight: {}\n Temp: {}°F'.format(touch, light, tempF)
# ------------- Handle Button Press Detection ------------- #
if touch: # Only do this if the screen is touched
# loop with buttons using enumerate() to number each button group as i
for i, b in enumerate(buttons):
if b.contains(touch): # Test each button to see if it was pressed
print('button%d pressed' % i)
if i == 0 and view_live != 1: # only if view1 is visable
pyportal.play_file(soundTab)
switch_view(1)
while ts.touch_point:
pass
if i == 1 and view_live != 2: # only if view2 is visable
pyportal.play_file(soundTab)
switch_view(2)
while ts.touch_point:
pass
if i == 2 and view_live != 3: # only if view3 is visable
pyportal.play_file(soundTab)
switch_view(3)
while ts.touch_point:
pass
if i == 3:
pyportal.play_file(soundBeep)
# Toggle switch button type
if switch_state == 0:
switch_state = 1
b.label = "ON"
b.selected = False
pixel.fill(WHITE)
print("Swich ON")
else:
switch_state = 0
b.label = "OFF"
b.selected = True
pixel.fill(BLACK)
print("Swich OFF")
# for debounce
while ts.touch_point:
pass
print("Swich Pressed")
if i == 4:
pyportal.play_file(soundBeep)
# Momentary button type
b.selected = True
print('Button Pressed')
button_mode = numberUP(button_mode, 5)
if button_mode == 1:
pixel.fill(RED)
elif button_mode == 2:
pixel.fill(YELLOW)
elif button_mode == 3:
pixel.fill(GREEN)
elif button_mode == 4:
pixel.fill(BLUE)
elif button_mode == 5:
pixel.fill(PURPLE)
switch_state = 1
button_switch.label = "ON"
button_switch.selected = False
# for debounce
while ts.touch_point:
pass
print("Button released")
b.selected = False
if i == 5 and view_live == 2: # only if view2 is visable
pyportal.play_file(soundBeep)
b.selected = True
while ts.touch_point:
pass
print("Icon Button Pressed")
icon = numberUP(icon, 3)
if icon == 1:
icon_name = "Ruby"
elif icon == 2:
icon_name = "Gus"
elif icon == 3:
icon_name = "Billie"
b.selected = False
text_box(feed2_label, TABS_Y,
"Every time you tap the Icon button the icon image will \
change. Say hi to {}!".format(icon_name), 18)
set_image(icon_group, "/images/"+icon_name+".bmp")
if i == 6 and view_live == 3: # only if view3 is visable
b.selected = True
while ts.touch_point:
pass
print("Sound Button Pressed")
pyportal.play_file(soundDemo)
b.selected = False
| [
"adafruit_bitmap_font.bitmap_font.load_font",
"busio.I2C",
"analogio.AnalogIn",
"adafruit_button.Button",
"board.DISPLAY.show",
"displayio.Group",
"adafruit_touchscreen.Touchscreen",
"time.sleep",
"displayio.ColorConverter",
"neopixel.NeoPixel",
"adafruit_adt7410.ADT7410",
"adafruit_pyportal.PyPortal",
"displayio.OnDiskBitmap",
"adafruit_display_text.label.Label"
]
| [((417, 448), 'busio.I2C', 'busio.I2C', (['board.SCL', 'board.SDA'], {}), '(board.SCL, board.SDA)\n', (426, 448), False, 'import busio\n'), ((455, 500), 'adafruit_adt7410.ADT7410', 'adafruit_adt7410.ADT7410', (['i2c_bus'], {'address': '(72)'}), '(i2c_bus, address=72)\n', (479, 500), False, 'import adafruit_adt7410\n'), ((571, 592), 'analogio.AnalogIn', 'AnalogIn', (['board.LIGHT'], {}), '(board.LIGHT)\n', (579, 592), False, 'from analogio import AnalogIn\n'), ((602, 652), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['board.NEOPIXEL', '(1)'], {'brightness': '(1)'}), '(board.NEOPIXEL, 1, brightness=1)\n', (619, 652), False, 'import neopixel\n'), ((1182, 1192), 'adafruit_pyportal.PyPortal', 'PyPortal', ([], {}), '()\n', (1190, 1192), False, 'from adafruit_pyportal import PyPortal\n'), ((1597, 1782), 'adafruit_touchscreen.Touchscreen', 'adafruit_touchscreen.Touchscreen', (['board.TOUCH_YD', 'board.TOUCH_YU', 'board.TOUCH_XR', 'board.TOUCH_XL'], {'calibration': '((5200, 59000), (5800, 57000))', 'size': '(screen_width, screen_height)'}), '(board.TOUCH_YD, board.TOUCH_YU, board.\n TOUCH_XR, board.TOUCH_XL, calibration=((5200, 59000), (5800, 57000)),\n size=(screen_width, screen_height))\n', (1629, 1782), False, 'import adafruit_touchscreen\n'), ((1997, 2025), 'displayio.Group', 'displayio.Group', ([], {'max_size': '(15)'}), '(max_size=15)\n', (2012, 2025), False, 'import displayio\n'), ((2060, 2088), 'displayio.Group', 'displayio.Group', ([], {'max_size': '(15)'}), '(max_size=15)\n', (2075, 2088), False, 'import displayio\n'), ((2125, 2153), 'displayio.Group', 'displayio.Group', ([], {'max_size': '(15)'}), '(max_size=15)\n', (2140, 2153), False, 'import displayio\n'), ((2190, 2218), 'displayio.Group', 'displayio.Group', ([], {'max_size': '(15)'}), '(max_size=15)\n', (2205, 2218), False, 'import displayio\n'), ((2641, 2668), 'displayio.Group', 'displayio.Group', ([], {'max_size': '(1)'}), '(max_size=1)\n', (2656, 2668), False, 'import displayio\n'), ((2708, 2735), 'displayio.Group', 'displayio.Group', ([], {'max_size': '(1)'}), '(max_size=1)\n', (2723, 2735), False, 'import displayio\n'), ((3768, 3821), 'adafruit_bitmap_font.bitmap_font.load_font', 'bitmap_font.load_font', (['"""/fonts/Helvetica-Bold-16.bdf"""'], {}), "('/fonts/Helvetica-Bold-16.bdf')\n", (3789, 3821), False, 'from adafruit_bitmap_font import bitmap_font\n'), ((3995, 4060), 'adafruit_display_text.label.Label', 'Label', (['font'], {'text': '"""Text Wondow 1"""', 'color': '(14914304)', 'max_glyphs': '(200)'}), "(font, text='Text Wondow 1', color=14914304, max_glyphs=200)\n", (4000, 4060), False, 'from adafruit_display_text.label import Label\n'), ((4148, 4213), 'adafruit_display_text.label.Label', 'Label', (['font'], {'text': '"""Text Wondow 2"""', 'color': '(16777215)', 'max_glyphs': '(200)'}), "(font, text='Text Wondow 2', color=16777215, max_glyphs=200)\n", (4153, 4213), False, 'from adafruit_display_text.label import Label\n'), ((4303, 4362), 'adafruit_display_text.label.Label', 'Label', (['font'], {'text': '"""Data View"""', 'color': '(240945)', 'max_glyphs': '(200)'}), "(font, text='Data View', color=240945, max_glyphs=200)\n", (4308, 4362), False, 'from adafruit_display_text.label import Label\n'), ((4458, 4517), 'adafruit_display_text.label.Label', 'Label', (['font'], {'text': '"""Data View"""', 'color': '(240945)', 'max_glyphs': '(100)'}), "(font, text='Data View', color=240945, max_glyphs=100)\n", (4463, 4517), False, 'from adafruit_display_text.label import Label\n'), ((4607, 4657), 'adafruit_display_text.label.Label', 'Label', (['font'], {'text': '"""M"""', 'color': '(240945)', 'max_glyphs': '(10)'}), "(font, text='M', color=240945, max_glyphs=10)\n", (4612, 4657), False, 'from adafruit_display_text.label import Label\n'), ((5696, 5932), 'adafruit_button.Button', 'Button', ([], {'x': '(0)', 'y': '(0)', 'width': 'TAPS_WIDTH', 'height': 'TAPS_HEIGHT', 'label': '"""View1"""', 'label_font': 'font', 'label_color': '(16743936)', 'fill_color': '(6052700)', 'outline_color': '(7763574)', 'selected_fill': '(1710618)', 'selected_outline': '(3026478)', 'selected_label': '(5395026)'}), "(x=0, y=0, width=TAPS_WIDTH, height=TAPS_HEIGHT, label='View1',\n label_font=font, label_color=16743936, fill_color=6052700,\n outline_color=7763574, selected_fill=1710618, selected_outline=3026478,\n selected_label=5395026)\n", (5702, 5932), False, 'from adafruit_button import Button\n'), ((6124, 6370), 'adafruit_button.Button', 'Button', ([], {'x': 'TAPS_WIDTH', 'y': '(0)', 'width': 'TAPS_WIDTH', 'height': 'TAPS_HEIGHT', 'label': '"""View2"""', 'label_font': 'font', 'label_color': '(16743936)', 'fill_color': '(6052700)', 'outline_color': '(7763574)', 'selected_fill': '(1710618)', 'selected_outline': '(3026478)', 'selected_label': '(5395026)'}), "(x=TAPS_WIDTH, y=0, width=TAPS_WIDTH, height=TAPS_HEIGHT, label=\n 'View2', label_font=font, label_color=16743936, fill_color=6052700,\n outline_color=7763574, selected_fill=1710618, selected_outline=3026478,\n selected_label=5395026)\n", (6130, 6370), False, 'from adafruit_button import Button\n'), ((6561, 6811), 'adafruit_button.Button', 'Button', ([], {'x': '(TAPS_WIDTH * 2)', 'y': '(0)', 'width': 'TAPS_WIDTH', 'height': 'TAPS_HEIGHT', 'label': '"""View3"""', 'label_font': 'font', 'label_color': '(16743936)', 'fill_color': '(6052700)', 'outline_color': '(7763574)', 'selected_fill': '(1710618)', 'selected_outline': '(3026478)', 'selected_label': '(5395026)'}), "(x=TAPS_WIDTH * 2, y=0, width=TAPS_WIDTH, height=TAPS_HEIGHT, label=\n 'View3', label_font=font, label_color=16743936, fill_color=6052700,\n outline_color=7763574, selected_fill=1710618, selected_outline=3026478,\n selected_label=5395026)\n", (6567, 6811), False, 'from adafruit_button import Button\n'), ((7001, 7264), 'adafruit_button.Button', 'Button', ([], {'x': '(0)', 'y': 'BIG_BUTTON_Y', 'width': 'BIG_BUTTON_WIDTH', 'height': 'BIG_BUTTON_HEIGHT', 'label': '"""Switch"""', 'label_font': 'font', 'label_color': '(16743936)', 'fill_color': '(6052700)', 'outline_color': '(7763574)', 'selected_fill': '(1710618)', 'selected_outline': '(3026478)', 'selected_label': '(5395026)'}), "(x=0, y=BIG_BUTTON_Y, width=BIG_BUTTON_WIDTH, height=\n BIG_BUTTON_HEIGHT, label='Switch', label_font=font, label_color=\n 16743936, fill_color=6052700, outline_color=7763574, selected_fill=\n 1710618, selected_outline=3026478, selected_label=5395026)\n", (7007, 7264), False, 'from adafruit_button import Button\n'), ((7455, 7733), 'adafruit_button.Button', 'Button', ([], {'x': 'BIG_BUTTON_WIDTH', 'y': 'BIG_BUTTON_Y', 'width': 'BIG_BUTTON_WIDTH', 'height': 'BIG_BUTTON_HEIGHT', 'label': '"""Button"""', 'label_font': 'font', 'label_color': '(16743936)', 'fill_color': '(6052700)', 'outline_color': '(7763574)', 'selected_fill': '(1710618)', 'selected_outline': '(3026478)', 'selected_label': '(5395026)'}), "(x=BIG_BUTTON_WIDTH, y=BIG_BUTTON_Y, width=BIG_BUTTON_WIDTH, height=\n BIG_BUTTON_HEIGHT, label='Button', label_font=font, label_color=\n 16743936, fill_color=6052700, outline_color=7763574, selected_fill=\n 1710618, selected_outline=3026478, selected_label=5395026)\n", (7461, 7733), False, 'from adafruit_button import Button\n'), ((8044, 8313), 'adafruit_button.Button', 'Button', ([], {'x': '(150)', 'y': '(60)', 'width': 'BUTTON_WIDTH', 'height': 'BUTTON_HEIGHT', 'label': '"""Icon"""', 'label_font': 'font', 'label_color': '(16777215)', 'fill_color': '(8978687)', 'outline_color': '(12342781)', 'selected_fill': '(5921370)', 'selected_outline': '(16737792)', 'selected_label': '(5395026)', 'style': 'Button.ROUNDRECT'}), "(x=150, y=60, width=BUTTON_WIDTH, height=BUTTON_HEIGHT, label='Icon',\n label_font=font, label_color=16777215, fill_color=8978687,\n outline_color=12342781, selected_fill=5921370, selected_outline=\n 16737792, selected_label=5395026, style=Button.ROUNDRECT)\n", (8050, 8313), False, 'from adafruit_button import Button\n'), ((8603, 8875), 'adafruit_button.Button', 'Button', ([], {'x': '(150)', 'y': '(170)', 'width': 'BUTTON_WIDTH', 'height': 'BUTTON_HEIGHT', 'label': '"""Sound"""', 'label_font': 'font', 'label_color': '(16777215)', 'fill_color': '(8978687)', 'outline_color': '(12342781)', 'selected_fill': '(5921370)', 'selected_outline': '(16737792)', 'selected_label': '(5395026)', 'style': 'Button.ROUNDRECT'}), "(x=150, y=170, width=BUTTON_WIDTH, height=BUTTON_HEIGHT, label=\n 'Sound', label_font=font, label_color=16777215, fill_color=8978687,\n outline_color=12342781, selected_fill=5921370, selected_outline=\n 16737792, selected_label=5395026, style=Button.ROUNDRECT)\n", (8609, 8875), False, 'from adafruit_button import Button\n'), ((10943, 10969), 'board.DISPLAY.show', 'board.DISPLAY.show', (['splash'], {}), '(splash)\n', (10961, 10969), False, 'import board\n'), ((3305, 3339), 'displayio.OnDiskBitmap', 'displayio.OnDiskBitmap', (['image_file'], {}), '(image_file)\n', (3327, 3339), False, 'import displayio\n'), ((2402, 2417), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2412, 2417), False, 'import time\n'), ((3411, 3437), 'displayio.ColorConverter', 'displayio.ColorConverter', ([], {}), '()\n', (3435, 3437), False, 'import displayio\n'), ((3523, 3549), 'displayio.ColorConverter', 'displayio.ColorConverter', ([], {}), '()\n', (3547, 3549), False, 'import displayio\n')] |
from astropy import coordinates as coord
from astropy import wcs
from astropy.io import fits
from astropy import units as u
from misc import bcolors
import numpy as np
import os
def convert_hms_dd(RA, DEC):
'''
Convert HMS to DD system
'''
if (':' in RA) and (':' in DEC):
Coord_dd = coord.SkyCoord(RA, DEC, unit=(u.hour,u.degree), frame='icrs')
RA_dd = Coord_dd.ra.deg
Dec_dd = Coord_dd.dec.deg
elif (not (':' in RA) and not (':' in DEC)) and (('.' in RA) and ('.' in DEC)):
RA_dd, Dec_dd = float(RA), float(DEC)
else:
print(bcolors.FAIL + 'Coordinates have wrong format.' + bcolors.ENDC)
sys.exit()
return RA_dd, Dec_dd
def get_header(FILE, KEYWORD):
'''
Get keyword from fits file
'''
header = fits.getheader(FILE)
return header[KEYWORD]
def pix2arcsec(FITS):
'''
Get pixel scale
'''
hdu = fits.open(FITS)
if len(hdu) > 1:
header = fits.getheader(FITS, 0)
header += fits.getheader(FITS, 1)
else:
header = fits.getheader(FITS)
hdu_wcs = wcs.WCS(header)
return np.median(wcs.utils.proj_plane_pixel_scales(hdu_wcs)) * 3600
def sky2xy (FITS, RA=False, DEC=False, CAT=None):
'''
Coordinate transformation: sky -> xy
'''
if CAT == None:
if RA != False and DEC != False:
cmd=('sky2xy %s %s %s | grep -v off' %(FITS, RA, DEC))
program_call = os.popen(cmd)
xy = []
for line in program_call:
xy=np.array(line.strip().split()[-2:]).astype(float)
if len(xy) > 0:
return xy
else:
cmd =("more %s | awk '{print $1,$2}' > %s" %(CAT, CAT.replace(CAT.split('.')[-1], 'reg')))
os.system(cmd)
cmd = ("sky2xy %s @%s | grep -v off | awk '{print $5, $6}'" %(FITS, CAT.replace(CAT.split('.')[-1], 'reg')))
cat = os.popen(cmd)
xy = []
for line in cat:
xy.append(list(map(float, line.replace('\n', '').split())))
return np.array(xy)
def xy2sky (FITSFILE,X,Y):
'''
Coordinate transformation: xy -> sky
'''
program_call = os.popen('xy2sky %s %s %s' %(FITSFILE, X, Y))
sky = []
for line in program_call:
sky.append(line.strip().split()[:2])
return sky
| [
"astropy.io.fits.getheader",
"astropy.coordinates.SkyCoord",
"numpy.array",
"os.popen",
"astropy.io.fits.open",
"os.system",
"astropy.wcs.WCS",
"astropy.wcs.utils.proj_plane_pixel_scales"
]
| [((736, 756), 'astropy.io.fits.getheader', 'fits.getheader', (['FILE'], {}), '(FILE)\n', (750, 756), False, 'from astropy.io import fits\n'), ((841, 856), 'astropy.io.fits.open', 'fits.open', (['FITS'], {}), '(FITS)\n', (850, 856), False, 'from astropy.io import fits\n'), ((999, 1014), 'astropy.wcs.WCS', 'wcs.WCS', (['header'], {}), '(header)\n', (1006, 1014), False, 'from astropy import wcs\n'), ((1925, 1971), 'os.popen', 'os.popen', (["('xy2sky %s %s %s' % (FITSFILE, X, Y))"], {}), "('xy2sky %s %s %s' % (FITSFILE, X, Y))\n", (1933, 1971), False, 'import os\n'), ((298, 360), 'astropy.coordinates.SkyCoord', 'coord.SkyCoord', (['RA', 'DEC'], {'unit': '(u.hour, u.degree)', 'frame': '"""icrs"""'}), "(RA, DEC, unit=(u.hour, u.degree), frame='icrs')\n", (312, 360), True, 'from astropy import coordinates as coord\n'), ((886, 909), 'astropy.io.fits.getheader', 'fits.getheader', (['FITS', '(0)'], {}), '(FITS, 0)\n', (900, 909), False, 'from astropy.io import fits\n'), ((922, 945), 'astropy.io.fits.getheader', 'fits.getheader', (['FITS', '(1)'], {}), '(FITS, 1)\n', (936, 945), False, 'from astropy.io import fits\n'), ((964, 984), 'astropy.io.fits.getheader', 'fits.getheader', (['FITS'], {}), '(FITS)\n', (978, 984), False, 'from astropy.io import fits\n'), ((1564, 1578), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1573, 1578), False, 'import os\n'), ((1700, 1713), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (1708, 1713), False, 'import os\n'), ((1818, 1830), 'numpy.array', 'np.array', (['xy'], {}), '(xy)\n', (1826, 1830), True, 'import numpy as np\n'), ((1033, 1075), 'astropy.wcs.utils.proj_plane_pixel_scales', 'wcs.utils.proj_plane_pixel_scales', (['hdu_wcs'], {}), '(hdu_wcs)\n', (1066, 1075), False, 'from astropy import wcs\n'), ((1316, 1329), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (1324, 1329), False, 'import os\n')] |
import math
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
class PolyLR(_LRScheduler):
"""
Sets the learning rate of each parameter group according to poly learning rate policy
"""
def __init__(self, optimizer, max_iter=90000, power=0.9, last_epoch=-1):
self.max_iter = max_iter
self.power = power
super().__init__(optimizer, last_epoch)
def get_lr(self):
return [base_lr * (1 - float(self.last_epoch) / self.max_iter) ** self.power for base_lr in self.base_lrs]
func_zoo = {
"cosine_decay": lambda epoch, step, len_epoch, total_epoch: 0.5
* (math.cos(step * math.pi / (total_epoch * len_epoch)) + 1)
}
class CosineWarmRestart:
def __init__(
self,
optimizer: Optimizer,
func: str = "cosine_decay",
warmup: bool = True,
warmup_epoch: int = 1,
period: int = 10,
min_lr: float = 1e-5,
low_epoch: int = 1,
):
# self.base_lrs = list(map(lambda group: group["lr"], optimizer.param_groups))[0]
self.base_lrs = [x["lr"] for x in optimizer.param_groups][0]
self.optimizer = optimizer
self.warmup = warmup
self.warmup_epoch = warmup_epoch
self.period = period
self.cos_period = period - low_epoch
self.low_epoch = low_epoch
self.lr_func = func_zoo[func]
self.min_lr = min_lr
def cosine_step(self, current_epoch: int, global_step: int, len_epoch: int) -> float:
if self.warmup and current_epoch < self.warmup_epoch:
lr = self.base_lrs * float(1 + global_step) / (self.warmup_epoch * len_epoch)
else:
lr = self.base_lrs * self.lr_func(current_epoch, global_step, len_epoch, self.cos_period)
lr = max(self.min_lr, lr)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
return lr
def step(self, current_epoch: int, global_step: int, len_epoch: int) -> float:
current_epoch = current_epoch % self.period
if current_epoch >= self.period - self.low_epoch:
global_step = len_epoch * self.cos_period
else:
global_step = global_step % (self.period * len_epoch)
return self.cosine_step(current_epoch, global_step, len_epoch)
| [
"math.cos"
]
| [((657, 709), 'math.cos', 'math.cos', (['(step * math.pi / (total_epoch * len_epoch))'], {}), '(step * math.pi / (total_epoch * len_epoch))\n', (665, 709), False, 'import math\n')] |
import json
import logging
logger = logging.getLogger(__name__)
with open('configuration.json') as f:
config = json.load(f)
TELEGRAM_TOKEN = config["telegram-bot-token"]
NOTION_TOKEN = config["notion-token"]
NOTION_TABLE_URL = config["inbox_table"]["table_url"]
def check_allowed_user(user_id):
"""
check if allowed user
:param user_id: telegram user id
:return True if user is valid , False otherwise
"""
valid_user = config["allowed_user_id"]
user_id = str(user_id)
return user_id == valid_user
def restrict_action(handled_action):
"""
Wrapper for creating a private bot
:param handled_action: the action to perform
"""
def check_private(update, context):
if not (check_allowed_user(update.message.from_user.id)):
logging.warning("An unauthorized user attempted to use the bot. username: {}, id: {} .".format(
update.message.from_user.username, update.message.from_user.id
))
return
else:
return handled_action(update, context)
return check_private
| [
"logging.getLogger",
"json.load"
]
| [((37, 64), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (54, 64), False, 'import logging\n'), ((117, 129), 'json.load', 'json.load', (['f'], {}), '(f)\n', (126, 129), False, 'import json\n')] |
from time import time
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator, Union
import arrow
import datetime
import math
from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query
from .common import RiotAPIService, APINotFoundError
from ...data import Platform, Season, Queue, SEASON_IDS, QUEUE_IDS
from ...dto.match import MatchDto, MatchListDto, TimelineDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_current_time(query: MutableMapping[str, Any], context: PipelineContext = None) -> int:
return int(time()) * 1000
class MatchAPI(RiotAPIService):
@DataSource.dispatch
def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
pass
@DataSource.dispatch
def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
pass
_validate_get_match_query = Query. \
has("id").as_(int).also. \
has("platform").as_(Platform)
@get.register(MatchDto)
@validate_query(_validate_get_match_query, convert_region_to_platform)
def get_match(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MatchDto:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matches/{id}".format(platform=query["platform"].value.lower(), id=query["id"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matches/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["gameId"] = query["id"]
data["region"] = query["platform"].region.value
for p in data["participantIdentities"]:
aid = p.get("player", {}).get("currentAccountId", None)
if aid == 0:
p["player"]["bot"] = True
return MatchDto(data)
_validate_get_many_match_query = Query. \
has("ids").as_(Iterable).also. \
has("platform").as_(Platform)
@get_many.register(MatchDto)
@validate_query(_validate_get_many_match_query, convert_region_to_platform)
def get_many_match(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MatchDto, None, None]:
def generator():
for id in query["ids"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matches/{id}".format(platform=query["platform"].value.lower(), id=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matches/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
for participant in data["participants"]:
participant.setdefault("runes", [])
for p in data["participantIdentities"]:
aid = p.get("player", {}).get("currentAccountId", None)
if aid == 0:
p["player"]["bot"] = True
data["gameId"] = id
data["region"] = query["platform"].region.value
yield MatchDto(data)
return generator()
_validate_get_match_list_query = Query. \
has("accountId").as_(str).also. \
has("platform").as_(Platform).also. \
has("beginTime").as_(int).also. \
can_have("endTime").as_(int).also. \
has("beginIndex").as_(int).also. \
has("maxNumberOfMatches").as_(float).also. \
can_have("seasons").as_(Iterable).also. \
can_have("champion.ids").as_(Iterable).also. \
can_have("queues").as_(Iterable)
@get.register(MatchListDto)
@validate_query(_validate_get_match_list_query, convert_region_to_platform)
def get_match_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MatchListDto:
params = {}
riot_index_interval = 100
riot_date_interval = datetime.timedelta(days=7)
begin_time = query["beginTime"] # type: arrow.Arrow
end_time = query.get("endTime", arrow.now()) # type: arrow.Arrow
if isinstance(begin_time, int):
begin_time = arrow.get(begin_time / 1000)
if isinstance(end_time, int):
end_time = arrow.get(end_time / 1000)
def determine_calling_method(begin_time, end_time) -> str:
"""Returns either "by_date" or "by_index"."""
matches_per_date_interval = 10 # This is an assumption
seconds_per_day = (60 * 60 * 24)
riot_date_interval_in_days = riot_date_interval.total_seconds() / seconds_per_day # in units of days
npulls_by_date = (end_time - begin_time).total_seconds() / seconds_per_day / riot_date_interval_in_days
npulls_by_index = (arrow.now() - begin_time).total_seconds() / seconds_per_day / riot_date_interval_in_days * matches_per_date_interval / riot_index_interval
if math.ceil(npulls_by_date) < math.ceil(npulls_by_index):
by = "by_date"
else:
by = "by_index"
return by
calling_method = determine_calling_method(begin_time, end_time)
if calling_method == "by_date":
params["beginTime"] = begin_time.timestamp * 1000
if "endTime" in query:
params["endTime"] = min((begin_time + riot_date_interval).timestamp * 1000, query["endTime"])
else:
params["endTime"] = (begin_time + riot_date_interval).timestamp * 1000
else:
params["beginIndex"] = query["beginIndex"]
params["endIndex"] = query["beginIndex"] + min(riot_index_interval, query["maxNumberOfMatches"])
params["endIndex"] = int(params["endIndex"])
if "seasons" in query:
seasons = {Season(season) for season in query["seasons"]}
params["season"] = {SEASON_IDS[season] for season in seasons}
else:
seasons = set()
if "champion.ids" in query:
champions = query["champion.ids"]
params["champion"] = champions
else:
champions = set()
if "queues" in query:
queues = {Queue(queue) for queue in query["queues"]}
params["queue"] = {QUEUE_IDS[queue] for queue in queues}
else:
queues = set()
url = "https://{platform}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}".format(platform=query["platform"].value.lower(), accountId=query["accountId"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matchlists/by-account/accountId")
data = self._get(url, params, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError:
data = {"matches": []}
data["accountId"] = query["accountId"]
data["region"] = query["platform"].region.value
data["season"] = seasons
data["champion"] = champions
data["queue"] = queues
if calling_method == "by_index":
data["beginIndex"] = params["beginIndex"]
data["endIndex"] = params["endIndex"]
data["maxNumberOfMatches"] = query["maxNumberOfMatches"]
else:
data["beginTime"] = params["beginTime"]
data["endTime"] = params["endTime"]
for match in data["matches"]:
match["accountId"] = query["accountId"]
match["region"] = Platform(match["platformId"]).region.value
return MatchListDto(data)
_validate_get_many_match_list_query = Query. \
has("accountIds").as_(Iterable).also. \
has("platform").as_(Platform).also. \
can_have("beginTime").as_(int).also. \
can_have("endTime").as_(int).also. \
can_have("beginIndex").as_(int).also. \
can_have("endIndex").as_(int).also. \
can_have("seasons").as_(Iterable).also. \
can_have("champion.ids").as_(Iterable).also. \
can_have("queues").as_(Iterable)
@get_many.register(MatchListDto)
@validate_query(_validate_get_many_match_list_query, convert_region_to_platform)
def get_many_match_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MatchListDto, None, None]:
params = {}
if "beginIndex" in query:
params["beginIndex"] = query["beginIndex"]
if "endIndex" in query:
params["endIndex"] = query["endIndex"]
if "seasons" in query:
seasons = {Season(season) for season in query["seasons"]}
params["season"] = {SEASON_IDS[season] for season in seasons}
else:
seasons = set()
if "champion.ids" in query:
params["champion"] = {query["champion.ids"]}
if "queues" in query:
queues = {Queue(queue) for queue in query["queues"]}
params["queue"] = {QUEUE_IDS[queue] for queue in queues}
else:
queues = set()
def generator():
for id in query["accountIds"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}".format(platform=query["platform"].value.lower(), accountId=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matchlists/by-account/accountId")
data = self._get(url, params, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["accountId"] = id
data["region"] = query["platform"].region.value
if "beginIndex" in query:
data["beginIndex"] = query["beginIndex"]
if "endIndex" in query:
data["endIndex"] = query["endIndex"]
if "seasons" in query:
data["seasons"] = seasons
if "champion.ids" in query:
data["champion"] = params["champion"]
if "queues" in query:
params["queue"] = queues
yield MatchListDto(data)
return generator()
_validate_get_timeline_query = Query. \
has("id").as_(int).also. \
has("platform").as_(Platform)
@get.register(TimelineDto)
@validate_query(_validate_get_timeline_query, convert_region_to_platform)
def get_match_timeline(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> TimelineDto:
url = "https://{platform}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}".format(platform=query["platform"].value.lower(), id=query["id"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "timelines/by-match/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["matchId"] = query["id"]
data["region"] = query["platform"].region.value
return TimelineDto(data)
_validate_get_many_timeline_query = Query. \
has("ids").as_(Iterable).also. \
has("platform").as_(Platform)
@get_many.register(TimelineDto)
@validate_query(_validate_get_many_timeline_query, convert_region_to_platform)
def get_many_match_timeline(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[TimelineDto, None, None]:
def generator():
for id in query["ids"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}".format(platform=query["platform"].value.lower(), id=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "timelines/by-match/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["matchId"] = id
data["region"] = query["platform"].region.value
yield TimelineDto(data)
return generator()
| [
"math.ceil",
"arrow.now",
"arrow.get",
"datapipelines.Query.has",
"datetime.timedelta",
"datapipelines.validate_query",
"time.time",
"typing.TypeVar"
]
| [((477, 489), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (484, 489), False, 'from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator, Union\n'), ((1103, 1172), 'datapipelines.validate_query', 'validate_query', (['_validate_get_match_query', 'convert_region_to_platform'], {}), '(_validate_get_match_query, convert_region_to_platform)\n', (1117, 1172), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((2194, 2268), 'datapipelines.validate_query', 'validate_query', (['_validate_get_many_match_query', 'convert_region_to_platform'], {}), '(_validate_get_many_match_query, convert_region_to_platform)\n', (2208, 2268), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((3959, 4033), 'datapipelines.validate_query', 'validate_query', (['_validate_get_match_list_query', 'convert_region_to_platform'], {}), '(_validate_get_match_list_query, convert_region_to_platform)\n', (3973, 4033), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((8372, 8451), 'datapipelines.validate_query', 'validate_query', (['_validate_get_many_match_list_query', 'convert_region_to_platform'], {}), '(_validate_get_many_match_list_query, convert_region_to_platform)\n', (8386, 8451), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((10709, 10781), 'datapipelines.validate_query', 'validate_query', (['_validate_get_timeline_query', 'convert_region_to_platform'], {}), '(_validate_get_timeline_query, convert_region_to_platform)\n', (10723, 10781), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((11664, 11741), 'datapipelines.validate_query', 'validate_query', (['_validate_get_many_timeline_query', 'convert_region_to_platform'], {}), '(_validate_get_many_timeline_query, convert_region_to_platform)\n', (11678, 11741), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((4230, 4256), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (4248, 4256), False, 'import datetime\n'), ((603, 609), 'time.time', 'time', ([], {}), '()\n', (607, 609), False, 'from time import time\n'), ((4359, 4370), 'arrow.now', 'arrow.now', ([], {}), '()\n', (4368, 4370), False, 'import arrow\n'), ((4458, 4486), 'arrow.get', 'arrow.get', (['(begin_time / 1000)'], {}), '(begin_time / 1000)\n', (4467, 4486), False, 'import arrow\n'), ((4548, 4574), 'arrow.get', 'arrow.get', (['(end_time / 1000)'], {}), '(end_time / 1000)\n', (4557, 4574), False, 'import arrow\n'), ((5229, 5254), 'math.ceil', 'math.ceil', (['npulls_by_date'], {}), '(npulls_by_date)\n', (5238, 5254), False, 'import math\n'), ((5257, 5283), 'math.ceil', 'math.ceil', (['npulls_by_index'], {}), '(npulls_by_index)\n', (5266, 5283), False, 'import math\n'), ((987, 1002), 'datapipelines.Query.has', 'Query.has', (['"""id"""'], {}), "('id')\n", (996, 1002), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((2067, 2083), 'datapipelines.Query.has', 'Query.has', (['"""ids"""'], {}), "('ids')\n", (2076, 2083), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((10590, 10605), 'datapipelines.Query.has', 'Query.has', (['"""id"""'], {}), "('id')\n", (10599, 10605), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((11534, 11550), 'datapipelines.Query.has', 'Query.has', (['"""ids"""'], {}), "('ids')\n", (11543, 11550), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((5075, 5086), 'arrow.now', 'arrow.now', ([], {}), '()\n', (5084, 5086), False, 'import arrow\n'), ((3495, 3517), 'datapipelines.Query.has', 'Query.has', (['"""accountId"""'], {}), "('accountId')\n", (3504, 3517), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n'), ((7894, 7917), 'datapipelines.Query.has', 'Query.has', (['"""accountIds"""'], {}), "('accountIds')\n", (7903, 7917), False, 'from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query\n')] |
"""
This caching is very important for speed and memory optimizations. There's
nothing really spectacular, just some decorators. The following cache types are
available:
- module caching (`load_parser` and `save_parser`), which uses pickle and is
really important to assure low load times of modules like ``numpy``.
- ``time_cache`` can be used to cache something for just a limited time span,
which can be useful if there's user interaction and the user cannot react
faster than a certain time.
This module is one of the reasons why |jedi| is not thread-safe. As you can see
there are global variables, which are holding the cache information. Some of
these variables are being cleaned after every API usage.
"""
import time
import os
import sys
import json
import hashlib
import gc
import inspect
import shutil
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from jedi import settings
from jedi import common
from jedi import debug
_time_caches = {}
# for fast_parser, should not be deleted
parser_cache = {}
class ParserCacheItem(object):
def __init__(self, parser, change_time=None):
self.parser = parser
if change_time is None:
change_time = time.time()
self.change_time = change_time
def clear_time_caches(delete_all=False):
""" Jedi caches many things, that should be completed after each completion
finishes.
:param delete_all: Deletes also the cache that is normally not deleted,
like parser cache, which is important for faster parsing.
"""
global _time_caches
if delete_all:
for cache in _time_caches.values():
cache.clear()
parser_cache.clear()
else:
# normally just kill the expired entries, not all
for tc in _time_caches.values():
# check time_cache for expired entries
for key, (t, value) in list(tc.items()):
if t < time.time():
# delete expired entries
del tc[key]
def time_cache(time_add_setting):
"""
s
This decorator works as follows: Call it with a setting and after that
use the function with a callable that returns the key.
But: This function is only called if the key is not available. After a
certain amount of time (`time_add_setting`) the cache is invalid.
"""
def _temp(key_func):
dct = {}
_time_caches[time_add_setting] = dct
def wrapper(*args, **kwargs):
generator = key_func(*args, **kwargs)
key = next(generator)
try:
expiry, value = dct[key]
if expiry > time.time():
return value
except KeyError:
pass
value = next(generator)
time_add = getattr(settings, time_add_setting)
if key is not None:
dct[key] = time.time() + time_add, value
return value
return wrapper
return _temp
@time_cache("call_signatures_validity")
def cache_call_signatures(evaluator, call, source, user_pos):
"""This function calculates the cache key."""
index = user_pos[0] - 1
lines = common.splitlines(source)
before_cursor = lines[index][:user_pos[1]]
other_lines = lines[call.start_pos[0]:index]
whole = '\n'.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = call.get_parent_until().path
yield None if module_path is None else (module_path, before_bracket, call.start_pos)
yield evaluator.eval_element(call)
def underscore_memoization(func):
"""
Decorator for methods::
class A(object):
def x(self):
if self._x:
self._x = 10
return self._x
Becomes::
class A(object):
@underscore_memoization
def x(self):
return 10
A now has an attribute ``_x`` written by this decorator.
"""
name = '_' + func.__name__
def wrapper(self):
try:
return getattr(self, name)
except AttributeError:
result = func(self)
if inspect.isgenerator(result):
result = list(result)
setattr(self, name, result)
return result
return wrapper
def memoize_method(method):
"""A normal memoize function."""
def wrapper(self, *args, **kwargs):
dct = self.__dict__.setdefault('_memoize_method_dct', {})
key = (args, frozenset(kwargs.items()))
try:
return dct[key]
except KeyError:
result = method(self, *args, **kwargs)
dct[key] = result
return result
return wrapper
def memoize_function(obj):
""" A normal memoize function for memoizing free functions. """
cache = obj.cache = {}
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def cache_star_import(func):
@time_cache("star_import_cache_validity")
def wrapper(self):
yield self.base # The cache key
yield func(self)
return wrapper
def _invalidate_star_import_cache_module(module, only_main=False):
""" Important if some new modules are being reparsed """
try:
t, modules = _time_caches['star_import_cache_validity'][module]
except KeyError:
pass
else:
del _time_caches['star_import_cache_validity'][module]
def invalidate_star_import_cache(path):
"""On success returns True."""
try:
parser_cache_item = parser_cache[path]
except KeyError:
pass
else:
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
def load_parser(path):
"""
Returns the module or None, if it fails.
"""
p_time = os.path.getmtime(path) if path else None
try:
parser_cache_item = parser_cache[path]
if not path or p_time <= parser_cache_item.change_time:
return parser_cache_item.parser
else:
# In case there is already a module cached and this module
# has to be reparsed, we also need to invalidate the import
# caches.
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
except KeyError:
if settings.use_filesystem_cache:
return ParserPickling.load_parser(path, p_time)
def save_parser(path, parser, pickling=True):
try:
p_time = None if path is None else os.path.getmtime(path)
except OSError:
p_time = None
pickling = False
item = ParserCacheItem(parser, p_time)
parser_cache[path] = item
if settings.use_filesystem_cache and pickling:
ParserPickling.save_parser(path, item)
class ParserPickling(object):
version = 24
"""
Version number (integer) for file system cache.
Increment this number when there are any incompatible changes in
parser representation classes. For example, the following changes
are regarded as incompatible.
- Class name is changed.
- Class is moved to another module.
- Defined slot of the class is changed.
"""
def __init__(self):
self.__index = None
self.py_tag = 'cpython-%s%s' % sys.version_info[:2]
"""
Short name for distinguish Python implementations and versions.
It's like `sys.implementation.cache_tag` but for Python < 3.3
we generate something similar. See:
http://docs.python.org/3/library/sys.html#sys.implementation
.. todo:: Detect interpreter (e.g., PyPy).
"""
def load_parser(self, path, original_changed_time):
try:
pickle_changed_time = self._index[path]
except KeyError:
return None
if original_changed_time is not None \
and pickle_changed_time < original_changed_time:
# the pickle file is outdated
return None
with open(self._get_hashed_path(path), 'rb') as f:
try:
gc.disable()
parser_cache_item = pickle.load(f)
finally:
gc.enable()
debug.dbg('pickle loaded: %s', path)
parser_cache[path] = parser_cache_item
return parser_cache_item.parser
def save_parser(self, path, parser_cache_item):
self.__index = None
try:
files = self._index
except KeyError:
files = {}
self._index = files
with open(self._get_hashed_path(path), 'wb') as f:
pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL)
files[path] = parser_cache_item.change_time
self._flush_index()
@property
def _index(self):
if self.__index is None:
try:
with open(self._get_path('index.json')) as f:
data = json.load(f)
except (IOError, ValueError):
self.__index = {}
else:
# 0 means version is not defined (= always delete cache):
if data.get('version', 0) != self.version:
self.clear_cache()
self.__index = {}
else:
self.__index = data['index']
return self.__index
def _remove_old_modules(self):
# TODO use
change = False
if change:
self._flush_index(self)
self._index # reload index
def _flush_index(self):
data = {'version': self.version, 'index': self._index}
with open(self._get_path('index.json'), 'w') as f:
json.dump(data, f)
self.__index = None
def clear_cache(self):
shutil.rmtree(self._cache_directory())
def _get_hashed_path(self, path):
return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest())
def _get_path(self, file):
dir = self._cache_directory()
if not os.path.exists(dir):
os.makedirs(dir)
return os.path.join(dir, file)
def _cache_directory(self):
return os.path.join(settings.cache_directory, self.py_tag)
# is a singleton
ParserPickling = ParserPickling()
| [
"os.path.exists",
"pickle.dump",
"gc.enable",
"os.makedirs",
"jedi.debug.dbg",
"gc.disable",
"os.path.join",
"re.match",
"inspect.isgenerator",
"pickle.load",
"json.load",
"os.path.getmtime",
"jedi.common.splitlines",
"time.time",
"json.dump"
]
| [((3207, 3232), 'jedi.common.splitlines', 'common.splitlines', (['source'], {}), '(source)\n', (3224, 3232), False, 'from jedi import common\n'), ((3404, 3439), 're.match', 're.match', (['""".*\\\\("""', 'whole', 're.DOTALL'], {}), "('.*\\\\(', whole, re.DOTALL)\n", (3412, 3439), False, 'import re\n'), ((5956, 5978), 'os.path.getmtime', 'os.path.getmtime', (['path'], {}), '(path)\n', (5972, 5978), False, 'import os\n'), ((8324, 8360), 'jedi.debug.dbg', 'debug.dbg', (['"""pickle loaded: %s"""', 'path'], {}), "('pickle loaded: %s', path)\n", (8333, 8360), False, 'from jedi import debug\n'), ((10197, 10220), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (10209, 10220), False, 'import os\n'), ((10269, 10320), 'os.path.join', 'os.path.join', (['settings.cache_directory', 'self.py_tag'], {}), '(settings.cache_directory, self.py_tag)\n', (10281, 10320), False, 'import os\n'), ((1226, 1237), 'time.time', 'time.time', ([], {}), '()\n', (1235, 1237), False, 'import time\n'), ((6645, 6667), 'os.path.getmtime', 'os.path.getmtime', (['path'], {}), '(path)\n', (6661, 6667), False, 'import os\n'), ((8726, 8784), 'pickle.dump', 'pickle.dump', (['parser_cache_item', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(parser_cache_item, f, pickle.HIGHEST_PROTOCOL)\n', (8737, 8784), False, 'import pickle\n'), ((9798, 9816), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (9807, 9816), False, 'import json\n'), ((10132, 10151), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (10146, 10151), False, 'import os\n'), ((10165, 10181), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (10176, 10181), False, 'import os\n'), ((4214, 4241), 'inspect.isgenerator', 'inspect.isgenerator', (['result'], {}), '(result)\n', (4233, 4241), False, 'import inspect\n'), ((8202, 8214), 'gc.disable', 'gc.disable', ([], {}), '()\n', (8212, 8214), False, 'import gc\n'), ((8251, 8265), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8262, 8265), False, 'import pickle\n'), ((8303, 8314), 'gc.enable', 'gc.enable', ([], {}), '()\n', (8312, 8314), False, 'import gc\n'), ((1944, 1955), 'time.time', 'time.time', ([], {}), '()\n', (1953, 1955), False, 'import time\n'), ((2667, 2678), 'time.time', 'time.time', ([], {}), '()\n', (2676, 2678), False, 'import time\n'), ((9046, 9058), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9055, 9058), False, 'import json\n'), ((2918, 2929), 'time.time', 'time.time', ([], {}), '()\n', (2927, 2929), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'PrestamoDeLibros.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(400, 300)
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(140, 70, 121, 41))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(140, 160, 121, 41))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.pushButton.setText(_translate("Form", "Solicitar", None))
self.pushButton_2.setText(_translate("Form", "Reservar", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| [
"PyQt4.QtGui.QApplication",
"PyQt4.QtGui.QWidget",
"PyQt4.QtCore.QMetaObject.connectSlotsByName",
"PyQt4.QtGui.QPushButton",
"PyQt4.QtGui.QApplication.translate",
"PyQt4.QtCore.QRect"
]
| [((1541, 1569), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1559, 1569), False, 'from PyQt4 import QtCore, QtGui\n'), ((1581, 1596), 'PyQt4.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (1594, 1596), False, 'from PyQt4 import QtCore, QtGui\n'), ((458, 522), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['context', 'text', 'disambig', '_encoding'], {}), '(context, text, disambig, _encoding)\n', (486, 522), False, 'from PyQt4 import QtCore, QtGui\n'), ((815, 838), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['Form'], {}), '(Form)\n', (832, 838), False, 'from PyQt4 import QtCore, QtGui\n'), ((998, 1021), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['Form'], {}), '(Form)\n', (1015, 1021), False, 'from PyQt4 import QtCore, QtGui\n'), ((1202, 1245), 'PyQt4.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (1239, 1245), False, 'from PyQt4 import QtCore, QtGui\n'), ((606, 659), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['context', 'text', 'disambig'], {}), '(context, text, disambig)\n', (634, 659), False, 'from PyQt4 import QtCore, QtGui\n'), ((875, 905), 'PyQt4.QtCore.QRect', 'QtCore.QRect', (['(140)', '(70)', '(121)', '(41)'], {}), '(140, 70, 121, 41)\n', (887, 905), False, 'from PyQt4 import QtCore, QtGui\n'), ((1060, 1091), 'PyQt4.QtCore.QRect', 'QtCore.QRect', (['(140)', '(160)', '(121)', '(41)'], {}), '(140, 160, 121, 41)\n', (1072, 1091), False, 'from PyQt4 import QtCore, QtGui\n')] |
from hwtest.shell_utils import run_command
def test_linux_usb3hub():
"""
Test for Linux Foundation 3.0 root hub in `lsusb` output
"""
resp = run_command(["lsusb"])
assert "1d6b:0003" in resp
| [
"hwtest.shell_utils.run_command"
]
| [((160, 182), 'hwtest.shell_utils.run_command', 'run_command', (["['lsusb']"], {}), "(['lsusb'])\n", (171, 182), False, 'from hwtest.shell_utils import run_command\n')] |
# Copyright 2012-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import backends
import environment, mesonlib
import build
import mlog
import dependencies
from mesonlib import File
from meson_install import InstallData
from build import InvalidArguments
from coredata import MesonException
import os, sys, pickle, re
import subprocess, shutil
if mesonlib.is_windows():
quote_char = '"'
execute_wrapper = 'cmd /c'
else:
quote_char = "'"
execute_wrapper = ''
def ninja_quote(text):
return text.replace(' ', '$ ').replace(':', '$:')
class RawFilename():
def __init__(self, fname):
self.fname = fname
def split(self, c):
return self.fname.split(c)
def startswith(self, s):
return self.fname.startswith(s)
class NinjaBuildElement():
def __init__(self, outfilenames, rule, infilenames):
if isinstance(outfilenames, str):
self.outfilenames = [outfilenames]
else:
self.outfilenames = outfilenames
assert(isinstance(rule, str))
self.rule = rule
if isinstance(infilenames, str):
self.infilenames = [infilenames]
else:
self.infilenames = infilenames
self.deps = []
self.orderdeps = []
self.elems = []
def add_dep(self, dep):
if isinstance(dep, list):
self.deps += dep
else:
self.deps.append(dep)
def add_orderdep(self, dep):
if isinstance(dep, list):
self.orderdeps += dep
else:
self.orderdeps.append(dep)
def add_item(self, name, elems):
if isinstance(elems, str):
elems = [elems]
self.elems.append((name, elems))
def write(self, outfile):
line = 'build %s: %s %s' % (' '.join([ninja_quote(i) for i in self.outfilenames]),\
self.rule,
' '.join([ninja_quote(i) for i in self.infilenames]))
if len(self.deps) > 0:
line += ' | ' + ' '.join([ninja_quote(x) for x in self.deps])
if len(self.orderdeps) > 0:
line += ' || ' + ' '.join([ninja_quote(x) for x in self.orderdeps])
line += '\n'
# This is the only way I could find to make this work on all
# platforms including Windows command shell. Slash is a dir separator
# on Windows, too, so all characters are unambiguous and, more importantly,
# do not require quoting.
line = line.replace('\\', '/')
outfile.write(line)
for e in self.elems:
(name, elems) = e
should_quote = True
if name == 'DEPFILE' or name == 'DESC' or name == 'pool':
should_quote = False
line = ' %s = ' % name
q_templ = quote_char + "%s" + quote_char
noq_templ = "%s"
newelems = []
for i in elems:
if not should_quote or i == '&&': # Hackety hack hack
templ = noq_templ
else:
templ = q_templ
i = i.replace('\\', '\\\\')
if quote_char == '"':
i = i.replace('"', '\\"')
newelems.append(templ % ninja_quote(i))
line += ' '.join(newelems)
line += '\n'
outfile.write(line)
outfile.write('\n')
class NinjaBackend(backends.Backend):
def __init__(self, build):
super().__init__(build)
self.source_suffix_in_objs = True
self.ninja_filename = 'build.ninja'
self.fortran_deps = {}
self.all_outputs = {}
def check_outputs(self, elem):
for n in elem.outfilenames:
if n in self.all_outputs:
raise MesonException('Multiple producers for Ninja target "%s". Please rename your targets.' % n)
self.all_outputs[n] = True
def detect_vs_dep_prefix(self, outfile, tempfilename):
'''VS writes its dependency in a locale dependent format.
Detect the search prefix to use.'''
if shutil.which('cl') is None:
return outfile
outfile.close()
open(os.path.join(self.environment.get_scratch_dir(), 'incdetect.c'),
'w').write('''#include<stdio.h>
int dummy;
''')
pc = subprocess.Popen(['cl', '/showIncludes', '/c', 'incdetect.c'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.environment.get_scratch_dir())
(stdo, _) = pc.communicate()
for line in stdo.split(b'\r\n'):
if line.endswith(b'stdio.h'):
matchstr = b':'.join(line.split(b':')[0:2]) + b':'
binfile = open(tempfilename, 'ab')
binfile.write(b'msvc_deps_prefix = ' + matchstr + b'\r\n')
binfile.close()
return open(tempfilename, 'a')
raise MesonException('Could not determine vs dep dependency prefix string.')
def generate(self, interp):
self.interpreter = interp
outfilename = os.path.join(self.environment.get_build_dir(), self.ninja_filename)
tempfilename = outfilename + '~'
outfile = open(tempfilename, 'w')
outfile.write('# This is the build file for project "%s"\n' % self.build.get_project())
outfile.write('# It is autogenerated by the Meson build system.\n')
outfile.write('# Do not edit by hand.\n\n')
outfile.write('ninja_required_version = 1.5.1\n\n')
outfile = self.detect_vs_dep_prefix(outfile, tempfilename)
self.generate_rules(outfile)
self.generate_phony(outfile)
outfile.write('# Build rules for targets\n\n')
[self.generate_target(t, outfile) for t in self.build.get_targets().values()]
if len(self.build.pot) > 0:
outfile.write('# Build rules for localisation.\n\n')
self.generate_po(outfile)
outfile.write('# Test rules\n\n')
self.generate_tests(outfile)
outfile.write('# Install rules\n\n')
self.generate_install(outfile)
if self.environment.coredata.get_builtin_option('coverage'):
outfile.write('# Coverage rules\n\n')
self.generate_coverage_rules(outfile)
outfile.write('# Suffix\n\n')
self.generate_ending(outfile)
# Only ovewrite the old build file after the new one has been
# fully created.
outfile.close()
os.replace(tempfilename, outfilename)
self.generate_compdb()
# http://clang.llvm.org/docs/JSONCompilationDatabase.html
def generate_compdb(self):
ninja_exe = environment.detect_ninja()
builddir = self.environment.get_build_dir()
jsondb = subprocess.check_output([ninja_exe, '-t', 'compdb', 'c_COMPILER', 'cpp_COMPILER'], cwd=builddir)
open(os.path.join(builddir, 'compile_commands.json'), 'wb').write(jsondb)
# Get all generated headers. Any source file might need them so
# we need to add an order dependency to them.
def get_generated_headers(self, target):
header_deps = []
for gensource in target.get_generated_sources():
if isinstance(gensource, build.CustomTarget):
continue
for src in gensource.get_outfilelist():
if self.environment.is_header(src):
header_deps.append(os.path.join(self.get_target_private_dir(target), src))
for dep in target.link_targets:
if isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
header_deps += self.get_generated_headers(dep)
return header_deps
def generate_target(self, target, outfile):
if isinstance(target, build.CustomTarget):
self.generate_custom_target(target, outfile)
if isinstance(target, build.RunTarget):
self.generate_run_target(target, outfile)
name = target.get_id()
gen_src_deps = []
if name in self.processed_targets:
return
if isinstance(target, build.Jar):
self.generate_jar_target(target, outfile)
return
if 'rust' in self.environment.coredata.compilers.keys() and self.has_rust(target):
self.generate_rust_target(target, outfile)
return
if 'cs' in self.environment.coredata.compilers.keys() and self.has_cs(target):
self.generate_cs_target(target, outfile)
return
if 'vala' in self.environment.coredata.compilers.keys() and self.has_vala(target):
gen_src_deps += self.generate_vala_compile(target, outfile)
if 'swift' in self.environment.coredata.compilers.keys() and self.has_swift(target):
self.generate_swift_target(target, outfile)
return
self.scan_fortran_module_outputs(target)
# The following deals with C/C++ compilation.
(gen_src, gen_other_deps) = self.process_dep_gens(outfile, target)
gen_src_deps += gen_src
self.process_target_dependencies(target, outfile)
self.generate_custom_generator_rules(target, outfile)
outname = self.get_target_filename(target)
obj_list = []
use_pch = self.environment.coredata.get_builtin_option('use_pch')
is_unity = self.environment.coredata.get_builtin_option('unity')
if use_pch and target.has_pch():
pch_objects = self.generate_pch(target, outfile)
else:
pch_objects = []
header_deps = gen_other_deps
unity_src = []
unity_deps = [] # Generated sources that must be built before compiling a Unity target.
header_deps += self.get_generated_headers(target)
for gensource in target.get_generated_sources():
if isinstance(gensource, build.CustomTarget):
for src in gensource.output:
src = os.path.join(self.get_target_dir(gensource), src)
if self.environment.is_source(src) and not self.environment.is_header(src):
if is_unity:
unity_deps.append(os.path.join(self.environment.get_build_dir(), RawFilename(src)))
else:
obj_list.append(self.generate_single_compile(target, outfile, RawFilename(src), True,
header_deps))
elif self.environment.is_object(src):
obj_list.append(src)
elif self.environment.is_library(src):
pass
else:
# Assume anything not specifically a source file is a header. This is because
# people generate files with weird suffixes (.inc, .fh) that they then include
# in their source files.
header_deps.append(RawFilename(src))
else:
for src in gensource.get_outfilelist():
if self.environment.is_object(src):
obj_list.append(os.path.join(self.get_target_private_dir(target), src))
elif not self.environment.is_header(src):
if is_unity:
if self.has_dir_part(src):
rel_src = src
else:
rel_src = os.path.join(self.get_target_private_dir(target), src)
unity_deps.append(rel_src)
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
unity_src.append(abs_src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, True,
header_deps=header_deps))
src_list = []
for src in gen_src_deps:
src_list.append(src)
if is_unity:
unity_src.append(os.path.join(self.environment.get_build_dir(), src))
header_deps.append(src)
else:
# Generated targets are ordered deps because the must exist
# before the sources compiling them are used. After the first
# compile we get precise dependency info from dep files.
# This should work in all cases. If it does not, then just
# move them from orderdeps to proper deps.
if self.environment.is_header(src):
header_deps.append(src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, True, [], header_deps))
for src in target.get_sources():
if src.endswith('.vala'):
continue
if not self.environment.is_header(src):
src_list.append(src)
if is_unity:
abs_src = os.path.join(self.environment.get_build_dir(),
src.rel_to_builddir(self.build_to_src))
unity_src.append(abs_src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, False, [], header_deps))
obj_list += self.flatten_object_list(target)
if is_unity:
for src in self.generate_unity_files(target, unity_src):
obj_list.append(self.generate_single_compile(target, outfile, src, True, unity_deps + header_deps))
linker = self.determine_linker(target, src_list)
elem = self.generate_link(target, outfile, outname, obj_list, linker, pch_objects)
self.generate_shlib_aliases(target, self.get_target_dir(target))
elem.write(outfile)
self.processed_targets[name] = True
def process_target_dependencies(self, target, outfile):
for t in target.get_dependencies():
tname = t.get_basename() + t.type_suffix()
if not tname in self.processed_targets:
self.generate_target(t, outfile)
def generate_custom_target(self, target, outfile):
(srcs, ofilenames, cmd) = self.eval_custom_target_command(target)
deps = []
for i in target.get_dependencies():
# FIXME, should not grab element at zero but rather expand all.
if isinstance(i, list):
i = i[0]
fname = i.get_filename()
if isinstance(fname, list):
fname = fname[0]
deps.append(os.path.join(self.get_target_dir(i), fname))
if target.build_always:
deps.append('PHONY')
elem = NinjaBuildElement(ofilenames, 'CUSTOM_COMMAND', srcs)
for i in target.depend_files:
if isinstance(i, mesonlib.File):
deps.append(i.rel_to_builddir(self.build_to_src))
else:
deps.append(os.path.join(self.build_to_src, i))
elem.add_dep(deps)
for d in target.extra_depends:
tmp = d.get_filename()
if not isinstance(tmp, list):
tmp = [tmp]
for fname in tmp:
elem.add_dep(os.path.join(self.get_target_dir(d), fname))
elem.add_item('COMMAND', cmd)
elem.add_item('description', 'Generating %s with a custom command.' % target.name)
elem.write(outfile)
self.check_outputs(elem)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_run_target(self, target, outfile):
runnerscript = os.path.join(self.environment.get_script_dir(), 'commandrunner.py')
deps = []
arg_strings = []
for i in target.args:
if isinstance(i, str):
arg_strings.append(i)
elif isinstance(i, (build.BuildTarget, build.CustomTarget)):
relfname = self.get_target_filename(i)
deps.append(relfname)
arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
else:
mlog.debug(str(i))
raise MesonException('Unreachable code in generate_run_target.')
elem = NinjaBuildElement(target.name, 'CUSTOM_COMMAND', deps)
cmd = [sys.executable, runnerscript, self.environment.get_source_dir(), self.environment.get_build_dir(), target.subdir]
texe = target.command
try:
texe = texe.held_object
except AttributeError:
pass
if isinstance(texe, build.Executable):
abs_exe = os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))
deps.append(self.get_target_filename(texe))
if self.environment.is_cross_build() \
and self.environment.cross_info.config['binaries'].get('exe_wrapper', None) is not None:
cmd += [self.environment.cross_info.config['binaries']['exe_wrapper']]
cmd.append(abs_exe)
else:
cmd.append(target.command)
cmd += arg_strings
elem.add_item('COMMAND', cmd)
elem.add_item('description', 'Running external command %s.' % target.name)
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_po(self, outfile):
for p in self.build.pot:
(packagename, languages, subdir) = p
input_file = os.path.join(subdir, 'POTFILES')
elem = NinjaBuildElement('pot', 'GEN_POT', [])
elem.add_item('PACKAGENAME', packagename)
elem.add_item('OUTFILE', packagename + '.pot')
elem.add_item('FILELIST', os.path.join(self.environment.get_source_dir(), input_file))
elem.add_item('OUTDIR', os.path.join(self.environment.get_source_dir(), subdir))
elem.write(outfile)
self.check_outputs(elem)
for l in languages:
infile = os.path.join(self.environment.get_source_dir(), subdir, l + '.po')
outfilename = os.path.join(subdir, l + '.gmo')
lelem = NinjaBuildElement(outfilename, 'GEN_GMO', infile)
lelem.add_item('INFILE', infile)
lelem.add_item('OUTFILE', outfilename)
lelem.write(outfile)
self.check_outputs(lelem)
def generate_coverage_rules(self, outfile):
(gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools()
added_rule = False
if gcovr_exe:
added_rule = True
elem = NinjaBuildElement('coverage-xml', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-x', '-r', self.environment.get_build_dir(),\
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.xml')])
elem.add_item('DESC', 'Generating XML coverage report.')
elem.write(outfile)
elem = NinjaBuildElement('coverage-text', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-r', self.environment.get_build_dir(),\
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.txt')])
elem.add_item('DESC', 'Generating text coverage report.')
elem.write(outfile)
self.check_outputs(elem)
if lcov_exe and genhtml_exe:
added_rule = True
phony_elem = NinjaBuildElement('coverage-html', 'phony', 'coveragereport/index.html')
phony_elem.write(outfile)
elem = NinjaBuildElement('coveragereport/index.html', 'CUSTOM_COMMAND', '')
command = [lcov_exe, '--directory', self.environment.get_build_dir(),\
'--capture', '--output-file', 'coverage.info', '--no-checksum',\
'&&', genhtml_exe, '--prefix', self.environment.get_build_dir(),\
'--output-directory', self.environment.get_log_dir(), '--title', 'Code coverage',\
'--legend', '--show-details', 'coverage.info']
elem.add_item('COMMAND', command)
elem.add_item('DESC', 'Generating HTML coverage report.')
self.check_outputs(elem)
elem.write(outfile)
if not added_rule:
mlog.log(mlog.red('Warning:'), 'coverage requested but neither gcovr nor lcov/genhtml found.')
def generate_install(self, outfile):
script_root = self.environment.get_script_dir()
install_script = os.path.join(script_root, 'meson_install.py')
install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
depfixer = os.path.join(script_root, 'depfixer.py')
d = InstallData(self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_prefix(), depfixer)
elem = NinjaBuildElement('install', 'CUSTOM_COMMAND', 'PHONY')
elem.add_dep('all')
elem.add_item('DESC', 'Installing files.')
elem.add_item('COMMAND', [sys.executable, install_script, install_data_file])
elem.add_item('pool', 'console')
self.generate_depmf_install(d)
self.generate_target_install(d)
self.generate_header_install(d)
self.generate_man_install(d)
self.generate_data_install(d)
self.generate_po_install(d, elem)
self.generate_custom_install_script(d)
self.generate_subdir_install(d)
elem.write(outfile)
self.check_outputs(elem)
ofile = open(install_data_file, 'wb')
pickle.dump(d, ofile)
def generate_po_install(self, d, elem):
for p in self.build.pot:
(package_name, languages, subdir) = p
# FIXME: assumes only one po package per source
d.po_package_name = package_name
for lang in languages:
rel_src = os.path.join(subdir, lang + '.gmo')
src_file = os.path.join(self.environment.get_build_dir(), rel_src)
d.po.append((src_file, self.environment.coredata.get_builtin_option('localedir'), lang))
elem.add_dep(rel_src)
def generate_target_install(self, d):
libdir = self.environment.get_libdir()
bindir = self.environment.get_bindir()
should_strip = self.environment.coredata.get_builtin_option('strip')
for t in self.build.get_targets().values():
if t.should_install():
outdir = t.get_custom_install_dir()
if outdir is None:
if isinstance(t, build.Executable):
outdir = bindir
else:
outdir = libdir
i = [self.get_target_filename(t), outdir, t.get_aliaslist(),\
should_strip, t.install_rpath]
d.targets.append(i)
def generate_custom_install_script(self, d):
d.install_scripts = self.build.install_scripts
def generate_header_install(self, d):
incroot = self.environment.get_includedir()
headers = self.build.get_headers()
for h in headers:
outdir = h.get_custom_install_dir()
if outdir is None:
outdir = os.path.join(incroot, h.get_install_subdir())
for f in h.get_sources():
abspath = os.path.join(self.environment.get_source_dir(), h.get_source_subdir(), f)
i = [abspath, outdir]
d.headers.append(i)
def generate_man_install(self, d):
manroot = self.environment.get_mandir()
man = self.build.get_man()
for m in man:
for f in m.get_sources():
num = f.split('.')[-1]
subdir = m.get_custom_install_dir()
if subdir is None:
subdir = os.path.join(manroot, 'man' + num)
srcabs = os.path.join(self.environment.get_source_dir(), m.get_source_subdir(), f)
dstabs = os.path.join(subdir, f + '.gz')
i = [srcabs, dstabs]
d.man.append(i)
def generate_data_install(self, d):
data = self.build.get_data()
for de in data:
assert(isinstance(de, build.Data))
subdir = de.install_dir
for f in de.sources:
if de.in_sourcetree:
srcprefix = self.environment.get_source_dir()
else:
srcprefix = self.environment.get_build_dir()
srcabs = os.path.join(srcprefix, de.source_subdir, f)
dstabs = os.path.join(subdir, f)
i = [srcabs, dstabs]
d.data.append(i)
def generate_subdir_install(self, d):
for sd in self.build.get_install_subdirs():
src_dir = os.path.join(self.environment.get_source_dir(), sd.source_subdir, sd.installable_subdir)
dst_dir = os.path.join(self.environment.get_prefix(), sd.install_dir)
d.install_subdirs.append([src_dir, dst_dir])
def write_test_suite_targets(self, cmd, outfile):
suites = {}
for t in self.build.get_tests():
for s in t.suite:
suites[s] = True
suites = list(suites.keys())
suites.sort()
for s in suites:
if s == '':
visible_name = 'for top level tests'
else:
visible_name = s
elem = NinjaBuildElement('test-' + s, 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd + ['--suite=' + s])
elem.add_item('DESC', 'Running test suite %s.' % visible_name)
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
def generate_tests(self, outfile):
self.serialise_tests()
valgrind = environment.find_valgrind()
script_root = self.environment.get_script_dir()
test_script = os.path.join(script_root, 'meson_test.py')
test_data = os.path.join(self.environment.get_scratch_dir(), 'meson_test_setup.dat')
cmd = [sys.executable, test_script, test_data]
elem = NinjaBuildElement('test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests.')
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
self.write_test_suite_targets(cmd, outfile)
if valgrind:
velem = NinjaBuildElement('test-valgrind', 'CUSTOM_COMMAND', ['all', 'PHONY'])
velem.add_item('COMMAND', cmd + ['--wrapper=' + valgrind])
velem.add_item('DESC', 'Running test suite under Valgrind.')
velem.add_item('pool', 'console')
velem.write(outfile)
self.check_outputs(velem)
# And then benchmarks.
benchmark_script = os.path.join(script_root, 'meson_benchmark.py')
benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
cmd = [sys.executable, benchmark_script, benchmark_data]
elem = NinjaBuildElement('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite.')
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
def generate_rules(self, outfile):
outfile.write('# Rules for compiling.\n\n')
self.generate_compile_rules(outfile)
outfile.write('# Rules for linking.\n\n')
if self.environment.is_cross_build():
self.generate_static_link_rules(True, outfile)
self.generate_static_link_rules(False, outfile)
self.generate_dynamic_link_rules(outfile)
outfile.write('# Other rules\n\n')
outfile.write('rule CUSTOM_COMMAND\n')
outfile.write(' command = $COMMAND\n')
outfile.write(' description = $DESC\n')
outfile.write(' restat = 1\n\n')
outfile.write('rule REGENERATE_BUILD\n')
c = (quote_char + ninja_quote(sys.executable) + quote_char,
quote_char + ninja_quote(self.environment.get_build_command()) + quote_char,
quote_char + ninja_quote(self.environment.get_source_dir()) + quote_char,
quote_char + ninja_quote(self.environment.get_build_dir()) + quote_char)
outfile.write(" command = %s %s %s %s --backend ninja secret-handshake\n" % c)
outfile.write(' description = Regenerating build files\n')
outfile.write(' generator = 1\n\n')
if len(self.build.pot) > 0:
self.generate_gettext_rules(outfile)
outfile.write('\n')
def generate_gettext_rules(self, outfile):
rule = 'rule GEN_POT\n'
command = " command = xgettext --package-name=$PACKAGENAME -p $OUTDIR -f $FILELIST -D '%s' -k_ -o $OUTFILE\n" % \
self.environment.get_source_dir()
desc = " description = Creating pot file for package $PACKAGENAME.\n"
outfile.write(rule)
outfile.write(command)
outfile.write(desc)
outfile.write('\n')
rule = 'rule GEN_GMO\n'
command = ' command = msgfmt $INFILE -o $OUTFILE\n'
desc = ' description = Generating gmo file $OUTFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(desc)
outfile.write('\n')
def generate_phony(self, outfile):
outfile.write('# Phony build target, always out of date\n')
outfile.write('build PHONY: phony\n')
outfile.write('\n')
def generate_jar_target(self, target, outfile):
fname = target.get_filename()
subdir = target.get_subdir()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
class_list = []
compiler = self.get_compiler_for_source(src_list[0])
assert(compiler.get_language() == 'java')
c = 'c'
m = ''
e = ''
f = 'f'
main_class = target.get_main_class()
if main_class != '':
e = 'e'
for src in src_list:
plain_class_path = self.generate_single_java_compile(src, target, compiler, outfile)
class_list.append(plain_class_path)
class_dep_list = [os.path.join(self.get_target_private_dir(target), i) for i in class_list]
jar_rule = 'java_LINKER'
commands = [c+m+e+f]
if e != '':
commands.append(main_class)
commands.append(self.get_target_filename(target))
for cls in class_list:
commands += ['-C', self.get_target_private_dir(target), cls]
elem = NinjaBuildElement(outname_rel, jar_rule, [])
elem.add_dep(class_dep_list)
elem.add_item('ARGS', commands)
elem.write(outfile)
self.check_outputs(elem)
def generate_cs_resource_tasks(self, target, outfile):
args = []
deps = []
for r in target.resources:
rel_sourcefile = os.path.join(self.build_to_src, target.subdir, r)
if r.endswith('.resources'):
a = '-resource:' + rel_sourcefile
elif r.endswith('.txt') or r.endswith('.resx'):
ofilebase = os.path.splitext(os.path.basename(r))[0] + '.resources'
ofilename = os.path.join(self.get_target_private_dir(target), ofilebase)
elem = NinjaBuildElement(ofilename, "CUSTOM_COMMAND", rel_sourcefile)
elem.add_item('COMMAND', ['resgen', rel_sourcefile, ofilename])
elem.add_item('DESC', 'Compiling resource %s.' % rel_sourcefile)
elem.write(outfile)
self.check_outputs(elem)
deps.append(ofilename)
a = '-resource:' + ofilename
else:
raise InvalidArguments('Unknown resource file %s.' % r)
args.append(a)
return (args, deps)
def generate_cs_target(self, target, outfile):
buildtype = self.environment.coredata.get_builtin_option('buildtype')
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
compiler = self.get_compiler_for_source(src_list[0])
assert(compiler.get_language() == 'cs')
rel_srcs = [s.rel_to_builddir(self.build_to_src) for s in src_list]
deps = []
commands = target.extra_args.get('cs', [])
commands += compiler.get_buildtype_args(buildtype)
if isinstance(target, build.Executable):
commands.append('-target:exe')
elif isinstance(target, build.SharedLibrary):
commands.append('-target:library')
else:
raise MesonException('Unknown C# target type.')
(resource_args, resource_deps) = self.generate_cs_resource_tasks(target, outfile)
commands += resource_args
deps += resource_deps
commands += compiler.get_output_args(outname_rel)
for l in target.link_targets:
lname = os.path.join(self.get_target_dir(l), l.get_filename())
commands += compiler.get_link_args(lname)
deps.append(lname)
if '-g' in commands:
outputs = [outname_rel, outname_rel + '.mdb']
else:
outputs = [outname_rel]
elem = NinjaBuildElement(outputs, 'cs_COMPILER', rel_srcs)
elem.add_dep(deps)
elem.add_item('ARGS', commands)
self.check_outputs(elem)
elem.write(outfile)
def generate_single_java_compile(self, src, target, compiler, outfile):
args = []
args += compiler.get_buildtype_args(self.environment.coredata.get_builtin_option('buildtype'))
args += compiler.get_output_args(self.get_target_private_dir(target))
for i in target.include_dirs:
for idir in i.get_incdirs():
args += ['-sourcepath', os.path.join(self.build_to_src, i.curdir, idir)]
rel_src = src.rel_to_builddir(self.build_to_src)
plain_class_path = src.fname[:-4] + 'class'
rel_obj = os.path.join(self.get_target_private_dir(target), plain_class_path)
element = NinjaBuildElement(rel_obj, compiler.get_language() + '_COMPILER', rel_src)
element.add_item('ARGS', args)
element.write(outfile)
self.check_outputs(element)
return plain_class_path
def generate_java_link(self, outfile):
rule = 'rule java_LINKER\n'
command = ' command = jar $ARGS\n'
description = ' description = Creating jar $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def split_vala_sources(self, sources):
src = []
vapi_src = []
for s in sources:
if s.endswith('.vapi'):
vapi_src.append(s)
else:
src.append(s)
return (src, vapi_src)
def determine_dep_vapis(self, target):
result = []
for dep in target.link_targets:
for i in dep.sources:
if hasattr(i, 'fname'):
i = i.fname
if i.endswith('vala'):
vapiname = os.path.splitext(os.path.split(i)[1])[0] + '.vapi'
fullname = os.path.join(self.get_target_private_dir(dep), vapiname)
result.append(fullname)
break
return result
def generate_vala_compile(self, target, outfile):
"""Vala is compiled into C. Set up all necessary build steps here."""
valac = self.environment.coredata.compilers['vala']
(src, vapi_src) = self.split_vala_sources(target.get_sources())
vapi_src = [x.rel_to_builddir(self.build_to_src) for x in vapi_src]
extra_dep_files = []
vala_input_files = []
for s in src:
if s.endswith('.vala'):
vala_input_files.append(s.rel_to_builddir(self.build_to_src))
namebase = os.path.splitext(os.path.split(vala_input_files[0])[1])[0]
hname = namebase + '.h'
vapiname = namebase + '.vapi'
outputs = [vapiname]
args = ['-d', self.get_target_private_dir(target)]
args += ['-C']#, '-o', cname]
if not isinstance(target, build.Executable):
outputs.append(hname)
args += ['-H', hname]
args += ['--vapi=' + vapiname]
for src in vala_input_files:
namebase = os.path.splitext(os.path.split(src)[1])[0] + '.c'
outputs.append(namebase)
if self.environment.coredata.get_builtin_option('werror'):
args += valac.get_werror_args()
for d in target.external_deps:
if isinstance(d, dependencies.PkgConfigDependency):
if d.name == 'glib-2.0' and d.version_requirement is not None \
and d.version_requirement.startswith(('>=', '==')):
args += ['--target-glib', d.version_requirement[2:]]
args += ['--pkg', d.name]
extra_args = []
for a in target.extra_args.get('vala', []):
if isinstance(a, File):
relname = a.rel_to_builddir(self.build_to_src)
extra_dep_files.append(relname)
extra_args.append(relname)
else:
extra_args.append(a)
dependency_vapis = self.determine_dep_vapis(target)
extra_dep_files += dependency_vapis
args += extra_args
args += dependency_vapis
outputs = [os.path.join(self.get_target_private_dir(target), x) for x in outputs]
element = NinjaBuildElement(outputs,
valac.get_language() + '_COMPILER',
vala_input_files + vapi_src)
element.add_item('ARGS', args)
element.add_dep(extra_dep_files)
element.write(outfile)
self.check_outputs(element)
return outputs
def generate_rust_target(self, target, outfile):
rustc = self.environment.coredata.compilers['rust']
relsrc = []
for i in target.get_sources():
if not rustc.can_compile(i):
raise InvalidArguments('Rust target %s contains a non-rust source file.' % target.get_basename())
relsrc.append(i.rel_to_builddir(self.build_to_src))
target_name = os.path.join(target.subdir, target.get_filename())
args = ['--crate-type']
if isinstance(target, build.Executable):
cratetype = 'bin'
elif isinstance(target, build.SharedLibrary):
cratetype = 'rlib'
elif isinstance(target, build.StaticLibrary):
cratetype = 'rlib'
else:
raise InvalidArguments('Unknown target type for rustc.')
args.append(cratetype)
args += rustc.get_buildtype_args(self.environment.coredata.get_builtin_option('buildtype'))
depfile = target.name + '.d'
args += ['--out-dir', target.subdir]
args += ['--emit', 'dep-info', '--emit', 'link']
orderdeps = [os.path.join(t.subdir, t.get_filename()) for t in target.link_targets]
linkdirs = {}
for d in target.link_targets:
linkdirs[d.subdir] = True
for d in linkdirs.keys():
if d == '':
d = '.'
args += ['-L', d]
element = NinjaBuildElement(target_name, 'rust_COMPILER', relsrc)
if len(orderdeps) > 0:
element.add_orderdep(orderdeps)
element.add_item('ARGS', args)
element.add_item('targetdep', depfile)
element.add_item('cratetype', cratetype)
element.write(outfile)
self.check_outputs(element)
def swift_module_file_name(self, target):
return os.path.join(self.get_target_private_dir(target),
self.target_swift_modulename(target) + '.swiftmodule')
def target_swift_modulename(self, target):
return target.name
def is_swift_target(self, target):
for s in target.sources:
if s.endswith('swift'):
return True
return False
def determine_swift_dep_modules(self, target):
result = []
for l in target.link_targets:
if self.is_swift_target(l):
result.append(self.swift_module_file_name(l))
return result
def determine_swift_dep_dirs(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_private_dir_abs(l))
return result
def get_swift_link_deps(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_filename(l))
return result
def split_swift_generated_sources(self, target):
all_srcs = []
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
for ifile in genlist.get_filename():
rel = os.path.join(self.get_target_dir(genlist), ifile)
all_srcs.append(rel)
else:
for ifile in genlist.get_outfilelist():
rel = os.path.join(self.get_target_private_dir(target), ifile)
all_srcs.append(rel)
srcs = []
others = []
for i in all_srcs:
if i.endswith('.swift'):
srcs.append(i)
else:
others.append(i)
return (srcs, others)
def generate_swift_target(self, target, outfile):
module_name = self.target_swift_modulename(target)
swiftc = self.environment.coredata.compilers['swift']
abssrc = []
abs_headers = []
header_imports = []
for i in target.get_sources():
if swiftc.can_compile(i):
relsrc = i.rel_to_builddir(self.build_to_src)
abss = os.path.normpath(os.path.join(self.environment.get_build_dir(), relsrc))
abssrc.append(abss)
elif self.environment.is_header(i):
relh = i.rel_to_builddir(self.build_to_src)
absh = os.path.normpath(os.path.join(self.environment.get_build_dir(), relh))
abs_headers.append(absh)
header_imports += swiftc.get_header_import_args(absh)
else:
raise InvalidArguments('Swift target %s contains a non-swift source file.' % target.get_basename())
os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)
compile_args = swiftc.get_compile_only_args()
compile_args += swiftc.get_module_args(module_name)
link_args = swiftc.get_output_args(os.path.join(self.environment.get_build_dir(), self.get_target_filename(target)))
rundir = self.get_target_private_dir(target)
out_module_name = self.swift_module_file_name(target)
in_module_files = self.determine_swift_dep_modules(target)
abs_module_dirs = self.determine_swift_dep_dirs(target)
module_includes = []
for x in abs_module_dirs:
module_includes += swiftc.get_include_args(x)
link_deps = self.get_swift_link_deps(target)
abs_link_deps = [os.path.join(self.environment.get_build_dir(), x) for x in link_deps]
(rel_generated, _) = self.split_swift_generated_sources(target)
abs_generated = [os.path.join(self.environment.get_build_dir(), x) for x in rel_generated]
# We need absolute paths because swiftc needs to be invoked in a subdir
# and this is the easiest way about it.
objects = [] # Relative to swift invocation dir
rel_objects = [] # Relative to build.ninja
for i in abssrc + abs_generated:
base = os.path.split(i)[1]
oname = os.path.splitext(base)[0] + '.o'
objects.append(oname)
rel_objects.append(os.path.join(self.get_target_private_dir(target), oname))
# Swiftc does not seem to be able to emit objects and module files in one go.
elem = NinjaBuildElement(rel_objects,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_dep(abs_headers)
elem.add_item('ARGS', compile_args + header_imports + abs_generated + module_includes)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
elem = NinjaBuildElement(out_module_name,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_item('ARGS', compile_args + abs_generated + module_includes + swiftc.get_mod_gen_args())
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
if isinstance(target, build.StaticLibrary):
elem = self.generate_link(target, outfile, self.get_target_filename(target),
rel_objects, self.build.static_linker)
elem.write(outfile)
elif isinstance(target, build.Executable):
elem = NinjaBuildElement(self.get_target_filename(target), 'swift_COMPILER', [])
elem.add_dep(rel_objects)
elem.add_dep(link_deps)
elem.add_item('ARGS', link_args + swiftc.get_std_exe_link_args() + objects + abs_link_deps)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
else:
raise MesonException('Swift supports only executable and static library targets.')
def generate_static_link_rules(self, is_cross, outfile):
if self.build.has_language('java'):
if not is_cross:
self.generate_java_link(outfile)
if is_cross:
if self.environment.cross_info.need_cross_compiler():
static_linker = self.build.static_cross_linker
else:
static_linker = self.build.static_linker
crstr = '_CROSS'
else:
static_linker = self.build.static_linker
crstr = ''
if static_linker is None:
return
rule = 'rule STATIC%s_LINKER\n' % crstr
if mesonlib.is_windows():
command_templ = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = $LINK_ARGS %s $in
'''
else:
command_templ = ' command = %s $LINK_ARGS %s $in\n'
command = command_templ %\
(' '.join(static_linker.get_exelist()),
' '.join(static_linker.get_output_args('$out')))
description = ' description = Static linking library $out\n\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
def generate_dynamic_link_rules(self, outfile):
ctypes = [(self.build.compilers, False)]
if self.environment.is_cross_build():
if self.environment.cross_info.need_cross_compiler():
ctypes.append((self.build.cross_compilers, True))
else:
# Native compiler masquerades as the cross compiler.
ctypes.append((self.build.compilers, True))
else:
ctypes.append((self.build.cross_compilers, True))
for (complist, is_cross) in ctypes:
for compiler in complist:
langname = compiler.get_language()
if langname == 'java' or langname == 'vala' or\
langname == 'rust' or langname == 'cs':
continue
crstr = ''
cross_args = []
if is_cross:
crstr = '_CROSS'
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_link_args']
except KeyError:
pass
rule = 'rule %s%s_LINKER\n' % (langname, crstr)
if mesonlib.is_windows():
command_template = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = %s $ARGS %s $in $LINK_ARGS $aliasing
'''
else:
command_template = ' command = %s %s $ARGS %s $in $LINK_ARGS $aliasing\n'
command = command_template % \
(' '.join(compiler.get_linker_exelist()),\
' '.join(cross_args),\
' '.join(compiler.get_linker_output_args('$out')))
description = ' description = Linking target $out'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
scriptdir = self.environment.get_script_dir()
outfile.write('\n')
symrule = 'rule SHSYM\n'
symcmd = ' command = "%s" "%s" %s %s $CROSS\n' % (ninja_quote(sys.executable),
ninja_quote(os.path.join(scriptdir, 'symbolextractor.py')),
'$in', '$out')
synstat = ' restat = 1\n'
syndesc = ' description = Generating symbol file $out.\n'
outfile.write(symrule)
outfile.write(symcmd)
outfile.write(synstat)
outfile.write(syndesc)
outfile.write('\n')
def generate_java_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Java object $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_cs_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling cs target $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_vala_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Vala source $in.\n'
restat = ' restat = 1\n' # ValaC does this always to take advantage of it.
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(restat)
outfile.write('\n')
def generate_rust_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Rust source $in.\n'
depfile = ' depfile = $targetdep\n'
depstyle = ' deps = gcc\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(depfile)
outfile.write(depstyle)
outfile.write('\n')
def generate_swift_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
full_exe = [sys.executable,
os.path.join(self.environment.get_script_dir(), 'dirchanger.py'),
'$RUNDIR'] + compiler.get_exelist()
invoc = ' '.join([ninja_quote(i) for i in full_exe])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Swift source $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_fortran_dep_hack(self, outfile):
if mesonlib.is_windows():
cmd = 'cmd /C ""'
else:
cmd = 'true'
template = '''# Workaround for these issues:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
rule FORTRAN_DEP_HACK
command = %s
description = Dep hack
restat = 1
'''
outfile.write(template % cmd)
def generate_compile_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname == 'java':
if not is_cross:
self.generate_java_compile_rule(compiler, outfile)
return
if langname == 'cs':
if not is_cross:
self.generate_cs_compile_rule(compiler, outfile)
return
if langname == 'vala':
if not is_cross:
self.generate_vala_compile_rules(compiler, outfile)
return
if langname == 'rust':
if not is_cross:
self.generate_rust_compile_rules(compiler, outfile)
return
if langname == 'swift':
if not is_cross:
self.generate_swift_compile_rules(compiler, outfile)
return
if langname == 'fortran':
self.generate_fortran_dep_hack(outfile)
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_COMPILER\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
cross_args = []
if is_cross:
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_args']
except KeyError:
pass
if mesonlib.is_windows():
command_template = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = %s $ARGS %s %s %s $in
'''
else:
command_template = ' command = %s %s $ARGS %s %s %s $in\n'
command = command_template % \
(' '.join(compiler.get_exelist()),\
' '.join(cross_args),
' '.join(quoted_depargs),\
' '.join(compiler.get_output_args('$out')),\
' '.join(compiler.get_compile_only_args()))
description = ' description = Compiling %s object $out\n' % langname
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_pch_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname != 'c' and langname != 'cpp':
return
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_PCH\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
cross_args = []
if is_cross:
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_args']
except KeyError:
pass
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
if compiler.get_id() == 'msvc':
output = ''
else:
output = ' '.join(compiler.get_output_args('$out'))
command = " command = %s %s $ARGS %s %s %s $in\n" % \
(' '.join(compiler.get_exelist()),\
' '.join(cross_args),\
' '.join(quoted_depargs),\
output,\
' '.join(compiler.get_compile_only_args()))
description = ' description = Precompiling header %s\n' % '$in'
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_compile_rules(self, outfile):
qstr = quote_char + "%s" + quote_char
for compiler in self.build.compilers:
langname = compiler.get_language()
self.generate_compile_rule_for(langname, compiler, qstr, False, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, False, outfile)
if self.environment.is_cross_build():
# In case we are going a target-only build, make the native compilers
# masquerade as cross compilers.
if self.environment.cross_info.need_cross_compiler():
cclist = self.build.cross_compilers
else:
cclist = self.build.compilers
for compiler in cclist:
langname = compiler.get_language()
self.generate_compile_rule_for(langname, compiler, qstr, True, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, True, outfile)
outfile.write('\n')
def replace_outputs(self, args, private_dir, output_list):
newargs = []
regex = re.compile('@OUTPUT(\d+)@')
for arg in args:
m = regex.search(arg)
while m is not None:
index = int(m.group(1))
src = '@OUTPUT%d@' % index
arg = arg.replace(src, os.path.join(private_dir, output_list[index]))
m = regex.search(arg)
newargs.append(arg)
return newargs
def generate_custom_generator_rules(self, target, outfile):
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
continue # Customtarget has already written its output rules
generator = genlist.get_generator()
exe = generator.get_exe()
exe_arr = self.exe_object_to_cmd_array(exe)
infilelist = genlist.get_infilelist()
outfilelist = genlist.get_outfilelist()
base_args = generator.get_arglist()
extra_dependencies = [os.path.join(self.build_to_src, i) for i in genlist.extra_depends]
for i in range(len(infilelist)):
if len(generator.outputs) == 1:
sole_output = os.path.join(self.get_target_private_dir(target), outfilelist[i])
else:
sole_output = ''
curfile = infilelist[i]
infilename = os.path.join(self.build_to_src, curfile)
outfiles = genlist.get_outputs_for(curfile)
outfiles = [os.path.join(self.get_target_private_dir(target), of) for of in outfiles]
args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)\
for x in base_args]
args = self.replace_outputs(args, self.get_target_private_dir(target), outfilelist)
# We have consumed output files, so drop them from the list of remaining outputs.
if sole_output == '':
outfilelist = outfilelist[len(generator.outputs):]
relout = self.get_target_private_dir(target)
args = [x.replace("@SOURCE_DIR@", self.build_to_src).replace("@BUILD_DIR@", relout)
for x in args]
final_args = []
for a in args:
if a == '@EXTRA_ARGS@':
final_args += genlist.get_extra_args()
else:
final_args.append(a)
cmdlist = exe_arr + final_args
elem = NinjaBuildElement(outfiles, 'CUSTOM_COMMAND', infilename)
if len(extra_dependencies) > 0:
elem.add_dep(extra_dependencies)
elem.add_item('DESC', 'Generating $out')
if isinstance(exe, build.BuildTarget):
elem.add_dep(self.get_target_filename(exe))
elem.add_item('COMMAND', cmdlist)
elem.write(outfile)
self.check_outputs(elem)
def scan_fortran_module_outputs(self, target):
compiler = None
for c in self.build.compilers:
if c.get_language() == 'fortran':
compiler = c
break
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
module_files = {}
for s in target.get_sources():
# FIXME, does not work for generated Fortran sources,
# but those are really rare. I hope.
if not compiler.can_compile(s):
continue
for line in open(os.path.join(self.environment.get_source_dir(), s.subdir, s.fname)):
modmatch = modre.match(line)
if modmatch is not None:
modname = modmatch.group(1)
if modname.lower() == 'procedure': # MODULE PROCEDURE construct
continue
if modname in module_files:
raise InvalidArguments('Namespace collision: module %s defined in two files %s and %s.' %
(modname, module_files[modname], s))
module_files[modname] = s
self.fortran_deps[target.get_basename()] = module_files
def get_fortran_deps(self, compiler, src, target):
mod_files = []
usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
dirname = self.get_target_private_dir(target)
tdeps= self.fortran_deps[target.get_basename()]
for line in open(src):
usematch = usere.match(line)
if usematch is not None:
usename = usematch.group(1)
if usename not in tdeps:
# The module is not provided by any source file. This is due to
# a) missing file/typo/etc
# b) using a module provided by the compiler, such as OpenMP
# There's no easy way to tell which is which (that I know of)
# so just ignore this and go on. Ideally we would print a
# warning message to the user but this is a common occurrance,
# which would lead to lots of distracting noise.
continue
mod_source_file = tdeps[usename]
# Check if a source uses a module it exports itself.
# Potential bug if multiple targets have a file with
# the same name.
if mod_source_file.fname == os.path.split(src)[1]:
continue
mod_name = compiler.module_name_to_filename(usematch.group(1))
mod_files.append(os.path.join(dirname, mod_name))
return mod_files
def generate_single_compile(self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]):
if(isinstance(src, str) and src.endswith('.h')):
raise RuntimeError('Fug')
if isinstance(src, RawFilename) and src.fname.endswith('.h'):
raise RuntimeError('Fug')
extra_orderdeps = []
compiler = self.get_compiler_for_source(src)
commands = self.generate_basic_compiler_args(target, compiler)
commands += compiler.get_include_args(self.get_target_private_dir(target), False)
curdir = target.get_subdir()
tmppath = os.path.normpath(os.path.join(self.build_to_src, curdir))
commands += compiler.get_include_args(tmppath, False)
if curdir == '':
curdir = '.'
commands += compiler.get_include_args(curdir, False)
for d in target.external_deps:
if d.need_threads():
commands += compiler.thread_flags()
break
if isinstance(src, RawFilename):
rel_src = src.fname
elif is_generated:
if self.has_dir_part(src):
rel_src = src
else:
rel_src = os.path.join(self.get_target_private_dir(target), src)
abs_src = os.path.join(self.environment.get_source_dir(), rel_src)
else:
if isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise build.InvalidArguments('Invalid source type.')
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
if isinstance(src, RawFilename):
src_filename = src.fname
elif isinstance(src, File):
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace('/', '_').replace('\\', '_')
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += '.' + self.environment.get_object_suffix()
dep_file = compiler.depfile_for_object(rel_obj)
if self.environment.coredata.get_builtin_option('use_pch'):
pchlist = target.get_pch(compiler.language)
else:
pchlist = []
if len(pchlist) == 0:
pch_dep = []
else:
arr = []
i = os.path.join(self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0]))
arr.append(i)
pch_dep = arr
for i in target.get_include_dirs():
basedir = i.get_curdir()
for d in i.get_incdirs():
expdir = os.path.join(basedir, d)
srctreedir = os.path.join(self.build_to_src, expdir)
bargs = compiler.get_include_args(expdir, i.is_system)
sargs = compiler.get_include_args(srctreedir, i.is_system)
commands += bargs
commands += sargs
for d in i.get_extra_build_dirs():
commands += compiler.get_include_args(d, i.is_system)
custom_target_include_dirs = []
for i in target.generated:
if isinstance(i, build.CustomTarget):
idir = self.get_target_dir(i)
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
for i in custom_target_include_dirs:
commands+= compiler.get_include_args(i, False)
if self.environment.coredata.get_builtin_option('use_pch'):
commands += self.get_pch_include_args(compiler, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
compiler_name = '%s%s_COMPILER' % (compiler.get_language(), crstr)
extra_deps = []
if compiler.get_language() == 'fortran':
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
# Dependency hack. Remove once multiple outputs in Ninja is fixed:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
for modname, srcfile in self.fortran_deps[target.get_basename()].items():
modfile = os.path.join(self.get_target_private_dir(target),
compiler.module_name_to_filename(modname))
if srcfile == src:
depelem = NinjaBuildElement(modfile, 'FORTRAN_DEP_HACK', rel_obj)
depelem.write(outfile)
self.check_outputs(depelem)
commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
element = NinjaBuildElement(rel_obj, compiler_name, rel_src)
for d in header_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_dep(d)
for d in extra_deps:
element.add_dep(d)
for d in order_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_orderdep(d)
element.add_orderdep(pch_dep)
element.add_orderdep(extra_orderdeps)
for i in self.get_fortran_orderdeps(target, compiler):
element.add_orderdep(i)
element.add_item('DEPFILE', dep_file)
element.add_item('ARGS', commands)
element.write(outfile)
self.check_outputs(element)
return rel_obj
def has_dir_part(self, fname):
return '/' in fname or '\\' in fname
# Fortran is a bit weird (again). When you link against a library, just compiling a source file
# requires the mod files that are output when single files are built. To do this right we would need to
# scan all inputs and write out explicit deps for each file. That is stoo slow and too much effort so
# instead just have an ordered dependendy on the library. This ensures all required mod files are created.
# The real deps are then detected via dep file generation from the compiler. This breaks on compilers that
# produce incorrect dep files but such is life.
def get_fortran_orderdeps(self, target, compiler):
if compiler.language != 'fortran':
return []
return [os.path.join(self.get_target_dir(lt), lt.get_filename()) for lt in target.link_targets]
def generate_msvc_pch_command(self, target, compiler, pch):
if len(pch) != 2:
raise RuntimeError('MSVC requires one header and one source to produce precompiled headers.')
header = pch[0]
source = pch[1]
pchname = compiler.get_pch_name(header)
dst = os.path.join(self.get_target_private_dir(target), pchname)
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
just_name = os.path.split(header)[1]
(objname, pch_args) = compiler.gen_pch_args(just_name, source, dst)
commands += pch_args
dep = dst + '.' + compiler.get_depfile_suffix()
return (commands, dep, dst, [objname])
def generate_gcc_pch_command(self, target, compiler, pch):
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
dst = os.path.join(self.get_target_private_dir(target),
os.path.split(pch)[-1] + '.' + compiler.get_pch_suffix())
dep = dst + '.' + compiler.get_depfile_suffix()
return (commands, dep, dst, []) # Gcc does not create an object file during pch generation.
def generate_pch(self, target, outfile):
cstr = ''
pch_objects = []
if target.is_cross:
cstr = '_CROSS'
for lang in ['c', 'cpp']:
pch = target.get_pch(lang)
if len(pch) == 0:
continue
if '/' not in pch[0] or '/' not in pch[-1]:
raise build.InvalidArguments('Precompiled header of "%s" must not be in the same directory as source, please put it in a subdirectory.' % target.get_basename())
compiler = self.get_compiler_for_lang(lang)
if compiler.id == 'msvc':
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[-1])
(commands, dep, dst, objs) = self.generate_msvc_pch_command(target, compiler, pch)
extradep = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
else:
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
(commands, dep, dst, objs) = self.generate_gcc_pch_command(target, compiler, pch[0])
extradep = None
pch_objects += objs
rulename = compiler.get_language() + cstr + '_PCH'
elem = NinjaBuildElement(dst, rulename, src)
if extradep is not None:
elem.add_dep(extradep)
elem.add_item('ARGS', commands)
elem.add_item('DEPFILE', dep)
elem.write(outfile)
self.check_outputs(elem)
return pch_objects
def generate_shsym(self, outfile, target):
target_name = self.get_target_filename(target)
targetdir = self.get_target_private_dir(target)
symname = os.path.join(targetdir, target_name + '.symbols')
elem = NinjaBuildElement(symname, 'SHSYM', target_name)
if self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler():
elem.add_item('CROSS', '--cross-host=' + self.environment.cross_info.config['host_machine']['system'])
elem.write(outfile)
self.check_outputs(elem)
def generate_link(self, target, outfile, outname, obj_list, linker, extra_args=[]):
if isinstance(target, build.StaticLibrary):
linker_base = 'STATIC'
else:
linker_base = linker.get_language() # Fixme.
if isinstance(target, build.SharedLibrary):
self.generate_shsym(outfile, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
linker_rule = linker_base + crstr + '_LINKER'
abspath = os.path.join(self.environment.get_build_dir(), target.subdir)
commands = []
commands += linker.get_linker_always_args()
commands += linker.get_buildtype_linker_args(self.environment.coredata.get_builtin_option('buildtype'))
commands += linker.get_option_link_args(self.environment.coredata.compiler_options)
if not(isinstance(target, build.StaticLibrary)):
commands += self.environment.coredata.external_link_args[linker.get_language()]
if isinstance(target, build.Executable):
commands += linker.get_std_exe_link_args()
elif isinstance(target, build.SharedLibrary):
commands += linker.get_std_shared_lib_link_args()
commands += linker.get_pic_args()
if hasattr(target, 'soversion'):
soversion = target.soversion
else:
soversion = None
commands += linker.get_soname_args(target.name, abspath, soversion)
elif isinstance(target, build.StaticLibrary):
commands += linker.get_std_link_args()
else:
raise RuntimeError('Unknown build target type.')
# Link arguments of static libraries are not put in the command line of
# the library. They are instead appended to the command line where
# the static library is used.
if linker_base == 'STATIC':
dependencies = []
else:
dependencies = target.get_dependencies()
commands += self.build_target_link_arguments(linker, dependencies)
for d in target.external_deps:
if d.need_threads():
commands += linker.thread_link_flags()
if not isinstance(target, build.StaticLibrary):
commands += target.link_args
# External deps must be last because target link libraries may depend on them.
if not(isinstance(target, build.StaticLibrary)):
for dep in target.get_external_deps():
commands += dep.get_link_args()
for d in target.get_dependencies():
if isinstance(d, build.StaticLibrary):
for dep in d.get_external_deps():
commands += dep.get_link_args()
commands += linker.build_rpath_args(self.environment.get_build_dir(),\
self.determine_rpath_dirs(target), target.install_rpath)
if self.environment.coredata.get_builtin_option('coverage'):
commands += linker.get_coverage_link_args()
custom_target_libraries = self.get_custom_target_provided_libraries(target)
commands += extra_args
commands += custom_target_libraries
commands = linker.unixtype_flags_to_native(commands)
dep_targets = [self.get_dependency_filename(t) for t in dependencies]
dep_targets += [os.path.join(self.environment.source_dir,
target.subdir, t) for t in target.link_depends]
elem = NinjaBuildElement(outname, linker_rule, obj_list)
elem.add_dep(dep_targets + custom_target_libraries)
elem.add_item('LINK_ARGS', commands)
self.check_outputs(elem)
return elem
def get_custom_target_provided_libraries(self, target):
libs = []
for t in target.get_generated_sources():
if not isinstance(t, build.CustomTarget):
continue
for f in t.output:
if self.environment.is_library(f):
libs.append(os.path.join(self.get_target_dir(t), f))
return libs
def determine_rpath_dirs(self, target):
link_deps = target.get_all_link_deps()
result = []
for ld in link_deps:
prospective = self.get_target_dir(ld)
if not prospective in result:
result.append(prospective)
return result
def get_dependency_filename(self, t):
if isinstance(t, build.SharedLibrary):
return os.path.join(self.get_target_private_dir(t), self.get_target_filename(t) + '.symbols')
return self.get_target_filename(t)
def generate_shlib_aliases(self, target, outdir):
basename = target.get_filename()
aliases = target.get_aliaslist()
if not mesonlib.is_windows():
for alias in aliases:
aliasfile = os.path.join(self.environment.get_build_dir(), outdir, alias)
try:
os.remove(aliasfile)
except Exception:
pass
os.symlink(basename, aliasfile)
else:
mlog.debug("Library versioning disabled because host does not support symlinks.")
def generate_gcov_clean(self, outfile):
gcno_elem = NinjaBuildElement('clean-gcno', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcno_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcno'])
gcno_elem.add_item('description', 'Deleting gcno files')
gcno_elem.write(outfile)
self.check_outputs(gcno_elem)
gcda_elem = NinjaBuildElement('clean-gcda', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcda_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcda'])
gcda_elem.add_item('description', 'Deleting gcda files')
gcda_elem.write(outfile)
self.check_outputs(gcda_elem)
def is_compilable_file(self, filename):
if filename.endswith('.cpp') or\
filename.endswith('.c') or\
filename.endswith('.cxx') or\
filename.endswith('.cc') or\
filename.endswith('.C'):
return True
return False
def process_dep_gens(self, outfile, target):
src_deps = []
other_deps = []
for rule in self.dep_rules.values():
srcs = target.get_original_kwargs().get(rule.src_keyword, [])
if isinstance(srcs, str):
srcs = [srcs]
for src in srcs:
plainname = os.path.split(src)[1]
basename = plainname.split('.')[0]
outname = rule.name_templ.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
outfilename = os.path.join(self.get_target_private_dir(target), outname)
infilename = os.path.join(self.build_to_src, target.get_source_subdir(), src)
elem = NinjaBuildElement(outfilename, rule.name, infilename)
elem.write(outfile)
self.check_outputs(elem)
if self.is_compilable_file(outfilename):
src_deps.append(outfilename)
else:
other_deps.append(outfilename)
return (src_deps, other_deps)
def generate_ending(self, outfile):
targetlist = [self.get_target_filename(t) for t in self.build.get_targets().values()\
if not isinstance(t, build.RunTarget)]
elem = NinjaBuildElement('all', 'phony', targetlist)
elem.write(outfile)
self.check_outputs(elem)
default = 'default all\n\n'
outfile.write(default)
ninja_command = environment.detect_ninja()
if ninja_command is None:
raise MesonException('Could not detect ninja command')
elem = NinjaBuildElement('clean', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', [ninja_command, '-t', 'clean'])
elem.add_item('description', 'Cleaning')
if self.environment.coredata.get_builtin_option('coverage'):
self.generate_gcov_clean(outfile)
elem.add_dep('clean-gcda')
elem.add_dep('clean-gcno')
elem.write(outfile)
self.check_outputs(elem)
deps = self.get_regen_filelist()
elem = NinjaBuildElement('build.ninja', 'REGENERATE_BUILD', deps)
elem.add_item('pool', 'console')
elem.write(outfile)
elem = NinjaBuildElement(deps, 'phony', '')
elem.write(outfile)
self.check_outputs(elem)
| [
"mesonlib.is_windows",
"re.compile",
"environment.detect_ninja",
"environment.find_valgrind",
"os.remove",
"mlog.red",
"os.path.split",
"subprocess.check_output",
"os.path.isabs",
"shutil.which",
"coredata.MesonException",
"os.path.splitext",
"pickle.dump",
"environment.find_coverage_tools",
"mlog.debug",
"os.path.join",
"os.symlink",
"os.replace",
"os.path.basename",
"build.InvalidArguments"
]
| [((875, 896), 'mesonlib.is_windows', 'mesonlib.is_windows', ([], {}), '()\n', (894, 896), False, 'import environment, mesonlib\n'), ((5495, 5565), 'coredata.MesonException', 'MesonException', (['"""Could not determine vs dep dependency prefix string."""'], {}), "('Could not determine vs dep dependency prefix string.')\n", (5509, 5565), False, 'from coredata import MesonException\n'), ((7046, 7083), 'os.replace', 'os.replace', (['tempfilename', 'outfilename'], {}), '(tempfilename, outfilename)\n', (7056, 7083), False, 'import os, sys, pickle, re\n'), ((7229, 7255), 'environment.detect_ninja', 'environment.detect_ninja', ([], {}), '()\n', (7253, 7255), False, 'import environment, mesonlib\n'), ((7325, 7425), 'subprocess.check_output', 'subprocess.check_output', (["[ninja_exe, '-t', 'compdb', 'c_COMPILER', 'cpp_COMPILER']"], {'cwd': 'builddir'}), "([ninja_exe, '-t', 'compdb', 'c_COMPILER',\n 'cpp_COMPILER'], cwd=builddir)\n", (7348, 7425), False, 'import subprocess, shutil\n'), ((19233, 19266), 'environment.find_coverage_tools', 'environment.find_coverage_tools', ([], {}), '()\n', (19264, 19266), False, 'import environment, mesonlib\n'), ((21307, 21352), 'os.path.join', 'os.path.join', (['script_root', '"""meson_install.py"""'], {}), "(script_root, 'meson_install.py')\n", (21319, 21352), False, 'import os, sys, pickle, re\n'), ((21464, 21504), 'os.path.join', 'os.path.join', (['script_root', '"""depfixer.py"""'], {}), "(script_root, 'depfixer.py')\n", (21476, 21504), False, 'import os, sys, pickle, re\n'), ((22403, 22424), 'pickle.dump', 'pickle.dump', (['d', 'ofile'], {}), '(d, ofile)\n', (22414, 22424), False, 'import os, sys, pickle, re\n'), ((26687, 26714), 'environment.find_valgrind', 'environment.find_valgrind', ([], {}), '()\n', (26712, 26714), False, 'import environment, mesonlib\n'), ((26793, 26835), 'os.path.join', 'os.path.join', (['script_root', '"""meson_test.py"""'], {}), "(script_root, 'meson_test.py')\n", (26805, 26835), False, 'import os, sys, pickle, re\n'), ((27738, 27785), 'os.path.join', 'os.path.join', (['script_root', '"""meson_benchmark.py"""'], {}), "(script_root, 'meson_benchmark.py')\n", (27750, 27785), False, 'import os, sys, pickle, re\n'), ((47234, 47255), 'mesonlib.is_windows', 'mesonlib.is_windows', ([], {}), '()\n', (47253, 47255), False, 'import environment, mesonlib\n'), ((52984, 53005), 'mesonlib.is_windows', 'mesonlib.is_windows', ([], {}), '()\n', (53003, 53005), False, 'import environment, mesonlib\n'), ((54867, 54888), 'mesonlib.is_windows', 'mesonlib.is_windows', ([], {}), '()\n', (54886, 54888), False, 'import environment, mesonlib\n'), ((58402, 58430), 're.compile', 're.compile', (['"""@OUTPUT(\\\\d+)@"""'], {}), "('@OUTPUT(\\\\d+)@')\n", (58412, 58430), False, 'import os, sys, pickle, re\n'), ((61703, 61752), 're.compile', 're.compile', (['"""\\\\s*module\\\\s+(\\\\w+)"""', 're.IGNORECASE'], {}), "('\\\\s*module\\\\s+(\\\\w+)', re.IGNORECASE)\n", (61713, 61752), False, 'import os, sys, pickle, re\n'), ((62800, 62846), 're.compile', 're.compile', (['"""\\\\s*use\\\\s+(\\\\w+)"""', 're.IGNORECASE'], {}), "('\\\\s*use\\\\s+(\\\\w+)', re.IGNORECASE)\n", (62810, 62846), False, 'import os, sys, pickle, re\n'), ((73651, 73700), 'os.path.join', 'os.path.join', (['targetdir', "(target_name + '.symbols')"], {}), "(targetdir, target_name + '.symbols')\n", (73663, 73700), False, 'import os, sys, pickle, re\n'), ((79461, 79506), 'os.path.join', 'os.path.join', (['script_root', '"""delwithsuffix.py"""'], {}), "(script_root, 'delwithsuffix.py')\n", (79473, 79506), False, 'import os, sys, pickle, re\n'), ((79913, 79958), 'os.path.join', 'os.path.join', (['script_root', '"""delwithsuffix.py"""'], {}), "(script_root, 'delwithsuffix.py')\n", (79925, 79958), False, 'import os, sys, pickle, re\n'), ((81956, 81982), 'environment.detect_ninja', 'environment.detect_ninja', ([], {}), '()\n', (81980, 81982), False, 'import environment, mesonlib\n'), ((4613, 4631), 'shutil.which', 'shutil.which', (['"""cl"""'], {}), "('cl')\n", (4625, 4631), False, 'import subprocess, shutil\n'), ((18229, 18261), 'os.path.join', 'os.path.join', (['subdir', '"""POTFILES"""'], {}), "(subdir, 'POTFILES')\n", (18241, 18261), False, 'import os, sys, pickle, re\n'), ((31887, 31936), 'os.path.join', 'os.path.join', (['self.build_to_src', 'target.subdir', 'r'], {}), '(self.build_to_src, target.subdir, r)\n', (31899, 31936), False, 'import os, sys, pickle, re\n'), ((64819, 64858), 'os.path.join', 'os.path.join', (['self.build_to_src', 'curdir'], {}), '(self.build_to_src, curdir)\n', (64831, 64858), False, 'import os, sys, pickle, re\n'), ((71242, 71263), 'os.path.split', 'os.path.split', (['header'], {}), '(header)\n', (71255, 71263), False, 'import os, sys, pickle, re\n'), ((77401, 77460), 'os.path.join', 'os.path.join', (['self.environment.source_dir', 'target.subdir', 't'], {}), '(self.environment.source_dir, target.subdir, t)\n', (77413, 77460), False, 'import os, sys, pickle, re\n'), ((78822, 78843), 'mesonlib.is_windows', 'mesonlib.is_windows', ([], {}), '()\n', (78841, 78843), False, 'import environment, mesonlib\n'), ((79164, 79250), 'mlog.debug', 'mlog.debug', (['"""Library versioning disabled because host does not support symlinks."""'], {}), "(\n 'Library versioning disabled because host does not support symlinks.')\n", (79174, 79250), False, 'import mlog\n'), ((82035, 82083), 'coredata.MesonException', 'MesonException', (['"""Could not detect ninja command"""'], {}), "('Could not detect ninja command')\n", (82049, 82083), False, 'from coredata import MesonException\n'), ((4301, 4402), 'coredata.MesonException', 'MesonException', (['(\'Multiple producers for Ninja target "%s". Please rename your targets.\' % n)'], {}), '(\n \'Multiple producers for Ninja target "%s". Please rename your targets.\' % n\n )\n', (4315, 4402), False, 'from coredata import MesonException\n'), ((18849, 18881), 'os.path.join', 'os.path.join', (['subdir', "(l + '.gmo')"], {}), "(subdir, l + '.gmo')\n", (18861, 18881), False, 'import os, sys, pickle, re\n'), ((21098, 21118), 'mlog.red', 'mlog.red', (['"""Warning:"""'], {}), "('Warning:')\n", (21106, 21118), False, 'import mlog\n'), ((22720, 22755), 'os.path.join', 'os.path.join', (['subdir', "(lang + '.gmo')"], {}), "(subdir, lang + '.gmo')\n", (22732, 22755), False, 'import os, sys, pickle, re\n'), ((24827, 24858), 'os.path.join', 'os.path.join', (['subdir', "(f + '.gz')"], {}), "(subdir, f + '.gz')\n", (24839, 24858), False, 'import os, sys, pickle, re\n'), ((25361, 25405), 'os.path.join', 'os.path.join', (['srcprefix', 'de.source_subdir', 'f'], {}), '(srcprefix, de.source_subdir, f)\n', (25373, 25405), False, 'import os, sys, pickle, re\n'), ((25431, 25454), 'os.path.join', 'os.path.join', (['subdir', 'f'], {}), '(subdir, f)\n', (25443, 25454), False, 'import os, sys, pickle, re\n'), ((33631, 33672), 'coredata.MesonException', 'MesonException', (['"""Unknown C# target type."""'], {}), "('Unknown C# target type.')\n", (33645, 33672), False, 'from coredata import MesonException\n'), ((44699, 44715), 'os.path.split', 'os.path.split', (['i'], {}), '(i)\n', (44712, 44715), False, 'import os, sys, pickle, re\n'), ((46517, 46593), 'coredata.MesonException', 'MesonException', (['"""Swift supports only executable and static library targets."""'], {}), "('Swift supports only executable and static library targets.')\n", (46531, 46593), False, 'from coredata import MesonException\n'), ((48953, 48974), 'mesonlib.is_windows', 'mesonlib.is_windows', ([], {}), '()\n', (48972, 48974), False, 'import environment, mesonlib\n'), ((59363, 59397), 'os.path.join', 'os.path.join', (['self.build_to_src', 'i'], {}), '(self.build_to_src, i)\n', (59375, 59397), False, 'import os, sys, pickle, re\n'), ((59751, 59791), 'os.path.join', 'os.path.join', (['self.build_to_src', 'curfile'], {}), '(self.build_to_src, curfile)\n', (59763, 59791), False, 'import os, sys, pickle, re\n'), ((65977, 65995), 'os.path.isabs', 'os.path.isabs', (['src'], {}), '(src)\n', (65990, 65995), False, 'import os, sys, pickle, re\n'), ((66915, 66939), 'os.path.join', 'os.path.join', (['basedir', 'd'], {}), '(basedir, d)\n', (66927, 66939), False, 'import os, sys, pickle, re\n'), ((66969, 67008), 'os.path.join', 'os.path.join', (['self.build_to_src', 'expdir'], {}), '(self.build_to_src, expdir)\n', (66981, 67008), False, 'import os, sys, pickle, re\n'), ((79106, 79137), 'os.symlink', 'os.symlink', (['basename', 'aliasfile'], {}), '(basename, aliasfile)\n', (79116, 79137), False, 'import os, sys, pickle, re\n'), ((7435, 7482), 'os.path.join', 'os.path.join', (['builddir', '"""compile_commands.json"""'], {}), "(builddir, 'compile_commands.json')\n", (7447, 7482), False, 'import os, sys, pickle, re\n'), ((15644, 15678), 'os.path.join', 'os.path.join', (['self.build_to_src', 'i'], {}), '(self.build_to_src, i)\n', (15656, 15678), False, 'import os, sys, pickle, re\n'), ((16844, 16902), 'coredata.MesonException', 'MesonException', (['"""Unreachable code in generate_run_target."""'], {}), "('Unreachable code in generate_run_target.')\n", (16858, 16902), False, 'from coredata import MesonException\n'), ((24668, 24702), 'os.path.join', 'os.path.join', (['manroot', "('man' + num)"], {}), "(manroot, 'man' + num)\n", (24680, 24702), False, 'import os, sys, pickle, re\n'), ((32709, 32758), 'build.InvalidArguments', 'InvalidArguments', (["('Unknown resource file %s.' % r)"], {}), "('Unknown resource file %s.' % r)\n", (32725, 32758), False, 'from build import InvalidArguments\n'), ((34810, 34857), 'os.path.join', 'os.path.join', (['self.build_to_src', 'i.curdir', 'idir'], {}), '(self.build_to_src, i.curdir, idir)\n', (34822, 34857), False, 'import os, sys, pickle, re\n'), ((36932, 36966), 'os.path.split', 'os.path.split', (['vala_input_files[0]'], {}), '(vala_input_files[0])\n', (36945, 36966), False, 'import os, sys, pickle, re\n'), ((39661, 39711), 'build.InvalidArguments', 'InvalidArguments', (['"""Unknown target type for rustc."""'], {}), "('Unknown target type for rustc.')\n", (39677, 39711), False, 'from build import InvalidArguments\n'), ((44739, 44761), 'os.path.splitext', 'os.path.splitext', (['base'], {}), '(base)\n', (44755, 44761), False, 'import os, sys, pickle, re\n'), ((49930, 49975), 'os.path.join', 'os.path.join', (['scriptdir', '"""symbolextractor.py"""'], {}), "(scriptdir, 'symbolextractor.py')\n", (49942, 49975), False, 'import os, sys, pickle, re\n'), ((58644, 58689), 'os.path.join', 'os.path.join', (['private_dir', 'output_list[index]'], {}), '(private_dir, output_list[index])\n', (58656, 58689), False, 'import os, sys, pickle, re\n'), ((64130, 64161), 'os.path.join', 'os.path.join', (['dirname', 'mod_name'], {}), '(dirname, mod_name)\n', (64142, 64161), False, 'import os, sys, pickle, re\n'), ((65688, 65734), 'build.InvalidArguments', 'build.InvalidArguments', (['"""Invalid source type."""'], {}), "('Invalid source type.')\n", (65710, 65734), False, 'import build\n'), ((66024, 66045), 'os.path.basename', 'os.path.basename', (['src'], {}), '(src)\n', (66040, 66045), False, 'import os, sys, pickle, re\n'), ((79010, 79030), 'os.remove', 'os.remove', (['aliasfile'], {}), '(aliasfile)\n', (79019, 79030), False, 'import os, sys, pickle, re\n'), ((80809, 80827), 'os.path.split', 'os.path.split', (['src'], {}), '(src)\n', (80822, 80827), False, 'import os, sys, pickle, re\n'), ((62427, 62557), 'build.InvalidArguments', 'InvalidArguments', (["('Namespace collision: module %s defined in two files %s and %s.' % (\n modname, module_files[modname], s))"], {}), "(\n 'Namespace collision: module %s defined in two files %s and %s.' % (\n modname, module_files[modname], s))\n", (62443, 62557), False, 'from build import InvalidArguments\n'), ((63966, 63984), 'os.path.split', 'os.path.split', (['src'], {}), '(src)\n', (63979, 63984), False, 'import os, sys, pickle, re\n'), ((71724, 71742), 'os.path.split', 'os.path.split', (['pch'], {}), '(pch)\n', (71737, 71742), False, 'import os, sys, pickle, re\n'), ((37408, 37426), 'os.path.split', 'os.path.split', (['src'], {}), '(src)\n', (37421, 37426), False, 'import os, sys, pickle, re\n'), ((32133, 32152), 'os.path.basename', 'os.path.basename', (['r'], {}), '(r)\n', (32149, 32152), False, 'import os, sys, pickle, re\n'), ((36146, 36162), 'os.path.split', 'os.path.split', (['i'], {}), '(i)\n', (36159, 36162), False, 'import os, sys, pickle, re\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import oslo_messaging as messaging
from heat.rpc import api as rpc_api
from heat.rpc import listener_client as rpc_client
from heat.tests import common
class ListenerClientTest(common.HeatTestCase):
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
def test_engine_alive_ok(self, rpc_client_method):
mock_rpc_client = rpc_client_method.return_value
mock_prepare_method = mock_rpc_client.prepare
mock_prepare_client = mock_prepare_method.return_value
mock_cnxt = mock.Mock()
listener_client = rpc_client.EngineListenerClient('engine-007')
rpc_client_method.assert_called_once_with(
version=rpc_client.EngineListenerClient.BASE_RPC_API_VERSION,
topic=rpc_api.LISTENER_TOPIC, server='engine-007',
)
mock_prepare_method.assert_called_once_with(timeout=2)
self.assertEqual(mock_prepare_client,
listener_client._client,
"Failed to create RPC client")
ret = listener_client.is_alive(mock_cnxt)
self.assertTrue(ret)
mock_prepare_client.call.assert_called_once_with(mock_cnxt,
'listening')
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
def test_engine_alive_timeout(self, rpc_client_method):
mock_rpc_client = rpc_client_method.return_value
mock_prepare_method = mock_rpc_client.prepare
mock_prepare_client = mock_prepare_method.return_value
mock_cnxt = mock.Mock()
listener_client = rpc_client.EngineListenerClient('engine-007')
rpc_client_method.assert_called_once_with(
version=rpc_client.EngineListenerClient.BASE_RPC_API_VERSION,
topic=rpc_api.LISTENER_TOPIC, server='engine-007',
)
mock_prepare_method.assert_called_once_with(timeout=2)
self.assertEqual(mock_prepare_client,
listener_client._client,
"Failed to create RPC client")
mock_prepare_client.call.side_effect = messaging.MessagingTimeout(
'too slow')
ret = listener_client.is_alive(mock_cnxt)
self.assertFalse(ret)
mock_prepare_client.call.assert_called_once_with(mock_cnxt,
'listening')
| [
"mock.Mock",
"oslo_messaging.MessagingTimeout",
"heat.rpc.listener_client.EngineListenerClient"
]
| [((1108, 1119), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1117, 1119), False, 'import mock\n'), ((1147, 1192), 'heat.rpc.listener_client.EngineListenerClient', 'rpc_client.EngineListenerClient', (['"""engine-007"""'], {}), "('engine-007')\n", (1178, 1192), True, 'from heat.rpc import listener_client as rpc_client\n'), ((2177, 2188), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2186, 2188), False, 'import mock\n'), ((2216, 2261), 'heat.rpc.listener_client.EngineListenerClient', 'rpc_client.EngineListenerClient', (['"""engine-007"""'], {}), "('engine-007')\n", (2247, 2261), True, 'from heat.rpc import listener_client as rpc_client\n'), ((2723, 2761), 'oslo_messaging.MessagingTimeout', 'messaging.MessagingTimeout', (['"""too slow"""'], {}), "('too slow')\n", (2749, 2761), True, 'import oslo_messaging as messaging\n'), ((846, 857), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (855, 857), False, 'import mock\n'), ((1910, 1921), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1919, 1921), False, 'import mock\n')] |
import os
from subprocess import call
from . import glob2
pwd = os.path.dirname(__file__)
def get_files_from_path(path, ext):
# use set to remove duplicate files. weird...but it happens
if os.path.isfile(path): return set([os.path.abspath(path)])
else: # i.e., folder
files = glob2.glob(os.path.abspath(os.path.join(path, "**/*.{}".format(ext))))
return set(sorted(files)) # to guarantee the order of files read
"""
handling javajskparser AST
"""
def toAST(files, ext, add_libs):
prg_files = []
for f in files:
prg_files.extend(get_files_from_path(f, "java"))
if not prg_files: exit('jskparser.util: File(s) not found!')
java_in = os.path.abspath(os.path.join(pwd, '../tests/ir_asts/API.java'))
json_out = os.path.abspath(os.path.join(pwd, '../tests/ir_asts/java.json'))
if add_libs:
obj_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Object.java'))
str_path = os.path.abspath(os.path.join(pwd, '../../model/lang/String.java'))
num_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Number.java'))
int_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Integer.java'))
char_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Character.java'))
itbl_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Iterable.java'))
iter_path = os.path.abspath(os.path.join(pwd, '../../model/util/Iterator.java'))
arr_path = os.path.abspath(os.path.join(pwd, '../../model/util/Arrays.java'))
list_path = os.path.abspath(os.path.join(pwd, '../../model/util/List.java'))
alist_path = os.path.abspath(os.path.join(pwd, '../../model/util/ArrayList.java'))
llist_path = os.path.abspath(os.path.join(pwd, '../../model/util/LinkedList.java'))
hmap_path = os.path.abspath(os.path.join(pwd, '../../model/util/HashMap.java'))
hset_path = os.path.abspath(os.path.join(pwd, '../../model/util/HashSet.java'))
if obj_path not in prg_files: prg_files.append(obj_path)
if str_path not in prg_files: prg_files.append(str_path)
if num_path not in prg_files: prg_files.append(num_path)
if int_path not in prg_files: prg_files.append(int_path)
if char_path not in prg_files: prg_files.append(char_path)
if itbl_path not in prg_files: prg_files.append(itbl_path)
if iter_path not in prg_files: prg_files.append(iter_path)
if arr_path not in prg_files: prg_files.append(arr_path)
if list_path not in prg_files: prg_files.append(list_path)
if alist_path not in prg_files: prg_files.append(alist_path)
if llist_path not in prg_files: prg_files.append(llist_path)
if hmap_path not in prg_files: prg_files.append(hmap_path)
if hset_path not in prg_files: prg_files.append(hset_path)
api = ""
for fname in prg_files:
with open(fname, 'r') as fd:
api += fd.read()
with open(java_in, 'w') as fd:
fd.write(api)
# this classpath stuff seems awful. Jsonify is hardcoded, passing a
# single string to subprocess.call is platform dependant, and shell=True
# can be a security vulnerability (if allowed to take user input).
# This just got a whole lot nastier
cmd = 'cd ' + pwd + '/..; /usr/bin/java -cp .:javaparser/javaparser-core/target/classes:$HOME/.m2/repository/com/cedarsoftware/json-io/4.3.0/json-io-4.3.0.jar jskparser.Jsonify ' + java_in + ' ' + json_out
ret = call(cmd, shell=True)
if ret != 0: exit('Problem parsing.')
return json_out
| [
"os.path.join",
"os.path.isfile",
"os.path.dirname",
"subprocess.call",
"os.path.abspath"
]
| [((66, 91), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (81, 91), False, 'import os\n'), ((200, 220), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (214, 220), False, 'import os\n'), ((3502, 3523), 'subprocess.call', 'call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (3506, 3523), False, 'from subprocess import call\n'), ((705, 751), 'os.path.join', 'os.path.join', (['pwd', '"""../tests/ir_asts/API.java"""'], {}), "(pwd, '../tests/ir_asts/API.java')\n", (717, 751), False, 'import os\n'), ((784, 831), 'os.path.join', 'os.path.join', (['pwd', '"""../tests/ir_asts/java.json"""'], {}), "(pwd, '../tests/ir_asts/java.json')\n", (796, 831), False, 'import os\n'), ((885, 934), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/lang/Object.java"""'], {}), "(pwd, '../../model/lang/Object.java')\n", (897, 934), False, 'import os\n'), ((971, 1020), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/lang/String.java"""'], {}), "(pwd, '../../model/lang/String.java')\n", (983, 1020), False, 'import os\n'), ((1057, 1106), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/lang/Number.java"""'], {}), "(pwd, '../../model/lang/Number.java')\n", (1069, 1106), False, 'import os\n'), ((1143, 1193), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/lang/Integer.java"""'], {}), "(pwd, '../../model/lang/Integer.java')\n", (1155, 1193), False, 'import os\n'), ((1231, 1283), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/lang/Character.java"""'], {}), "(pwd, '../../model/lang/Character.java')\n", (1243, 1283), False, 'import os\n'), ((1321, 1372), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/lang/Iterable.java"""'], {}), "(pwd, '../../model/lang/Iterable.java')\n", (1333, 1372), False, 'import os\n'), ((1410, 1461), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/util/Iterator.java"""'], {}), "(pwd, '../../model/util/Iterator.java')\n", (1422, 1461), False, 'import os\n'), ((1498, 1547), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/util/Arrays.java"""'], {}), "(pwd, '../../model/util/Arrays.java')\n", (1510, 1547), False, 'import os\n'), ((1585, 1632), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/util/List.java"""'], {}), "(pwd, '../../model/util/List.java')\n", (1597, 1632), False, 'import os\n'), ((1671, 1723), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/util/ArrayList.java"""'], {}), "(pwd, '../../model/util/ArrayList.java')\n", (1683, 1723), False, 'import os\n'), ((1762, 1815), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/util/LinkedList.java"""'], {}), "(pwd, '../../model/util/LinkedList.java')\n", (1774, 1815), False, 'import os\n'), ((1853, 1903), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/util/HashMap.java"""'], {}), "(pwd, '../../model/util/HashMap.java')\n", (1865, 1903), False, 'import os\n'), ((1941, 1991), 'os.path.join', 'os.path.join', (['pwd', '"""../../model/util/HashSet.java"""'], {}), "(pwd, '../../model/util/HashSet.java')\n", (1953, 1991), False, 'import os\n'), ((234, 255), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (249, 255), False, 'import os\n')] |
"""
Patches views.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import eta.core.utils as etau
import fiftyone.core.aggregations as foa
import fiftyone.core.dataset as fod
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
import fiftyone.core.sample as fos
import fiftyone.core.view as fov
_SINGLE_TYPES_MAP = {
fol.Detections: fol.Detection,
fol.Polylines: fol.Polyline,
}
_PATCHES_TYPES = (fol.Detections, fol.Polylines)
_NO_MATCH_ID = ""
class _PatchView(fos.SampleView):
@property
def _sample_id(self):
return self._doc.sample_id
def save(self):
super().save()
self._view._sync_source_sample(self)
class PatchView(_PatchView):
"""A patch in a :class:`PatchesView`.
:class:`PatchView` instances should not be created manually; they are
generated by iterating over :class:`PatchesView` instances.
Args:
doc: a :class:`fiftyone.core.odm.DatasetSampleDocument`
view: the :class:`PatchesView` that the patch belongs to
selected_fields (None): a set of field names that this view is
restricted to
excluded_fields (None): a set of field names that are excluded from
this view
filtered_fields (None): a set of field names of list fields that are
filtered in this view
"""
pass
class EvaluationPatchView(_PatchView):
"""A patch in an :class:`EvaluationPatchesView`.
:class:`EvaluationPatchView` instances should not be created manually; they
are generated by iterating over :class:`EvaluationPatchesView` instances.
Args:
doc: a :class:`fiftyone.core.odm.DatasetSampleDocument`
view: the :class:`EvaluationPatchesView` that the patch belongs to
selected_fields (None): a set of field names that this view is
restricted to
excluded_fields (None): a set of field names that are excluded from
this view
filtered_fields (None): a set of field names of list fields that are
filtered in this view
"""
pass
class _PatchesView(fov.DatasetView):
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
if _stages is None:
_stages = []
self._source_collection = source_collection
self._patches_stage = patches_stage
self._patches_dataset = patches_dataset
self.__stages = _stages
def __copy__(self):
return self.__class__(
self._source_collection,
deepcopy(self._patches_stage),
self._patches_dataset,
_stages=deepcopy(self.__stages),
)
@property
def _base_view(self):
return self.__class__(
self._source_collection,
self._patches_stage,
self._patches_dataset,
)
@property
def _dataset(self):
return self._patches_dataset
@property
def _root_dataset(self):
return self._source_collection._root_dataset
@property
def _stages(self):
return self.__stages
@property
def _all_stages(self):
return (
self._source_collection.view()._all_stages
+ [self._patches_stage]
+ self.__stages
)
@property
def _label_fields(self):
raise NotImplementedError("subclass must implement _label_fields")
@property
def _element_str(self):
return "patch"
@property
def _elements_str(self):
return "patches"
@property
def name(self):
return self.dataset_name + "-patches"
@classmethod
def _get_default_sample_fields(
cls, include_private=False, use_db_fields=False
):
fields = super()._get_default_sample_fields(
include_private=include_private, use_db_fields=use_db_fields
)
if use_db_fields:
return fields + ("_sample_id",)
return fields + ("sample_id",)
def set_values(self, field_name, *args, **kwargs):
field = field_name.split(".", 1)[0]
must_sync = field in self._label_fields
# The `set_values()` operation could change the contents of this view,
# so we first record the sample IDs that need to be synced
if must_sync and self._stages:
ids = self.values("_id")
else:
ids = None
super().set_values(field_name, *args, **kwargs)
if must_sync:
self._sync_source_field(field, ids=ids)
def save(self, fields=None):
"""Overwrites the object patches in the source dataset with the
contents of the view.
If this view contains any additional fields that were not extracted
from the source dataset, these fields are not saved.
.. warning::
This will permanently delete any omitted, filtered, or otherwise
modified patches from the source dataset.
Args:
fields (None): an optional field or list of fields to save. If
specified, only these fields are overwritten
"""
if etau.is_str(fields):
fields = [fields]
super().save(fields=fields)
if fields is None:
fields = self._label_fields
else:
fields = [l for l in fields if l in self._label_fields]
#
# IMPORTANT: we sync the contents of `_patches_dataset`, not `self`
# here because the `save()` call above updated the dataset, which means
# this view may no longer have the same contents (e.g., if `skip()` is
# involved)
#
self._sync_source_root(fields)
def reload(self):
self._root_dataset.reload()
#
# Regenerate the patches dataset
#
# This assumes that calling `load_view()` when the current patches
# dataset has been deleted will cause a new one to be generated
#
self._patches_dataset.delete()
_view = self._patches_stage.load_view(self._source_collection)
self._patches_dataset = _view._patches_dataset
def _sync_source_sample(self, sample):
for field in self._label_fields:
self._sync_source_sample_field(sample, field)
def _sync_source_sample_field(self, sample, field):
label_type = self._patches_dataset._get_label_field_type(field)
is_list_field = issubclass(label_type, fol._LABEL_LIST_FIELDS)
doc = sample._doc.field_to_mongo(field)
if is_list_field:
doc = doc[label_type._LABEL_LIST_FIELD]
self._source_collection._set_labels_by_id(
field, [sample.sample_id], [doc]
)
def _sync_source_field(self, field, ids=None):
_, label_path = self._patches_dataset._get_label_field_path(field)
if ids is not None:
view = self._patches_dataset.mongo(
[{"$match": {"_id": {"$in": ids}}}]
)
else:
view = self._patches_dataset
sample_ids, docs = view.aggregate(
[foa.Values("sample_id"), foa.Values(label_path, _raw=True)]
)
self._source_collection._set_labels_by_id(field, sample_ids, docs)
def _sync_source_root(self, fields):
for field in fields:
self._sync_source_root_field(field)
def _sync_source_root_field(self, field):
_, id_path = self._get_label_field_path(field, "id")
label_path = id_path.rsplit(".", 1)[0]
#
# Sync label updates
#
sample_ids, docs, label_ids = self._patches_dataset.aggregate(
[
foa.Values("sample_id"),
foa.Values(label_path, _raw=True),
foa.Values(id_path, unwind=True),
]
)
self._source_collection._set_labels_by_id(field, sample_ids, docs)
#
# Sync label deletions
#
_, src_id_path = self._source_collection._get_label_field_path(
field, "id"
)
src_ids = self._source_collection.values(src_id_path, unwind=True)
delete_ids = set(src_ids) - set(label_ids)
if delete_ids:
self._source_collection._dataset.delete_labels(
ids=delete_ids, fields=field
)
def _get_ids_map(self, field):
label_type = self._patches_dataset._get_label_field_type(field)
is_list_field = issubclass(label_type, fol._LABEL_LIST_FIELDS)
_, id_path = self._get_label_field_path(field, "id")
sample_ids, label_ids = self.values(["id", id_path])
ids_map = {}
if is_list_field:
for sample_id, _label_ids in zip(sample_ids, label_ids):
if not _label_ids:
continue
for label_id in _label_ids:
ids_map[label_id] = sample_id
else:
for sample_id, label_id in zip(sample_ids, label_ids):
if not label_id:
continue
ids_map[label_id] = sample_id
return ids_map
class PatchesView(_PatchesView):
"""A :class:`fiftyone.core.view.DatasetView` of patches from a
:class:`fiftyone.core.dataset.Dataset`.
Patches views contain an ordered collection of patch samples, each of which
contains a subset of a sample of the parent dataset corresponding to a
single object or logical grouping of of objects.
Patches retrieved from patches views are returned as :class:`PatchView`
objects.
Args:
source_collection: the
:class:`fiftyone.core.collections.SampleCollection` from which this
view was created
patches_stage: the :class:`fiftyone.core.stages.ToPatches` stage that
defines how the patches were extracted
patches_dataset: the :class:`fiftyone.core.dataset.Dataset` that serves
the patches in this view
"""
_SAMPLE_CLS = PatchView
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
super().__init__(
source_collection, patches_stage, patches_dataset, _stages=_stages
)
self._patches_field = patches_stage.field
@property
def _label_fields(self):
return [self._patches_field]
@property
def patches_field(self):
"""The field from which the patches in this view were extracted."""
return self._patches_field
class EvaluationPatchesView(_PatchesView):
"""A :class:`fiftyone.core.view.DatasetView` containing evaluation patches
from a :class:`fiftyone.core.dataset.Dataset`.
Evalation patches views contain an ordered collection of evaluation
examples, each of which contains the ground truth and/or predicted labels
for a true positive, false positive, or false negative example from an
evaluation run on the underlying dataset.
Patches retrieved from patches views are returned as
:class:`EvaluationPatchView` objects.
Args:
source_collection: the
:class:`fiftyone.core.collections.SampleCollection` from which this
view was created
patches_stage: the :class:`fiftyone.core.stages.ToEvaluationPatches`
stage that defines how the patches were extracted
patches_dataset: the :class:`fiftyone.core.dataset.Dataset` that serves
the patches in this view
"""
_SAMPLE_CLS = EvaluationPatchView
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
super().__init__(
source_collection, patches_stage, patches_dataset, _stages=_stages
)
eval_key = patches_stage.eval_key
eval_info = source_collection.get_evaluation_info(eval_key)
self._gt_field = eval_info.config.gt_field
self._pred_field = eval_info.config.pred_field
@property
def _label_fields(self):
return [self._gt_field, self._pred_field]
@property
def gt_field(self):
"""The ground truth field for the evaluation patches in this view."""
return self._gt_field
@property
def pred_field(self):
"""The predictions field for the evaluation patches in this view."""
return self._pred_field
def make_patches_dataset(
sample_collection, field, keep_label_lists=False, name=None
):
"""Creates a dataset that contains one sample per object patch in the
specified field of the collection.
Fields other than ``field`` and the default sample fields will not be
included in the returned dataset. A ``sample_id`` field will be added that
records the sample ID from which each patch was taken.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
field: the patches field, which must be of type
:class:`fiftyone.core.labels.Detections` or
:class:`fiftyone.core.labels.Polylines`
keep_label_lists (False): whether to store the patches in label list
fields of the same type as the input collection rather than using
their single label variants
name (None): a name for the returned dataset
Returns:
a :class:`fiftyone.core.dataset.Dataset`
"""
if keep_label_lists:
field_type = sample_collection._get_label_field_type(field)
else:
field_type = _get_single_label_field_type(sample_collection, field)
dataset = fod.Dataset(name, _patches=True)
dataset.media_type = fom.IMAGE
dataset.add_sample_field(
"sample_id", fof.ObjectIdField, db_field="_sample_id"
)
dataset.add_sample_field(
field, fof.EmbeddedDocumentField, embedded_doc_type=field_type
)
patches_view = _make_patches_view(
sample_collection, field, keep_label_lists=keep_label_lists
)
_write_samples(dataset, patches_view)
return dataset
def _get_single_label_field_type(sample_collection, field):
label_type = sample_collection._get_label_field_type(field)
if label_type not in _SINGLE_TYPES_MAP:
raise ValueError("Unsupported label field type %s" % label_type)
return _SINGLE_TYPES_MAP[label_type]
def make_evaluation_dataset(sample_collection, eval_key, name=None):
"""Creates a dataset based on the results of the evaluation with the given
key that contains one sample for each true positive, false positive, and
false negative example in the input collection, respectively.
True positive examples will result in samples with both their ground truth
and predicted fields populated, while false positive/negative examples will
only have one of their corresponding predicted/ground truth fields
populated, respectively.
If multiple predictions are matched to a ground truth object (e.g., if the
evaluation protocol includes a crowd attribute), then all matched
predictions will be stored in the single sample along with the ground truth
object.
The returned dataset will also have top-level ``type`` and ``iou`` fields
populated based on the evaluation results for that example, as well as a
``sample_id`` field recording the sample ID of the example, and a ``crowd``
field if the evaluation protocol defines a crowd attribute.
.. note::
The returned dataset will contain patches for the contents of the input
collection, which may differ from the view on which the ``eval_key``
evaluation was performed. This may exclude some labels that were
evaluated and/or include labels that were not evaluated.
If you would like to see patches for the exact view on which an
evaluation was performed, first call
:meth:`load_evaluation_view() <fiftyone.core.collections.SampleCollection.load_evaluation_view>`
to load the view and then convert to patches.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
eval_key: an evaluation key that corresponds to the evaluation of
ground truth/predicted fields that are of type
:class:`fiftyone.core.labels.Detections` or
:class:`fiftyone.core.labels.Polylines`
name (None): a name for the returned dataset
Returns:
a :class:`fiftyone.core.dataset.Dataset`
"""
# Parse evaluation info
eval_info = sample_collection.get_evaluation_info(eval_key)
pred_field = eval_info.config.pred_field
gt_field = eval_info.config.gt_field
if hasattr(eval_info.config, "iscrowd"):
crowd_attr = eval_info.config.iscrowd
else:
crowd_attr = None
pred_type = sample_collection._get_label_field_type(pred_field)
gt_type = sample_collection._get_label_field_type(gt_field)
# Setup dataset with correct schema
dataset = fod.Dataset(name, _patches=True)
dataset.media_type = fom.IMAGE
dataset.add_sample_field(
pred_field, fof.EmbeddedDocumentField, embedded_doc_type=pred_type
)
dataset.add_sample_field(
gt_field, fof.EmbeddedDocumentField, embedded_doc_type=gt_type
)
dataset.add_sample_field(
"sample_id", fof.ObjectIdField, db_field="_sample_id"
)
dataset.add_sample_field("type", fof.StringField)
dataset.add_sample_field("iou", fof.FloatField)
if crowd_attr is not None:
dataset.add_sample_field("crowd", fof.BooleanField)
# Add ground truth patches
gt_view = _make_eval_view(
sample_collection, eval_key, gt_field, crowd_attr=crowd_attr
)
_write_samples(dataset, gt_view)
# Merge matched predictions
_merge_matched_labels(dataset, sample_collection, eval_key, pred_field)
# Add unmatched predictions
unmatched_pred_view = _make_eval_view(
sample_collection, eval_key, pred_field, skip_matched=True
)
_add_samples(dataset, unmatched_pred_view)
return dataset
def _make_patches_view(sample_collection, field, keep_label_lists=False):
if sample_collection._is_frames:
raise ValueError(
"Creating patches views into frame views is not yet supported"
)
if sample_collection._is_frame_field(field):
raise ValueError(
"Frame label patches cannot be directly extracted; you must first "
"convert your video dataset to frames via `to_frames()`"
)
label_type = sample_collection._get_label_field_type(field)
if issubclass(label_type, _PATCHES_TYPES):
list_field = field + "." + label_type._LABEL_LIST_FIELD
else:
raise ValueError(
"Invalid label field type %s. Extracting patches is only "
"supported for the following types: %s"
% (label_type, _PATCHES_TYPES)
)
pipeline = [
{
"$project": {
"_id": True,
"_sample_id": "$_id",
"_media_type": True,
"filepath": True,
"metadata": True,
"tags": True,
field + "._cls": True,
list_field: True,
}
},
{"$unwind": "$" + list_field},
{"$set": {"_rand": {"$rand": {}}}},
{"$set": {"_id": "$" + list_field + "._id"}},
]
if keep_label_lists:
pipeline.append({"$set": {list_field: ["$" + list_field]}})
else:
pipeline.append({"$set": {field: "$" + list_field}})
return sample_collection.mongo(pipeline)
def _make_eval_view(
sample_collection, eval_key, field, skip_matched=False, crowd_attr=None
):
eval_type = field + "." + eval_key
eval_id = field + "." + eval_key + "_id"
eval_iou = field + "." + eval_key + "_iou"
view = _make_patches_view(sample_collection, field)
if skip_matched:
view = view.mongo(
[
{
"$match": {
"$expr": {
"$or": [
{"$eq": ["$" + eval_id, _NO_MATCH_ID]},
{"$not": {"$gt": ["$" + eval_id, None]}},
]
}
}
}
]
)
view = view.mongo(
[{"$set": {"type": "$" + eval_type, "iou": "$" + eval_iou}}]
)
if crowd_attr is not None:
crowd_path1 = "$" + field + "." + crowd_attr
# @todo remove Attributes usage
crowd_path2 = "$" + field + ".attributes." + crowd_attr + ".value"
view = view.mongo(
[
{
"$set": {
"crowd": {
"$cond": {
"if": {"$gt": [crowd_path1, None]},
"then": {"$toBool": crowd_path1},
"else": {
"$cond": {
"if": {"$gt": [crowd_path2, None]},
"then": {"$toBool": crowd_path2},
"else": None,
}
},
}
}
}
}
]
)
return _upgrade_labels(view, field)
def _upgrade_labels(view, field):
tmp_field = "_" + field
label_type = view._get_label_field_type(field)
return view.mongo(
[
{"$set": {tmp_field: "$" + field}},
{"$unset": field},
{
"$set": {
field: {
"_cls": label_type.__name__,
label_type._LABEL_LIST_FIELD: ["$" + tmp_field],
}
}
},
{"$unset": tmp_field},
]
)
def _merge_matched_labels(dataset, src_collection, eval_key, field):
field_type = src_collection._get_label_field_type(field)
list_field = field + "." + field_type._LABEL_LIST_FIELD
eval_id = eval_key + "_id"
eval_field = list_field + "." + eval_id
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.extend(
[
{"$project": {list_field: True}},
{"$unwind": "$" + list_field},
{
"$match": {
"$expr": {
"$and": [
{"$gt": ["$" + eval_field, None]},
{"$ne": ["$" + eval_field, _NO_MATCH_ID]},
]
}
}
},
{
"$group": {
"_id": {"$toObjectId": "$" + eval_field},
"_labels": {"$push": "$" + list_field},
}
},
{
"$project": {
field: {
"_cls": field_type.__name__,
field_type._LABEL_LIST_FIELD: "$_labels",
}
},
},
{
"$merge": {
"into": dataset._sample_collection_name,
"on": "_id",
"whenMatched": "merge",
"whenNotMatched": "discard",
}
},
]
)
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
def _write_samples(dataset, src_collection):
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.append({"$out": dataset._sample_collection_name})
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
def _add_samples(dataset, src_collection):
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.append(
{
"$merge": {
"into": dataset._sample_collection_name,
"on": "_id",
"whenMatched": "keepExisting",
"whenNotMatched": "insert",
}
}
)
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
| [
"eta.core.utils.is_str",
"copy.deepcopy",
"fiftyone.core.aggregations.Values",
"fiftyone.core.dataset.Dataset"
]
| [((13620, 13652), 'fiftyone.core.dataset.Dataset', 'fod.Dataset', (['name'], {'_patches': '(True)'}), '(name, _patches=True)\n', (13631, 13652), True, 'import fiftyone.core.dataset as fod\n'), ((17000, 17032), 'fiftyone.core.dataset.Dataset', 'fod.Dataset', (['name'], {'_patches': '(True)'}), '(name, _patches=True)\n', (17011, 17032), True, 'import fiftyone.core.dataset as fod\n'), ((5228, 5247), 'eta.core.utils.is_str', 'etau.is_str', (['fields'], {}), '(fields)\n', (5239, 5247), True, 'import eta.core.utils as etau\n'), ((2651, 2680), 'copy.deepcopy', 'deepcopy', (['self._patches_stage'], {}), '(self._patches_stage)\n', (2659, 2680), False, 'from copy import deepcopy\n'), ((2737, 2760), 'copy.deepcopy', 'deepcopy', (['self.__stages'], {}), '(self.__stages)\n', (2745, 2760), False, 'from copy import deepcopy\n'), ((7185, 7208), 'fiftyone.core.aggregations.Values', 'foa.Values', (['"""sample_id"""'], {}), "('sample_id')\n", (7195, 7208), True, 'import fiftyone.core.aggregations as foa\n'), ((7210, 7243), 'fiftyone.core.aggregations.Values', 'foa.Values', (['label_path'], {'_raw': '(True)'}), '(label_path, _raw=True)\n', (7220, 7243), True, 'import fiftyone.core.aggregations as foa\n'), ((7757, 7780), 'fiftyone.core.aggregations.Values', 'foa.Values', (['"""sample_id"""'], {}), "('sample_id')\n", (7767, 7780), True, 'import fiftyone.core.aggregations as foa\n'), ((7798, 7831), 'fiftyone.core.aggregations.Values', 'foa.Values', (['label_path'], {'_raw': '(True)'}), '(label_path, _raw=True)\n', (7808, 7831), True, 'import fiftyone.core.aggregations as foa\n'), ((7849, 7881), 'fiftyone.core.aggregations.Values', 'foa.Values', (['id_path'], {'unwind': '(True)'}), '(id_path, unwind=True)\n', (7859, 7881), True, 'import fiftyone.core.aggregations as foa\n')] |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Code to initialize the application server
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'
import base64
import time
import sys
from pdb import Pdb
from io import BytesIO
from zope.publisher.publish import publish as _publish, debug_call
from zope.publisher.browser import TestRequest, setDefaultSkin
from zope.app.publication.browser import BrowserPublication
from zope.app.appsetup import config, database
try:
from time import process_time as time_process_time # pragma: PY3
except ImportError:
from time import clock as time_process_time # pragma: PY2
try:
import urllib.parse as urllib # pragma: PY3
except ImportError:
import urllib # pragma: PY2
try:
text_type = unicode # pragma: PY2
except NameError:
text_type = str # pragma: PY3
class Debugger(object):
pdb = Pdb
def __init__(self, db=None, config_file=None, stdout=None):
if db is None and config_file is None:
db = 'Data.fs'
config_file = 'site.zcml'
if config_file is not None:
config(config_file)
self.db = database(db)
self.stdout = stdout
@classmethod
def fromDatabase(cls, db):
inst = cls.__new__(cls)
inst.db = db
return inst
def root(self):
"""Get the top-level application object
The object returned is connected to an open database connection.
"""
from zope.app.publication.zopepublication import ZopePublication
return self.db.open().root()[ZopePublication.root_name]
def _request(self,
path='/', stdin='', basic=None,
environment=None, form=None,
request=None, publication=BrowserPublication):
"""Create a request
"""
env = {}
if isinstance(stdin, text_type):
stdin = stdin.encode("utf-8")
if isinstance(stdin, bytes):
stdin = BytesIO(stdin)
p = path.split('?')
if len(p) == 1:
env['PATH_INFO'] = p[0]
elif len(p) == 2:
env['PATH_INFO'], env['QUERY_STRING'] = p
else:
raise ValueError("Too many ?s in path", path)
env['PATH_INFO'] = urllib.unquote(env['PATH_INFO'])
if environment is not None:
env.update(environment)
if basic:
basic_bytes = basic.encode('ascii') if not isinstance(
basic, bytes) else basic
basic64_bytes = base64.b64encode(basic_bytes)
basic64 = basic64_bytes.decode('ascii').strip()
env['HTTP_AUTHORIZATION'] = "Basic %s" % basic64
pub = publication(self.db)
if request is not None:
request = request(stdin, env)
else:
request = TestRequest(stdin, env)
setDefaultSkin(request)
request.setPublication(pub)
if form:
request.form.update(form)
return request
def publish(self, path='/', stdin='', *args, **kw):
t, pt = time.time(), time_process_time()
request = self._request(path, stdin, *args, **kw)
# agroszer: 2008.feb.1.: if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = _publish(request)
getStatus = getattr(request.response, 'getStatus', lambda: None)
headers = sorted(request.response.getHeaders())
print(
'Status %s\r\n%s\r\n\r\n%s' % (
request.response.getStatusString(),
'\r\n'.join([("%s: %s" % h) for h in headers]),
request.response.consumeBody(),
), file=self.stdout or sys.stdout)
return time.time() - t, time_process_time() - pt, getStatus()
def run(self, *args, **kw):
t, pt = time.time(), time_process_time()
request = self._request(*args, **kw)
# agroszer: 2008.feb.1.: if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = _publish(request, handle_errors=False)
getStatus = getattr(request.response, 'getStatus', lambda: None)
return time.time() - t, time_process_time() - pt, getStatus()
def debug(self, *args, **kw):
out = self.stdout or sys.stdout
class ZopePdb(self.Pdb):
done_pub = False
done_ob = False
def do_pub(self, arg):
if self.done_pub:
print('pub already done.', file=out)
return
self.do_s('')
self.do_s('')
self.do_c('')
self.done_pub = True
def do_ob(self, arg):
if self.done_ob:
print('ob already done.', file=out)
return
self.do_pub('')
self.do_c('')
self.done_ob = True
dbg = ZopePdb()
request = self._request(*args, **kw)
fbreak(dbg, _publish)
fbreak(dbg, debug_call)
print('* Type c<cr> to jump to published object call.',
file=out)
dbg.runcall(_publish, request)
return dbg
def getlineno(code):
return code.co_firstlineno
def fbreak(db, meth):
try:
meth = meth.__func__
except AttributeError:
pass
code = meth.__code__
lineno = getlineno(code)
filename = code.co_filename
db.set_break(filename, lineno)
| [
"zope.publisher.browser.TestRequest",
"zope.publisher.publish.publish",
"time.clock",
"urllib.unquote",
"zope.app.appsetup.database",
"base64.b64encode",
"io.BytesIO",
"zope.app.appsetup.config",
"zope.publisher.browser.setDefaultSkin",
"time.time"
]
| [((1749, 1761), 'zope.app.appsetup.database', 'database', (['db'], {}), '(db)\n', (1757, 1761), False, 'from zope.app.appsetup import config, database\n'), ((2873, 2905), 'urllib.unquote', 'urllib.unquote', (["env['PATH_INFO']"], {}), "(env['PATH_INFO'])\n", (2887, 2905), False, 'import urllib\n'), ((3936, 3953), 'zope.publisher.publish.publish', '_publish', (['request'], {}), '(request)\n', (3944, 3953), True, 'from zope.publisher.publish import publish as _publish, debug_call\n'), ((4715, 4753), 'zope.publisher.publish.publish', '_publish', (['request'], {'handle_errors': '(False)'}), '(request, handle_errors=False)\n', (4723, 4753), True, 'from zope.publisher.publish import publish as _publish, debug_call\n'), ((1711, 1730), 'zope.app.appsetup.config', 'config', (['config_file'], {}), '(config_file)\n', (1717, 1730), False, 'from zope.app.appsetup import config, database\n'), ((2589, 2603), 'io.BytesIO', 'BytesIO', (['stdin'], {}), '(stdin)\n', (2596, 2603), False, 'from io import BytesIO\n'), ((3134, 3163), 'base64.b64encode', 'base64.b64encode', (['basic_bytes'], {}), '(basic_bytes)\n', (3150, 3163), False, 'import base64\n'), ((3432, 3455), 'zope.publisher.browser.TestRequest', 'TestRequest', (['stdin', 'env'], {}), '(stdin, env)\n', (3443, 3455), False, 'from zope.publisher.browser import TestRequest, setDefaultSkin\n'), ((3468, 3491), 'zope.publisher.browser.setDefaultSkin', 'setDefaultSkin', (['request'], {}), '(request)\n', (3482, 3491), False, 'from zope.publisher.browser import TestRequest, setDefaultSkin\n'), ((3680, 3691), 'time.time', 'time.time', ([], {}), '()\n', (3689, 3691), False, 'import time\n'), ((3693, 3712), 'time.clock', 'time_process_time', ([], {}), '()\n', (3710, 3712), True, 'from time import clock as time_process_time\n'), ((4474, 4485), 'time.time', 'time.time', ([], {}), '()\n', (4483, 4485), False, 'import time\n'), ((4487, 4506), 'time.clock', 'time_process_time', ([], {}), '()\n', (4504, 4506), True, 'from time import clock as time_process_time\n'), ((4370, 4381), 'time.time', 'time.time', ([], {}), '()\n', (4379, 4381), False, 'import time\n'), ((4387, 4406), 'time.clock', 'time_process_time', ([], {}), '()\n', (4404, 4406), True, 'from time import clock as time_process_time\n'), ((4843, 4854), 'time.time', 'time.time', ([], {}), '()\n', (4852, 4854), False, 'import time\n'), ((4860, 4879), 'time.clock', 'time_process_time', ([], {}), '()\n', (4877, 4879), True, 'from time import clock as time_process_time\n')] |
"""
Unit Tests for the pydisque module.
Currently, most of these tests require a fresh instance of
Disque to be valid and pass.
"""
import unittest
import json
import time
import random
import six
from pydisque.client import Client
from redis.exceptions import ResponseError
class TestDisque(unittest.TestCase):
"""TestCase class for pydisque."""
testID = None
def setUp(self):
"""Setup the tests."""
self.client = Client(['localhost:7711'])
self.client.connect()
self.testID = "%d.%d" % (time.time(),
random.randint(1000, 1000000))
def test_publish_and_receive(self):
"""Test the most important functions of pydisque."""
t1 = str(time.time())
self.client.add_job("test_q", t1, timeout=100)
jobs = self.client.get_job(['test_q'])
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert job == six.b(t1)
self.client.ack_job(job_id)
assert len(self.client.get_job(['test_q'], timeout=100)) == 0
def test_nack(self):
"""Fetch the queue, return a job, check that it's back."""
t1 = str(time.time())
queuename = "test_nack." + self.testID
self.client.add_job(queuename, str(t1), timeout=100)
jobs = self.client.get_job([queuename])
# NACK the first read
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert len(jobs) == 1
assert job == six.b(t1)
self.client.nack_job(job_id)
# this time ACK it
jobs = self.client.get_job([queuename])
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert job == six.b(t1)
self.client.ack_job(job_id)
assert len(self.client.get_job([queuename], timeout=100)) == 0
def test_qpeek(self):
"""
Test qpeek.
Ran into some problems with an ENQUEUE/DEQUEUE test that
was using qpeek, checking core functionality of qpeek().
"""
queuename = "test_qpeek-%s" % self.testID
job_id = self.client.add_job(queuename, "Peek A Boo")
peeked = self.client.qpeek(queuename, 1)
assert peeked[0][1] == job_id
def test_qscan(self):
"""
Test the qscan function.
This test relies on add_job() being functional, and
the local disque not being a disque proxy to a mesh.
TODO: unique the queues with self.testID.
"""
t1 = str(time.time())
self.client.add_job("q1", t1, timeout=100)
self.client.add_job("q2", t1, timeout=100)
qb = self.client.qscan()
assert qb[0]
assert qb[1]
assert six.b("q1") in qb[1]
assert six.b("q2") in qb[1]
def test_jscan(self):
"""Simple test of the jscan function."""
t1 = time.time()
queuename = "test_jscan-%s" % self.testID
j1 = self.client.add_job(queuename, str(t1), timeout=100)
jerbs = self.client.jscan(queue=queuename)
assert j1 in jerbs[1]
def test_del_job(self):
"""Simple test of del_job, needs qpeek.
FIXME: This function has grown ugly.
"""
t1 = time.time()
queuename = "test_del_job-%s" % self.testID
j1 = self.client.add_job(queuename, str(t1))
jerbs = self.client.qpeek(queuename, 1)
jlist = []
for item in jerbs:
jlist.append(item[1])
assert j1 in jlist
self.client.del_job(j1)
jerbs = self.client.qpeek(queuename, 1)
jlist = []
for item in jerbs:
jlist.append(item[1])
assert j1 not in jerbs
def test_qlen(self):
"""Simple test of qlen."""
queuename = "test_qlen-%s" % self.testID
lengthOfTest = 100
test_job = "Useless Job."
for x in range(lengthOfTest):
self.client.add_job(queuename, test_job)
assert self.client.qlen(queuename) == lengthOfTest
def test_qstat(self):
"""Testing QSTAT (default behavior)."""
queuename = "test_qstat-%s" % self.testID
testqueue = ["a", "b", "c"]
for x in testqueue:
self.client.add_job(queuename, x)
stat = self.client.qstat(queuename)
# check the basics
assert 'jobs-in' in stat
assert 'jobs-out' in stat
def test_qstat_dict(self):
"""Testing QSTAT's (new dict behavior)."""
queuename = "test_qstat_dict-%s" % self.testID
testqueue = ["a", "b", "c"]
for x in testqueue:
self.client.add_job(queuename, x)
stat = self.client.qstat(queuename, True)
assert stat.get('jobs-in', None) is not None
assert stat.get('jobs-out', None) is not None
def test_shownack(self):
"""Test that NACK and SHOW work appropriately."""
queuename = "test_show-%s" % self.testID
test_job = "Show me."
self.client.add_job(queuename, test_job)
jobs = self.client.get_job([queuename])
for queue_name, job_id, job in jobs:
self.client.nack_job(job_id)
shown = self.client.show(job_id, True)
assert shown.get('body') == test_job
assert shown.get('nacks') == 1
def test_pause(self):
"""Test that a PAUSE message is acknowledged."""
queuename = "test_show-%s" % self.testID
test_job = "Jerbs, they are a thing"
self.client.pause(queuename, kw_in=True)
try:
job_id = self.client.add_job(queuename, test_job)
except ResponseError:
pass
# can we add a job again?
self.client.pause(queuename, kw_none=True)
job_id = self.client.add_job(queuename, test_job)
jobs = self.client.get_job([queuename])
# TODO(canardleteer): add a test of PAUSE SHOW
def test_get_job(self):
queue_name = "test_get_job." + self.testID
job = str(time.time())
job_id = self.client.add_job(queue_name, job)
expected = [(queue_name, job_id, job)]
got = self.client.get_job([queue_name], withcounters=False)
assert expected == got
def test_get_job_withcounters(self):
queue_name = "test_get_job." + self.testID
job = str(time.time())
job_id = self.client.add_job(queue_name, job)
nacks = 0
additional_deliveries = 0
expected = [(queue_name, job_id, job, nacks, additional_deliveries)]
got = self.client.get_job([queue_name], withcounters=True)
assert expected == got
if __name__ == '__main__':
unittest.main()
| [
"six.b",
"pydisque.client.Client",
"unittest.main",
"time.time",
"random.randint"
]
| [((6670, 6685), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6683, 6685), False, 'import unittest\n'), ((450, 476), 'pydisque.client.Client', 'Client', (["['localhost:7711']"], {}), "(['localhost:7711'])\n", (456, 476), False, 'from pydisque.client import Client\n'), ((2895, 2906), 'time.time', 'time.time', ([], {}), '()\n', (2904, 2906), False, 'import time\n'), ((3253, 3264), 'time.time', 'time.time', ([], {}), '()\n', (3262, 3264), False, 'import time\n'), ((736, 747), 'time.time', 'time.time', ([], {}), '()\n', (745, 747), False, 'import time\n'), ((1182, 1193), 'time.time', 'time.time', ([], {}), '()\n', (1191, 1193), False, 'import time\n'), ((2540, 2551), 'time.time', 'time.time', ([], {}), '()\n', (2549, 2551), False, 'import time\n'), ((2749, 2760), 'six.b', 'six.b', (['"""q1"""'], {}), "('q1')\n", (2754, 2760), False, 'import six\n'), ((2785, 2796), 'six.b', 'six.b', (['"""q2"""'], {}), "('q2')\n", (2790, 2796), False, 'import six\n'), ((6017, 6028), 'time.time', 'time.time', ([], {}), '()\n', (6026, 6028), False, 'import time\n'), ((6343, 6354), 'time.time', 'time.time', ([], {}), '()\n', (6352, 6354), False, 'import time\n'), ((540, 551), 'time.time', 'time.time', ([], {}), '()\n', (549, 551), False, 'import time\n'), ((586, 615), 'random.randint', 'random.randint', (['(1000)', '(1000000)'], {}), '(1000, 1000000)\n', (600, 615), False, 'import random\n'), ((952, 961), 'six.b', 'six.b', (['t1'], {}), '(t1)\n', (957, 961), False, 'import six\n'), ((1516, 1525), 'six.b', 'six.b', (['t1'], {}), '(t1)\n', (1521, 1525), False, 'import six\n'), ((1743, 1752), 'six.b', 'six.b', (['t1'], {}), '(t1)\n', (1748, 1752), False, 'import six\n')] |
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ runner.py ]
# Synopsis [ main program that runs the 'Naive Bayes' and 'Decision Tree' training / testing ]
# Author [ <NAME> (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import csv
import argparse
import numpy as np
from data_loader import data_loader
from classifiers import naive_bayes_runner
from classifiers import decision_tree_runner
##################
# CONFIGURATIONS #
##################
def get_config():
parser = argparse.ArgumentParser(description='descrip_msg')
classifier = parser.add_argument_group('classifier')
classifier.add_argument('--classifier', type=str, default='', help='classifier to be specified by user')
classifier.add_argument('--naive_bayes', action='store_true', help='enable Naive Bayes classification mode')
classifier.add_argument('--decision_tree', action='store_true', help='enable Decision Tree classification mode')
mode_args = parser.add_argument_group('mode')
mode_args.add_argument('--search_opt', action='store_true', help='search for optimal parameters for classifiers')
mode_args.add_argument('--run_all', action='store_true', help='run all distribution assumption for the Naive Bayes classifier')
mode_args.add_argument('--visualize_tree', action='store_true', help='plot and visualize the Decision Tree classifier')
data_args = parser.add_argument_group('data')
data_args.add_argument('--data_news', action='store_true', help='Training and testing on the News dataset')
data_args.add_argument('--data_mushroom', action='store_true', help='Training and testing on the Mushroom dataset')
data_args.add_argument('--data_income', action='store_true', help='Training and testing on the Income dataset')
path_args = parser.add_argument_group('train_path')
path_args.add_argument('--train_path', type=str, default='', help='training path to be specified by user')
path_args.add_argument('--train_path_news', type=str, default='../data/news/news_train.csv', help='path to the News training dataset')
path_args.add_argument('--train_path_mushroom', type=str, default='../data/mushroom/mushroom_train.csv', help='path to the Mushroom training dataset')
path_args.add_argument('--train_path_income', type=str, default='../data/income/income_train.csv', help='path to the Income training dataset')
path_args = parser.add_argument_group('test_path')
path_args.add_argument('--test_path', type=str, default='', help='testing path to be specified by user')
path_args.add_argument('--test_path_news', type=str, default='../data/news/news_test.csv', help='path to the News testing dataset')
path_args.add_argument('--test_path_mushroom', type=str, default='../data/mushroom/mushroom_test.csv', help='path to the Mushroom testing dataset')
path_args.add_argument('--test_path_income', type=str, default='../data/income/income_test.csv', help='path to the Income testing dataset')
path_args = parser.add_argument_group('output_path')
path_args.add_argument('--output_path', type=str, default='../result/output.csv', help='path to save model prediction')
args = parser.parse_args()
args = error_handling(args)
return args
##################
# ERROR HANDLING #
##################
def error_handling(args):
if args.classifier != '':
args.naive_bayes = True if args.classifier == 'N' else False
args.decision_tree = True if args.classifier == 'D' else False
if args.naive_bayes and args.decision_tree == True:
raise AssertionError('Please choose one classifier at once, or specify the correct classifier!')
if args.search_opt and args.run_all and args.visualize_tree == True:
raise AssertionError('Please choose one mode at a time!')
if args.data_news and args.data_mushroom and args.income == True:
raise AssertionError('Please choose one and at least one dataset at a time!')
if args.train_path != '' and args.test_path != '':
if not os.path.isfile(args.train_path) or not os.path.isfile(args.test_path):
raise AssertionError('The given file path is invalid!')
if args.data_news:
args.train_path_news = args.train_path
args.test_path_news = args.test_path
elif args.data_mushroom:
args.train_path_mushroom = args.train_path
args.test_path_mushroom = args.test_path
elif args.data_income:
args.train_path_income = args.train_path
args.test_path_income = args.test_path
else:
raise AssertionError('Must choose a dataset!')
return args
#################
# OUTPUT WRITER #
#################
def output_writer(path, result):
with open(path, 'w') as f:
file = csv.writer(f, delimiter=',', quotechar='\r')
for item in result:
file.writerow([int(item)])
print('Results have been successfully saved to: %s' % (path))
return True
########
# MAIN #
########
"""
main function
"""
def main():
args = get_config()
loader = data_loader(args)
#---fetch data---#
if args.data_news:
train_x, train_y, test_x, test_y = loader.fetch_news()
MODEL = 'NEWS'
elif args.data_mushroom:
train_x, train_y, test_x, test_y = loader.fetch_mushroom()
MODEL = 'MUSHROOM'
elif args.data_income:
train_x, train_y, test_x, test_y = loader.fetch_income() # -> test_y == None
MODEL = 'INCOME'
###############
# NAIVE BAYES #
###############
if args.naive_bayes:
#---construct model---#
naive_bayes = naive_bayes_runner(MODEL, train_x, train_y, test_x, test_y)
#---modes---#
if args.search_opt:
naive_bayes.search_alpha()
elif args.run_all:
naive_bayes.run_best_all()
else:
pred_y = naive_bayes.run_best()
output_writer(args.output_path, pred_y)
#################
# DECISION TREE #
#################
if args.decision_tree:
#---construct model---#
decision_tree = decision_tree_runner(MODEL, train_x, train_y, test_x, test_y)
#---modes---#
if args.search_opt:
decision_tree.search_max_depth()
elif args.visualize_tree:
decision_tree.visualize()
else:
pred_y = decision_tree.run_best()
output_writer(args.output_path, pred_y)
if __name__ == '__main__':
main()
| [
"classifiers.naive_bayes_runner",
"argparse.ArgumentParser",
"data_loader.data_loader",
"csv.writer",
"os.path.isfile",
"classifiers.decision_tree_runner"
]
| [((766, 816), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""descrip_msg"""'}), "(description='descrip_msg')\n", (789, 816), False, 'import argparse\n'), ((5092, 5109), 'data_loader.data_loader', 'data_loader', (['args'], {}), '(args)\n', (5103, 5109), False, 'from data_loader import data_loader\n'), ((4819, 4863), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""', 'quotechar': "'\\r'"}), "(f, delimiter=',', quotechar='\\r')\n", (4829, 4863), False, 'import csv\n'), ((5571, 5630), 'classifiers.naive_bayes_runner', 'naive_bayes_runner', (['MODEL', 'train_x', 'train_y', 'test_x', 'test_y'], {}), '(MODEL, train_x, train_y, test_x, test_y)\n', (5589, 5630), False, 'from classifiers import naive_bayes_runner\n'), ((5963, 6024), 'classifiers.decision_tree_runner', 'decision_tree_runner', (['MODEL', 'train_x', 'train_y', 'test_x', 'test_y'], {}), '(MODEL, train_x, train_y, test_x, test_y)\n', (5983, 6024), False, 'from classifiers import decision_tree_runner\n'), ((4156, 4187), 'os.path.isfile', 'os.path.isfile', (['args.train_path'], {}), '(args.train_path)\n', (4170, 4187), False, 'import os\n'), ((4195, 4225), 'os.path.isfile', 'os.path.isfile', (['args.test_path'], {}), '(args.test_path)\n', (4209, 4225), False, 'import os\n')] |
import copy
import numpy as np
import pybullet as p
from igibson.metrics.metric_base import MetricBase
class BehaviorRobotMetric(MetricBase):
def __init__(self):
self.initialized = False
self.state_cache = {}
self.next_state_cache = {}
self.agent_pos = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.agent_grasping = {part: [] for part in ["left_hand", "right_hand"]}
self.agent_local_pos = {part: [] for part in ["left_hand", "right_hand"]}
self.agent_reset = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_work = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_distance = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_grasp_distance = {part: [] for part in ["left_hand", "right_hand"]}
self.clip = 0.2
def step_callback(self, igbhvr_act_inst, _):
robot = igbhvr_act_inst.simulator.robots[0]
agent_work = {part: 0 for part in ["left_hand", "right_hand", "body"]}
agent_distance = {part: 0 for part in ["left_hand", "right_hand", "body"]}
for part in ["left_hand", "right_hand", "body"]:
self.next_state_cache[part] = {
"position": np.array(p.getBasePositionAndOrientation(robot.parts[part].get_body_id())[0]),
}
if not self.initialized:
self.state_cache = copy.deepcopy(self.next_state_cache)
self.initialized = True
if robot.action[19] > 0 and robot.action[27] > 0:
self.agent_reset["left_hand"].append(True)
self.agent_reset["right_hand"].append(True)
self.agent_reset["body"].append(True)
if robot.action[19] > 0:
self.agent_reset["left_hand"].append(True)
self.agent_reset["right_hand"].append(False)
self.agent_reset["body"].append(True)
elif robot.action[27] > 0:
self.agent_reset["left_hand"].append(False)
self.agent_reset["right_hand"].append(True)
self.agent_reset["body"].append(True)
else:
self.agent_reset["left_hand"].append(False)
self.agent_reset["right_hand"].append(False)
self.agent_reset["body"].append(False)
for part in self.state_cache:
delta_pos = np.linalg.norm(self.next_state_cache[part]["position"] - self.state_cache[part]["position"])
self.agent_pos[part].append(list(self.state_cache[part]["position"]))
# Exclude agent teleports
delta_pos = np.clip(delta_pos, -self.clip, self.clip)
if robot.parts[part].movement_cid is None:
force = 0
work = 0
else:
force = p.getConstraintState(robot.parts[part].movement_cid)
work = np.abs((delta_pos * np.linalg.norm(force)))
distance = np.abs(delta_pos)
if part in ["left_hand", "right_hand"]:
self.agent_local_pos[part].append(list(robot.parts[part].get_local_position_orientation()[0]))
if part in ["left_hand", "right_hand"] and (
len(p.getContactPoints(robot.parts[part].get_body_id())) > 0
or robot.parts[part].object_in_hand is not None
):
self.delta_agent_grasp_distance[part].append(distance)
self.agent_grasping[part].append(True)
elif part in ["left_hand", "right_hand"]:
self.delta_agent_grasp_distance[part].append(0)
self.agent_grasping[part].append(False)
agent_work[part] = work
agent_distance[part] = distance
self.delta_agent_work[part].append(work)
self.delta_agent_distance[part].append(distance)
self.state_cache = copy.deepcopy(self.next_state_cache)
def gather_results(self):
return {
"agent_distance": {
"timestep": self.delta_agent_distance,
},
"grasp_distance": {
"timestep": self.delta_agent_grasp_distance,
},
"work": {
"timestep": self.delta_agent_work,
},
"pos": {
"timestep": self.agent_pos,
},
"local_pos": {
"timestep": self.agent_local_pos,
},
"grasping": {
"timestep": self.agent_grasping,
},
"reset": {
"timestep": self.agent_reset,
},
}
class FetchRobotMetric(MetricBase):
def __init__(self):
self.initialized = False
self.state_cache = {}
self.next_state_cache = {}
self.agent_pos = {part: [] for part in ["gripper", "body"]}
self.agent_grasping = {part: [] for part in ["gripper"]}
self.agent_local_pos = {part: [] for part in ["gripper"]}
self.delta_agent_distance = {part: [] for part in ["gripper", "body"]}
self.delta_agent_grasp_distance = {part: [] for part in ["gripper"]}
self.clip = 0.2
def step_callback(self, igbhvr_act_inst, _):
robot = igbhvr_act_inst.simulator.robots[0]
agent_distance = {part: 0 for part in self.agent_pos}
self.next_state_cache = {
"gripper": {"position": robot.get_end_effector_position()},
"body": {"position": robot.get_position()},
}
if not self.initialized:
self.state_cache = copy.deepcopy(self.next_state_cache)
self.initialized = True
self.agent_pos["body"].append(list(self.state_cache["body"]["position"]))
delta_pos = np.linalg.norm(
np.array(self.next_state_cache["body"]["position"]) - self.state_cache["body"]["position"]
)
distance = np.abs(delta_pos)
self.delta_agent_distance["body"].append(distance)
self.agent_pos["gripper"].append(list(self.state_cache["gripper"]["position"]))
delta_pos = np.linalg.norm(
self.next_state_cache["gripper"]["position"] - self.state_cache["gripper"]["position"]
)
gripper_distance = np.abs(delta_pos)
self.delta_agent_distance["gripper"].append(gripper_distance)
self.agent_local_pos["gripper"].append(list(robot.get_relative_eef_position()))
contacts = p.getContactPoints(bodyA=robot.robot_ids[0], linkIndexA=robot.eef_link_id)
if len(contacts) > 0:
self.delta_agent_grasp_distance["gripper"].append(gripper_distance)
self.agent_grasping["gripper"].append(True)
else:
self.delta_agent_grasp_distance["gripper"].append(0)
self.agent_grasping["gripper"].append(False)
self.state_cache = copy.deepcopy(self.next_state_cache)
def gather_results(self):
return {
"agent_distance": {
"timestep": self.delta_agent_distance,
},
"grasp_distance": {
"timestep": self.delta_agent_grasp_distance,
},
"pos": {
"timestep": self.agent_pos,
},
"local_pos": {
"timestep": self.agent_local_pos,
},
"grasping": {
"timestep": self.agent_grasping,
},
}
| [
"numpy.clip",
"numpy.abs",
"pybullet.getContactPoints",
"copy.deepcopy",
"pybullet.getConstraintState",
"numpy.array",
"numpy.linalg.norm"
]
| [((3877, 3913), 'copy.deepcopy', 'copy.deepcopy', (['self.next_state_cache'], {}), '(self.next_state_cache)\n', (3890, 3913), False, 'import copy\n'), ((5886, 5903), 'numpy.abs', 'np.abs', (['delta_pos'], {}), '(delta_pos)\n', (5892, 5903), True, 'import numpy as np\n'), ((6072, 6179), 'numpy.linalg.norm', 'np.linalg.norm', (["(self.next_state_cache['gripper']['position'] - self.state_cache['gripper']\n ['position'])"], {}), "(self.next_state_cache['gripper']['position'] - self.\n state_cache['gripper']['position'])\n", (6086, 6179), True, 'import numpy as np\n'), ((6224, 6241), 'numpy.abs', 'np.abs', (['delta_pos'], {}), '(delta_pos)\n', (6230, 6241), True, 'import numpy as np\n'), ((6421, 6495), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'robot.robot_ids[0]', 'linkIndexA': 'robot.eef_link_id'}), '(bodyA=robot.robot_ids[0], linkIndexA=robot.eef_link_id)\n', (6439, 6495), True, 'import pybullet as p\n'), ((6826, 6862), 'copy.deepcopy', 'copy.deepcopy', (['self.next_state_cache'], {}), '(self.next_state_cache)\n', (6839, 6862), False, 'import copy\n'), ((1462, 1498), 'copy.deepcopy', 'copy.deepcopy', (['self.next_state_cache'], {}), '(self.next_state_cache)\n', (1475, 1498), False, 'import copy\n'), ((2388, 2485), 'numpy.linalg.norm', 'np.linalg.norm', (["(self.next_state_cache[part]['position'] - self.state_cache[part]['position'])"], {}), "(self.next_state_cache[part]['position'] - self.state_cache[\n part]['position'])\n", (2402, 2485), True, 'import numpy as np\n'), ((2625, 2666), 'numpy.clip', 'np.clip', (['delta_pos', '(-self.clip)', 'self.clip'], {}), '(delta_pos, -self.clip, self.clip)\n', (2632, 2666), True, 'import numpy as np\n'), ((2959, 2976), 'numpy.abs', 'np.abs', (['delta_pos'], {}), '(delta_pos)\n', (2965, 2976), True, 'import numpy as np\n'), ((5562, 5598), 'copy.deepcopy', 'copy.deepcopy', (['self.next_state_cache'], {}), '(self.next_state_cache)\n', (5575, 5598), False, 'import copy\n'), ((2815, 2867), 'pybullet.getConstraintState', 'p.getConstraintState', (['robot.parts[part].movement_cid'], {}), '(robot.parts[part].movement_cid)\n', (2835, 2867), True, 'import pybullet as p\n'), ((5766, 5817), 'numpy.array', 'np.array', (["self.next_state_cache['body']['position']"], {}), "(self.next_state_cache['body']['position'])\n", (5774, 5817), True, 'import numpy as np\n'), ((2911, 2932), 'numpy.linalg.norm', 'np.linalg.norm', (['force'], {}), '(force)\n', (2925, 2932), True, 'import numpy as np\n')] |
import argparse
import operator
import os
import re
import shutil
import spacy
import tempfile
from nerds.utils import spans_to_tokens, get_logger
def segment_text_to_sentences(text_file, sentence_splitter):
""" Segment text into sentences. Text is provided by BRAT in .txt
file.
Args:
text_file (str): the full path to the BRAT .txt file.
sentence_splitter (spacy LM): SpaCy EN language model.
Returns:
sentences (list((int, int, str))): list of sentence spans.
Spans are triples of (start_offset, end_offset, text),
where offset is relative to the text.
"""
sentences = []
ftext = open(text_file, "r")
for line in ftext:
splits = sentence_splitter(line.strip())
for sent in splits.sents:
sentences.append((sent.start_char, sent.end_char, sent.text))
ftext.close()
return sentences
def parse_text_annotations(ann_file):
""" Parses BRAT annotations provided in the .ann file and converts them
to annotation spans of (start_position, end_position, entity_class).
Args:
ann_file (str): full path to the BRAT .ann file.
Returns:
annotations (list((int, int, str))): list of annotation spans.
Spans are triples of (start_offset, end_offset, entity_class)
where offset is relative to the text.
"""
annots = []
fann = open(ann_file, "r")
for line in fann:
cols = re.split(r"\s+", line.strip())
if not cols[0].startswith("T"):
continue
annots.append((int(cols[2]), int(cols[3]), cols[1]))
fann.close()
return annots
def apply_annotations(sentences, annotations, tokenizer):
""" Apply annotation spans to the sentence spans to create a list of tokens
and tags.
Args:
sentences (list((int, int, str))): list of sentence spans.
annotations (list((int, int, str))): list of annotation spans.
tokenizer (spacy LM): SpaCy EN language model.
Returns:
tokens_tags_list (list((list(str), list(str)))): list of list of token
tag pairs. Each list of token-tag pairs corresponds to a single
sentence.
"""
tokens_tags_list = []
for sent_start, sent_end, sent_text in sentences:
sent_annots = [a for a in annotations if a[0] >= sent_start and a[1] <= sent_end]
# convert document offsets to sentence offsets
sent_annots = [(s[0] - sent_start, s[1] - sent_start, s[2]) for s in sent_annots]
tokens, tags = spans_to_tokens(sent_text, sent_annots, tokenizer)
tokens_tags_list.append(zip(tokens, tags))
return tokens_tags_list
def convert_brat_to_iob(input_dir, output_file, nlp):
""" Convenience Convertor function.
Args:
input_dir (str): the directory where the BRAT .txt and .ann files
are located.
output_file (str): the full path name of file to write output in
IOB format to.
nlp (SpaCy LM): reference to the SpaCy EN model.
Returns:
None.
"""
fout = open(output_file, "w")
for text_file in os.listdir(input_dir):
# only process .txt and .ann pairs in specified directory
if not text_file.endswith(".txt"):
continue
annot_file = text_file[:-4] + ".ann"
if not os.path.exists(os.path.join(input_dir, annot_file)):
# do not process file if no corresponding .ann file
continue
# process file pair
logger.info("Processing file: {:s}".format(text_file))
sentences = segment_text_to_sentences(os.path.join(input_dir, text_file), nlp)
annotations = parse_text_annotations(os.path.join(input_dir, annot_file))
tokens_tags_list = apply_annotations(sentences, annotations, nlp)
for tokens_tags in tokens_tags_list:
for token, tag in tokens_tags:
fout.write("{:s}\t{:s}\n".format(token, tag))
fout.write("\n")
fout.close()
def do_self_test(nlp):
""" Simple self-test with small dataset to prove that this works okay. """
text = "<NAME>, 61 years old, will join the board as a nonexecutive director, Nov. 29. Mr. Vinken is chairman of Elsevier N.V., the Dutch publishing group."
annotations = [
"T1 PER 0 13 <NAME>",
"T2 PER 86 96 Mr. Vinken",
"T3 DATE 15 27 61 years old",
"T4 DATE 77 84 Nov. 29",
"T5 ORG 112 125 Elsevier N.V.",
"T6 NORP 131 136 Dutch"
]
input_dir = tempfile.mkdtemp(dir="/tmp")
ftext = open(os.path.join(input_dir, "test.txt"), "w")
ftext.write(text)
ftext.close()
fann = open(os.path.join(input_dir, "test.ann"), "w")
for line in annotations:
fann.write(line + "\n")
fann.close()
output_file = os.path.join(input_dir, "test.iob")
convert_brat_to_iob(input_dir, output_file, nlp)
fout = open(output_file, "r")
for line in fout:
logger.warn(line.strip())
shutil.rmtree(input_dir)
################################ main ################################
#
# usage: brat2iob.py [-h] [-i INPUT_DIR] [-o OUTPUT_FILE] [-t]
# Script to convert BRAT annotations to IOB (NERDS) format.
# optional arguments:
# -h, --help show this help message and exit
# -i INPUT_DIR, --input_dir INPUT_DIR
# Directory to store BRAT .txt and .ann files.
# -o OUTPUT_FILE, --output_file OUTPUT_FILE
# Output file to write IOB output to.
# -t, --test Runs self test.
######################################################################
parser = argparse.ArgumentParser(
description="Script to convert BRAT annotations to IOB (NERDS) format.")
parser.add_argument("-i", "--input_dir", help="Directory to store BRAT .txt and .ann files.")
parser.add_argument("-o", "--output_file", help="Output file to write IOB output to.")
parser.add_argument("-t", "--test", help="Runs self test.", action="store_true")
args = parser.parse_args()
logger = get_logger()
input_dir = args.input_dir
output_file = args.output_file
self_test = args.test
nlp = spacy.load("en")
if self_test:
logger.info("Executing self test...")
do_self_test(nlp)
else:
logger.info("Reading BRAT .txt and .ann files from: {:s}".format(input_dir))
logger.info("Writing IOB tokens/tags to file: {:s}".format(output_file))
convert_brat_to_iob(input_dir, output_file, nlp)
| [
"os.listdir",
"nerds.utils.spans_to_tokens",
"argparse.ArgumentParser",
"spacy.load",
"os.path.join",
"tempfile.mkdtemp",
"nerds.utils.get_logger",
"shutil.rmtree"
]
| [((5767, 5868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script to convert BRAT annotations to IOB (NERDS) format."""'}), "(description=\n 'Script to convert BRAT annotations to IOB (NERDS) format.')\n", (5790, 5868), False, 'import argparse\n'), ((6168, 6180), 'nerds.utils.get_logger', 'get_logger', ([], {}), '()\n', (6178, 6180), False, 'from nerds.utils import spans_to_tokens, get_logger\n'), ((6269, 6285), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (6279, 6285), False, 'import spacy\n'), ((3260, 3281), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (3270, 3281), False, 'import os\n'), ((4657, 4685), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'dir': '"""/tmp"""'}), "(dir='/tmp')\n", (4673, 4685), False, 'import tempfile\n'), ((4939, 4974), 'os.path.join', 'os.path.join', (['input_dir', '"""test.iob"""'], {}), "(input_dir, 'test.iob')\n", (4951, 4974), False, 'import os\n'), ((5122, 5146), 'shutil.rmtree', 'shutil.rmtree', (['input_dir'], {}), '(input_dir)\n', (5135, 5146), False, 'import shutil\n'), ((2643, 2693), 'nerds.utils.spans_to_tokens', 'spans_to_tokens', (['sent_text', 'sent_annots', 'tokenizer'], {}), '(sent_text, sent_annots, tokenizer)\n', (2658, 2693), False, 'from nerds.utils import spans_to_tokens, get_logger\n'), ((4703, 4738), 'os.path.join', 'os.path.join', (['input_dir', '"""test.txt"""'], {}), "(input_dir, 'test.txt')\n", (4715, 4738), False, 'import os\n'), ((4801, 4836), 'os.path.join', 'os.path.join', (['input_dir', '"""test.ann"""'], {}), "(input_dir, 'test.ann')\n", (4813, 4836), False, 'import os\n'), ((3748, 3782), 'os.path.join', 'os.path.join', (['input_dir', 'text_file'], {}), '(input_dir, text_file)\n', (3760, 3782), False, 'import os\n'), ((3834, 3869), 'os.path.join', 'os.path.join', (['input_dir', 'annot_file'], {}), '(input_dir, annot_file)\n', (3846, 3869), False, 'import os\n'), ((3488, 3523), 'os.path.join', 'os.path.join', (['input_dir', 'annot_file'], {}), '(input_dir, annot_file)\n', (3500, 3523), False, 'import os\n')] |
"""
Ocropus's magic PIL-numpy array conversion routines. They express slightly
different behavior from PIL.Image.toarray().
"""
import unicodedata
import numpy as np
from PIL import Image
__all__ = ['pil2array', 'array2pil']
def pil2array(im: Image.Image, alpha: int = 0) -> np.array:
if im.mode == '1':
return np.array(im.convert('L'))
return np.array(im)
def array2pil(a: np.array) -> Image:
if a.dtype == np.dtype("B"):
if a.ndim == 2:
return Image.frombytes("L", (a.shape[1], a.shape[0]),
a.tostring())
elif a.ndim == 3:
return Image.frombytes("RGB", (a.shape[1], a.shape[0]),
a.tostring())
else:
raise Exception("bad image rank")
elif a.dtype == np.dtype('float32'):
return Image.frombytes("F", (a.shape[1], a.shape[0]), a.tostring())
else:
raise Exception("unknown image type")
def is_bitonal(im: Image.Image) -> bool:
"""
Tests a PIL.Image for bitonality.
Args:
im (PIL.Image.Image): Image to test
Returns:
True if the image contains only two different color values. False
otherwise.
"""
return im.getcolors(2) is not None and len(im.getcolors(2)) == 2
def get_im_str(im: Image.Image) -> str:
return im.filename if hasattr(im, 'filename') else str(im)
def is_printable(char: str) -> bool:
"""
Determines if a chode point is printable/visible when printed.
Args:
char (str): Input code point.
Returns:
True if printable, False otherwise.
"""
letters = ('LC', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu')
numbers = ('Nd', 'Nl', 'No')
punctuation = ('Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps')
symbol = ('Sc', 'Sk', 'Sm', 'So')
printable = letters + numbers + punctuation + symbol
return unicodedata.category(char) in printable
def make_printable(char: str) -> str:
"""
Takes a Unicode code point and return a printable representation of it.
Args:
char (str): Input code point
Returns:
Either the original code point, the name of the code point if it is a
combining mark, whitespace etc., or the hex code if it is a control
symbol.
"""
if not char or is_printable(char):
return char
elif unicodedata.category(char) in ('Cc', 'Cs', 'Co'):
return '0x{:x}'.format(ord(char))
else:
return unicodedata.name(char)
| [
"unicodedata.name",
"numpy.array",
"numpy.dtype",
"unicodedata.category"
]
| [((364, 376), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (372, 376), True, 'import numpy as np\n'), ((434, 447), 'numpy.dtype', 'np.dtype', (['"""B"""'], {}), "('B')\n", (442, 447), True, 'import numpy as np\n'), ((1891, 1917), 'unicodedata.category', 'unicodedata.category', (['char'], {}), '(char)\n', (1911, 1917), False, 'import unicodedata\n'), ((811, 830), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (819, 830), True, 'import numpy as np\n'), ((2363, 2389), 'unicodedata.category', 'unicodedata.category', (['char'], {}), '(char)\n', (2383, 2389), False, 'import unicodedata\n'), ((2480, 2502), 'unicodedata.name', 'unicodedata.name', (['char'], {}), '(char)\n', (2496, 2502), False, 'import unicodedata\n')] |
import sys
sys.path.insert(0,'..')
from data.whale_data import exchnage_accounts
from data.html_helper import check_if_address_name_exists
from data.whale_eth_tx_data import *
from data.whale_token_tx_data import identify_investor_type_token
holding_account = "holding_account"
deposit_account = 'deposit_account'
withdraw_account = "withdraw_account"
in_type = "IN"
out_type = "OUT"
all_acc_types = dict()
for acc in exchnage_accounts:
all_acc_types[acc] = exchange_type
def update_y_array(X,y,timestamp,amount):
target_index = 0
for i in range(len(X)):
x_time = X[i]
if timestamp < x_time:
target_index = i
break
for i in range(target_index,len(y)):
y[i] += amount
return y
def perform_bfs_on_accounts(out_txs,top_holder_type,acc,m_type='OUT'):
print("\t"+m_type)
unique_out = set()
for out in out_txs:
unique_out.add(out[3])
unique_out = list(unique_out)[:5]
for out in unique_out:
print("\t"+out)
if out not in all_acc_types:
investor_type = identify_investor_type(out)
if investor_type == affliate_type:
investor_type = identify_investor_type_token(out)
print("\t\t{}".format(investor_type))
else:
investor_type = all_acc_types[out]
if investor_type == exchange_type:
top_holder_type[acc] = deposit_account if m_type == "OUT" else withdraw_account
all_acc_types[out] = investor_type
if acc not in top_holder_type:
top_holder_type[acc] = holding_account
return top_holder_type
def calculate_holding_amount(X,escape_accounts,txs):
top_holder_type = dict()
for acc in txs:
tx = txs[acc]
if acc in escape_accounts:
continue
#如果当前账户从来没有向外打过token,ignore
out_txs = [item for item in tx if item[2] == 'OUT']
if len(out_txs) == 0:
print("\tholding account")
top_holder_type[acc] = holding_account
continue
# build all traxe Y: holding_amount, deposit_amount, withdraw_amount
amount_trace_y = [0] * len(X)
for holder in txs:
if holder in escape_accounts:
continue
if holder not in top_holder_type:
print("{} not identified! ".format(holder))
continue
holder_type = top_holder_type[holder]
holder_txs = txs[holder]
print("{} {}".format(holder,holder_type))
for tx in holder_txs:
[timestamp,from_a,tx_type,to_a,amount] = tx
if holder_type == holding_account:
if tx_type == in_type:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,amount)
else:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,-amount)
return amount_trace_y
| [
"data.whale_token_tx_data.identify_investor_type_token",
"sys.path.insert"
]
| [((11, 35), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (26, 35), False, 'import sys\n'), ((1185, 1218), 'data.whale_token_tx_data.identify_investor_type_token', 'identify_investor_type_token', (['out'], {}), '(out)\n', (1213, 1218), False, 'from data.whale_token_tx_data import identify_investor_type_token\n')] |
# @Time : 2020/11/14
# @Author : <NAME>, <NAME>
# @Email : <EMAIL>
# UPDATE:
# @Time : 2020/12/2, 2020/11/27, 2020/12/3, 2020/12/26
# @Author : <NAME>, <NAME>, <NAME>, <NAME>
# @Email : <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>
r"""
textbox.trainer.trainer
################################
"""
import os
import torch
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from torch.utils.data import DataLoader
from time import time
from logging import getLogger
from textbox.module.Optimizer.optim import ScheduledOptim
from textbox.evaluator import NgramEvaluator, TranslationEvaluator, SummarizationEvaluator
from textbox.utils import ensure_dir, early_stopping
class AbstractTrainer(object):
r"""Trainer Class is used to manage the training and evaluation processes of text generation system models.
AbstractTrainer is an abstract class in which the fit() and evaluate() method should be implemented according
to different training and evaluation strategies.
"""
def __init__(self, config, model):
self.config = config
self.model = model
def fit(self, train_data):
r"""Train the model based on the train data.
"""
raise NotImplementedError('Method [next] should be implemented.')
def evaluate(self, eval_data):
r"""Evaluate the model based on the eval data.
"""
raise NotImplementedError('Method [next] should be implemented.')
class Trainer(AbstractTrainer):
r"""The basic Trainer for basic training and evaluation strategies in text generation systems.
This class defines common functions for training and evaluation processes of most text generation system models,
including fit(), evalute(), resume_checkpoint() and some other features helpful for model training and evaluation.
Generally speaking, this class can serve most text generation system models, If the training process of the model
is to simply optimize a single loss without involving any complex training strategies, such as adversarial learning,
pre-training and so on.
Initializing the Trainer needs two parameters: `config` and `model`. `config` records the parameters information
for controlling training and evaluation, such as `learning_rate`, `epochs`, `eval_step` and so on.
More information can be found in [placeholder]. `model` is the instantiated object of a Model Class.
"""
def __init__(self, config, model):
super(Trainer, self).__init__(config, model)
self.logger = getLogger()
self.learner = config['learner']
self.learning_rate = config['learning_rate']
self.epochs = config['epochs']
self.eval_step = min(config['eval_step'], self.epochs)
self.stopping_step = config['stopping_step']
self.test_batch_size = config['eval_batch_size']
self.device = config['device']
self.embedding_size = config['embedding_size']
self.warmup_steps = config['warmup_steps']
self.checkpoint_dir = config['checkpoint_dir']
ensure_dir(self.checkpoint_dir)
saved_model_file = self.config['filename'] + '.pth'
self.saved_model_file = os.path.join(self.checkpoint_dir, saved_model_file)
self.generated_text_dir = config['generated_text_dir']
ensure_dir(self.generated_text_dir)
saved_text_file = self.config['filename'] + '.txt'
self.saved_text_file = os.path.join(self.generated_text_dir, saved_text_file)
self.start_epoch = 0
self.cur_step = 0
self.best_valid_score = 100000000
self.best_valid_result = None
self.train_loss_dict = dict()
self.optimizer = self._build_optimizer()
self.task_type = config['task_type'].lower()
if self.task_type == "translation":
self.evaluator = TranslationEvaluator(config)
elif self.task_type == "summarization":
self.evaluator = SummarizationEvaluator(config)
else:
self.evaluator = NgramEvaluator(config)
self.item_tensor = None
self.tot_item_num = None
self.iid_field = config['ITEM_ID_FIELD']
def _build_optimizer(self):
r"""Init the Optimizer
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'schedule':
optimizer = ScheduledOptim(optim.Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),
self.learning_rate, self.embedding_size, self.warmup_steps)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
return optimizer
def _train_epoch(self, train_data, epoch_idx):
r"""Train the model in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.train()
total_loss = None
for batch_idx, data in enumerate(train_data):
self.optimizer.zero_grad()
losses = self.model.calculate_loss(data, epoch_idx=epoch_idx)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
loss.backward()
self.optimizer.step()
train_loss = total_loss / len(train_data)
return train_loss
def _valid_epoch(self, valid_data):
r"""Valid the model with valid data
Args:
valid_data (DataLoader): the valid data
Returns:
float: valid score
dict: valid result
"""
self.model.eval()
total_loss = None
for batch_idx, data in enumerate(valid_data):
losses = self.model.calculate_loss(data)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
valid_loss = total_loss / len(valid_data)
ppl = np.exp(valid_loss)
return valid_loss, ppl
def _save_checkpoint(self, epoch):
r"""Store the model parameters information and training information.
Args:
epoch (int): the current epoch id
"""
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
torch.save(state, self.saved_model_file)
def _save_generated_text(self, generated_corpus):
r"""Store the generated text by our model.
Args:
corpus (list of string list):
"""
with open(self.saved_text_file, 'w') as fin:
for tokens in generated_corpus:
fin.write(' '.join(tokens) + '\n')
def resume_checkpoint(self, resume_file):
r"""Load the model parameters information and training information.
Args:
resume_file (file): the checkpoint file
"""
resume_file = str(resume_file)
checkpoint = torch.load(resume_file)
self.start_epoch = checkpoint['epoch'] + 1
self.cur_step = checkpoint['cur_step']
self.best_valid_score = checkpoint['best_valid_score']
# load architecture params from checkpoint
if checkpoint['config']['model'].lower() != self.config['model'].lower():
self.logger.warning('Architecture configuration given in config file is different from that of checkpoint. '
'This may yield an exception while state_dict is being loaded.')
self.model.load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed
self.optimizer.load_state_dict(checkpoint['optimizer'])
message_output = 'Checkpoint loaded. Resume training from epoch {}'.format(self.start_epoch)
self.logger.info(message_output)
def _check_nan(self, loss):
if torch.isnan(loss):
raise ValueError('Training loss is nan')
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
train_loss_output = "epoch %d %straining [time: %.2fs, " % (epoch_idx, train_info, e_time - s_time)
if isinstance(losses, tuple):
for idx, loss in enumerate(losses):
train_loss_output += 'train_loss%d: %.4f, ' % (idx + 1, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
r"""Train the model based on the train data and the valid data.
Args:
train_data (DataLoader): the train data
valid_data (DataLoader, optional): the valid data, default: None.
If it's None, the early_stopping is invalid.
verbose (bool, optional): whether to write training and evaluation information to logger, default: True
saved (bool, optional): whether to save the model parameters, default: True
Returns:
(float, dict): best valid score and best valid result. If valid_data is None, it returns (-1, None)
"""
for epoch_idx in range(self.start_epoch, self.epochs):
# train
training_start_time = time()
train_loss = self._train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
self._save_checkpoint(epoch_idx)
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
# eval
if self.eval_step <= 0 or not valid_data:
if saved:
self._save_checkpoint(epoch_idx)
update_output = 'Saving current: %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
continue
if (epoch_idx + 1) % self.eval_step == 0:
valid_start_time = time()
with torch.no_grad():
valid_score, valid_result = self._valid_epoch(valid_data)
# valid_loss, ppl
self.best_valid_score, self.cur_step, stop_flag, update_flag = early_stopping(
valid_score, self.best_valid_score, self.cur_step,
max_step=self.stopping_step, bigger=False)
# better model are supposed to provide smaller perplexity and loss
valid_end_time = time()
valid_score_output = "epoch %d evaluating [time: %.2fs, valid_loss: %f]" % \
(epoch_idx, valid_end_time - valid_start_time, valid_score)
valid_result_output = 'valid ppl: {}'.format(valid_result)
if verbose:
self.logger.info(valid_score_output)
self.logger.info(valid_result_output)
if update_flag:
if saved:
self._save_checkpoint(epoch_idx)
update_output = 'Saving current best: %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
self.best_valid_result = valid_result
if stop_flag:
stop_output = 'Finished training, best eval result in epoch %d' % \
(epoch_idx - self.cur_step * self.eval_step)
if verbose:
self.logger.info(stop_output)
break
return self.best_valid_score, self.best_valid_result
def _evaluate_nll_test(self, eval_data):
r"""Calculate the negative log-likelihood of the eval_data.
Args:
eval_data (DataLoader): the eval data.
Returns:
Float: NLL_test of the eval data.
"""
total_loss = 0
for epoch_idx, eval_batch in enumerate(eval_data):
nll_test = self.model.calculate_nll_test(eval_batch, epoch_idx)
total_loss += float(nll_test)
return total_loss / len(eval_data)
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None):
r"""Evaluate the model based on the eval data.
Args:
eval_data (DataLoader): the eval data
load_best_model (bool, optional): whether load the best model in the training process, default: True.
It should be set True, if users want to test the model after training.
model_file (str, optional): the saved model file, default: None. If users want to test the previously
trained model file, they can set this parameter.
Returns:
dict: eval result, key is the eval metric and value in the corresponding metric value
"""
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
self.model.eval()
with torch.no_grad():
generate_corpus = self.model.generate(eval_data)
self._save_generated_text(generate_corpus)
reference_corpus = eval_data.get_reference()
result = self.evaluator.evaluate(generate_corpus, reference_corpus)
result['nll_test'] = self._evaluate_nll_test(eval_data)
return result
def plot_train_loss(self, show=True, save_path=None):
r"""Plot the train loss in each epoch
Args:
show (bool, optional): whether to show this figure, default: True
save_path (str, optional): the data path to save the figure, default: None.
If it's None, it will not be saved.
"""
epochs = list(self.train_loss_dict.keys())
epochs.sort()
values = [float(self.train_loss_dict[epoch]) for epoch in epochs]
plt.plot(epochs, values)
plt.xticks(epochs)
plt.xlabel('Epoch')
plt.ylabel('Loss')
if show:
plt.show()
if save_path:
plt.savefig(save_path)
class UnconditionalTrainer(Trainer):
r"""UnconditionalTrainer is designed for RNN, which is a typical unconditional generator.
"""
def __init__(self, config, model):
super(UnconditionalTrainer, self).__init__(config, model)
class GANTrainer(Trainer):
r"""GANTrainer is designed for GAN, which is a generative adversarial net method.
"""
def __init__(self, config, model):
super(GANTrainer, self).__init__(config, model)
self.optimizer = None
self.g_optimizer = self._build_module_optimizer(self.model.generator)
self.d_optimizer = self._build_module_optimizer(self.model.discriminator)
self.grad_clip = config['grad_clip']
self.g_pretraining_epochs = config['g_pretraining_epochs']
self.d_pretraining_epochs = config['d_pretraining_epochs']
self.d_sample_num = config['d_sample_num']
self.d_sample_training_epochs = config['d_sample_training_epochs']
self.adversarail_training_epochs = config['adversarail_training_epochs']
self.adversarail_d_epochs = config['adversarail_d_epochs']
self.g_pretraining_loss_dict = dict()
self.d_pretraining_loss_dict = dict()
self.max_length = config['max_seq_length'] + 2
self.pad_idx = model.pad_idx
def _build_module_optimizer(self, module):
r"""Init the Module Optimizer
Args:
module (torch.nn.Mudule): Mudule class of torch.nn needed optimizer
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(module.parameters(), lr=self.learning_rate)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(module.parameters(), lr=self.learning_rate)
return optimizer
def _optimize_step(self, losses, total_loss, model, opt):
r"""The opt uses the cliped losses to conduct an optimize step to optimize model
and sum up losses to the total_loss.
Args:
losses (torch.Tensor or tuple): The loss to be backward.
total_loss (Float): Total loss in an epoch.
model (torch.nn.Mudule): The model to be optimized.
opt (torch.optim): The optimizer of the model.
Returns:
torch.Tensor or tuple: Total loss in an epoch, shape: [].
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _save_checkpoint(self, epoch):
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict()
}
torch.save(state, self.saved_model_file)
def _add_pad(self, data):
r"""Pad the data to the max length of corpus.
Args:
data (torch.Tensor): The data to be padded, shape: [batch_size, max_batch_length].
Returns:
torch.Tensor: The padded data, shape: [batch_size, max_seq_length].
"""
batch_size = data.shape[0]
padded_data = torch.full((batch_size, self.max_length), self.pad_idx, dtype=torch.long, device=self.device)
padded_data[:, : data.shape[1]] = data
return padded_data
def _get_real_data(self, train_data):
r"""Get the target text index of the corpus train_datas.
Args:
train_data (DataLoader): the train data.
Returns:
torch.Tensor: The target text index, shape: [batch_size, max_batch_length].
"""
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx']
real_data = self._add_pad(real_data)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _g_train_epoch(self, train_data, epoch_idx):
r"""Train the generator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.generator.train()
total_loss = None
for batch_idx, data in enumerate(train_data):
losses = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
total_loss = [l / len(train_data) for l in total_loss] if isinstance(total_loss, tuple) else total_loss / len(
train_data)
total_loss = tuple(total_loss) if isinstance(total_loss, list) else total_loss
return total_loss
def _d_train_epoch(self, train_data, epoch_idx):
r"""Train the discriminator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
fake_data = self.model.sample(self.d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for _ in range(self.d_sample_training_epochs): # d_epoch
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
losses = self.model.calculate_d_train_loss(real_data, fake_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / min(len(real_dataloader), len(fake_dataloader)) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Adversarial training in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.generator.train()
total_loss = None
losses = self.model.calculate_g_adversarial_loss(epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
for epoch_idx in range(self.adversarail_d_epochs):
self._d_train_epoch(train_data, epoch_idx=epoch_idx)
return total_loss
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# generator pretraining
if verbose:
self.logger.info("Start generator pretraining...")
for epoch_idx in range(self.g_pretraining_epochs):
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
self.g_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"generator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End generator pretraining...")
# discriminator pretraining
if verbose:
self.logger.info("Start discriminator pretraining...")
for epoch_idx in range(self.d_pretraining_epochs):
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
self.d_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End discriminator pretraining...")
# adversarial training
if verbose:
self.logger.info("Start adversarial training...")
for epoch_idx in range(self.adversarail_training_epochs):
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End adversarial pretraining...")
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
class TextGANTrainer(GANTrainer):
r"""TextGANTrainer is designed for TextGAN.
"""
def __init__(self, config, model):
super(TextGANTrainer, self).__init__(config, model)
self.adversarail_g_epochs = config['adversarail_g_epochs']
def _d_train_epoch(self, train_data, epoch_idx):
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for _ in range(self.d_sample_training_epochs):
for idx, real_data in enumerate(real_dataloader):
fake_data, z = self.model.sample()
losses = self.model.calculate_d_train_loss(real_data, fake_data, z, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
if (idx * self.model.batch_size >= self.d_sample_num):
break
return total_loss / min(len(real_dataloader), self.d_sample_num // self.model.batch_size) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for idx, real_data in enumerate(real_dataloader):
if (idx == self.adversarail_g_epochs):
break
losses = self.model.calculate_g_adversarial_loss(real_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
for epoch_idx in range(self.adversarail_d_epochs):
self._d_train_epoch(train_data, epoch_idx=epoch_idx)
return total_loss / min(len(real_dataloader), self.adversarail_g_epochs)
class RankGANTrainer(GANTrainer):
r"""RankGANTrainer is designed for RankGAN.
"""
def __init__(self, config, model):
super(RankGANTrainer, self).__init__(config, model)
def _d_train_epoch(self, train_data, epoch_idx):
r"""Train the discriminator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
fake_data = self.model.sample(self.d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
ref_index = np.random.randint(0, real_data.shape[0], size=self.model.ref_size)
ref_data = real_data[ref_index] # ref_size * l
for _ in range(self.d_sample_training_epochs):
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
losses = self.model.calculate_d_train_loss(real_data, fake_data, ref_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / min(len(real_dataloader), len(fake_dataloader)) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Adversarial training in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data)
ref_index = np.random.randint(0, real_data.shape[0], size=self.model.ref_size)
ref_data = real_data[ref_index] # ref_size * l
losses = self.model.calculate_g_adversarial_loss(ref_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
d_loss = 0
for epoch_idx in range(self.adversarail_d_epochs):
d_loss += self._d_train_epoch(train_data, epoch_idx=epoch_idx)
d_loss = d_loss / self.adversarail_d_epochs
return total_loss
class ConditionalTrainer(Trainer):
r"""ConditionalTrainer is designed for seq2seq testing, which is a typically used setting.
"""
def __init__(self, config, model):
super(ConditionalTrainer, self).__init__(config, model)
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None):
r"""Evaluate the model based on the eval data.
Args:
eval_data (DataLoader): the eval data
load_best_model (bool, optional): whether load the best model in the training process, default: True.
It should be set True, if users want to test the model after training.
model_file (str, optional): the saved model file, default: None. If users want to test the previously
trained model file, they can set this parameter.
Returns:
dict: eval result, key is the eval metric and value in the corresponding metric value
"""
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
self.model.eval()
generate_corpus = self.model.generate(eval_data)
self._save_generated_text(generate_corpus)
reference_corpus = eval_data.get_reference()
result = self.evaluator.evaluate(generate_corpus, reference_corpus)
return result
class MaskGANTrainer(GANTrainer):
r""" Trainer specifically designed for MaskGAN training process.
"""
def __init__(self, config, model):
super(MaskGANTrainer, self).__init__(config, model)
self.max_length = config["max_seq_length"]
self.eos_token_idx = model.eos_idx
self.adversarail_c_epochs = config['adversarail_c_epochs']
self.g_mask_pretraining_epochs = config['g_mask_pretraining_epochs']
self.g_lr = config['gen_learning_rate']
self.d_lr = config['dis_learning_rate']
self.c_lr = config['critic_learning_rate']
self.g_optimizer = self._build_module_optimizer_(self.model.generator, self.g_lr)
self.d_optimizer = self._build_module_optimizer_(self.model.discriminator, self.d_lr)
self.c_optimizer = self._build_module_optimizer_(self.model.discriminator.critic_fc_linear, self.c_lr)
self.pre_lm_weight = config["pre_lm_weight"]
self.pretrain_lm_epochs = config["pretrain_lm_epochs"]
self.checkp = config['checkp']
def _build_module_optimizer_(self, module, lr):
r""" Init the Module Optimizer with specified learning rate
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(module.parameters(), lr)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(module.parameters(), lr)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(module.parameters(), lr)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(module.parameters(), lr)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(module.parameters(), lr)
return optimizer
def _optimize_step(self, losses, total_loss, model, opt, retain_graph=False):
r""" Add retain_graph option
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
r""" Specified for maskgan output
"""
train_loss_output = "%straining [time: %.2fs, " % (train_info, e_time - s_time)
if isinstance(losses, dict):
for key, loss in losses.items():
train_loss_output += '%s: %.4f, ' % (key, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def pretrain_lm(self, train_data, valid_data, verbose):
r""" Pretrain rnn-based Language Model with teacher forcing mechanism
"""
def lm_forward(data):
r""" One iteration of LM forward
"""
input = data[:, :-1] # bs * self.max_len - 1
target = data[:, 1:]
bs, seq_len = target.size()
lengths = torch.tensor([seq_len] * bs)
target_present = torch.ones_like(input).byte()
device = target.device
lengths = lengths.cuda(device)
# pretaining
encoder_outputs = pre_train_lm(input, lengths, target, target_present, pretrain=True)
logit = pre_train_lm.vocab_linear(encoder_outputs)
logit = logit.permute([0, 2, 1])
lossf = torch.nn.CrossEntropyLoss()
loss = lossf(logit, target)
return loss
pre_train_lm = self.model.generator
lm_opt = self._build_module_optimizer_(pre_train_lm, lr=0.001)
for epoch in range(self.pretrain_lm_epochs):
total_loss = None
real_data = self._get_real_data(train_data) # bs * self.max_len
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
loss = lm_forward(data)
total_loss = self._optimize_step(loss, total_loss, pre_train_lm, lm_opt)
total_loss = total_loss / len(real_dataloader)
if verbose:
self.logger.info("Epoch {}/{} of LM pretraining loss: {} ".format(epoch+1, self.pretrain_lm_epochs, total_loss))
ppl = 0.0
if (epoch+1) % 1 == 0:
pre_train_lm.eval()
validate_data = self._get_real_data(valid_data) # bs * self.max_len
validate_dataloader = DataLoader(validate_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
ppl = 0.0
for batch_idx, data in enumerate(validate_dataloader):
cross_entropy_loss = lm_forward(data)
ppl += math.exp(cross_entropy_loss.item())
ppl = ppl / len(validate_dataloader)
pre_train_lm.train()
if verbose:
self.logger.info("Epoch {}/{} of LM pretraining PPL: {}...".format(epoch + 1, self.pretrain_lm_epochs, ppl))
if ppl < 110:
state_dict = {
'embedder': pre_train_lm.embedder,
'encoder': pre_train_lm.encoder.encoder,
'vocab_linear': pre_train_lm.vocab_linear
}
self.pre_lm_weight = "saved/pretrain_lm_weight" + str(epoch+1) + ".pkl"
torch.save(state_dict, self.pre_lm_weight)
if verbose:
self.logger.info("End LM pretraining. PPL: {}".format(ppl))
self.logger.info("Weigth saved in {}".format(self.pre_lm_weight))
return pre_train_lm, ppl
def _g_train_epoch(self, train_data, epoch_idx):
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data) # bs * self.max_len
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
loss = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(loss, total_loss, self.model.generator, self.g_optimizer)
total_loss = total_loss / len(real_dataloader)
return total_loss
def _get_validate_ppl(self, validate_data, epoch_idx):
self.model.generator.eval()
ppl = 0.0
validate_data = self._get_real_data(validate_data) # bs * self.max_len
validate_dataloader = DataLoader(validate_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(validate_dataloader):
loss = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx, validate=True)
ppl += math.exp(loss.item())
ppl = ppl / len(validate_dataloader)
self.model.generator.train()
return ppl
def _d_train_epoch(self, train_data, epoch_idx):
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
losses = self.model.calculate_d_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / len(real_dataloader)
def _adversarial_train_epoch(self, train_data, epoch_idx):
r""" Specified for MaskGAN adversarial training
"""
dis_total_loss = None
gen_total_loss = None
critic_total_loss = None
g_num = 0.0
d_num = 0.0
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
dis_train_data = copy.deepcopy(real_dataloader)
gen_train_data = copy.deepcopy(real_dataloader)
c_train_data = copy.deepcopy(real_dataloader)
dis_train_data = iter(dis_train_data)
gen_train_data = iter(gen_train_data)
_ = next(dis_train_data) # have one offset
for g_x in gen_train_data:
g_num += 1
for _ in range(3):
d_num += 1
try:
d_x = next(dis_train_data)
except StopIteration:
del dis_train_data
dis_train_data = copy.deepcopy(real_dataloader)
dis_train_data = iter(dis_train_data)
d_x = next(dis_train_data)
losses = self.model.calculate_d_train_loss(d_x, epoch_idx=_)
dis_total_loss = self._optimize_step(losses, dis_total_loss, self.model.discriminator, self.d_optimizer)
gen_losses, critic_losses = self.model.calculate_g_adversarial_loss(g_x, epoch_idx=g_num)
gen_total_loss = self._optimize_step(gen_losses, gen_total_loss, self.model.generator, self.g_optimizer)
critic_total_loss = self._optimize_step(critic_losses, critic_total_loss, self.model.discriminator.critic_fc_linear, self.c_optimizer)
return {"dis_loss": dis_total_loss / d_num, "gen_loss": gen_total_loss / g_num, "critic_loss": critic_total_loss / g_num}
def _evaluate_nll_test(self, eval_data):
total_loss = 0
real_data = self._get_real_data(eval_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
nll_test = self.model.calculate_nll_test(data, batch_idx)
total_loss += float(nll_test)
return total_loss / len(eval_data)
def _add_eos(self, data, length):
batch_size, pad_seq_len = data.size()
padded_data = torch.full((batch_size, self.max_length), self.eos_token_idx, dtype=torch.long, device=self.device)
for i in range(batch_size):
l = int(length[i].cpu().data)
if l == self.max_length+2:
padded_data[i, :] = data[i, 1:l-1]
else:
padded_data[i, 0:l-1] = data[i, 1:l]
return padded_data
def _get_real_data(self, train_data):
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx'] # bs*batch_max_seq_len
length = corpus['target_length']
real_data = self._add_eos(real_data, length)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _save_checkpoint(self, epoch, postfix=None):
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict(),
'g_opt': self.g_optimizer.state_dict(),
'd_opt': self.d_optimizer.state_dict(),
'c_opt':self.c_optimizer.state_dict()
}
if postfix is not None:
path = self.saved_model_file + "_" + str(epoch) + "_" + postfix
torch.save(state, path)
return path
else:
torch.save(state, self.saved_model_file)
def _load_generated_text(self):
r""" Load the generated text by our model to log.
"""
with open(self.saved_text_file, 'r') as fin:
samples = []
for i in range(5):
text = fin.readline()
samples.append(text)
return samples
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# generator pretraining
if self.checkp is not None:
checkpoint = torch.load(self.checkp)
self.model.load_state_dict(checkpoint['state_dict'])
self.d_optimizer.load_state_dict(checkpoint["d_opt"])
self.g_optimizer.load_state_dict(checkpoint["g_opt"])
epoch_check = checkpoint['epoch']
if verbose:
self.logger.info("Load checkpoint file from: {}".format(self.checkp))
else:
if self.pre_lm_weight is None:
if verbose:
self.logger.info("Start LM pretraining...")
pretrain_lm, ppl = self.pretrain_lm(train_data, valid_data, verbose)
pretrain_lm = torch.load(self.pre_lm_weight)
embedder = pretrain_lm['embedder'].state_dict()
lstm = pretrain_lm['encoder'].state_dict()
vocab_linear = pretrain_lm['vocab_linear'].state_dict()
self.model.generator.embedder.load_state_dict(embedder)
self.model.generator.encoder.encoder.load_state_dict(lstm)
self.model.generator.decoder.decoder.load_state_dict(lstm)
self.model.generator.vocab_linear.load_state_dict(vocab_linear)
self.model.discriminator.encoder.encoder.load_state_dict(lstm)
self.model.discriminator.decoder.decoder.load_state_dict(lstm)
if verbose:
self.logger.info("Load pretrained LM weight")
else:
pretrain_lm = torch.load(self.pre_lm_weight)
embedder = pretrain_lm['embedder'].state_dict()
lstm = pretrain_lm['encoder'].state_dict()
vocab_linear = pretrain_lm['vocab_linear'].state_dict()
self.model.generator.embedder.load_state_dict(embedder)
self.model.generator.encoder.encoder.load_state_dict(lstm)
self.model.generator.decoder.decoder.load_state_dict(lstm)
self.model.generator.vocab_linear.load_state_dict(vocab_linear)
self.model.discriminator.encoder.encoder.load_state_dict(lstm)
self.model.discriminator.decoder.decoder.load_state_dict(lstm)
if verbose:
self.logger.info("Load pretrained LM weight from: {}".format(self.pre_lm_weight))
if verbose:
self.logger.info("Start generator mask pretraining...")
for epoch_idx in range(self.g_mask_pretraining_epochs):
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
self.g_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"generator pre")
if verbose:
self.logger.info(train_loss_output)
ppl = self._get_validate_ppl(valid_data, epoch_idx)
if verbose:
self.logger.info(
"Epoch {}/{} of mask pretraining PPL: {}...".format(epoch_idx + 1, self.g_mask_pretraining_epochs, ppl))
if ppl <= 90:
if verbose:
path = self._save_checkpoint(epoch_idx + 1, postfix="pretrain_gen")
self.logger.info(">>>> [Pretrain Gen] PPL: {} save weight in {}".format(ppl, path))
self.logger.info("End generator mask pretraining...")
break
if (epoch_idx) % 10 == 0:
self.logger.info(">>>> [Pretrain Gen] Save pretrain gen check in epoch %d ..." % (epoch_idx + 1))
path = self._save_checkpoint(epoch_idx + 1, postfix="pretrain_gen")
self.model.eval()
test_result = self.evaluate(valid_data, model_file=path)
self.model.train()
sample = self._load_generated_text()
tmp = "\n"
for i, s in enumerate(sample):
tmp += str(i)
tmp += ": "
tmp += s.strip()
tmp += "\n"
self.logger.info('>>>> [Pretrain Gen] test result: {}'.format(test_result))
self.logger.info('>>>> [Pretrain Gen] test result samples: {}'.format(tmp))
# discriminator pretraining
if verbose:
self.logger.info("Start discriminator pretraining...")
for epoch_idx in range(self.d_pretraining_epochs):
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
self.d_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End discriminator pretraining...")
# adversarial training
if verbose:
self.logger.info("Start adversarial training...")
for epoch_idx in range(self.adversarail_training_epochs):
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
if (epoch_idx+1) % 10 == 0:
path = self._save_checkpoint((epoch_idx + 1), postfix="adv_train")
self.model.eval()
test_result = self.evaluate(valid_data, model_file=path)
self.model.train()
sample = self._load_generated_text()
tmp = "\n"
for i, s in enumerate(sample):
tmp += str(i)
tmp += ": "
tmp += s.strip()
tmp += "\n"
self.logger.info('>>>>>> [Adv] test result: {}'.format(test_result))
self.logger.info('>>>>>> [Adv] test result samples: {}'.format(tmp))
if verbose:
self.logger.info("End adversarial pretraining...")
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
class LeakGANTrainer(GANTrainer):
r"""Specified for leakgan trainer
"""
def __init__(self, config, model):
super(LeakGANTrainer, self).__init__(config, model)
self.interleaved_pretrain_epoch = config['interleaved_pretrain_epoch']
self.adversarail_g_epochs = config['adversarail_g_epochs']
gen_lr = config['generator_lr'] # 0.001
dis_lr = config['discriminator_lr'] # 0.00005
self.g_optimizer = self._build_module_optimizer_(self.model.generator, gen_lr) # (manager_opt, worker_opt)
self.d_optimizer = self._build_module_optimizer_(self.model.discriminator, dis_lr)
self.iters_num = config['iter_num']
self.end_idx = model.end_idx
def _build_module_optimizer_(self, module, learing_rate):
r"""Specified for leakgan
"""
multi_flag = False
if module._get_name() == 'LeakGANGenerator':
manager_params, worker_params = module.split_params()
multi_flag = True
if self.learner.lower() == 'adam':
if multi_flag:
manager_opt = optim.Adam(manager_params, lr=learing_rate)
worker_opt = optim.Adam(worker_params, lr=learing_rate)
else:
optimizer = optim.Adam(module.parameters(), lr=learing_rate)
elif self.learner.lower() == 'sgd':
if multi_flag:
manager_opt = optim.SGD(manager_params, lr=learing_rate)
worker_opt = optim.SGD(worker_params, lr=learing_rate)
else:
optimizer = optim.SGD(module.parameters(), lr=learing_rate)
elif self.learner.lower() == 'adagrad':
if multi_flag:
manager_opt = optim.Adagrad(manager_params, lr=learing_rate)
worker_opt = optim.Adagrad(worker_params, lr=learing_rate)
else:
optimizer = optim.Adagrad(module.parameters(), lr=learing_rate)
elif self.learner.lower() == 'rmsprop':
if multi_flag:
manager_opt = optim.RMSprop(manager_params, lr=learing_rate)
worker_opt = optim.RMSprop(worker_params, lr=learing_rate)
else:
optimizer = optim.RMSprop(module.parameters(), lr=learing_rate)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
if multi_flag:
manager_opt = optim.Adam(manager_params, lr=learing_rate)
worker_opt = optim.Adam(worker_params, lr=learing_rate)
else:
optimizer = optim.Adam(module.parameters(), lr=learing_rate)
if multi_flag:
return (manager_opt, worker_opt)
else:
return optimizer
def _optimize_step(self, losses, total_loss, model, opt):
r"""Specified for leakgan optimize
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
if isinstance(losses, tuple):
for i, (o, loss) in enumerate(zip(opt, losses)):
o.zero_grad()
loss.backward(retain_graph=True if i < len(opt) - 1 else False)
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
o.step()
else:
opt.zero_grad()
losses.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
r"""Specified for leakgan output format
"""
train_loss_output = "%straining [time: %.2fs, " % (train_info, e_time - s_time)
if isinstance(losses, dict):
for key, loss in losses.items():
train_loss_output += '%s: %.4f, ' % (key, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def _add_eos(self, data, length):
batch_size = data.shape[0]
padded_data = torch.full((batch_size, self.max_length), self.end_idx, dtype=torch.long, device=self.device)
for i in range(batch_size):
len = length[i].cpu().data
padded_data[i, :len] = data[i, :len]
return padded_data
def _get_real_data(self, train_data):
r"""Specified for leakgan which use eos_idx pad not pad_idx
"""
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx']
length = corpus['target_length']
real_data = self._add_eos(real_data, length)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Specified for leakgan adversarial training
"""
self.model.generator.train()
total_g_loss = None
total_d_loss = 0
total_d_acc = 0
adv_mana_loss = 0
adv_work_loss = 0
adv_d_loss = 0
for e in range(self.adversarail_g_epochs):
losses = self.model.calculate_g_adversarial_loss(epoch_idx=e)
total_g_loss = self._optimize_step(losses, total_g_loss, self.model.generator, self.g_optimizer)
adv_mana_loss, adv_work_loss = total_g_loss
adv_mana_loss = adv_mana_loss / self.adversarail_g_epochs
adv_work_loss = adv_work_loss / self.adversarail_g_epochs
for e in range(self.adversarail_d_epochs):
loss_dict = self._d_train_epoch(train_data, epoch_idx=epoch_idx)
total_d_loss = total_d_loss + loss_dict['total_loss']
total_d_acc = total_d_acc + loss_dict['train_acc']
adv_d_loss = total_d_loss / self.adversarail_d_epochs
adv_c_loss = total_d_acc / self.adversarail_d_epochs
return {"mana_loss": adv_mana_loss, "work_loss": adv_work_loss, "dis_loss": adv_d_loss, "train_acc": adv_c_loss}
def _g_train_epoch(self, train_data, epoch_idx):
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
# interaction = interaction.to(self.device)
losses = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
total_loss = [l / len(real_dataloader) for l in total_loss] if isinstance(total_loss,
tuple) else total_loss / len(
train_data)
mana_loss, work_loss = total_loss
return {"mana_loss": mana_loss, "work_loss": work_loss}
def _d_train_epoch(self, train_data, epoch_idx):
total_loss = None
total_acc = 0
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
# not need sample self.d_sample_num numbers becauese only train discriminator 5 batch
d_sample_num = (self.d_sample_training_epochs + 1) * self.model.batch_size
fake_data = self.model.sample(d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
idx = 0
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
# self.model.discriminator.eval() # pretraining not use dropout
if idx == self.d_sample_training_epochs:
break
losses, acc = self.model.calculate_d_train_loss(real_data, fake_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
total_acc = total_acc + acc
idx += 1
total_loss = total_loss / self.d_sample_training_epochs
total_acc = total_acc / self.d_sample_training_epochs
return {"total_loss": total_loss, "train_acc": total_acc}
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# pretraining
if verbose:
self.logger.info(">> Start pretraining")
# generator pretraining
for epoch_idx in range(self.g_pretraining_epochs): # 80
if verbose:
self.logger.info(">>>> [Pretrain Gen] Start %d / %d epochs generator pretraining" % (
epoch_idx + 1, self.g_pretraining_epochs))
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx + 1, training_start_time, training_end_time, train_loss,
"generator pre")
train_loss_output = ">>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# discriminator pretraining
for epoch_idx in range(self.d_pretraining_epochs): # 5
if verbose:
self.logger.info(">>>> [Pretrain Dis]Start %d / %d epochs discriminator pretraining..." % (
epoch_idx + 1, self.d_pretraining_epochs))
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
train_loss_output = ">>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info(">> End pretraining")
# adversarial training
if verbose:
self.logger.info(">> Start adversarial training")
for epoch in range(int(self.iters_num / self.adversarail_training_epochs)):
if verbose:
self.logger.info(">>>> [Adv] Start epoch %d / 10 interleaved adversarial training" % (epoch + 1))
for epoch_idx in range(self.adversarail_training_epochs):
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / %d adversarial training" % (
epoch_idx + 1, self.adversarail_training_epochs))
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
# self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
train_info="adv ")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# gen pretrain
for epoch_idx in range(5):
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / 5 pretrain generator" % (epoch_idx + 1))
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
"adv generator pre")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# dis pretrain
for epoch_idx in range(5): # d_steps
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / 5 pretrain discriminator" % (epoch_idx + 1))
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
"adv discriminator pre")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
| [
"logging.getLogger",
"textbox.utils.early_stopping",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.ylabel",
"textbox.evaluator.TranslationEvaluator",
"copy.deepcopy",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"torch.optim.RMSprop",
"textbox.evaluator.SummarizationEvaluator",
"torch.ones_like",
"torch.optim.SGD",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"torch.save",
"textbox.evaluator.NgramEvaluator",
"time.time",
"torch.cat",
"matplotlib.pyplot.show",
"torch.optim.Adam",
"torch.optim.Adagrad",
"torch.full",
"torch.load",
"os.path.join",
"torch.tensor",
"textbox.utils.ensure_dir",
"numpy.random.randint",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.isnan"
]
| [((14204, 14219), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14217, 14219), False, 'import torch\n'), ((32618, 32633), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (32631, 32633), False, 'import torch\n'), ((2571, 2582), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (2580, 2582), False, 'from logging import getLogger\n'), ((3097, 3128), 'textbox.utils.ensure_dir', 'ensure_dir', (['self.checkpoint_dir'], {}), '(self.checkpoint_dir)\n', (3107, 3128), False, 'from textbox.utils import ensure_dir, early_stopping\n'), ((3221, 3272), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', 'saved_model_file'], {}), '(self.checkpoint_dir, saved_model_file)\n', (3233, 3272), False, 'import os\n'), ((3345, 3380), 'textbox.utils.ensure_dir', 'ensure_dir', (['self.generated_text_dir'], {}), '(self.generated_text_dir)\n', (3355, 3380), False, 'from textbox.utils import ensure_dir, early_stopping\n'), ((3471, 3525), 'os.path.join', 'os.path.join', (['self.generated_text_dir', 'saved_text_file'], {}), '(self.generated_text_dir, saved_text_file)\n', (3483, 3525), False, 'import os\n'), ((7564, 7582), 'numpy.exp', 'np.exp', (['valid_loss'], {}), '(valid_loss)\n', (7570, 7582), True, 'import numpy as np\n'), ((8103, 8143), 'torch.save', 'torch.save', (['state', 'self.saved_model_file'], {}), '(state, self.saved_model_file)\n', (8113, 8143), False, 'import torch\n'), ((8730, 8753), 'torch.load', 'torch.load', (['resume_file'], {}), '(resume_file)\n', (8740, 8753), False, 'import torch\n'), ((9666, 9683), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (9677, 9683), False, 'import torch\n'), ((16328, 16352), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'values'], {}), '(epochs, values)\n', (16336, 16352), True, 'import matplotlib.pyplot as plt\n'), ((16361, 16379), 'matplotlib.pyplot.xticks', 'plt.xticks', (['epochs'], {}), '(epochs)\n', (16371, 16379), True, 'import matplotlib.pyplot as plt\n'), ((16388, 16407), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (16398, 16407), True, 'import matplotlib.pyplot as plt\n'), ((16416, 16434), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (16426, 16434), True, 'import matplotlib.pyplot as plt\n'), ((20219, 20259), 'torch.save', 'torch.save', (['state', 'self.saved_model_file'], {}), '(state, self.saved_model_file)\n', (20229, 20259), False, 'import torch\n'), ((20622, 20719), 'torch.full', 'torch.full', (['(batch_size, self.max_length)', 'self.pad_idx'], {'dtype': 'torch.long', 'device': 'self.device'}), '((batch_size, self.max_length), self.pad_idx, dtype=torch.long,\n device=self.device)\n', (20632, 20719), False, 'import torch\n'), ((21299, 21327), 'torch.cat', 'torch.cat', (['real_datas'], {'dim': '(0)'}), '(real_datas, dim=0)\n', (21308, 21327), False, 'import torch\n'), ((23162, 23251), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (23172, 23251), False, 'from torch.utils.data import DataLoader\n'), ((23331, 23420), 'torch.utils.data.DataLoader', 'DataLoader', (['fake_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(fake_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (23341, 23420), False, 'from torch.utils.data import DataLoader\n'), ((27847, 27936), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (27857, 27936), False, 'from torch.utils.data import DataLoader\n'), ((28753, 28842), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (28763, 28842), False, 'from torch.utils.data import DataLoader\n'), ((30266, 30355), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (30276, 30355), False, 'from torch.utils.data import DataLoader\n'), ((30435, 30524), 'torch.utils.data.DataLoader', 'DataLoader', (['fake_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(fake_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (30445, 30524), False, 'from torch.utils.data import DataLoader\n'), ((30542, 30608), 'numpy.random.randint', 'np.random.randint', (['(0)', 'real_data.shape[0]'], {'size': 'self.model.ref_size'}), '(0, real_data.shape[0], size=self.model.ref_size)\n', (30559, 30608), True, 'import numpy as np\n'), ((31822, 31888), 'numpy.random.randint', 'np.random.randint', (['(0)', 'real_data.shape[0]'], {'size': 'self.model.ref_size'}), '(0, real_data.shape[0], size=self.model.ref_size)\n', (31839, 31888), True, 'import numpy as np\n'), ((40638, 40727), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (40648, 40727), False, 'from torch.utils.data import DataLoader\n'), ((41271, 41364), 'torch.utils.data.DataLoader', 'DataLoader', (['validate_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(validate_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (41281, 41364), False, 'from torch.utils.data import DataLoader\n'), ((41860, 41949), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (41870, 41949), False, 'from torch.utils.data import DataLoader\n'), ((42589, 42678), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (42599, 42678), False, 'from torch.utils.data import DataLoader\n'), ((42701, 42731), 'copy.deepcopy', 'copy.deepcopy', (['real_dataloader'], {}), '(real_dataloader)\n', (42714, 42731), False, 'import copy\n'), ((42757, 42787), 'copy.deepcopy', 'copy.deepcopy', (['real_dataloader'], {}), '(real_dataloader)\n', (42770, 42787), False, 'import copy\n'), ((42811, 42841), 'copy.deepcopy', 'copy.deepcopy', (['real_dataloader'], {}), '(real_dataloader)\n', (42824, 42841), False, 'import copy\n'), ((44262, 44351), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (44272, 44351), False, 'from torch.utils.data import DataLoader\n'), ((44669, 44773), 'torch.full', 'torch.full', (['(batch_size, self.max_length)', 'self.eos_token_idx'], {'dtype': 'torch.long', 'device': 'self.device'}), '((batch_size, self.max_length), self.eos_token_idx, dtype=torch.\n long, device=self.device)\n', (44679, 44773), False, 'import torch\n'), ((45369, 45397), 'torch.cat', 'torch.cat', (['real_datas'], {'dim': '(0)'}), '(real_datas, dim=0)\n', (45378, 45397), False, 'import torch\n'), ((57835, 57932), 'torch.full', 'torch.full', (['(batch_size, self.max_length)', 'self.end_idx'], {'dtype': 'torch.long', 'device': 'self.device'}), '((batch_size, self.max_length), self.end_idx, dtype=torch.long,\n device=self.device)\n', (57845, 57932), False, 'import torch\n'), ((58471, 58499), 'torch.cat', 'torch.cat', (['real_datas'], {'dim': '(0)'}), '(real_datas, dim=0)\n', (58480, 58499), False, 'import torch\n'), ((59924, 60013), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (59934, 60013), False, 'from torch.utils.data import DataLoader\n'), ((60828, 60917), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (60838, 60917), False, 'from torch.utils.data import DataLoader\n'), ((61170, 61259), 'torch.utils.data.DataLoader', 'DataLoader', (['fake_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(fake_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (61180, 61259), False, 'from torch.utils.data import DataLoader\n'), ((3875, 3903), 'textbox.evaluator.TranslationEvaluator', 'TranslationEvaluator', (['config'], {}), '(config)\n', (3895, 3903), False, 'from textbox.evaluator import NgramEvaluator, TranslationEvaluator, SummarizationEvaluator\n'), ((11121, 11127), 'time.time', 'time', ([], {}), '()\n', (11125, 11127), False, 'from time import time\n'), ((11337, 11343), 'time.time', 'time', ([], {}), '()\n', (11341, 11343), False, 'from time import time\n'), ((15175, 15202), 'torch.load', 'torch.load', (['checkpoint_file'], {}), '(checkpoint_file)\n', (15185, 15202), False, 'import torch\n'), ((15455, 15470), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15468, 15470), False, 'import torch\n'), ((16464, 16474), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16472, 16474), True, 'import matplotlib.pyplot as plt\n'), ((16509, 16531), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (16520, 16531), True, 'import matplotlib.pyplot as plt\n'), ((25117, 25123), 'time.time', 'time', ([], {}), '()\n', (25121, 25123), False, 'from time import time\n'), ((25343, 25349), 'time.time', 'time', ([], {}), '()\n', (25347, 25349), False, 'from time import time\n'), ((25936, 25942), 'time.time', 'time', ([], {}), '()\n', (25940, 25942), False, 'from time import time\n'), ((26162, 26168), 'time.time', 'time', ([], {}), '()\n', (26166, 26168), False, 'from time import time\n'), ((26760, 26766), 'time.time', 'time', ([], {}), '()\n', (26764, 26766), False, 'from time import time\n'), ((26988, 26994), 'time.time', 'time', ([], {}), '()\n', (26992, 26994), False, 'from time import time\n'), ((33589, 33616), 'torch.load', 'torch.load', (['checkpoint_file'], {}), '(checkpoint_file)\n', (33599, 33616), False, 'import torch\n'), ((37659, 37687), 'torch.tensor', 'torch.tensor', (['([seq_len] * bs)'], {}), '([seq_len] * bs)\n', (37671, 37687), False, 'import torch\n'), ((38077, 38104), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (38102, 38104), False, 'import torch\n'), ((38475, 38564), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(real_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (38485, 38564), False, 'from torch.utils.data import DataLoader\n'), ((45988, 46011), 'torch.save', 'torch.save', (['state', 'path'], {}), '(state, path)\n', (45998, 46011), False, 'import torch\n'), ((46062, 46102), 'torch.save', 'torch.save', (['state', 'self.saved_model_file'], {}), '(state, self.saved_model_file)\n', (46072, 46102), False, 'import torch\n'), ((46589, 46612), 'torch.load', 'torch.load', (['self.checkp'], {}), '(self.checkp)\n', (46599, 46612), False, 'import torch\n'), ((49064, 49070), 'time.time', 'time', ([], {}), '()\n', (49068, 49070), False, 'from time import time\n'), ((49290, 49296), 'time.time', 'time', ([], {}), '()\n', (49294, 49296), False, 'from time import time\n'), ((51221, 51227), 'time.time', 'time', ([], {}), '()\n', (51225, 51227), False, 'from time import time\n'), ((51447, 51453), 'time.time', 'time', ([], {}), '()\n', (51451, 51453), False, 'from time import time\n'), ((52045, 52051), 'time.time', 'time', ([], {}), '()\n', (52049, 52051), False, 'from time import time\n'), ((52273, 52279), 'time.time', 'time', ([], {}), '()\n', (52277, 52279), False, 'from time import time\n'), ((62456, 62462), 'time.time', 'time', ([], {}), '()\n', (62460, 62462), False, 'from time import time\n'), ((62563, 62569), 'time.time', 'time', ([], {}), '()\n', (62567, 62569), False, 'from time import time\n'), ((63248, 63254), 'time.time', 'time', ([], {}), '()\n', (63252, 63254), False, 'from time import time\n'), ((63355, 63361), 'time.time', 'time', ([], {}), '()\n', (63359, 63361), False, 'from time import time\n'), ((3981, 4011), 'textbox.evaluator.SummarizationEvaluator', 'SummarizationEvaluator', (['config'], {}), '(config)\n', (4003, 4011), False, 'from textbox.evaluator import NgramEvaluator, TranslationEvaluator, SummarizationEvaluator\n'), ((4055, 4077), 'textbox.evaluator.NgramEvaluator', 'NgramEvaluator', (['config'], {}), '(config)\n', (4069, 4077), False, 'from textbox.evaluator import NgramEvaluator, TranslationEvaluator, SummarizationEvaluator\n'), ((12047, 12053), 'time.time', 'time', ([], {}), '()\n', (12051, 12053), False, 'from time import time\n'), ((12283, 12396), 'textbox.utils.early_stopping', 'early_stopping', (['valid_score', 'self.best_valid_score', 'self.cur_step'], {'max_step': 'self.stopping_step', 'bigger': '(False)'}), '(valid_score, self.best_valid_score, self.cur_step, max_step=\n self.stopping_step, bigger=False)\n', (12297, 12396), False, 'from textbox.utils import ensure_dir, early_stopping\n'), ((12549, 12555), 'time.time', 'time', ([], {}), '()\n', (12553, 12555), False, 'from time import time\n'), ((39184, 39277), 'torch.utils.data.DataLoader', 'DataLoader', (['validate_data'], {'batch_size': 'self.model.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(validate_data, batch_size=self.model.batch_size, shuffle=True,\n drop_last=True)\n', (39194, 39277), False, 'from torch.utils.data import DataLoader\n'), ((47231, 47261), 'torch.load', 'torch.load', (['self.pre_lm_weight'], {}), '(self.pre_lm_weight)\n', (47241, 47261), False, 'import torch\n'), ((48060, 48090), 'torch.load', 'torch.load', (['self.pre_lm_weight'], {}), '(self.pre_lm_weight)\n', (48070, 48090), False, 'import torch\n'), ((54476, 54519), 'torch.optim.Adam', 'optim.Adam', (['manager_params'], {'lr': 'learing_rate'}), '(manager_params, lr=learing_rate)\n', (54486, 54519), True, 'import torch.optim as optim\n'), ((54549, 54591), 'torch.optim.Adam', 'optim.Adam', (['worker_params'], {'lr': 'learing_rate'}), '(worker_params, lr=learing_rate)\n', (54559, 54591), True, 'import torch.optim as optim\n'), ((64425, 64431), 'time.time', 'time', ([], {}), '()\n', (64429, 64431), False, 'from time import time\n'), ((64667, 64673), 'time.time', 'time', ([], {}), '()\n', (64671, 64673), False, 'from time import time\n'), ((65351, 65357), 'time.time', 'time', ([], {}), '()\n', (65355, 65357), False, 'from time import time\n'), ((65466, 65472), 'time.time', 'time', ([], {}), '()\n', (65470, 65472), False, 'from time import time\n'), ((66167, 66173), 'time.time', 'time', ([], {}), '()\n', (66171, 66173), False, 'from time import time\n'), ((66282, 66288), 'time.time', 'time', ([], {}), '()\n', (66286, 66288), False, 'from time import time\n'), ((12075, 12090), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12088, 12090), False, 'import torch\n'), ((37717, 37739), 'torch.ones_like', 'torch.ones_like', (['input'], {}), '(input)\n', (37732, 37739), False, 'import torch\n'), ((40128, 40170), 'torch.save', 'torch.save', (['state_dict', 'self.pre_lm_weight'], {}), '(state_dict, self.pre_lm_weight)\n', (40138, 40170), False, 'import torch\n'), ((54788, 54830), 'torch.optim.SGD', 'optim.SGD', (['manager_params'], {'lr': 'learing_rate'}), '(manager_params, lr=learing_rate)\n', (54797, 54830), True, 'import torch.optim as optim\n'), ((54860, 54901), 'torch.optim.SGD', 'optim.SGD', (['worker_params'], {'lr': 'learing_rate'}), '(worker_params, lr=learing_rate)\n', (54869, 54901), True, 'import torch.optim as optim\n'), ((43285, 43315), 'copy.deepcopy', 'copy.deepcopy', (['real_dataloader'], {}), '(real_dataloader)\n', (43298, 43315), False, 'import copy\n'), ((55101, 55147), 'torch.optim.Adagrad', 'optim.Adagrad', (['manager_params'], {'lr': 'learing_rate'}), '(manager_params, lr=learing_rate)\n', (55114, 55147), True, 'import torch.optim as optim\n'), ((55177, 55222), 'torch.optim.Adagrad', 'optim.Adagrad', (['worker_params'], {'lr': 'learing_rate'}), '(worker_params, lr=learing_rate)\n', (55190, 55222), True, 'import torch.optim as optim\n'), ((55426, 55472), 'torch.optim.RMSprop', 'optim.RMSprop', (['manager_params'], {'lr': 'learing_rate'}), '(manager_params, lr=learing_rate)\n', (55439, 55472), True, 'import torch.optim as optim\n'), ((55502, 55547), 'torch.optim.RMSprop', 'optim.RMSprop', (['worker_params'], {'lr': 'learing_rate'}), '(worker_params, lr=learing_rate)\n', (55515, 55547), True, 'import torch.optim as optim\n'), ((55812, 55855), 'torch.optim.Adam', 'optim.Adam', (['manager_params'], {'lr': 'learing_rate'}), '(manager_params, lr=learing_rate)\n', (55822, 55855), True, 'import torch.optim as optim\n'), ((55885, 55927), 'torch.optim.Adam', 'optim.Adam', (['worker_params'], {'lr': 'learing_rate'}), '(worker_params, lr=learing_rate)\n', (55895, 55927), True, 'import torch.optim as optim\n')] |
import pandas as pd
# Define our header
col_names = [
"year",
"num_males_with_income",
"male_median_income_curr_dollars",
"male_median_income_2019_dollars",
"num_females_with_income",
"female_median_income_curr_dollars",
"female_median_income_2019_dollars",
]
# Load Asian census data XLS, skipping all headers
dfa = pd.read_excel(
r'p08a.xlsx',
skiprows=8,
# Make sure PD doesn't use header row for our DF
header=None,
# Define col names
names=col_names,
)
# Load White census data XLS, skipping all headers
dfw = pd.read_excel(
r'p08w.xlsx',
skiprows=8,
# Make sure PD doesn't use header row for our DF
header=None,
# Define cold names
names=col_names
)
# Splinter off rows into age group DFs for both sets of data
dfa1524 = dfa.iloc[:20]
dfa2534 = dfa.iloc[25:45]
dfa3544 = dfa.iloc[50:70]
dfa4554 = dfa.iloc[75:95]
dfa5564 = dfa.iloc[100:120]
dfa6574 = dfa.iloc[125:145]
dfa75 = dfa.iloc[150:170]
dfw1524 = dfw.iloc[:20]
dfw2534 = dfw.iloc[25:45]
dfw3544 = dfw.iloc[50:70]
dfw4554 = dfw.iloc[75:95]
dfw5564 = dfw.iloc[100:120]
dfw6574 = dfw.iloc[125:145]
dfw75 = dfw.iloc[150:170]
# Add Age Range col to each DF
dfa1524.insert(0, 'age_range', '15-24')
dfa2534.insert(0, 'age_range', '25-34')
dfa3544.insert(0, 'age_range', '35-44')
dfa4554.insert(0, 'age_range', '45-54')
dfa5564.insert(0, 'age_range', '55-64')
dfa6574.insert(0, 'age_range', '65-74')
dfa75.insert(0, 'age_range', 'Over 75')
dfw1524.insert(0, 'age_range', '15-24')
dfw2534.insert(0, 'age_range', '25-34')
dfw3544.insert(0, 'age_range', '35-44')
dfw4554.insert(0, 'age_range', '45-54')
dfw5564.insert(0, 'age_range', '55-64')
dfw6574.insert(0, 'age_range', '65-74')
dfw75.insert(0, 'age_range', 'Over 75')
# Stack cleaned DF's vertically
dfa = pd.concat([
dfa1524,
dfa2534,
dfa3544,
dfa4554,
dfa5564,
dfa6574,
dfa75
], axis=0)
dfw = pd.concat([
dfw1524,
dfw2534,
dfw3544,
dfw4554,
dfw5564,
dfw6574,
dfw75
], axis=0)
# Add Race col
dfa.insert(0, 'race', 'asian')
dfw.insert(0, 'race', 'white')
# Clean garbage chars in Year col using regex
dfa['year'] = dfa['year'].replace(to_replace=r'(\s\(\d+\))', value='', regex=True)
dfw['year'] = dfw['year'].replace(to_replace=r'(\s\(\d+\))', value='', regex=True)
# Stack our cleaned + normalized data into a single DF
df = pd.concat([
dfa,
dfw
], axis=0)
# Convert the DF col types to conform to our CensusRecord model
df = df.astype({
"race": str,
"age_range": str,
"year": int,
"num_males_with_income": int,
"male_median_income_curr_dollars": float,
"male_median_income_2019_dollars": float,
"num_females_with_income": int,
"female_median_income_curr_dollars": float,
"female_median_income_2019_dollars": float,
})
# Pickle the DF
df.to_pickle("./res.pkl")
| [
"pandas.concat",
"pandas.read_excel"
]
| [((347, 415), 'pandas.read_excel', 'pd.read_excel', (['"""p08a.xlsx"""'], {'skiprows': '(8)', 'header': 'None', 'names': 'col_names'}), "('p08a.xlsx', skiprows=8, header=None, names=col_names)\n", (360, 415), True, 'import pandas as pd\n'), ((569, 637), 'pandas.read_excel', 'pd.read_excel', (['"""p08w.xlsx"""'], {'skiprows': '(8)', 'header': 'None', 'names': 'col_names'}), "('p08w.xlsx', skiprows=8, header=None, names=col_names)\n", (582, 637), True, 'import pandas as pd\n'), ((1796, 1881), 'pandas.concat', 'pd.concat', (['[dfa1524, dfa2534, dfa3544, dfa4554, dfa5564, dfa6574, dfa75]'], {'axis': '(0)'}), '([dfa1524, dfa2534, dfa3544, dfa4554, dfa5564, dfa6574, dfa75], axis=0\n )\n', (1805, 1881), True, 'import pandas as pd\n'), ((1914, 1999), 'pandas.concat', 'pd.concat', (['[dfw1524, dfw2534, dfw3544, dfw4554, dfw5564, dfw6574, dfw75]'], {'axis': '(0)'}), '([dfw1524, dfw2534, dfw3544, dfw4554, dfw5564, dfw6574, dfw75], axis=0\n )\n', (1923, 1999), True, 'import pandas as pd\n'), ((2377, 2406), 'pandas.concat', 'pd.concat', (['[dfa, dfw]'], {'axis': '(0)'}), '([dfa, dfw], axis=0)\n', (2386, 2406), True, 'import pandas as pd\n')] |
from AndroidSpider import url_manager, html_downloader, html_parser, html_output
'''
爬取百度百科 Android 关键词相关词及简介并输出为一个HTML tab网页
Extra module:
BeautifulSoup
'''
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownLoader()
self.parser = html_parser.HtmlParser()
self.out_put = html_output.HtmlOutput()
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print("craw %d : %s" % (count, new_url))
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36"
}
html_content = self.downloader.download(new_url, retry_count=2, headers=headers)
new_urls, new_data = self.parser.parse(new_url, html_content, "utf-8")
self.urls.add_new_urls(new_urls)
self.out_put.collect_data(new_data)
if count >= 30:
break
count = count + 1
except Exception as e:
print("craw failed!\n"+str(e))
self.out_put.output_html()
if __name__ == "__main__":
rootUrl = "http://baike.baidu.com/item/Android"
objSpider = SpiderMain()
objSpider.craw(rootUrl)
| [
"AndroidSpider.url_manager.UrlManager",
"AndroidSpider.html_downloader.HtmlDownLoader",
"AndroidSpider.html_output.HtmlOutput",
"AndroidSpider.html_parser.HtmlParser"
]
| [((230, 254), 'AndroidSpider.url_manager.UrlManager', 'url_manager.UrlManager', ([], {}), '()\n', (252, 254), False, 'from AndroidSpider import url_manager, html_downloader, html_parser, html_output\n'), ((281, 313), 'AndroidSpider.html_downloader.HtmlDownLoader', 'html_downloader.HtmlDownLoader', ([], {}), '()\n', (311, 313), False, 'from AndroidSpider import url_manager, html_downloader, html_parser, html_output\n'), ((336, 360), 'AndroidSpider.html_parser.HtmlParser', 'html_parser.HtmlParser', ([], {}), '()\n', (358, 360), False, 'from AndroidSpider import url_manager, html_downloader, html_parser, html_output\n'), ((384, 408), 'AndroidSpider.html_output.HtmlOutput', 'html_output.HtmlOutput', ([], {}), '()\n', (406, 408), False, 'from AndroidSpider import url_manager, html_downloader, html_parser, html_output\n')] |
import boto
import boto3
from config import Config
dynamodb = boto3.resource('dynamodb',
aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY,
region_name=Config.REGION)
table = dynamodb.Table('user_details')
tables = boto3.resource('dynamodb', aws_access_key_id=Config.AWS_KEY,
aws_secret_access_key=Config.AWS_SECRET_KEY, region_name=Config.REGION).Table('user_details')
print(tables.creation_date_time)
def main():
print("29.7604267")
def insert_into_db(user):
print(user.lastname)
try:
table.put_item(
Item={
'pin': user.pin,
'firstname': user.firstname,
'lastname': user.lastname,
}
)
except Exception as E:
print(E)
return False
return True
if __name__ == "__main__":
main()
| [
"boto3.resource"
]
| [((63, 199), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'aws_access_key_id': 'Config.AWS_KEY', 'aws_secret_access_key': 'Config.AWS_SECRET_KEY', 'region_name': 'Config.REGION'}), "('dynamodb', aws_access_key_id=Config.AWS_KEY,\n aws_secret_access_key=Config.AWS_SECRET_KEY, region_name=Config.REGION)\n", (77, 199), False, 'import boto3\n'), ((323, 459), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'aws_access_key_id': 'Config.AWS_KEY', 'aws_secret_access_key': 'Config.AWS_SECRET_KEY', 'region_name': 'Config.REGION'}), "('dynamodb', aws_access_key_id=Config.AWS_KEY,\n aws_secret_access_key=Config.AWS_SECRET_KEY, region_name=Config.REGION)\n", (337, 459), False, 'import boto3\n')] |
from logging import warning
from requests import get
from .info import Info
from .provider import Provider
from .providers import get_provider
class Parser:
def __init__(self, args: dict):
self.params = args
def init_provider(
self,
chapter_progress: callable = None,
global_progress: callable = None,
log: callable = None,
quest: callable = None,
info: Info = None,
quest_password: callable = None,
):
original_url = self.params.get('url', '')
provider_url = self.params.get('force_provider', None)
provider = get_provider(provider_url or original_url)
if isinstance(provider, bool):
raise AttributeError('Provider not found')
# update url (if redirect)
self.provider = provider(info) # type: Provider
self.provider.original_url = original_url
real_url = self.check_url(original_url)
if self.provider.allow_auto_change_url():
if real_url != original_url:
warning('Manga url changed! New url: {}'.format(real_url))
self.params['url'] = real_url
self.provider.quiet = self.params.get('quiet', False)
self.provider.set_chapter_progress_callback(chapter_progress)
self.provider.set_global_progress_callback(global_progress)
self.provider.set_log_callback(log)
self.provider.set_quest_callback(quest)
self.provider.set_quest_password_callback(quest_password)
def start(self):
self.provider.process(self.params['url'], self.params)
def check_url(self, url):
proxy = self.params.get('proxy', None)
proxies = {
'http': proxy,
'https': proxy,
} if proxy else None
with get(url, stream=True, proxies=proxies) as response:
_url = response.url
if url != _url:
url = _url
return url
| [
"requests.get"
]
| [((1827, 1865), 'requests.get', 'get', (['url'], {'stream': '(True)', 'proxies': 'proxies'}), '(url, stream=True, proxies=proxies)\n', (1830, 1865), False, 'from requests import get\n')] |
"""Holds configurations to read and write with Spark to AWS S3."""
import os
from typing import Any, Dict, List, Optional
from pyspark.sql import DataFrame
from butterfree.configs import environment
from butterfree.configs.db import AbstractWriteConfig
from butterfree.dataframe_service import extract_partition_values
class MetastoreConfig(AbstractWriteConfig):
"""Configuration for Spark metastore database stored.
By default the configuration is for AWS S3.
Attributes:
path: database root location.
mode: writing mode used be writers.
format_: expected stored file format.
file_system: file schema uri, like: s3a, file.
"""
def __init__(
self,
path: str = None,
mode: str = None,
format_: str = None,
file_system: str = None,
):
self.path = path
self.mode = mode
self.format_ = format_
self.file_system = file_system
@property
def path(self) -> Optional[str]:
"""Bucket name."""
return self.__path
@path.setter
def path(self, value: str) -> None:
self.__path = value or environment.get_variable("FEATURE_STORE_S3_BUCKET")
@property
def format_(self) -> Optional[str]:
"""Expected stored file format."""
return self.__format
@format_.setter
def format_(self, value: str) -> None:
self.__format = value or "parquet"
@property
def mode(self) -> Optional[str]:
"""Writing mode used be writers."""
return self.__mode
@mode.setter
def mode(self, value: str) -> None:
self.__mode = value or "overwrite"
@property
def file_system(self) -> Optional[str]:
"""Writing mode used be writers."""
return self.__file_system
@file_system.setter
def file_system(self, value: str) -> None:
self.__file_system = value or "s3a"
def get_options(self, key: str) -> Dict[Optional[str], Optional[str]]:
"""Get options for Metastore.
Options will be a dictionary with the write and read configuration for
Spark Metastore.
Args:
key: path to save data into Metastore.
Returns:
Options configuration for Metastore.
"""
return {
"mode": self.mode,
"format_": self.format_,
"path": os.path.join(f"{self.file_system}://{self.path}/", key),
}
def get_path_with_partitions(self, key: str, dataframe: DataFrame) -> List:
"""Get options for AWS S3 from partitioned parquet file.
Options will be a dictionary with the write and read configuration for
Spark to AWS S3.
Args:
key: path to save data into AWS S3 bucket.
dataframe: spark dataframe containing data from a feature set.
Returns:
A list of string for file-system backed data sources.
"""
path_list = []
dataframe_values = extract_partition_values(
dataframe, partition_columns=["year", "month", "day"]
)
for row in dataframe_values:
path_list.append(
f"{self.file_system}://{self.path}/{key}/year={row['year']}/"
f"month={row['month']}/day={row['day']}"
)
return path_list
def translate(self, schema: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Translate feature set spark schema to the corresponding database."""
pass
| [
"butterfree.dataframe_service.extract_partition_values",
"butterfree.configs.environment.get_variable",
"os.path.join"
]
| [((2995, 3074), 'butterfree.dataframe_service.extract_partition_values', 'extract_partition_values', (['dataframe'], {'partition_columns': "['year', 'month', 'day']"}), "(dataframe, partition_columns=['year', 'month', 'day'])\n", (3019, 3074), False, 'from butterfree.dataframe_service import extract_partition_values\n'), ((1153, 1204), 'butterfree.configs.environment.get_variable', 'environment.get_variable', (['"""FEATURE_STORE_S3_BUCKET"""'], {}), "('FEATURE_STORE_S3_BUCKET')\n", (1177, 1204), False, 'from butterfree.configs import environment\n'), ((2386, 2441), 'os.path.join', 'os.path.join', (['f"""{self.file_system}://{self.path}/"""', 'key'], {}), "(f'{self.file_system}://{self.path}/', key)\n", (2398, 2441), False, 'import os\n')] |
#!/usr/bin/env python3
import sys
sys.path.append('..')
import specrel.geom as geom
import specrel.spacetime.physical as phy
import specrel.visualize as vis
# Shared parameters
include_grid = True
include_legend = True
tlim = (0, 2)
xlim = (-2, 2)
# A stationary point object
stationary = phy.MovingObject(0, draw_options={'label': '$v = 0$'})
## Alternate:
# direction = (1, 0)
# point = (0, 0)
# stationary = geom.Line(direction, point, draw_options={'label': '$v = 0$'})
title='Stationary object'
p = vis.stplot(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
p.save('2-objects_stationary_point.png')
p.show()
# A stationary point object, animated
anim = vis.stanimate(stationary, title=title, tlim=tlim, xlim=xlim,
grid=include_grid, legend=include_legend)
anim.save('2-objects_stationary_point_anim.mp4')
anim.show()
# A stationary point object, animated with worldline
anim = vis.stanimate_with_worldline(stationary, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper right')
anim.save('2-objects_stationary_point_anim_worldline.mp4')
anim.show()
# A bunch of moving point objects, animated
moving = phy.MovingObject(0, velocity=1/2,
draw_options={'color': 'red', 'label': '$v = c/2$'})
light = phy.MovingObject(0, velocity=1,
draw_options={'color': 'gold', 'label': '$v = c$'})
ftl = phy.MovingObject(0, velocity=3/2,
draw_options={'color': 'cyan', 'label': '$v = 3c/2$'})
objects = geom.Collection([stationary, moving, light, ftl])
title = 'Various objects'
anim = vis.stanimate_with_worldline(objects, title=title,
current_time_color='magenta', tlim=tlim, xlim=xlim, grid=include_grid,
legend=include_legend, legend_loc='upper left')
anim.save('2-objects_moving_points.mp4')
anim.show()
# A moving meterstick
meterstick = phy.MovingObject(-1/2, length=1, velocity=1/2,
draw_options={'label': 'Meterstick'})
# # Alternate:
# direction = (1, 1/2)
# left = geom.Line(direction, (0, -1/2))
# right = geom.Line(direction, (0, 1/2))
# meterstick = geom.Ribbon(left, right, draw_options={'label': 'Meterstick'})
title = 'Moving meterstick ($v = c/2$)'
anim = vis.stanimate_with_worldline(meterstick, title=title,
tlim=tlim, xlim=xlim, grid=include_grid, legend=include_legend,
legend_loc='upper left')
anim.save('2-objects_moving_meterstick.mp4')
anim.show()
| [
"specrel.visualize.stplot",
"specrel.spacetime.physical.MovingObject",
"specrel.visualize.stanimate_with_worldline",
"specrel.geom.Collection",
"specrel.visualize.stanimate",
"sys.path.append"
]
| [((34, 55), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (49, 55), False, 'import sys\n'), ((292, 346), 'specrel.spacetime.physical.MovingObject', 'phy.MovingObject', (['(0)'], {'draw_options': "{'label': '$v = 0$'}"}), "(0, draw_options={'label': '$v = 0$'})\n", (308, 346), True, 'import specrel.spacetime.physical as phy\n'), ((507, 610), 'specrel.visualize.stplot', 'vis.stplot', (['stationary'], {'title': 'title', 'tlim': 'tlim', 'xlim': 'xlim', 'grid': 'include_grid', 'legend': 'include_legend'}), '(stationary, title=title, tlim=tlim, xlim=xlim, grid=include_grid,\n legend=include_legend)\n', (517, 610), True, 'import specrel.visualize as vis\n'), ((707, 814), 'specrel.visualize.stanimate', 'vis.stanimate', (['stationary'], {'title': 'title', 'tlim': 'tlim', 'xlim': 'xlim', 'grid': 'include_grid', 'legend': 'include_legend'}), '(stationary, title=title, tlim=tlim, xlim=xlim, grid=\n include_grid, legend=include_legend)\n', (720, 814), True, 'import specrel.visualize as vis\n'), ((936, 1083), 'specrel.visualize.stanimate_with_worldline', 'vis.stanimate_with_worldline', (['stationary'], {'title': 'title', 'tlim': 'tlim', 'xlim': 'xlim', 'grid': 'include_grid', 'legend': 'include_legend', 'legend_loc': '"""upper right"""'}), "(stationary, title=title, tlim=tlim, xlim=xlim,\n grid=include_grid, legend=include_legend, legend_loc='upper right')\n", (964, 1083), True, 'import specrel.visualize as vis\n'), ((1213, 1305), 'specrel.spacetime.physical.MovingObject', 'phy.MovingObject', (['(0)'], {'velocity': '(1 / 2)', 'draw_options': "{'color': 'red', 'label': '$v = c/2$'}"}), "(0, velocity=1 / 2, draw_options={'color': 'red', 'label':\n '$v = c/2$'})\n", (1229, 1305), True, 'import specrel.spacetime.physical as phy\n'), ((1312, 1399), 'specrel.spacetime.physical.MovingObject', 'phy.MovingObject', (['(0)'], {'velocity': '(1)', 'draw_options': "{'color': 'gold', 'label': '$v = c$'}"}), "(0, velocity=1, draw_options={'color': 'gold', 'label':\n '$v = c$'})\n", (1328, 1399), True, 'import specrel.spacetime.physical as phy\n'), ((1406, 1500), 'specrel.spacetime.physical.MovingObject', 'phy.MovingObject', (['(0)'], {'velocity': '(3 / 2)', 'draw_options': "{'color': 'cyan', 'label': '$v = 3c/2$'}"}), "(0, velocity=3 / 2, draw_options={'color': 'cyan', 'label':\n '$v = 3c/2$'})\n", (1422, 1500), True, 'import specrel.spacetime.physical as phy\n'), ((1509, 1558), 'specrel.geom.Collection', 'geom.Collection', (['[stationary, moving, light, ftl]'], {}), '([stationary, moving, light, ftl])\n', (1524, 1558), True, 'import specrel.geom as geom\n'), ((1592, 1771), 'specrel.visualize.stanimate_with_worldline', 'vis.stanimate_with_worldline', (['objects'], {'title': 'title', 'current_time_color': '"""magenta"""', 'tlim': 'tlim', 'xlim': 'xlim', 'grid': 'include_grid', 'legend': 'include_legend', 'legend_loc': '"""upper left"""'}), "(objects, title=title, current_time_color=\n 'magenta', tlim=tlim, xlim=xlim, grid=include_grid, legend=\n include_legend, legend_loc='upper left')\n", (1620, 1771), True, 'import specrel.visualize as vis\n'), ((1859, 1951), 'specrel.spacetime.physical.MovingObject', 'phy.MovingObject', (['(-1 / 2)'], {'length': '(1)', 'velocity': '(1 / 2)', 'draw_options': "{'label': 'Meterstick'}"}), "(-1 / 2, length=1, velocity=1 / 2, draw_options={'label':\n 'Meterstick'})\n", (1875, 1951), True, 'import specrel.spacetime.physical as phy\n'), ((2193, 2339), 'specrel.visualize.stanimate_with_worldline', 'vis.stanimate_with_worldline', (['meterstick'], {'title': 'title', 'tlim': 'tlim', 'xlim': 'xlim', 'grid': 'include_grid', 'legend': 'include_legend', 'legend_loc': '"""upper left"""'}), "(meterstick, title=title, tlim=tlim, xlim=xlim,\n grid=include_grid, legend=include_legend, legend_loc='upper left')\n", (2221, 2339), True, 'import specrel.visualize as vis\n')] |
from __future__ import absolute_import
from six.moves.urllib.parse import urlencode
from django.test import RequestFactory
from django.contrib.auth.models import AnonymousUser
from sentry.auth.helper import handle_new_user
from sentry.models import AuthProvider, InviteStatus, OrganizationMember
from sentry.testutils import TestCase
from sentry.utils.compat import mock
class HandleNewUserTest(TestCase):
@mock.patch("sentry.analytics.record")
def test_simple(self, mock_record):
provider = "dummy"
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
auth_provider = AuthProvider.objects.create(
organization=self.organization, provider=provider
)
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
auth_identity = handle_new_user(auth_provider, self.organization, request, identity)
user = auth_identity.user
assert user.email == identity["email"]
assert OrganizationMember.objects.filter(organization=self.organization, user=user).exists()
signup_record = [r for r in mock_record.call_args_list if r[0][0] == "user.signup"]
assert signup_record == [
mock.call(
"user.signup", user_id=user.id, source="sso", provider=provider, referrer="in-app"
)
]
def test_associated_existing_member_invite_by_email(self):
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
member = OrganizationMember.objects.create(
organization=self.organization, email=identity["email"]
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user=auth_identity.user
)
assert assigned_member.id == member.id
def test_associated_existing_member_invite_request(self):
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
member = self.create_member(
organization=self.organization,
email=identity["email"],
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assert OrganizationMember.objects.filter(
organization=self.organization,
user=auth_identity.user,
invite_status=InviteStatus.APPROVED.value,
).exists()
assert not OrganizationMember.objects.filter(id=member.id).exists()
def test_associate_pending_invite(self):
provider = AuthProvider.objects.create(organization=self.organization, provider="dummy")
identity = {"id": "1234", "email": "<EMAIL>", "name": "Morty"}
# The org member invite should have a non matching email, but the
# member id and token will match from the cookie, allowing association
member = OrganizationMember.objects.create(
organization=self.organization, email="<EMAIL>", token="abc"
)
request = RequestFactory().post("/auth/sso/")
request.user = AnonymousUser()
request.COOKIES["pending-invite"] = urlencode(
{"memberId": member.id, "token": member.token, "url": ""}
)
auth_identity = handle_new_user(provider, self.organization, request, identity)
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user=auth_identity.user
)
assert assigned_member.id == member.id
| [
"django.test.RequestFactory",
"sentry.models.OrganizationMember.objects.create",
"sentry.utils.compat.mock.call",
"django.contrib.auth.models.AnonymousUser",
"sentry.models.AuthProvider.objects.create",
"sentry.models.OrganizationMember.objects.get",
"sentry.utils.compat.mock.patch",
"six.moves.urllib.parse.urlencode",
"sentry.models.OrganizationMember.objects.filter",
"sentry.auth.helper.handle_new_user"
]
| [((415, 452), 'sentry.utils.compat.mock.patch', 'mock.patch', (['"""sentry.analytics.record"""'], {}), "('sentry.analytics.record')\n", (425, 452), False, 'from sentry.utils.compat import mock\n'), ((597, 612), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (610, 612), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((638, 716), 'sentry.models.AuthProvider.objects.create', 'AuthProvider.objects.create', ([], {'organization': 'self.organization', 'provider': 'provider'}), '(organization=self.organization, provider=provider)\n', (665, 716), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((835, 903), 'sentry.auth.helper.handle_new_user', 'handle_new_user', (['auth_provider', 'self.organization', 'request', 'identity'], {}), '(auth_provider, self.organization, request, identity)\n', (850, 903), False, 'from sentry.auth.helper import handle_new_user\n'), ((1501, 1516), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (1514, 1516), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((1537, 1614), 'sentry.models.AuthProvider.objects.create', 'AuthProvider.objects.create', ([], {'organization': 'self.organization', 'provider': '"""dummy"""'}), "(organization=self.organization, provider='dummy')\n", (1564, 1614), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((1704, 1799), 'sentry.models.OrganizationMember.objects.create', 'OrganizationMember.objects.create', ([], {'organization': 'self.organization', 'email': "identity['email']"}), "(organization=self.organization, email=\n identity['email'])\n", (1737, 1799), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((1842, 1905), 'sentry.auth.helper.handle_new_user', 'handle_new_user', (['provider', 'self.organization', 'request', 'identity'], {}), '(provider, self.organization, request, identity)\n', (1857, 1905), False, 'from sentry.auth.helper import handle_new_user\n'), ((1933, 2025), 'sentry.models.OrganizationMember.objects.get', 'OrganizationMember.objects.get', ([], {'organization': 'self.organization', 'user': 'auth_identity.user'}), '(organization=self.organization, user=\n auth_identity.user)\n', (1963, 2025), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((2231, 2246), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (2244, 2246), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((2267, 2344), 'sentry.models.AuthProvider.objects.create', 'AuthProvider.objects.create', ([], {'organization': 'self.organization', 'provider': '"""dummy"""'}), "(organization=self.organization, provider='dummy')\n", (2294, 2344), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((2640, 2703), 'sentry.auth.helper.handle_new_user', 'handle_new_user', (['provider', 'self.organization', 'request', 'identity'], {}), '(provider, self.organization, request, identity)\n', (2655, 2703), False, 'from sentry.auth.helper import handle_new_user\n'), ((3052, 3129), 'sentry.models.AuthProvider.objects.create', 'AuthProvider.objects.create', ([], {'organization': 'self.organization', 'provider': '"""dummy"""'}), "(organization=self.organization, provider='dummy')\n", (3079, 3129), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((3372, 3472), 'sentry.models.OrganizationMember.objects.create', 'OrganizationMember.objects.create', ([], {'organization': 'self.organization', 'email': '"""<EMAIL>"""', 'token': '"""abc"""'}), "(organization=self.organization, email=\n '<EMAIL>', token='abc')\n", (3405, 3472), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((3568, 3583), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (3581, 3583), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((3628, 3696), 'six.moves.urllib.parse.urlencode', 'urlencode', (["{'memberId': member.id, 'token': member.token, 'url': ''}"], {}), "({'memberId': member.id, 'token': member.token, 'url': ''})\n", (3637, 3696), False, 'from six.moves.urllib.parse import urlencode\n'), ((3744, 3807), 'sentry.auth.helper.handle_new_user', 'handle_new_user', (['provider', 'self.organization', 'request', 'identity'], {}), '(provider, self.organization, request, identity)\n', (3759, 3807), False, 'from sentry.auth.helper import handle_new_user\n'), ((3835, 3927), 'sentry.models.OrganizationMember.objects.get', 'OrganizationMember.objects.get', ([], {'organization': 'self.organization', 'user': 'auth_identity.user'}), '(organization=self.organization, user=\n auth_identity.user)\n', (3865, 3927), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((538, 554), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (552, 554), False, 'from django.test import RequestFactory\n'), ((1001, 1077), 'sentry.models.OrganizationMember.objects.filter', 'OrganizationMember.objects.filter', ([], {'organization': 'self.organization', 'user': 'user'}), '(organization=self.organization, user=user)\n', (1034, 1077), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((1226, 1323), 'sentry.utils.compat.mock.call', 'mock.call', (['"""user.signup"""'], {'user_id': 'user.id', 'source': '"""sso"""', 'provider': 'provider', 'referrer': '"""in-app"""'}), "('user.signup', user_id=user.id, source='sso', provider=provider,\n referrer='in-app')\n", (1235, 1323), False, 'from sentry.utils.compat import mock\n'), ((1442, 1458), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (1456, 1458), False, 'from django.test import RequestFactory\n'), ((2172, 2188), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (2186, 2188), False, 'from django.test import RequestFactory\n'), ((2720, 2858), 'sentry.models.OrganizationMember.objects.filter', 'OrganizationMember.objects.filter', ([], {'organization': 'self.organization', 'user': 'auth_identity.user', 'invite_status': 'InviteStatus.APPROVED.value'}), '(organization=self.organization, user=\n auth_identity.user, invite_status=InviteStatus.APPROVED.value)\n', (2753, 2858), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n'), ((3509, 3525), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (3523, 3525), False, 'from django.test import RequestFactory\n'), ((2930, 2977), 'sentry.models.OrganizationMember.objects.filter', 'OrganizationMember.objects.filter', ([], {'id': 'member.id'}), '(id=member.id)\n', (2963, 2977), False, 'from sentry.models import AuthProvider, InviteStatus, OrganizationMember\n')] |
import datetime
import requests
from mbta_python.models import Stop, Direction, Schedule, Mode, \
TripSchedule, Alert, StopWithMode, Prediction
HOST = "http://realtime.mbta.com/developer/api/v2"
def datetime_to_epoch(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
return int((dt - epoch).total_seconds())
class MBTASDK(object):
"""Wrapper around calls to the MBTA Realtime API
"""
def __init__(self, api_key):
self.api_key = api_key
def _make_request(self, path, params):
url = "{}/{}".format(HOST, path)
response = requests.get(url, params=params)
data = response.json()
error = data.get("error")
if error:
raise Exception(error["message"])
return response.json()
def get_stops_by_location(self, latitude, longitude):
"""Get a List of Stops sorted by proximity to the given
latitude and longitude
"""
params = {
"lat": latitude,
"lon": longitude,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("stopsbylocation", params)
stops = [Stop(stop_data) for stop_data in data["stop"]]
return stops
def get_stops_by_route(self, route_id):
"""Return a List of Directions for the route_id
that contain a list of Stops that Direction and Route serve
"""
params = {
"route": route_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("stopsbyroute", params)
return [Direction(d) for d in data["direction"]]
def get_routes_by_stop(self, stop_id):
"""Return a list of routes that serve a particular stop
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("routesbystop", params)
return StopWithMode(data)
def get_schedules_by_stop(self, stop_id, route_id=None, direction_id=None,
date=None, max_time=None, max_trips=None):
"""Return scheduled arrivals and departures for a direction and route for a
particular stop.
stop_id - Stop ID
route_id - Route ID, If not included then schedule for all routes
serving the stop will be returned,
direction_id - Direction ID, If included then route must also be
included if not included then schedule for all
directions of the route serving the stop will be
returned
date - Time after which schedule should be returned. If included
then must be within the next seven (7) days
If not included then schedule starting from the current
datetime will be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1 and
100. If not included defaults to 5.
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json",
"route": route_id,
"direction": direction_id,
"datetime": datetime_to_epoch(date) if date else None,
"max_time": max_time,
"max_trips": max_trips
}
data = self._make_request("schedulebystop", params)
return Schedule(data)
def get_schedules_by_routes(self, route_ids, date=None,
max_time=None, max_trips=None):
"""Return the scheduled arrivals and departures in a direction
for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
date - Time after which schedule should be returned. If included
then must be within the next seven (7) days If not included
then schedule starting from the current datetime will
be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1
and 100. If not included defaults to 5.
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"datetime": datetime_to_epoch(date) if date else None,
"max_time": max_time,
"max_trips": max_trips
}
data = self._make_request("schedulebyroutes", params)
return [Mode(m) for m in data["mode"]]
def get_schedules_by_trip(self, trip_id, date=None):
"""Return the scheduled arrivals and departures in a direction
for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
date - Time after which schedule should be returned. If included then
must be within the next seven (7) days. If not included then
schedule starting from the current datetime will be returned
max_time - Defines maximum range of time (in minutes) within which
trips will be returned. If not included defaults to 60.
max_trips - Defines number of trips to return. Integer between 1 and
100. If not included defaults to 5.
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json",
"datetime": datetime_to_epoch(date) if date else None,
}
data = self._make_request("schedulebytrip", params)
return TripSchedule(data)
def get_predictions_by_stop(self, stop_id, include_access_alerts=False,
include_service_alerts=True):
"""Return predicted arrivals and departures in the next hour for a
direction and route for a particular stop.
stop_id - Stop ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
params = {
"stop": stop_id,
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("predictionsbystop", params)
return Prediction(data)
def get_predictions_by_routes(self, route_ids, include_access_alerts=False,
include_service_alerts=True):
"""Return predictions for upcoming trips (including trips already underway)
in a direction for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("predictionsbyroutes", params)
return Prediction(data)
def get_vehicles_by_routes(self, route_ids, include_access_alerts=False,
include_service_alerts=True):
"""Return vehicle positions for upcoming trips (including trips already
underway) in a direction for a particular route or routes.
route_ids - List of Route IDs, or single Route ID
include_access_alerts - Whether or not alerts pertaining to
accessibility (elevators, escalators) should be
returned
include_service_alerts - Whether or not service alerts should be
returned
"""
if not isinstance(route_ids, list):
route_ids = [route_ids]
params = {
"routes": ",".join(route_ids),
"api_key": self.api_key,
"format": "json",
"include_access_alerts": include_access_alerts,
"include_service_alerts": include_service_alerts
}
data = self._make_request("vehiclesbyroutes", params)
return [Mode(m) for m in data]
def get_predictions_by_trip(self, trip_id):
"""Return the predicted arrivals and departures for a particular trip.
trip_id - TripID
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("predictionsbytrip", params)
return TripSchedule(data)
def get_vehicles_by_trip(self, trip_id):
"""Return the predicted vehicle positions for a particular trip.
trip_id - TripID
"""
params = {
"trip": trip_id,
"api_key": self.api_key,
"format": "json"
}
data = self._make_request("vehiclesbytrip", params)
return TripSchedule(data)
| [
"datetime.datetime.utcfromtimestamp",
"mbta_python.models.Stop",
"mbta_python.models.TripSchedule",
"mbta_python.models.Mode",
"mbta_python.models.Direction",
"mbta_python.models.Schedule",
"requests.get",
"mbta_python.models.StopWithMode",
"mbta_python.models.Prediction"
]
| [((242, 279), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(0)'], {}), '(0)\n', (276, 279), False, 'import datetime\n'), ((579, 611), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (591, 611), False, 'import requests\n'), ((1986, 2004), 'mbta_python.models.StopWithMode', 'StopWithMode', (['data'], {}), '(data)\n', (1998, 2004), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((3616, 3630), 'mbta_python.models.Schedule', 'Schedule', (['data'], {}), '(data)\n', (3624, 3630), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((5956, 5974), 'mbta_python.models.TripSchedule', 'TripSchedule', (['data'], {}), '(data)\n', (5968, 5974), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((6908, 6924), 'mbta_python.models.Prediction', 'Prediction', (['data'], {}), '(data)\n', (6918, 6924), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((8008, 8024), 'mbta_python.models.Prediction', 'Prediction', (['data'], {}), '(data)\n', (8018, 8024), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((9497, 9515), 'mbta_python.models.TripSchedule', 'TripSchedule', (['data'], {}), '(data)\n', (9509, 9515), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((9872, 9890), 'mbta_python.models.TripSchedule', 'TripSchedule', (['data'], {}), '(data)\n', (9884, 9890), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((1175, 1190), 'mbta_python.models.Stop', 'Stop', (['stop_data'], {}), '(stop_data)\n', (1179, 1190), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((1627, 1639), 'mbta_python.models.Direction', 'Direction', (['d'], {}), '(d)\n', (1636, 1639), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((4902, 4909), 'mbta_python.models.Mode', 'Mode', (['m'], {}), '(m)\n', (4906, 4909), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n'), ((9106, 9113), 'mbta_python.models.Mode', 'Mode', (['m'], {}), '(m)\n', (9110, 9113), False, 'from mbta_python.models import Stop, Direction, Schedule, Mode, TripSchedule, Alert, StopWithMode, Prediction\n')] |
"""Computation of ensemble anomalies based on a desired value."""
import os
import numpy as np
from scipy import stats
# User-defined packages
from read_netcdf import read_iris, save_n_2d_fields
from sel_season_area import sel_area, sel_season
def ens_anom(filenames, dir_output, name_outputs, varname, numens, season,
area, extreme):
"""Ensemble anomalies.
Computation of the ensemble anomalies based on the desired value
from the input variable (it can be the percentile, mean, maximum, standard
deviation or trend)
OUTPUT: NetCDF files of ensemble mean of climatology, selected value and
anomaly maps.
"""
print('The name of the output files will be <variable>_{0}.txt'
.format(name_outputs))
print('Number of ensemble members: {0}'.format(numens))
outfiles = []
# Reading the netCDF file of 3Dfield, for all the ensemble members
var_ens = []
for ens in range(numens):
ifile = filenames[ens]
# print('ENSEMBLE MEMBER %s' %ens)
var, varunits, lat, lon, dates, _ = read_iris(ifile)
# Convertion from kg m-2 s-1 to mm/day
if varunits == 'kg m-2 s-1':
var = var * 86400 # there are 86400 seconds in a day
varunits = 'mm/day'
# Selecting a season (DJF,DJFM,NDJFM,JJA)
var_season, _ = sel_season(var, dates, season)
# Selecting only [latS-latN, lonW-lonE] box region
var_area, lat_area, lon_area = sel_area(lat, lon, var_season, area)
var_ens.append(var_area)
if varunits == 'kg m-2 s-1':
print('\nPrecipitation rate units were converted from kg m-2 s-1 '
'to mm/day')
print('The variable is {0} ({1})'.format(varname, varunits))
print('Original var shape: (time x lat x lon)={0}'.format(var.shape))
print('var shape after selecting season {0} and area {1}: '
'(time x lat x lon)={2}'.format(season, area, var_area.shape))
if extreme == 'mean':
# Compute the time mean over the entire period, for each ens member
varextreme_ens = [np.nanmean(var_ens[i], axis=0)
for i in range(numens)]
elif len(extreme.split("_")) == 2:
# Compute the chosen percentile over the period, for each ens member
quant = int(extreme.partition("th")[0])
varextreme_ens = [np.nanpercentile(var_ens[i], quant, axis=0)
for i in range(numens)]
elif extreme == 'maximum':
# Compute the maximum value over the period, for each ensemble member
varextreme_ens = [np.nanmax(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'std':
# Compute the standard deviation over the period, for each ens member
varextreme_ens = [np.nanstd(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'trend':
# Compute the linear trend over the period, for each ensemble member
trendmap = np.empty((var_ens[0].shape[1], var_ens[0].shape[2]))
trendmap_ens = []
for i in range(numens):
for jla in range(var_ens[0].shape[1]):
for jlo in range(var_ens[0].shape[2]):
slope, _, _, _, _ = \
stats.linregress(range(var_ens[0].shape[0]),
var_ens[i][:, jla, jlo])
trendmap[jla, jlo] = slope
trendmap_ens.append(trendmap.copy())
varextreme_ens = trendmap_ens
varextreme_ens_np = np.array(varextreme_ens)
print('Anomalies are computed with respect to the {0}'.format(extreme))
# Compute and save the anomalies with respect to the ensemble
ens_anomalies = varextreme_ens_np - np.nanmean(varextreme_ens_np, axis=0)
varsave = 'ens_anomalies'
ofile = os.path.join(dir_output, 'ens_anomalies_{0}.nc'
.format(name_outputs))
# print(ofile)
print('ens_anomalies shape: (numens x lat x lon)={0}'
.format(ens_anomalies.shape))
save_n_2d_fields(lat_area, lon_area, ens_anomalies, varsave,
varunits, ofile)
outfiles.append(ofile)
# Compute and save the climatology
vartimemean_ens = [np.mean(var_ens[i], axis=0) for i in range(numens)]
ens_climatologies = np.array(vartimemean_ens)
varsave = 'ens_climatologies'
ofile = os.path.join(dir_output, 'ens_climatologies_{0}.nc'
.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_climatologies, varsave,
varunits, ofile)
outfiles.append(ofile)
ens_extreme = varextreme_ens_np
varsave = 'ens_extreme'
ofile = os.path.join(dir_output, 'ens_extreme_{0}.nc'.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_extreme, varsave,
varunits, ofile)
outfiles.append(ofile)
return outfiles
| [
"numpy.mean",
"numpy.nanstd",
"numpy.nanpercentile",
"sel_season_area.sel_area",
"read_netcdf.save_n_2d_fields",
"numpy.array",
"numpy.nanmean",
"read_netcdf.read_iris",
"numpy.nanmax",
"numpy.empty",
"sel_season_area.sel_season"
]
| [((3510, 3534), 'numpy.array', 'np.array', (['varextreme_ens'], {}), '(varextreme_ens)\n', (3518, 3534), True, 'import numpy as np\n'), ((4015, 4092), 'read_netcdf.save_n_2d_fields', 'save_n_2d_fields', (['lat_area', 'lon_area', 'ens_anomalies', 'varsave', 'varunits', 'ofile'], {}), '(lat_area, lon_area, ens_anomalies, varsave, varunits, ofile)\n', (4031, 4092), False, 'from read_netcdf import read_iris, save_n_2d_fields\n'), ((4279, 4304), 'numpy.array', 'np.array', (['vartimemean_ens'], {}), '(vartimemean_ens)\n', (4287, 4304), True, 'import numpy as np\n'), ((4455, 4540), 'read_netcdf.save_n_2d_fields', 'save_n_2d_fields', (['lat_area', 'lon_area', 'ens_climatologies', 'varsave', 'varunits', 'ofile'], {}), '(lat_area, lon_area, ens_climatologies, varsave, varunits,\n ofile)\n', (4471, 4540), False, 'from read_netcdf import read_iris, save_n_2d_fields\n'), ((4733, 4808), 'read_netcdf.save_n_2d_fields', 'save_n_2d_fields', (['lat_area', 'lon_area', 'ens_extreme', 'varsave', 'varunits', 'ofile'], {}), '(lat_area, lon_area, ens_extreme, varsave, varunits, ofile)\n', (4749, 4808), False, 'from read_netcdf import read_iris, save_n_2d_fields\n'), ((1071, 1087), 'read_netcdf.read_iris', 'read_iris', (['ifile'], {}), '(ifile)\n', (1080, 1087), False, 'from read_netcdf import read_iris, save_n_2d_fields\n'), ((1346, 1376), 'sel_season_area.sel_season', 'sel_season', (['var', 'dates', 'season'], {}), '(var, dates, season)\n', (1356, 1376), False, 'from sel_season_area import sel_area, sel_season\n'), ((1476, 1512), 'sel_season_area.sel_area', 'sel_area', (['lat', 'lon', 'var_season', 'area'], {}), '(lat, lon, var_season, area)\n', (1484, 1512), False, 'from sel_season_area import sel_area, sel_season\n'), ((3718, 3755), 'numpy.nanmean', 'np.nanmean', (['varextreme_ens_np'], {'axis': '(0)'}), '(varextreme_ens_np, axis=0)\n', (3728, 3755), True, 'import numpy as np\n'), ((4203, 4230), 'numpy.mean', 'np.mean', (['var_ens[i]'], {'axis': '(0)'}), '(var_ens[i], axis=0)\n', (4210, 4230), True, 'import numpy as np\n'), ((2089, 2119), 'numpy.nanmean', 'np.nanmean', (['var_ens[i]'], {'axis': '(0)'}), '(var_ens[i], axis=0)\n', (2099, 2119), True, 'import numpy as np\n'), ((2361, 2404), 'numpy.nanpercentile', 'np.nanpercentile', (['var_ens[i]', 'quant'], {'axis': '(0)'}), '(var_ens[i], quant, axis=0)\n', (2377, 2404), True, 'import numpy as np\n'), ((2591, 2620), 'numpy.nanmax', 'np.nanmax', (['var_ens[i]'], {'axis': '(0)'}), '(var_ens[i], axis=0)\n', (2600, 2620), True, 'import numpy as np\n'), ((2777, 2806), 'numpy.nanstd', 'np.nanstd', (['var_ens[i]'], {'axis': '(0)'}), '(var_ens[i], axis=0)\n', (2786, 2806), True, 'import numpy as np\n'), ((2957, 3009), 'numpy.empty', 'np.empty', (['(var_ens[0].shape[1], var_ens[0].shape[2])'], {}), '((var_ens[0].shape[1], var_ens[0].shape[2]))\n', (2965, 3009), True, 'import numpy as np\n')] |
import csv
from testdata import SOCIALHISTORY_FILE
from testdata import rndDate
from patient import Patient
SMOKINGCODES = {
'428041000124106': 'Current some day smoker',
'266919005' : 'Never smoker',
'449868002' : 'Current every day smoker',
'266927001' : 'Unknown if ever smoked',
'8517006' : 'Former smoker'
}
class SocialHistory(object):
"""Create instances of SocialHistory; also maintains socialHistory by patient id"""
socialHistories = {} # Dictionary of socialHistory by patient ID
@classmethod
def load(cls):
"""Loads patient SocialHistory"""
# Loop through socialHistories and build patient socialHistory lists:
histories = csv.reader(open(SOCIALHISTORY_FILE, 'U'), dialect='excel-tab')
header = next(histories)
for history in histories:
cls(dict(zip(header, history))) # Create a socialHistory instance
def __init__(self, p):
self.pid = p['PID']
self.id = p['ID']
self.smokingStatusCode = p['SMOKINGSTATUSCODE']
self.smokingStatusText = SMOKINGCODES[self.smokingStatusCode]
# Append socialHistory to the patient's socialHistory list:
if self.pid in self.__class__.socialHistories:
raise "Found >1 socialHistory for a patient"
else:
self.__class__.socialHistories[self.pid] = self
def toJSON(self, prefix=""):
if prefix:
prefix += "-"
patient = Patient.mpi[self.pid]
return {
"request": {
"method": "PUT",
"url": "Observation/" + prefix + "smokingstatus-" + self.id
},
"resource": {
"id": prefix + "smokingstatus-" + self.id,
"resourceType": "Observation",
"status": "final",
"identifier": [
{
"use" : "official",
"system": "http://www.bmc.nl/zorgportal/identifiers/observations",
"value" : prefix + self.id
}
],
"text": {
"status": "generated",
"div": '<div xmlns="http://www.w3.org/1999/xhtml">' +
'Tobacco smoking status: %s</div>'%self.smokingStatusText
},
"performer": [
{
"reference": "Practitioner/" + prefix + "Practitioner-" + patient.gp
}
],
"effectiveDateTime": rndDate(2016).isoformat(),
"code": {
"coding": [
{
"system" : "http://loinc.org",
"code" : "72166-2",
"display": "Tobacco smoking status"
}
],
"text": "Tobacco smoking status"
},
"subject": {
"reference": "Patient/" + prefix + self.pid
},
"category": [
{
"coding": [
{
"system" : "http://hl7.org/fhir/observation-category",
"code" : "social-history",
"display": "Social History"
}
],
"text": "Social History"
}
],
"valueCodeableConcept": {
"coding": [
{
"system" : "http://snomed.info/sct",
"code" : self.smokingStatusCode,
"display": self.smokingStatusText
}
],
"text": self.smokingStatusText
}
}
}
| [
"testdata.rndDate"
]
| [((2604, 2617), 'testdata.rndDate', 'rndDate', (['(2016)'], {}), '(2016)\n', (2611, 2617), False, 'from testdata import rndDate\n')] |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
from llnl.util.filesystem import mkdirp, touch
import spack.config
from spack.fetch_strategy import CacheURLFetchStrategy, NoCacheError
from spack.stage import Stage
@pytest.mark.parametrize('_fetch_method', ['curl', 'urllib'])
def test_fetch_missing_cache(tmpdir, _fetch_method):
"""Ensure raise a missing cache file."""
testpath = str(tmpdir)
with spack.config.override('config:url_fetch_method', _fetch_method):
fetcher = CacheURLFetchStrategy(url='file:///not-a-real-cache-file')
with Stage(fetcher, path=testpath):
with pytest.raises(NoCacheError, match=r'No cache'):
fetcher.fetch()
@pytest.mark.parametrize('_fetch_method', ['curl', 'urllib'])
def test_fetch(tmpdir, _fetch_method):
"""Ensure a fetch after expanding is effectively a no-op."""
testpath = str(tmpdir)
cache = os.path.join(testpath, 'cache.tar.gz')
touch(cache)
url = 'file:///{0}'.format(cache)
with spack.config.override('config:url_fetch_method', _fetch_method):
fetcher = CacheURLFetchStrategy(url=url)
with Stage(fetcher, path=testpath) as stage:
source_path = stage.source_path
mkdirp(source_path)
fetcher.fetch()
| [
"llnl.util.filesystem.touch",
"os.path.join",
"pytest.mark.parametrize",
"spack.stage.Stage",
"pytest.raises",
"spack.fetch_strategy.CacheURLFetchStrategy",
"llnl.util.filesystem.mkdirp"
]
| [((394, 454), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""_fetch_method"""', "['curl', 'urllib']"], {}), "('_fetch_method', ['curl', 'urllib'])\n", (417, 454), False, 'import pytest\n'), ((875, 935), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""_fetch_method"""', "['curl', 'urllib']"], {}), "('_fetch_method', ['curl', 'urllib'])\n", (898, 935), False, 'import pytest\n'), ((1079, 1117), 'os.path.join', 'os.path.join', (['testpath', '"""cache.tar.gz"""'], {}), "(testpath, 'cache.tar.gz')\n", (1091, 1117), False, 'import os\n'), ((1122, 1134), 'llnl.util.filesystem.touch', 'touch', (['cache'], {}), '(cache)\n', (1127, 1134), False, 'from llnl.util.filesystem import mkdirp, touch\n'), ((672, 730), 'spack.fetch_strategy.CacheURLFetchStrategy', 'CacheURLFetchStrategy', ([], {'url': '"""file:///not-a-real-cache-file"""'}), "(url='file:///not-a-real-cache-file')\n", (693, 730), False, 'from spack.fetch_strategy import CacheURLFetchStrategy, NoCacheError\n'), ((1265, 1295), 'spack.fetch_strategy.CacheURLFetchStrategy', 'CacheURLFetchStrategy', ([], {'url': 'url'}), '(url=url)\n', (1286, 1295), False, 'from spack.fetch_strategy import CacheURLFetchStrategy, NoCacheError\n'), ((744, 773), 'spack.stage.Stage', 'Stage', (['fetcher'], {'path': 'testpath'}), '(fetcher, path=testpath)\n', (749, 773), False, 'from spack.stage import Stage\n'), ((1309, 1338), 'spack.stage.Stage', 'Stage', (['fetcher'], {'path': 'testpath'}), '(fetcher, path=testpath)\n', (1314, 1338), False, 'from spack.stage import Stage\n'), ((1405, 1424), 'llnl.util.filesystem.mkdirp', 'mkdirp', (['source_path'], {}), '(source_path)\n', (1411, 1424), False, 'from llnl.util.filesystem import mkdirp, touch\n'), ((792, 837), 'pytest.raises', 'pytest.raises', (['NoCacheError'], {'match': '"""No cache"""'}), "(NoCacheError, match='No cache')\n", (805, 837), False, 'import pytest\n')] |
from typing_extensions import Required
#from sqlalchemy.sql.sqltypes import Boolean
from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int
from models.EventsRelated.EventModel import EventModel
from graphqltypes.Utils import extractSession
class EventType(ObjectType):
id = ID()
name = String()
lastchange = DateTime()
externalId = String()
users = List('graphqltypes.User.UserType')
def resolve_users(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.users
groups = List('graphqltypes.Group.GroupType')
def resolve_users(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.groups
rooms = List('graphqltypes.Room.RoomType')
def resolve_rooms(parent, info):
session = extractSession(info)
dbRecord = session.query(EventModel).get(parent.id)
return dbRecord.rooms
| [
"graphene.String",
"graphene.List",
"graphqltypes.Utils.extractSession",
"graphene.ID",
"graphene.DateTime"
]
| [((317, 321), 'graphene.ID', 'ID', ([], {}), '()\n', (319, 321), False, 'from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int\n'), ((333, 341), 'graphene.String', 'String', ([], {}), '()\n', (339, 341), False, 'from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int\n'), ((360, 370), 'graphene.DateTime', 'DateTime', ([], {}), '()\n', (368, 370), False, 'from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int\n'), ((388, 396), 'graphene.String', 'String', ([], {}), '()\n', (394, 396), False, 'from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int\n'), ((410, 444), 'graphene.List', 'List', (['"""graphqltypes.User.UserType"""'], {}), "('graphqltypes.User.UserType')\n", (414, 444), False, 'from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int\n'), ((633, 669), 'graphene.List', 'List', (['"""graphqltypes.Group.GroupType"""'], {}), "('graphqltypes.Group.GroupType')\n", (637, 669), False, 'from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int\n'), ((858, 892), 'graphene.List', 'List', (['"""graphqltypes.Room.RoomType"""'], {}), "('graphqltypes.Room.RoomType')\n", (862, 892), False, 'from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int\n'), ((500, 520), 'graphqltypes.Utils.extractSession', 'extractSession', (['info'], {}), '(info)\n', (514, 520), False, 'from graphqltypes.Utils import extractSession\n'), ((725, 745), 'graphqltypes.Utils.extractSession', 'extractSession', (['info'], {}), '(info)\n', (739, 745), False, 'from graphqltypes.Utils import extractSession\n'), ((948, 968), 'graphqltypes.Utils.extractSession', 'extractSession', (['info'], {}), '(info)\n', (962, 968), False, 'from graphqltypes.Utils import extractSession\n')] |
from django.shortcuts import render
from .models import Disk
import os
def index(request):
context = {}
disk_list = Disk.objects.all()
context['disk_list'] = disk_list
return render(request, 'index.html', context)
#def index(request):
# module_dir = os.path.dirname(__file__)
# file_path = os.path.join(module_dir, 'data.txt')
# disk_list = open(file_path , 'r')
# data = data_file.read()
# context = {'disk_list': data}
# return render(request, 'index.html', context)
| [
"django.shortcuts.render"
]
| [((193, 231), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'context'], {}), "(request, 'index.html', context)\n", (199, 231), False, 'from django.shortcuts import render\n')] |
# Copyright (c) 2021-Present (<NAME>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests as re
import infapy
from infapy.exceptions import InvalidDetailsProvided
class AgentService():
def __init__(self,v3,v3BaseURL,v3SessionID):
self._v3 = v3
self._v3BaseURL = v3BaseURL
self._v3SessionID = v3SessionID
def updateAgentService(self,serviceName, serviceAction, agentId):
url=self._v3BaseURL + "/public/core/v3/agent/service"
headers = {'Content-Type': "application/json", 'Accept': "application/json","INFA-SESSION-ID":self._v3SessionID}
body = {
'serviceName':serviceName,
'serviceAction':serviceAction,
'agentId':agentId}
infapy.log.info("agentService API URL - " + url)
infapy.log.info("API Headers: " + str(headers))
infapy.log.info("Body: " + str(body))
try:
response = re.post(url=url, json=body, headers=headers)
data = response.json()
infapy.log.debug(str(data))
try:
if ("error" in data):
infapy.log.error("Please validate the details passed")
infapy.log.error(str(data))
raise InvalidDetailsProvided
except Exception as e:
infapy.log.exception(e)
raise
except Exception as e:
infapy.log.exception(e)
raise
infapy.log.info(data["message"])
return data | [
"requests.post",
"infapy.log.error",
"infapy.log.exception",
"infapy.log.info"
]
| [((1238, 1286), 'infapy.log.info', 'infapy.log.info', (["('agentService API URL - ' + url)"], {}), "('agentService API URL - ' + url)\n", (1253, 1286), False, 'import infapy\n'), ((1963, 1995), 'infapy.log.info', 'infapy.log.info', (["data['message']"], {}), "(data['message'])\n", (1978, 1995), False, 'import infapy\n'), ((1426, 1470), 'requests.post', 're.post', ([], {'url': 'url', 'json': 'body', 'headers': 'headers'}), '(url=url, json=body, headers=headers)\n', (1433, 1470), True, 'import requests as re\n'), ((1913, 1936), 'infapy.log.exception', 'infapy.log.exception', (['e'], {}), '(e)\n', (1933, 1936), False, 'import infapy\n'), ((1621, 1675), 'infapy.log.error', 'infapy.log.error', (['"""Please validate the details passed"""'], {}), "('Please validate the details passed')\n", (1637, 1675), False, 'import infapy\n'), ((1824, 1847), 'infapy.log.exception', 'infapy.log.exception', (['e'], {}), '(e)\n', (1844, 1847), False, 'import infapy\n')] |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.service_jobs."""
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import test_utils
class ExceptionHandlingServiceJobManagerWrapperTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self._mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
self._mock_service_job_manager.ensure_node_services.return_value = (
service_jobs.ServiceStatus.SUCCESS)
self._mock_service_job_manager.stop_node_services.return_value = True
self._mock_service_job_manager.is_pure_service_node.return_value = True
self._mock_service_job_manager.is_mixed_service_node.return_value = False
self._wrapper = service_jobs.ExceptionHandlingServiceJobManagerWrapper(
self._mock_service_job_manager)
def test_calls_forwarded_to_underlying_instance(self):
self.assertEqual(service_jobs.ServiceStatus.SUCCESS,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self.assertTrue(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self.assertTrue(self._wrapper.is_pure_service_node(mock.Mock(), 'node3'))
self.assertFalse(self._wrapper.is_mixed_service_node(mock.Mock(), 'node4'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
self._mock_service_job_manager.is_pure_service_node.assert_called_once_with(
mock.ANY, 'node3')
self._mock_service_job_manager.is_mixed_service_node.assert_called_once_with(
mock.ANY, 'node4')
def test_ensure_node_services_exception_handling(self):
self._mock_service_job_manager.ensure_node_services.side_effect = RuntimeError(
'test error')
self.assertEqual(service_jobs.ServiceStatus.FAILED,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
def test_stop_node_services_exception_handling(self):
self._mock_service_job_manager.stop_node_services.side_effect = RuntimeError(
'test error')
self.assertFalse(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
if __name__ == '__main__':
tf.test.main()
| [
"absl.testing.absltest.mock.create_autospec",
"tfx.orchestration.experimental.core.service_jobs.ExceptionHandlingServiceJobManagerWrapper",
"absl.testing.absltest.mock.Mock",
"tensorflow.test.main"
]
| [((3178, 3192), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3190, 3192), True, 'import tensorflow as tf\n'), ((998, 1065), 'absl.testing.absltest.mock.create_autospec', 'mock.create_autospec', (['service_jobs.ServiceJobManager'], {'instance': '(True)'}), '(service_jobs.ServiceJobManager, instance=True)\n', (1018, 1065), False, 'from absl.testing.absltest import mock\n'), ((1440, 1531), 'tfx.orchestration.experimental.core.service_jobs.ExceptionHandlingServiceJobManagerWrapper', 'service_jobs.ExceptionHandlingServiceJobManagerWrapper', (['self._mock_service_job_manager'], {}), '(self.\n _mock_service_job_manager)\n', (1494, 1531), False, 'from tfx.orchestration.experimental.core import service_jobs\n'), ((1707, 1718), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1716, 1718), False, 'from absl.testing.absltest import mock\n'), ((1783, 1794), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1792, 1794), False, 'from absl.testing.absltest import mock\n'), ((1861, 1872), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1870, 1872), False, 'from absl.testing.absltest import mock\n'), ((1941, 1952), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1950, 1952), False, 'from absl.testing.absltest import mock\n'), ((2672, 2683), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (2681, 2683), False, 'from absl.testing.absltest import mock\n'), ((3018, 3029), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3027, 3029), False, 'from absl.testing.absltest import mock\n')] |
from api import get_result
import os
import shutil
from glob import glob
from PIL import Image
if __name__ == '__main__':
image_files = glob('./test_images/*.*')
result_dir = './test_results'
if os.path.exists(result_dir):
shutil.rmtree(result_dir)
os.mkdir(result_dir)
txt_file = os.path.join(result_dir, 'result.txt')
txt_f = open(txt_file, 'w')
for image_file in sorted(image_files):
if ".gitkeep" in image_files:
continue
print("Finded file", image_file, end=" ")
result = get_result(Image.open(image_file))
print(":", result)
txt_f.write(image_file.split('/')[-1].split('.')[0] + ':' + result + '\n')
txt_f.close() | [
"os.path.exists",
"PIL.Image.open",
"os.path.join",
"os.mkdir",
"shutil.rmtree",
"glob.glob"
]
| [((141, 166), 'glob.glob', 'glob', (['"""./test_images/*.*"""'], {}), "('./test_images/*.*')\n", (145, 166), False, 'from glob import glob\n'), ((208, 234), 'os.path.exists', 'os.path.exists', (['result_dir'], {}), '(result_dir)\n', (222, 234), False, 'import os\n'), ((274, 294), 'os.mkdir', 'os.mkdir', (['result_dir'], {}), '(result_dir)\n', (282, 294), False, 'import os\n'), ((311, 349), 'os.path.join', 'os.path.join', (['result_dir', '"""result.txt"""'], {}), "(result_dir, 'result.txt')\n", (323, 349), False, 'import os\n'), ((244, 269), 'shutil.rmtree', 'shutil.rmtree', (['result_dir'], {}), '(result_dir)\n', (257, 269), False, 'import shutil\n'), ((563, 585), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (573, 585), False, 'from PIL import Image\n')] |
# Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
import os
import unittest
from unittest.mock import patch
import pendulum
from azure.common import AzureMissingResourceHttpError
from azure.cosmosdb.table.tableservice import TableService
from azure.storage.blob import ContainerProperties
from mag_archiver.azure import create_table
from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, \
hide_if_not_none
class TestMag(unittest.TestCase):
def test_hide_if_not_none(self):
# Test that None is returned for None
value = hide_if_not_none(None)
self.assertEqual(value, None)
# Test that 'hidden' is returned: string
value = hide_if_not_none('hello world')
self.assertEqual(value, 'hidden')
# Test that 'hidden' is returned: integer
value = hide_if_not_none(123)
self.assertEqual(value, 'hidden')
def test_make_mag_query(self):
start_date = pendulum.datetime(year=2020, month=4, day=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
# No parameters
query = make_mag_query()
self.assertEqual(query, '')
# State parameter
query = make_mag_query(state=MagState.discovered)
self.assertEqual(query, "State eq 'discovered'")
query = make_mag_query(state=MagState.archived)
self.assertEqual(query, "State eq 'archived'")
query = make_mag_query(state=MagState.done)
self.assertEqual(query, "State eq 'done'")
# Start date parameter
query = make_mag_query(start_date=start_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z'")
# End date parameter
query = make_mag_query(end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate lt datetime'2020-05-01T00:00Z'")
# Start date, end date and date type
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z' and ReleaseDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z' and DiscoveredDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z' and ArchivedDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z' and DoneDate lt "
"datetime'2020-05-01T00:00Z'")
# State, start date, end date and date type
query = make_mag_query(state=MagState.discovered, start_date=start_date, end_date=end_date,
date_type=MagDateType.discovered)
self.assertEqual(query, "State eq 'discovered' and DiscoveredDate ge datetime'2020-04-01T00:00Z' "
"and DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.archived, start_date=start_date, end_date=end_date,
date_type=MagDateType.archived)
self.assertEqual(query, "State eq 'archived' and ArchivedDate ge datetime'2020-04-01T00:00Z' "
"and ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.done, start_date=start_date, end_date=end_date,
date_type=MagDateType.done)
self.assertEqual(query, "State eq 'done' and DoneDate ge datetime'2020-04-01T00:00Z' "
"and DoneDate lt datetime'2020-05-01T00:00Z'")
def make_mag_release(account_name: str, account_key: str, year: int, month: int, day: int):
min_date = pendulum.datetime(1601, 1, 1)
partition_key_ = 'mag'
row_key_ = f'mag-{year:0>4d}-{month:0>2d}-{day:0>2d}'
state_ = MagState.discovered
task_ = MagTask.not_started
release_date_ = pendulum.datetime(year=year, month=month, day=day)
source_container_ = row_key_
source_container_last_modified_ = pendulum.datetime(year=year, month=month, day=day, hour=1)
release_container_ = ''
release_path_ = ''
discovered_date_ = pendulum.datetime(year=year, month=month, day=day, hour=2)
archived_date_ = min_date
done_date_ = min_date
return MagRelease(partition_key_, row_key_, state_, task_, release_date_, source_container_,
source_container_last_modified_, release_container_, release_path_, discovered_date_,
archived_date_, done_date_, account_name=account_name, account_key=account_key)
class TestMagRelease(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagRelease, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
release = make_mag_release(account_name, secret, 2020, 1, 1)
self.assertIn('account_key=hidden', release.__repr__())
self.assertNotIn(secret, release.__str__())
self.assertNotIn(secret, release.__repr__())
# Check that account_key is None
release = make_mag_release(account_name, None, 2020, 1, 1)
self.assertIn('account_key=None', release.__repr__())
def test_create(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
success = release.create()
self.assertTrue(success)
finally:
release.delete()
def test_delete(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
# Check that we can create and then delete
release.create()
release.delete()
# Check that second delete fails
with self.assertRaises(AzureMissingResourceHttpError):
release.delete()
def test_update(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
release.create()
# Update release
release.state = MagState.archived
release.archived_date = pendulum.utcnow().microsecond_(0)
release.update()
# Verify that release is updated
service = TableService(account_name=self.account_name, account_key=self.account_key)
entity = service.get_entity(MagRelease.TABLE_NAME, release.partition_key, release.row_key)
updated_release = MagRelease.from_entity(entity)
self.assertEqual(release.state, updated_release.state)
self.assertEqual(release.archived_date, updated_release.archived_date)
finally:
release.delete()
def make_containers():
containers = []
cp1 = ContainerProperties()
cp1.name = 'mag-2020-04-17'
cp1.last_modified = pendulum.datetime(year=2020, month=4, day=18)
containers.append(cp1)
cp3 = ContainerProperties()
cp3.name = 'mag-2020-05-01'
cp3.last_modified = pendulum.datetime(year=2020, month=5, day=1)
containers.append(cp3)
cp2 = ContainerProperties()
cp2.name = 'mag-2020-04-24'
cp2.last_modified = pendulum.datetime(year=2020, month=4, day=25)
containers.append(cp2)
return containers
class TestMagArchiverClient(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagArchiverClient, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
client = MagArchiverClient(account_name=account_name, account_key=secret, sas_token=secret)
expected = f'MagArchiverClient(account_name={account_name}, account_key=hidden, sas_token=hidden)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
self.assertNotIn(secret, client.__str__())
self.assertNotIn(secret, client.__repr__())
# Check that account_key and sas_token are None
client = MagArchiverClient(account_name=account_name)
expected = f'MagArchiverClient(account_name={account_name}, account_key=None, sas_token=None)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_containers(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Test that 2 containers are returned when last_modified_thresh=1
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers_out = client.list_containers(last_modified_thresh=1)
self.assertEqual(len(containers_out), 2)
# Test that 3 containers are returned when last_modified_thresh=0
containers_out = client.list_containers(last_modified_thresh=0)
self.assertEqual(len(containers_out), 3)
# Test sort order reverse=False
self.assertEqual(containers_in[0].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[1].name, containers_out[2].name)
# Test sort order reverse=True
containers_out = client.list_containers(last_modified_thresh=0, reverse=True)
self.assertEqual(len(containers_out), 3)
self.assertEqual(containers_in[1].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[0].name, containers_out[2].name)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_update_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 2)
self.assertEqual(num_errors, 0)
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, hour=1)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 3)
self.assertEqual(num_errors, 0)
# Two releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 2)
# 1 release
start_date = pendulum.datetime(year=2020, month=4, day=17, minute=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 1)
# Three releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1, minute=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release, reverse=False)
self.assertEqual(len(releases), 3)
# Sorting reverse=False
self.assertEqual(releases[0].row_key, '2020-04-17')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-05-01')
# Sorting reverse=True
releases = client.list_releases(start_date=start_date, end_date=end_date,
state=MagState.discovered, date_type=MagDateType.release,
reverse=True)
self.assertEqual(releases[0].row_key, '2020-05-01')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-04-17')
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
| [
"mag_archiver.mag.hide_if_not_none",
"mag_archiver.mag.make_mag_query",
"mag_archiver.mag.MagRelease.from_entity",
"mag_archiver.azure.create_table",
"os.getenv",
"pendulum.utcnow",
"azure.storage.blob.ContainerProperties",
"azure.cosmosdb.table.tableservice.TableService",
"pendulum.datetime",
"mag_archiver.mag.MagArchiverClient",
"mag_archiver.mag.MagRelease",
"unittest.mock.patch"
]
| [((5764, 5793), 'pendulum.datetime', 'pendulum.datetime', (['(1601)', '(1)', '(1)'], {}), '(1601, 1, 1)\n', (5781, 5793), False, 'import pendulum\n'), ((5964, 6014), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': 'year', 'month': 'month', 'day': 'day'}), '(year=year, month=month, day=day)\n', (5981, 6014), False, 'import pendulum\n'), ((6086, 6144), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': 'year', 'month': 'month', 'day': 'day', 'hour': '(1)'}), '(year=year, month=month, day=day, hour=1)\n', (6103, 6144), False, 'import pendulum\n'), ((6219, 6277), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': 'year', 'month': 'month', 'day': 'day', 'hour': '(2)'}), '(year=year, month=month, day=day, hour=2)\n', (6236, 6277), False, 'import pendulum\n'), ((6345, 6608), 'mag_archiver.mag.MagRelease', 'MagRelease', (['partition_key_', 'row_key_', 'state_', 'task_', 'release_date_', 'source_container_', 'source_container_last_modified_', 'release_container_', 'release_path_', 'discovered_date_', 'archived_date_', 'done_date_'], {'account_name': 'account_name', 'account_key': 'account_key'}), '(partition_key_, row_key_, state_, task_, release_date_,\n source_container_, source_container_last_modified_, release_container_,\n release_path_, discovered_date_, archived_date_, done_date_,\n account_name=account_name, account_key=account_key)\n', (6355, 6608), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((9084, 9105), 'azure.storage.blob.ContainerProperties', 'ContainerProperties', ([], {}), '()\n', (9103, 9105), False, 'from azure.storage.blob import ContainerProperties\n'), ((9162, 9207), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(18)'}), '(year=2020, month=4, day=18)\n', (9179, 9207), False, 'import pendulum\n'), ((9246, 9267), 'azure.storage.blob.ContainerProperties', 'ContainerProperties', ([], {}), '()\n', (9265, 9267), False, 'from azure.storage.blob import ContainerProperties\n'), ((9324, 9368), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(1)'}), '(year=2020, month=5, day=1)\n', (9341, 9368), False, 'import pendulum\n'), ((9407, 9428), 'azure.storage.blob.ContainerProperties', 'ContainerProperties', ([], {}), '()\n', (9426, 9428), False, 'from azure.storage.blob import ContainerProperties\n'), ((9485, 9530), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(25)'}), '(year=2020, month=4, day=25)\n', (9502, 9530), False, 'import pendulum\n'), ((10900, 10941), 'unittest.mock.patch', 'patch', (['"""mag_archiver.mag.list_containers"""'], {}), "('mag_archiver.mag.list_containers')\n", (10905, 10941), False, 'from unittest.mock import patch\n'), ((10947, 10977), 'unittest.mock.patch', 'patch', (['"""pendulum.datetime.now"""'], {}), "('pendulum.datetime.now')\n", (10952, 10977), False, 'from unittest.mock import patch\n'), ((12424, 12465), 'unittest.mock.patch', 'patch', (['"""mag_archiver.mag.list_containers"""'], {}), "('mag_archiver.mag.list_containers')\n", (12429, 12465), False, 'from unittest.mock import patch\n'), ((12471, 12501), 'unittest.mock.patch', 'patch', (['"""pendulum.datetime.now"""'], {}), "('pendulum.datetime.now')\n", (12476, 12501), False, 'from unittest.mock import patch\n'), ((13523, 13564), 'unittest.mock.patch', 'patch', (['"""mag_archiver.mag.list_containers"""'], {}), "('mag_archiver.mag.list_containers')\n", (13528, 13564), False, 'from unittest.mock import patch\n'), ((13570, 13600), 'unittest.mock.patch', 'patch', (['"""pendulum.datetime.now"""'], {}), "('pendulum.datetime.now')\n", (13575, 13600), False, 'from unittest.mock import patch\n'), ((1151, 1173), 'mag_archiver.mag.hide_if_not_none', 'hide_if_not_none', (['None'], {}), '(None)\n', (1167, 1173), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((1278, 1309), 'mag_archiver.mag.hide_if_not_none', 'hide_if_not_none', (['"""hello world"""'], {}), "('hello world')\n", (1294, 1309), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((1419, 1440), 'mag_archiver.mag.hide_if_not_none', 'hide_if_not_none', (['(123)'], {}), '(123)\n', (1435, 1440), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((1540, 1584), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(1)'}), '(year=2020, month=4, day=1)\n', (1557, 1584), False, 'import pendulum\n'), ((1604, 1648), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(1)'}), '(year=2020, month=5, day=1)\n', (1621, 1648), False, 'import pendulum\n'), ((1690, 1706), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {}), '()\n', (1704, 1706), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((1786, 1827), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'state': 'MagState.discovered'}), '(state=MagState.discovered)\n', (1800, 1827), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((1902, 1941), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'state': 'MagState.archived'}), '(state=MagState.archived)\n', (1916, 1941), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((2014, 2049), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'state': 'MagState.done'}), '(state=MagState.done)\n', (2028, 2049), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((2149, 2217), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'start_date': 'start_date', 'date_type': 'MagDateType.release'}), '(start_date=start_date, date_type=MagDateType.release)\n', (2163, 2217), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((2313, 2384), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'start_date': 'start_date', 'date_type': 'MagDateType.discovered'}), '(start_date=start_date, date_type=MagDateType.discovered)\n', (2327, 2384), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((2483, 2552), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'start_date': 'start_date', 'date_type': 'MagDateType.archived'}), '(start_date=start_date, date_type=MagDateType.archived)\n', (2497, 2552), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((2649, 2714), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'start_date': 'start_date', 'date_type': 'MagDateType.done'}), '(start_date=start_date, date_type=MagDateType.done)\n', (2663, 2714), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((2836, 2900), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'end_date': 'end_date', 'date_type': 'MagDateType.release'}), '(end_date=end_date, date_type=MagDateType.release)\n', (2850, 2900), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((2996, 3063), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'end_date': 'end_date', 'date_type': 'MagDateType.discovered'}), '(end_date=end_date, date_type=MagDateType.discovered)\n', (3010, 3063), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((3162, 3227), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'end_date': 'end_date', 'date_type': 'MagDateType.archived'}), '(end_date=end_date, date_type=MagDateType.archived)\n', (3176, 3227), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((3324, 3385), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'end_date': 'end_date', 'date_type': 'MagDateType.done'}), '(end_date=end_date, date_type=MagDateType.done)\n', (3338, 3385), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((3523, 3615), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'start_date': 'start_date', 'end_date': 'end_date', 'date_type': 'MagDateType.release'}), '(start_date=start_date, end_date=end_date, date_type=\n MagDateType.release)\n', (3537, 3615), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((3788, 3883), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'start_date': 'start_date', 'end_date': 'end_date', 'date_type': 'MagDateType.discovered'}), '(start_date=start_date, end_date=end_date, date_type=\n MagDateType.discovered)\n', (3802, 3883), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((4062, 4155), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'start_date': 'start_date', 'end_date': 'end_date', 'date_type': 'MagDateType.archived'}), '(start_date=start_date, end_date=end_date, date_type=\n MagDateType.archived)\n', (4076, 4155), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((4330, 4419), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'start_date': 'start_date', 'end_date': 'end_date', 'date_type': 'MagDateType.done'}), '(start_date=start_date, end_date=end_date, date_type=\n MagDateType.done)\n', (4344, 4419), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((4638, 4760), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'state': 'MagState.discovered', 'start_date': 'start_date', 'end_date': 'end_date', 'date_type': 'MagDateType.discovered'}), '(state=MagState.discovered, start_date=start_date, end_date=\n end_date, date_type=MagDateType.discovered)\n', (4652, 4760), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((4996, 5114), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'state': 'MagState.archived', 'start_date': 'start_date', 'end_date': 'end_date', 'date_type': 'MagDateType.archived'}), '(state=MagState.archived, start_date=start_date, end_date=\n end_date, date_type=MagDateType.archived)\n', (5010, 5114), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((5344, 5454), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ([], {'state': 'MagState.done', 'start_date': 'start_date', 'end_date': 'end_date', 'date_type': 'MagDateType.done'}), '(state=MagState.done, start_date=start_date, end_date=\n end_date, date_type=MagDateType.done)\n', (5358, 5454), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((6815, 6848), 'os.getenv', 'os.getenv', (['"""STORAGE_ACCOUNT_NAME"""'], {}), "('STORAGE_ACCOUNT_NAME')\n", (6824, 6848), False, 'import os\n'), ((6876, 6908), 'os.getenv', 'os.getenv', (['"""STORAGE_ACCOUNT_KEY"""'], {}), "('STORAGE_ACCOUNT_KEY')\n", (6885, 6908), False, 'import os\n'), ((6917, 6989), 'mag_archiver.azure.create_table', 'create_table', (['self.account_name', 'self.account_key', 'MagRelease.TABLE_NAME'], {}), '(self.account_name, self.account_key, MagRelease.TABLE_NAME)\n', (6929, 6989), False, 'from mag_archiver.azure import create_table\n'), ((9770, 9803), 'os.getenv', 'os.getenv', (['"""STORAGE_ACCOUNT_NAME"""'], {}), "('STORAGE_ACCOUNT_NAME')\n", (9779, 9803), False, 'import os\n'), ((9831, 9863), 'os.getenv', 'os.getenv', (['"""STORAGE_ACCOUNT_KEY"""'], {}), "('STORAGE_ACCOUNT_KEY')\n", (9840, 9863), False, 'import os\n'), ((9872, 9944), 'mag_archiver.azure.create_table', 'create_table', (['self.account_name', 'self.account_key', 'MagRelease.TABLE_NAME'], {}), '(self.account_name, self.account_key, MagRelease.TABLE_NAME)\n', (9884, 9944), False, 'from mag_archiver.azure import create_table\n'), ((10165, 10252), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', ([], {'account_name': 'account_name', 'account_key': 'secret', 'sas_token': 'secret'}), '(account_name=account_name, account_key=secret, sas_token=\n secret)\n', (10182, 10252), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((10639, 10683), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', ([], {'account_name': 'account_name'}), '(account_name=account_name)\n', (10656, 10683), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((11098, 11153), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(1)', 'minute': '(10)'}), '(year=2020, month=5, day=1, minute=10)\n', (11115, 11153), False, 'import pendulum\n'), ((11373, 11452), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', ([], {'account_name': 'self.account_name', 'account_key': 'self.account_key'}), '(account_name=self.account_name, account_key=self.account_key)\n', (11390, 11452), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((12622, 12677), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(1)', 'minute': '(10)'}), '(year=2020, month=5, day=1, minute=10)\n', (12639, 12677), False, 'import pendulum\n'), ((12861, 12940), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', ([], {'account_name': 'self.account_name', 'account_key': 'self.account_key'}), '(account_name=self.account_name, account_key=self.account_key)\n', (12878, 12940), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((13719, 13771), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(1)', 'hour': '(1)'}), '(year=2020, month=5, day=1, hour=1)\n', (13736, 13771), False, 'import pendulum\n'), ((13955, 14034), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', ([], {'account_name': 'self.account_name', 'account_key': 'self.account_key'}), '(account_name=self.account_name, account_key=self.account_key)\n', (13972, 14034), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((8594, 8668), 'azure.cosmosdb.table.tableservice.TableService', 'TableService', ([], {'account_name': 'self.account_name', 'account_key': 'self.account_key'}), '(account_name=self.account_name, account_key=self.account_key)\n', (8606, 8668), False, 'from azure.cosmosdb.table.tableservice import TableService\n'), ((8802, 8832), 'mag_archiver.mag.MagRelease.from_entity', 'MagRelease.from_entity', (['entity'], {}), '(entity)\n', (8824, 8832), False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((13297, 13371), 'azure.cosmosdb.table.tableservice.TableService', 'TableService', ([], {'account_name': 'self.account_name', 'account_key': 'self.account_key'}), '(account_name=self.account_name, account_key=self.account_key)\n', (13309, 13371), False, 'from azure.cosmosdb.table.tableservice import TableService\n'), ((14382, 14427), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(17)'}), '(year=2020, month=4, day=17)\n', (14399, 14427), False, 'import pendulum\n'), ((14451, 14495), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(1)'}), '(year=2020, month=5, day=1)\n', (14468, 14495), False, 'import pendulum\n'), ((14781, 14836), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(17)', 'minute': '(1)'}), '(year=2020, month=4, day=17, minute=1)\n', (14798, 14836), False, 'import pendulum\n'), ((14860, 14904), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(1)'}), '(year=2020, month=5, day=1)\n', (14877, 14904), False, 'import pendulum\n'), ((15195, 15240), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(4)', 'day': '(17)'}), '(year=2020, month=4, day=17)\n', (15212, 15240), False, 'import pendulum\n'), ((15264, 15318), 'pendulum.datetime', 'pendulum.datetime', ([], {'year': '(2020)', 'month': '(5)', 'day': '(1)', 'minute': '(1)'}), '(year=2020, month=5, day=1, minute=1)\n', (15281, 15318), False, 'import pendulum\n'), ((16335, 16409), 'azure.cosmosdb.table.tableservice.TableService', 'TableService', ([], {'account_name': 'self.account_name', 'account_key': 'self.account_key'}), '(account_name=self.account_name, account_key=self.account_key)\n', (16347, 16409), False, 'from azure.cosmosdb.table.tableservice import TableService\n'), ((8463, 8480), 'pendulum.utcnow', 'pendulum.utcnow', ([], {}), '()\n', (8478, 8480), False, 'import pendulum\n')] |
'''
Created on Mar 22, 2018
Edited on Jan 11, 2019
@author: npvance2
@author: curtisd2
Variables that will need to be edited/personalized:
monitorID in Variables() (line 27)
projectStartDate in Variables() (line 28)
projectEndDate in Variables() (line 29)
authToken in getAuthToken() (line 49)
consumer_key in twitterAPI() (line 62)
consumer_secret in twitterAPI() (line 63)
access_token in twitterAPI() (line 64)
access_secret in twitterAPI() (line 65)
'''
from datetime import date, timedelta
import urllib.request
import json
import csv
import tweepy
from tweepy import OAuthHandler
def Variables():
monitorID = "9926183772" # The numerical ID for your Crimson Hexagon monitor
startDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
endDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
variableMap = {}
variableMap['monitorID'] = monitorID
variableMap['startDate'] = startDate
variableMap['endDate'] = endDate
return variableMap
def getURL(): #provides URL for Crimson API
urlStart = "https://api.crimsonhexagon.com/api"
return urlStart
###########
#
# You'll need to generate your own Crimson API key/token from here:
# https://apidocs.crimsonhexagon.com/reference
#
###########
def getAuthToken(): #provides auth token needed to access Crimson API
authToken = ''
authToken = "&auth="+authToken
return authToken
###########
#
# You'll need to add your own Twitter API keys here.
# Instructions on generating API keys: https://developer.twitter.com/en/docs/basics/authentication/guides/access-tokens.html
# API reference guide: https://developer.twitter.com/en/docs/api-reference-index.html
#
###########
def twitterAPI(): #Provides access keys for Twitter API
consumer_key = '2S1Z7Giq0oOf3w0R0sJUPnLFx'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_secret = '<KEY>'
if (consumer_key == '') or (consumer_secret =='') or (access_token =='') or (access_secret ==''):
print("Not all Twitter keys have been entered, please add them to the script and try again")
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
return api
def getTwitterURL(): #provides URL for Twitter api
urlStart = "https://api.twitter.com/1.1/statuses/lookup.json?id="
return urlStart
def DatePull(startdate, enddate):
listArray = []
startdate = date(int(startdate[0:4]), int(startdate[5:7]), int(startdate[8:10]))
enddate = date(int(enddate[0:4]), int(enddate[5:7]), int(enddate[8:10]))
while startdate <= enddate:
listArray.append(str(startdate))
startdate += timedelta(days=1)
return listArray
def main():
monitorID = Variables()['monitorID']
projectStartDate = Variables()['startDate']
projectEndDate = Variables()['endDate']
fPath = "Monitor-"+monitorID+'-from-'+projectStartDate+'-to-'+projectEndDate+'.csv'
lineArray = DatePull(projectStartDate, projectEndDate)
print("------------------------------")
print("MonitorID is "+monitorID)
print(lineArray[0],lineArray[-1])
with open(fPath, 'w', newline = '', encoding = 'utf-8') as f:
writer = csv.writer(f)
header = ["PostType","PostDate","PostTime","URL","TweetID","Contents","RetweetCount","FavoriteCount","Location","Language","Sentiment","NeutralScore","PositiveScore","NegativeScore","Followers","Friends","Author","AuthorGender","AuthorTweets"]
writer.writerow(header)
for i in range(len(lineArray)-1):
print(lineArray[i])
startDate = lineArray[i]
endDate = lineArray[i+1]
dates = "&start="+startDate+"&end="+endDate #Combines start and end date into format needed for API call
urlStart = getURL() #Gets URL
authToken = getAuthToken() #Gets auth token
endpoint = "/monitor/posts?id="; #endpoint needed for this query
extendLimit = "&extendLimit=true" #extends call number from 500 to 10,000
fullContents = "&fullContents=true" #Brings back full contents for Blog and Tumblr posts which are usually truncated around search keywords. This can occasionally disrupt CSV formatting.
urlData = urlStart+endpoint+monitorID+authToken+dates+extendLimit+fullContents #Combines all API calls parts into full URL
webURL = urllib.request.urlopen(urlData)
if (webURL.getcode() == 200):
with open(fPath, 'a', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
data = webURL.read().decode('utf8')
theJSON = json.loads(data)
postDates = [] #These initialize the attributes of the final output
postTimes = []
urls = []
contents = []
authors = []
authorGenders = []
locations = []
languages = []
postTypes = []
sentiments = []
neutralScore = []
positiveScore = []
negativeScore = []
tweetIDs = []
followers = []
friends = []
retweetCounts = []
favoritesCount = []
statusesCount = []
tweetCount = 0
tempTweetIDs = []
api = twitterAPI()
c = 0
for i in theJSON["posts"]:
postDates.append("")
postTimes.append("")
if ('date' in i): #identifies date posted
tempDate = str(i["date"])
dateTime = tempDate.split("T")
postDates[c] = dateTime[0]
postTimes[c] = dateTime[1]
urls.append(i["url"])
contents.append("")
if ('contents' in i): #identifies post contents
contents[c] = i["contents"].replace(",","").replace("\n"," ") #replaces commas and new lines to facilitate CSV formatting, this occasionally missed new lines in some blog posts which I'm working to fix
authors.append("")
if ('author' in i): #identifies author
authors[c] = i["author"].replace(",","")
authorGenders.append("")
if ('authorGender' in i): #identifies author gender
authorGenders[c] = i["authorGender"]
locations.append("")
if ('location' in i): #identifies location
locations[c] = i["location"].replace(",","")
languages.append("")
if ('language' in i): #identifies language specified in the author's profile
languages[c] = i["language"]
postTypes.append(i["type"]) #identifies the type of post, i.e. Twitter, Tumblr, Blog
tweetIDs.append("")
followers.append("")
friends.append("")
retweetCounts.append("")
favoritesCount.append("")
statusesCount.append("")
if postTypes[c] == "Twitter": #if the post type is Twitter it goes through more processing
tweetCount = tweetCount + 1 #counts number of tweets
tweetSplit = urls[c].split("status/") #splits URL to get tweetID
tweetIDs[c] = tweetSplit[1]
tempTweetIDs.append(tweetIDs[c])
if tweetCount == 100: #the max number of TweetIDs in one API call is 100 so a call is run every 100 tweets identified
tweepys = api.statuses_lookup(id_=tempTweetIDs) #call to Twitter API
for tweet in tweepys:
tempID = tweet.id_str #finds tweetsID
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID: #matches tweetID in Twitter API call to tweetID stored from Crimson API
tempDate = str(tweet.created_at).replace(" "," ") #These all fill the matching Crimson attributes to those found in the Twitter API
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0 #clears tweet count for a new 100
tempTweetIDs = [] #clears tweetIDs for next call
sentiments.append("")
neutralScore.append("")
positiveScore.append("")
negativeScore.append("")
if ('categoryScores' in i): #finds sentiment value and matching attribute
for l in i["categoryScores"]:
catName = l["categoryName"]
if catName == "Basic Neutral":
neutralScore[c] = l["score"]
elif catName =="Basic Positive":
positiveScore[c] = l["score"]
elif catName == "Basic Negative":
negativeScore[c] = l["score"]
if neutralScore[c] > positiveScore[c] and neutralScore[c] > negativeScore[c]:
sentiments[c] = "Basic Neutral"
if positiveScore[c] > neutralScore[c] and positiveScore[c] > negativeScore[c]:
sentiments[c] = "Basic Positive"
if negativeScore[c] > positiveScore[c] and negativeScore[c] > neutralScore[c]:
sentiments[c] = "Basic Negative"
c = c + 1
if len(tempTweetIDs) != 0: #after loop the Twitter API call must run one more time to clean up all the tweets since the last 100
try:
tweepys = api.statuses_lookup(id_=tempTweetIDs)
for tweet in tweepys:
tempID = tweet.id_str
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID:
tempDate = str(tweet.created_at).replace(" "," ")
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0
except:
print("Tweepy error: skipping cleanup")
pC = 0
for pDate in postDates: #iterates through the word lists and prints matching posts to CSV
csvRow=[postTypes[pC], pDate, postTimes[pC], urls[pC], str(tweetIDs[pC]), contents[pC].replace("\n"," "), retweetCounts[pC], favoritesCount[pC], locations[pC], languages[pC], sentiments[pC], str(neutralScore[pC]), str(positiveScore[pC]), str(negativeScore[pC]), followers[pC], friends[pC], authors[pC], authorGenders[pC], statusesCount[pC]]
writer.writerow(csvRow)
pC = pC + 1
else:
print("Server Error, No Data" + str(webURL.getcode())) #displays error if Crimson URL fails
if __name__ == '__main__':
main()
| [
"json.loads",
"csv.writer",
"tweepy.API",
"datetime.timedelta",
"tweepy.OAuthHandler"
]
| [((2155, 2198), 'tweepy.OAuthHandler', 'OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (2167, 2198), False, 'from tweepy import OAuthHandler\n'), ((2264, 2337), 'tweepy.API', 'tweepy.API', (['auth'], {'wait_on_rate_limit': '(True)', 'wait_on_rate_limit_notify': '(True)'}), '(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n', (2274, 2337), False, 'import tweepy\n'), ((2811, 2828), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2820, 2828), False, 'from datetime import date, timedelta\n'), ((3363, 3376), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3373, 3376), False, 'import csv\n'), ((4680, 4693), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4690, 4693), False, 'import csv\n'), ((4783, 4799), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (4793, 4799), False, 'import json\n')] |
#! /opt/cloud_sdk/bin/python
import asyncio
import logging
import subprocess
import sys
import citc_cloud
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
log.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
async def main() -> None:
nodespace = citc_cloud.get_nodespace()
keys_file = "/home/slurm/opc_authorized_keys"
with open(keys_file) as kf:
ssh_keys = kf.read()
hosts = subprocess.run(["scontrol", "show", "hostnames", sys.argv[1]], stdout=subprocess.PIPE).stdout.decode().split()
await asyncio.gather(*(
citc_cloud.start_node( log, host, nodespace, ssh_keys)
for host in hosts
))
sys.excepthook = handle_exception
if __name__ == "__main__":
log = logging.getLogger("startnode")
log.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/slurm/elastic.log')
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
finally:
loop.close()
| [
"logging.getLogger",
"logging.Formatter",
"subprocess.run",
"logging.FileHandler",
"citc_cloud.start_node",
"citc_cloud.get_nodespace",
"asyncio.get_event_loop",
"sys.__excepthook__"
]
| [((425, 451), 'citc_cloud.get_nodespace', 'citc_cloud.get_nodespace', ([], {}), '()\n', (449, 451), False, 'import citc_cloud\n'), ((887, 917), 'logging.getLogger', 'logging.getLogger', (['"""startnode"""'], {}), "('startnode')\n", (904, 917), False, 'import logging\n'), ((963, 1012), 'logging.FileHandler', 'logging.FileHandler', (['"""/var/log/slurm/elastic.log"""'], {}), "('/var/log/slurm/elastic.log')\n", (982, 1012), False, 'import logging\n'), ((1029, 1101), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)-10s %(levelname)-8s %(message)s"""'], {}), "('%(asctime)s %(name)-10s %(levelname)-8s %(message)s')\n", (1046, 1101), False, 'import logging\n'), ((1178, 1202), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1200, 1202), False, 'import asyncio\n'), ((224, 278), 'sys.__excepthook__', 'sys.__excepthook__', (['exc_type', 'exc_value', 'exc_traceback'], {}), '(exc_type, exc_value, exc_traceback)\n', (242, 278), False, 'import sys\n'), ((726, 779), 'citc_cloud.start_node', 'citc_cloud.start_node', (['log', 'host', 'nodespace', 'ssh_keys'], {}), '(log, host, nodespace, ssh_keys)\n', (747, 779), False, 'import citc_cloud\n'), ((578, 669), 'subprocess.run', 'subprocess.run', (["['scontrol', 'show', 'hostnames', sys.argv[1]]"], {'stdout': 'subprocess.PIPE'}), "(['scontrol', 'show', 'hostnames', sys.argv[1]], stdout=\n subprocess.PIPE)\n", (592, 669), False, 'import subprocess\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.