code | apis | extract_api
---|---|---|
import os, cv2
import numpy as np
import matplotlib.pyplot as plt
import imgaug as ia
import itertools
from imgaug import augmenters as iaa
from tqdm import tqdm
N_seq = iaa.Sequential([
iaa.Fliplr(0.05),
iaa.Flipud(0.05),
iaa.Dropout(),
iaa.PerspectiveTransform(),
iaa.PiecewiseAffine(),
iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05)),
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1*255))
], random_order=True)
# Each character in DIRR names a directory holding the images for that class
DIRR = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
IMG = []
def filterr(x, y):
    # Keep images whose filename starts with '-' as-is; invert all others
    if x[0] == '-':
        return y
    else:
        return 255 - y
# Augment every image in each class directory and write the results back
for i in tqdm(DIRR):
    IMG = os.listdir(i)
    imgs = np.array([filterr(x, cv2.cvtColor(cv2.resize(cv2.imread(i + '/' + x), (28, 28)),
                                     cv2.COLOR_BGR2GRAY)) for x in IMG])
    normal = N_seq.augment_images(imgs)
    for j in range(len(IMG)):
        cv2.imwrite('{}/A_{}.jpg'.format(i, j), normal[j, :, :])
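# Added sketch (not part of the original script): preview one augmentation to
# sanity-check the pipeline; 'sample_path' is a hypothetical file written above.
def _preview_augmentation(sample_path='0/A_0.jpg'):
    img = cv2.imread(sample_path, cv2.IMREAD_GRAYSCALE)
    aug = N_seq.augment_image(img)
    fig, axes = plt.subplots(1, 2)
    axes[0].imshow(img, cmap='gray')
    axes[0].set_title('original')
    axes[1].imshow(aug, cmap='gray')
    axes[1].set_title('augmented')
    plt.show()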
| [
"imgaug.augmenters.PiecewiseAffine",
"os.listdir",
"imgaug.augmenters.AdditiveGaussianNoise",
"imgaug.augmenters.Flipud",
"tqdm.tqdm",
"imgaug.augmenters.CoarseDropout",
"imgaug.augmenters.PerspectiveTransform",
"imgaug.augmenters.Fliplr",
"cv2.imread",
"imgaug.augmenters.Dropout"
] | [((612, 622), 'tqdm.tqdm', 'tqdm', (['DIRR'], {}), '(DIRR)\n', (616, 622), False, 'from tqdm import tqdm\n'), ((634, 647), 'os.listdir', 'os.listdir', (['i'], {}), '(i)\n', (644, 647), False, 'import os, cv2\n'), ((194, 210), 'imgaug.augmenters.Fliplr', 'iaa.Fliplr', (['(0.05)'], {}), '(0.05)\n', (204, 210), True, 'from imgaug import augmenters as iaa\n'), ((216, 232), 'imgaug.augmenters.Flipud', 'iaa.Flipud', (['(0.05)'], {}), '(0.05)\n', (226, 232), True, 'from imgaug import augmenters as iaa\n'), ((238, 251), 'imgaug.augmenters.Dropout', 'iaa.Dropout', ([], {}), '()\n', (249, 251), True, 'from imgaug import augmenters as iaa\n'), ((257, 283), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {}), '()\n', (281, 283), True, 'from imgaug import augmenters as iaa\n'), ((289, 310), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', ([], {}), '()\n', (308, 310), True, 'from imgaug import augmenters as iaa\n'), ((316, 374), 'imgaug.augmenters.CoarseDropout', 'iaa.CoarseDropout', (['(0.03, 0.15)'], {'size_percent': '(0.02, 0.05)'}), '((0.03, 0.15), size_percent=(0.02, 0.05))\n', (333, 374), True, 'from imgaug import augmenters as iaa\n'), ((380, 436), 'imgaug.augmenters.AdditiveGaussianNoise', 'iaa.AdditiveGaussianNoise', ([], {'loc': '(0)', 'scale': '(0.0, 0.1 * 255)'}), '(loc=0, scale=(0.0, 0.1 * 255))\n', (405, 436), True, 'from imgaug import augmenters as iaa\n'), ((704, 727), 'cv2.imread', 'cv2.imread', (["(i + '/' + x)"], {}), "(i + '/' + x)\n", (714, 727), False, 'import os, cv2\n')] |
# IPython log file
import numpy as np
trees = np.zeros((4, 4), dtype=object)
import itertools
get_ipython().magic('pinfo itertools.combinations')
list(itertools.combinations(range(4), 2))
list(itertools.combinations_with_replacement(range(4), 2))
list(itertools.product(range(4), range(4)))
import pickle
for tr, ts in itertools.product(range(4), range(4)):
if tr != ts:
with open('results-%i-%i.pickle' % (ts, tr), 'rb') as fin:
trees[tr, ts] = pickle.load(fin)
t = trees[0, 1]
m01 = t.get_map(0, 1)
m01 = t.get_map(0.5)
len(np.unique(m01))
len(m01)
from gala import imio
wss = list(map(imio.read_image_stack, ['watershed-%i.lzf.h5' % i for i in range(4)]))
images = imio.read_image_stack('/groups/saalfeld/saalfeldlab/concha/sample_A/crop/raw/*.tif')
images = imio.read_image_stack('/groups/saalfeld/saalfeldlab/concha/sample_A/crop/raw/*.tiff')
images.shape
wss[0].shape
maps = [t.get_map(0.5) for t in [trees[3, 0], trees[2, 1], trees[1, 2], trees[0, 3]]]
segs = [m[ws] for ws in wss]
segs = [m[ws] for m, ws in zip(maps, wss)]
len(maps[0])
np.max(wss[0])
list(map(len, maps))
list(map(np.max, wss))
trees = trees.T
maps = maps[::-1]
segs = [m[ws] for m, ws in zip(maps, wss)]
segs.dtype
segs[0].dtype
images.dtype
seg = np.zeros(images.shape, dtype=np.uint64)
seg[:, :625, :625] = segs[0]
seg[:, :625, 625:] = segs[1]
seg[:, 625:, :625] = segs[2]
seg[:, 625:, 625:] = segs[3]
np.max(segs[0])
np.max(segs[1])
seg[:, :625, 625:] = segs[1] + np.max(segs[0])
seg[:, 625:, :625] = segs[2] + np.max(segs[0]) + np.max(segs[1])
seg[:, 625:, 625:] = segs[3] + np.max(segs[0]) + np.max(segs[1]) + np.max(segs[2])
from gala import imio
imio.write_h5_stack(images, 'gala-corners-seg-50.h5', group='raw')
imio.write_h5_stack(seg, 'gala-corners-seg-50.h5', group='labels')
import h5py
f = h5py.File('gala-corners-seg-50.h5', 'a')
f['/raw'].attrs
f['/raw'].attrs['resolution'] = np.array([12., 1, 1])
f['/labels'].attrs['resolution'] = np.array([12., 1, 1])
f.close()
from gala import evaluate as ev
gts = list(map(imio.read_image_stack, ['ground-truth-%i.lzf.h5' % i for i in range(4)]))
[ev.split_vi(s, gt) for s, gt in zip(segs, gts)]
[ev.split_vi(s, gt) for s, gt in zip(wss, gts)]
def montage_labels_4x(vols):
y, x = vols[0].shape[1:]
newvol = np.empty((vols[0].shape[0], y, x), dtype=np.uint64)
newvol[:, :y, :x] = vols[0]
newvol[:, :y, x:] = vols[1] + sum(map(np.max, vols[:1]))
newvol[:, y:, :x] = vols[2] + sum(map(np.max, vols[:2]))
newvol[:, y:, x:] = vols[3] + sum(map(np.max, vols[:3]))
return newvol
wsvol = montage_labels_4x(wss)
def montage_labels_4x(vols):
y, x = vols[0].shape[1:]
newvol = np.empty((vols[0].shape[0], 2 * y, 2 * x), dtype=np.uint64)
newvol[:, :y, :x] = vols[0]
newvol[:, :y, x:] = vols[1] + sum(map(np.max, vols[:1]))
newvol[:, y:, :x] = vols[2] + sum(map(np.max, vols[:2]))
newvol[:, y:, x:] = vols[3] + sum(map(np.max, vols[:3]))
return newvol
wsvol = montage_labels_4x(wss)
def write_saalfeld(fn, raw, labels, res=np.array([12., 1, 1])):
imio.write_h5_stack(raw, fn, group='raw')
imio.write_h5_stack(labels, fn, group='labels')
f = h5py.File(fn, 'a')
f['/raw'].attrs['resolution'] = res
f['/labels'].attrs['resolution'] = res
f.close()
write_saalfeld('/groups/saalfeld/saalfeldlab/concha/sample_A/juan/corners-fragments.h5', images, wsvol)
[ev.split_vi(ws, s) for ws, s in zip(wss, segs)]
from gala import agglo2
get_ipython().set_next_input('bpss = [agglo2.best_segmentation');get_ipython().magic('pinfo agglo2.best_segmentation')
get_ipython().set_next_input('bpss = [agglo2.best_segmentation');get_ipython().magic('pinfo agglo2.best_segmentation')
bpss = [agglo2.best_segmentation(ws, gt) for ws, gt in zip(wss, gts)]
[ev.split_vi(s, bp) for s, bp in zip(segs, bpss)]
| [
"gala.agglo2.best_segmentation",
"gala.imio.read_image_stack",
"numpy.unique",
"pickle.load",
"h5py.File",
"numpy.max",
"gala.imio.write_h5_stack",
"numpy.zeros",
"numpy.array",
"numpy.empty",
"gala.evaluate.split_vi"
] | [((48, 78), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {'dtype': 'object'}), '((4, 4), dtype=object)\n', (56, 78), True, 'import numpy as np\n'), ((745, 834), 'gala.imio.read_image_stack', 'imio.read_image_stack', (['"""/groups/saalfeld/saalfeldlab/concha/sample_A/crop/raw/*.tif"""'], {}), "(\n '/groups/saalfeld/saalfeldlab/concha/sample_A/crop/raw/*.tif')\n", (766, 834), False, 'from gala import imio\n'), ((839, 929), 'gala.imio.read_image_stack', 'imio.read_image_stack', (['"""/groups/saalfeld/saalfeldlab/concha/sample_A/crop/raw/*.tiff"""'], {}), "(\n '/groups/saalfeld/saalfeldlab/concha/sample_A/crop/raw/*.tiff')\n", (860, 929), False, 'from gala import imio\n'), ((1122, 1136), 'numpy.max', 'np.max', (['wss[0]'], {}), '(wss[0])\n', (1128, 1136), True, 'import numpy as np\n'), ((1302, 1341), 'numpy.zeros', 'np.zeros', (['images.shape'], {'dtype': 'np.uint64'}), '(images.shape, dtype=np.uint64)\n', (1310, 1341), True, 'import numpy as np\n'), ((1458, 1473), 'numpy.max', 'np.max', (['segs[0]'], {}), '(segs[0])\n', (1464, 1473), True, 'import numpy as np\n'), ((1474, 1489), 'numpy.max', 'np.max', (['segs[1]'], {}), '(segs[1])\n', (1480, 1489), True, 'import numpy as np\n'), ((1707, 1773), 'gala.imio.write_h5_stack', 'imio.write_h5_stack', (['images', '"""gala-corners-seg-50.h5"""'], {'group': '"""raw"""'}), "(images, 'gala-corners-seg-50.h5', group='raw')\n", (1726, 1773), False, 'from gala import imio\n'), ((1774, 1840), 'gala.imio.write_h5_stack', 'imio.write_h5_stack', (['seg', '"""gala-corners-seg-50.h5"""'], {'group': '"""labels"""'}), "(seg, 'gala-corners-seg-50.h5', group='labels')\n", (1793, 1840), False, 'from gala import imio\n'), ((1857, 1897), 'h5py.File', 'h5py.File', (['"""gala-corners-seg-50.h5"""', '"""a"""'], {}), "('gala-corners-seg-50.h5', 'a')\n", (1866, 1897), False, 'import h5py\n'), ((1946, 1968), 'numpy.array', 'np.array', (['[12.0, 1, 1]'], {}), '([12.0, 1, 1])\n', (1954, 1968), True, 'import numpy as np\n'), ((2003, 2025), 'numpy.array', 'np.array', (['[12.0, 1, 1]'], {}), '([12.0, 1, 1])\n', (2011, 2025), True, 'import numpy as np\n'), ((603, 617), 'numpy.unique', 'np.unique', (['m01'], {}), '(m01)\n', (612, 617), True, 'import numpy as np\n'), ((1521, 1536), 'numpy.max', 'np.max', (['segs[0]'], {}), '(segs[0])\n', (1527, 1536), True, 'import numpy as np\n'), ((1586, 1601), 'numpy.max', 'np.max', (['segs[1]'], {}), '(segs[1])\n', (1592, 1601), True, 'import numpy as np\n'), ((1669, 1684), 'numpy.max', 'np.max', (['segs[2]'], {}), '(segs[2])\n', (1675, 1684), True, 'import numpy as np\n'), ((2157, 2175), 'gala.evaluate.split_vi', 'ev.split_vi', (['s', 'gt'], {}), '(s, gt)\n', (2168, 2175), True, 'from gala import evaluate as ev\n'), ((2206, 2224), 'gala.evaluate.split_vi', 'ev.split_vi', (['s', 'gt'], {}), '(s, gt)\n', (2217, 2224), True, 'from gala import evaluate as ev\n'), ((2324, 2375), 'numpy.empty', 'np.empty', (['(vols[0].shape[0], y, x)'], {'dtype': 'np.uint64'}), '((vols[0].shape[0], y, x), dtype=np.uint64)\n', (2332, 2375), True, 'import numpy as np\n'), ((2712, 2771), 'numpy.empty', 'np.empty', (['(vols[0].shape[0], 2 * y, 2 * x)'], {'dtype': 'np.uint64'}), '((vols[0].shape[0], 2 * y, 2 * x), dtype=np.uint64)\n', (2720, 2771), True, 'import numpy as np\n'), ((3077, 3099), 'numpy.array', 'np.array', (['[12.0, 1, 1]'], {}), '([12.0, 1, 1])\n', (3085, 3099), True, 'import numpy as np\n'), ((3105, 3146), 'gala.imio.write_h5_stack', 'imio.write_h5_stack', (['raw', 'fn'], {'group': '"""raw"""'}), "(raw, fn, group='raw')\n", (3124, 3146), False, 'from gala 
import imio\n'), ((3151, 3198), 'gala.imio.write_h5_stack', 'imio.write_h5_stack', (['labels', 'fn'], {'group': '"""labels"""'}), "(labels, fn, group='labels')\n", (3170, 3198), False, 'from gala import imio\n'), ((3207, 3225), 'h5py.File', 'h5py.File', (['fn', '"""a"""'], {}), "(fn, 'a')\n", (3216, 3225), False, 'import h5py\n'), ((3433, 3451), 'gala.evaluate.split_vi', 'ev.split_vi', (['ws', 's'], {}), '(ws, s)\n', (3444, 3451), True, 'from gala import evaluate as ev\n'), ((3751, 3783), 'gala.agglo2.best_segmentation', 'agglo2.best_segmentation', (['ws', 'gt'], {}), '(ws, gt)\n', (3775, 3783), False, 'from gala import agglo2\n'), ((3814, 3832), 'gala.evaluate.split_vi', 'ev.split_vi', (['s', 'bp'], {}), '(s, bp)\n', (3825, 3832), True, 'from gala import evaluate as ev\n'), ((1568, 1583), 'numpy.max', 'np.max', (['segs[0]'], {}), '(segs[0])\n', (1574, 1583), True, 'import numpy as np\n'), ((1651, 1666), 'numpy.max', 'np.max', (['segs[1]'], {}), '(segs[1])\n', (1657, 1666), True, 'import numpy as np\n'), ((510, 526), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (521, 526), False, 'import pickle\n'), ((1633, 1648), 'numpy.max', 'np.max', (['segs[0]'], {}), '(segs[0])\n', (1639, 1648), True, 'import numpy as np\n')] |
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from djrest_wrapper.exceptions.apis.errors import *
from tests.models import ExampleModel
from djrest_wrapper.exceptions.apis import errors
class ExampleAPITestCase(APITestCase):
def setUp(self):
pass
def test_create_example(self):
url = reverse('example-list')
data = {
'text': 'some text'
}
response = self.client.post(path=url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIsNotNone(response.json().get(
'data').get('examplemodel', None))
id = response.json().get('data').get('examplemodel').get('id')
self.assertIsNotNone(ExampleModel.objects.get(id=id))
def test_create_example_failure(self):
url = reverse('example-list')
data = {
}
response = self.client.post(path=url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json().get('err'), True)
self.assertEqual(response.json().get('err_code'),
errors.ERR_INPUT_VALIDATION)
def test_list_example(self):
for i in range(20):
ExampleModel.objects.create(text=f'model number {i}')
url = reverse('example-list')
response = self.client.get(path=url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
total_pages = response.json().get('data').get('page').get('total_pages')
for i in range(1, total_pages+1):
models = response.json().get('data').get('examples')
self.assertIsInstance(models, list)
next_page = response.json().get('data').get('page').get('next')
if next_page != None:
response = self.client.get(path=next_page)
else:
break | [
"tests.models.ExampleModel.objects.create",
"tests.models.ExampleModel.objects.get",
"django.urls.reverse"
] | [((375, 398), 'django.urls.reverse', 'reverse', (['"""example-list"""'], {}), "('example-list')\n", (382, 398), False, 'from django.urls import reverse\n'), ((893, 916), 'django.urls.reverse', 'reverse', (['"""example-list"""'], {}), "('example-list')\n", (900, 916), False, 'from django.urls import reverse\n'), ((1409, 1432), 'django.urls.reverse', 'reverse', (['"""example-list"""'], {}), "('example-list')\n", (1416, 1432), False, 'from django.urls import reverse\n'), ((802, 833), 'tests.models.ExampleModel.objects.get', 'ExampleModel.objects.get', ([], {'id': 'id'}), '(id=id)\n', (826, 833), False, 'from tests.models import ExampleModel\n'), ((1340, 1393), 'tests.models.ExampleModel.objects.create', 'ExampleModel.objects.create', ([], {'text': 'f"""model number {i}"""'}), "(text=f'model number {i}')\n", (1367, 1393), False, 'from tests.models import ExampleModel\n')] |
#! /usr/bin/python3
import os
import sys
if len(sys.argv) < 3:
sys.stderr.write("Requires <out>.py <in> [<in>] ....\n")
sys.exit(-1)
if not sys.argv[1].endswith(".py"):
sys.stderr.write("Requires <out>.py <in> [<in>] ....\n")
sys.exit(-1)
out = open(sys.argv[1], "w")
out.write("resources = {\n")
for in_filename in sys.argv[2:]:
    # Read as bytes so that arbitrary (binary) resources are embedded correctly
    with open(in_filename, "rb") as in_file:
        data = in_file.read()
    out.write('"%s" : "' % os.path.basename(in_filename))
    for b in data:
        out.write("\\x%02x" % b)
out.write('",\n')
out.write("}")
out.close()
| [
"sys.stderr.write",
"os.path.basename",
"sys.exit"
] | [((69, 125), 'sys.stderr.write', 'sys.stderr.write', (['"""Requires <out>.py <in> [<in>] ....\n"""'], {}), "('Requires <out>.py <in> [<in>] ....\\n')\n", (85, 125), False, 'import sys\n'), ((130, 142), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (138, 142), False, 'import sys\n'), ((184, 240), 'sys.stderr.write', 'sys.stderr.write', (['"""Requires <out>.py <in> [<in>] ....\n"""'], {}), "('Requires <out>.py <in> [<in>] ....\\n')\n", (200, 240), False, 'import sys\n'), ((245, 257), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (253, 257), False, 'import sys\n'), ((452, 481), 'os.path.basename', 'os.path.basename', (['in_filename'], {}), '(in_filename)\n', (468, 481), False, 'import os\n')] |
from setuptools import setup, find_packages
from rasalit import __version__
base_packages = [
"streamlit>=0.57.3",
"pyyaml>=5.3.1",
"pandas>=1.0.3",
"altair>=4.1.0",
"typer>=0.3.0",
"rasa>=2.0",
"spacy>=2.3.2",
"tensorflow>=2.3.1",
]
dev_packages = ["flake8>=3.6.0", "pytest>=4.0.2", "pre-commit>=2.7.1", "black"]
setup(
name="rasalit",
version=__version__,
packages=find_packages(exclude=["notebooks"]),
install_requires=base_packages,
entry_points={
"console_scripts": [
"rasalit = rasalit.__main__:main",
],
},
package_data={"rasalit": ["html/*/*.html", "data/*.*"]},
extras_require={"dev": dev_packages},
)
| [
"setuptools.find_packages"
] | [((414, 450), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['notebooks']"}), "(exclude=['notebooks'])\n", (427, 450), False, 'from setuptools import setup, find_packages\n')] |
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
README = (HERE / "ReadMe.md").read_text()
setup(
name="dataclasses_ujson",
version="0.0.14",
packages=find_packages(exclude=("tests*","bench_marks.py")),
author="<NAME> ",
author_email="<EMAIL>",
description="fast converter your json to dataclass",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/kislenko-artem/dataclasses-ujson",
license="Apache",
install_requires=[
"ujson>=1.35"
],
python_requires=">=3.7",
extras_require={
"dev": ["pytest"]
},
include_package_data=True,
py_modules=['dataclasses_ujson'],
setup_requires=["pytest-runner"],
tests_require=["pytest"]
)
| [
"setuptools.find_packages",
"pathlib.Path"
] | [((67, 89), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import pathlib\n'), ((212, 263), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests*', 'bench_marks.py')"}), "(exclude=('tests*', 'bench_marks.py'))\n", (225, 263), False, 'from setuptools import setup, find_packages\n')] |
import numpy as np
from astropy.io import ascii
import types
import scipy.interpolate as spi
import astropy.io.fits as pf
import matplotlib.pyplot as plt
import pdb
def read_table(name, delimiter='\t', comment='#', fmt=None, ds=1):
'''
Reads ascii tables and converts them cleanly into numpy arrays.
'''
if fmt is not None:
datanp = ascii.read(name, guess=False, delimiter=delimiter, \
comment=comment, header_start=0, \
data_start=ds, format=fmt)
else:
datanp = ascii.read(name, guess=False, delimiter=delimiter, \
comment=comment, header_start=0, \
data_start=ds)
return datanp
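# Added usage sketch (not part of the original module): 'photometry.txt' is a
# hypothetical tab-delimited file with a one-line column header.
def _example_read_table():
    tbl = read_table('photometry.txt', delimiter='\t', comment='#')
    print(tbl.colnames)   # column names parsed from the header row
    return tbl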
def angsep(ra1deg, dec1deg, ra2deg, dec2deg, angle=False):
'''
Determine separation in degrees between celestial objects.
ra1deg, dec1deg - primary point(s); can be arrays
ra2deg, dec2deg - secondary point(s); can be arrays or scalars
angle - if True, it will calculate angle E of N.
All arguments are in decimal degrees.
Returns distance in arcdegrees, angles between -180 and 180 degrees.
'''
ra1rad = ra1deg * np.pi / 180
dec1rad = dec1deg * np.pi / 180
ra2rad = ra2deg * np.pi / 180
dec2rad = dec2deg * np.pi / 180
# calculate scalar product for determination of angular separation
x = np.cos(ra1rad) * np.cos(dec1rad) * np.cos(ra2rad) * np.cos(dec2rad)
y = np.sin(ra1rad) * np.cos(dec1rad) * np.sin(ra2rad) * np.cos(dec2rad)
z = np.sin(dec1rad) * np.sin(dec2rad)
rad = np.arccos(x + y + z) # Sometimes gives warnings when coords match
# use Pythagoras approximation if rad < 1 arcsec
    sep = np.choose(rad < 0.000004848, (rad, np.sqrt((np.cos(dec1rad) * (ra1rad-ra2rad))**2 \
                                                 + (dec1rad - dec2rad)**2)))
# Angular separation
sep = sep * 180 / np.pi
if angle:
deltaDEC = dec1rad - dec2rad
deltaRA = ra1rad - ra2rad
angledeg = np.arctan2(-deltaRA, -deltaDEC) * 180 / np.pi
return sep, angledeg
else:
return sep
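# Added example (not part of the original module): separation between two
# pointings 1 arcmin apart in declination; the coordinates are arbitrary.
def _example_angsep():
    sep = angsep(150.0, 2.2, 150.0, 2.2 + 1.0 / 60.0)
    sep2, ang = angsep(150.0, 2.2, 150.0, 2.2 + 1.0 / 60.0, angle=True)
    print(sep, sep2, ang)   # both separations ~0.0167 deg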
def deg2sex(ras, decs):
''' Converts RA and DEC from decimal to sexagesimal. Returns string.
Arguments:
ras - string(s) of RA in degrees
decs - string(s) of DEC in degrees
'''
from astropy import units as u
from astropy.coordinates import SkyCoord
if type(ras) == list or type(ras) == np.ndarray:
new_coords = []
for irow in range(0,len(ras)):
c = SkyCoord(float(ras[irow]), float(decs[irow]), \
frame='icrs', unit='deg')
new_coords.append(c.to_string('hmsdms'))
else:
c = SkyCoord(float(ras), float(decs), frame='icrs', unit='deg')
new_coords = c.to_string('hmsdms')
return new_coords
def sex2deg(ras, decs):
''' Converts RA and DEC from sexagesimal to decimal.
Arguments:
ras - string(s) of RA in sexagesimal degrees (HH MM SS.SS)
decs - string(s) of DEC in sexagesimal degrees (+-DD MM SS.SS)
'''
if type(ras) == list or type(ras) == np.ndarray:
new_ras = []
new_decs = []
for irow in range(0,len(ras)):
parts_ra = ras[irow].rsplit(' ')
if len(parts_ra) == 1:
parts_ra = ras[irow].rsplit(':')
parts_dec = decs[irow].rsplit(' ')
if len(parts_dec) == 1:
                parts_dec = decs[irow].rsplit(':')
ra_deg = float(parts_ra[0]) * 15. + float(parts_ra[1]) / 4. + float(parts_ra[2]) / 240.
dec_deg = float(parts_dec[0]) + float(parts_dec[1]) / 60. + float(parts_dec[2]) / 3600.
new_ras.append(ra_deg)
new_decs.append(dec_deg)
new_ras = np.array(new_ras)
new_decs = np.array(new_decs)
return new_ras, new_decs
else:
parts_ra = ras.rsplit(' ')
if len(parts_ra) == 1:
parts_ra = ras.rsplit(':')
parts_dec = decs.rsplit(' ')
if len(parts_dec) == 1:
parts_dec = decs.rsplit(':')
ra_deg = float(parts_ra[0]) * 15. + float(parts_ra[1]) / 4. + float(parts_ra[2]) / 240.
dec_deg = float(parts_dec[0]) + float(parts_dec[1]) / 60. + float(parts_dec[2]) / 3600.
return ra_deg, dec_deg
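# Added example (not part of the original module): round trip between decimal
# and sexagesimal coordinates; the values are arbitrary illustrative ones.
def _example_coord_roundtrip():
    hmsdms = deg2sex(187.70593, 12.39112)
    print(hmsdms)                     # e.g. '12h30m49.4232s +12d23m28.032s'
    ra_deg, dec_deg = sex2deg('12 30 49.42', '+12 23 28.03')
    print(ra_deg, dec_deg)            # back to ~187.706, ~12.391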
def matchsorted(ra, dec, ra1, dec1, tol, angle=False, closest=True):
''' Find closest ra,dec within tol to a target in an ra-sorted list of ra,dec.
Arguments:
ra - Right Ascension decimal degrees (numpy sorted in ascending order)
dec - Declination decimal degrees (numpy array)
ra1 - RA to match (scalar, decimal degrees)
dec1 - Dec to match (scalar, decimal degrees)
tol - Matching tolerance in arcseconds.
angle - Boolean, whether to return angle formed by matched sources.
closest - Boolean, whether to return only the closest match.
Returns:
ibest - index of the (best) match(es) within tol; -1 if no match within tol
sep - separation (defaults to tol if no match within tol)
angle - angle (defaults to 0 if no match within tol)
'''
tol = tol / 3600.
if isinstance(tol, float):
# Case for one tolerance radius for all objects
i1 = np.searchsorted(ra, ra1 - tol) - 5
i2 = np.searchsorted(ra, ra1 + tol) + 5
else:
# Case for one tolerance radius for each object
i1 = np.searchsorted(ra + tol, ra1) - 5
i2 = np.searchsorted(ra - tol, ra1) + 5
if i1 < 0:
i1 = 0
if angle:
sep, ang = angsep(ra[i1:i2], dec[i1:i2], ra1, dec1, angle=angle)
else:
sep = angsep(ra[i1:i2], dec[i1:i2], ra1, dec1, angle=angle)
if isinstance(tol, float):
imatches = np.where(sep < tol)[0]
else:
imatches = np.where(sep < tol[i1:i2])[0]
if len(imatches) == 0:
if angle:
return [-1], [tol * 3600.], [0]
else:
return [-1], [tol * 3600.]
ibest = np.argmin(sep[imatches])
#indices = np.argsort(sep)
#if sep[indices[0]] > tol:
# if angle:
# return -1, tol * 3600., 0
# else:
# return -1, tol * 3600.
#ibest = indices[0] + i1
#imult = indices[np.where(sep[indices] < tol)[0]] + i1
#imult = np.where(sep < tol)[0]
if angle:
if closest:
return [imatches[ibest] + i1], [sep[imatches][ibest] * 3600.], \
[ang[imatches[ibest]]]
else:
return imatches + i1, sep[imatches] * 3600., ang[imatches]
else:
if closest:
return [imatches[ibest] + i1], [sep[imatches][ibest] * 3600.]
else:
return imatches + i1, sep[imatches] * 3600.
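# Added example (not part of the original module): match one target against a
# small RA-sorted catalog with a 2-arcsecond tolerance; values are illustrative.
def _example_matchsorted():
    ra = np.array([10.0000, 10.0005, 10.0010])    # degrees, sorted ascending
    dec = np.array([-5.0000, -5.0001, -5.0002])
    idx, sep = matchsorted(ra, dec, 10.00051, -5.00011, tol=2.0)
    print(idx, sep)   # index of the closest match and its separation in arcsec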
def smooth(x,window_len=11,window='hanning'):
"""
smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len - 1:0:-1],x,x[-1:-window_len:-1]]
if window == 'flat': #moving average
w = np.ones(window_len, 'd')
else:
w = eval('np.' + window + '(window_len)')
y = np.convolve(w / w.sum(), s, mode='valid')
return y[int(window_len / 2 - 1):-int(window_len / 2)]
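# Added example (not part of the original module): smooth a noisy sine wave with
# the default Hanning window.
def _example_smooth():
    t = np.linspace(-2, 2, 200)
    x = np.sin(t) + np.random.randn(len(t)) * 0.1
    y = smooth(x, window_len=11)
    print(len(x), len(y))   # output is one sample longer than the input (see NOTE above)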
def mean_comb(spectra, weights=None, mask=None, robust=None, forcesimple=False, extremes=False, renormalize=False):
'''
    (by <NAME>)
Combine spectra using a (weighted) mean. The output is a python list with mask wavelength in position 0, mean flux in position 1, and variance in position 2. If flux uncertainties are given, then mean is a weighted mean, and variance is the "variance of the mean" (|sigma| :sub:`mean` :sup:`2`). If no flux uncertainties are given, then mean is a straight mean (<x>), and variance is the square of the standard error of the mean (|sigma| :sup:`2`/n). If no mask is given, the wavelength array of the first spectrum will be used as mask.
This function mimics IDL mc_meancomb (by <NAME>), with some restrictions.
*spectra*
Python list of spectra, where each spectrum is an array having wavelength in position 0, flux in position 1, and optional uncertainties in position 2.
*weights*
List of weights corresponding to each spectrum (must add up to one). If none, then each spectrum is assumed to have the same weight. THIS ONLY WORKS IF I GIVE IT TWO SPECTRA.
*mask*
Array of wavelengths to be used as mask for all spectra. If none, then the wavelength array of the first spectrum is used as mask.
*robust*
Float, the sigma threshold to throw bad flux data points out. If none given, then all flux data points will be used.
*forcesimple*
Boolean, whether to calculate a straight mean and variance even if weights are available.
*extremes*
Boolean, whether to include the min and max flux values at each masked pixel.
*renormalize*
Boolean, whether to re-normalized the spectra agains the calculated combined spectrum, in which case the spectra will be returned in a list, with masked values.
'''
# Check inputs
try:
spectra[0]
except TypeError:
print('Spectra invalid.')
return
if mask is not None:
try:
mask[0]
except TypeError:
print('Mask invalid.')
return
if robust is not None:
try:
float(robust)
except TypeError:
print('Robust invalid.')
return
# 1. Generate mask using the first spectrum given
if mask is None:
# Use x-axis (i.e. wl) values of first spectrum as mask for all others
wl_mask = spectra[0][0]
else:
wl_mask = mask
numPoints = len(wl_mask)
numSpec = len(spectra)
# 2. Check if uncertainties were given
uncsGiven = True
if forcesimple:
uncsGiven = False
for spec in spectra:
if uncsGiven:
try:
uncs = spec[2]
except IndexError:
uncsGiven = False
continue
nanIdx = np.where(np.isfinite(uncs))
if len(nanIdx[0]) == 0:
uncsGiven = False
# 3D-array that will hold interpolated spectra
# (it omits wavelength dimension, since all spectra have the same one)
if uncsGiven:
dims = 2
else:
dims = 1
ip_spectra = np.zeros((numPoints, dims, numSpec)) * np.nan
# 3. Interpolate spectra using mask
for spIdx, spec in enumerate(spectra):
wl = spec[0]
fluxRaw= spec[1]
if uncsGiven:
unc = spec[2]
# Eliminate outliers if requested
if robust is not None:
flux = clean_outliers(fluxRaw, robust)
else:
flux = fluxRaw
if spIdx == 0:
# No need to interpolate first spectrum
flux_new = flux
if uncsGiven:
unc_new = unc
else:
ip_func_flux = spi.interp1d(wl, flux, bounds_error=False)
flux_new = ip_func_flux(wl_mask.tolist())
if uncsGiven:
ip_func_unc = spi.interp1d(wl, unc, bounds_error=False)
unc_new = ip_func_unc(wl_mask.tolist())
ip_spectra[:,0,spIdx] = flux_new
if uncsGiven:
ip_spectra[:,1,spIdx] = unc_new
# 4. Calculate mean and variance of flux values
if weights is None:
wgts = np.ones(len(spectra))
else:
wgts = weights
if uncsGiven:
mvarraw = 1. / np.nansum(1. / (wgts * ip_spectra[:,1,:]), axis=1) # 1/Sum(1/sigma_i^2)
wmean = np.nansum(wgts * ip_spectra[:,0,:] / ip_spectra[:,1,:], axis=1) # Sum(x_i/sigma_i^2)
mean = wmean * mvarraw
mvar = mvarraw
# Correct weighted sample variance for small sample
#meantile = np.tile(mean, (numSpec,1)).T
#V1 = 1 / mvarraw
#V2 = np.nansum(ip_spectra[:,1,:]**2, axis=1)
#mvar = V1 / (V1**2 - V2) * \
# np.nansum((ip_spectra[:,0,:] - meantile)**2 / ip_spectra[:,1,:], axis=1)
else:
mvar = np.nanstd(ip_spectra[:,0,:], axis=1) ** 2 # /numSpec -- I think I dont need this
mean = np.nanmean(ip_spectra[:,0,:], axis=1)
# 5. Calculate extreme flux values if requested
if extremes:
minF = np.nanmin(ip_spectra[:,0,:], axis=1)
maxF = np.nanmax(ip_spectra[:,0,:], axis=1)
# 5. Create the combined spectrum
if extremes:
specComb = [wl_mask, mean, mvar, minF, maxF]
else:
specComb = [wl_mask, mean, mvar]
# 6. Re-normalize spectra to calculated combined spectrum, if requested
if renormalize:
renorm_spectra = []
for ispec in range(0, numSpec):
tmpflux = ip_spectra[:,0,ispec]
renormfac = np.median(tmpflux / mean) # mean is the flux of the combined spectrum
if uncsGiven:
tmpunc = ip_spectra[:,1,ispec]
renorm_spectra.append([wl_mask, tmpflux / renormfac, tmpunc / renormfac])
else:
renorm_spectra.append([wl_mask, tmpflux / renormfac])
return specComb, renorm_spectra
else:
return specComb
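# Added example (not part of the original module): combine two synthetic spectra
# on a shared wavelength grid; the uncertainties trigger the weighted-mean branch.
def _example_mean_comb():
    wl = np.linspace(1.0, 2.5, 100)
    spec1 = [wl, np.ones(100), np.full(100, 0.1)]
    spec2 = [wl, 2 * np.ones(100), np.full(100, 0.1)]
    combo = mean_comb([spec1, spec2])
    print(combo[1][:3])   # weighted mean flux, ~1.5 everywhere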
def norm_spec(specData, limits, flag=False):
'''
    (by <NAME>)
Normalize a spectrum using a band (i.e. a portion) of the spectrum specified by *limits*.
*specData*
Spectrum as a Python list with wavelength in position 0, flux in position 1, and (optional) error values in position 2. More than one spectrum can be provided simultaneously, in which case *specData* shall be a list of lists.
*limits*
Python list with lower limit in position 0 and upper limit in position 1. If more than one spectrum provided, these limits will be applied to all spectra.
*flag*
Boolean, whether to warn if normalization limits were shrinked in the case when they fall outside spectrum. If set to *True*, *norm_spec* returns the normalized spectra AND a boolean flag.
'''
# Convert specData to list or spectra if it consists only of one
if len(specData) <= 3 and len(specData[0]) > 10:
specData = [specData]
# Initialize objects
finalData = [None] * len(specData)
# Check that given limits are reasonable
if limits[0] >= limits[1]:
print('norm_spec: the Min and Max values specified are not reasonable.')
return None
# Re-define normalizing band (specified in limits) for each spectrum in case
# the limits fall outside of the spectrum range
all_lims = [None] * len(specData)
flagged = False
for spIdx, spData in enumerate(specData):
smallest = limits[0]
largest = limits[1]
if spData is None:
continue
tmpNans = np.where(np.isfinite(spData[1]))
if len(tmpNans[0]) != 0:
if spData[0][tmpNans[0][0]] > smallest:
smallest = spData[0][tmpNans[0][0]]
flagged = True
if spData[0][tmpNans[0][-1]] < largest:
largest = spData[0][tmpNans[0][-1]]
flagged = True
all_lims[spIdx] = [smallest, largest]
lims = [smallest, largest]
# Loop through each spectral data set
for spIdx, spData in enumerate(specData):
# 1) Skip if data is missing
if spData is None:
continue
# 2) Determine if spectra come with error values
if len(spData) == 3:
errors = True
else:
errors = False
# 3) Determine minimum wavelength value for band
smallIdx = np.where(spData[0] < all_lims[spIdx][0])
# If lower limit < all values in spectrum wavelength points, then
# make band's minimum value = first data point in spectrum
try:
smallIdx[0]
except IndexError:
minIdx = 0
smallIdx = [None]
# If lower limit > all values in spectrum wavelength points, then
# no band can be selected
if smallIdx != [None]:
if len(smallIdx[0]) == len(spData[0]):
print('norm_spec: the wavelength data for object is outside limits.' )
continue
else:
minIdx = smallIdx[0][-1] + 1
# 4) Determine maximum wavelength value for band
largeIdx = np.where(spData[0] > all_lims[spIdx][1])
# If upper limit > all values in spectrum wavelength points, then
# make band's maximum value = last data point in spectrum
try:
largeIdx[0]
except IndexError:
maxIdx = len(spData[0])
largeIdx = [None]
# If upper limit < all values in spectrum wavelength points, then
# no band can be selected
if largeIdx != [None]:
if len(largeIdx[0]) == len(spData[0]):
print('norm_spec: the wavelength data for object is outside limits.')
continue
else:
maxIdx = largeIdx[0][0]
# 5) Check for consistency in the computed band limits
if maxIdx - minIdx < 2:
print('norm_spec: The Min and Max values specified yield no band.')
continue
# 6) Select flux band from spectrum
fluxSelect = spData[1][minIdx:maxIdx]
fluxSelect = np.array(fluxSelect)
# 7) Select error value band from spectrum
if errors is True:
errorSelect = spData[2][minIdx:maxIdx]
errorSelect = np.array(errorSelect)
# 8) Normalize spectrum using arithmetic mean
notNans = np.where(np.isfinite(fluxSelect))
avgFlux = np.mean(fluxSelect[notNans])
finalFlux = spData[1] / avgFlux
finalData[spIdx] = [spData[0], finalFlux]
if errors is True:
#notNans = np.where(np.isfinite(errorSelect))
#avgError = np.mean(errorSelect[notNans])
finalErrors = spData[2] / avgFlux
finalData[spIdx] = [spData[0], finalFlux, finalErrors]
if flag:
return finalData, flagged
else:
return finalData
def read_spec(specFiles, errors=True, atomicron=False, negtonan=False, plot=False, linear=False, templ=False, verbose=True, header=False):
'''
    (by <NAME>, <NAME>)
Read spectral data from fits or ascii files. It returns a list of numpy arrays with wavelength in position 0, flux in position 1 and error values (if requested) in position 2. More than one file name can be provided simultaneously.
**Limitations**: Due to a lack of set framework for ascii file headers, this function assumes ascii files to have wavelength in column 1, flux in column 2, and (optional) error in column 3. Ascii spectra are assumed to be linear, so the kwarg *linear* is disabled for ascii files. Fits files that have multiple spectral orders will not be interpreted correctly with this function.
*specFiles*
String with fits file name (with full path); it can also be a python list of file names.
*errors*
Boolean, whether to return error values for the flux data; return nans if unavailable.
*atomicron*
Boolean, if wavelength units are in Angstrom, whether to convert them to microns.
*negtonan*
Boolean, whether to set negative flux values equal to zero.
*plot*
Boolean, whether to plot the spectral data, including error bars when available.
*linear*
Boolean, whether to return spectrum only if it is linear. If it cannot verify linearity, it will assume linearity.
*templ*
Boolean, whether data to extract is of a template spectrum, which means it includes avg flux, flux variance, min and max flux at each wavelength.
*verbose*
Boolean, whether to print warning messages.
*header*
Boolean, whether to also return the fits file header.
'''
# 1. Convert specFiles into a list type if it is only one file name
if isinstance(specFiles, str):
specFiles = [specFiles,]
try:
specFiles[0]
except TypeError:
print('File name(s) in invalid format.')
return
# 2. Initialize array to store spectra
specData = [None] * len(specFiles)
# 3. Loop through each file name:
for spFileIdx,spFile in enumerate(specFiles):
if spFile is None: continue
# 3.1 Determine the type of file it is
isFits = False
ext = spFile[-4:].lower()
if ext == 'fits' or ext == '.fit':
isFits = True
# 3.2. Get data from file
if isFits:
isSDSS = False
isLAMOST = False
try:
# Determine table index to extract the data
tmpHead = pf.getheader(spFile, ext=0)
# Telescope exceptions
try:
tmptelescope = tmpHead['TELESCOP'].upper()
except KeyError:
tmptelescope = ''
if tmptelescope.find('SDSS') != -1:
isSDSS = True
tmpext = 1
if tmptelescope.find('LAMOST') != -1:
isLAMOST = True
if not isSDSS:
if tmpHead['NAXIS'] == 0:
try:
if tmpHead['NAXIS1'] < 100:
tmpext = 2
else:
tmpext = 1
except KeyError:
tmpext = 1
else:
tmpext = 0
fitsData = pf.getdata(spFile, ext=tmpext)
except IOError:
print('Could not open ' + str(spFile) + '.')
continue
# Re-shape SDSS data array to make it compatible with the rest of this code
if isSDSS:
fitsData = np.array(fitsData.tolist()).T
# Now determine the table index to extract header info with wavelength solution
tmpHead = pf.getheader(spFile, ext=tmpext)
if isSDSS:
fitsHeader = pf.getheader(spFile, ext=0)
else:
fitsHeader = tmpHead.copy()
# Assume ascii file otherwise
else:
try:
aData = ascii.read(spFile)
specData[spFileIdx] = [aData[0].tonumpy(), aData[1].tonumpy()]
if len(aData) >= 3 and errors:
specData[spFileIdx].append(aData[2].tonumpy())
except IOError:
print('Could not open ' + str(spFile) + '.')
continue
# 3.3. Check if data in fits file is linear
if isFits:
KEY_TYPE = ['CTYPE1']
setType = set(KEY_TYPE).intersection(set(fitsHeader.keys()))
if len(setType) == 0:
if verbose:
print('Data in ' + spFile + ' assumed to be linear.')
isLinear = True
else:
valType = fitsHeader[setType.pop()]
if valType.strip().upper() == 'LINEAR':
isLinear = True
else:
isLinear = False
if linear and not isLinear:
if verbose:
print('Data in ' + spFile + ' is not linear.')
return
# 3.4. Get wl, flux & error data from fits file
# (returns wl in pos. 0, flux in pos. 1, error values in pos. 2)
# (If template spec: min flux in pos. 3, max flux in pos. 4)
if isFits:
specData[spFileIdx] = __get_spec(fitsData, fitsHeader, spFile, errors, \
templ=templ, verb=verbose)
if specData[spFileIdx] is None:
continue
# Generate wl axis when needed
if specData[spFileIdx][0] is None:
specData[spFileIdx][0] = __create_waxis(fitsHeader, \
len(specData[spFileIdx][1]), spFile, \
verb=verbose)
# If no wl axis generated, then clear out all retrieved data for object
if specData[spFileIdx][0] is None:
specData[spFileIdx] = None
continue
# 3.5. Convert units in wl-axis from Angstrom into microns if desired
if atomicron:
if specData[spFileIdx][0][-1] > 8000:
specData[spFileIdx][0] = specData[spFileIdx][0] / 10000
# 3.6. Set negative flux values equal to zero (next step sets them to nans)
if negtonan:
negIdx = np.where(specData[spFileIdx][1] < 0)
if len(negIdx[0]) > 0:
specData[spFileIdx][1][negIdx] = 0
if verbose:
print('%i negative data points found in %s.' \
% (len(negIdx[0]), spFile))
# 3.7. Set zero flux values as nans (do this always)
zeros = np.where(specData[spFileIdx][1] == 0)
if len(zeros[0]) > 0:
specData[spFileIdx][1][zeros] = np.nan
# 4. Plot the spectra if desired
if plot:
plot_spec(specData, ploterrors=True)
# 5. Clear up memory
fitsData = ''
if header:
return specData, fitsHeader
else:
return specData
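# Added usage sketch (not part of the original module): read two spectra from
# fits files; the file names below are hypothetical.
def _example_read_spec():
    specs = read_spec(['target1.fits', 'target2.fits'], errors=True, atomicron=True)
    for sp in specs:
        if sp is not None:
            print(len(sp[0]), len(sp[1]))   # wavelength and flux array lengths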
def snr(spec, rng=None):
'''
    (by <NAME>)
Calculate signal-to-noise in a spectrum.
*spec*
Spectrum as a Python list with wavelength in position 0, flux in position 1, and error values in position 2. It can also be a list of spectra. If no errors available, then it calculates SNR based on this: http://www.stecf.org/software/ASTROsoft/DER_SNR/der_snr.py.
*rng*
list, indicating in wavelength space the range of interest. If None, it computes signal-to-noise for the whole spectrum.
'''
# Convert spec into a list type if it is only one spectrum
if len(spec[0]) > 3:
spec = [spec,]
snr = np.array([np.nan] * len(spec))
for js,s in enumerate(spec):
i = np.where((s[1] != 0.0) & (np.isfinite(s[1])))[0]
flux = np.array(s[1][i])
wl = np.array(s[0][i])
try:
e_flux = np.array(s[2][i])
i = np.where(np.isfinite(e_flux))[0]
if len(i) > 0:
errors = True
else:
errors = False
except IndexError:
errors = False
if errors:
if rng is None:
snr[js] = np.median(flux / e_flux)
else:
if rng[0] >= rng[1]:
print('Wavelength range incorrect.')
return
else:
i = np.where((wl > rng[0]) & (wl < rng[1]))[0]
if len(i) == 0:
print('No flux data within specified range.')
return
else:
snr[js] = np.median(flux[i] / e_flux[i])
else:
if rng is None:
n = len(flux)
flx = flux.copy()
else:
if rng[0] >= rng[1]:
print('Wavelength range incorrect.')
return
else:
i = np.where((wl > rng[0]) & (wl < rng[1]))[0]
n = len(i)
flx = flux[i]
if n < 4:
print('At least 4 flux data points are needed for this calculation.')
return
else:
signal = np.median(flx)
noise = 0.6052697 * np.median(np.abs(2.0 * flx[2:n-2] - flx[0:n-4] - flx[4:n]))
snr[js] = signal / noise
return snr
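# Added example (not part of the original module): signal-to-noise of a flat
# synthetic spectrum with constant flux errors over a restricted range.
def _example_snr():
    wl = np.linspace(6000.0, 7000.0, 500)
    flux = np.ones(500) * 100.0
    e_flux = np.ones(500) * 5.0
    print(snr([wl, flux, e_flux], rng=[6200.0, 6800.0]))   # ~[20.]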
def plot_spec(specData, ploterrors=False):
'''
    (by <NAME>)
Plot a spectrum. If more than one spectrum is provided simultaneously, it will plot all spectra on top of one another.
This is a quick and dirty tool to visualize a set of spectra. It is not meant to be a paper-ready format. You can use it, however, as a starting point.
*specData*
Spectrum as a Python list with wavelength in position 0, flux in position 1, and (optional) error values in position 2. More than one spectrum can be provided simultaneously, in which case *specData* shall be a list of lists.
*ploterrors*
Boolean, whether to include flux error bars when available. This will work only if all spectra have error values.
'''
# Check that there is data to plot
allNone = True
for spData in specData:
if spData is not None:
allNone = False
break
if allNone:
return
# Fix specData list dimensions when necessary
if len(specData) == 2 or len(specData) == 3:
if len(specData[0]) > 3:
specData = [specData]
# Initialize figure
plt.close()
fig = plt.figure(1)
fig.clf()
# Set plot titles
TITLE = 'SPECTRAL DATA'
X_LABEL = 'Wavelength'
Y_LABEL = 'Flux'
# Initialize plot within figure
subPlot = fig.add_subplot(1,1,1)
subPlot.set_title(TITLE)
subPlot.set_xlabel(X_LABEL)
subPlot.set_ylabel(Y_LABEL)
# Check if all spectra have error values
errorsOK = True
for spData in specData:
if len(spData) != 3:
errorsOK = False
# Plot spectra
for spData in specData:
if spData is not None:
if errorsOK and ploterrors:
subPlot.errorbar(spData[0], spData[1], spData[2], \
capsize=2, drawstyle='steps-mid')
else:
subPlot.plot(spData[0], spData[1], drawstyle='steps-mid')
return fig
def edit_header(fitsfiles, keyword, val, hdu=0):
"""
Edit a card on the fits file header using the parameters provided.
Args:
----------
fitsfile - String, the full path of the fits file; if only a filename is provided, it will look for the file in the current directory. It can also be a python list of names.
keyword - String, the name of the keyword to edit.
val - String, the value that the keyword will have.
hdu - Int, the index of the hdu to be edited.
Returns:
----------
- None.
"""
import datetime
# Convert fitsfiles into a list type if it is only one file name
if isinstance(fitsfiles, str):
fitsfiles = [fitsfiles,]
for fitsfl in fitsfiles:
# Read fits file data
FitsHDU = pf.open(fitsfl, 'update')
try:
tmp = FitsHDU[hdu].data.shape
except IndexError:
print('hdu index does not exist for ' + fitsfl)
print('Skipping this file.')
continue
try:
tmp = FitsHDU[hdu].header[keyword]
except KeyError:
print('Keyword does not exist for ' + fitsfl)
print('Skipping this file.')
continue
# Replace keyword value with new one
FitsHDU[hdu].header[keyword] = val
today = datetime.datetime.now().strftime('%Y-%m-%d')
origcomment = FitsHDU[hdu].header.comments[keyword]
FitsHDU[hdu].header.comments[keyword] = origcomment + ' ---Updated on ' + today + ' by antools.py.'
FitsHDU.flush()
return
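# Added usage sketch (not part of the original module): update a header keyword
# in two files; the file names and the value are hypothetical.
def _example_edit_header():
    edit_header(['night1/spec001.fits', 'night1/spec002.fits'],
                keyword='AIRMASS', val='1.23', hdu=0)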
def crop_fits(fitsfile, xsize, ysize, croploc='center', suffix=None):
"""
Crop a fits image using the parameters provided. If file has more than one image, it only considers the first one.
Args:
----------
fitsfile - String, the full path of the fits file; if only a filename is provided, it will look for the file in the current directory.
xsize - Int, the desired X size (columns) in pixels.
ysize - Int, the desired Y size (rows) in pixels.
croploc - ['center'(default), 'upper right', 'upper left', 'lower left', 'lower right'], set location around which to crop image. If 'center', then it crops image centered in the image center. If 'upper right', then it crops image to size [xsize,ysize] anchored in the upper right corner. And so on...
suffix - String, suffix to add to new fits file. If it is None, then the original fits file is overwritten with the new one.
Returns:
----------
- the new fits HDU, including the original header information.
- It also saves a copy of the newly created fits file in the same folder as the original file, with an added suffix to its name, if "suffix" is specified.
"""
import os
# Get file path, if provided, and filename
filepath = fitsfile.rsplit('/',1)[0]
if filepath == fitsfile:
filepath = ''
filename = fitsfile.rsplit('.',1)[0]
else:
filepath = filepath + '/'
filename = fitsfile.rsplit('/',1)[1].rsplit('.',1)[0]
# Read fits file data
FitsHDU = pf.open(fitsfile)
Im = FitsHDU[0].data
FitsHeader = FitsHDU[0].header
xsizeorig = FitsHeader['NAXIS1']
ysizeorig = FitsHeader['NAXIS2']
# Determine pixel limits for cropping
if croploc == 'center':
center = [int(xsizeorig/2), int(ysizeorig/2)]
xstart = center[0] - int(xsize/2) + 1
xstop = center[0] + int(xsize/2) + 1
ystart = center[1] - int(ysize/2)
ystop = center[1] + int(ysize/2)
elif croploc == 'upper right':
xstart = xsizeorig - xsize + 1
xstop = xsizeorig + 1
ystart = ysizeorig - ysize
ystop = ysizeorig + 1
elif croploc == 'upper left':
xstart = 1
xstop = xsize + 1
ystart = ysizeorig - ysize + 1
ystop = ysizeorig + 1
elif croploc == 'lower left':
xstart = 1
xstop = xsize + 1
ystart = 1
ystop = ysize + 1
elif croploc == 'lower right':
xstart = xsizeorig - xsize + 1
xstop = xsizeorig + 1
ystart = 1
ystop = ysize + 1
else:
print('croploc not recognized.')
return None
# Check that cropping dimensions are OK
if any((xstart<1, xstop<1, ystart<1,ystop<1)):
print('xsize/ysize dimensions are too large.')
return None
if any((xstart>xsizeorig+1, xstop>xsizeorig+1)):
print('xsize dimensions are too large.')
return None
if any((ystart>ysizeorig+1, ystop>ysizeorig+1)):
print('ysize dimensions are too large.')
return None
#Crop the image
Im = Im[ystart:ystop, xstart-1:xstop]
FitsHDU[0].data=Im
#Write it to a new file
if suffix is not None:
suffix = '_' + suffix
else:
suffix = ''
OutFile = filepath + filename + suffix + '.fits'
if os.path.exists(OutFile) : os.remove(OutFile)
FitsHDU.writeto(OutFile)
return FitsHDU
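# Added usage sketch (not part of the original module): crop a hypothetical image
# to 512x512 pixels around its center and save it with a '_crop' suffix.
def _example_crop_fits():
    hdu = crop_fits('field1.fits', 512, 512, croploc='center', suffix='crop')
    if hdu is not None:
        print(hdu[0].data.shape)   # cropped image dimensions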
def __create_waxis(fitsHeader, lenData, fileName, verb=True):
# Function used by read_spec only
# (by Alejo)
# Generates a wavelength (wl) axis using header data from fits file.
# Define key names in
KEY_MIN = ['COEFF0','CRVAL1'] # Min wl
KEY_DELT = ['COEFF1','CDELT1','CD1_1'] # Delta of wl
KEY_OFF = ['LTV1'] # Offset in wl to subsection start
# Find key names for minimum wl, delta, and wl offset in fits header
setMin = set(KEY_MIN).intersection(set(fitsHeader.keys()))
setDelt = set(KEY_DELT).intersection(set(fitsHeader.keys()))
setOff = set(KEY_OFF).intersection(set(fitsHeader.keys()))
# Get the values for minimum wl, delta, and wl offset, and generate axis
if len(setMin) >= 1 and len (setDelt) >= 1:
nameMin = setMin.pop()
valMin = fitsHeader[nameMin]
nameDelt = setDelt.pop()
valDelt = fitsHeader[nameDelt]
if len(setOff) == 0:
valOff = 0
else:
nameOff = setOff.pop()
valOff = fitsHeader[nameOff]
# generate wl axis
if nameMin == 'COEFF0':
wAxis = 10 ** (np.arange(lenData) * valDelt + valMin)
else:
wAxis = (np.arange(lenData) * valDelt) + valMin - (valOff * valDelt)
else:
wAxis = None
if verb:
print('Could not re-create wavelength axis for ' + fileName + '.')
return wAxis
def __get_spec(fitsData, fitsHeader, fileName, errorVals, templ=False, verb=True):
# Function used by read_spec only
# (by Alejo)
# Interprets spectral data from fits file.
# Returns wavelength (wl) data in pos. 0, flux data in pos. 1, and if requested, error values in pos. 2.
# If templ, also returns min flux in pos. 3 and max flux in pos. 4
if templ:
validData = [None] * 5
elif errorVals:
validData = [None] * 3
else:
validData = [None] * 2
# Identify number of data sets in fits file
dimNum = len(fitsData)
fluxIdx = None
waveIdx = None
sigmaIdx = None
isSDSS = False
try:
if fitsHeader['TELESCOP'].upper().find('LAMOST') != -1:
isLAMOST = True
else:
isLAMOST = False
except KeyError:
isLAMOST = False
# Identify data sets in fits file
if dimNum == 1:
fluxIdx = 0
elif dimNum == 2:
if len(fitsData[0]) == 1:
sampleData = fitsData[0][0][20]
else:
sampleData = fitsData[0][20]
if sampleData < 0.0001:
# 0-flux, 1-unknown
fluxIdx = 0
else:
waveIdx = 0
fluxIdx = 1
elif dimNum == 3:
waveIdx = 0
fluxIdx = 1
sigmaIdx = 2
elif dimNum == 4:
# 0-flux clean, 1-flux raw, 2-background, 3-sigma clean
fluxIdx = 0
sigmaIdx = 3
elif dimNum == 5:
if templ:
# 0-wl, 1-avg flux, 2-flux variance, 3-min flux, 4-max flux
waveIdx = 0
fluxIdx = 1
sigmaIdx = 2
minIdx = 3
maxIdx = 4
else:
if isLAMOST:
# 0-flux, 1-inv.var, 2-wl, 3-andmask, 4-ormask
fluxIdx = 0
sigmaIdx = 1 # This column is actually 1/sigma^2
waveIdx = 2
else:
# 0-flux, 1-continuum substracted flux, 2-sigma, 3-mask array, 4-unknown
fluxIdx = 0
sigmaIdx = 2
elif dimNum == 8:
# SDSS spectra
fluxIdx = 0
waveIdx = 1 # This column is actually log10(wl)
sigmaIdx = 2 # This column is actually 1/sigma^2
isSDSS = True
elif dimNum > 10:
# Implies that only one data set in fits file: flux
fluxIdx = -1
if np.isscalar(fitsData[0]):
fluxIdx = -1
elif len(fitsData[0]) == 2:
# Data comes in a xxxx by 2 matrix (ascii origin)
tmpWave = []
tmpFlux = []
for pair in fitsData:
tmpWave.append(pair[0])
tmpFlux.append(pair[1])
fitsData = [tmpWave,tmpFlux]
fitsData = np.array(fitsData)
waveIdx = 0
fluxIdx = 1
else:
# Indicates that data is structured in an unrecognized way
fluxIdx = None
else:
fluxIdx = None
# Fetch wave data set from fits file
if fluxIdx is None:
# No interpretation known for fits file data sets
validData = None
if verb:
print('Unable to interpret data in ' + fileName + '.')
return validData
else:
if waveIdx is not None:
if len(fitsData[waveIdx]) == 1:
# Data set may be a 1-item list
validData[0] = fitsData[waveIdx][0]
else:
if isSDSS:
validData[0] = 10**fitsData[waveIdx]
else:
validData[0] = fitsData[waveIdx]
# Convert from vacuum wl to air wl
if isSDSS or isLAMOST:
validData[0] = validData[0] / (1.0 + 5.792105E-2/(238.0185 \
- (1E4/validData[0])**2) + 1.67917E-3/(57.362 \
- (1E4/validData[0])**2))
# Fetch flux data set from fits file
if fluxIdx == -1:
validData[1] = fitsData
else:
if len(fitsData[fluxIdx]) == 1:
validData[1] = fitsData[fluxIdx][0]
else:
validData[1] = fitsData[fluxIdx]
if isSDSS:
validData[1] = validData[1] * 1E-17
# Fetch sigma data set from fits file, if requested
if errorVals:
if sigmaIdx is None:
validData[2] = np.array([np.nan] * len(validData[1]))
else:
if len(fitsData[sigmaIdx]) == 1:
validData[2] = fitsData[sigmaIdx][0]
else:
if isSDSS or isLAMOST:
validData[2] = 1 / np.sqrt(fitsData[sigmaIdx])
else:
validData[2] = fitsData[sigmaIdx]
if isSDSS:
validData[2] = validData[2] * 1E-17
# If all sigma values have the same value, replace them with nans
if np.nanmin(validData[2]) == np.nanmax(validData[2]):
validData[2] = np.array([np.nan] * len(validData[1]))
# Fetch template data when relevant
if templ:
validData[3] = fitsData[minIdx]
# validData[4] = fitsData[maxIdx]
# Check ascending order of spectrum using wavelength axis
if validData[0] is not None:
if validData[0][0] > validData[0][-1]:
for i in range(len(validData)):
if validData[i] is not None:
validData[i] = validData[i][::-1]
return validData
def equivalent_width(spec, xmin, xmax, exclude_min, exclude_max, n, fldr, name=None, errors=True, head=None, normalize=True, band='Halpha', fitted=True, multi=False):
"""Calculate the equivalent width of an absorption or emission line for a given spectrum using PySpecKit. By: <NAME>
Args:
----------
spec - String, fits filename
xmin,xmax - Integers, the specified interval in wavelength space, which defines the region of interest
excludemin, excludemax - Integers, the specified interval in wavelength space of the spectral feature, which binds the edges of the spectral feature itself
n - Integer, the number of times the EqW measurement is repeated in the MCMC step
fldr - String, location where output figure is desired
name - String, if not None, it uses it to label the star
errors - Boolean, whether to perform the MCMC routine to calculate 1-sigma errors for EqW
head - Fits header of fits file with data
normalize - Boolean, whether to normalize the flux values
fitted - Boolean, whether EqW is calculated using fitted model; if False, it uses instead the data points
multi - Boolean, whether two spectral features are fitted simultaneously instead of one (NII or SII features)
Returns:
-------
- the mean and standard deviation of the equivalent width measured n times
- A figure with a plot of the full spectrum; a plot of the spectral feature fit, with the Voigt profile line fit (blue), the pseudo-continuum (orange), and the approximated rectangle (green); and a histogram with the MCMC results of the EqW distribution
"""
import pyspeckit as p
import matplotlib.pyplot as plt, matplotlib.mlab as mlab
import numpy as np
from scipy.stats import norm
import astropy.io.fits as pyfits
GRAY = '#999999'
BLORDER = 1 # order for baseline fitting
# Set band parameters
if band == 'Halpha':
normwl = 6555.
bandloc = 6563.
elif band == 'NII':
if multi:
normwl = 6545.
bandloc = 6548.
bandloc2 = 6584.
else:
normwl = 5750.
bandloc = 5755.
elif band == 'SII':
normwl = 6711.
bandloc = 6717.
bandloc2 = 6731.
# Get data from fits file
if spec.endswith('.fits') or spec.endswith('.fit'):
if head is None:
data, head = read_spec(spec, header=True)
data = data[0] # This is needed bc read_spec() returns a list, even if it's just one file
else:
data = read_spec(spec)[0]
else:
tb = read_table(spec, delimiter=' ', ds=0)
data = np.vstack([tb.columns[0], tb.columns[1]])
if data is None: return None
# Get object name
# (Don't get name from fits header, bc sometimes it's wrong)
if name is not None:
objnm = name
else:
objnm = spec.split('/')[-1].split('.')[-2] # This is just the filename
# Set up figure
plt.rc('font', size=8)
fig = plt.figure(1, figsize=(6*1.2,6))
plt.subplots_adjust(top=0.96, bottom=0.07, right=0.98, left=0.08)
if multi:
numcols = 3
else:
numcols = 2
ax1 = plt.subplot2grid((2,numcols), (0,0), 1, numcols) # To plot the fit
ax2 = plt.subplot2grid((2,numcols), (1,0), 1, 1) # To plot the full spectrum
ax3 = plt.subplot2grid((2,numcols), (1,1), 1, 1) # To plot the histogram
if multi:
ax4 = plt.subplot2grid((2,numcols), (1,2), 1, 1) # To plot the second histogram
# Plot plain spectrum
tmpmin = data[0][1]
if tmpmin < 4500:
tmpmin = 4500.
tmpmax = data[0][-2]
irange = np.where((data[0]>=tmpmin) & (data[0]<=tmpmax))[0]
mean = np.nanmean(data[1][irange])
std = np.nanstd(data[1][irange])
iclip = np.where((data[1][irange] > mean - 3*std) & \
(data[1][irange] < mean + 3*std))[0] # Clip 3sigma flux outliers
inorm = np.where(data[0] >= normwl)[0][0] # Always normalize spectrum flux in this figure
normval = data[1][inorm]
ax2.plot(data[0][irange][iclip], data[1][irange][iclip] / normval, \
drawstyle='steps-mid', linewidth=0.8)
ax2.set_xlim(xmin=tmpmin, xmax=tmpmax)
ax2.set_xlabel(r'Wavelength $(\AA)$')
ax2.set_ylabel(r'Flux / Flux(' + format(int(normwl)) + ' $\AA$)')
ax2.axvline(x=bandloc, linestyle='--', color=GRAY, linewidth=0.8)
if band == 'NII':
ax2.axvline(x=bandloc2, linestyle='--', color=GRAY, linewidth=0.8)
# Normalize flux values (bring them close to unity to make aid the fitting routine)
if normalize:
data[1] = data[1] / normval * 10
if len(data) == 3:
data[2] = data[2] / normval * 10
# Load spectrum onto PySpecKit class
if len(data) < 3:
# Only wavelength and flux arrays
sp = p.Spectrum(data=data[1], xarr=data[0], header=head, \
xarrkwargs={'unit':'angstroms'})
else:
# Only wavelength and flux arrays
if np.all(np.isnan(data[2])):
sp = p.Spectrum(data=data[1], xarr=data[0], header=head, \
xarrkwargs={'unit':'angstroms'})
# Wavelength, flux, and e_flux arrays
else:
sp = p.Spectrum(data=data[1], xarr=data[0], error=data[2], header=head, \
xarrkwargs={'unit':'angstroms'})
sp.xarr.xtype = 'wavelength'
if name is not None or sp.specname == '':
sp.specname = objnm
if normalize:
sp.unit = 'Normalized flux'
# Set up plotter and fit baseline
sp.plotter(axis=ax1, clear=False, xmin=xmin, xmax=xmax, ymin=0, \
errstyle='bars', color='grey')
sp.baseline(xmin=xmin, xmax=xmax, exclude=[exclude_min,exclude_max], \
subtract=False, reset_selection=False, highlight_fitregion=False, \
order=BLORDER)
sp.baseline.annotate(loc='upper right', fontsize=8)
# Fit Voigt profile to spectral feature
if multi:
if band == 'NII':
tmpguess = [20,6548.,0.8,0.5,50,6584.,0.8,0.5] # amp, delX, sigma, gamma
elif band == 'SII':
tmpguess = [50,6717,0.8,0.5,50,6731.,0.8,0.5] # amp, delX, sigma, gamma
sp.specfit(fittype='voigt', color='blue', loc='center right', multifit=multi, \
guesses=tmpguess)
# Calculate equivalent width using the fit above
ew = sp.specfit.EQW(plot=True, plotcolor='g', fitted=fitted, components=multi, \
annotate=True, loc='lower left', xmin=None, xmax=None)
else:
tmpguess = [1., bandloc, 1., 1.] # None
sp.specfit(fittype='voigt', color='blue', loc='center right', \
guesses=tmpguess)
# Calculate equivalent width using the fit above
ew = sp.specfit.EQW(plot=True, plotcolor='g', fitted=fitted, xmin=None, xmax=None)
sp.specfit.annotate(loc='center right', fontsize=8)
    if multi:
        # EQW() with components=True returns one value per fitted component
        txt = 'EqW = ' + ', '.join(format(e, '.2f') for e in ew) + r' $\AA$'
    else:
        txt = 'EqW = ' + format(ew, '.2f') + r' $\AA$'
ax1.text(0.86,0.02, txt, transform=ax1.transAxes)
# Beautify plot and save it
sp.specfit.fitleg.set_bbox_to_anchor((1,0.3),transform=sp.plotter.axis.transAxes)
sp.plotter.axis.set_xlabel(r'Wavelength $(\AA)$')
ylbl = sp.plotter.axis.get_ylabel()
sp.plotter.axis.axvline(x=exclude_min, linestyle=':', color=GRAY)
sp.plotter.axis.axvline(x=exclude_max, linestyle=':', color=GRAY)
if multi:
sp.plotter.axis.axvline(x=bandloc, linestyle='--', color='green')
sp.plotter.axis.axvline(x=bandloc2, linestyle='--', color='green')
tmplgd = sp.plotter.axis.get_legend()
tmplgd.set_bbox_to_anchor((0.98,0.3), transform=sp.plotter.axis.transAxes)
tmplgd.set_frame_on(True)
# Print figure to allow user to determine if fit is acceptable
plt.savefig(fldr + objnm + '_EqWfit.pdf')
# Do MCMC using the original result as starting point for param values
if errors:
sp2 = sp.copy()
EQWs = []
for w in range(n):
print(w)
if np.all(np.isnan(data[2])):
# Get error from noise in continuum
icont = np.where(((sp.xarr.value >= xmin) & (sp.xarr.value < exclude_min)) | \
((sp.xarr.value > exclude_max) & (sp.xarr.value <= xmax)))[0]
tmpcont = sp.data[icont]
tmperr = np.std(tmpcont)
else:
# Get error from flux uncertainties
tmperr = sp.error
sp2.data = sp.data + np.random.randn(sp.data.size) * tmperr
sp2.baseline(xmin=xmin, xmax=xmax, exclude=[exclude_min,exclude_max], \
subtract=False, reset_selection=False, order=BLORDER)
if multi:
sp2.specfit(fittype='voigt', guesses=sp.specfit.parinfo.values, \
multifit=multi)
dist = sp2.specfit.EQW(fitted=fitted, components=multi, \
annotate=True, xmin=None, xmax=None)
else:
sp2.specfit(fittype='voigt', guesses=sp.specfit.parinfo.values)
dist = sp2.specfit.EQW(fitted=fitted, \
annotate=True, xmin=None, xmax=None)
EQWs.append(dist)
EQWs = np.array(EQWs)
# Calculate stats of MCMC array and make histogram with results
if multi:
mu, sigma = norm.fit(EQWs[:,0])
mu2, sigma2 = norm.fit(EQWs[:,1])
n,bins,ptchs = ax3.hist(EQWs[:,0], 10, normed=True, facecolor='green', \
histtype='stepfilled')
n,bins2,ptchs = ax4.hist(EQWs[:,1], 10, normed=True, facecolor='green', \
histtype='stepfilled')
else:
mu, sigma = norm.fit(EQWs)
n,bins,ptchs = ax3.hist(EQWs, 10, normed=True, facecolor='green', \
histtype='stepfilled')
# Beautify histogram plot
y = mlab.normpdf(bins, mu, sigma)
ax3.plot(bins,y,'r--',linewidth=2)
ax3.grid(True)
ax3.set_ylabel('Count')
        ax3.set_xlabel(r'EqW ($\AA$)')
txt = r'$\mu=$' + format(mu,'.3f') + r', $\sigma=$' + format(sigma,'.3f')
ax3.text(0.02,0.94, txt, transform=ax3.transAxes, fontsize=8, color='white', \
bbox=dict(facecolor='green', ec='none', pad=0.3, boxstyle='round'))
if multi:
y = mlab.normpdf(bins2, mu2, sigma2)
ax4.plot(bins2,y,'r--',linewidth=2)
ax4.grid(True)
ax4.set_ylabel('Count')
ax4.set_xlabel(r'EqW ($\AA$)')
txt = r'$\mu=$' + format(mu2,'.3f') + r', $\sigma=$' + format(sigma2,'.3f')
ax4.text(0.02,0.94, txt, transform=ax4.transAxes, fontsize=8, color='white', \
bbox=dict(facecolor='green', ec='none', pad=0.3, boxstyle='round'))
plt.savefig(fldr + objnm + '_EqWfit.pdf')
if multi:
return np.array([mu, sigma, mu2, sigma2])
else:
return np.array([mu, sigma])
else:
plt.savefig(fldr + objnm + '_EqWfit.pdf')
        if multi:
            # Two components were fit; report each EqW with zero uncertainty,
            # matching the [mu, sigma, mu2, sigma2] shape of the errors branch
            return np.array([ew[0], 0., ew[1], 0.])
        else:
            return np.array([ew, 0.])
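# A self-contained sketch of the Monte-Carlo uncertainty estimate used in the
# errors branch above: perturb the flux by the continuum noise, re-measure the
# equivalent width, and take the mean/std of the resulting distribution. The
# Gaussian line profile, noise level, and direct trapezoidal integration below
# are illustrative assumptions only; the routine above fits a Voigt profile
# with pyspeckit instead.
def _mc_eqw_sketch(n_draws=100, noise=0.02, seed=0):
    rng = np.random.RandomState(seed)
    wl = np.linspace(6540., 6590., 500)                        # wavelength grid (Angstrom)
    flux = 1. - 0.5 * np.exp(-0.5 * ((wl - 6563.) / 1.5)**2)   # normalized absorption line
    eqws = []
    for _ in range(n_draws):
        noisy = flux + rng.randn(flux.size) * noise            # add continuum-level noise
        eqws.append(np.trapz(1. - noisy, wl))                  # direct EqW integral
    eqws = np.array(eqws)
    return eqws.mean(), eqws.std()                             # analogous to (mu, sigma) above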
| [
"numpy.arccos",
"numpy.sqrt",
"scipy.interpolate.interp1d",
"numpy.nanmean",
"numpy.array",
"pyspeckit.Spectrum",
"numpy.isfinite",
"numpy.arctan2",
"astropy.io.fits.open",
"numpy.sin",
"numpy.nanmin",
"numpy.arange",
"os.remove",
"os.path.exists",
"numpy.mean",
"matplotlib.mlab.normpdf",
"numpy.isscalar",
"numpy.where",
"numpy.searchsorted",
"matplotlib.pyplot.close",
"scipy.stats.norm.fit",
"numpy.nanmax",
"numpy.vstack",
"numpy.argmin",
"numpy.abs",
"numpy.nanstd",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.isnan",
"numpy.cos",
"numpy.std",
"numpy.nansum",
"numpy.random.randn",
"astropy.io.ascii.read",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.rc",
"numpy.median",
"astropy.io.fits.getheader",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"numpy.zeros",
"astropy.io.fits.getdata",
"matplotlib.pyplot.subplot2grid"
] | [((1594, 1614), 'numpy.arccos', 'np.arccos', (['(x + y + z)'], {}), '(x + y + z)\n', (1603, 1614), True, 'import numpy as np\n'), ((6042, 6066), 'numpy.argmin', 'np.argmin', (['sep[imatches]'], {}), '(sep[imatches])\n', (6051, 6066), True, 'import numpy as np\n'), ((30261, 30272), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (30270, 30272), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((30283, 30296), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (30293, 30296), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((34183, 34200), 'astropy.io.fits.open', 'pf.open', (['fitsfile'], {}), '(fitsfile)\n', (34190, 34200), True, 'import astropy.io.fits as pf\n'), ((35972, 35995), 'os.path.exists', 'os.path.exists', (['OutFile'], {}), '(OutFile)\n', (35986, 35995), False, 'import os\n'), ((45929, 45951), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(8)'}), "('font', size=8)\n", (45935, 45951), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((45962, 45997), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6 * 1.2, 6)'}), '(1, figsize=(6 * 1.2, 6))\n', (45972, 45997), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((45999, 46064), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.96)', 'bottom': '(0.07)', 'right': '(0.98)', 'left': '(0.08)'}), '(top=0.96, bottom=0.07, right=0.98, left=0.08)\n', (46018, 46064), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((46140, 46190), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, numcols)', '(0, 0)', '(1)', 'numcols'], {}), '((2, numcols), (0, 0), 1, numcols)\n', (46156, 46190), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((46217, 46261), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, numcols)', '(1, 0)', '(1)', '(1)'], {}), '((2, numcols), (1, 0), 1, 1)\n', (46233, 46261), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((46298, 46342), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, numcols)', '(1, 1)', '(1)', '(1)'], {}), '((2, numcols), (1, 1), 1, 1)\n', (46314, 46342), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((46663, 46690), 'numpy.nanmean', 'np.nanmean', (['data[1][irange]'], {}), '(data[1][irange])\n', (46673, 46690), True, 'import numpy as np\n'), ((46701, 46727), 'numpy.nanstd', 'np.nanstd', (['data[1][irange]'], {}), '(data[1][irange])\n', (46710, 46727), True, 'import numpy as np\n'), ((50782, 50823), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fldr + objnm + '_EqWfit.pdf')"], {}), "(fldr + objnm + '_EqWfit.pdf')\n", (50793, 50823), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((358, 472), 'astropy.io.ascii.read', 'ascii.read', (['name'], {'guess': '(False)', 'delimiter': 'delimiter', 'comment': 'comment', 'header_start': '(0)', 'data_start': 'ds', 'format': 'fmt'}), '(name, guess=False, delimiter=delimiter, comment=comment,\n header_start=0, data_start=ds, format=fmt)\n', (368, 472), False, 'from astropy.io import ascii\n'), ((556, 658), 'astropy.io.ascii.read', 'ascii.read', (['name'], {'guess': '(False)', 'delimiter': 'delimiter', 'comment': 'comment', 'header_start': '(0)', 'data_start': 'ds'}), '(name, guess=False, delimiter=delimiter, comment=comment,\n header_start=0, data_start=ds)\n', (566, 658), False, 'from astropy.io import ascii\n'), ((1449, 1464), 'numpy.cos', 'np.cos', (['dec2rad'], {}), 
'(dec2rad)\n', (1455, 1464), True, 'import numpy as np\n'), ((1525, 1540), 'numpy.cos', 'np.cos', (['dec2rad'], {}), '(dec2rad)\n', (1531, 1540), True, 'import numpy as np\n'), ((1549, 1564), 'numpy.sin', 'np.sin', (['dec1rad'], {}), '(dec1rad)\n', (1555, 1564), True, 'import numpy as np\n'), ((1567, 1582), 'numpy.sin', 'np.sin', (['dec2rad'], {}), '(dec2rad)\n', (1573, 1582), True, 'import numpy as np\n'), ((3786, 3803), 'numpy.array', 'np.array', (['new_ras'], {}), '(new_ras)\n', (3794, 3803), True, 'import numpy as np\n'), ((3823, 3841), 'numpy.array', 'np.array', (['new_decs'], {}), '(new_decs)\n', (3831, 3841), True, 'import numpy as np\n'), ((8327, 8351), 'numpy.ones', 'np.ones', (['window_len', '"""d"""'], {}), "(window_len, 'd')\n", (8334, 8351), True, 'import numpy as np\n'), ((11674, 11710), 'numpy.zeros', 'np.zeros', (['(numPoints, dims, numSpec)'], {}), '((numPoints, dims, numSpec))\n', (11682, 11710), True, 'import numpy as np\n'), ((12900, 12967), 'numpy.nansum', 'np.nansum', (['(wgts * ip_spectra[:, 0, :] / ip_spectra[:, 1, :])'], {'axis': '(1)'}), '(wgts * ip_spectra[:, 0, :] / ip_spectra[:, 1, :], axis=1)\n', (12909, 12967), True, 'import numpy as np\n'), ((13476, 13515), 'numpy.nanmean', 'np.nanmean', (['ip_spectra[:, 0, :]'], {'axis': '(1)'}), '(ip_spectra[:, 0, :], axis=1)\n', (13486, 13515), True, 'import numpy as np\n'), ((13599, 13637), 'numpy.nanmin', 'np.nanmin', (['ip_spectra[:, 0, :]'], {'axis': '(1)'}), '(ip_spectra[:, 0, :], axis=1)\n', (13608, 13637), True, 'import numpy as np\n'), ((13651, 13689), 'numpy.nanmax', 'np.nanmax', (['ip_spectra[:, 0, :]'], {'axis': '(1)'}), '(ip_spectra[:, 0, :], axis=1)\n', (13660, 13689), True, 'import numpy as np\n'), ((16878, 16918), 'numpy.where', 'np.where', (['(spData[0] < all_lims[spIdx][0])'], {}), '(spData[0] < all_lims[spIdx][0])\n', (16886, 16918), True, 'import numpy as np\n'), ((17621, 17661), 'numpy.where', 'np.where', (['(spData[0] > all_lims[spIdx][1])'], {}), '(spData[0] > all_lims[spIdx][1])\n', (17629, 17661), True, 'import numpy as np\n'), ((18602, 18622), 'numpy.array', 'np.array', (['fluxSelect'], {}), '(fluxSelect)\n', (18610, 18622), True, 'import numpy as np\n'), ((18926, 18954), 'numpy.mean', 'np.mean', (['fluxSelect[notNans]'], {}), '(fluxSelect[notNans])\n', (18933, 18954), True, 'import numpy as np\n'), ((26331, 26368), 'numpy.where', 'np.where', (['(specData[spFileIdx][1] == 0)'], {}), '(specData[spFileIdx][1] == 0)\n', (26339, 26368), True, 'import numpy as np\n'), ((27491, 27508), 'numpy.array', 'np.array', (['s[1][i]'], {}), '(s[1][i])\n', (27499, 27508), True, 'import numpy as np\n'), ((27522, 27539), 'numpy.array', 'np.array', (['s[0][i]'], {}), '(s[0][i])\n', (27530, 27539), True, 'import numpy as np\n'), ((31866, 31891), 'astropy.io.fits.open', 'pf.open', (['fitsfl', '"""update"""'], {}), "(fitsfl, 'update')\n", (31873, 31891), True, 'import astropy.io.fits as pf\n'), ((35998, 36016), 'os.remove', 'os.remove', (['OutFile'], {}), '(OutFile)\n', (36007, 36016), False, 'import os\n'), ((45601, 45642), 'numpy.vstack', 'np.vstack', (['[tb.columns[0], tb.columns[1]]'], {}), '([tb.columns[0], tb.columns[1]])\n', (45610, 45642), True, 'import numpy as np\n'), ((46393, 46437), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, numcols)', '(1, 2)', '(1)', '(1)'], {}), '((2, numcols), (1, 2), 1, 1)\n', (46409, 46437), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((46601, 46652), 'numpy.where', 'np.where', (['((data[0] >= tmpmin) & (data[0] <= tmpmax))'], {}), '((data[0] 
>= tmpmin) & (data[0] <= tmpmax))\n', (46609, 46652), True, 'import numpy as np\n'), ((46740, 46825), 'numpy.where', 'np.where', (['((data[1][irange] > mean - 3 * std) & (data[1][irange] < mean + 3 * std))'], {}), '((data[1][irange] > mean - 3 * std) & (data[1][irange] < mean + 3 *\n std))\n', (46748, 46825), True, 'import numpy as np\n'), ((47787, 47876), 'pyspeckit.Spectrum', 'p.Spectrum', ([], {'data': 'data[1]', 'xarr': 'data[0]', 'header': 'head', 'xarrkwargs': "{'unit': 'angstroms'}"}), "(data=data[1], xarr=data[0], header=head, xarrkwargs={'unit':\n 'angstroms'})\n", (47797, 47876), True, 'import pyspeckit as p\n'), ((52285, 52299), 'numpy.array', 'np.array', (['EQWs'], {}), '(EQWs)\n', (52293, 52299), True, 'import numpy as np\n'), ((53008, 53037), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['bins', 'mu', 'sigma'], {}), '(bins, mu, sigma)\n', (53020, 53037), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((53927, 53968), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fldr + objnm + '_EqWfit.pdf')"], {}), "(fldr + objnm + '_EqWfit.pdf')\n", (53938, 53968), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((54115, 54156), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fldr + objnm + '_EqWfit.pdf')"], {}), "(fldr + objnm + '_EqWfit.pdf')\n", (54126, 54156), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((1432, 1446), 'numpy.cos', 'np.cos', (['ra2rad'], {}), '(ra2rad)\n', (1438, 1446), True, 'import numpy as np\n'), ((1508, 1522), 'numpy.sin', 'np.sin', (['ra2rad'], {}), '(ra2rad)\n', (1514, 1522), True, 'import numpy as np\n'), ((5310, 5340), 'numpy.searchsorted', 'np.searchsorted', (['ra', '(ra1 - tol)'], {}), '(ra, ra1 - tol)\n', (5325, 5340), True, 'import numpy as np\n'), ((5358, 5388), 'numpy.searchsorted', 'np.searchsorted', (['ra', '(ra1 + tol)'], {}), '(ra, ra1 + tol)\n', (5373, 5388), True, 'import numpy as np\n'), ((5472, 5502), 'numpy.searchsorted', 'np.searchsorted', (['(ra + tol)', 'ra1'], {}), '(ra + tol, ra1)\n', (5487, 5502), True, 'import numpy as np\n'), ((5520, 5550), 'numpy.searchsorted', 'np.searchsorted', (['(ra - tol)', 'ra1'], {}), '(ra - tol, ra1)\n', (5535, 5550), True, 'import numpy as np\n'), ((5805, 5824), 'numpy.where', 'np.where', (['(sep < tol)'], {}), '(sep < tol)\n', (5813, 5824), True, 'import numpy as np\n'), ((5857, 5883), 'numpy.where', 'np.where', (['(sep < tol[i1:i2])'], {}), '(sep < tol[i1:i2])\n', (5865, 5883), True, 'import numpy as np\n'), ((12265, 12307), 'scipy.interpolate.interp1d', 'spi.interp1d', (['wl', 'flux'], {'bounds_error': '(False)'}), '(wl, flux, bounds_error=False)\n', (12277, 12307), True, 'import scipy.interpolate as spi\n'), ((12812, 12865), 'numpy.nansum', 'np.nansum', (['(1.0 / (wgts * ip_spectra[:, 1, :]))'], {'axis': '(1)'}), '(1.0 / (wgts * ip_spectra[:, 1, :]), axis=1)\n', (12821, 12865), True, 'import numpy as np\n'), ((13380, 13418), 'numpy.nanstd', 'np.nanstd', (['ip_spectra[:, 0, :]'], {'axis': '(1)'}), '(ip_spectra[:, 0, :], axis=1)\n', (13389, 13418), True, 'import numpy as np\n'), ((14081, 14106), 'numpy.median', 'np.median', (['(tmpflux / mean)'], {}), '(tmpflux / mean)\n', (14090, 14106), True, 'import numpy as np\n'), ((16067, 16089), 'numpy.isfinite', 'np.isfinite', (['spData[1]'], {}), '(spData[1])\n', (16078, 16089), True, 'import numpy as np\n'), ((18779, 18800), 'numpy.array', 'np.array', (['errorSelect'], {}), '(errorSelect)\n', (18787, 18800), True, 'import numpy as np\n'), ((18883, 18906), 'numpy.isfinite', 'np.isfinite', 
(['fluxSelect'], {}), '(fluxSelect)\n', (18894, 18906), True, 'import numpy as np\n'), ((23357, 23389), 'astropy.io.fits.getheader', 'pf.getheader', (['spFile'], {'ext': 'tmpext'}), '(spFile, ext=tmpext)\n', (23369, 23389), True, 'import astropy.io.fits as pf\n'), ((25979, 26015), 'numpy.where', 'np.where', (['(specData[spFileIdx][1] < 0)'], {}), '(specData[spFileIdx][1] < 0)\n', (25987, 26015), True, 'import numpy as np\n'), ((27574, 27591), 'numpy.array', 'np.array', (['s[2][i]'], {}), '(s[2][i])\n', (27582, 27591), True, 'import numpy as np\n'), ((42382, 42405), 'numpy.nanmin', 'np.nanmin', (['validData[2]'], {}), '(validData[2])\n', (42391, 42405), True, 'import numpy as np\n'), ((42409, 42432), 'numpy.nanmax', 'np.nanmax', (['validData[2]'], {}), '(validData[2])\n', (42418, 42432), True, 'import numpy as np\n'), ((46887, 46914), 'numpy.where', 'np.where', (['(data[0] >= normwl)'], {}), '(data[0] >= normwl)\n', (46895, 46914), True, 'import numpy as np\n'), ((47969, 47986), 'numpy.isnan', 'np.isnan', (['data[2]'], {}), '(data[2])\n', (47977, 47986), True, 'import numpy as np\n'), ((48006, 48095), 'pyspeckit.Spectrum', 'p.Spectrum', ([], {'data': 'data[1]', 'xarr': 'data[0]', 'header': 'head', 'xarrkwargs': "{'unit': 'angstroms'}"}), "(data=data[1], xarr=data[0], header=head, xarrkwargs={'unit':\n 'angstroms'})\n", (48016, 48095), True, 'import pyspeckit as p\n'), ((48211, 48315), 'pyspeckit.Spectrum', 'p.Spectrum', ([], {'data': 'data[1]', 'xarr': 'data[0]', 'error': 'data[2]', 'header': 'head', 'xarrkwargs': "{'unit': 'angstroms'}"}), "(data=data[1], xarr=data[0], error=data[2], header=head,\n xarrkwargs={'unit': 'angstroms'})\n", (48221, 48315), True, 'import pyspeckit as p\n'), ((52415, 52435), 'scipy.stats.norm.fit', 'norm.fit', (['EQWs[:, 0]'], {}), '(EQWs[:, 0])\n', (52423, 52435), False, 'from scipy.stats import norm\n'), ((52461, 52481), 'scipy.stats.norm.fit', 'norm.fit', (['EQWs[:, 1]'], {}), '(EQWs[:, 1])\n', (52469, 52481), False, 'from scipy.stats import norm\n'), ((52808, 52822), 'scipy.stats.norm.fit', 'norm.fit', (['EQWs'], {}), '(EQWs)\n', (52816, 52822), False, 'from scipy.stats import norm\n'), ((53463, 53495), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['bins2', 'mu2', 'sigma2'], {}), '(bins2, mu2, sigma2)\n', (53475, 53495), True, 'import matplotlib.pyplot as plt, matplotlib.mlab as mlab\n'), ((54007, 54041), 'numpy.array', 'np.array', (['[mu, sigma, mu2, sigma2]'], {}), '([mu, sigma, mu2, sigma2])\n', (54015, 54041), True, 'import numpy as np\n'), ((54075, 54096), 'numpy.array', 'np.array', (['[mu, sigma]'], {}), '([mu, sigma])\n', (54083, 54096), True, 'import numpy as np\n'), ((54194, 54211), 'numpy.array', 'np.array', (['ew', '(0.0)'], {}), '(ew, 0.0)\n', (54202, 54211), True, 'import numpy as np\n'), ((54244, 54263), 'numpy.array', 'np.array', (['[ew, 0.0]'], {}), '([ew, 0.0])\n', (54252, 54263), True, 'import numpy as np\n'), ((1397, 1411), 'numpy.cos', 'np.cos', (['ra1rad'], {}), '(ra1rad)\n', (1403, 1411), True, 'import numpy as np\n'), ((1414, 1429), 'numpy.cos', 'np.cos', (['dec1rad'], {}), '(dec1rad)\n', (1420, 1429), True, 'import numpy as np\n'), ((1473, 1487), 'numpy.sin', 'np.sin', (['ra1rad'], {}), '(ra1rad)\n', (1479, 1487), True, 'import numpy as np\n'), ((1490, 1505), 'numpy.cos', 'np.cos', (['dec1rad'], {}), '(dec1rad)\n', (1496, 1505), True, 'import numpy as np\n'), ((2013, 2044), 'numpy.arctan2', 'np.arctan2', (['(-deltaRA)', '(-deltaDEC)'], {}), '(-deltaRA, -deltaDEC)\n', (2023, 2044), True, 'import numpy as np\n'), ((11379, 11396), 
'numpy.isfinite', 'np.isfinite', (['uncs'], {}), '(uncs)\n', (11390, 11396), True, 'import numpy as np\n'), ((12418, 12459), 'scipy.interpolate.interp1d', 'spi.interp1d', (['wl', 'unc'], {'bounds_error': '(False)'}), '(wl, unc, bounds_error=False)\n', (12430, 12459), True, 'import scipy.interpolate as spi\n'), ((22004, 22031), 'astropy.io.fits.getheader', 'pf.getheader', (['spFile'], {'ext': '(0)'}), '(spFile, ext=0)\n', (22016, 22031), True, 'import astropy.io.fits as pf\n'), ((22917, 22947), 'astropy.io.fits.getdata', 'pf.getdata', (['spFile'], {'ext': 'tmpext'}), '(spFile, ext=tmpext)\n', (22927, 22947), True, 'import astropy.io.fits as pf\n'), ((23442, 23469), 'astropy.io.fits.getheader', 'pf.getheader', (['spFile'], {'ext': '(0)'}), '(spFile, ext=0)\n', (23454, 23469), True, 'import astropy.io.fits as pf\n'), ((23627, 23645), 'astropy.io.ascii.read', 'ascii.read', (['spFile'], {}), '(spFile)\n', (23637, 23645), False, 'from astropy.io import ascii\n'), ((27883, 27907), 'numpy.median', 'np.median', (['(flux / e_flux)'], {}), '(flux / e_flux)\n', (27892, 27907), True, 'import numpy as np\n'), ((28937, 28951), 'numpy.median', 'np.median', (['flx'], {}), '(flx)\n', (28946, 28951), True, 'import numpy as np\n'), ((32414, 32437), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (32435, 32437), False, 'import datetime\n'), ((51028, 51045), 'numpy.isnan', 'np.isnan', (['data[2]'], {}), '(data[2])\n', (51036, 51045), True, 'import numpy as np\n'), ((51356, 51371), 'numpy.std', 'np.std', (['tmpcont'], {}), '(tmpcont)\n', (51362, 51371), True, 'import numpy as np\n'), ((27453, 27470), 'numpy.isfinite', 'np.isfinite', (['s[1]'], {}), '(s[1])\n', (27464, 27470), True, 'import numpy as np\n'), ((27617, 27636), 'numpy.isfinite', 'np.isfinite', (['e_flux'], {}), '(e_flux)\n', (27628, 27636), True, 'import numpy as np\n'), ((51124, 51252), 'numpy.where', 'np.where', (['((sp.xarr.value >= xmin) & (sp.xarr.value < exclude_min) | (sp.xarr.value >\n exclude_max) & (sp.xarr.value <= xmax))'], {}), '((sp.xarr.value >= xmin) & (sp.xarr.value < exclude_min) | (sp.xarr\n .value > exclude_max) & (sp.xarr.value <= xmax))\n', (51132, 51252), True, 'import numpy as np\n'), ((51510, 51539), 'numpy.random.randn', 'np.random.randn', (['sp.data.size'], {}), '(sp.data.size)\n', (51525, 51539), True, 'import numpy as np\n'), ((28093, 28132), 'numpy.where', 'np.where', (['((wl > rng[0]) & (wl < rng[1]))'], {}), '((wl > rng[0]) & (wl < rng[1]))\n', (28101, 28132), True, 'import numpy as np\n'), ((28333, 28363), 'numpy.median', 'np.median', (['(flux[i] / e_flux[i])'], {}), '(flux[i] / e_flux[i])\n', (28342, 28363), True, 'import numpy as np\n'), ((28655, 28694), 'numpy.where', 'np.where', (['((wl > rng[0]) & (wl < rng[1]))'], {}), '((wl > rng[0]) & (wl < rng[1]))\n', (28663, 28694), True, 'import numpy as np\n'), ((28999, 29051), 'numpy.abs', 'np.abs', (['(2.0 * flx[2:n - 2] - flx[0:n - 4] - flx[4:n])'], {}), '(2.0 * flx[2:n - 2] - flx[0:n - 4] - flx[4:n])\n', (29005, 29051), True, 'import numpy as np\n'), ((37231, 37249), 'numpy.arange', 'np.arange', (['lenData'], {}), '(lenData)\n', (37240, 37249), True, 'import numpy as np\n'), ((37305, 37323), 'numpy.arange', 'np.arange', (['lenData'], {}), '(lenData)\n', (37314, 37323), True, 'import numpy as np\n'), ((42109, 42136), 'numpy.sqrt', 'np.sqrt', (['fitsData[sigmaIdx]'], {}), '(fitsData[sigmaIdx])\n', (42116, 42136), True, 'import numpy as np\n'), ((1761, 1776), 'numpy.cos', 'np.cos', (['dec1rad'], {}), '(dec1rad)\n', (1767, 1776), True, 'import numpy as 
np\n'), ((39892, 39916), 'numpy.isscalar', 'np.isscalar', (['fitsData[0]'], {}), '(fitsData[0])\n', (39903, 39916), True, 'import numpy as np\n'), ((40265, 40283), 'numpy.array', 'np.array', (['fitsData'], {}), '(fitsData)\n', (40273, 40283), True, 'import numpy as np\n')] |
"""A server that execute arbitrary Python code."""
# NOTE: This module is Python 2 compatible.
import argparse
import contextlib
import logging
import os
import os.path
import sys
import threading
from multiprocessing.connection import Listener
try:
import backport
except ImportError:
from . import backport
LOG = logging.getLogger('multiprocessing.server')
LOG.addHandler(logging.NullHandler())
LOG_FORMAT = '%(asctime)s %(threadName)s %(levelname)s %(name)s: %(message)s'
TIMEOUT = 5.0
def run_server(listener, semaphore):
exit_flag = threading.Event()
server_thread = threading.Thread(
name='multiprocessing',
target=server,
args=(listener, semaphore, exit_flag),
)
server_thread.daemon = True
server_thread.start()
wait_forever(exit_flag)
LOG.info('exit')
def wait_forever(event):
    # Unfortunately event.wait() without a timeout cannot be interrupted (e.g. by signals).
while not event.is_set():
event.wait(3600)
def server(listener, semaphore, exit_flag):
LOG.info('start server')
worker_serial = 0
global_vars = {}
while not exit_flag.is_set():
conn = listener.accept()
try:
semaphore.acquire(TIMEOUT)
LOG.debug('accept %r', listener.last_accepted)
worker = Worker(
closing(conn),
semaphore,
exit_flag,
global_vars,
listener.last_accepted,
)
worker_serial += 1
worker_thread = threading.Thread(
name='multiprocessing-%02d' % worker_serial,
target=worker.run,
)
worker_thread.daemon = True
worker_thread.start()
conn = None # conn is transfered to the worker.
except backport.Timeout:
LOG.error('exceed concurrent workers limit')
finally:
# Close conn only when it is not transfered to the worker.
if conn is not None:
conn.close()
LOG.info('exit')
class Worker(object):
VERSION_INFO = {'version_info': tuple(sys.version_info)}
OKAY = {}
ERROR_REQUIRE_COMMAND = {'error': 'require command'}
ERROR_REQUIRE_NAME = {'error': 'require name argument'}
ERROR_REQUIRE_VALUE = {'error': 'require value argument'}
ERROR_REQUIRE_SOURCE = {'error': 'require source argument'}
def __init__(
self, conn_manager, semaphore, exit_flag, global_vars, address):
self.conn_manager = conn_manager
self.semaphore = semaphore
self.exit_flag = exit_flag
self.global_vars = global_vars
if isinstance(address, tuple):
self.filename = '%s:%s' % (address)
else:
self.filename = str(address)
def run(self):
LOG.debug('start worker')
try:
with self.conn_manager as conn:
self.serve_forever(conn)
finally:
self.semaphore.release()
LOG.debug('exit')
def serve_forever(self, conn):
conn.send(self.VERSION_INFO)
while not self.exit_flag.is_set():
if self.process_request(conn):
break
def process_request(self, conn):
try:
request = conn.recv()
except EOFError:
return True
command = request.get('command')
LOG.debug('receive command %r', command)
if not command:
conn.send(self.ERROR_REQUIRE_COMMAND)
return
handler = {
'shutdown': self.do_shutdown,
'close': self.do_close,
'get': self.do_get,
'set': self.do_set,
'del': self.do_del,
'execute': self.do_execute,
'call': self.do_call,
}.get(command)
if handler is None:
LOG.warning('unknown command %r', command)
conn.send({'error': 'unknown command', 'command': command})
return
try:
return handler(conn, request)
except Exception as exc:
conn.send({'error': 'uncaught exception', 'exception': str(exc)})
raise
def do_shutdown(self, conn, _):
self.exit_flag.set()
conn.send(self.OKAY)
def do_close(self, conn, _):
conn.send(self.OKAY)
return True
def do_get(self, conn, request):
name = request.get('name')
if not name:
conn.send(self.ERROR_REQUIRE_NAME)
return
if name not in self.global_vars:
conn.send({'error': 'undefined variable', 'name': name})
return
conn.send({'name': name, 'value': self.global_vars[name]})
def do_set(self, conn, request):
name = request.get('name')
if not name:
conn.send(self.ERROR_REQUIRE_NAME)
return
if 'value' not in request:
conn.send(self.ERROR_REQUIRE_VALUE)
return
self.global_vars[name] = request['value']
conn.send(self.OKAY)
def do_del(self, conn, request):
name = request.get('name')
if not name:
conn.send(self.ERROR_REQUIRE_NAME)
return
if name not in self.global_vars:
conn.send({'error': 'undefined variable', 'name': name})
return
del self.global_vars[name]
conn.send(self.OKAY)
def do_execute(self, conn, request):
if 'source' not in request:
conn.send(self.ERROR_REQUIRE_SOURCE)
return
source = request['source']
filename = request.get('filename', self.filename)
try:
code = compile(source, filename, 'exec')
except SyntaxError as exc:
LOG.exception('syntax error in %s', filename)
conn.send({
'error': 'syntax error',
'filename': filename,
'exception': str(exc),
})
return
try:
exec(code, self.global_vars)
except Exception as exc:
LOG.exception('runtime error in exec %s', filename)
conn.send({
'error': 'runtime error',
'filename': filename,
'exception': str(exc),
})
return
conn.send(self.OKAY)
def do_call(self, conn, request):
name = request.get('name')
if not name:
conn.send(self.ERROR_REQUIRE_NAME)
return
if name not in self.global_vars:
conn.send({'error': 'undefined function', 'name': name})
return
func = self.global_vars[name]
args = request.get('args', ())
kwargs = request.get('kwargs', {})
try:
value = func(*args, **kwargs)
except Exception as exc:
LOG.exception(
'runtime error when calling %s(*%r, **%r)', name, args, kwargs)
conn.send({
'error': 'runtime error',
'name': name,
'exception': str(exc),
})
return
conn.send({'name': name, 'value': value})
def closing(context_manager):
    # Some Python 2 connection objects are not context managers.
for attr in ('__enter__', '__exit__'):
if not hasattr(context_manager, attr):
return contextlib.closing(context_manager)
return context_manager
def main(argv):
parser = argparse.ArgumentParser(description="""
    A server that executes arbitrary Python code.
""")
parser.add_argument(
'-v', '--verbose', action='count', default=0,
help='verbose output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--listen-net', metavar=('ADDRESS', 'PORT'), nargs=2,
help="""listen on AF_INET style address""")
group.add_argument(
'--listen-sock', metavar='PATH',
help="""listen on AF_UNIX or AF_PIPE style path""")
parser.add_argument(
'--authkey-var', metavar='VAR', default='AUTHKEY',
help="""read authkey from this environment variable
(default %(default)s)""")
parser.add_argument(
'--max-workers', type=int, default=8,
help="""set max concurrent workers""")
args = parser.parse_args(argv[1:])
if args.verbose == 0:
level = logging.WARNING
elif args.verbose == 1:
level = logging.INFO
else:
level = logging.DEBUG
logging.basicConfig(level=level, format=LOG_FORMAT)
if args.listen_net:
address = (args.listen_net[0], int(args.listen_net[1]))
else:
address = args.listen_sock
authkey = os.getenv(args.authkey_var)
if authkey is None:
parser.error('cannot read authkey from %s' % args.authkey_var)
return 2
if sys.version_info.major > 2:
authkey = bytes(authkey, encoding='ascii')
if args.max_workers <= 0:
semaphore = backport.UnlimitedSemaphore()
else:
semaphore = backport.BoundedSemaphore(args.max_workers)
threading.current_thread().name = 'multiprocessing.server#main'
with closing(Listener(address, authkey=authkey)) as listener:
run_server(listener, semaphore)
return 0
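# A minimal client sketch for the request/response protocol implemented by
# Worker above: every request is a dict with a 'command' key, and every reply
# is a dict that is empty on success or carries an 'error' key. The address
# and authkey below are placeholders, not values defined by this module.
def example_client(address=('127.0.0.1', 8000), authkey=b'secret'):
    from multiprocessing.connection import Client
    conn = Client(address, authkey=authkey)
    try:
        print(conn.recv())  # version greeting, e.g. {'version_info': (...)}
        conn.send({'command': 'set', 'name': 'x', 'value': 40})
        print(conn.recv())  # {} on success
        conn.send({'command': 'execute', 'source': 'y = x + 2'})
        print(conn.recv())
        conn.send({'command': 'get', 'name': 'y'})
        print(conn.recv())  # {'name': 'y', 'value': 42}
        conn.send({'command': 'close'})
        print(conn.recv())
    finally:
        conn.close()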
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"logging.getLogger",
"logging.NullHandler",
"logging.basicConfig",
"threading.current_thread",
"argparse.ArgumentParser",
"os.getenv",
"threading.Event",
"backport.UnlimitedSemaphore",
"contextlib.closing",
"backport.BoundedSemaphore",
"threading.Thread",
"multiprocessing.connection.Listener"
] | [((328, 371), 'logging.getLogger', 'logging.getLogger', (['"""multiprocessing.server"""'], {}), "('multiprocessing.server')\n", (345, 371), False, 'import logging\n'), ((387, 408), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (406, 408), False, 'import logging\n'), ((559, 576), 'threading.Event', 'threading.Event', ([], {}), '()\n', (574, 576), False, 'import threading\n'), ((597, 695), 'threading.Thread', 'threading.Thread', ([], {'name': '"""multiprocessing"""', 'target': 'server', 'args': '(listener, semaphore, exit_flag)'}), "(name='multiprocessing', target=server, args=(listener,\n semaphore, exit_flag))\n", (613, 695), False, 'import threading\n'), ((7438, 7542), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n A server that executes arbitrary Python codes.\n """'}), '(description=\n """\n A server that executes arbitrary Python codes.\n """)\n', (7461, 7542), False, 'import argparse\n'), ((8477, 8528), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level', 'format': 'LOG_FORMAT'}), '(level=level, format=LOG_FORMAT)\n', (8496, 8528), False, 'import logging\n'), ((8678, 8705), 'os.getenv', 'os.getenv', (['args.authkey_var'], {}), '(args.authkey_var)\n', (8687, 8705), False, 'import os\n'), ((8955, 8984), 'backport.UnlimitedSemaphore', 'backport.UnlimitedSemaphore', ([], {}), '()\n', (8982, 8984), False, 'import backport\n'), ((9015, 9058), 'backport.BoundedSemaphore', 'backport.BoundedSemaphore', (['args.max_workers'], {}), '(args.max_workers)\n', (9040, 9058), False, 'import backport\n'), ((9064, 9090), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (9088, 9090), False, 'import threading\n'), ((1537, 1622), 'threading.Thread', 'threading.Thread', ([], {'name': "('multiprocessing-%02d' % worker_serial)", 'target': 'worker.run'}), "(name='multiprocessing-%02d' % worker_serial, target=worker.run\n )\n", (1553, 1622), False, 'import threading\n'), ((7344, 7379), 'contextlib.closing', 'contextlib.closing', (['context_manager'], {}), '(context_manager)\n', (7362, 7379), False, 'import contextlib\n'), ((9145, 9179), 'multiprocessing.connection.Listener', 'Listener', (['address'], {'authkey': 'authkey'}), '(address, authkey=authkey)\n', (9153, 9179), False, 'from multiprocessing.connection import Listener\n')] |
"""
A Config file reader
"""
import os
import configparser as ConfigParser
class ConfigFile:
"""
    Config file reader. Exposes typed accessors (e.g. _getint()) and an untyped one (_get())
    that return a default fallback value when a configuration directive is missing or
    commented out. A matching environment variable (SECTION_OPTION, upper-cased, dashes
    replaced by underscores) overrides the value in the file.
"""
def __init__(self, config_file=None):
self.config_file = config_file
# Parse config files and set options
self.config = ConfigParser.RawConfigParser()
if self.config_file is not None:
self.config.read(config_file)
def getConfigFile(self):
"""Return the current config_file"""
return self.config_file
def _get(self, section, option, default=None):
"""
Will check if section.option exists in config_file, return its value, default
otherwise
"""
if self._convert_to_env_var_str(('%s_%s' % (section, option))) in os.environ:
return os.environ[self._convert_to_env_var_str('%s_%s' % (section, option))]
if not self.config.has_section(section):
return default
if not self.config.has_option(section, option):
return default
if self.config.get(section, option) == 'None':
return None
return self.config.get(section, option)
def _getint(self, section, option, default=None):
"""
Will check if section.option exists in config_file, return its int casted value,
default otherwise
"""
if self._convert_to_env_var_str('%s_%s' % (section, option)) in os.environ:
return int(os.environ[self._convert_to_env_var_str('%s_%s' % (section, option))])
if not self.config.has_section(section):
return default
if not self.config.has_option(section, option):
return default
if self.config.get(section, option) == 'None':
return default
return self.config.getint(section, option)
def _getfloat(self, section, option, default=None):
"""
Will check if section.option exists in config_file, return its float casted value,
default otherwise
"""
if self._convert_to_env_var_str(('%s_%s' % (section, option))) in os.environ:
return float(os.environ[self._convert_to_env_var_str('%s_%s' % (section, option))])
if not self.config.has_section(section):
return default
if not self.config.has_option(section, option):
return default
if self.config.get(section, option) == 'None':
return default
return self.config.getfloat(section, option)
def _getbool(self, section, option, default=None):
"""
Will check if section.option exists in config_file, return its bool casted value,
default otherwise
"""
        if self._convert_to_env_var_str('%s_%s' % (section, option)) in os.environ:
return self._convert_to_bool(os.environ[self._convert_to_env_var_str('%s_%s' % (section, option))])
if not self.config.has_section(section):
return default
if not self.config.has_option(section, option):
return default
return self.config.getboolean(section, option)
def _convert_to_bool(self, value):
if isinstance(value, str):
return value.lower() in ['t', 'true', 'yes', 'y', '1']
return bool(value)
def _convert_to_env_var_str(self, env_str):
"""
Dashes in env strings don't work well in bash so we shouldnt expect them
This is not the value stored in the var but the var name itself
Also converts it to upper case
:param env_str: The string to convert to an env friendly string
:type env_str: str
:return: converted string
:rtype: str
"""
return env_str.replace('-', '_').upper()
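# A short usage sketch for the reader above. The file name, section, and option
# names are made up for illustration; only the accessor methods themselves come
# from this module. Because ConfigParser.read() silently ignores a missing file,
# the calls below simply fall back to their defaults if 'settings.cfg' does not
# exist. An environment variable such as HTTP_SERVER_PORT would override the
# corresponding file value.
def _example_usage(path='settings.cfg'):
    config = ConfigFile(path)
    port = config._getint('http-server', 'port', default=8080)
    debug = config._getbool('http-server', 'debug', default=False)
    greeting = config._get('http-server', 'greeting', default='hello')
    return port, debug, greeting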
| [
"configparser.RawConfigParser"
] | [((451, 481), 'configparser.RawConfigParser', 'ConfigParser.RawConfigParser', ([], {}), '()\n', (479, 481), True, 'import configparser as ConfigParser\n')] |
import base64
import hashlib
import io
import json
import os.path
import re
from urllib.parse import parse_qs
import requests
from PIL import Image
whitelisted_headers = [
'last-modified',
'cache-control',
'content-type',
'etag',
]
def get_size(query):
query = parse_qs(query)
width = None
height = None
if 'width' in query and len(query['width']) > 0:
width = int(query['width'][0])
if 'height' in query and len(query['height']) > 0:
height = int(query['height'][0])
return (width, height,) if width is not None or height is not None else None
def resize_image(image_bytes, size):
image = Image.open(io.BytesIO(image_bytes))
image_w, image_h = image.size
thumb_w = size[0] if size[0] is not None else image_w
thumb_h = size[1] if size[1] is not None else image_h
image.thumbnail((thumb_w, thumb_h,))
output_bytes = io.BytesIO()
if image.mode != 'RGB':
image = image.convert('RGB')
image.save(output_bytes, format='PNG')
return output_bytes.getvalue()
def parse_headers(origin_headers):
headers = {}
for k in origin_headers:
origin_header = origin_headers[k][0]
headers[k] = origin_header['value']
return headers
def get_origin_domain(origin):
for k in origin:
item = origin[k]
if 'domainName' in item:
return item['domainName']
return None
def build_response(response, content, success):
content_length = len(content)
content_hash = '"{0}"'.format(hashlib.md5(content).hexdigest())
content = base64.b64encode(content).decode()
headers = {}
for k in response.headers:
if k.lower() in whitelisted_headers:
headers[k.lower()] = [{
'value': response.headers[k],
}]
if success:
headers['etag'] = [{
'key': 'ETag',
'value': content_hash,
}]
headers['content-type'] = [{
'key': 'Content-Type',
'value': 'image/png',
}]
return {
'bodyEncoding': 'base64',
'body': content,
'status': response.status_code,
'statusDescription': response.reason,
'headers': headers
}
def lambda_handler(event, context):
if event['Records'] and len(event['Records']) > 0:
record = event['Records'][0]
if 'cf' in record and 'request' in record['cf'] and record['cf']['request'] is not None:
origin_request = record['cf']['request']
try:
size = get_size(origin_request['querystring'])
            except Exception:
size = None
file_ext = os.path.splitext(origin_request['uri'])
if (origin_request['method'].upper() == 'GET') and (size is not None) and (len(file_ext) == 2) and (file_ext[1].lower() in ['.png', '.jpg', '.jpeg', '.jfif']):
headers = parse_headers(origin_request['headers'])
query = '?{0}'.format(origin_request['querystring']) if len(origin_request['querystring']) > 0 else ''
if 'accept-encoding' in headers:
del headers['accept-encoding']
if 'custom' in origin_request['origin']:
headers['host'] = origin_request['origin']['custom']['domainName']
origin_headers = parse_headers(origin_request['origin']['custom']['customHeaders'])
headers = {**headers, **origin_headers}
url = '{0}://{1}:{2}{3}{4}{5}'.format(
origin_request['origin']['custom']['protocol'],
origin_request['origin']['custom']['domainName'],
origin_request['origin']['custom']['port'],
origin_request['origin']['custom']['path'],
origin_request['uri'],
query)
elif 's3' in origin_request['origin']:
headers['host'] = origin_request['origin']['s3']['domainName']
origin_headers = parse_headers(origin_request['origin']['s3']['customHeaders'])
headers = {**headers, **origin_headers}
url = '{0}://{1}:{2}{3}{4}{5}'.format(
'https',
origin_request['origin']['s3']['domainName'],
443,
origin_request['origin']['s3']['path'],
origin_request['uri'],
query)
else:
return origin_request
try:
response = requests.get(url, headers=headers, timeout=30)
if response.status_code == 200 and response.headers['content-type'] in ['image/png', 'image/jpeg']:
thumbnail = resize_image(response.content, size)
return build_response(response, thumbnail, True)
else:
return build_response(response, response.content, False)
except requests.exceptions.Timeout as e:
return {
'bodyEncoding': 'text',
'body': 'Gateway Timed Out',
'status': 504,
'statusDescription': 'Gateway Timeout',
'headers': {}
}
else:
if 'host' in origin_request['headers']:
origin_request['headers']['host'][0]['value'] = get_origin_domain(origin_request['origin'])
return origin_request
return None
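# A minimal local-invocation sketch. The event below contains only the fields
# that lambda_handler() actually reads from a CloudFront origin-request record;
# the domain name and URI are placeholders. Invoking the handler with it issues
# a real HTTP request to that origin, so this is intended for manual testing.
def _example_event(uri='/images/photo.jpg', width=200):
    return {
        'Records': [{
            'cf': {
                'request': {
                    'method': 'GET',
                    'uri': uri,
                    'querystring': 'width={0}'.format(width),
                    'headers': {},
                    'origin': {
                        'custom': {
                            'protocol': 'https',
                            'domainName': 'example.com',
                            'port': 443,
                            'path': '',
                            'customHeaders': {},
                        },
                    },
                },
            },
        }],
    }
# response = lambda_handler(_example_event(), None)  # uncomment to exercise locally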
| [
"hashlib.md5",
"base64.b64encode",
"io.BytesIO",
"requests.get",
"urllib.parse.parse_qs"
] | [((286, 301), 'urllib.parse.parse_qs', 'parse_qs', (['query'], {}), '(query)\n', (294, 301), False, 'from urllib.parse import parse_qs\n'), ((905, 917), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (915, 917), False, 'import io\n'), ((668, 691), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (678, 691), False, 'import io\n'), ((1584, 1609), 'base64.b64encode', 'base64.b64encode', (['content'], {}), '(content)\n', (1600, 1609), False, 'import base64\n'), ((1536, 1556), 'hashlib.md5', 'hashlib.md5', (['content'], {}), '(content)\n', (1547, 1556), False, 'import hashlib\n'), ((4709, 4755), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'timeout': '(30)'}), '(url, headers=headers, timeout=30)\n', (4721, 4755), False, 'import requests\n')] |
"""DyNA-PPO explorer."""
from functools import partial
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
import scipy.stats
import sklearn
import sklearn.ensemble
import sklearn.gaussian_process
import sklearn.linear_model
import sklearn.tree
import tensorflow as tf
from sklearn.gaussian_process.kernels import RBF, RationalQuadratic, Matern
from tf_agents.agents.ppo import ppo_agent
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.environments import tf_py_environment
from tf_agents.environments.utils import validate_py_environment
from tf_agents.networks import actor_distribution_network, value_network
from tf_agents.replay_buffers import tf_uniform_replay_buffer
import tf_agents.policies.tf_policy
import flexs
from flexs import baselines
from flexs.baselines.explorers.environments.dyna_ppo import (
DynaPPOEnvironment as DynaPPOEnv,
DynaPPOEnvironmentStoppableEpisode as DynaPPOStoppableEnv
)
from flexs.baselines.explorers.environments.dyna_ppo import (
DynaPPOEnvironmentMutative as DynaPPOEnvMut,
)
from flexs.utils import sequence_utils as s_utils
class DynaPPOEnsemble(flexs.Model):
"""
Ensemble from DyNAPPO paper.
Ensembles many models together but only uses those with an $r^2$ above
a certain threshold (on validation data) at test-time.
"""
def __init__(
self,
seq_len: int,
alphabet: str,
r_squared_threshold: float = 0.2,
models: Optional[List[flexs.Model]] = None,
use_gaussian_process: bool = False,
):
"""Create the ensemble from `models`."""
super().__init__(name="DynaPPOEnsemble")
if models is None:
models = [
baselines.models.CNNEnsemble(seq_len, alphabet),
baselines.models.SklearnRegressor(
sklearn.neighbors.KNeighborsRegressor,
alphabet,
"nearest_neighbors",
seq_len,
hparam_tune=True,
hparams_to_search={
'n_neighbors': [2, 5, 10, 15],
},
nfolds=5,
),
baselines.models.BayesianRidge(
alphabet,
seq_len,
hparam_tune=True,
hparams_to_search={
'alpha_1': [1e-5, 1e-6, 1e-7],
'alpha_2': [1e-5, 1e-6, 1e-7],
'lambda_1': [1e-5, 1e-6, 1e-7],
                        'lambda_2': [1e-5, 1e-6, 1e-7],
},
nfolds=5,
),
baselines.models.RandomForest(
alphabet,
seq_len,
hparam_tune=True,
hparams_to_search={
'max_depth': [8, None],
'max_features': [seq_len // 4, seq_len // 2, seq_len],
'n_estimators': [10, 100, 200],
},
nfolds=5,
),
baselines.models.SklearnRegressor(
sklearn.tree.ExtraTreeRegressor,
alphabet,
"extra_trees",
seq_len,
hparam_tune=True,
hparams_to_search={
'max_depth': [8, None],
'max_features': [seq_len // 4, seq_len // 2, seq_len],
},
nfolds=5,
),
baselines.models.SklearnRegressor(
sklearn.ensemble.GradientBoostingRegressor,
alphabet,
"gradient_boosting",
seq_len,
hparam_tune=True,
hparams_to_search={
'max_depth': [8, None],
'max_features': [seq_len // 4, seq_len // 2, seq_len],
'learning_rate': [1., 1e-1, 1e-2],
},
nfolds=5,
),
]
if use_gaussian_process:
models.append(
baselines.models.SklearnRegressor(
sklearn.gaussian_process.GaussianProcessRegressor,
alphabet,
"gaussian_process",
seq_len,
hparam_tune=True,
hparams_to_search={
'kernel': [RBF(), RationalQuadratic(), Matern()],
},
nfolds=5,
)
)
self.models = models
self.r_squared_vals = np.ones(len(self.models))
self.r_squared_threshold = r_squared_threshold
def _train(self, sequences, labels):
if len(sequences) < 10:
return
self.r_squared_vals = [
model.train(sequences, labels)
for model in self.models
]
def _fitness_function(self, sequences):
passing_models = [
model
for model, r_squared in zip(self.models, self.r_squared_vals)
if r_squared >= self.r_squared_threshold
]
if len(passing_models) == 0:
            # Fall back to the single best model when no member passes the r^2 threshold
            val = np.argmax(self.r_squared_vals)
            return self.models[val].get_fitness(sequences)
return np.mean(
[model.get_fitness(sequences) for model in passing_models], axis=0
)
def _fitness_function_uncert(self, sequences):
passing_models = [
model
for model, r_squared in zip(self.models, self.r_squared_vals)
if r_squared >= self.r_squared_threshold
]
if len(passing_models) == 0:
            # Fall back to the single best model (with zero uncertainty) when none pass
            val = np.argmax(self.r_squared_vals)
            return self.models[val].get_fitness(sequences), np.zeros(len(sequences))
preds = np.array([model.get_fitness(sequences) for model in passing_models])
return preds.mean(axis=0), preds.std(axis=0)
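# A small usage sketch for the ensemble above. The DNA alphabet, sequence length,
# and random labels are illustrative only; in practice the explorer trains this
# ensemble on measured sequences. Only train()/get_fitness(), the flexs.Model
# interface already used elsewhere in this module, is exercised here.
def _ensemble_smoke_test(seq_len=10, alphabet="ATCG", n=20):
    ens = DynaPPOEnsemble(seq_len, alphabet)
    seqs = s_utils.generate_random_sequences(seq_len, n, alphabet)
    labels = np.random.rand(n)
    ens.train(seqs, labels)        # each member records a validation r^2
    return ens.get_fitness(seqs)   # mean prediction of the members passing the threshold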
class DummySeqLenRewardEnsemble(flexs.Model):
def __init__(
self,
seq_len: int,
alphabet: str,
r_squared_threshold: float = 0.5,
models: Optional[List[flexs.Model]] = None,
):
"""Create the ensemble from `models`."""
super().__init__(name="DummySeqLenRewardEnsemble")
def _train(self, sequences, labels):
return
def _fitness_function(self, sequences):
return np.array([len(seq) for seq in sequences], dtype=np.float32)
def _fitness_function_uncert(self, sequences):
return (
np.array([len(seq) for seq in sequences], dtype=np.float32),
np.zeros(len(sequences), dtype=np.float32)
)
class DynaPPO(flexs.Explorer):
"""
Explorer which implements DynaPPO.
This RL-based sequence design algorithm works as follows:
for r in rounds:
train_policy(experimental_data_rewards[r])
for m in model_based_rounds:
train_policy(model_fitness_rewards[m])
An episode for the agent begins with an empty sequence, and at
each timestep, one new residue is generated and added to the sequence
until the desired length of the sequence is reached. The reward
is zero at all timesteps until the last one, when the reward is
`reward = lambda * sequence_density + sequence_fitness` where
sequence density is the density of nearby sequences already proposed.
As described above, this explorer generates sequences *constructively*.
Paper: https://openreview.net/pdf?id=HklxbgBKvr
"""
def __init__(
self,
landscape: flexs.Landscape,
rounds: int,
sequences_batch_size: int,
model_queries_per_batch: int,
starting_sequence: str,
alphabet: str,
log_file: Optional[str] = None,
model: Optional[flexs.Model] = None,
num_experiment_rounds: int = 10,
num_model_rounds: int = 1,
env_batch_size: int = 4,
min_proposal_seq_len: int = 7,
lr=1e-4,
agent_train_epochs=10,
penalty_scale = 0.1,
distance_radius = 2,
use_dummy_model=False,
use_gaussian_process=False,
use_stoppable_env=True,
):
"""
Args:
num_experiment_rounds: Number of experiment-based rounds to run. This is by
                default set to 10, the same as the number of sequence-proposal rounds run.
num_model_rounds: Number of model-based rounds to run.
env_batch_size: Number of epsisodes to batch together and run in parallel.
"""
tf.config.run_functions_eagerly(False)
name = f"DynaPPO_Agent_{num_experiment_rounds}_{num_model_rounds}"
if model is None:
if use_dummy_model:
model = DummySeqLenRewardEnsemble(
len(starting_sequence),
alphabet,
)
else:
model = DynaPPOEnsemble(
60,
#len(starting_sequence),
alphabet,
use_gaussian_process=use_gaussian_process,
)
# Some models in the ensemble need to be trained on dummy dataset before
# they can predict
#model.train(
# s_utils.generate_random_sequences(len(starting_sequence), 10, alphabet),
# [0] * 10,
#)
super().__init__(
model,
name,
rounds,
sequences_batch_size,
model_queries_per_batch,
starting_sequence,
log_file,
)
self.alphabet = alphabet
self.num_experiment_rounds = num_experiment_rounds
self.num_model_rounds = num_model_rounds
self.env_batch_size = env_batch_size
self.min_proposal_seq_len = min_proposal_seq_len
env_type = DynaPPOStoppableEnv if use_stoppable_env else DynaPPOEnv
env = env_type(
self.alphabet,
len(starting_sequence),
model,
landscape,
env_batch_size,
penalty_scale=penalty_scale,
distance_radius=distance_radius
)
self.tf_env = tf_py_environment.TFPyEnvironment(env)
actor_net = actor_distribution_network.ActorDistributionNetwork(
self.tf_env.observation_spec(),
self.tf_env.action_spec(),
fc_layer_params=[128],
)
value_net = value_network.ValueNetwork(
self.tf_env.observation_spec(), fc_layer_params=[128]
)
print(self.tf_env.action_spec())
self.agent = ppo_agent.PPOAgent(
time_step_spec=self.tf_env.time_step_spec(),
action_spec=self.tf_env.action_spec(),
optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
actor_net=actor_net,
value_net=value_net,
num_epochs=agent_train_epochs,
summarize_grads_and_vars=False,
)
self.agent.initialize()
self.inner_rounds_iter = 0
self.should_terminate_round = False
self.highest_uncert = 0.0
self.uncert_thresh = 0.5
self.dataset_seqs = set(landscape.get_full_dataset()[0])
def add_last_seq_in_trajectory(self, experience, new_seqs):
"""Add the last sequence in an episode's trajectory.
Given a trajectory object, checks if the object is the last in the trajectory.
Since the environment ends the episode when the score is non-increasing, it
adds the associated maximum-valued sequence to the batch.
If the episode is ending, it changes the "current sequence" of the environment
to the next one in `last_batch`, so that when the environment resets, mutants
are generated from that new sequence.
"""
for is_bound, obs, reward in zip(experience.is_boundary(), experience.observation, experience.reward):
if is_bound:
seq = s_utils.one_hot_to_string(obs.numpy(), self.alphabet)
new_seqs[seq] = reward.numpy()
if self.tf_env.fitness_model_is_gt:
continue
uncert = self.tf_env.get_cached_uncertainty(seq)
if self.inner_rounds_iter == 1 and uncert >= self.highest_uncert:
self.highest_uncert = uncert
elif self.inner_rounds_iter > 1 and uncert >= (1 + self.uncert_thresh) * self.highest_uncert:
self.should_terminate_round = True
def _is_seq_long_enough(self, seq):
return len(seq) >= self.min_proposal_seq_len
def propose_sequences(
self, measured_sequences_data: pd.DataFrame
) -> Tuple[np.ndarray, np.ndarray]:
"""Propose top `sequences_batch_size` sequences for evaluation."""
replay_buffer_capacity = 10001
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
self.agent.collect_data_spec,
batch_size=self.env_batch_size,
max_length=replay_buffer_capacity,
)
sequences = {}
collect_driver = dynamic_episode_driver.DynamicEpisodeDriver(
self.tf_env,
self.agent.collect_policy,
observers=[
replay_buffer.add_batch,
partial(self.add_last_seq_in_trajectory, new_seqs=sequences),
],
num_episodes=1,
)
# Experiment-based training round. Each sequence we generate here must be
# evaluated by the ground truth landscape model. So each sequence we evaluate
# reduces our sequence proposal budget by one.
        # Here the entire per-round sequence batch budget is spent on this phase.
experiment_based_training_budget = self.sequences_batch_size
self.tf_env.set_fitness_model_to_gt(True)
previous_landscape_cost = self.tf_env.landscape.cost
while (
self.tf_env.landscape.cost - previous_landscape_cost
< experiment_based_training_budget
):
collect_driver.run()
trajectories = replay_buffer.gather_all()
self.agent.train(experience=trajectories)
replay_buffer.clear()
sequences.clear()
# Model-based training rounds
self.should_terminate_round = False
self.inner_rounds_iter = 1
self.tf_env.set_fitness_model_to_gt(False)
previous_model_cost = self.model.cost
for _ in range(self.num_model_rounds):
if self.model.cost - previous_model_cost >= self.model_queries_per_batch:
break
previous_round_model_cost = self.model.cost
while self.model.cost - previous_round_model_cost < int(
self.model_queries_per_batch / self.num_model_rounds
):
collect_driver.run()
if self.should_terminate_round:
break
trajectories = replay_buffer.gather_all()
rewards = trajectories.reward.numpy()[0]
mask = trajectories.is_last().numpy()[0]
masked_reward = rewards[mask]
mean_reward = masked_reward.mean()
self.agent.train(experience=trajectories)
replay_buffer.clear()
self.inner_rounds_iter += 1
measured_seqs = self.dataset_seqs.union(set(measured_sequences_data["sequence"]))
is_usable_seq = (
lambda x: x not in measured_seqs and self._is_seq_long_enough(x)
)
# We propose the top `self.sequences_batch_size` new sequences we have generated
to_propose = {
seq: fitness
for seq, fitness in sequences.items()
if is_usable_seq(seq)
}
while len(to_propose) < self.sequences_batch_size:
previous_round_model_cost = self.model.cost
while self.model.cost - previous_round_model_cost < int(
self.model_queries_per_batch / self.num_model_rounds
):
collect_driver.run()
to_propose = {
seq: fitness
for seq, fitness in sequences.items()
if is_usable_seq(seq)
}
new_seqs = np.array(list(to_propose.keys()))
preds = np.array(list(to_propose.values()))
sorted_order = np.argsort(preds)[::-1][: self.sequences_batch_size]
return new_seqs[sorted_order], preds[sorted_order]
class DynaPPOMutative(flexs.Explorer):
"""
Explorer which implements DynaPPO.
Note that unlike the other DynaPPO explorer, this one is mutative rather than
constructive. Specifically, instead of starting from an empty sequence
and generating residues one-by-one, this explorer starts from a complete
sequence (fitness thresholds to start with good sequences) and mutates it
until the mutant's fitness has started to decrease. Then it ends the episode.
This has proven to be a stronger algorithm than the original DyNAPPO.
Paper: https://openreview.net/pdf?id=HklxbgBKvr
"""
def __init__(
self,
landscape: flexs.Landscape,
rounds: int,
sequences_batch_size: int,
model_queries_per_batch: int,
starting_sequence: str,
alphabet: str,
log_file: Optional[str] = None,
model: Optional[flexs.Model] = None,
num_experiment_rounds: int = 10,
num_model_rounds: int = 1,
):
"""
Args:
num_experiment_rounds: Number of experiment-based rounds to run. This is by
                default set to 10, the same as the number of sequence-proposal rounds run.
num_model_rounds: Number of model-based rounds to run.
"""
tf.config.run_functions_eagerly(False)
name = f"DynaPPO_Agent_{num_experiment_rounds}_{num_model_rounds}"
if model is None:
model = DynaPPOEnsemble(
len(starting_sequence),
alphabet,
)
model.train(
s_utils.generate_random_sequences(len(starting_sequence), 10, alphabet),
[0] * 10,
)
super().__init__(
model,
name,
rounds,
sequences_batch_size,
model_queries_per_batch,
starting_sequence,
log_file,
)
self.alphabet = alphabet
self.num_experiment_rounds = num_experiment_rounds
self.num_model_rounds = num_model_rounds
env = DynaPPOEnvMut(
alphabet=self.alphabet,
starting_seq=starting_sequence,
model=model,
landscape=landscape,
max_num_steps=model_queries_per_batch,
)
validate_py_environment(env, episodes=1)
self.tf_env = tf_py_environment.TFPyEnvironment(env)
encoder_layer = tf.keras.layers.Lambda(lambda obs: obs["sequence"])
actor_net = actor_distribution_network.ActorDistributionNetwork(
self.tf_env.observation_spec(),
self.tf_env.action_spec(),
preprocessing_combiner=encoder_layer,
fc_layer_params=[128],
)
value_net = value_network.ValueNetwork(
self.tf_env.observation_spec(),
preprocessing_combiner=encoder_layer,
fc_layer_params=[128],
)
self.agent = ppo_agent.PPOAgent(
self.tf_env.time_step_spec(),
self.tf_env.action_spec(),
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
actor_net=actor_net,
value_net=value_net,
num_epochs=10,
summarize_grads_and_vars=False,
)
self.agent.initialize()
def add_last_seq_in_trajectory(self, experience, new_seqs):
"""Add the last sequence in an episode's trajectory.
Given a trajectory object, checks if the object is the last in the trajectory.
Since the environment ends the episode when the score is non-increasing, it
adds the associated maximum-valued sequence to the batch.
If the episode is ending, it changes the "current sequence" of the environment
to the next one in `last_batch`, so that when the environment resets, mutants
are generated from that new sequence.
"""
if experience.is_boundary():
seq = s_utils.one_hot_to_string(
experience.observation["sequence"].numpy()[0], self.alphabet
)
new_seqs[seq] = experience.observation["fitness"].numpy().squeeze()
top_fitness = max(new_seqs.values())
top_sequences = [
seq for seq, fitness in new_seqs.items() if fitness >= 0.9 * top_fitness
]
if len(top_sequences) > 0:
self.tf_env.pyenv.envs[0].seq = np.random.choice(top_sequences)
else:
self.tf_env.pyenv.envs[0].seq = np.random.choice(
[seq for seq, _ in new_seqs.items()]
)
def propose_sequences(
self, measured_sequences_data: pd.DataFrame
) -> Tuple[np.ndarray, np.ndarray]:
"""Propose top `sequences_batch_size` sequences for evaluation."""
num_parallel_environments = 1
replay_buffer_capacity = 10001
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
self.agent.collect_data_spec,
batch_size=num_parallel_environments,
max_length=replay_buffer_capacity,
)
sequences = {}
collect_driver = dynamic_episode_driver.DynamicEpisodeDriver(
self.tf_env,
self.agent.collect_policy,
observers=[
replay_buffer.add_batch,
partial(self.add_last_seq_in_trajectory, new_seqs=sequences),
],
num_episodes=1,
)
# Experiment-based training round. Each sequence we generate here must be
# evaluated by the ground truth landscape model. So each sequence we evaluate
# reduces our sequence proposal budget by one.
# We amortize this experiment-based training cost to be 1/2 of the sequence
# budget at round one and linearly interpolate to a cost of 0 by the last round.
current_round = measured_sequences_data["round"].max()
experiment_based_training_budget = int(
(self.rounds - current_round + 1)
/ self.rounds
* self.sequences_batch_size
/ 2
)
self.tf_env.envs[0].set_fitness_model_to_gt(True)
previous_landscape_cost = self.tf_env.envs[0].landscape.cost
while (
self.tf_env.envs[0].landscape.cost - previous_landscape_cost
< experiment_based_training_budget
):
collect_driver.run()
trajectories = replay_buffer.gather_all()
self.agent.train(experience=trajectories)
replay_buffer.clear()
sequences.clear()
# Model-based training rounds
self.tf_env.envs[0].set_fitness_model_to_gt(False)
previous_model_cost = self.model.cost
for _ in range(self.num_model_rounds):
if self.model.cost - previous_model_cost >= self.model_queries_per_batch:
break
previous_round_model_cost = self.model.cost
while self.model.cost - previous_round_model_cost < int(
self.model_queries_per_batch / self.num_model_rounds
):
collect_driver.run()
trajectories = replay_buffer.gather_all()
self.agent.train(experience=trajectories)
replay_buffer.clear()
# We propose the top `self.sequences_batch_size` new sequences we have generated
sequences = {
seq: fitness
for seq, fitness in sequences.items()
if seq not in set(measured_sequences_data["sequence"])
}
new_seqs = np.array(list(sequences.keys()))
preds = np.array(list(sequences.values()))
sorted_order = np.argsort(preds)[
: -(self.sequences_batch_size - experiment_based_training_budget) : -1
]
return new_seqs[sorted_order], preds[sorted_order]
| [
"tf_agents.replay_buffers.tf_uniform_replay_buffer.TFUniformReplayBuffer",
"tf_agents.environments.utils.validate_py_environment",
"tf_agents.environments.tf_py_environment.TFPyEnvironment",
"flexs.baselines.models.CNNEnsemble",
"flexs.baselines.models.BayesianRidge",
"sklearn.gaussian_process.kernels.RBF",
"tensorflow.config.run_functions_eagerly",
"tensorflow.keras.layers.Lambda",
"numpy.random.choice",
"numpy.argmax",
"numpy.argsort",
"flexs.baselines.models.SklearnRegressor",
"tensorflow.keras.optimizers.Adam",
"flexs.baselines.explorers.environments.dyna_ppo.DynaPPOEnvironmentMutative",
"functools.partial",
"sklearn.gaussian_process.kernels.RationalQuadratic",
"sklearn.gaussian_process.kernels.Matern",
"flexs.baselines.models.RandomForest"
] | [((8931, 8969), 'tensorflow.config.run_functions_eagerly', 'tf.config.run_functions_eagerly', (['(False)'], {}), '(False)\n', (8962, 8969), True, 'import tensorflow as tf\n'), ((10575, 10613), 'tf_agents.environments.tf_py_environment.TFPyEnvironment', 'tf_py_environment.TFPyEnvironment', (['env'], {}), '(env)\n', (10608, 10613), False, 'from tf_agents.environments import tf_py_environment\n'), ((13278, 13425), 'tf_agents.replay_buffers.tf_uniform_replay_buffer.TFUniformReplayBuffer', 'tf_uniform_replay_buffer.TFUniformReplayBuffer', (['self.agent.collect_data_spec'], {'batch_size': 'self.env_batch_size', 'max_length': 'replay_buffer_capacity'}), '(self.agent.collect_data_spec,\n batch_size=self.env_batch_size, max_length=replay_buffer_capacity)\n', (13324, 13425), False, 'from tf_agents.replay_buffers import tf_uniform_replay_buffer\n'), ((18297, 18335), 'tensorflow.config.run_functions_eagerly', 'tf.config.run_functions_eagerly', (['(False)'], {}), '(False)\n', (18328, 18335), True, 'import tensorflow as tf\n'), ((19085, 19232), 'flexs.baselines.explorers.environments.dyna_ppo.DynaPPOEnvironmentMutative', 'DynaPPOEnvMut', ([], {'alphabet': 'self.alphabet', 'starting_seq': 'starting_sequence', 'model': 'model', 'landscape': 'landscape', 'max_num_steps': 'model_queries_per_batch'}), '(alphabet=self.alphabet, starting_seq=starting_sequence, model\n =model, landscape=landscape, max_num_steps=model_queries_per_batch)\n', (19098, 19232), True, 'from flexs.baselines.explorers.environments.dyna_ppo import DynaPPOEnvironmentMutative as DynaPPOEnvMut\n'), ((19307, 19347), 'tf_agents.environments.utils.validate_py_environment', 'validate_py_environment', (['env'], {'episodes': '(1)'}), '(env, episodes=1)\n', (19330, 19347), False, 'from tf_agents.environments.utils import validate_py_environment\n'), ((19370, 19408), 'tf_agents.environments.tf_py_environment.TFPyEnvironment', 'tf_py_environment.TFPyEnvironment', (['env'], {}), '(env)\n', (19403, 19408), False, 'from tf_agents.environments import tf_py_environment\n'), ((19434, 19485), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (["(lambda obs: obs['sequence'])"], {}), "(lambda obs: obs['sequence'])\n", (19456, 19485), True, 'import tensorflow as tf\n'), ((21900, 22053), 'tf_agents.replay_buffers.tf_uniform_replay_buffer.TFUniformReplayBuffer', 'tf_uniform_replay_buffer.TFUniformReplayBuffer', (['self.agent.collect_data_spec'], {'batch_size': 'num_parallel_environments', 'max_length': 'replay_buffer_capacity'}), '(self.agent.collect_data_spec,\n batch_size=num_parallel_environments, max_length=replay_buffer_capacity)\n', (21946, 22053), False, 'from tf_agents.replay_buffers import tf_uniform_replay_buffer\n'), ((5376, 5406), 'numpy.argmax', 'np.argmax', (['self.r_squared_vals'], {}), '(self.r_squared_vals)\n', (5385, 5406), True, 'import numpy as np\n'), ((5958, 5988), 'numpy.argmax', 'np.argmax', (['self.r_squared_vals'], {}), '(self.r_squared_vals)\n', (5967, 5988), True, 'import numpy as np\n'), ((24640, 24657), 'numpy.argsort', 'np.argsort', (['preds'], {}), '(preds)\n', (24650, 24657), True, 'import numpy as np\n'), ((1739, 1786), 'flexs.baselines.models.CNNEnsemble', 'baselines.models.CNNEnsemble', (['seq_len', 'alphabet'], {}), '(seq_len, alphabet)\n', (1767, 1786), False, 'from flexs import baselines\n'), ((1804, 2003), 'flexs.baselines.models.SklearnRegressor', 'baselines.models.SklearnRegressor', (['sklearn.neighbors.KNeighborsRegressor', 'alphabet', '"""nearest_neighbors"""', 'seq_len'], {'hparam_tune': '(True)', 
'hparams_to_search': "{'n_neighbors': [2, 5, 10, 15]}", 'nfolds': '(5)'}), "(sklearn.neighbors.KNeighborsRegressor,\n alphabet, 'nearest_neighbors', seq_len, hparam_tune=True,\n hparams_to_search={'n_neighbors': [2, 5, 10, 15]}, nfolds=5)\n", (1837, 2003), False, 'from flexs import baselines\n'), ((2219, 2467), 'flexs.baselines.models.BayesianRidge', 'baselines.models.BayesianRidge', (['alphabet', 'seq_len'], {'hparam_tune': '(True)', 'hparams_to_search': "{'alpha_1': [1e-05, 1e-06, 1e-07], 'alpha_2': [1e-05, 1e-06, 1e-07],\n 'lambda_1': [1e-05, 1e-06, 1e-07], 'lambda_1': [1e-05, 1e-06, 1e-07]}", 'nfolds': '(5)'}), "(alphabet, seq_len, hparam_tune=True,\n hparams_to_search={'alpha_1': [1e-05, 1e-06, 1e-07], 'alpha_2': [1e-05,\n 1e-06, 1e-07], 'lambda_1': [1e-05, 1e-06, 1e-07], 'lambda_1': [1e-05, \n 1e-06, 1e-07]}, nfolds=5)\n", (2249, 2467), False, 'from flexs import baselines\n'), ((2698, 2914), 'flexs.baselines.models.RandomForest', 'baselines.models.RandomForest', (['alphabet', 'seq_len'], {'hparam_tune': '(True)', 'hparams_to_search': "{'max_depth': [8, None], 'max_features': [seq_len // 4, seq_len // 2,\n seq_len], 'n_estimators': [10, 100, 200]}", 'nfolds': '(5)'}), "(alphabet, seq_len, hparam_tune=True,\n hparams_to_search={'max_depth': [8, None], 'max_features': [seq_len // \n 4, seq_len // 2, seq_len], 'n_estimators': [10, 100, 200]}, nfolds=5)\n", (2727, 2914), False, 'from flexs import baselines\n'), ((3137, 3377), 'flexs.baselines.models.SklearnRegressor', 'baselines.models.SklearnRegressor', (['sklearn.tree.ExtraTreeRegressor', 'alphabet', '"""extra_trees"""', 'seq_len'], {'hparam_tune': '(True)', 'hparams_to_search': "{'max_depth': [8, None], 'max_features': [seq_len // 4, seq_len // 2, seq_len]}", 'nfolds': '(5)'}), "(sklearn.tree.ExtraTreeRegressor, alphabet,\n 'extra_trees', seq_len, hparam_tune=True, hparams_to_search={\n 'max_depth': [8, None], 'max_features': [seq_len // 4, seq_len // 2,\n seq_len]}, nfolds=5)\n", (3170, 3377), False, 'from flexs import baselines\n'), ((3612, 3908), 'flexs.baselines.models.SklearnRegressor', 'baselines.models.SklearnRegressor', (['sklearn.ensemble.GradientBoostingRegressor', 'alphabet', '"""gradient_boosting"""', 'seq_len'], {'hparam_tune': '(True)', 'hparams_to_search': "{'max_depth': [8, None], 'max_features': [seq_len // 4, seq_len // 2,\n seq_len], 'learning_rate': [1.0, 0.1, 0.01]}", 'nfolds': '(5)'}), "(sklearn.ensemble.\n GradientBoostingRegressor, alphabet, 'gradient_boosting', seq_len,\n hparam_tune=True, hparams_to_search={'max_depth': [8, None],\n 'max_features': [seq_len // 4, seq_len // 2, seq_len], 'learning_rate':\n [1.0, 0.1, 0.01]}, nfolds=5)\n", (3645, 3908), False, 'from flexs import baselines\n'), ((11153, 11195), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (11177, 11195), True, 'import tensorflow as tf\n'), ((16890, 16907), 'numpy.argsort', 'np.argsort', (['preds'], {}), '(preds)\n', (16900, 16907), True, 'import numpy as np\n'), ((20069, 20114), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (20093, 20114), True, 'import tensorflow as tf\n'), ((21413, 21444), 'numpy.random.choice', 'np.random.choice', (['top_sequences'], {}), '(top_sequences)\n', (21429, 21444), True, 'import numpy as np\n'), ((13708, 13768), 'functools.partial', 'partial', (['self.add_last_seq_in_trajectory'], {'new_seqs': 'sequences'}), '(self.add_last_seq_in_trajectory, 
new_seqs=sequences)\n', (13715, 13768), False, 'from functools import partial\n'), ((22336, 22396), 'functools.partial', 'partial', (['self.add_last_seq_in_trajectory'], {'new_seqs': 'sequences'}), '(self.add_last_seq_in_trajectory, new_seqs=sequences)\n', (22343, 22396), False, 'from functools import partial\n'), ((4596, 4601), 'sklearn.gaussian_process.kernels.RBF', 'RBF', ([], {}), '()\n', (4599, 4601), False, 'from sklearn.gaussian_process.kernels import RBF, RationalQuadratic, Matern\n'), ((4603, 4622), 'sklearn.gaussian_process.kernels.RationalQuadratic', 'RationalQuadratic', ([], {}), '()\n', (4620, 4622), False, 'from sklearn.gaussian_process.kernels import RBF, RationalQuadratic, Matern\n'), ((4624, 4632), 'sklearn.gaussian_process.kernels.Matern', 'Matern', ([], {}), '()\n', (4630, 4632), False, 'from sklearn.gaussian_process.kernels import RBF, RationalQuadratic, Matern\n')] |
import numpy as np
from scipy.signal import ricker
import time
import matplotlib.pyplot as plt
import random as rnd
from objects.seismic.observation import Observation, Source, Receiver
from objects.seismic.seismogram import Trace, Seismogram
from fmodeling.seismic.ray_tracing.case_1D.forward_tracing1D import calculate_rays
from fmodeling.seismic.dynamic.reflection import calculate_reflections
from fmodeling.seismic.dynamic.transmission import calculate_refraction_vectorized
from fmodeling.seismic.dynamic.bounds import calculate_bounds
from Visualization.Seismic import visualize_model1D, visualize_rays_model_1D, \
visualize_time_curves, \
visualize_reflection_amplitudes, visualize_seismogram
def add_noise_rays(rays, depths):
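    """Add random travel-time noise to rays, grouped by reflection depth."""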
for d in depths[1:]:
rays_ = [r for r in rays if r.reflection_z == d]
times_ = np.array([r.time for r in rays_])
mean_time = np.mean(times_)
for r in rays_:
percent_coeff = 0.1
            # error taken as the mean time multiplied by 10 %
value = mean_time * percent_coeff
            # fixed error of 50 ms (overrides the percentage-based value above)
value = 0.05
random_noise = (2 * rnd.random() - 1) * value
r.time += random_noise
def forward(model, x_rec, wavetypes, display_stat=False, visualize_res=True, noise=False):
'''
    :param model: Geological model
    :param x_rec: Array of receiver positions (x-coordinates)
    :param wavetypes: List of wave types to process
    :return: observation geometry and a dict of calculated rays per wave type
'''
if display_stat:
disp_func = lambda x: print(x)
else:
        disp_func = lambda x: x  # no-op when status printing is disabled
disp_func('Calculating rockphysics model...')
rp_start_time = time.time()
model.calculate_rockphysics()
disp_func('Rockphysics model calculated!')
    # Build the observation geometry (sources and receivers)
sources = [Source(0, 0, 0)]
receivers = [Receiver(x) for x in x_rec]
observe = Observation(sources, receivers)
result_rays = {}
# calculating dynamics
for wt in wavetypes:
disp_func(f'Calculating {wt.name}-rays...')
result_rays[wt] = calculate_rays(observe, model, wt)
if noise:
add_noise_rays(result_rays[wt], model.get_depths())
calculate_bounds(model, result_rays[wt])
# disp_func(f'Calculating {wt.name}-reflections...')
#
# calculate_reflections(model, result_rays[wt], wt)
#
# disp_func('Calculating p-refractions...')
#
# calculate_refraction_vectorized(model, result_rays[wt], wt)
if visualize_res:
max_depth = model.get_max_boundary_depth() * 1.2
dz = 100
disp_func('Drawing results...')
fig, axes = plt.subplots(nrows=3, ncols=len(result_rays))
        for i, (key, value) in enumerate(result_rays.items()):
# visualize_model_wellogs(axes[2, 0], model, 'vp')
###############HARDCODE ABOUT VEL TYPE!!!!!!!!!
visualize_model1D(axes[2, i], model, observe, max_depth, dz, 'vp', only_boundaries=True)
visualize_rays_model_1D(axes[2, i], value)
axes[2, i].invert_yaxis()
# axes[2, 0].set_title('model and rays for p-waves')
# visualize_model_wellogs(axes[2, 0], model, 'vs')
# axes[2, 1].set_title('model and rays for s-waves')
visualize_time_curves(axes[1, i], model, value, observe)
axes[1, i].set_title('time curves for p-waves')
visualize_reflection_amplitudes(axes[0, i], model.get_depths()[1:], value, absc='angle')
axes[0, i].set_title('avo for p-waves')
plt.show()
return observe, result_rays
def create_seismogram(rays, observe, dt, tracelen):
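    """Build a seismogram per receiver: place each ray's dynamic factor at its arrival time and convolve with a Ricker wavelet."""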
seismogram = Seismogram()
times = [dt * i for i in range(tracelen)]
for j, rec in enumerate(observe.receivers):
offset = rec.x
# rays_ = [r for r in np.nditer(rays) if abs(r.x_finish - offset) <= 0.2]
rays_ = [rr for r in rays.values() for rr in r if abs(rr.x_finish - offset) <= 0.001]
trace_i = np.zeros(len(times))
for ray in rays_:
# ampl_curve = [r for r in reflections if float(r.boundary_z) == float(ray.reflection_z)][0]
# r_coeff = ampl_curve.get_amplitude_by_offset(offset)
r_coeff = ray.calculate_dynamic_factor()
reflection_index = int(ray.time / dt)
if reflection_index < len(trace_i):
trace_i[reflection_index] = r_coeff.real
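        # 50-sample Ricker wavelet with width parameter 7, convolved with the spike train below.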
signal = ricker(50, 7)
signal /= max(abs(signal))
trace_values = np.convolve(trace_i, signal)[0: len(times)].real
seismogram.add_trace(Trace(trace_values, dt, offset))
return seismogram
def forward_with_trace_calcing(model, x_rec, dt, trace_len, wavetypes, display_stat=False, visualize_res=False,
visualize_seismograms=False):
observe, rays = forward(model, x_rec, wavetypes, display_stat=display_stat, visualize_res=visualize_res,)
res_seismic = {}
for key in rays.keys():
        # TODO: check that seismograms are not passed by reference!!
seismogram = create_seismogram(rays[key], observe, dt, trace_len)
res_seismic[key] = {
"rays": rays[key],
"seismogram": seismogram
}
if visualize_seismograms:
fig, axes = plt.subplots(nrows=1, ncols=len(rays))
for i, key in enumerate(res_seismic.keys()):
if len(rays) == 1:
visualize_seismogram(fig, axes, res_seismic[key]["seismogram"], normalize=True, wiggles=False)
axes.set_title('waves seismogram')
else:
visualize_seismogram(fig, axes[i], res_seismic[key]["seismogram"], normalize=True, wiggles=False)
axes[i].set_title('waves seismogram')
plt.show()
return observe, res_seismic
| [
"numpy.mean",
"scipy.signal.ricker",
"numpy.convolve",
"Visualization.Seismic.visualize_seismogram",
"matplotlib.pyplot.show",
"Visualization.Seismic.visualize_rays_model_1D",
"Visualization.Seismic.visualize_time_curves",
"fmodeling.seismic.dynamic.bounds.calculate_bounds",
"objects.seismic.observation.Observation",
"fmodeling.seismic.ray_tracing.case_1D.forward_tracing1D.calculate_rays",
"numpy.array",
"Visualization.Seismic.visualize_model1D",
"random.random",
"objects.seismic.seismogram.Trace",
"objects.seismic.observation.Source",
"objects.seismic.observation.Receiver",
"time.time",
"objects.seismic.seismogram.Seismogram"
] | [((1717, 1728), 'time.time', 'time.time', ([], {}), '()\n', (1726, 1728), False, 'import time\n'), ((1965, 1996), 'objects.seismic.observation.Observation', 'Observation', (['sources', 'receivers'], {}), '(sources, receivers)\n', (1976, 1996), False, 'from objects.seismic.observation import Observation, Source, Receiver\n'), ((3776, 3788), 'objects.seismic.seismogram.Seismogram', 'Seismogram', ([], {}), '()\n', (3786, 3788), False, 'from objects.seismic.seismogram import Trace, Seismogram\n'), ((845, 878), 'numpy.array', 'np.array', (['[r.time for r in rays_]'], {}), '([r.time for r in rays_])\n', (853, 878), True, 'import numpy as np\n'), ((899, 914), 'numpy.mean', 'np.mean', (['times_'], {}), '(times_)\n', (906, 914), True, 'import numpy as np\n'), ((1889, 1904), 'objects.seismic.observation.Source', 'Source', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1895, 1904), False, 'from objects.seismic.observation import Observation, Source, Receiver\n'), ((1923, 1934), 'objects.seismic.observation.Receiver', 'Receiver', (['x'], {}), '(x)\n', (1931, 1934), False, 'from objects.seismic.observation import Observation, Source, Receiver\n'), ((2151, 2185), 'fmodeling.seismic.ray_tracing.case_1D.forward_tracing1D.calculate_rays', 'calculate_rays', (['observe', 'model', 'wt'], {}), '(observe, model, wt)\n', (2165, 2185), False, 'from fmodeling.seismic.ray_tracing.case_1D.forward_tracing1D import calculate_rays\n'), ((2278, 2318), 'fmodeling.seismic.dynamic.bounds.calculate_bounds', 'calculate_bounds', (['model', 'result_rays[wt]'], {}), '(model, result_rays[wt])\n', (2294, 2318), False, 'from fmodeling.seismic.dynamic.bounds import calculate_bounds\n'), ((3661, 3671), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3669, 3671), True, 'import matplotlib.pyplot as plt\n'), ((4549, 4562), 'scipy.signal.ricker', 'ricker', (['(50)', '(7)'], {}), '(50, 7)\n', (4555, 4562), False, 'from scipy.signal import ricker\n'), ((5878, 5888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5886, 5888), True, 'import matplotlib.pyplot as plt\n'), ((2992, 3084), 'Visualization.Seismic.visualize_model1D', 'visualize_model1D', (['axes[2, i]', 'model', 'observe', 'max_depth', 'dz', '"""vp"""'], {'only_boundaries': '(True)'}), "(axes[2, i], model, observe, max_depth, dz, 'vp',\n only_boundaries=True)\n", (3009, 3084), False, 'from Visualization.Seismic import visualize_model1D, visualize_rays_model_1D, visualize_time_curves, visualize_reflection_amplitudes, visualize_seismogram\n'), ((3093, 3135), 'Visualization.Seismic.visualize_rays_model_1D', 'visualize_rays_model_1D', (['axes[2, i]', 'value'], {}), '(axes[2, i], value)\n', (3116, 3135), False, 'from Visualization.Seismic import visualize_model1D, visualize_rays_model_1D, visualize_time_curves, visualize_reflection_amplitudes, visualize_seismogram\n'), ((3381, 3437), 'Visualization.Seismic.visualize_time_curves', 'visualize_time_curves', (['axes[1, i]', 'model', 'value', 'observe'], {}), '(axes[1, i], model, value, observe)\n', (3402, 3437), False, 'from Visualization.Seismic import visualize_model1D, visualize_rays_model_1D, visualize_time_curves, visualize_reflection_amplitudes, visualize_seismogram\n'), ((4701, 4732), 'objects.seismic.seismogram.Trace', 'Trace', (['trace_values', 'dt', 'offset'], {}), '(trace_values, dt, offset)\n', (4706, 4732), False, 'from objects.seismic.seismogram import Trace, Seismogram\n'), ((4622, 4650), 'numpy.convolve', 'np.convolve', (['trace_i', 'signal'], {}), '(trace_i, signal)\n', (4633, 4650), True, 'import 
numpy as np\n'), ((5536, 5635), 'Visualization.Seismic.visualize_seismogram', 'visualize_seismogram', (['fig', 'axes', "res_seismic[key]['seismogram']"], {'normalize': '(True)', 'wiggles': '(False)'}), "(fig, axes, res_seismic[key]['seismogram'], normalize=\n True, wiggles=False)\n", (5556, 5635), False, 'from Visualization.Seismic import visualize_model1D, visualize_rays_model_1D, visualize_time_curves, visualize_reflection_amplitudes, visualize_seismogram\n'), ((5717, 5818), 'Visualization.Seismic.visualize_seismogram', 'visualize_seismogram', (['fig', 'axes[i]', "res_seismic[key]['seismogram']"], {'normalize': '(True)', 'wiggles': '(False)'}), "(fig, axes[i], res_seismic[key]['seismogram'],\n normalize=True, wiggles=False)\n", (5737, 5818), False, 'from Visualization.Seismic import visualize_model1D, visualize_rays_model_1D, visualize_time_curves, visualize_reflection_amplitudes, visualize_seismogram\n'), ((1175, 1187), 'random.random', 'rnd.random', ([], {}), '()\n', (1185, 1187), True, 'import random as rnd\n')] |
import argparse
from Crypto.PublicKey import RSA
from .uploader import Uploader
from .downloader import Downloader
from .keygenerator import KeyGenerator
def RSAKeyType(path):
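    """argparse type callable that loads an RSA key from the given file path."""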
with open(path, 'rb') as raw_key:
return RSA.importKey(raw_key.read())
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
parser.add_argument(
'--region', default='eu-west-1', help='''AWS region (default:
eu-west-1)''')
upload = subparsers.add_parser('upload')
upload.set_defaults(cls=Uploader)
upload.add_argument(
'--private-key', dest='rsa_key', type=RSAKeyType, required=True,
help='''Private RSA key to use to sign the manifest''')
upload.add_argument(
'--bucket', required=True, help='''S3 bucket to upload files and
manifest to''')
upload.add_argument(
'--prefix', required=True, help='S3 key prefix for uploaded files')
upload.add_argument(
'--manifest', default='latest', help='''S3 key for manifest (default:
latest)''')
upload.add_argument(
'directory', metavar='SOURCE', help='''Directory to upload to S3; the
local directory structure will be replicated in S3''')
download = subparsers.add_parser('download')
download.set_defaults(cls=Downloader)
download.add_argument(
'--public-key', dest='rsa_key', type=RSAKeyType, required=True,
help='''Public RSA key to use to verify integrity of the manifest''')
download.add_argument(
'--bucket', required=True, help='''S3 bucket to download files and
manifest from''')
download.add_argument(
'--manifest', default='latest', help='''S3 key for manifest (default:
latest)''')
download.add_argument(
'--strip', default=0, type=int, help='''Number of path components to
strip from downloaded files (default: 0)''')
# TODO: caching
download.add_argument(
'--cache', default='.sumsy', help='''Directory to use for caching
downloaded files (default: .sumsy)''')
download.add_argument(
'directory', metavar='DESTINATION', help='''Directory to download files
to; the directory structure in S3 will be replicated locally''')
generate_key = subparsers.add_parser('generate-key')
generate_key.set_defaults(cls=KeyGenerator)
generate_key.add_argument(
'--key-size', default=4096, type=int, help='''Key length, or size (in
bits) of the RSA modulus. Must be a multiple of 256, and no smaller than
1024 (default: 4096)''')
generate_key.add_argument(
'key_name', help='''File name for the generated private key; the public
key will use the same name with an additional '.pub' suffix''')
config = parser.parse_args()
if not hasattr(config, 'cls'):
parser.print_help()
raise SystemExit()
return config.cls(config)
def main():
actor = parse_args()
actor.run()
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser"
] | [((296, 321), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (319, 321), False, 'import argparse\n')] |
import argparse
import csv
import requests
import json
import logging
"""Standalone Python3 app to see the JSON for one activity."""
def readActivity(token, identifierType, id, summary):
"""Read a activity. Display JSON.
Parameters:
token Engage Integration API token
    identifierType  One of the valid identifier types:
"TRANSACTION_ID",
"TEMPLATE_ID",
"ACTIVITY_FORM_ID",
"SUPPORTER_ID"
id ID to use for search
summary True for basic activity information.
Useful for supporters and forms.
Errors:
    HTTP errors are noisily fatal.
    Engage-specific errors are also noisily fatal.
"""
searchURL = 'https://api.salsalabs.org/api/integration/ext/v1/activities/search'
# HTTP headers to send the API token.
headers = {
'authToken': token,
'content-type': 'application/json'
}
# Payload for the POST.
    # All of the unnecessary parameters have been stripped out.
# This works really well for the one UUID that we can specify.
params = {
"payload": {
"activityFormIds": [id],
"type": identifierType,
"count": 20,
"offset": 0
}
}
logging.info(f"Searching activitys for {identifierType} {id}")
r = requests.post(searchURL, headers=headers, data=json.dumps(params))
if (r.status_code != 200):
logging.fatal(f"error: HTTP status code {r.status_code}")
logging.fatal(json.dumps(json.loads(r.text), indent=4))
exit(1)
response = r.json()
if "errors" in response:
logging.fatal("Read errors:")
logging.fatal(json.dumps(response['errors'], indent=4))
dPayload = r.json()['payload']
if summary:
activitys = dPayload['activities']
logging.info(f"{'Activity ID':<36} {'Activity Date':<24} {'Activity Type':<16} {'TrackingCode'}")
for r in activitys:
if r['result'] == 'FOUND':
logging.info(f"{r['activityId']} {r['activityDate']} {r['activityType']:<16} {r['trackingCode']}")
else:
logging.info("No matching trasactions found")
else:
logging.info(f"Results:\n{json.dumps(dPayload, indent=4)}")
def main():
"""Program entry point. Uses a user-provided id, retrieves
activities and outputs JSON to the console."""
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
validActivityTypes = ["SUBSCRIPTION_MANAGEMENT",
"SUBSCRIBE",
"FUNDRAISE",
"PETITION",
"TARGETED_LETTER",
"REGULATION_COMMENTS",
"TICKETED_EVENT",
"P2P_EVENT",
"FACEBOOK_AD"]
parser = argparse.ArgumentParser(
description='Search for one activity')
parser.add_argument('--token', action='store', required=True,
help='Engage Integration API token')
parser.add_argument('--identifierType', choices=validActivityTypes,
default="SUBSCRIBE",
help="Search for this identifier type")
parser.add_argument('--id', action="store", required=True,
help="ID to use for searching")
parser.add_argument('--summary', action="store_true",
help="Only show basic activity information")
args = parser.parse_args()
readActivity(args.token, args.identifierType, args.id, args.summary)
if (__name__) == "__main__":
main()
| [
"logging.basicConfig",
"json.loads",
"argparse.ArgumentParser",
"json.dumps",
"logging.fatal",
"logging.info"
] | [((1370, 1432), 'logging.info', 'logging.info', (['f"""Searching activitys for {identifierType} {id}"""'], {}), "(f'Searching activitys for {identifierType} {id}')\n", (1382, 1432), False, 'import logging\n'), ((2519, 2592), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(message)s', level=logging.INFO)\n", (2538, 2592), False, 'import logging\n'), ((2995, 3057), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Search for one activity"""'}), "(description='Search for one activity')\n", (3018, 3057), False, 'import argparse\n'), ((1547, 1604), 'logging.fatal', 'logging.fatal', (['f"""error: HTTP status code {r.status_code}"""'], {}), "(f'error: HTTP status code {r.status_code}')\n", (1560, 1604), False, 'import logging\n'), ((1746, 1775), 'logging.fatal', 'logging.fatal', (['"""Read errors:"""'], {}), "('Read errors:')\n", (1759, 1775), False, 'import logging\n'), ((1942, 2052), 'logging.info', 'logging.info', (['f"""{\'Activity ID\':<36} {\'Activity Date\':<24} {\'Activity Type\':<16} {\'TrackingCode\'}"""'], {}), '(\n f"{\'Activity ID\':<36} {\'Activity Date\':<24} {\'Activity Type\':<16} {\'TrackingCode\'}"\n )\n', (1954, 2052), False, 'import logging\n'), ((1488, 1506), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (1498, 1506), False, 'import json\n'), ((1798, 1838), 'json.dumps', 'json.dumps', (["response['errors']"], {'indent': '(4)'}), "(response['errors'], indent=4)\n", (1808, 1838), False, 'import json\n'), ((1638, 1656), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1648, 1656), False, 'import json\n'), ((2126, 2237), 'logging.info', 'logging.info', (['f"""{r[\'activityId\']} {r[\'activityDate\']} {r[\'activityType\']:<16} {r[\'trackingCode\']}"""'], {}), '(\n f"{r[\'activityId\']} {r[\'activityDate\']} {r[\'activityType\']:<16} {r[\'trackingCode\']}"\n )\n', (2138, 2237), False, 'import logging\n'), ((2262, 2307), 'logging.info', 'logging.info', (['"""No matching trasactions found"""'], {}), "('No matching trasactions found')\n", (2274, 2307), False, 'import logging\n'), ((2352, 2382), 'json.dumps', 'json.dumps', (['dPayload'], {'indent': '(4)'}), '(dPayload, indent=4)\n', (2362, 2382), False, 'import json\n')] |
from setuptools import setup, find_packages
setup(
name='pyatool',
version='0.3.8',
description='python android toolkit',
author='williamfzc',
author_email='<EMAIL>',
url='https://github.com/williamfzc/pyatool',
packages=find_packages(),
install_requires=[
'requests',
'loguru',
]
)
| [
"setuptools.find_packages"
] | [((251, 266), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (264, 266), False, 'from setuptools import setup, find_packages\n')] |
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
return LaunchDescription([
Node(
package="cinematography",
node_executable="heading_estimation",
node_name="heading_estimation",
output="screen",
remappings=[
("rviz_pose", "/rviz/pose"),
("vision_measurements", "/auto_cinematography/vision/vision_measurements"),
("bounding_box", "/auto_cinematography/vision/bounding_box")
],
parameters=[
{"tensorrt_engine" : "deer_hde_fp32.rt"}
]
)
])
| [
"launch_ros.actions.Node"
] | [((148, 521), 'launch_ros.actions.Node', 'Node', ([], {'package': '"""cinematography"""', 'node_executable': '"""heading_estimation"""', 'node_name': '"""heading_estimation"""', 'output': '"""screen"""', 'remappings': "[('rviz_pose', '/rviz/pose'), ('vision_measurements',\n '/auto_cinematography/vision/vision_measurements'), ('bounding_box',\n '/auto_cinematography/vision/bounding_box')]", 'parameters': "[{'tensorrt_engine': 'deer_hde_fp32.rt'}]"}), "(package='cinematography', node_executable='heading_estimation',\n node_name='heading_estimation', output='screen', remappings=[(\n 'rviz_pose', '/rviz/pose'), ('vision_measurements',\n '/auto_cinematography/vision/vision_measurements'), ('bounding_box',\n '/auto_cinematography/vision/bounding_box')], parameters=[{\n 'tensorrt_engine': 'deer_hde_fp32.rt'}])\n", (152, 521), False, 'from launch_ros.actions import Node\n')] |
"""
Category viewset
Viewset for the category serializer
"""
# Django Rest Framework
from rest_framework import viewsets
# Inventory models
from apps.inventory.models import Category
# Inventory serializers
from apps.inventory.serializers import CategorySerializer
class CategoryViewSet(viewsets.ModelViewSet):
"""
Category viewset
CRUD views of the category serializer
"""
queryset = Category.objects.all()
serializer_class = CategorySerializer
def perform_destroy(self, instance):
"""
        perform_destroy performs a logical (soft) delete by toggling is_active
"""
instance.is_active = not instance.is_active
instance.save()
| [
"apps.inventory.models.Category.objects.all"
] | [((407, 429), 'apps.inventory.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (427, 429), False, 'from apps.inventory.models import Category\n')] |
"""This module downloads fibber datasets.
To download preprocessed datasets (Recommended), run::
python -m fibber.datasets.download_datasets
To download datasets from their original sources, and preprocess them locally::
python -m fibber.datasets.download_datasets --process_raw 1
"""
import argparse
import glob
import json
import os
from fibber import get_root_dir, log
from fibber.datasets import (
preprocess_ag, preprocess_imdb, preprocess_mnli, preprocess_mr, preprocess_snli,
preprocess_yelp)
from fibber.datasets.dataset_utils import verify_dataset
from fibber.datasets.downloadable_datasets import downloadable_dataset_urls
from fibber.download_utils import download_file
logger = log.setup_custom_logger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("--process_raw", choices=["0", "1"], default="0",
help="Use 1 to download and process raw data on this machine. "
"Use 0 (Default) to download processed data.")
parser.add_argument("--verify", choices=["0", "1"], default="1",
help="Verify each json in each datasets have proper attributes.")
DATASET_PREPROCESS_FN = {
"ag": preprocess_ag.download_and_preprocess_ag,
"imdb": preprocess_imdb.download_and_preprocess_imdb,
"mnli": preprocess_mnli.download_and_preprocess_mnli,
"mr": preprocess_mr.download_and_preprocess_mr,
"snli": preprocess_snli.download_and_preprocess_snli,
"yelp": preprocess_yelp.download_and_preprocess_yelp
}
if __name__ == "__main__":
FLAGS = parser.parse_args()
if FLAGS.process_raw == "1":
for name, processing_func in DATASET_PREPROCESS_FN.items():
logger.info("Start download and process %s.", name)
processing_func()
else:
download_file(subdir="", **downloadable_dataset_urls["processed-datasets"])
if FLAGS.verify == "1":
root_dir = get_root_dir()
datasets_dir = os.path.join(root_dir, "datasets")
dataset_json_list = sorted(glob.glob(datasets_dir + "/*/*.json"))
for json_filename in dataset_json_list:
logger.info("Verify %s.", json_filename)
with open(json_filename) as f:
data = json.load(f)
verify_dataset(data)
| [
"argparse.ArgumentParser",
"os.path.join",
"fibber.get_root_dir",
"json.load",
"fibber.log.setup_custom_logger",
"fibber.download_utils.download_file",
"fibber.datasets.dataset_utils.verify_dataset",
"glob.glob"
] | [((714, 747), 'fibber.log.setup_custom_logger', 'log.setup_custom_logger', (['__name__'], {}), '(__name__)\n', (737, 747), False, 'from fibber import get_root_dir, log\n'), ((758, 783), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (781, 783), False, 'import argparse\n'), ((1796, 1871), 'fibber.download_utils.download_file', 'download_file', ([], {'subdir': '""""""'}), "(subdir='', **downloadable_dataset_urls['processed-datasets'])\n", (1809, 1871), False, 'from fibber.download_utils import download_file\n'), ((1920, 1934), 'fibber.get_root_dir', 'get_root_dir', ([], {}), '()\n', (1932, 1934), False, 'from fibber import get_root_dir, log\n'), ((1958, 1992), 'os.path.join', 'os.path.join', (['root_dir', '"""datasets"""'], {}), "(root_dir, 'datasets')\n", (1970, 1992), False, 'import os\n'), ((2028, 2065), 'glob.glob', 'glob.glob', (["(datasets_dir + '/*/*.json')"], {}), "(datasets_dir + '/*/*.json')\n", (2037, 2065), False, 'import glob\n'), ((2260, 2280), 'fibber.datasets.dataset_utils.verify_dataset', 'verify_dataset', (['data'], {}), '(data)\n', (2274, 2280), False, 'from fibber.datasets.dataset_utils import verify_dataset\n'), ((2234, 2246), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2243, 2246), False, 'import json\n')] |
import json
import logging.config
import os
import sys
from base.singleton import singleton
@singleton
class ConfigUtils:
def __init__(self, cfg_path=os.path.join(os.path.dirname(sys.argv[0]), 'config')):
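        # By default the config directory is resolved next to the executed script.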
# config files
config_path = os.path.join(cfg_path, 'config.json')
log_config_path = os.path.join(cfg_path, 'log.conf')
db_config_path = os.path.join(cfg_path, 'db.json')
# logging config
logging.config.fileConfig(log_config_path)
logging.info("config path: " + config_path)
logging.info("log config path: " + log_config_path)
logging.info("db config path: " + db_config_path)
# configs
with open(config_path, 'r') as f:
content = json.loads(f.read())
# members
self._db_config_path = db_config_path
self._db_source = content['db_source']
logging.info("use db source: " + self._db_source)
def get_db_source(self):
return self._db_source
def get_db_config_path(self):
return self._db_config_path
| [
"os.path.dirname",
"os.path.join"
] | [((257, 294), 'os.path.join', 'os.path.join', (['cfg_path', '"""config.json"""'], {}), "(cfg_path, 'config.json')\n", (269, 294), False, 'import os\n'), ((321, 355), 'os.path.join', 'os.path.join', (['cfg_path', '"""log.conf"""'], {}), "(cfg_path, 'log.conf')\n", (333, 355), False, 'import os\n'), ((381, 414), 'os.path.join', 'os.path.join', (['cfg_path', '"""db.json"""'], {}), "(cfg_path, 'db.json')\n", (393, 414), False, 'import os\n'), ((170, 198), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (185, 198), False, 'import os\n')] |
import math
def train_model(hp, model, train_loader, writer, logger):
model.net.train()
for input_, target in train_loader:
model.feed_data(input=input_, GT=target)
model.optimize_parameters()
loss = model.log.loss_v
model.step += 1
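        # Abort training if the loss diverges or becomes NaN.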
if logger is not None and (loss > 1e8 or math.isnan(loss)):
logger.error("Loss exploded to %.02f at step %d!" % (loss, model.step))
raise Exception("Loss exploded")
if model.step % hp.log.summary_interval == 0:
if writer is not None:
writer.train_logging(loss, model.step)
if logger is not None:
logger.info("Train Loss %.04f at step %d" % (loss, model.step))
| [
"math.isnan"
] | [((325, 341), 'math.isnan', 'math.isnan', (['loss'], {}), '(loss)\n', (335, 341), False, 'import math\n')] |
import pytest
import requests
import os
from util4tests import run_single_test, log
from yaml4parms import read
def _testfile_path(*relative):
return os.path.join(os.path.abspath(os.path.dirname(__file__)), *relative)
def _wget(url, fname):
r = requests.get(url)
open(fname, 'wb').write(r.content)
@pytest.fixture
def pema_params():
src = "https://raw.githubusercontent.com/marc-portier/pema/ARMS/analysis_directory/parameters_structured.tsv"
tgt = _testfile_path('tmp', 'pema', 'parameters.tsv')
os.makedirs(os.path.dirname(tgt), exist_ok=True)
try:
_wget(src, tgt)
except requests.exceptions.ConnectionError:
pass
return tgt
@pytest.fixture
def local_test():
return _testfile_path('in', 'local.txt')
def test_local(local_test):
log.debug(f"now testing input from {local_test}")
parms = read(local_test)
assert parms is not None, 'reading the local file should not fail'
assert {'text', 'count', 'measurement'}.issubset(set(parms)), "we expect a crucial set of params to be defined"
assert parms['text']['title'] is not None, "parameter 'text' should have a 'title'"
assert parms.as_json() is not None
assert parms.as_html() is not None
def test_pema_params(pema_params):
log.debug(f"testing input from {pema_params}")
parms = read(pema_params)
assert parms is not None, 'reading the local file should not fail'
if __name__ == "__main__":
run_single_test(__file__)
| [
"yaml4parms.read",
"util4tests.run_single_test",
"util4tests.log.debug",
"requests.get",
"os.path.dirname"
] | [((257, 274), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (269, 274), False, 'import requests\n'), ((801, 850), 'util4tests.log.debug', 'log.debug', (['f"""now testing input from {local_test}"""'], {}), "(f'now testing input from {local_test}')\n", (810, 850), False, 'from util4tests import run_single_test, log\n'), ((863, 879), 'yaml4parms.read', 'read', (['local_test'], {}), '(local_test)\n', (867, 879), False, 'from yaml4parms import read\n'), ((1276, 1322), 'util4tests.log.debug', 'log.debug', (['f"""testing input from {pema_params}"""'], {}), "(f'testing input from {pema_params}')\n", (1285, 1322), False, 'from util4tests import run_single_test, log\n'), ((1335, 1352), 'yaml4parms.read', 'read', (['pema_params'], {}), '(pema_params)\n', (1339, 1352), False, 'from yaml4parms import read\n'), ((1457, 1482), 'util4tests.run_single_test', 'run_single_test', (['__file__'], {}), '(__file__)\n', (1472, 1482), False, 'from util4tests import run_single_test, log\n'), ((539, 559), 'os.path.dirname', 'os.path.dirname', (['tgt'], {}), '(tgt)\n', (554, 559), False, 'import os\n'), ((185, 210), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (200, 210), False, 'import os\n')] |
from Queue import Empty
from bson import BSON
from .thread_with_stop import ThreadWithStop
class Dumper(ThreadWithStop):
"""Writes samples to a file
"""
def __init__(self, sample_queue, dumpfile):
super(Dumper, self).__init__(name='Dumper')
self._dumpfile = dumpfile
self._sample_queue = sample_queue
def _run(self):
while not self._stop.is_set():
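            # Block for at most 1 s so the stop flag is re-checked regularly.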
try:
sample = self._sample_queue.get(block=True, timeout=1)
self._dumpfile.write(BSON.encode(sample))
except Empty:
pass
self._dumpfile.flush()
| [
"bson.BSON.encode"
] | [((528, 547), 'bson.BSON.encode', 'BSON.encode', (['sample'], {}), '(sample)\n', (539, 547), False, 'from bson import BSON\n')] |
from rest_framework import status, viewsets
from rest_framework.response import Response
from quests.serializers import QuestSerializer, JournalSerializer
from quests.models import Quest, Journal
class QuestViewSet(viewsets.ModelViewSet):
queryset = Quest.objects.all()
serializer_class = QuestSerializer
def get(self, format=None):
        queryset = self.get_queryset()
serializer = QuestSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = QuestSerializer(data=request.data)
if serializer.is_valid(raise_exception=ValueError):
serializer.create(validated_data=request.data)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.error_messages,
status=status.HTTP_400_BAD_REQUEST)
class JournalViewSet(viewsets.ModelViewSet):
queryset = Journal.objects.all()
serializer_class = JournalSerializer
def get(self, format=None):
        queryset = self.get_queryset()
serializer = JournalSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = JournalSerializer(data=request.data)
if serializer.is_valid(raise_exception=ValueError):
serializer.create(validated_data=request.data)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.error_messages,
status=status.HTTP_400_BAD_REQUEST)
| [
"quests.serializers.JournalSerializer",
"quests.models.Quest.objects.all",
"quests.models.Journal.objects.all",
"quests.serializers.QuestSerializer"
] | [((204, 223), 'quests.models.Quest.objects.all', 'Quest.objects.all', ([], {}), '()\n', (221, 223), False, 'from quests.models import Quest, Journal\n'), ((880, 901), 'quests.models.Journal.objects.all', 'Journal.objects.all', ([], {}), '()\n', (899, 901), False, 'from quests.models import Quest, Journal\n'), ((345, 381), 'quests.serializers.QuestSerializer', 'QuestSerializer', (['queryset'], {'many': '(True)'}), '(queryset, many=True)\n', (360, 381), False, 'from quests.serializers import QuestSerializer, JournalSerializer\n'), ((475, 509), 'quests.serializers.QuestSerializer', 'QuestSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (490, 509), False, 'from quests.serializers import QuestSerializer, JournalSerializer\n'), ((1025, 1063), 'quests.serializers.JournalSerializer', 'JournalSerializer', (['queryset'], {'many': '(True)'}), '(queryset, many=True)\n', (1042, 1063), False, 'from quests.serializers import QuestSerializer, JournalSerializer\n'), ((1156, 1192), 'quests.serializers.JournalSerializer', 'JournalSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (1173, 1192), False, 'from quests.serializers import QuestSerializer, JournalSerializer\n')] |
import os
import torch
import torch.nn as nn
import collections
from pathlib import Path
class BaseModel(nn.Module):
def __init__(self, name, config):
super(BaseModel, self).__init__()
self.name = name
self.config = config
self.iteration = 0
self.eva_iou = 0
self.best_suffix = '_best.pth'
self.suffix = '.pth'
self.skip_names = ['loss']
self.saving_pth = os.path.join(config.PATH,name)
Path(self.saving_pth).mkdir(parents=True, exist_ok=True)
self.config_path = os.path.join(self.saving_pth, 'config')
def saveConfig(self, path):
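        """Save the current iteration and evaluation IoU to the given path."""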
torch.save({
'iteration': self.iteration,
'eva_iou' : self.eva_iou
}, path)
def loadConfig(self, path):
if os.path.exists(path):
if torch.cuda.is_available():
data = torch.load(path)
else:
data = torch.load(path, map_location=lambda storage, loc: storage)
try:
eva_iou = data['eva_iou']
except:
print('Target saving config file does not contain eva_iou!')
eva_iou = 0
return data['iteration'], eva_iou
else:
return 0, 0
def save(self):
print('\nSaving %s...' % self.name)
if not os.path.exists(self.config_path+self.best_suffix):
print('No previous best model found. Saving this as the best.\n')
suffix = self.best_suffix
else:
print('Found the previous best model.')
_, eva_iou = self.loadConfig(self.config_path+self.best_suffix)
print('current v.s. previous: {:1.3f} {:1.3f}'.format(self.eva_iou,eva_iou))
if self.eva_iou > eva_iou:
print('Current IoU is better. Update best model.\n')
suffix = self.best_suffix
else:
print('Previous IoU is better, save this one as checkpoint.\n')
suffix = self.suffix
self.saveConfig(self.config_path + suffix)
for name,model in self._modules.items():
skip = False
for k in self.skip_names:
if name.find(k) != -1:
skip = True
if skip is False:
self.saveWeights(model, os.path.join(self.saving_pth,name + suffix))
torch.save({'optimizer': self.optimizer.state_dict()}, os.path.join(self.saving_pth,'optimizer'+suffix))
def load(self, best=False):
print('\nLoading %s model...' % self.name)
loaded=True
if os.path.exists(self.config_path+self.best_suffix) and best:
print('\tTrying to load the best model')
suffix = self.best_suffix
elif not os.path.exists(self.config_path+self.suffix) and os.path.exists(self.config_path+self.best_suffix):
print('\tNo checkpoints, but has saved best model. Load the best model')
suffix = self.best_suffix
elif os.path.exists(self.config_path+self.suffix) and os.path.exists(self.config_path+self.best_suffix):
print('\tFound checkpoint model and the best model. Comparing itertaion')
iteration, _= self.loadConfig(self.config_path + self.suffix)
iteration_best, _= self.loadConfig(self.config_path + self.best_suffix)
if iteration > iteration_best:
print('\tcheckpoint has larger iteration value. Load checkpoint')
suffix = self.suffix
else:
print('\tthe best model has larger iteration value. Load the best model')
suffix = self.best_suffix
elif os.path.exists(self.config_path+self.suffix):
print('\tLoad checkpoint')
suffix = self.suffix
else:
print('\tNo saved model found')
return False
self.iteration, self.eva_iou = self.loadConfig(self.config_path + suffix)
for name,model in self._modules.items():
skip = False
for k in self.skip_names:
if name.find(k) != -1:
skip = True
if skip is False:
loaded &= self.loadWeights(model, os.path.join(self.saving_pth,name + suffix))
if os.path.exists(os.path.join(self.saving_pth,'optimizer'+suffix)):
data = torch.load(os.path.join(self.saving_pth,'optimizer'+suffix))
self.optimizer.load_state_dict(data['optimizer'])
if loaded:
print('\tmodel loaded!\n')
else:
print('\tmodel loading failed!\n')
return loaded
def saveWeights(self, model, path):
if isinstance(model, nn.DataParallel):
torch.save({
'model': model.module.state_dict()
}, path)
else:
torch.save({
'model': model.state_dict()
}, path)
def loadWeights(self, model, path):
# print('isinstance(model, nn.DataParallel): ',isinstance(model, nn.DataParallel))
if os.path.exists(path):
if torch.cuda.is_available():
data = torch.load(path)
else:
data = torch.load(path, map_location=lambda storage, loc: storage)
new_dict = collections.OrderedDict()
if isinstance(model, nn.DataParallel):
for k,v in data['model'].items():
if k[:6] != 'module':
name = 'module.' + k
new_dict [name] = v
model.load_state_dict(new_dict)
else:
for k,v in data['model'].items():
if k[:6] == 'module':
name = k[7:]
new_dict [name] = v
model.load_state_dict(data['model'])
return True
else:
return False | [
"os.path.exists",
"collections.OrderedDict",
"pathlib.Path",
"torch.load",
"os.path.join",
"torch.cuda.is_available",
"torch.save"
] | [((441, 472), 'os.path.join', 'os.path.join', (['config.PATH', 'name'], {}), '(config.PATH, name)\n', (453, 472), False, 'import os\n'), ((564, 603), 'os.path.join', 'os.path.join', (['self.saving_pth', '"""config"""'], {}), "(self.saving_pth, 'config')\n", (576, 603), False, 'import os\n'), ((653, 725), 'torch.save', 'torch.save', (["{'iteration': self.iteration, 'eva_iou': self.eva_iou}", 'path'], {}), "({'iteration': self.iteration, 'eva_iou': self.eva_iou}, path)\n", (663, 725), False, 'import torch\n'), ((813, 833), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (827, 833), False, 'import os\n'), ((5206, 5226), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5220, 5226), False, 'import os\n'), ((850, 875), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (873, 875), False, 'import torch\n'), ((1409, 1460), 'os.path.exists', 'os.path.exists', (['(self.config_path + self.best_suffix)'], {}), '(self.config_path + self.best_suffix)\n', (1423, 1460), False, 'import os\n'), ((2521, 2572), 'os.path.join', 'os.path.join', (['self.saving_pth', "('optimizer' + suffix)"], {}), "(self.saving_pth, 'optimizer' + suffix)\n", (2533, 2572), False, 'import os\n'), ((2711, 2762), 'os.path.exists', 'os.path.exists', (['(self.config_path + self.best_suffix)'], {}), '(self.config_path + self.best_suffix)\n', (2725, 2762), False, 'import os\n'), ((4412, 4463), 'os.path.join', 'os.path.join', (['self.saving_pth', "('optimizer' + suffix)"], {}), "(self.saving_pth, 'optimizer' + suffix)\n", (4424, 4463), False, 'import os\n'), ((5243, 5268), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5266, 5268), False, 'import torch\n'), ((5464, 5489), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5487, 5489), False, 'import collections\n'), ((480, 501), 'pathlib.Path', 'Path', (['self.saving_pth'], {}), '(self.saving_pth)\n', (484, 501), False, 'from pathlib import Path\n'), ((900, 916), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (910, 916), False, 'import torch\n'), ((958, 1017), 'torch.load', 'torch.load', (['path'], {'map_location': '(lambda storage, loc: storage)'}), '(path, map_location=lambda storage, loc: storage)\n', (968, 1017), False, 'import torch\n'), ((2928, 2979), 'os.path.exists', 'os.path.exists', (['(self.config_path + self.best_suffix)'], {}), '(self.config_path + self.best_suffix)\n', (2942, 2979), False, 'import os\n'), ((4493, 4544), 'os.path.join', 'os.path.join', (['self.saving_pth', "('optimizer' + suffix)"], {}), "(self.saving_pth, 'optimizer' + suffix)\n", (4505, 4544), False, 'import os\n'), ((5293, 5309), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (5303, 5309), False, 'import torch\n'), ((5351, 5410), 'torch.load', 'torch.load', (['path'], {'map_location': '(lambda storage, loc: storage)'}), '(path, map_location=lambda storage, loc: storage)\n', (5361, 5410), False, 'import torch\n'), ((2413, 2457), 'os.path.join', 'os.path.join', (['self.saving_pth', '(name + suffix)'], {}), '(self.saving_pth, name + suffix)\n', (2425, 2457), False, 'import os\n'), ((2879, 2925), 'os.path.exists', 'os.path.exists', (['(self.config_path + self.suffix)'], {}), '(self.config_path + self.suffix)\n', (2893, 2925), False, 'import os\n'), ((3115, 3161), 'os.path.exists', 'os.path.exists', (['(self.config_path + self.suffix)'], {}), '(self.config_path + self.suffix)\n', (3129, 3161), False, 'import os\n'), ((3164, 3215), 'os.path.exists', 'os.path.exists', (['(self.config_path + 
self.best_suffix)'], {}), '(self.config_path + self.best_suffix)\n', (3178, 3215), False, 'import os\n'), ((3784, 3830), 'os.path.exists', 'os.path.exists', (['(self.config_path + self.suffix)'], {}), '(self.config_path + self.suffix)\n', (3798, 3830), False, 'import os\n'), ((4332, 4376), 'os.path.join', 'os.path.join', (['self.saving_pth', '(name + suffix)'], {}), '(self.saving_pth, name + suffix)\n', (4344, 4376), False, 'import os\n')] |
# -*- coding:utf8 -*-
from lib.struct.Point import Point
# Represents an in-game coordinate
class CoordiPoint(Point):
def __init__(self, x, y):
Point.__init__(self, x, y)
    # Convert this map coordinate into a world coordinate
def to_world(self, area):
from lib.struct.WorldPoint import WorldPoint
zoneRange = self.getZoneRangeByZoneName(area)
Y1 = zoneRange["Y1"]
Y2 = zoneRange["Y2"]
X1 = zoneRange["X1"]
X2 = zoneRange["X2"]
ret = []
        # World x is computed from map y and world y from map x, because the axes are swapped between the two coordinate systems.
ret.append(WorldPoint(self.y / 100 * (X2 - X1) + X1, self.x / 100 * (Y2 - Y1) + Y1))
# ret.append(WorldPoint(-coordi.y / 100 * (X2 - X1) + X1,coordi.x / 100 * (Y2 - Y1) + Y1))
# ret.append(WorldPoint(coordi.y / 100 * (X2 - X1) + X1,-coordi.x / 100 * (Y2 - Y1) + Y1))
# ret.append(WorldPoint(-coordi.y / 100 * (X2 - X1) + X1,-coordi.x / 100 * (Y2 - Y1) + Y1))
        # Because sign information may have been lost, up to four candidate world coordinates are possible; find the correct one.
realWorlds = []
for w in ret:
if min(X1, X2) <= w.x <= max(X1, X2) and min(Y1, Y2) <= w.y <= max(Y1, Y2):
realWorlds.append(w)
if len(realWorlds) == 1:
return realWorlds[0]
        raise Exception(self.toString() + ": cannot be converted to a world coordinate")
| [
"lib.struct.WorldPoint.WorldPoint",
"lib.struct.Point.Point.__init__"
] | [((135, 161), 'lib.struct.Point.Point.__init__', 'Point.__init__', (['self', 'x', 'y'], {}), '(self, x, y)\n', (149, 161), False, 'from lib.struct.Point import Point\n'), ((549, 621), 'lib.struct.WorldPoint.WorldPoint', 'WorldPoint', (['(self.y / 100 * (X2 - X1) + X1)', '(self.x / 100 * (Y2 - Y1) + Y1)'], {}), '(self.y / 100 * (X2 - X1) + X1, self.x / 100 * (Y2 - Y1) + Y1)\n', (559, 621), False, 'from lib.struct.WorldPoint import WorldPoint\n')] |
"""Core project components of a modelmanager project.
The Project class is the only exposed object of the modelmanager package. If
extending modelmanager for your model, you can inherit this class.
Project setup with the provided commandline script (calls 'initialise' below):
modelmanager --projectdir=.
"""
import os
from os import path as osp
import shutil
import sys
from modelmanager.settings import SettingsManager, SettingsUndefinedError
class Project(object):
"""The central project object.
All variables and fuctions are available to operate on the current model
state.
"""
def __init__(self, projectdir='.', **settings):
self.projectdir = osp.abspath(projectdir)
        # initialise settings
self.settings = SettingsManager(self)
# load settings with overridden settings
self.settings.load(**settings)
return
def __repr__(self):
rpd = osp.relpath(self.projectdir, os.getcwd())
r = ('<%s instance in: %s >' % (self.__class__.__name__, rpd))
return r
def __getattr__(self, attr):
"""
        Fall-back if the requested setting isn't defined.
"""
# make sure AttributeErrors from properties are not misinterpreted
if attr in self.__class__.__dict__:
try:
                # access the property without getattr
self.__class__.__dict__[attr].fget(self)
except AttributeError:
import traceback
ex_type, ex, tb = sys.exc_info()
raise AttributeError('While accessing the setting %s,' % attr +
' the below error occurred:\n\n' +
''.join(traceback.format_tb(tb)) +
'AttributeError: '+str(ex))
else:
raise SettingsUndefinedError(attr)
def setup(projectdir='.', resourcedir='mm'):
"""Initialise a default modelmanager project in the current directory."""
resourcedir = osp.join(projectdir, resourcedir)
settings_path = osp.join(resourcedir, SettingsManager.settings_file_name)
print('Initialising a new modelmanager project in: %s\n' % projectdir +
'with settings file in: %s' % settings_path)
# create projectdir if not existing
if not osp.exists(projectdir):
os.mkdir(projectdir)
# create resource dir if it does not exist, raise error otherwise
ermg = ('The modelmanager resource directory seems to exist already:\n' +
resourcedir)
assert not osp.exists(resourcedir), ermg
default_resources = osp.join(osp.dirname(__file__), 'resources')
shutil.copytree(default_resources, resourcedir)
# load project and update/create database
pro = Project(projectdir)
return pro
class ProjectDoesNotExist(Exception):
pass
| [
"os.path.exists",
"traceback.format_tb",
"modelmanager.settings.SettingsManager",
"os.path.join",
"modelmanager.settings.SettingsUndefinedError",
"os.getcwd",
"shutil.copytree",
"os.path.dirname",
"sys.exc_info",
"os.mkdir",
"os.path.abspath"
] | [((2021, 2054), 'os.path.join', 'osp.join', (['projectdir', 'resourcedir'], {}), '(projectdir, resourcedir)\n', (2029, 2054), True, 'from os import path as osp\n'), ((2075, 2132), 'os.path.join', 'osp.join', (['resourcedir', 'SettingsManager.settings_file_name'], {}), '(resourcedir, SettingsManager.settings_file_name)\n', (2083, 2132), True, 'from os import path as osp\n'), ((2660, 2707), 'shutil.copytree', 'shutil.copytree', (['default_resources', 'resourcedir'], {}), '(default_resources, resourcedir)\n', (2675, 2707), False, 'import shutil\n'), ((686, 709), 'os.path.abspath', 'osp.abspath', (['projectdir'], {}), '(projectdir)\n', (697, 709), True, 'from os import path as osp\n'), ((763, 784), 'modelmanager.settings.SettingsManager', 'SettingsManager', (['self'], {}), '(self)\n', (778, 784), False, 'from modelmanager.settings import SettingsManager, SettingsUndefinedError\n'), ((2315, 2337), 'os.path.exists', 'osp.exists', (['projectdir'], {}), '(projectdir)\n', (2325, 2337), True, 'from os import path as osp\n'), ((2347, 2367), 'os.mkdir', 'os.mkdir', (['projectdir'], {}), '(projectdir)\n', (2355, 2367), False, 'import os\n'), ((2556, 2579), 'os.path.exists', 'osp.exists', (['resourcedir'], {}), '(resourcedir)\n', (2566, 2579), True, 'from os import path as osp\n'), ((2620, 2641), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (2631, 2641), True, 'from os import path as osp\n'), ((956, 967), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (965, 967), False, 'import os\n'), ((1848, 1876), 'modelmanager.settings.SettingsUndefinedError', 'SettingsUndefinedError', (['attr'], {}), '(attr)\n', (1870, 1876), False, 'from modelmanager.settings import SettingsManager, SettingsUndefinedError\n'), ((1512, 1526), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1524, 1526), False, 'import sys\n'), ((1724, 1747), 'traceback.format_tb', 'traceback.format_tb', (['tb'], {}), '(tb)\n', (1743, 1747), False, 'import traceback\n')] |
import math
import torch
import torch.nn.functional as F
from torch.nn import ReplicationPad3d
import torchvision
import inflate_utils
from network.layers.inception import inception
from network import hourglass as hourglass
class I3HourGlass(torch.nn.Module):
def __init__(self, inceptionnet2d, frame_nb, inflate_block_convs=False):
super(I3HourGlass, self).__init__()
self.frame_nb = frame_nb
self.inceptionnet3d = inflate_features(
inceptionnet2d, inflate_block_convs=inflate_block_convs)
def forward(self, inp):
out = self.inceptionnet3d(inp)
return out
class _Channel3d(torch.nn.Module):
def __init__(self, channellayer2d, inflate_convs=False):
super(_Channel3d, self).__init__()
self.inflate_convs = inflate_convs
self.list = torch.nn.ModuleList()
self.block = []
self.block1 = torch.nn.Sequential()
self.block2 = torch.nn.Sequential()
self.list.append(self.block1)
self.list.append(self.block2)
for name, child in channellayer2d.named_children():
for nested_name, nested_child in child[0].named_children():
if isinstance(nested_child, torch.nn.BatchNorm2d):
self.block1.add_module(nested_name, inflate_utils.inflate_batch_norm(nested_child))
elif isinstance(nested_child, torch.nn.ReLU):
self.block1.add_module(nested_name, nested_child)
elif isinstance(nested_child, torch.nn.Conv2d):
print('Here')
self.block1.add_module(nested_name, inflate_utils.inflate_conv(nested_child, 1))
elif isinstance(nested_child, torch.nn.MaxPool2d) or isinstance(
nested_child, torch.nn.AvgPool2d):
self.block1.add_module(nested_name, inflate_utils.inflate_pool(nested_child))
elif isinstance(nested_child, torch.nn.UpsamplingNearest2d):
print("Here")
self.block1.add_module(nested_name, inflate_utils.inflate_upsample(nested_child))
elif isinstance(nested_child, inception):
self.block1.add_module(nested_name, inflate_utils.inception3d(nested_child, nested_child.input_size, nested_child.config))
elif isinstance(nested_child, hourglass.Channels4) or isinstance(
nested_child, hourglass.Channels3) or isinstance(nested_child,
hourglass.Channels2) or isinstance(nested_child, hourglass.Channels1):
self.block1.add_module(nested_name, _Channel3d(nested_child, inflate_convs=self.inflate_convs))
else:
raise ValueError(
'{} is not among handled layer types'.format(type(nested_child)))
for nested_name, nested_child in child[1].named_children():
if isinstance(nested_child, torch.nn.BatchNorm2d):
print('Here')
self.block2.add_module(nested_name, inflate_utils.inflate_batch_norm(nested_child))
elif isinstance(nested_child, torch.nn.ReLU):
print('Here')
self.block2.add_module(nested_name, nested_child)
elif isinstance(nested_child, torch.nn.Conv2d):
print('Here')
self.block2.add_module(nested_name, inflate_utils.inflate_conv(nested_child, 1))
elif isinstance(nested_child, torch.nn.MaxPool2d) or isinstance(
nested_child, torch.nn.AvgPool2d):
print('Here')
self.block2.add_module(nested_name, inflate_utils.inflate_pool(nested_child))
elif isinstance(nested_child, torch.nn.UpsamplingNearest2d):
print("Here")
self.block2.add_module(nested_name, inflate_utils.inflate_upsample(nested_child))
elif isinstance(nested_child, inception):
print('Here inception')
self.block2.add_module(nested_name, inflate_utils.inception3d(nested_child, nested_child.input_size, nested_child.config))
elif isinstance(nested_child, hourglass.Channels4) or isinstance(
nested_child, hourglass.Channels3) or isinstance(nested_child,
hourglass.Channels2) or isinstance(nested_child, hourglass.Channels1):
print('Here channel class')
self.block2.add_module(nested_name, _Channel3d(nested_child, inflate_convs=self.inflate_convs))
else:
raise ValueError(
'{} is not among handled layer types'.format(type(nested_child)))
def forward(self, x):
return self.list[0](x) + self.list[1](x)
def inflate_features(inceptionnet2d, inflate_block_convs=False):
"""
Inflates the feature extractor part of InceptionNet by adding the corresponding
    inflated modules and transferring the inflated weights
"""
features3d = torch.nn.Sequential()
for name, child in inceptionnet2d.named_children():
if isinstance(child, torch.nn.Sequential):
block = torch.nn.Sequential()
for nested_name, nested_child in child.named_children():
if isinstance(nested_child, torch.nn.BatchNorm2d):
block.add_module(nested_name, inflate_utils.inflate_batch_norm(nested_child))
elif isinstance(nested_child, torch.nn.ReLU):
block.add_module(nested_name, nested_child)
elif isinstance(nested_child, torch.nn.Conv2d):
block.add_module(nested_name, inflate_utils.inflate_conv(nested_child, time_dim=3))
elif isinstance(nested_child, torch.nn.MaxPool2d) or isinstance(
nested_child, torch.nn.AvgPool2d):
block.add_module(nested_name, inflate_utils.inflate_pool(nested_child))
elif isinstance(nested_child, hourglass.Channels4) or isinstance(
nested_child, hourglass.Channels3) or isinstance(nested_child,
hourglass.Channels2) or isinstance(nested_child, hourglass.Channels1):
block.add_module(nested_name, _Channel3d(nested_child, inflate_convs=inflate_block_convs))
else:
raise ValueError(
'{} is not among handled layer types'.format(type(nested_child)))
features3d.add_module(name, block)
else:
raise ValueError(
'{} is not among handled layer types'.format(type(child)))
return features3d
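# Hedged sketch of the inflation idea used above (illustrative only; the actual logic
# lives in inflate_utils.inflate_conv and may differ): a 2D kernel of shape
# (out, in, kH, kW) is repeated along a new time axis and rescaled so that a static
# clip produces the same response in 3D as a single frame did in 2D, e.g.:
#   w2d = conv2d.weight.data                          # (out, in, kH, kW)
#   w3d = w2d.unsqueeze(2).repeat(1, 1, T, 1, 1) / T  # (out, in, T, kH, kW)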
| [
"inflate_utils.inflate_conv",
"inflate_utils.inception3d",
"torch.nn.Sequential",
"torch.nn.ModuleList",
"inflate_utils.inflate_upsample",
"inflate_utils.inflate_batch_norm",
"inflate_utils.inflate_pool"
] | [((5109, 5130), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (5128, 5130), False, 'import torch\n'), ((828, 849), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (847, 849), False, 'import torch\n'), ((896, 917), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (915, 917), False, 'import torch\n'), ((940, 961), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (959, 961), False, 'import torch\n'), ((5258, 5279), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (5277, 5279), False, 'import torch\n'), ((1294, 1340), 'inflate_utils.inflate_batch_norm', 'inflate_utils.inflate_batch_norm', (['nested_child'], {}), '(nested_child)\n', (1326, 1340), False, 'import inflate_utils\n'), ((3077, 3123), 'inflate_utils.inflate_batch_norm', 'inflate_utils.inflate_batch_norm', (['nested_child'], {}), '(nested_child)\n', (3109, 3123), False, 'import inflate_utils\n'), ((5466, 5512), 'inflate_utils.inflate_batch_norm', 'inflate_utils.inflate_batch_norm', (['nested_child'], {}), '(nested_child)\n', (5498, 5512), False, 'import inflate_utils\n'), ((1628, 1671), 'inflate_utils.inflate_conv', 'inflate_utils.inflate_conv', (['nested_child', '(1)'], {}), '(nested_child, 1)\n', (1654, 1671), False, 'import inflate_utils\n'), ((3445, 3488), 'inflate_utils.inflate_conv', 'inflate_utils.inflate_conv', (['nested_child', '(1)'], {}), '(nested_child, 1)\n', (3471, 3488), False, 'import inflate_utils\n'), ((5754, 5806), 'inflate_utils.inflate_conv', 'inflate_utils.inflate_conv', (['nested_child'], {'time_dim': '(3)'}), '(nested_child, time_dim=3)\n', (5780, 5806), False, 'import inflate_utils\n'), ((1869, 1909), 'inflate_utils.inflate_pool', 'inflate_utils.inflate_pool', (['nested_child'], {}), '(nested_child)\n', (1895, 1909), False, 'import inflate_utils\n'), ((3720, 3760), 'inflate_utils.inflate_pool', 'inflate_utils.inflate_pool', (['nested_child'], {}), '(nested_child)\n', (3746, 3760), False, 'import inflate_utils\n'), ((5998, 6038), 'inflate_utils.inflate_pool', 'inflate_utils.inflate_pool', (['nested_child'], {}), '(nested_child)\n', (6024, 6038), False, 'import inflate_utils\n'), ((2078, 2122), 'inflate_utils.inflate_upsample', 'inflate_utils.inflate_upsample', (['nested_child'], {}), '(nested_child)\n', (2108, 2122), False, 'import inflate_utils\n'), ((3929, 3973), 'inflate_utils.inflate_upsample', 'inflate_utils.inflate_upsample', (['nested_child'], {}), '(nested_child)\n', (3959, 3973), False, 'import inflate_utils\n'), ((2238, 2327), 'inflate_utils.inception3d', 'inflate_utils.inception3d', (['nested_child', 'nested_child.input_size', 'nested_child.config'], {}), '(nested_child, nested_child.input_size,\n nested_child.config)\n', (2263, 2327), False, 'import inflate_utils\n'), ((4133, 4222), 'inflate_utils.inception3d', 'inflate_utils.inception3d', (['nested_child', 'nested_child.input_size', 'nested_child.config'], {}), '(nested_child, nested_child.input_size,\n nested_child.config)\n', (4158, 4222), False, 'import inflate_utils\n')] |
import os
import json
from aws_cdk import (
aws_ec2 as ec2,
aws_ecs as ecs,
aws_rds as rds,
aws_iam as iam,
aws_s3 as s3,
aws_route53 as r53,
aws_route53_targets as r53_targets,
aws_certificatemanager as cm,
aws_elasticloadbalancingv2_actions as elbv2_actions,
aws_elasticloadbalancingv2 as elbv2,
aws_secretsmanager as sm,
aws_ecr_assets as ecr_assets,
    aws_apigateway as agw,
    aws_lambda as _lambda,
    aws_ecs_patterns as ecs_patterns,
    RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn,
)
from constructs import Construct
with open(os.path.join(os.getcwd(), "cdk.out/data/cloud9.json")) as cloud9_json:
cloud9_data = json.load(cloud9_json)
cloud9_json.close()
class ExplorerStack(Stack):
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
self._availability_zones = self.availability_zones
self.default_vpc = ec2.Vpc.from_lookup(
self, "DefaultVpc",
is_default=True,
)
self.database_subnets = [
ec2.Subnet(
self, "DatabaseSubnet0",
vpc_id=self.default_vpc.vpc_id,
cidr_block="172.31.160.0/20",
availability_zone=self._availability_zones[0],
),
ec2.Subnet(
self, "DatabaseSubnet1",
vpc_id=self.default_vpc.vpc_id,
cidr_block="172.31.176.0/20",
availability_zone=self._availability_zones[1],
),
ec2.Subnet(
self, "DatabaseSubnet2",
vpc_id=self.default_vpc.vpc_id,
cidr_block="172.31.192.0/20",
availability_zone=self._availability_zones[2],
)
]
self.database_username = "explorer"
self.database_password_secret = sm.Secret.from_secret_attributes(
self, 'DatabasePassword',
secret_complete_arn=Fn.import_value('DocumentLedgerExplorerDatabasePasswordArn'),
)
self.database = rds.DatabaseInstance(
self, "Database",
engine=rds.DatabaseInstanceEngine.postgres(version=rds.PostgresEngineVersion.VER_12_3),
instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
vpc=self.default_vpc,
vpc_subnets=ec2.SubnetSelection(
subnets=self.database_subnets,
),
publicly_accessible=False,
credentials=rds.Credentials.from_password(self.database_username,
self.database_password_secret.secret_value)
)
self.cloud9_security_group = ec2.SecurityGroup.from_security_group_id(
self, 'Cloud9SecurityGroup',
cloud9_data["securityGroupId"])
self.database.connections.allow_default_port_from(self.cloud9_security_group)
self.image_directory = os.path.join(os.getcwd(), "explorer")
self.image_asset = ecr_assets.DockerImageAsset(
self, "Image",
directory=self.image_directory,
)
self.image = ecs.ContainerImage.from_docker_image_asset(self.image_asset)
self.domain_name = f"explorer.{os.getenv('LEDGER_DOMAIN_NAME', default='')}"
self.domain_zone = r53.PublicHostedZone(
self, "HostedZone",
zone_name=self.domain_name,
)
self.base_zone = r53.PublicHostedZone.from_hosted_zone_attributes(
self, "BaseZone",
hosted_zone_id=Fn.import_value("DocumentLedgerHostedZoneId"),
zone_name=os.getenv('LEDGER_DOMAIN_NAME', default='')
)
r53.NsRecord(
self, "DelegationRecord",
zone=self.base_zone,
record_name='explorer',
values=self.domain_zone.hosted_zone_name_servers or [],
)
self.validation = cm.CertificateValidation.from_dns(self.domain_zone)
self.certificate = cm.Certificate(
self, 'Certificate',
domain_name=self.domain_name,
validation=self.validation
)
self.service_subnets = [
ec2.Subnet(
self, "ServiceSubnet0",
vpc_id=self.default_vpc.vpc_id,
cidr_block="172.31.208.0/20",
availability_zone=self._availability_zones[0],
),
ec2.Subnet(
self, "ServiceSubnet1",
vpc_id=self.default_vpc.vpc_id,
cidr_block="172.31.224.0/20",
availability_zone=self._availability_zones[1],
),
ec2.Subnet(
self, "ServiceSubnet2",
vpc_id=self.default_vpc.vpc_id,
cidr_block="172.31.240.0/20",
availability_zone=self._availability_zones[2],
)
]
self.cluster = ecs.Cluster(
self, "Cluster",
vpc=self.default_vpc,
)
self.service = ecs_patterns.ApplicationLoadBalancedFargateService(
self, "Service",
cluster=self.cluster,
certificate=self.certificate,
domain_name=self.domain_name,
domain_zone=self.domain_zone,
protocol=elbv2.ApplicationProtocol.HTTPS,
redirect_http=True,
task_subnets=ec2.SubnetSelection(
subnets=self.service_subnets,
),
task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
image=self.image,
container_port=8080,
environment={
"DATABASE_HOST": self.database.instance_endpoint.hostname,
"DATABASE_USERNAME": self.database_username,
"DATABASE_PASSWD": self.database_password_secret.secret_value.to_string(),
"LOG_LEVEL_APP": 'debug',
"LOG_LEVEL_DB": 'debug',
"LOG_LEVEL_CONSOLE": 'debug',
"LOG_CONSOLE_STDOUT": 'true',
"DISCOVERY_AS_LOCALHOST": 'false',
}
)
)
self.database.connections.allow_default_port_from(self.service.service)
self.ledger_port_range = ec2.Port.tcp_range(30001, 30004)
self.ledger_security_group_id = Fn.import_value("DocumentLedgerDefaultVpcEndpointSecurityGroup")
self.ledger_security_group = ec2.SecurityGroup.from_security_group_id(
self, 'DefaultVpcEndpointSecurityGroup',
security_group_id=self.ledger_security_group_id
)
self.ledger_security_group.connections.allow_from(
self.service.service,
port_range=self.ledger_port_range,
)
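        # The interface endpoints below (CloudWatch Logs, ECR, ECR Docker) plus the S3
        # gateway endpoint let the Fargate tasks pull images and ship logs without
        # leaving the VPC; this matches the usual endpoint set for ECR-backed tasks.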
self.cloud_watch_logs_vpc_endpoint = ec2.InterfaceVpcEndpoint(
self, 'CloudWatchLogsEndpoint',
vpc=self.default_vpc,
subnets=ec2.SubnetSelection(
subnets=self.service_subnets,
),
service=ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS
)
self.cloud_watch_logs_vpc_endpoint.connections.allow_default_port_from(self.cluster)
self.ecr_vpc_endpoint = ec2.InterfaceVpcEndpoint(
self, 'EcrEndpoint',
vpc=self.default_vpc,
subnets=ec2.SubnetSelection(
subnets=self.service_subnets,
),
service=ec2.InterfaceVpcEndpointAwsService.ECR
)
self.ecr_vpc_endpoint.connections.allow_default_port_from(self.cluster)
self.ecr_docker_vpc_endpoint = ec2.InterfaceVpcEndpoint(
self, 'EcrDockerEndpoint',
vpc=self.default_vpc,
subnets=ec2.SubnetSelection(
subnets=self.service_subnets,
),
service=ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER,
)
self.ecr_docker_vpc_endpoint.connections.allow_default_port_from(self.cluster)
ec2.GatewayVpcEndpoint(
self, 'S3Endpoint',
vpc=self.default_vpc,
subnets=[ec2.SubnetSelection(
subnets=self.service_subnets,
)],
service=ec2.GatewayVpcEndpointAwsService.S3,
)
CfnOutput(
self, 'ExplorerImageUri',
value=self.image_asset.image_uri,
description="Explorer Docker image URI",
)
CfnOutput(
self, 'DatabaseHostname',
value=self.database.instance_endpoint.hostname,
description='Database hostname',
)
CfnOutput(
self, 'ExplorerUrl',
value=f"https://{self.domain_name}",
description='Explorer user interface URL',
)
"""
new ec2.GatewayVpcEndpoint(this, 'S3Endpoint', {
vpc: defaultVpc,
subnets: [{subnets: serviceSubnets}],
service: ec2.GatewayVpcEndpointAwsService.S3,
});
new cdk.CfnOutput(this, 'ExplorerImageUri', {
value: imageAsset.imageUri,
description: 'Explorer Docker image URI',
});
new cdk.CfnOutput(this, 'DatabaseHostname', {
value: database.instanceEndpoint.hostname,
description: 'Database hostname',
});
new cdk.CfnOutput(this, 'ExplorerUrl', {
value: `https://${domainName}`,
description: 'Explorer user interface URL',
});
}
}
"""
| [
"aws_cdk.aws_ecs.Cluster",
"aws_cdk.aws_route53.NsRecord",
"aws_cdk.aws_ec2.Subnet",
"aws_cdk.Fn.import_value",
"aws_cdk.aws_ec2.InstanceType.of",
"aws_cdk.aws_rds.DatabaseInstanceEngine.postgres",
"aws_cdk.aws_rds.Credentials.from_password",
"aws_cdk.aws_certificatemanager.Certificate",
"aws_cdk.aws_certificatemanager.CertificateValidation.from_dns",
"aws_cdk.aws_ec2.Vpc.from_lookup",
"aws_cdk.aws_ec2.SecurityGroup.from_security_group_id",
"aws_cdk.aws_route53.PublicHostedZone",
"aws_cdk.aws_ecs.ContainerImage.from_docker_image_asset",
"aws_cdk.aws_ecr_assets.DockerImageAsset",
"aws_cdk.CfnOutput",
"aws_cdk.aws_ec2.Port.tcp_range",
"os.getenv",
"os.getcwd",
"json.load",
"aws_cdk.aws_ec2.SubnetSelection"
] | [((750, 772), 'json.load', 'json.load', (['cloud9_json'], {}), '(cloud9_json)\n', (759, 772), False, 'import json\n'), ((1051, 1107), 'aws_cdk.aws_ec2.Vpc.from_lookup', 'ec2.Vpc.from_lookup', (['self', '"""DefaultVpc"""'], {'is_default': '(True)'}), "(self, 'DefaultVpc', is_default=True)\n", (1070, 1107), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((2844, 2949), 'aws_cdk.aws_ec2.SecurityGroup.from_security_group_id', 'ec2.SecurityGroup.from_security_group_id', (['self', '"""Cloud9SecurityGroup"""', "cloud9_data['securityGroupId']"], {}), "(self, 'Cloud9SecurityGroup',\n cloud9_data['securityGroupId'])\n", (2884, 2949), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((3156, 3230), 'aws_cdk.aws_ecr_assets.DockerImageAsset', 'ecr_assets.DockerImageAsset', (['self', '"""Image"""'], {'directory': 'self.image_directory'}), "(self, 'Image', directory=self.image_directory)\n", (3183, 3230), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((3288, 3348), 'aws_cdk.aws_ecs.ContainerImage.from_docker_image_asset', 'ecs.ContainerImage.from_docker_image_asset', (['self.image_asset'], {}), '(self.image_asset)\n', (3330, 3348), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((3463, 3531), 'aws_cdk.aws_route53.PublicHostedZone', 'r53.PublicHostedZone', (['self', '"""HostedZone"""'], {'zone_name': 'self.domain_name'}), "(self, 'HostedZone', zone_name=self.domain_name)\n", (3483, 3531), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, 
aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((3832, 3976), 'aws_cdk.aws_route53.NsRecord', 'r53.NsRecord', (['self', '"""DelegationRecord"""'], {'zone': 'self.base_zone', 'record_name': '"""explorer"""', 'values': '(self.domain_zone.hosted_zone_name_servers or [])'}), "(self, 'DelegationRecord', zone=self.base_zone, record_name=\n 'explorer', values=self.domain_zone.hosted_zone_name_servers or [])\n", (3844, 3976), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((4058, 4109), 'aws_cdk.aws_certificatemanager.CertificateValidation.from_dns', 'cm.CertificateValidation.from_dns', (['self.domain_zone'], {}), '(self.domain_zone)\n', (4091, 4109), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((4138, 4235), 'aws_cdk.aws_certificatemanager.Certificate', 'cm.Certificate', (['self', '"""Certificate"""'], {'domain_name': 'self.domain_name', 'validation': 'self.validation'}), "(self, 'Certificate', domain_name=self.domain_name,\n validation=self.validation)\n", (4152, 4235), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((5055, 5105), 'aws_cdk.aws_ecs.Cluster', 'ecs.Cluster', (['self', '"""Cluster"""'], {'vpc': 'self.default_vpc'}), "(self, 'Cluster', vpc=self.default_vpc)\n", (5066, 5105), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((6427, 6459), 'aws_cdk.aws_ec2.Port.tcp_range', 
'ec2.Port.tcp_range', (['(30001)', '(30004)'], {}), '(30001, 30004)\n', (6445, 6459), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((6500, 6564), 'aws_cdk.Fn.import_value', 'Fn.import_value', (['"""DocumentLedgerDefaultVpcEndpointSecurityGroup"""'], {}), "('DocumentLedgerDefaultVpcEndpointSecurityGroup')\n", (6515, 6564), False, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((6602, 6741), 'aws_cdk.aws_ec2.SecurityGroup.from_security_group_id', 'ec2.SecurityGroup.from_security_group_id', (['self', '"""DefaultVpcEndpointSecurityGroup"""'], {'security_group_id': 'self.ledger_security_group_id'}), "(self,\n 'DefaultVpcEndpointSecurityGroup', security_group_id=self.\n ledger_security_group_id)\n", (6642, 6741), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((8408, 8522), 'aws_cdk.CfnOutput', 'CfnOutput', (['self', '"""ExplorerImageUri"""'], {'value': 'self.image_asset.image_uri', 'description': '"""Explorer Docker image URI"""'}), "(self, 'ExplorerImageUri', value=self.image_asset.image_uri,\n description='Explorer Docker image URI')\n", (8417, 8522), False, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((8575, 8696), 'aws_cdk.CfnOutput', 'CfnOutput', (['self', '"""DatabaseHostname"""'], {'value': 'self.database.instance_endpoint.hostname', 'description': '"""Database hostname"""'}), "(self, 'DatabaseHostname', value=self.database.instance_endpoint.\n hostname, description='Database hostname')\n", (8584, 8696), False, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as 
cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((8748, 8862), 'aws_cdk.CfnOutput', 'CfnOutput', (['self', '"""ExplorerUrl"""'], {'value': 'f"""https://{self.domain_name}"""', 'description': '"""Explorer user interface URL"""'}), "(self, 'ExplorerUrl', value=f'https://{self.domain_name}',\n description='Explorer user interface URL')\n", (8757, 8862), False, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((674, 685), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (683, 685), False, 'import os\n'), ((1190, 1343), 'aws_cdk.aws_ec2.Subnet', 'ec2.Subnet', (['self', '"""DatabaseSubnet0"""'], {'vpc_id': 'self.default_vpc.vpc_id', 'cidr_block': '"""172.31.160.0/20"""', 'availability_zone': 'self._availability_zones[0]'}), "(self, 'DatabaseSubnet0', vpc_id=self.default_vpc.vpc_id,\n cidr_block='172.31.160.0/20', availability_zone=self._availability_zones[0]\n )\n", (1200, 1343), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((1428, 1581), 'aws_cdk.aws_ec2.Subnet', 'ec2.Subnet', (['self', '"""DatabaseSubnet1"""'], {'vpc_id': 'self.default_vpc.vpc_id', 'cidr_block': '"""172.31.176.0/20"""', 'availability_zone': 'self._availability_zones[1]'}), "(self, 'DatabaseSubnet1', vpc_id=self.default_vpc.vpc_id,\n cidr_block='172.31.176.0/20', availability_zone=self._availability_zones[1]\n )\n", (1438, 1581), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((1666, 1819), 'aws_cdk.aws_ec2.Subnet', 'ec2.Subnet', (['self', '"""DatabaseSubnet2"""'], {'vpc_id': 'self.default_vpc.vpc_id', 'cidr_block': '"""172.31.192.0/20"""', 'availability_zone': 'self._availability_zones[2]'}), "(self, 'DatabaseSubnet2', vpc_id=self.default_vpc.vpc_id,\n cidr_block='172.31.192.0/20', availability_zone=self._availability_zones[2]\n )\n", (1676, 1819), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds 
as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((3103, 3114), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3112, 3114), False, 'import os\n'), ((4324, 4476), 'aws_cdk.aws_ec2.Subnet', 'ec2.Subnet', (['self', '"""ServiceSubnet0"""'], {'vpc_id': 'self.default_vpc.vpc_id', 'cidr_block': '"""172.31.208.0/20"""', 'availability_zone': 'self._availability_zones[0]'}), "(self, 'ServiceSubnet0', vpc_id=self.default_vpc.vpc_id,\n cidr_block='172.31.208.0/20', availability_zone=self._availability_zones[0]\n )\n", (4334, 4476), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((4561, 4713), 'aws_cdk.aws_ec2.Subnet', 'ec2.Subnet', (['self', '"""ServiceSubnet1"""'], {'vpc_id': 'self.default_vpc.vpc_id', 'cidr_block': '"""172.31.224.0/20"""', 'availability_zone': 'self._availability_zones[1]'}), "(self, 'ServiceSubnet1', vpc_id=self.default_vpc.vpc_id,\n cidr_block='172.31.224.0/20', availability_zone=self._availability_zones[1]\n )\n", (4571, 4713), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((4798, 4950), 'aws_cdk.aws_ec2.Subnet', 'ec2.Subnet', (['self', '"""ServiceSubnet2"""'], {'vpc_id': 'self.default_vpc.vpc_id', 'cidr_block': '"""172.31.240.0/20"""', 'availability_zone': 'self._availability_zones[2]'}), "(self, 'ServiceSubnet2', vpc_id=self.default_vpc.vpc_id,\n cidr_block='172.31.240.0/20', availability_zone=self._availability_zones[2]\n )\n", (4808, 4950), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((2090, 2150), 'aws_cdk.Fn.import_value', 'Fn.import_value', (['"""DocumentLedgerExplorerDatabasePasswordArn"""'], {}), "('DocumentLedgerExplorerDatabasePasswordArn')\n", (2105, 2150), False, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as 
rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((2258, 2337), 'aws_cdk.aws_rds.DatabaseInstanceEngine.postgres', 'rds.DatabaseInstanceEngine.postgres', ([], {'version': 'rds.PostgresEngineVersion.VER_12_3'}), '(version=rds.PostgresEngineVersion.VER_12_3)\n', (2293, 2337), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((2365, 2438), 'aws_cdk.aws_ec2.InstanceType.of', 'ec2.InstanceType.of', (['ec2.InstanceClass.BURSTABLE2', 'ec2.InstanceSize.SMALL'], {}), '(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL)\n', (2384, 2438), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((2498, 2548), 'aws_cdk.aws_ec2.SubnetSelection', 'ec2.SubnetSelection', ([], {'subnets': 'self.database_subnets'}), '(subnets=self.database_subnets)\n', (2517, 2548), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((2644, 2746), 'aws_cdk.aws_rds.Credentials.from_password', 'rds.Credentials.from_password', (['self.database_username', 'self.database_password_secret.secret_value'], {}), '(self.database_username, self.\n database_password_secret.secret_value)\n', (2673, 2746), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((3389, 3432), 'os.getenv', 'os.getenv', (['"""LEDGER_DOMAIN_NAME"""'], {'default': 
'""""""'}), "('LEDGER_DOMAIN_NAME', default='')\n", (3398, 3432), False, 'import os\n'), ((3700, 3745), 'aws_cdk.Fn.import_value', 'Fn.import_value', (['"""DocumentLedgerHostedZoneId"""'], {}), "('DocumentLedgerHostedZoneId')\n", (3715, 3745), False, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((3769, 3812), 'os.getenv', 'os.getenv', (['"""LEDGER_DOMAIN_NAME"""'], {'default': '""""""'}), "('LEDGER_DOMAIN_NAME', default='')\n", (3778, 3812), False, 'import os\n'), ((5517, 5566), 'aws_cdk.aws_ec2.SubnetSelection', 'ec2.SubnetSelection', ([], {'subnets': 'self.service_subnets'}), '(subnets=self.service_subnets)\n', (5536, 5566), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((7088, 7137), 'aws_cdk.aws_ec2.SubnetSelection', 'ec2.SubnetSelection', ([], {'subnets': 'self.service_subnets'}), '(subnets=self.service_subnets)\n', (7107, 7137), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((7491, 7540), 'aws_cdk.aws_ec2.SubnetSelection', 'ec2.SubnetSelection', ([], {'subnets': 'self.service_subnets'}), '(subnets=self.service_subnets)\n', (7510, 7540), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((7882, 7931), 'aws_cdk.aws_ec2.SubnetSelection', 'ec2.SubnetSelection', ([], {'subnets': 'self.service_subnets'}), '(subnets=self.service_subnets)\n', (7901, 7931), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, 
aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n'), ((8249, 8298), 'aws_cdk.aws_ec2.SubnetSelection', 'ec2.SubnetSelection', ([], {'subnets': 'self.service_subnets'}), '(subnets=self.service_subnets)\n', (8268, 8298), True, 'from aws_cdk import aws_ec2 as ec2, aws_ecs as ecs, aws_rds as rds, aws_iam as iam, aws_s3 as s3, aws_route53 as r53, aws_route53_targets as r53_targets, aws_certificatemanager as cm, aws_elasticloadbalancingv2_actions as elbv2_actions, aws_elasticloadbalancingv2 as elbv2, aws_secretsmanager as sm, aws_ecr_assets as ecr_assets, aws_elasticloadbalancingv2 as elbv2, aws_apigateway as agw, aws_lambda as _lambda, aws_ecs_patterns as ecs_patterns, RemovalPolicy, Duration, Aws, Stack, CfnOutput, Environment, Fn, Duration\n')] |
def jupyter_setup():
from IPython.display import set_matplotlib_formats
import matplotlib
set_matplotlib_formats('png', 'pdf') # use PNG inline and PDFs when printing
matplotlib.rcParams['figure.figsize'] = [10, 5]
def print_sympy(*args):
'''
    Print the sympy arguments in a way that a Jupyter notebook can render as LaTeX.
    Takes an argument list of plain strings and sympy expressions: strings are
    appended as-is and sympy objects are converted with sympy.latex().
'''
import sympy
import IPython.display
st = []
for arg in args:
if isinstance(arg, str):
st.append(arg)
        else:
            st.append(sympy.latex(arg))
IPython.display.display(IPython.display.Math("".join(st)))
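# Hedged usage sketch (symbols are illustrative; run inside a Jupyter cell so the
# Math object actually renders):
#   import sympy as sp
#   x = sp.symbols('x')
#   print_sympy('I = ', sp.Integral(sp.sin(x), x))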
| [
"sympy.latex",
"IPython.display.set_matplotlib_formats"
] | [((102, 138), 'IPython.display.set_matplotlib_formats', 'set_matplotlib_formats', (['"""png"""', '"""pdf"""'], {}), "('png', 'pdf')\n", (124, 138), False, 'from IPython.display import set_matplotlib_formats\n'), ((573, 589), 'sympy.latex', 'sympy.latex', (['arg'], {}), '(arg)\n', (584, 589), False, 'import sympy\n')] |
""" debug related APIs for N9K """
# Python
import logging
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def enable_backtrace(
device,
service,
module=None,
frame_count=6,
):
""" analyze core by BingoPy
# CISCO INTERNAL
Args:
device (`obj`): Device object
service (`str`): service to enable backtrace
module (`int`): module number for LCs
frame_count (`int`): number of backtraces
Returns:
out (`str`): Output of command
"""
# get sap_id
try:
out = device.parse(
"show system internal sysmgr service name {service}".format(
service=service))
# example:
# {
# "instance": {
# "bgp": {
# "tag": {
# "65000": {
# "internal_id": 87,
# "last_restart_date": "Thu Aug 20 05:49:00 2020",
# "last_terminate_reason": "SYSMGR_DEATH_REASON_FAILURE_SIGNAL",
# "pid": 19262,
# "plugin_id": "1",
# "previous_pid": 18234,
# "process_name": "bgp",
# "restart_count": 12,
# "sap": 308,
# "state": "SRV_STATE_HANDSHAKED",
# "state_start_date": "Thu Aug 20 05:49:00 2020",
# "uuid": "0x11B"
# }
# }
# }
# }
# }
sap_id = out.q.contains(service).get_values('sap', 0)
if not sap_id:
raise Exception("Couldn't get sap id")
except SchemaEmptyParserError:
return ''
# enable backtrace
# example:
# R3_nx# debug service-core sap 308 frame-count 6
# Setting setting frame count 6 for sap 308
if module:
out = device.execute(
'debug service-core module {m} sap {sap} frame-count {fc}'.format(
m=module, sap=sap_id, fc=frame_count))
else:
out = device.execute(
'debug service-core sap {sap} frame-count {fc}'.format(
sap=sap_id, fc=frame_count))
return out
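# Hedged usage sketch (testbed and device names are illustrative; `device` is a
# connected pyATS/Genie device object):
#   from genie.testbed import load
#   dev = load('testbed.yaml').devices['R3_nx']
#   dev.connect()
#   enable_backtrace(dev, service='bgp', frame_count=6)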
| [
"logging.getLogger"
] | [((144, 171), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (161, 171), False, 'import logging\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Classify soundtypes with unsupervised learning
==============================================
Unsupervised learning algorithms search for structures or patterns in a dataset without requiring labels. In the context of ecoacoustics, this approach can be useful to draw inferences when manual labelling is inaccessible or too expensive. For example, unsupervised learning can be used to estimate animal acoustic diversity [1], combine human reasoning and automated procedures to build reference libraries, and find hidden structures in soundscapes.
In this example, we will use unsupervised learning to automatically annotate multiple sounds in an audio recording. The process follows four main steps. We will (i) find sounds that can be delimited in time and frequency, here defined as regions of interest (ROIs), (ii) characterize ROIs by features in the time-frequency domain using 2D wavelets [2], (iii) use t-SNE, a dimensionality reduction algorithm, to reduce the dimensionality of the data [3], and (iv) automatically form homogeneous groups using DBSCAN [4]. We will use a real audio file recorded with an omnidirectional microphone. This audio has a poor signal-to-noise ratio, which is typical of automated audio recordings.
**Dependencies**: This example requires the Python package scikit-learn v0.24 or greater.
"""
# sphinx_gallery_thumbnail_path = './_images/sphx_glr_plot_unsupervised_sound_classification_004.png'
import numpy as np
import matplotlib.pyplot as plt
from maad import sound, features, rois
from maad.util import power2dB, plot2d, format_features, overlay_rois
#%%
# Start by loading an example audio file. We will remove low-frequency ambient noise with a highpass filter and then compute the spectrogram.
s, fs = sound.load('../../data/rock_savanna.wav')
s_filt = sound.select_bandwidth(s, fs, fcut=100, forder=3, ftype='highpass')
db_max=70 # used to define the range of the spectrogram
Sxx, tn, fn, ext = sound.spectrogram(s_filt, fs, nperseg=1024, noverlap=512)
Sxx_db = power2dB(Sxx, db_range=db_max) + db_max
plot2d(Sxx_db, **{'extent':ext})
#%%
# 1. Find regions of interest
# ---------------------------
# To find regions of interest in the spectrogram, we will remove stationary background noise and then find isolated sounds using a double threshold method. Small ROIs due to noise in the signal will be removed.
Sxx_db_rmbg, _, _ = sound.remove_background(Sxx_db)
Sxx_db_smooth = sound.smooth(Sxx_db_rmbg, std=1.2)
im_mask = rois.create_mask(im=Sxx_db_smooth, mode_bin='relative', bin_std=2, bin_per=0.25)
im_rois, df_rois = rois.select_rois(im_mask, min_roi=50, max_roi=None)
# Format ROIs and visualize the bounding box on the audio spectrogram.
df_rois = format_features(df_rois, tn, fn)
ax0, fig0 = overlay_rois(Sxx_db, df_rois, **{'vmin':0, 'vmax':60, 'extent':ext})
#%%
# 2. Compute acoustic features
# ----------------------------
# The ``shape_features`` function uses bidimensional wavelets to get the texture and spectro-temporal shape coefficients of each ROI. Wavelets have the advantage of being robust when the signal-to-noise ratio is low, and derive homogeneous descriptors which facilitate the clustering process. The wavelet decomposition is performed on the complete spectrogram, hence the coefficients for ROIs do not vary much even when the time-frequency bounds are not exact. The centroid features give an estimate of the median frequency of the ROIs.
df_shape, params = features.shape_features(Sxx_db, resolution='low', rois=df_rois)
df_centroid = features.centroid_features(Sxx_db, df_rois)
# Get median frequency and normalize
median_freq = fn[np.round(df_centroid.centroid_y).astype(int)]
df_centroid['centroid_freq'] = median_freq/fn[-1]
#%%
# 3. Reduce the dimensionality of the features
# --------------------------------------------
# The shape audio features have 26 dimensions. To facilitate the clustering process and visualize the results, it is possible to use a non-metric dimensionality reduction algorithm, namely the t-distributed stochastic neighbor embedding (t-SNE), to project the data in two dimensions.
from sklearn.manifold import TSNE
X = df_shape.loc[:,df_shape.columns.str.startswith('shp')]
X = X.join(df_centroid.centroid_freq) # add column and normalize values
tsne = TSNE(n_components=2, perplexity=12, init='pca', verbose=True)
Y = tsne.fit_transform(X)
fig, ax = plt.subplots()
ax.scatter(Y[:,0], Y[:,1], c='gray', alpha=0.8)
ax.set_xlabel('tsne dim 1')
ax.set_ylabel('tsne dim 2')
#%%
# 4. Cluster the ROIs into homogeneous groups.
# --------------------------------------------
# In the above plot it is possible to observe how sounds are aggregated. It is possible to group these samples rapidly and objectively using a clustering algorithm. Here, we will use DBSCAN, a simple algorithm that finds core samples of high density and expands clusters from them. This algorithm has the advantage of automatically finding the number of clusters and can cope with unbalanced classes.
from sklearn.cluster import DBSCAN
cluster = DBSCAN(eps=5, min_samples=4).fit(Y)
print('Number of soundtypes found:', np.unique(cluster.labels_).size)
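# Optional sanity check (assumes pandas is installed); label -1 marks DBSCAN noise points:
# import pandas as pd
# print(pd.Series(cluster.labels_).value_counts())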
#%%
# Visualize the clustering results
from maad.util import rand_cmap
fig, ax = plt.subplots()
ax.scatter(Y[:,0], Y[:,1], c=cluster.labels_, cmap=rand_cmap(5, first_color_black=False), alpha=0.8)
ax.set_xlabel('tsne dim 1')
ax.set_ylabel('tsne dim 2')
# Overlay bounding box on the original spectrogram
df_rois['label'] = cluster.labels_.astype(str)
ax0, fig0 = overlay_rois(Sxx_db, df_rois, **{'vmin':0, 'vmax':60, 'extent':ext})
#%%
# References
# -----------
# 1. <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018). Estimating animal acoustic diversity in tropical environments using unsupervised multiresolution analysis. Ecological Indicators, 90, 346–355. https://doi.org/10.1016/j.ecolind.2018.03.026
# 2. <NAME>., & <NAME>. (2013). Rotation, scaling and deformation invariant scattering for texture discrimination. Computer Vision and Pattern Recognition (CVPR), 2013 IEEE Conference On, 1233–1240. http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6619007
# 3. <NAME>, & <NAME>. (2008). Visualizing data using t-SNE. Journal of Machine Learning Research, 9(Nov), 2579–2605.
# 4. <NAME>., <NAME>., <NAME>., & <NAME>. (1996). A density-based algorithm for discovering clusters in large spatial databases with noise. Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, 96(34), 226–231. | [
"maad.util.overlay_rois",
"maad.features.centroid_features",
"sklearn.cluster.DBSCAN",
"maad.util.rand_cmap",
"maad.util.plot2d",
"maad.features.shape_features",
"maad.sound.load",
"sklearn.manifold.TSNE",
"numpy.round",
"maad.util.format_features",
"maad.rois.create_mask",
"maad.sound.select_bandwidth",
"maad.util.power2dB",
"maad.sound.remove_background",
"maad.sound.smooth",
"numpy.unique",
"maad.sound.spectrogram",
"maad.rois.select_rois",
"matplotlib.pyplot.subplots"
] | [((1810, 1851), 'maad.sound.load', 'sound.load', (['"""../../data/rock_savanna.wav"""'], {}), "('../../data/rock_savanna.wav')\n", (1820, 1851), False, 'from maad import sound, features, rois\n'), ((1861, 1928), 'maad.sound.select_bandwidth', 'sound.select_bandwidth', (['s', 'fs'], {'fcut': '(100)', 'forder': '(3)', 'ftype': '"""highpass"""'}), "(s, fs, fcut=100, forder=3, ftype='highpass')\n", (1883, 1928), False, 'from maad import sound, features, rois\n'), ((2006, 2063), 'maad.sound.spectrogram', 'sound.spectrogram', (['s_filt', 'fs'], {'nperseg': '(1024)', 'noverlap': '(512)'}), '(s_filt, fs, nperseg=1024, noverlap=512)\n', (2023, 2063), False, 'from maad import sound, features, rois\n'), ((2113, 2146), 'maad.util.plot2d', 'plot2d', (['Sxx_db'], {}), "(Sxx_db, **{'extent': ext})\n", (2119, 2146), False, 'from maad.util import power2dB, plot2d, format_features, overlay_rois\n'), ((2443, 2474), 'maad.sound.remove_background', 'sound.remove_background', (['Sxx_db'], {}), '(Sxx_db)\n', (2466, 2474), False, 'from maad import sound, features, rois\n'), ((2491, 2525), 'maad.sound.smooth', 'sound.smooth', (['Sxx_db_rmbg'], {'std': '(1.2)'}), '(Sxx_db_rmbg, std=1.2)\n', (2503, 2525), False, 'from maad import sound, features, rois\n'), ((2536, 2621), 'maad.rois.create_mask', 'rois.create_mask', ([], {'im': 'Sxx_db_smooth', 'mode_bin': '"""relative"""', 'bin_std': '(2)', 'bin_per': '(0.25)'}), "(im=Sxx_db_smooth, mode_bin='relative', bin_std=2, bin_per=0.25\n )\n", (2552, 2621), False, 'from maad import sound, features, rois\n'), ((2637, 2688), 'maad.rois.select_rois', 'rois.select_rois', (['im_mask'], {'min_roi': '(50)', 'max_roi': 'None'}), '(im_mask, min_roi=50, max_roi=None)\n', (2653, 2688), False, 'from maad import sound, features, rois\n'), ((2771, 2803), 'maad.util.format_features', 'format_features', (['df_rois', 'tn', 'fn'], {}), '(df_rois, tn, fn)\n', (2786, 2803), False, 'from maad.util import power2dB, plot2d, format_features, overlay_rois\n'), ((2816, 2887), 'maad.util.overlay_rois', 'overlay_rois', (['Sxx_db', 'df_rois'], {}), "(Sxx_db, df_rois, **{'vmin': 0, 'vmax': 60, 'extent': ext})\n", (2828, 2887), False, 'from maad.util import power2dB, plot2d, format_features, overlay_rois\n'), ((3512, 3575), 'maad.features.shape_features', 'features.shape_features', (['Sxx_db'], {'resolution': '"""low"""', 'rois': 'df_rois'}), "(Sxx_db, resolution='low', rois=df_rois)\n", (3535, 3575), False, 'from maad import sound, features, rois\n'), ((3590, 3633), 'maad.features.centroid_features', 'features.centroid_features', (['Sxx_db', 'df_rois'], {}), '(Sxx_db, df_rois)\n', (3616, 3633), False, 'from maad import sound, features, rois\n'), ((4342, 4403), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'perplexity': '(12)', 'init': '"""pca"""', 'verbose': '(True)'}), "(n_components=2, perplexity=12, init='pca', verbose=True)\n", (4346, 4403), False, 'from sklearn.manifold import TSNE\n'), ((4441, 4455), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4453, 4455), True, 'import matplotlib.pyplot as plt\n'), ((5303, 5317), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5315, 5317), True, 'import matplotlib.pyplot as plt\n'), ((5587, 5658), 'maad.util.overlay_rois', 'overlay_rois', (['Sxx_db', 'df_rois'], {}), "(Sxx_db, df_rois, **{'vmin': 0, 'vmax': 60, 'extent': ext})\n", (5599, 5658), False, 'from maad.util import power2dB, plot2d, format_features, overlay_rois\n'), ((2073, 2103), 'maad.util.power2dB', 'power2dB', (['Sxx'], {'db_range': 
'db_max'}), '(Sxx, db_range=db_max)\n', (2081, 2103), False, 'from maad.util import power2dB, plot2d, format_features, overlay_rois\n'), ((5114, 5142), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(5)', 'min_samples': '(4)'}), '(eps=5, min_samples=4)\n', (5120, 5142), False, 'from sklearn.cluster import DBSCAN\n'), ((5187, 5213), 'numpy.unique', 'np.unique', (['cluster.labels_'], {}), '(cluster.labels_)\n', (5196, 5213), True, 'import numpy as np\n'), ((5369, 5406), 'maad.util.rand_cmap', 'rand_cmap', (['(5)'], {'first_color_black': '(False)'}), '(5, first_color_black=False)\n', (5378, 5406), False, 'from maad.util import rand_cmap\n'), ((3689, 3721), 'numpy.round', 'np.round', (['df_centroid.centroid_y'], {}), '(df_centroid.centroid_y)\n', (3697, 3721), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*-coding:utf-8-*-
import zmq
import sys
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
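# A REQ socket must strictly alternate send()/recv(); each request below blocks until
# the REP server listening on port 5555 answers.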
while True:
socket.send('1111'.encode('utf-8'))
response = socket.recv()
print(response)
| [
"zmq.Context"
] | [((119, 132), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (130, 132), False, 'import zmq\n')] |
from django.db import models
from django.contrib.auth.models import User
## Staff accounts are created by the superuser
class Staff(models.Model):
email = models.EmailField(unique=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def delete(self, *args, **kwargs):
        self.user.delete(*args, **kwargs)  ## deleting the linked User cascades and removes this Staff row
def __str__(self):
return self.user.username
from rest_framework import serializers
'''
class StaffSerializer(serializers.ModelSerializer):
class Meta:
model = Staff
fields = [ 'email', 'user' ]
#'''
| [
"django.db.models.EmailField",
"django.db.models.ForeignKey"
] | [((153, 183), 'django.db.models.EmailField', 'models.EmailField', ([], {'unique': '(True)'}), '(unique=True)\n', (170, 183), False, 'from django.db import models\n'), ((195, 244), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (212, 244), False, 'from django.db import models\n')] |
from panther import lookup_aws_account_name
from panther_base_helpers import deep_get
EVENT_ALLOW_LIST = {'CreateServiceLinkedRole', 'ConsoleLogin'}
def rule(event):
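    # Match successful root-user API calls that were not made by an AWS service on the
    # account's behalf and whose event name is not allow-listed.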
return (deep_get(event, 'userIdentity', 'type') == 'Root' and
event.get('errorMessage') is None and
deep_get(event, 'userIdentity', 'invokedBy') is None and
event.get('eventType') != 'AwsServiceEvent' and
event.get('eventName') not in EVENT_ALLOW_LIST)
def title(event):
return 'AWS root activity detected from [{ip}] in account [{account}]'.format(
ip=event.get('sourceIPAddress'),
account=lookup_aws_account_name(event.get('recipientAccountId')))
def alert_context(event):
return {
'sourceIPAddress': event['sourceIPAddress'],
'userIdentityAccountId': deep_get(event, 'userIdentity', 'accountId'),
'userIdentityArn': deep_get(event, 'userIdentity', 'arn'),
'eventTime': event['eventTime'],
'mfaUsed': deep_get(event, 'additionalEventData', 'MFAUsed')
}
| [
"panther_base_helpers.deep_get"
] | [((819, 863), 'panther_base_helpers.deep_get', 'deep_get', (['event', '"""userIdentity"""', '"""accountId"""'], {}), "(event, 'userIdentity', 'accountId')\n", (827, 863), False, 'from panther_base_helpers import deep_get\n'), ((892, 930), 'panther_base_helpers.deep_get', 'deep_get', (['event', '"""userIdentity"""', '"""arn"""'], {}), "(event, 'userIdentity', 'arn')\n", (900, 930), False, 'from panther_base_helpers import deep_get\n'), ((992, 1041), 'panther_base_helpers.deep_get', 'deep_get', (['event', '"""additionalEventData"""', '"""MFAUsed"""'], {}), "(event, 'additionalEventData', 'MFAUsed')\n", (1000, 1041), False, 'from panther_base_helpers import deep_get\n'), ((181, 220), 'panther_base_helpers.deep_get', 'deep_get', (['event', '"""userIdentity"""', '"""type"""'], {}), "(event, 'userIdentity', 'type')\n", (189, 220), False, 'from panther_base_helpers import deep_get\n'), ((297, 341), 'panther_base_helpers.deep_get', 'deep_get', (['event', '"""userIdentity"""', '"""invokedBy"""'], {}), "(event, 'userIdentity', 'invokedBy')\n", (305, 341), False, 'from panther_base_helpers import deep_get\n')] |
import os
import pickle
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import threading
except ImportError:
threading = None
_testcapi = support.import_module('_testcapi')
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.
__doc__)
InstanceMethod.testfunction.attribute = 'test'
self.assertEqual(testfunction.attribute, 'test')
self.assertRaises(AttributeError, setattr, inst.testfunction,
'attribute', 'test')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, '-c',
'import _testcapi;_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error: PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.
make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError('5')
new_exc = TypeError('TEST')
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__,
new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.
__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__,
new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess,
'_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec, 1, Z(), 3,
(1, 2), 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec, 1, Z(),
3, (1, 2), 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
@unittest.skipUnless(_posixsubprocess,
'_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec, Z(), [b'1'
], 3, (1, 2), 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
'Signature information for builtins requires docstrings')
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
'This docstring has no signature.')
self.assertEqual(_testcapi.docstring_no_signature.
__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"""docstring_with_invalid_signature($module, /, boo)
This docstring has an invalid signature."""
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.
__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.
__doc__,
"""docstring_with_invalid_signature2($module, /, boo)
--
This docstring also has an invalid signature."""
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.
__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
'This docstring has a valid signature.')
self.assertEqual(_testcapi.docstring_with_signature.
__text_signature__, '($module, /, sig)')
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.
__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.
__text_signature__, '($module, /, sig)')
self.assertEqual(_testcapi.
docstring_with_signature_and_extra_newlines.__doc__,
"""
This docstring has a valid signature and some extra newlines."""
)
self.assertEqual(_testcapi.
docstring_with_signature_and_extra_newlines.__text_signature__,
'($module, /, parameter)')
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ('matmul', m1, m2))
self.assertEqual(m1 @ 42, ('matmul', m1, 42))
self.assertEqual(42 @ m1, ('matmul', 42, m1))
o = m1
o @= m2
self.assertEqual(o, ('imatmul', m1, m2))
o = m1
o @= 42
self.assertEqual(o, ('imatmul', m1, 42))
o = 42
o @= m1
self.assertEqual(o, ('matmul', 42, m1))
def test_return_null_without_error(self):
if Py_DEBUG:
code = textwrap.dedent(
"""
import _testcapi
from test import support

                with support.SuppressCrashReport():
_testcapi.return_null_without_error()
"""
)
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
b'Fatal Python error: a function returned NULL without setting an error\\nSystemError: <built-in function return_null_without_error> returned NULL without setting an error\\n\\nCurrent thread.*:\\n File .*", line 6 in <module>'
)
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* returned NULL without setting an error'
)
def test_return_result_with_error(self):
if Py_DEBUG:
code = textwrap.dedent(
"""
import _testcapi
from test import support

                with support.SuppressCrashReport():
_testcapi.return_result_with_error()
"""
)
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
b'Fatal Python error: a function returned a result with an error set\\nValueError\\n\\nThe above exception was the direct cause of the following exception:\\n\\nSystemError: <built-in function return_result_with_error> returned a result with an error set\\n\\nCurrent thread.*:\\n File .*, line 6 in <module>'
)
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* returned a result with an error set'
)
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
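        # Submit n pending callbacks to the interpreter, retrying each submission until
        # the pending-call queue accepts it.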
def callback():
l.append(None)
for i in range(n):
time.sleep(random.random() * 0.02)
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context=None):
count = 0
while len(l) != n:
if False and support.verbose:
print('(%i)' % (len(l),))
for i in range(1000):
a = i * i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
'timeout waiting for %i callbacks, got %i' % (n, len(l)))
if False and support.verbose:
print('(%i)' % (len(l),))
def test_pendingcalls_threaded(self):
n = 32
threads = []
class foo(object):
pass
context = foo()
context.l = []
context.n = 2
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread, args=(
context,)) for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print('finished threads: ', nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = (
"""if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
"""
.format(w))
with open(r, 'rb') as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
class Test6012(unittest.TestCase):
def test(self):
self.assertEqual(_testcapi.argparsing('Hello', 'World'), 1)
class EmbeddingTests(unittest.TestCase):
def setUp(self):
here = os.path.abspath(__file__)
basepath = os.path.dirname(os.path.dirname(os.path.dirname(here)))
exename = '_testembed'
if sys.platform.startswith('win'):
ext = ('_d' if '_d' in sys.executable else '') + '.exe'
exename += ext
exepath = os.path.dirname(sys.executable)
else:
exepath = os.path.join(basepath, 'Programs')
self.test_exe = exe = os.path.join(exepath, exename)
if not os.path.exists(exe):
self.skipTest("%r doesn't exist" % exe)
self.oldcwd = os.getcwd()
os.chdir(basepath)
def tearDown(self):
os.chdir(self.oldcwd)
def run_embedded_interpreter(self, *args):
"""Runs a test in the embedded interpreter"""
cmd = [self.test_exe]
cmd.extend(args)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess
.PIPE, universal_newlines=True)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, 'bad returncode %d, stderr is %r' %
(p.returncode, err))
return out, err
def test_subinterps(self):
out, err = self.run_embedded_interpreter()
if support.verbose:
print()
print(out)
print(err)
@staticmethod
def _get_default_pipe_encoding():
rp, wp = os.pipe()
try:
with os.fdopen(wp, 'w') as w:
default_pipe_encoding = w.encoding
finally:
os.close(rp)
return default_pipe_encoding
def test_forced_io_encoding(self):
out, err = self.run_embedded_interpreter('forced_io_encoding')
if support.verbose:
print()
print(out)
print(err)
expected_errors = sys.__stdout__.errors
expected_stdin_encoding = sys.__stdin__.encoding
expected_pipe_encoding = self._get_default_pipe_encoding()
expected_output = '\n'.join(['--- Use defaults ---',
'Expected encoding: default', 'Expected errors: default',
'stdin: {in_encoding}:{errors}',
'stdout: {out_encoding}:{errors}',
'stderr: {out_encoding}:backslashreplace',
'--- Set errors only ---', 'Expected encoding: default',
'Expected errors: ignore', 'stdin: {in_encoding}:ignore',
'stdout: {out_encoding}:ignore',
'stderr: {out_encoding}:backslashreplace',
'--- Set encoding only ---', 'Expected encoding: latin-1',
'Expected errors: default', 'stdin: latin-1:{errors}',
'stdout: latin-1:{errors}', 'stderr: latin-1:backslashreplace',
'--- Set encoding and errors ---', 'Expected encoding: latin-1',
'Expected errors: replace', 'stdin: latin-1:replace',
'stdout: latin-1:replace', 'stderr: latin-1:backslashreplace'])
expected_output = expected_output.format(in_encoding=
expected_stdin_encoding, out_encoding=expected_pipe_encoding,
errors=expected_errors)
self.maxDiff = None
self.assertEqual(out.strip(), expected_output)
class SkipitemTest(unittest.TestCase):
def test_skipitem(self):
"""
If this test failed, you probably added a new "format unit"
in Python/getargs.c, but neglected to update our poor friend
skipitem() in the same file. (If so, shame on you!)
With a few exceptions**, this function brute-force tests all
printable ASCII*** characters (32 to 126 inclusive) as format units,
checking to see that PyArg_ParseTupleAndKeywords() return consistent
errors both when the unit is attempted to be used and when it is
skipped. If the format unit doesn't exist, we'll get one of two
specific error messages (one for used, one for skipped); if it does
exist we *won't* get that error--we'll get either no error or some
other error. If we get the specific "does not exist" error for one
test and not for the other, there's a mismatch, and the test fails.
** Some format units have special funny semantics and it would
be difficult to accommodate them here. Since these are all
well-established and properly skipped in skipitem() we can
get away with not testing them--this test is really intended
to catch *new* format units.
*** Python C source files must be ASCII. Therefore it's impossible
to have non-ASCII format units.
"""
empty_tuple = ()
tuple_1 = 0,
dict_b = {'b': 1}
keywords = ['a', 'b']
for i in range(32, 127):
c = chr(i)
if c in '()e|$':
continue
format = c + 'i'
try:
_testcapi.parse_tuple_and_keywords(tuple_1, dict_b, format,
keywords)
when_not_skipped = False
except SystemError as e:
s = 'argument 1 (impossible<bad format char>)'
when_not_skipped = str(e) == s
except TypeError:
when_not_skipped = False
optional_format = '|' + format
try:
_testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
optional_format, keywords)
when_skipped = False
except SystemError as e:
s = "impossible<bad format char>: '{}'".format(format)
when_skipped = str(e) == s
message = (
"test_skipitem_parity: detected mismatch between convertsimple and skipitem for format unit '{}' ({}), not skipped {}, skipped {}"
.format(c, i, when_skipped, when_not_skipped))
self.assertIs(when_skipped, when_not_skipped, message)
def test_parse_tuple_and_keywords(self):
self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords, (),
{}, 42, [])
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords, (
), {}, '', 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords, (
), {}, '', [''] * 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords, (
), {}, '', [42])
def test_bad_use(self):
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(1,), {}, '||O', ['a'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(1, 2), {}, '|O|O', ['a', 'b'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {'a': 1}, '$$O', ['a'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {'a': 1, 'b': 2}, '$O$O', ['a', 'b'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {'a': 1}, '$|O', ['a'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {'a': 1, 'b': 2}, '$O|O', ['a', 'b'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(1,), {}, '|O', ['a', 'b'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(1,), {}, '|OO', ['a'])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {}, '|$O', [''])
self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
(), {}, '|OO', ['a', ''])
def test_positional_only(self):
parse = _testcapi.parse_tuple_and_keywords
parse((1, 2, 3), {}, 'OOO', ['', '', 'a'])
parse((1, 2), {'a': 3}, 'OOO', ['', '', 'a'])
with self.assertRaisesRegex(TypeError,
'Function takes at least 2 positional arguments \\(1 given\\)'):
parse((1,), {'a': 3}, 'OOO', ['', '', 'a'])
parse((1,), {}, 'O|OO', ['', '', 'a'])
with self.assertRaisesRegex(TypeError,
'Function takes at least 1 positional arguments \\(0 given\\)'):
parse((), {}, 'O|OO', ['', '', 'a'])
parse((1, 2), {'a': 3}, 'OO$O', ['', '', 'a'])
with self.assertRaisesRegex(TypeError,
'Function takes exactly 2 positional arguments \\(1 given\\)'):
parse((1,), {'a': 3}, 'OO$O', ['', '', 'a'])
parse((1,), {}, 'O|O$O', ['', '', 'a'])
with self.assertRaisesRegex(TypeError,
'Function takes at least 1 positional arguments \\(0 given\\)'):
parse((), {}, 'O|O$O', ['', '', 'a'])
with self.assertRaisesRegex(SystemError,
'Empty parameter name after \\$'):
parse((1,), {}, 'O|$OO', ['', '', 'a'])
with self.assertRaisesRegex(SystemError, 'Empty keyword'):
parse((1,), {}, 'O|OO', ['', 'a', ''])
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
def test__testcapi(self):
for name in dir(_testcapi):
if name.startswith('test_'):
with self.subTest('internal', name=name):
test = getattr(_testcapi, name)
test()
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
PTR_REGEX = '(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code, PYTHONMALLOC=self.
PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (
"Debug memory block at address p={ptr}: API 'm'\\n 16 bytes originally requested\\n The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\\n The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \\(0x[0-9a-f]{{2}}\\):\\n at tail\\+0: 0x78 \\*\\*\\* OUCH\\n at tail\\+1: 0xfb\\n at tail\\+2: 0xfb\\n .*\\n The block was made by call #[0-9]+ to debug malloc/realloc.\\n Data at p: cb cb cb .*\\n\\nFatal Python error: bad trailing pad byte"
)
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (
"Debug memory block at address p={ptr}: API 'm'\\n 16 bytes originally requested\\n The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\\n The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\\n The block was made by call #[0-9]+ to debug malloc/realloc.\\n Data at p: cb cb cb .*\\n\\nFatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\\n"
)
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
@unittest.skipUnless(threading, 'Test requires a GIL (multithreading)')
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = (
'Fatal Python error: Python memory allocator called without holding the GIL'
)
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(sysconfig.get_config_var('WITH_PYMALLOC') == 1,
'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
PYTHONMALLOC = ''
if __name__ == '__main__':
unittest.main()
| [
"re.compile",
"unittest.skipIf",
"sys.platform.startswith",
"sysconfig.get_config_var",
"time.sleep",
"sys.exc_info",
"unittest.main",
"textwrap.dedent",
"os.path.exists",
"threading.Lock",
"subprocess.Popen",
"test.support.script_helper.assert_python_failure",
"test.support.SuppressCrashReport",
"random.random",
"threading.get_ident",
"os.close",
"pickle.load",
"unittest.skipUnless",
"test.support.import_module",
"test.support.run_in_subinterp",
"os.path.dirname",
"os.fdopen",
"test.support.start_threads",
"os.path.join",
"os.getcwd",
"threading.Event",
"os.chdir",
"os.path.abspath",
"threading.Thread",
"os.pipe"
] | [((430, 464), 'test.support.import_module', 'support.import_module', (['"""_testcapi"""'], {}), "('_testcapi')\n", (451, 464), False, 'from test import support\n'), ((8849, 8916), 'unittest.skipUnless', 'unittest.skipUnless', (['threading', '"""Threading required for this test."""'], {}), "(threading, 'Threading required for this test.')\n", (8868, 8916), False, 'import unittest\n'), ((20620, 20687), 'unittest.skipUnless', 'unittest.skipUnless', (['threading', '"""Threading required for this test."""'], {}), "(threading, 'Threading required for this test.')\n", (20639, 20687), False, 'import unittest\n'), ((24243, 24289), 'unittest.skipUnless', 'unittest.skipUnless', (['Py_DEBUG', '"""need Py_DEBUG"""'], {}), "(Py_DEBUG, 'need Py_DEBUG')\n", (24262, 24289), False, 'import unittest\n'), ((1286, 1353), 'unittest.skipUnless', 'unittest.skipUnless', (['threading', '"""Threading required for this test."""'], {}), "(threading, 'Threading required for this test.')\n", (1305, 1353), False, 'import unittest\n'), ((3049, 3134), 'unittest.skipUnless', 'unittest.skipUnless', (['_posixsubprocess', '"""_posixsubprocess required for this test."""'], {}), "(_posixsubprocess,\n '_posixsubprocess required for this test.')\n", (3068, 3134), False, 'import unittest\n'), ((3724, 3809), 'unittest.skipUnless', 'unittest.skipUnless', (['_posixsubprocess', '"""_posixsubprocess required for this test."""'], {}), "(_posixsubprocess,\n '_posixsubprocess required for this test.')\n", (3743, 3809), False, 'import unittest\n'), ((4094, 4193), 'unittest.skipIf', 'unittest.skipIf', (['MISSING_C_DOCSTRINGS', '"""Signature information for builtins requires docstrings"""'], {}), "(MISSING_C_DOCSTRINGS,\n 'Signature information for builtins requires docstrings')\n", (4109, 4193), False, 'import unittest\n'), ((23343, 23413), 'unittest.skipUnless', 'unittest.skipUnless', (['threading', '"""Test requires a GIL (multithreading)"""'], {}), "(threading, 'Test requires a GIL (multithreading)')\n", (23362, 23413), False, 'import unittest\n'), ((24387, 24402), 'unittest.main', 'unittest.main', ([], {}), '()\n', (24400, 24402), False, 'import unittest\n'), ((10067, 10083), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (10081, 10083), False, 'import threading\n'), ((10108, 10125), 'threading.Event', 'threading.Event', ([], {}), '()\n', (10123, 10125), False, 'import threading\n'), ((11070, 11079), 'os.pipe', 'os.pipe', ([], {}), '()\n', (11077, 11079), False, 'import os\n'), ((11784, 11809), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (11799, 11809), False, 'import os\n'), ((11927, 11957), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (11950, 11957), False, 'import sys\n'), ((12209, 12239), 'os.path.join', 'os.path.join', (['exepath', 'exename'], {}), '(exepath, exename)\n', (12221, 12239), False, 'import os\n'), ((12350, 12361), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12359, 12361), False, 'import os\n'), ((12370, 12388), 'os.chdir', 'os.chdir', (['basepath'], {}), '(basepath)\n', (12378, 12388), False, 'import os\n'), ((12422, 12443), 'os.chdir', 'os.chdir', (['self.oldcwd'], {}), '(self.oldcwd)\n', (12430, 12443), False, 'import os\n'), ((12613, 12711), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n', (12629, 12711), False, 'import subprocess\n'), ((13142, 13151), 
'os.pipe', 'os.pipe', ([], {}), '()\n', (13149, 13151), False, 'import os\n'), ((21190, 21221), 'threading.Thread', 'threading.Thread', ([], {'target': 'target'}), '(target=target)\n', (21206, 21221), False, 'import threading\n'), ((22623, 22657), 're.compile', 're.compile', (['regex'], {'flags': 're.DOTALL'}), '(regex, flags=re.DOTALL)\n', (22633, 22657), False, 'import re\n'), ((24087, 24128), 'sysconfig.get_config_var', 'sysconfig.get_config_var', (['"""WITH_PYMALLOC"""'], {}), "('WITH_PYMALLOC')\n", (24111, 24128), False, 'import sysconfig\n'), ((1415, 1444), 'test.support.SuppressCrashReport', 'support.SuppressCrashReport', ([], {}), '()\n', (1442, 1444), False, 'from test import support\n'), ((1462, 1615), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, '-c', 'import _testcapi;_testcapi.crash_no_current_thread()']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "([sys.executable, '-c',\n 'import _testcapi;_testcapi.crash_no_current_thread()'], stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n", (1478, 1615), False, 'import subprocess\n'), ((6796, 7027), 'textwrap.dedent', 'textwrap.dedent', (['"""\n import _testcapi\n from test import support\n\n with support.SuppressCrashReport():\n _testcapi.return_null_without_error()\n """'], {}), '(\n """\n import _testcapi\n from test import support\n\n with support.SuppressCrashReport():\n _testcapi.return_null_without_error()\n """\n )\n', (6811, 7027), False, 'import textwrap\n'), ((7079, 7112), 'test.support.script_helper.assert_python_failure', 'assert_python_failure', (['"""-c"""', 'code'], {}), "('-c', code)\n", (7100, 7112), False, 'from test.support.script_helper import assert_python_failure\n'), ((7790, 8020), 'textwrap.dedent', 'textwrap.dedent', (['"""\n import _testcapi\n from test import support\n\n with support.SuppressCrashReport():\n _testcapi.return_result_with_error()\n """'], {}), '(\n """\n import _testcapi\n from test import support\n\n with support.SuppressCrashReport():\n _testcapi.return_result_with_error()\n """\n )\n', (7805, 8020), False, 'import textwrap\n'), ((8072, 8105), 'test.support.script_helper.assert_python_failure', 'assert_python_failure', (['"""-c"""', 'code'], {}), "('-c', code)\n", (8093, 8105), False, 'from test.support.script_helper import assert_python_failure\n'), ((10145, 10211), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.pendingcalls_thread', 'args': '(context,)'}), '(target=self.pendingcalls_thread, args=(context,))\n', (10161, 10211), False, 'import threading\n'), ((10272, 10302), 'test.support.start_threads', 'support.start_threads', (['threads'], {}), '(threads)\n', (10293, 10302), False, 'from test import support\n'), ((11383, 11413), 'test.support.run_in_subinterp', 'support.run_in_subinterp', (['code'], {}), '(code)\n', (11407, 11413), False, 'from test import support\n'), ((12076, 12107), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (12091, 12107), False, 'import os\n'), ((12144, 12178), 'os.path.join', 'os.path.join', (['basepath', '"""Programs"""'], {}), "(basepath, 'Programs')\n", (12156, 12178), False, 'import os\n'), ((12255, 12274), 'os.path.exists', 'os.path.exists', (['exe'], {}), '(exe)\n', (12269, 12274), False, 'import os\n'), ((13287, 13299), 'os.close', 'os.close', (['rp'], {}), '(rp)\n', (13295, 13299), False, 'import os\n'), ((21011, 21024), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (21021, 21024), False, 'import time\n'), ((21694, 21723), 'test.support.SuppressCrashReport', 
'support.SuppressCrashReport', ([], {}), '()\n', (21721, 21723), False, 'from test import support\n'), ((21743, 21808), 'test.support.script_helper.assert_python_failure', 'assert_python_failure', (['"""-c"""', 'code'], {'PYTHONMALLOC': 'self.PYTHONMALLOC'}), "('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)\n", (21764, 21808), False, 'from test.support.script_helper import assert_python_failure\n'), ((2232, 2246), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2244, 2246), False, 'import sys\n'), ((2379, 2393), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2391, 2393), False, 'import sys\n'), ((2493, 2507), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2505, 2507), False, 'import sys\n'), ((11483, 11497), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11494, 11497), False, 'import pickle\n'), ((11548, 11562), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11559, 11562), False, 'import pickle\n'), ((11861, 11882), 'os.path.dirname', 'os.path.dirname', (['here'], {}), '(here)\n', (11876, 11882), False, 'import os\n'), ((13182, 13200), 'os.fdopen', 'os.fdopen', (['wp', '"""w"""'], {}), "(wp, 'w')\n", (13191, 13200), False, 'import os\n'), ((9104, 9119), 'random.random', 'random.random', ([], {}), '()\n', (9117, 9119), False, 'import random\n'), ((20896, 20917), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (20915, 20917), False, 'import threading\n'), ((21067, 21088), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (21086, 21088), False, 'import threading\n')] |
"""
Signals for the micromasters app
"""
import logging
from django.db.models.signals import (
pre_save,
post_save,
post_delete,
)
from django.dispatch import receiver
from rolepermissions.roles import assign_role, remove_role
from roles.models import Role
log = logging.getLogger(__name__)
@receiver(pre_save, sender=Role, dispatch_uid="save_remove_role_from_user")
def save_remove_role_from_user(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Signal handler that happens before a role assignment is done.
    If the save happens for a modification, the previous role must be removed
    if it does not correspond to other programs.
Theoretically this is not necessary with the current implementation of the
django-role-permission library.
"""
try:
old_instance = Role.objects.get(pk=instance.pk)
except Role.DoesNotExist:
return
# the reason why this check is "> 1" is because this happens BEFORE the save
# so 1 entry is for the current value
if Role.objects.filter(role=old_instance.role).count() > 1:
return
log.debug(
        'removing role %s for user %s',
instance.role,
instance.user.username,
)
remove_role(instance.user, old_instance.role)
@receiver(post_save, sender=Role, dispatch_uid="save_assign_role_to_user")
def save_assign_role_to_user(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Signal handler to assign a logical role to an user every time
the same role is assigned to an user for a program
"""
log.debug(
'assigning role %s to user %s',
instance.role,
instance.user.username,
)
assign_role(instance.user, instance.role)
@receiver(post_delete, sender=Role, dispatch_uid="delete_remove_role_from_user")
def delete_remove_role_from_user(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Signal handler that happens after a role removal is done.
    The role must be removed only if it does not correspond to other programs.
"""
# the reason why this check is "> 0" is because this happens AFTER the delete
# there are no entries for the current value
if Role.objects.filter(role=instance.role).count() > 0:
return
log.debug(
        'removing role %s for user %s',
instance.role,
instance.user.username,
)
remove_role(instance.user, instance.role)
| [
"logging.getLogger",
"roles.models.Role.objects.filter",
"rolepermissions.roles.assign_role",
"rolepermissions.roles.remove_role",
"django.dispatch.receiver",
"roles.models.Role.objects.get"
] | [((280, 307), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (297, 307), False, 'import logging\n'), ((311, 385), 'django.dispatch.receiver', 'receiver', (['pre_save'], {'sender': 'Role', 'dispatch_uid': '"""save_remove_role_from_user"""'}), "(pre_save, sender=Role, dispatch_uid='save_remove_role_from_user')\n", (319, 385), False, 'from django.dispatch import receiver\n'), ((1286, 1359), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Role', 'dispatch_uid': '"""save_assign_role_to_user"""'}), "(post_save, sender=Role, dispatch_uid='save_assign_role_to_user')\n", (1294, 1359), False, 'from django.dispatch import receiver\n'), ((1755, 1834), 'django.dispatch.receiver', 'receiver', (['post_delete'], {'sender': 'Role', 'dispatch_uid': '"""delete_remove_role_from_user"""'}), "(post_delete, sender=Role, dispatch_uid='delete_remove_role_from_user')\n", (1763, 1834), False, 'from django.dispatch import receiver\n'), ((1237, 1282), 'rolepermissions.roles.remove_role', 'remove_role', (['instance.user', 'old_instance.role'], {}), '(instance.user, old_instance.role)\n', (1248, 1282), False, 'from rolepermissions.roles import assign_role, remove_role\n'), ((1710, 1751), 'rolepermissions.roles.assign_role', 'assign_role', (['instance.user', 'instance.role'], {}), '(instance.user, instance.role)\n', (1721, 1751), False, 'from rolepermissions.roles import assign_role, remove_role\n'), ((2410, 2451), 'rolepermissions.roles.remove_role', 'remove_role', (['instance.user', 'instance.role'], {}), '(instance.user, instance.role)\n', (2421, 2451), False, 'from rolepermissions.roles import assign_role, remove_role\n'), ((837, 869), 'roles.models.Role.objects.get', 'Role.objects.get', ([], {'pk': 'instance.pk'}), '(pk=instance.pk)\n', (853, 869), False, 'from roles.models import Role\n'), ((1045, 1088), 'roles.models.Role.objects.filter', 'Role.objects.filter', ([], {'role': 'old_instance.role'}), '(role=old_instance.role)\n', (1064, 1088), False, 'from roles.models import Role\n'), ((2222, 2261), 'roles.models.Role.objects.filter', 'Role.objects.filter', ([], {'role': 'instance.role'}), '(role=instance.role)\n', (2241, 2261), False, 'from roles.models import Role\n')] |
# -*- coding: utf-8 -*-
from enum import Enum, auto
from dataclasses import dataclass, astuple
@dataclass
class Contest:
class Type(Enum):
CF = auto()
IOI = auto()
ICPC = auto()
class Phase(Enum):
BEFORE = auto()
CODING = auto()
PENDING_SYSTEM_TEST = auto()
SYSTEM_TEST = auto()
FINISHED = auto()
id: int
name: str
type: Type
phase: Phase
frozen: bool
durationSeconds: int
startTimeSeconds: int = -1
relativeTimeSeconds: int = -1
preparedBy: str = ""
websiteUrl: str = ""
description: str = ""
difficulty: int = 1
kind: str = ""
icpcRegion: str = ""
country: str = ""
city: str = ""
season: str = ""
def __post_init__(self):
self.type = self.Type[self.type]
self.phase = self.Phase[self.phase]
def __composite_values__(self):
return astuple(self)
| [
"dataclasses.astuple",
"enum.auto"
] | [((158, 164), 'enum.auto', 'auto', ([], {}), '()\n', (162, 164), False, 'from enum import Enum, auto\n'), ((179, 185), 'enum.auto', 'auto', ([], {}), '()\n', (183, 185), False, 'from enum import Enum, auto\n'), ((201, 207), 'enum.auto', 'auto', ([], {}), '()\n', (205, 207), False, 'from enum import Enum, auto\n'), ((249, 255), 'enum.auto', 'auto', ([], {}), '()\n', (253, 255), False, 'from enum import Enum, auto\n'), ((273, 279), 'enum.auto', 'auto', ([], {}), '()\n', (277, 279), False, 'from enum import Enum, auto\n'), ((310, 316), 'enum.auto', 'auto', ([], {}), '()\n', (314, 316), False, 'from enum import Enum, auto\n'), ((339, 345), 'enum.auto', 'auto', ([], {}), '()\n', (343, 345), False, 'from enum import Enum, auto\n'), ((365, 371), 'enum.auto', 'auto', ([], {}), '()\n', (369, 371), False, 'from enum import Enum, auto\n'), ((911, 924), 'dataclasses.astuple', 'astuple', (['self'], {}), '(self)\n', (918, 924), False, 'from dataclasses import dataclass, astuple\n')] |
#!/usr/bin/env python
""" bet strategies
# ----
# License: BSD
# ----
# 0.1: init version - 2016.6 - by <NAME>
"""
import random
from cfg import *
def bet_kelly(P, moneyInHand):  # P is the probability of winning; Q = 1 - P is the probability of losing
Q = 1 - float(P)
ODDS_MAX = 15
rW_avg = 8.0688 #rW is:', 8.068785713972469, 'rL is:', -8.06888799114749
rW = 8.06888 # ??? clean Win rate
rL = 8.06888 # ??? clean loss rate
#F = (P*B - Q ) / B # When rL = 1
F = (P*rW - Q*rL)/ rW
#bet = (moneyInHand/ODDS_MAX) * F
#bet = (moneyInHand/rW_avg) * F
bet = (moneyInHand/rW_avg) * F
"""
    # 1:KILLED, YOU LOST 2: killed often, wins little 3: bad 4: half result 6: 70% to best 7: near best
8: 100W~110W/month 12:96W/month. still good 15: Still good. 1.2X times 20:95W/month. 1.5X times
50: #80-100: half result. many plays
"""
if VERBOSE_B == True:
print ("------------------F: %.2f, bet: %d, @%d-----------------" %(F, bet, moneyInHand))
return int(bet)
def bet_gamblerFallacy(lost_sum, rate):
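    # Gambler's-fallacy style recovery bet: stake enough that, at the given payout rate,
    # a win recovers the accumulated losses plus a 20% margin.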
bet_gf = (lost_sum * 1.2)/rate
return bet_gf
def bet_average():
res = bet_kelly(0.56, MyMoneyInEveryDay)
return res
def gen_randomList(total_pair):
list_pair = []
for x in range(0,total_pair*2):
list_pair.append(random.randint(0,9))
return list_pair
if __name__ == "__main__":
    # Quick demo: print the Kelly bet for the default win probability and bankroll.
    print(bet_average())
| [
"random.randint"
] | [((1402, 1422), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (1416, 1422), False, 'import random\n')] |
#!/usr/bin/env python3
"""
A Python script that checks, installs/updates, or uninstalls the configuration
of your NativeMessaging app for ff2mpv.
Currently requires Python 3.6 minimum.
If you find more issues with setting this up, let's see if we can add to this
script.
"""
import argparse
import json
import os
import subprocess
import winreg
# Command-Line
parser = argparse.ArgumentParser(description="Helper for ff2mpv on windows.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-c",
"--check",
action="store_true",
help="only checks the installation, no modification",
)
group.add_argument(
"-i",
"--install",
action="store_true",
help="installs ff2mpv registry key or updates the path value",
)
group.add_argument(
"-u",
"--uninstall",
action="store_true",
    help="removes ff2mpv registry key and all its values",
)
args = parser.parse_args()
WDIR = os.path.dirname(__file__)
FF2MPV_JSON = fr"{WDIR}\ff2mpv-windows.json"
FF2MPV_KEY = r"Software\Mozilla\NativeMessagingHosts\ff2mpv"
# Assuming current user overrides local machine.
HKEYS = {
"HKEY_CURRENT_USER": winreg.HKEY_CURRENT_USER,
"HKEY_LOCAL_MACHINE": winreg.HKEY_LOCAL_MACHINE,
}
error = False
found_key = False
print("- Checking Registry:")
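# Search for the ff2mpv key under HKEY_CURRENT_USER first, then HKEY_LOCAL_MACHINE.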
for key_name, reg_key in HKEYS.items():
try:
print(fr"{key_name}\{FF2MPV_KEY} ... ", end="")
key_open = winreg.OpenKey(reg_key, FF2MPV_KEY)
hkey_found = reg_key
print("Found.")
except FileNotFoundError:
print("Not found.")
error = True
continue
error = False
found_key = True
break
if not found_key:
if args.install:
# The intermediate missing key are also created.
key_open = winreg.CreateKey(HKEYS["HKEY_CURRENT_USER"], FF2MPV_KEY)
print("Key created.")
if not args.uninstall:
# Install/Update case
ff2mpv_value = winreg.QueryValue(key_open, "")
if args.install:
if ff2mpv_value != FF2MPV_JSON:
winreg.SetValue(
HKEYS["HKEY_CURRENT_USER"], FF2MPV_KEY, winreg.REG_SZ, FF2MPV_JSON
)
ff2mpv_value = winreg.QueryValue(key_open, "")
print("Value set/updated.\nRestart Firefox if it was running.")
else:
print("Nothing to update.")
# Check case
else:
if ff2mpv_value != "":
print("Value of the key is:", ff2mpv_value)
if os.path.exists(ff2mpv_value):
try:
json.load(open(ff2mpv_value, "r"))
except json.decoder.JSONDecodeError:
print(f"error: Is {os.path.basename(ff2mpv_value)} a JSON file?")
else:
print("error: The file does not exist.")
error = True
else:
print("Empty value in the key.")
print('- Environment variable "Path":')
try:
subprocess.run("mpv --version", check=False)
except FileNotFoundError:
print("error: Path for mpv missing.")
print(
'\nPress Win (key between Ctrl and Alt), then type "Environment Variables".'
)
print(
'Add the mpv folder into system or user variable "Path".\nRestart Firefox if it was running.\n'
)
error = True
else:
print("mpv OK.")
# Uninstall case
else:
error = True
if found_key:
# Remove ff2mpv key and all value under it.
winreg.DeleteKey(hkey_found, FF2MPV_KEY)
print("Key deleted.")
else:
print("Nothing to remove.")
if not error:
print("Looks good! Give it a try from Firefox.")
| [
"os.path.exists",
"winreg.OpenKey",
"argparse.ArgumentParser",
"subprocess.run",
"winreg.DeleteKey",
"winreg.SetValue",
"os.path.dirname",
"winreg.CreateKey",
"os.path.basename",
"winreg.QueryValue"
] | [((367, 435), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Helper for ff2mpv on windows."""'}), "(description='Helper for ff2mpv on windows.')\n", (390, 435), False, 'import argparse\n'), ((937, 962), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (952, 962), False, 'import os\n'), ((1926, 1957), 'winreg.QueryValue', 'winreg.QueryValue', (['key_open', '""""""'], {}), "(key_open, '')\n", (1943, 1957), False, 'import winreg\n'), ((1420, 1455), 'winreg.OpenKey', 'winreg.OpenKey', (['reg_key', 'FF2MPV_KEY'], {}), '(reg_key, FF2MPV_KEY)\n', (1434, 1455), False, 'import winreg\n'), ((1770, 1826), 'winreg.CreateKey', 'winreg.CreateKey', (["HKEYS['HKEY_CURRENT_USER']", 'FF2MPV_KEY'], {}), "(HKEYS['HKEY_CURRENT_USER'], FF2MPV_KEY)\n", (1786, 1826), False, 'import winreg\n'), ((2934, 2978), 'subprocess.run', 'subprocess.run', (['"""mpv --version"""'], {'check': '(False)'}), "('mpv --version', check=False)\n", (2948, 2978), False, 'import subprocess\n'), ((3477, 3517), 'winreg.DeleteKey', 'winreg.DeleteKey', (['hkey_found', 'FF2MPV_KEY'], {}), '(hkey_found, FF2MPV_KEY)\n', (3493, 3517), False, 'import winreg\n'), ((2031, 2118), 'winreg.SetValue', 'winreg.SetValue', (["HKEYS['HKEY_CURRENT_USER']", 'FF2MPV_KEY', 'winreg.REG_SZ', 'FF2MPV_JSON'], {}), "(HKEYS['HKEY_CURRENT_USER'], FF2MPV_KEY, winreg.REG_SZ,\n FF2MPV_JSON)\n", (2046, 2118), False, 'import winreg\n'), ((2172, 2203), 'winreg.QueryValue', 'winreg.QueryValue', (['key_open', '""""""'], {}), "(key_open, '')\n", (2189, 2203), False, 'import winreg\n'), ((2464, 2492), 'os.path.exists', 'os.path.exists', (['ff2mpv_value'], {}), '(ff2mpv_value)\n', (2478, 2492), False, 'import os\n'), ((2662, 2692), 'os.path.basename', 'os.path.basename', (['ff2mpv_value'], {}), '(ff2mpv_value)\n', (2678, 2692), False, 'import os\n')] |
import os
import textwrap
from collections import OrderedDict
from glycopeptidepy.structure.glycan import GlycosylationType
from glycan_profiling import serialize
from glycan_profiling.serialize import (
Protein, Glycopeptide, IdentifiedGlycopeptide,
func, MSScan, DatabaseBoundOperation)
from glycan_profiling.chromatogram_tree import Unmodified
from glycan_profiling.tandem.ref import SpectrumReference
from glycan_profiling.plotting.glycan_visual_classification import (
GlycanCompositionClassifierColorizer,
NGlycanCompositionColorizer)
from glycan_profiling.plotting import (figax, SmoothingChromatogramArtist)
from glycan_profiling.plotting.sequence_fragment_logo import glycopeptide_match_logo
from glycan_profiling.plotting.plot_glycoforms import (
GlycoformLayout)
from glycan_profiling.plotting.spectral_annotation import TidySpectrumMatchAnnotator
from glycan_profiling.tandem.glycopeptide.identified_structure import IdentifiedGlycoprotein
from glycan_profiling.tandem.glycopeptide.scoring import CoverageWeightedBinomialScorer
from glycan_profiling.plotting.entity_bar_chart import (
AggregatedAbundanceArtist, BundledGlycanComposition)
from glycan_profiling.output.report.base import (
svguri_plot, png_plot, ReportCreatorBase)
from ms_deisotope.output.mzml import ProcessedMzMLDeserializer
glycan_colorizer_type_map = {
GlycosylationType.n_linked: NGlycanCompositionColorizer,
GlycosylationType.glycosaminoglycan: GlycanCompositionClassifierColorizer({}, 'slateblue'),
GlycosylationType.o_linked: GlycanCompositionClassifierColorizer({}, 'slateblue')
}
def scale_fix_xml_transform(root):
view_box_str = root.attrib["viewBox"]
x_start, y_start, x_end, y_end = map(float, view_box_str.split(" "))
x_start += 0
updated_view_box_str = " ".join(map(str, [x_start, y_start, x_end, y_end]))
root.attrib["viewBox"] = updated_view_box_str
fig_g = root.find(".//{http://www.w3.org/2000/svg}g[@id=\"figure_1\"]")
fig_g.attrib["transform"] = "scale(1.0, 1.0)"
return root
class IdentifiedGlycopeptideDescriberBase(object):
def __init__(self, database_path, analysis_id, mzml_path=None):
self.database_connection = DatabaseBoundOperation(database_path)
self.analysis_id = analysis_id
self.analysis = self.session.query(serialize.Analysis).get(self.analysis_id)
self.mzml_path = mzml_path
self.scan_loader = None
self._make_scan_loader()
def spectrum_match_info(self, glycopeptide):
spectrum_match_ref = glycopeptide.best_spectrum_match
scan_id = spectrum_match_ref.scan.scan_id
scan = self.scan_loader.get_scan_by_id(scan_id)
try:
mass_shift = spectrum_match_ref.mass_shift
except Exception:
mass_shift = Unmodified
if mass_shift.name != Unmodified.name:
mass_shift = mass_shift.convert()
else:
mass_shift = Unmodified
match = CoverageWeightedBinomialScorer.evaluate(
scan,
glycopeptide.structure.convert(),
error_tolerance=self.analysis.parameters["fragment_error_tolerance"],
mass_shift=mass_shift)
specmatch_artist = TidySpectrumMatchAnnotator(match, ax=figax())
specmatch_artist.draw(fontsize=10, pretty=True)
annotated_match_ax = specmatch_artist.ax
scan_title = scan.id
if len(scan_title) > 60:
scan_title = '\n'.join(textwrap.wrap(scan_title, 60))
annotated_match_ax.set_title(scan_title, fontsize=18)
annotated_match_ax.set_ylabel(
annotated_match_ax.get_ylabel(), fontsize=16)
annotated_match_ax.set_xlabel(
annotated_match_ax.get_xlabel(), fontsize=16)
sequence_logo_plot = glycopeptide_match_logo(match, ax=figax())
xlim = list(sequence_logo_plot.get_xlim())
xlim[0] += 1
sequence_logo_plot.set_xlim(xlim[0], xlim[1])
spectrum_plot = png_plot(
annotated_match_ax, svg_width="100%", bbox_inches='tight', height=3 * 1.5,
width=8 * 1.5,
img_width="100%",
patchless=True)
logo_plot = png_plot(
sequence_logo_plot,
svg_width="100%",
img_width="100%",
xml_transform=scale_fix_xml_transform,
bbox_inches='tight',
height=2, width=6 * 1.5, patchless=True)
return dict(
spectrum_plot=spectrum_plot, logo_plot=logo_plot,
precursor_mass_accuracy=match.precursor_mass_accuracy(),
spectrum_match=match)
def _make_scan_loader(self):
if self.mzml_path is not None:
if not os.path.exists(self.mzml_path):
raise IOError("No such file {}".format(self.mzml_path))
self.scan_loader = ProcessedMzMLDeserializer(self.mzml_path)
else:
self.mzml_path = self.analysis.parameters['sample_path']
if not os.path.exists(self.mzml_path):
raise IOError((
"No such file {}. If {} was relocated, you may need to explicily pass the"
" corrected file path.").format(
self.mzml_path,
self.database_connection._original_connection))
self.scan_loader = ProcessedMzMLDeserializer(self.mzml_path)
class IdentifiedGlycopeptideDescriberWorker(IdentifiedGlycopeptideDescriberBase):
def __call__(self, glycopeptide_id):
glycopeptide = self._glycopeptide_from_id(glycopeptide_id)
return self.spectrum_match_info(glycopeptide)
def _glycopeptide_from_id(self, glycopeptide_id):
return self.database_connection.query(
IdentifiedGlycopeptide).get(glycopeptide_id)
class GlycopeptideDatabaseSearchReportCreator(ReportCreatorBase, IdentifiedGlycopeptideDescriberBase):
def __init__(self, database_path, analysis_id, stream=None, threshold=5,
mzml_path=None):
super(GlycopeptideDatabaseSearchReportCreator, self).__init__(
database_path, analysis_id, stream)
self.set_template_loader(os.path.dirname(__file__))
self.mzml_path = mzml_path
self.scan_loader = None
self.threshold = threshold
self.use_dynamic_display_mode = 0
self.analysis = self.session.query(serialize.Analysis).get(self.analysis_id)
self._resolve_hypothesis_id()
self._build_protein_index()
self._make_scan_loader()
self._glycopeptide_counter = 0
if len(self.protein_index) > 10:
self.use_dynamic_display_mode = 1
def _spawn(self):
return IdentifiedGlycopeptideDescriberWorker(self.database_connection, self.analysis_id, self.mzml_path)
def _resolve_hypothesis_id(self):
self.hypothesis_id = self.analysis.hypothesis_id
hypothesis = self.session.query(serialize.GlycopeptideHypothesis).get(self.hypothesis_id)
if hypothesis is None:
self.hypothesis_id = 1
hypothesis = self.session.query(serialize.GlycopeptideHypothesis).get(
self.hypothesis_id)
if hypothesis is None:
raise ValueError("Could not resolve Glycopeptide Hypothesis!")
def prepare_environment(self):
super(GlycopeptideDatabaseSearchReportCreator, self).prepare_environment()
def _build_protein_index(self):
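        # Tally theoretical and identified glycopeptides per protein, then rank proteins
        # by the number of identified matches for the report.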
hypothesis_id = self.hypothesis_id
theoretical_counts = self.session.query(Protein.name, Protein.id, func.count(Glycopeptide.id)).join(
Glycopeptide).group_by(Protein.id).filter(
Protein.hypothesis_id == hypothesis_id).all()
matched_counts = self.session.query(
Protein.name, Protein.id, func.count(IdentifiedGlycopeptide.id)).join(Protein.glycopeptides).join(
IdentifiedGlycopeptide, IdentifiedGlycopeptide.structure_id == Glycopeptide.id).group_by(Protein.id).filter(
IdentifiedGlycopeptide.ms2_score > self.threshold,
IdentifiedGlycopeptide.analysis_id == self.analysis_id).all()
listing = []
index = {}
for protein_name, protein_id, glycopeptide_count in theoretical_counts:
index[protein_id] = {
"protein_name": protein_name,
"protein_id": protein_id,
}
for protein_name, protein_id, glycopeptide_count in matched_counts:
entry = index[protein_id]
entry['identified_glycopeptide_count'] = glycopeptide_count
listing.append(entry)
self.protein_index = sorted(listing, key=lambda x: x["identified_glycopeptide_count"], reverse=True)
for protein_entry in self.protein_index:
protein_entry['protein'] = self.session.query(Protein).get(protein_entry["protein_id"])
return self.protein_index
def iterglycoproteins(self):
n = float(len(self.protein_index))
for i, row in enumerate(self.protein_index, 1):
protein = row['protein']
glycopeptides = self.session.query(
IdentifiedGlycopeptide).join(Glycopeptide).join(
Protein).filter(
IdentifiedGlycopeptide.analysis_id == self.analysis_id,
Glycopeptide.hypothesis_id == self.hypothesis_id,
IdentifiedGlycopeptide.ms2_score > self.threshold,
Protein.id == protein.id).all()
glycoprotein = IdentifiedGlycoprotein(protein, glycopeptides)
self.status_update(
"Processing %s (%d/%d) %0.2f%%" % (
protein.name, i, n, (i / n * 100)))
yield i, glycoprotein
def site_specific_abundance_plots(self, glycoprotein):
axes = OrderedDict()
for glyco_type in glycoprotein.glycosylation_types:
for site in sorted(glycoprotein.glycosylation_sites_for(glyco_type)):
spanning_site = glycoprotein.site_map[glyco_type][site]
if len(spanning_site) == 0:
continue
bundle = BundledGlycanComposition.aggregate(spanning_site)
if len(bundle) == 0:
continue
ax = figax()
AggregatedAbundanceArtist(
bundle, ax=ax, colorizer=glycan_colorizer_type_map[glyco_type]).draw()
ax.set_title("%s Glycans\nat Site %d" % (glyco_type.name, site + 1,), fontsize=18)
axes[site, glyco_type] = svguri_plot(ax, bbox_inches='tight')
return axes
def draw_glycoforms(self, glycoprotein):
ax = figax()
layout = GlycoformLayout(glycoprotein, glycoprotein.identified_glycopeptides, ax=ax)
layout.draw()
svg = layout.to_svg(scale=2.0, height_padding_scale=1.1)
return svg
def chromatogram_plot(self, glycopeptide):
ax = figax()
try:
SmoothingChromatogramArtist(
glycopeptide, ax=ax, label_peaks=False,
colorizer=lambda x: "#48afd0").draw(legend=False)
ax.set_xlabel("Time (Minutes)", fontsize=16)
ax.set_ylabel("Relative Abundance", fontsize=16)
return png_plot(ax, bbox_inches='tight', img_height='100%')
except ValueError:
return "<div style='text-align:center;'>No Chromatogram Found</div>"
def track_entry(self, glycopeptide):
self._glycopeptide_counter += 1
if self._glycopeptide_counter % 15 == 0:
self.status_update(
" ... %d glycopeptides handled" % (self._glycopeptide_counter,))
return self._glycopeptide_counter
def make_template_stream(self):
template_obj = self.env.get_template("overview.templ")
ads = serialize.AnalysisDeserializer(
self.database_connection._original_connection,
analysis_id=self.analysis_id)
hypothesis = ads.analysis.hypothesis
sample_run = ads.analysis.sample_run
if self.use_dynamic_display_mode:
self.status_update("Using dynamic display mode")
template_stream = template_obj.stream(
analysis=ads.analysis,
hypothesis=hypothesis,
sample_run=sample_run,
protein_index=self.protein_index,
glycoprotein_iterator=self.iterglycoproteins(),
renderer=self,
use_dynamic_display_mode=self.use_dynamic_display_mode)
return template_stream
| [
"glycan_profiling.plotting.figax",
"collections.OrderedDict",
"os.path.exists",
"glycan_profiling.serialize.func.count",
"glycan_profiling.output.report.base.svguri_plot",
"glycan_profiling.tandem.glycopeptide.identified_structure.IdentifiedGlycoprotein",
"glycan_profiling.plotting.plot_glycoforms.GlycoformLayout",
"glycan_profiling.output.report.base.png_plot",
"ms_deisotope.output.mzml.ProcessedMzMLDeserializer",
"os.path.dirname",
"glycan_profiling.plotting.SmoothingChromatogramArtist",
"textwrap.wrap",
"glycan_profiling.plotting.entity_bar_chart.BundledGlycanComposition.aggregate",
"glycan_profiling.serialize.DatabaseBoundOperation",
"glycan_profiling.plotting.glycan_visual_classification.GlycanCompositionClassifierColorizer",
"glycan_profiling.serialize.AnalysisDeserializer",
"glycan_profiling.plotting.entity_bar_chart.AggregatedAbundanceArtist"
] | [((1472, 1525), 'glycan_profiling.plotting.glycan_visual_classification.GlycanCompositionClassifierColorizer', 'GlycanCompositionClassifierColorizer', (['{}', '"""slateblue"""'], {}), "({}, 'slateblue')\n", (1508, 1525), False, 'from glycan_profiling.plotting.glycan_visual_classification import GlycanCompositionClassifierColorizer, NGlycanCompositionColorizer\n'), ((1559, 1612), 'glycan_profiling.plotting.glycan_visual_classification.GlycanCompositionClassifierColorizer', 'GlycanCompositionClassifierColorizer', (['{}', '"""slateblue"""'], {}), "({}, 'slateblue')\n", (1595, 1612), False, 'from glycan_profiling.plotting.glycan_visual_classification import GlycanCompositionClassifierColorizer, NGlycanCompositionColorizer\n'), ((2212, 2249), 'glycan_profiling.serialize.DatabaseBoundOperation', 'DatabaseBoundOperation', (['database_path'], {}), '(database_path)\n', (2234, 2249), False, 'from glycan_profiling.serialize import Protein, Glycopeptide, IdentifiedGlycopeptide, func, MSScan, DatabaseBoundOperation\n'), ((3993, 4130), 'glycan_profiling.output.report.base.png_plot', 'png_plot', (['annotated_match_ax'], {'svg_width': '"""100%"""', 'bbox_inches': '"""tight"""', 'height': '(3 * 1.5)', 'width': '(8 * 1.5)', 'img_width': '"""100%"""', 'patchless': '(True)'}), "(annotated_match_ax, svg_width='100%', bbox_inches='tight', height=\n 3 * 1.5, width=8 * 1.5, img_width='100%', patchless=True)\n", (4001, 4130), False, 'from glycan_profiling.output.report.base import svguri_plot, png_plot, ReportCreatorBase\n'), ((4195, 4368), 'glycan_profiling.output.report.base.png_plot', 'png_plot', (['sequence_logo_plot'], {'svg_width': '"""100%"""', 'img_width': '"""100%"""', 'xml_transform': 'scale_fix_xml_transform', 'bbox_inches': '"""tight"""', 'height': '(2)', 'width': '(6 * 1.5)', 'patchless': '(True)'}), "(sequence_logo_plot, svg_width='100%', img_width='100%',\n xml_transform=scale_fix_xml_transform, bbox_inches='tight', height=2,\n width=6 * 1.5, patchless=True)\n", (4203, 4368), False, 'from glycan_profiling.output.report.base import svguri_plot, png_plot, ReportCreatorBase\n'), ((9791, 9804), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9802, 9804), False, 'from collections import OrderedDict\n'), ((10652, 10659), 'glycan_profiling.plotting.figax', 'figax', ([], {}), '()\n', (10657, 10659), False, 'from glycan_profiling.plotting import figax, SmoothingChromatogramArtist\n'), ((10677, 10752), 'glycan_profiling.plotting.plot_glycoforms.GlycoformLayout', 'GlycoformLayout', (['glycoprotein', 'glycoprotein.identified_glycopeptides'], {'ax': 'ax'}), '(glycoprotein, glycoprotein.identified_glycopeptides, ax=ax)\n', (10692, 10752), False, 'from glycan_profiling.plotting.plot_glycoforms import GlycoformLayout\n'), ((10920, 10927), 'glycan_profiling.plotting.figax', 'figax', ([], {}), '()\n', (10925, 10927), False, 'from glycan_profiling.plotting import figax, SmoothingChromatogramArtist\n'), ((11803, 11915), 'glycan_profiling.serialize.AnalysisDeserializer', 'serialize.AnalysisDeserializer', (['self.database_connection._original_connection'], {'analysis_id': 'self.analysis_id'}), '(self.database_connection.\n _original_connection, analysis_id=self.analysis_id)\n', (11833, 11915), False, 'from glycan_profiling import serialize\n'), ((4847, 4888), 'ms_deisotope.output.mzml.ProcessedMzMLDeserializer', 'ProcessedMzMLDeserializer', (['self.mzml_path'], {}), '(self.mzml_path)\n', (4872, 4888), False, 'from ms_deisotope.output.mzml import ProcessedMzMLDeserializer\n'), ((5346, 5387), 
'ms_deisotope.output.mzml.ProcessedMzMLDeserializer', 'ProcessedMzMLDeserializer', (['self.mzml_path'], {}), '(self.mzml_path)\n', (5371, 5387), False, 'from ms_deisotope.output.mzml import ProcessedMzMLDeserializer\n'), ((6162, 6187), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6177, 6187), False, 'import os\n'), ((9495, 9541), 'glycan_profiling.tandem.glycopeptide.identified_structure.IdentifiedGlycoprotein', 'IdentifiedGlycoprotein', (['protein', 'glycopeptides'], {}), '(protein, glycopeptides)\n', (9517, 9541), False, 'from glycan_profiling.tandem.glycopeptide.identified_structure import IdentifiedGlycoprotein\n'), ((11241, 11293), 'glycan_profiling.output.report.base.png_plot', 'png_plot', (['ax'], {'bbox_inches': '"""tight"""', 'img_height': '"""100%"""'}), "(ax, bbox_inches='tight', img_height='100%')\n", (11249, 11293), False, 'from glycan_profiling.output.report.base import svguri_plot, png_plot, ReportCreatorBase\n'), ((3268, 3275), 'glycan_profiling.plotting.figax', 'figax', ([], {}), '()\n', (3273, 3275), False, 'from glycan_profiling.plotting import figax, SmoothingChromatogramArtist\n'), ((3480, 3509), 'textwrap.wrap', 'textwrap.wrap', (['scan_title', '(60)'], {}), '(scan_title, 60)\n', (3493, 3509), False, 'import textwrap\n'), ((3832, 3839), 'glycan_profiling.plotting.figax', 'figax', ([], {}), '()\n', (3837, 3839), False, 'from glycan_profiling.plotting import figax, SmoothingChromatogramArtist\n'), ((4712, 4742), 'os.path.exists', 'os.path.exists', (['self.mzml_path'], {}), '(self.mzml_path)\n', (4726, 4742), False, 'import os\n'), ((4991, 5021), 'os.path.exists', 'os.path.exists', (['self.mzml_path'], {}), '(self.mzml_path)\n', (5005, 5021), False, 'import os\n'), ((10117, 10166), 'glycan_profiling.plotting.entity_bar_chart.BundledGlycanComposition.aggregate', 'BundledGlycanComposition.aggregate', (['spanning_site'], {}), '(spanning_site)\n', (10151, 10166), False, 'from glycan_profiling.plotting.entity_bar_chart import AggregatedAbundanceArtist, BundledGlycanComposition\n'), ((10254, 10261), 'glycan_profiling.plotting.figax', 'figax', ([], {}), '()\n', (10259, 10261), False, 'from glycan_profiling.plotting import figax, SmoothingChromatogramArtist\n'), ((10536, 10572), 'glycan_profiling.output.report.base.svguri_plot', 'svguri_plot', (['ax'], {'bbox_inches': '"""tight"""'}), "(ax, bbox_inches='tight')\n", (10547, 10572), False, 'from glycan_profiling.output.report.base import svguri_plot, png_plot, ReportCreatorBase\n'), ((10953, 11055), 'glycan_profiling.plotting.SmoothingChromatogramArtist', 'SmoothingChromatogramArtist', (['glycopeptide'], {'ax': 'ax', 'label_peaks': '(False)', 'colorizer': "(lambda x: '#48afd0')"}), "(glycopeptide, ax=ax, label_peaks=False,\n colorizer=lambda x: '#48afd0')\n", (10980, 11055), False, 'from glycan_profiling.plotting import figax, SmoothingChromatogramArtist\n'), ((10278, 10372), 'glycan_profiling.plotting.entity_bar_chart.AggregatedAbundanceArtist', 'AggregatedAbundanceArtist', (['bundle'], {'ax': 'ax', 'colorizer': 'glycan_colorizer_type_map[glyco_type]'}), '(bundle, ax=ax, colorizer=\n glycan_colorizer_type_map[glyco_type])\n', (10303, 10372), False, 'from glycan_profiling.plotting.entity_bar_chart import AggregatedAbundanceArtist, BundledGlycanComposition\n'), ((7553, 7580), 'glycan_profiling.serialize.func.count', 'func.count', (['Glycopeptide.id'], {}), '(Glycopeptide.id)\n', (7563, 7580), False, 'from glycan_profiling.serialize import Protein, Glycopeptide, IdentifiedGlycopeptide, func, MSScan, 
DatabaseBoundOperation\n'), ((7784, 7821), 'glycan_profiling.serialize.func.count', 'func.count', (['IdentifiedGlycopeptide.id'], {}), '(IdentifiedGlycopeptide.id)\n', (7794, 7821), False, 'from glycan_profiling.serialize import Protein, Glycopeptide, IdentifiedGlycopeptide, func, MSScan, DatabaseBoundOperation\n')] |
from plenum.common.ledger import Ledger
from plenum.common.types import f
class ThreePcBatch:
def __init__(self,
ledger_id,
inst_id, view_no, pp_seq_no,
pp_time,
valid_txn_count,
state_root, txn_root,
has_audit_txn=True) -> None:
self.ledger_id = ledger_id
self.inst_id = inst_id
self.view_no = view_no
self.pp_seq_no = pp_seq_no
self.pp_time = pp_time
self.valid_txn_count = valid_txn_count
self.state_root = state_root
self.txn_root = txn_root
self.has_audit_txn = has_audit_txn
@staticmethod
def from_pre_prepare(pre_prepare, valid_txn_count, state_root, txn_root):
return ThreePcBatch(ledger_id=pre_prepare.ledgerId,
inst_id=pre_prepare.instId,
view_no=pre_prepare.viewNo,
pp_seq_no=pre_prepare.ppSeqNo,
pp_time=pre_prepare.ppTime,
# do not trust PrePrepare's root hashes and use the current replica's ones
valid_txn_count=valid_txn_count,
state_root=state_root,
txn_root=txn_root,
has_audit_txn=f.AUDIT_TXN_ROOT_HASH.nm in pre_prepare and pre_prepare.auditTxnRootHash is not None)
@staticmethod
def from_ordered(ordered):
return ThreePcBatch(ledger_id=ordered.ledgerId,
inst_id=ordered.instId,
view_no=ordered.viewNo,
pp_seq_no=ordered.ppSeqNo,
pp_time=ordered.ppTime,
valid_txn_count=len(ordered.valid_reqIdr),
state_root=Ledger.strToHash(ordered.stateRootHash),
txn_root=Ledger.strToHash(ordered.txnRootHash),
has_audit_txn=f.AUDIT_TXN_ROOT_HASH.nm in ordered and ordered.auditTxnRootHash is not None)
@staticmethod
def from_batch_committed_dict(batch_comitted):
return ThreePcBatch(ledger_id=batch_comitted[f.LEDGER_ID.nm],
inst_id=batch_comitted[f.INST_ID.nm],
view_no=batch_comitted[f.VIEW_NO.nm],
pp_seq_no=batch_comitted[f.PP_SEQ_NO.nm],
pp_time=batch_comitted[f.PP_TIME.nm],
valid_txn_count=batch_comitted[f.SEQ_NO_END.nm] - batch_comitted[f.SEQ_NO_START.nm] + 1,
state_root=Ledger.strToHash(batch_comitted[f.STATE_ROOT.nm]),
txn_root=Ledger.strToHash(batch_comitted[f.TXN_ROOT.nm]),
has_audit_txn=f.AUDIT_TXN_ROOT_HASH.nm in batch_comitted and batch_comitted[
f.AUDIT_TXN_ROOT_HASH.nm] is not None)
| [
"plenum.common.ledger.Ledger.strToHash"
] | [((1862, 1901), 'plenum.common.ledger.Ledger.strToHash', 'Ledger.strToHash', (['ordered.stateRootHash'], {}), '(ordered.stateRootHash)\n', (1878, 1901), False, 'from plenum.common.ledger import Ledger\n'), ((1940, 1977), 'plenum.common.ledger.Ledger.strToHash', 'Ledger.strToHash', (['ordered.txnRootHash'], {}), '(ordered.txnRootHash)\n', (1956, 1977), False, 'from plenum.common.ledger import Ledger\n'), ((2663, 2712), 'plenum.common.ledger.Ledger.strToHash', 'Ledger.strToHash', (['batch_comitted[f.STATE_ROOT.nm]'], {}), '(batch_comitted[f.STATE_ROOT.nm])\n', (2679, 2712), False, 'from plenum.common.ledger import Ledger\n'), ((2751, 2798), 'plenum.common.ledger.Ledger.strToHash', 'Ledger.strToHash', (['batch_comitted[f.TXN_ROOT.nm]'], {}), '(batch_comitted[f.TXN_ROOT.nm])\n', (2767, 2798), False, 'from plenum.common.ledger import Ledger\n')] |
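# Illustrative only (not part of the original module): the factory methods above
# need real plenum PrePrepare/Ordered messages, so this sketch just exercises the
# plain constructor with made-up placeholder values.
example_batch = ThreePcBatch(ledger_id=1, inst_id=0, view_no=0, pp_seq_no=10,
                             pp_time=1560241033, valid_txn_count=3,
                             state_root="<state-root-hash>", txn_root="<txn-root-hash>",
                             has_audit_txn=True)
assert example_batch.valid_txn_count == 3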
from PIL import Image, ImageDraw, ImageFont
import io
import random
from ..config import IMAGE_PATH
class Photo:
def __init__(self, path=None, xy=(220, 220)):
self.image = Image.new("RGB", xy, (255, 255, 255))
self.idraw = ImageDraw.Draw(self.image)
def resize(self, size):
self.image.thumbnail((size[0], size[1]))
def rectangle(self, size1, size2, color):
self.idraw.rectangle((size1, size2), fill=color)
def text(self, color, xy, text, p=3, shadowcolor="white", outline=True):
x, y = xy[0], xy[1]
if outline:
self.idraw.text((x-p, y), text, font=self.font, fill=shadowcolor)
self.idraw.text((x+p, y), text, font=self.font, fill=shadowcolor)
self.idraw.text((x, y-p), text, font=self.font, fill=shadowcolor)
self.idraw.text((x, y+p), text, font=self.font, fill=shadowcolor)
# thicker border
self.idraw.text((x-p, y-p), text, font=self.font, fill=shadowcolor)
self.idraw.text((x+p, y-p), text, font=self.font, fill=shadowcolor)
self.idraw.text((x-p, y+p), text, font=self.font, fill=shadowcolor)
self.idraw.text((x+p, y+p), text, font=self.font, fill=shadowcolor)
self.idraw.text((x, y), text, font=self.font, fill=color)
def parseXY(self, xy):
xy = xy.split("x")
if int(xy[0]) > 99999:
raise ValueError
if int(xy[1]) > 99999:
raise ValueError
return (int(xy[0]), int(xy[1]))
def save(self):
file = io.BytesIO()
self.image.save(file, "PNG")
file.seek(0)
return file
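    # Note: this setter rebinds the instance attribute `font` to the loaded
    # ImageFont (shadowing the method itself), so call it once, and before text(),
    # which reads self.font.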
def font(self, path, size):
self.font = ImageFont.truetype(path, size=size) | [
"PIL.Image.new",
"PIL.ImageDraw.Draw",
"io.BytesIO",
"PIL.ImageFont.truetype"
] | [((188, 225), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'xy', '(255, 255, 255)'], {}), "('RGB', xy, (255, 255, 255))\n", (197, 225), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((247, 273), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.image'], {}), '(self.image)\n', (261, 273), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1566, 1578), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1576, 1578), False, 'import io\n'), ((1711, 1746), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['path'], {'size': 'size'}), '(path, size=size)\n', (1729, 1746), False, 'from PIL import Image, ImageDraw, ImageFont\n')] |
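# Illustrative usage of the Photo helper above (a sketch, not part of the original
# module). font(...) must be called before text(...), because text() reads
# self.font; the font file name below is an assumption about the local system.
photo = Photo(xy=(220, 220))
photo.rectangle((0, 0), (220, 40), (30, 144, 255))
photo.font("DejaVuSans.ttf", 24)
photo.text((0, 0, 0), (10, 5), "Hello", p=1)
buffer = photo.save()  # in-memory PNG (io.BytesIO)
with open("photo_demo.png", "wb") as out:
    out.write(buffer.read())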
# Setup input functions and datasets
import numpy as np
from scipy.spatial.distance import cdist,pdist,squareform
def pdist_based(a, b, m):
return squareform(pdist(a, metric=m))
funcs = [cdist,pdist_based]
inputs = {(i,j,metric):(np.random.rand(i,3),np.random.rand(j,3),metric) for i in [10,20,50,100] for j in [100,200,500,1000] for metric in ['euclidean', 'cityblock']}
inputs = {(i,j,metric):(np.random.rand(i,3),np.random.rand(j,3),metric) for i in [10,20,30] for j in [100,200] for metric in ['euclidean', 'cityblock', 'minkowski', 'cosine']}
inputs = {(i,j,metric):(np.random.rand(i,3),np.random.rand(j,3),metric) for i in [16, 29, 56] for j in [134,225] for metric in ['euclidean', 'cityblock', 'minkowski', 'cosine']}
inputs = {(i,j,metric):(np.random.rand(i,3),np.random.rand(j,3),metric) for i in [16, 790, 10900] for j in [134,2250] for metric in ['euclidean', 'cityblock', 'minkowski', 'cosine']}
# Benchmark
import benchit
t = benchit.timings(funcs, inputs, multivar=True, input_name=['Array1', 'Array2', 'metric'])
t.plot(logx=True, sp_argID=0, sp_ncols=2)
t.plot(logx=True, sp_argID=1, sp_ncols=2)
t.plot(logx=False, sp_argID=2, sp_ncols=2)
| [
"scipy.spatial.distance.pdist",
"numpy.random.rand",
"benchit.timings"
] | [((955, 1047), 'benchit.timings', 'benchit.timings', (['funcs', 'inputs'], {'multivar': '(True)', 'input_name': "['Array1', 'Array2', 'metric']"}), "(funcs, inputs, multivar=True, input_name=['Array1',\n 'Array2', 'metric'])\n", (970, 1047), False, 'import benchit\n'), ((164, 182), 'scipy.spatial.distance.pdist', 'pdist', (['a'], {'metric': 'm'}), '(a, metric=m)\n', (169, 182), False, 'from scipy.spatial.distance import cdist, pdist, squareform\n'), ((237, 257), 'numpy.random.rand', 'np.random.rand', (['i', '(3)'], {}), '(i, 3)\n', (251, 257), True, 'import numpy as np\n'), ((257, 277), 'numpy.random.rand', 'np.random.rand', (['j', '(3)'], {}), '(j, 3)\n', (271, 277), True, 'import numpy as np\n'), ((405, 425), 'numpy.random.rand', 'np.random.rand', (['i', '(3)'], {}), '(i, 3)\n', (419, 425), True, 'import numpy as np\n'), ((425, 445), 'numpy.random.rand', 'np.random.rand', (['j', '(3)'], {}), '(j, 3)\n', (439, 445), True, 'import numpy as np\n'), ((583, 603), 'numpy.random.rand', 'np.random.rand', (['i', '(3)'], {}), '(i, 3)\n', (597, 603), True, 'import numpy as np\n'), ((603, 623), 'numpy.random.rand', 'np.random.rand', (['j', '(3)'], {}), '(j, 3)\n', (617, 623), True, 'import numpy as np\n'), ((763, 783), 'numpy.random.rand', 'np.random.rand', (['i', '(3)'], {}), '(i, 3)\n', (777, 783), True, 'import numpy as np\n'), ((783, 803), 'numpy.random.rand', 'np.random.rand', (['j', '(3)'], {}), '(j, 3)\n', (797, 803), True, 'import numpy as np\n')] |
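# Sanity check (illustrative, not part of the benchmark): pdist_based ignores its
# second argument and times squareform(pdist(a)) only, so the two functions solve
# the same problem only when both inputs are the same array, in which case
# cdist(a, a) equals squareform(pdist(a)).
a_check = np.random.rand(16, 3)
assert np.allclose(cdist(a_check, a_check, metric='euclidean'),
                   pdist_based(a_check, None, 'euclidean'))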
import torch
class FootPrinter:
def __init__(self, device="cpu", encoder=None):
self.device = device
self.encoder = encoder
def update_encoder(self, encoder):
self.encoder = encoder
self.encoder.to(self.device)
def culc_footprint(self, local_data, dataloader=True):
if dataloader is True:
latent_representation = []
for batch_idx, (x, labels) in enumerate(local_data):
x, labels = x.to(self.device), labels.to(self.device)
output = self.encoder(x)
latent_representation.append(output)
latent_representation = torch.cat(latent_representation)
else:
latent_representation = self.encoder(local_data.to(self.device))
u = torch.mean(latent_representation, axis=0)
sigma = torch.std(latent_representation, axis=0)
footprint = (u, sigma)
return footprint
def kldiv_between_server_and_client(self, server_footprint, client_footprint):
server_u, server_sigma = server_footprint
client_u, client_sigma = client_footprint
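        # Element-wise KL divergence KL(N(client_u, client_sigma^2) || N(server_u, server_sigma^2))
        # between diagonal Gaussians; the constant -1/2 term is omitted, which only
        # shifts the result by a constant.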
kl = torch.log(server_sigma / client_sigma) + (
(client_sigma ** 2) + (client_u - server_u) ** 2
) / (2 * (server_sigma ** 2))
return torch.mean(kl).item()
| [
"torch.mean",
"torch.log",
"torch.std",
"torch.cat"
] | [((786, 827), 'torch.mean', 'torch.mean', (['latent_representation'], {'axis': '(0)'}), '(latent_representation, axis=0)\n', (796, 827), False, 'import torch\n'), ((844, 884), 'torch.std', 'torch.std', (['latent_representation'], {'axis': '(0)'}), '(latent_representation, axis=0)\n', (853, 884), False, 'import torch\n'), ((649, 681), 'torch.cat', 'torch.cat', (['latent_representation'], {}), '(latent_representation)\n', (658, 681), False, 'import torch\n'), ((1138, 1176), 'torch.log', 'torch.log', (['(server_sigma / client_sigma)'], {}), '(server_sigma / client_sigma)\n', (1147, 1176), False, 'import torch\n'), ((1296, 1310), 'torch.mean', 'torch.mean', (['kl'], {}), '(kl)\n', (1306, 1310), False, 'import torch\n')] |
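# Minimal illustrative usage (a sketch, not part of the original module): any
# torch module that maps inputs to a latent vector can serve as the encoder;
# a single Linear layer stands in here and all tensors are random.
encoder = torch.nn.Linear(8, 2)
footprinter = FootPrinter(device="cpu")
footprinter.update_encoder(encoder)
client_footprint = footprinter.culc_footprint(torch.randn(32, 8), dataloader=False)
server_footprint = footprinter.culc_footprint(torch.randn(32, 8), dataloader=False)
print(footprinter.kldiv_between_server_and_client(server_footprint, client_footprint))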
from src.environement.TexasHoldemLimit.rank_hands import compare_hands_5, get_best_hand_7, HandCompareResult, is_same_hand
from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue
import unittest
straight_flush = [
FullDeckCard(CardValue.EIGHT, CardSymbol.CLUBS),
FullDeckCard(CardValue.SEVEN, CardSymbol.CLUBS),
FullDeckCard(CardValue.SIX, CardSymbol.CLUBS),
FullDeckCard(CardValue.FIVE, CardSymbol.CLUBS),
FullDeckCard(CardValue.FOUR, CardSymbol.CLUBS)
]
four = [
FullDeckCard(CardValue.SIX, CardSymbol.CLUBS),
FullDeckCard(CardValue.SIX, CardSymbol.HEART),
FullDeckCard(CardValue.SIX, CardSymbol.DIAMOND),
FullDeckCard(CardValue.SIX, CardSymbol.SPADE),
FullDeckCard(CardValue.FOUR, CardSymbol.CLUBS)
]
full_house = [
FullDeckCard(CardValue.TEN, CardSymbol.CLUBS),
FullDeckCard(CardValue.TEN, CardSymbol.HEART),
FullDeckCard(CardValue.TEN, CardSymbol.DIAMOND),
FullDeckCard(CardValue.SIX, CardSymbol.SPADE),
FullDeckCard(CardValue.SIX, CardSymbol.CLUBS)
]
flush = [
FullDeckCard(CardValue.TEN, CardSymbol.HEART),
FullDeckCard(CardValue.Q, CardSymbol.HEART),
FullDeckCard(CardValue.TWO, CardSymbol.HEART),
FullDeckCard(CardValue.FIVE, CardSymbol.HEART),
FullDeckCard(CardValue.EIGHT, CardSymbol.HEART)
]
straight = [
FullDeckCard(CardValue.EIGHT, CardSymbol.CLUBS),
FullDeckCard(CardValue.SEVEN, CardSymbol.HEART),
FullDeckCard(CardValue.SIX, CardSymbol.DIAMOND),
FullDeckCard(CardValue.FIVE, CardSymbol.CLUBS),
FullDeckCard(CardValue.FOUR, CardSymbol.HEART)
]
three = [
FullDeckCard(CardValue.TEN, CardSymbol.CLUBS),
FullDeckCard(CardValue.TEN, CardSymbol.HEART),
FullDeckCard(CardValue.TEN, CardSymbol.DIAMOND),
FullDeckCard(CardValue.SIX, CardSymbol.SPADE),
FullDeckCard(CardValue.SEVEN, CardSymbol.CLUBS)
]
two_pair = [
FullDeckCard(CardValue.A, CardSymbol.CLUBS),
FullDeckCard(CardValue.A, CardSymbol.HEART),
FullDeckCard(CardValue.SIX, CardSymbol.DIAMOND),
FullDeckCard(CardValue.SIX, CardSymbol.SPADE),
FullDeckCard(CardValue.SEVEN, CardSymbol.CLUBS)
]
pair = [
FullDeckCard(CardValue.J, CardSymbol.CLUBS),
FullDeckCard(CardValue.J, CardSymbol.HEART),
FullDeckCard(CardValue.SIX, CardSymbol.DIAMOND),
FullDeckCard(CardValue.THREE, CardSymbol.SPADE),
FullDeckCard(CardValue.TWO, CardSymbol.CLUBS)
]
high_card = [
FullDeckCard(CardValue.K, CardSymbol.CLUBS),
FullDeckCard(CardValue.J, CardSymbol.HEART),
FullDeckCard(CardValue.SIX, CardSymbol.DIAMOND),
FullDeckCard(CardValue.THREE, CardSymbol.SPADE),
FullDeckCard(CardValue.TWO, CardSymbol.CLUBS)
]
winning_order = [straight_flush, four, full_house, flush, straight, three, two_pair, pair, high_card]
class TestHandRankingTexasHoldemLimit(unittest.TestCase):
def test_hand_ranking_win_loss(self):
for i in range(len(winning_order) - 1):
for i2 in range(i + 1, len(winning_order)):
self.assertEqual(compare_hands_5(winning_order[i], winning_order[i2]), HandCompareResult.WIN)
self.assertEqual(compare_hands_5(winning_order[i2], winning_order[i]), HandCompareResult.LOSS)
for i in range(len(winning_order)):
self.assertEqual(compare_hands_5(winning_order[i], winning_order[i]), HandCompareResult.EQUAL)
def test_card_diff_ranking(self):
self.assertEqual(compare_hands_5([
FullDeckCard(CardValue.A, CardSymbol.CLUBS),
FullDeckCard(CardValue.A, CardSymbol.HEART),
FullDeckCard(CardValue.SIX, CardSymbol.DIAMOND),
FullDeckCard(CardValue.SIX, CardSymbol.SPADE),
FullDeckCard(CardValue.SEVEN, CardSymbol.CLUBS)
], [
FullDeckCard(CardValue.A, CardSymbol.CLUBS),
FullDeckCard(CardValue.A, CardSymbol.HEART),
FullDeckCard(CardValue.SIX, CardSymbol.DIAMOND),
FullDeckCard(CardValue.SIX, CardSymbol.SPADE),
FullDeckCard(CardValue.EIGHT, CardSymbol.CLUBS)
]), HandCompareResult.LOSS)
self.assertEqual(compare_hands_5([
FullDeckCard(CardValue.J, CardSymbol.CLUBS),
FullDeckCard(CardValue.J, CardSymbol.HEART),
FullDeckCard(CardValue.SIX, CardSymbol.DIAMOND),
FullDeckCard(CardValue.THREE, CardSymbol.SPADE),
FullDeckCard(CardValue.TWO, CardSymbol.CLUBS)
], [
FullDeckCard(CardValue.A, CardSymbol.CLUBS),
FullDeckCard(CardValue.K, CardSymbol.HEART),
FullDeckCard(CardValue.Q, CardSymbol.DIAMOND),
FullDeckCard(CardValue.THREE, CardSymbol.SPADE),
FullDeckCard(CardValue.TWO, CardSymbol.CLUBS)
]), HandCompareResult.WIN)
def test_get_best_hand(self):
self.assertEqual(is_same_hand(straight, get_best_hand_7(straight + [
FullDeckCard(CardValue.A, CardSymbol.CLUBS),
FullDeckCard(CardValue.K, CardSymbol.CLUBS)
])), True)
expected = [
FullDeckCard(CardValue.TEN, CardSymbol.CLUBS),
FullDeckCard(CardValue.TEN, CardSymbol.HEART),
FullDeckCard(CardValue.TEN, CardSymbol.DIAMOND),
FullDeckCard(CardValue.SEVEN, CardSymbol.SPADE),
FullDeckCard(CardValue.SEVEN, CardSymbol.CLUBS)
]
self.assertEqual(is_same_hand(expected, get_best_hand_7(expected + [
FullDeckCard(CardValue.SIX, CardSymbol.CLUBS),
FullDeckCard(CardValue.SIX, CardSymbol.CLUBS)
])), True)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"src.environement.TexasHoldemLimit.rank_hands.compare_hands_5",
"src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard"
] | [((264, 311), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.EIGHT', 'CardSymbol.CLUBS'], {}), '(CardValue.EIGHT, CardSymbol.CLUBS)\n', (276, 311), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((317, 364), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SEVEN', 'CardSymbol.CLUBS'], {}), '(CardValue.SEVEN, CardSymbol.CLUBS)\n', (329, 364), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((370, 415), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.CLUBS'], {}), '(CardValue.SIX, CardSymbol.CLUBS)\n', (382, 415), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((421, 467), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.FIVE', 'CardSymbol.CLUBS'], {}), '(CardValue.FIVE, CardSymbol.CLUBS)\n', (433, 467), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((473, 519), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.FOUR', 'CardSymbol.CLUBS'], {}), '(CardValue.FOUR, CardSymbol.CLUBS)\n', (485, 519), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((536, 581), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.CLUBS'], {}), '(CardValue.SIX, CardSymbol.CLUBS)\n', (548, 581), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((587, 632), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.HEART'], {}), '(CardValue.SIX, CardSymbol.HEART)\n', (599, 632), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((638, 685), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.DIAMOND'], {}), '(CardValue.SIX, CardSymbol.DIAMOND)\n', (650, 685), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((691, 736), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.SPADE'], {}), '(CardValue.SIX, CardSymbol.SPADE)\n', (703, 736), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((742, 788), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.FOUR', 'CardSymbol.CLUBS'], {}), '(CardValue.FOUR, CardSymbol.CLUBS)\n', (754, 788), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((811, 856), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.CLUBS'], {}), '(CardValue.TEN, CardSymbol.CLUBS)\n', (823, 856), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((862, 907), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.HEART'], {}), '(CardValue.TEN, 
CardSymbol.HEART)\n', (874, 907), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((913, 960), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.DIAMOND'], {}), '(CardValue.TEN, CardSymbol.DIAMOND)\n', (925, 960), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((966, 1011), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.SPADE'], {}), '(CardValue.SIX, CardSymbol.SPADE)\n', (978, 1011), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1017, 1062), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.CLUBS'], {}), '(CardValue.SIX, CardSymbol.CLUBS)\n', (1029, 1062), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1081, 1126), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.HEART'], {}), '(CardValue.TEN, CardSymbol.HEART)\n', (1093, 1126), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1132, 1175), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.Q', 'CardSymbol.HEART'], {}), '(CardValue.Q, CardSymbol.HEART)\n', (1144, 1175), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1181, 1226), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TWO', 'CardSymbol.HEART'], {}), '(CardValue.TWO, CardSymbol.HEART)\n', (1193, 1226), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1232, 1278), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.FIVE', 'CardSymbol.HEART'], {}), '(CardValue.FIVE, CardSymbol.HEART)\n', (1244, 1278), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1284, 1331), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.EIGHT', 'CardSymbol.HEART'], {}), '(CardValue.EIGHT, CardSymbol.HEART)\n', (1296, 1331), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1352, 1399), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.EIGHT', 'CardSymbol.CLUBS'], {}), '(CardValue.EIGHT, CardSymbol.CLUBS)\n', (1364, 1399), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1405, 1452), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SEVEN', 'CardSymbol.HEART'], {}), '(CardValue.SEVEN, CardSymbol.HEART)\n', (1417, 1452), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1458, 1505), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.DIAMOND'], {}), '(CardValue.SIX, CardSymbol.DIAMOND)\n', (1470, 1505), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, 
CardValue\n'), ((1511, 1557), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.FIVE', 'CardSymbol.CLUBS'], {}), '(CardValue.FIVE, CardSymbol.CLUBS)\n', (1523, 1557), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1563, 1609), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.FOUR', 'CardSymbol.HEART'], {}), '(CardValue.FOUR, CardSymbol.HEART)\n', (1575, 1609), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1627, 1672), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.CLUBS'], {}), '(CardValue.TEN, CardSymbol.CLUBS)\n', (1639, 1672), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1678, 1723), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.HEART'], {}), '(CardValue.TEN, CardSymbol.HEART)\n', (1690, 1723), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1729, 1776), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.DIAMOND'], {}), '(CardValue.TEN, CardSymbol.DIAMOND)\n', (1741, 1776), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1782, 1827), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.SPADE'], {}), '(CardValue.SIX, CardSymbol.SPADE)\n', (1794, 1827), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1833, 1880), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SEVEN', 'CardSymbol.CLUBS'], {}), '(CardValue.SEVEN, CardSymbol.CLUBS)\n', (1845, 1880), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1901, 1944), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.A', 'CardSymbol.CLUBS'], {}), '(CardValue.A, CardSymbol.CLUBS)\n', (1913, 1944), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1950, 1993), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.A', 'CardSymbol.HEART'], {}), '(CardValue.A, CardSymbol.HEART)\n', (1962, 1993), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((1999, 2046), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.DIAMOND'], {}), '(CardValue.SIX, CardSymbol.DIAMOND)\n', (2011, 2046), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2052, 2097), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.SPADE'], {}), '(CardValue.SIX, CardSymbol.SPADE)\n', (2064, 2097), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2103, 2150), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SEVEN', 
'CardSymbol.CLUBS'], {}), '(CardValue.SEVEN, CardSymbol.CLUBS)\n', (2115, 2150), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2167, 2210), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.J', 'CardSymbol.CLUBS'], {}), '(CardValue.J, CardSymbol.CLUBS)\n', (2179, 2210), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2216, 2259), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.J', 'CardSymbol.HEART'], {}), '(CardValue.J, CardSymbol.HEART)\n', (2228, 2259), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2265, 2312), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.DIAMOND'], {}), '(CardValue.SIX, CardSymbol.DIAMOND)\n', (2277, 2312), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2318, 2365), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.THREE', 'CardSymbol.SPADE'], {}), '(CardValue.THREE, CardSymbol.SPADE)\n', (2330, 2365), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2371, 2416), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TWO', 'CardSymbol.CLUBS'], {}), '(CardValue.TWO, CardSymbol.CLUBS)\n', (2383, 2416), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2438, 2481), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.K', 'CardSymbol.CLUBS'], {}), '(CardValue.K, CardSymbol.CLUBS)\n', (2450, 2481), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2487, 2530), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.J', 'CardSymbol.HEART'], {}), '(CardValue.J, CardSymbol.HEART)\n', (2499, 2530), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2536, 2583), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.DIAMOND'], {}), '(CardValue.SIX, CardSymbol.DIAMOND)\n', (2548, 2583), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2589, 2636), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.THREE', 'CardSymbol.SPADE'], {}), '(CardValue.THREE, CardSymbol.SPADE)\n', (2601, 2636), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((2642, 2687), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TWO', 'CardSymbol.CLUBS'], {}), '(CardValue.TWO, CardSymbol.CLUBS)\n', (2654, 2687), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((5629, 5644), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5642, 5644), False, 'import unittest\n'), ((5086, 5131), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.CLUBS'], {}), 
'(CardValue.TEN, CardSymbol.CLUBS)\n', (5098, 5131), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((5145, 5190), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.HEART'], {}), '(CardValue.TEN, CardSymbol.HEART)\n', (5157, 5190), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((5204, 5251), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TEN', 'CardSymbol.DIAMOND'], {}), '(CardValue.TEN, CardSymbol.DIAMOND)\n', (5216, 5251), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((5265, 5312), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SEVEN', 'CardSymbol.SPADE'], {}), '(CardValue.SEVEN, CardSymbol.SPADE)\n', (5277, 5312), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((5326, 5373), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SEVEN', 'CardSymbol.CLUBS'], {}), '(CardValue.SEVEN, CardSymbol.CLUBS)\n', (5338, 5373), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3292, 3343), 'src.environement.TexasHoldemLimit.rank_hands.compare_hands_5', 'compare_hands_5', (['winning_order[i]', 'winning_order[i]'], {}), '(winning_order[i], winning_order[i])\n', (3307, 3343), False, 'from src.environement.TexasHoldemLimit.rank_hands import compare_hands_5, get_best_hand_7, HandCompareResult, is_same_hand\n'), ((3031, 3083), 'src.environement.TexasHoldemLimit.rank_hands.compare_hands_5', 'compare_hands_5', (['winning_order[i]', 'winning_order[i2]'], {}), '(winning_order[i], winning_order[i2])\n', (3046, 3083), False, 'from src.environement.TexasHoldemLimit.rank_hands import compare_hands_5, get_best_hand_7, HandCompareResult, is_same_hand\n'), ((3141, 3193), 'src.environement.TexasHoldemLimit.rank_hands.compare_hands_5', 'compare_hands_5', (['winning_order[i2]', 'winning_order[i]'], {}), '(winning_order[i2], winning_order[i])\n', (3156, 3193), False, 'from src.environement.TexasHoldemLimit.rank_hands import compare_hands_5, get_best_hand_7, HandCompareResult, is_same_hand\n'), ((3468, 3511), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.A', 'CardSymbol.CLUBS'], {}), '(CardValue.A, CardSymbol.CLUBS)\n', (3480, 3511), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3529, 3572), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.A', 'CardSymbol.HEART'], {}), '(CardValue.A, CardSymbol.HEART)\n', (3541, 3572), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3590, 3637), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.DIAMOND'], {}), '(CardValue.SIX, CardSymbol.DIAMOND)\n', (3602, 3637), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3655, 3700), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.SPADE'], {}), '(CardValue.SIX, CardSymbol.SPADE)\n', (3667, 
3700), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3718, 3765), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SEVEN', 'CardSymbol.CLUBS'], {}), '(CardValue.SEVEN, CardSymbol.CLUBS)\n', (3730, 3765), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3796, 3839), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.A', 'CardSymbol.CLUBS'], {}), '(CardValue.A, CardSymbol.CLUBS)\n', (3808, 3839), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3857, 3900), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.A', 'CardSymbol.HEART'], {}), '(CardValue.A, CardSymbol.HEART)\n', (3869, 3900), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3918, 3965), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.DIAMOND'], {}), '(CardValue.SIX, CardSymbol.DIAMOND)\n', (3930, 3965), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((3983, 4028), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.SPADE'], {}), '(CardValue.SIX, CardSymbol.SPADE)\n', (3995, 4028), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4046, 4093), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.EIGHT', 'CardSymbol.CLUBS'], {}), '(CardValue.EIGHT, CardSymbol.CLUBS)\n', (4058, 4093), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4186, 4229), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.J', 'CardSymbol.CLUBS'], {}), '(CardValue.J, CardSymbol.CLUBS)\n', (4198, 4229), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4243, 4286), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.J', 'CardSymbol.HEART'], {}), '(CardValue.J, CardSymbol.HEART)\n', (4255, 4286), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4300, 4347), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.DIAMOND'], {}), '(CardValue.SIX, CardSymbol.DIAMOND)\n', (4312, 4347), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4361, 4408), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.THREE', 'CardSymbol.SPADE'], {}), '(CardValue.THREE, CardSymbol.SPADE)\n', (4373, 4408), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4422, 4467), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TWO', 'CardSymbol.CLUBS'], {}), '(CardValue.TWO, CardSymbol.CLUBS)\n', (4434, 4467), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4493, 4536), 
'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.A', 'CardSymbol.CLUBS'], {}), '(CardValue.A, CardSymbol.CLUBS)\n', (4505, 4536), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4550, 4593), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.K', 'CardSymbol.HEART'], {}), '(CardValue.K, CardSymbol.HEART)\n', (4562, 4593), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4607, 4652), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.Q', 'CardSymbol.DIAMOND'], {}), '(CardValue.Q, CardSymbol.DIAMOND)\n', (4619, 4652), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4666, 4713), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.THREE', 'CardSymbol.SPADE'], {}), '(CardValue.THREE, CardSymbol.SPADE)\n', (4678, 4713), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4727, 4772), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.TWO', 'CardSymbol.CLUBS'], {}), '(CardValue.TWO, CardSymbol.CLUBS)\n', (4739, 4772), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4933, 4976), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.A', 'CardSymbol.CLUBS'], {}), '(CardValue.A, CardSymbol.CLUBS)\n', (4945, 4976), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((4990, 5033), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.K', 'CardSymbol.CLUBS'], {}), '(CardValue.K, CardSymbol.CLUBS)\n', (5002, 5033), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((5473, 5518), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.CLUBS'], {}), '(CardValue.SIX, CardSymbol.CLUBS)\n', (5485, 5518), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n'), ((5532, 5577), 'src.environement.TexasHoldemLimit.TexasHoldemDealer.FullDeckCard', 'FullDeckCard', (['CardValue.SIX', 'CardSymbol.CLUBS'], {}), '(CardValue.SIX, CardSymbol.CLUBS)\n', (5544, 5577), False, 'from src.environement.TexasHoldemLimit.TexasHoldemDealer import FullDeckCard, CardSymbol, CardValue\n')] |
#used to organize ESCs from various sources into the component database
import os
import sqlite3 as sql
from dbfread import DBF
def isNum(x):
try:
float(x)
return True
except ValueError:
return False
except TypeError:
return False
databaseFile = os.getcwd() + "/components.db"
connection = sql.connect(databaseFile)
cursor = connection.cursor()
cursor.execute("drop table ESCs")
cursor.execute("""create table ESCs (id INTEGER PRIMARY KEY,
Name VARCHAR(40),
manufacturer VARCHAR,
Imax FLOAT,
Ipeak FLOAT,
Weight FLOAT,
Ri FLOAT);""")
print("Reading MotoCalc Database")
escFilePath = os.getcwd() + "/ESCs/ESC8.DBF"
escFile = DBF(escFilePath)
for record in escFile:
print(record)
if record["MAXCURRENT"] == 0 or record["MAXCURRENT"] == None:
continue
formatStr = """INSERT INTO ESCs (Name, manufacturer, Weight, Imax, Ri) VALUES ("{name}", "{manu}", {weight}, {iMax}, {Ri});"""
command = formatStr.format(name = record["ESCNAME"].strip(), manu = record["ESCNAME"].split(" " )[0].upper(), weight = record["WEIGHT"], iMax = record["MAXCURRENT"], Ri = record["RESISTANCE"])
cursor.execute(command)
print("Reading Database after MotoCalc")
cursor.execute("SELECT * FROM ESCs")
result = cursor.fetchall()
for r in result:
print(r)
print("Reading DriveCalc database")
inDatabaseFile = os.getcwd() + "/ESCs/DCbase.dcd"
inConnection = sql.connect(inDatabaseFile)
inCursor = inConnection.cursor()
inCursor.execute("SELECT * FROM ESC")
escs = inCursor.fetchall()
for esc in escs:
if esc[4] == 0 or esc[4] == None:
continue
formatStr = """INSERT INTO ESCs (Name, manufacturer, Imax, Ipeak, Weight, Ri) VALUES ("{name}", "{manu}", {iMax}, {iPeak}, {weight}, {res});"""
command = formatStr.format(name = esc[2].strip(), manu = esc[2].split(" ")[0].upper(), iMax = esc[4], iPeak = esc[5], weight = esc[7]*0.035274, res = esc[6])
cursor.execute(command)
print("Reading Database after DriveCalc")
cursor.execute("SELECT * FROM ESCs")
result = cursor.fetchall()
for r in result:
print(r)
inCursor.close()
connection.commit()
connection.close()
| [
"sqlite3.connect",
"dbfread.DBF",
"os.getcwd"
] | [((351, 376), 'sqlite3.connect', 'sql.connect', (['databaseFile'], {}), '(databaseFile)\n', (362, 376), True, 'import sqlite3 as sql\n'), ((933, 949), 'dbfread.DBF', 'DBF', (['escFilePath'], {}), '(escFilePath)\n', (936, 949), False, 'from dbfread import DBF\n'), ((1692, 1719), 'sqlite3.connect', 'sql.connect', (['inDatabaseFile'], {}), '(inDatabaseFile)\n', (1703, 1719), True, 'import sqlite3 as sql\n'), ((306, 317), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (315, 317), False, 'import os\n'), ((891, 902), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (900, 902), False, 'import os\n'), ((1643, 1654), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1652, 1654), False, 'import os\n')] |
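# Illustrative alternative (a sketch, reusing cursor and escs from the script
# above): the INSERTs built with str.format break on names containing quotes;
# sqlite3's "?" placeholders sidestep that. Shown for the DriveCalc loop only,
# the MotoCalc loop is analogous.
insert_sql = ("INSERT INTO ESCs (Name, manufacturer, Imax, Ipeak, Weight, Ri) "
              "VALUES (?, ?, ?, ?, ?, ?);")
for esc in escs:
    if esc[4] == 0 or esc[4] is None:
        continue
    cursor.execute(insert_sql, (esc[2].strip(), esc[2].split(" ")[0].upper(),
                                esc[4], esc[5], esc[7] * 0.035274, esc[6]))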
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
multithreading_in_python-3.py
Consider the program below to understand the concept of race condition:
A race condition occurs when two or more threads can access shared data
and they try to change it at the same time. As a result, the values of
variables may be unpredictable and vary depending on the timings of
context switches of the processes.
Multithreading in Python | Set 2 (Synchronization)
https://www.geeksforgeeks.org/multithreading-in-python-set-2-synchronization/
"""
import threading
# global variable x
x = 0
def increment():
"""
function to increment global variable x
"""
global x
x += 1
def thread_task():
"""
task for thread
calls increment function 100000 times.
"""
for _ in range(100000):
increment()
def main_task():
global x
# setting global variable x as 0
x = 0
# creating threads
t1 = threading.Thread(target=thread_task)
t2 = threading.Thread(target=thread_task)
# start threads
t1.start()
t2.start()
# wait until threads finish their job
t1.join()
t2.join()
if __name__ == "__main__":
for i in range(10):
main_task()
print("Iteration {0}: x = {1}".format(i,x))
'''
First run
Iteration 0: x = 200000
Iteration 1: x = 200000
Iteration 2: x = 200000
Iteration 3: x = 200000
Iteration 4: x = 200000
Iteration 5: x = 200000
Iteration 6: x = 200000
Iteration 7: x = 200000
Iteration 8: x = 200000
Iteration 9: x = 200000
'''
'''
Second run
Iteration 0: x = 161556
Iteration 1: x = 200000
Iteration 2: x = 200000
Iteration 3: x = 200000
Iteration 4: x = 200000
Iteration 5: x = 200000
Iteration 6: x = 200000
Iteration 7: x = 200000
Iteration 8: x = 138571
Iteration 9: x = 200000
''' | [
"threading.Thread"
] | [((983, 1019), 'threading.Thread', 'threading.Thread', ([], {'target': 'thread_task'}), '(target=thread_task)\n', (999, 1019), False, 'import threading\n'), ((1030, 1066), 'threading.Thread', 'threading.Thread', ([], {'target': 'thread_task'}), '(target=thread_task)\n', (1046, 1066), False, 'import threading\n')] |
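# The synchronization article referenced in the docstring resolves this race by
# serializing the increments with a lock; a minimal, self-contained sketch of
# that fix (same structure as the program above):
import threading

x = 0

def increment():
    global x
    x += 1

def thread_task(lock):
    for _ in range(100000):
        lock.acquire()
        increment()
        lock.release()

def main_task():
    global x
    x = 0
    lock = threading.Lock()
    t1 = threading.Thread(target=thread_task, args=(lock,))
    t2 = threading.Thread(target=thread_task, args=(lock,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()

if __name__ == "__main__":
    for i in range(10):
        main_task()
        # With the lock held around each increment, x ends at 200000 every time.
        print("Iteration {0}: x = {1}".format(i, x))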
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 28 20:57:10 2018
@author: jonathan
"""
import gpib
import numpy as np
# GPIB interface 0, address 15
con = gpib.dev(0,15)
status = gpib.write(con, "COMM_FORMAT OFF")
#status = gpib.write(con, "COMM_FORMAT OFF,WORD,BIN")
status = gpib.write(con, "COMM_HEADER OFF")
status = gpib.write(con, "*IDN?")
deviceID = gpib.read(con, 1000).decode()
print("found device: " + deviceID)
# get template
print("fetching template")
status = gpib.write(con, "TEMPLATE?")
template = ""
chunk_size = 1024
keepFetching = True
while keepFetching:
temp = gpib.read(con, chunk_size).decode()
template += temp
print("read " + np.str(len(template)) + " to " + np.str(len(template)+len(temp)))
if len(temp) < chunk_size:
keepFetching = False
with open("template.txt", "w") as f:
f.write(template)
# fetch trace 1
print("fetching T1...")
status = gpib.write(con, "T1: WF?")
trace1 = b''
chunk_size = 1024
keepFetching = True
while keepFetching:
temp = gpib.read(con, chunk_size)
trace1 += temp
print("read " + np.str(len(trace1)) + " to " + np.str(len(trace1)+len(temp)))
if len(temp) < chunk_size:
keepFetching = False
with open("trace1.000", "wb") as f:
f.write(trace1)
# fetch trace 2
print("fetching T2...")
status = gpib.write(con, "T2: WF?")
trace2 = b''
chunk_size = 1024
keepFetching = True
while keepFetching:
temp = gpib.read(con, chunk_size)
trace2 += temp
print("read " + np.str(len(trace2)) + " to " + np.str(len(trace2)+len(temp)))
if len(temp) < chunk_size:
keepFetching = False
with open("trace2.000", "wb") as f:
f.write(trace2)
# fetch trace 3
print("fetching T3...")
status = gpib.write(con, "T3: WF?")
trace3 = b''
chunk_size = 1024
keepFetching = True
while keepFetching:
temp = gpib.read(con, chunk_size)
trace3 += temp
print("read " + np.str(len(trace3)) + " to " + np.str(len(trace3)+len(temp)))
if len(temp) < chunk_size:
keepFetching = False
with open("trace3.000", "wb") as f:
f.write(trace3)
# fetch trace 2
print("fetching T4...")
status = gpib.write(con, "T4: WF?")
trace4 = b''
chunk_size = 1024
keepFetching = True
while keepFetching:
temp = gpib.read(con, chunk_size)
trace4 += temp
print("read " + np.str(len(trace4)) + " to " + np.str(len(trace4)+len(temp)))
if len(temp) < chunk_size:
keepFetching = False
with open("trace4.000", "wb") as f:
f.write(trace4) | [
"gpib.dev",
"gpib.read",
"gpib.write"
] | [((180, 195), 'gpib.dev', 'gpib.dev', (['(0)', '(15)'], {}), '(0, 15)\n', (188, 195), False, 'import gpib\n'), ((205, 239), 'gpib.write', 'gpib.write', (['con', '"""COMM_FORMAT OFF"""'], {}), "(con, 'COMM_FORMAT OFF')\n", (215, 239), False, 'import gpib\n'), ((303, 337), 'gpib.write', 'gpib.write', (['con', '"""COMM_HEADER OFF"""'], {}), "(con, 'COMM_HEADER OFF')\n", (313, 337), False, 'import gpib\n'), ((348, 372), 'gpib.write', 'gpib.write', (['con', '"""*IDN?"""'], {}), "(con, '*IDN?')\n", (358, 372), False, 'import gpib\n'), ((502, 530), 'gpib.write', 'gpib.write', (['con', '"""TEMPLATE?"""'], {}), "(con, 'TEMPLATE?')\n", (512, 530), False, 'import gpib\n'), ((929, 955), 'gpib.write', 'gpib.write', (['con', '"""T1: WF?"""'], {}), "(con, 'T1: WF?')\n", (939, 955), False, 'import gpib\n'), ((1334, 1360), 'gpib.write', 'gpib.write', (['con', '"""T2: WF?"""'], {}), "(con, 'T2: WF?')\n", (1344, 1360), False, 'import gpib\n'), ((1739, 1765), 'gpib.write', 'gpib.write', (['con', '"""T3: WF?"""'], {}), "(con, 'T3: WF?')\n", (1749, 1765), False, 'import gpib\n'), ((2144, 2170), 'gpib.write', 'gpib.write', (['con', '"""T4: WF?"""'], {}), "(con, 'T4: WF?')\n", (2154, 2170), False, 'import gpib\n'), ((1039, 1065), 'gpib.read', 'gpib.read', (['con', 'chunk_size'], {}), '(con, chunk_size)\n', (1048, 1065), False, 'import gpib\n'), ((1444, 1470), 'gpib.read', 'gpib.read', (['con', 'chunk_size'], {}), '(con, chunk_size)\n', (1453, 1470), False, 'import gpib\n'), ((1849, 1875), 'gpib.read', 'gpib.read', (['con', 'chunk_size'], {}), '(con, chunk_size)\n', (1858, 1875), False, 'import gpib\n'), ((2254, 2280), 'gpib.read', 'gpib.read', (['con', 'chunk_size'], {}), '(con, chunk_size)\n', (2263, 2280), False, 'import gpib\n'), ((384, 404), 'gpib.read', 'gpib.read', (['con', '(1000)'], {}), '(con, 1000)\n', (393, 404), False, 'import gpib\n'), ((615, 641), 'gpib.read', 'gpib.read', (['con', 'chunk_size'], {}), '(con, chunk_size)\n', (624, 641), False, 'import gpib\n')] |
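# The four trace fetches above differ only in the trace name; an equivalent,
# less repetitive sketch (same gpib calls and output file names, progress print
# simplified to a running byte count):
import gpib

def fetch_all(con, chunk_size=1024):
    for n in (1, 2, 3, 4):
        print("fetching T%d..." % n)
        gpib.write(con, "T%d: WF?" % n)
        data = b''
        while True:
            chunk = gpib.read(con, chunk_size)
            data += chunk
            print("read %d bytes so far" % len(data))
            if len(chunk) < chunk_size:
                break
        with open("trace%d.000" % n, "wb") as f:
            f.write(data)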
# RUN: %PYTHON %s 2>&1 | FileCheck %s
import sys, time
from collections.abc import Callable
import numpy as np
from mlir.ir import *
from mlir.dialects import builtin
from mlir.dialects import linalg
from mlir.dialects import std
from mlir.execution_engine import *
from mlir.runtime import *
from harness import *
from experts import *
from compilation import compile_and_callback, f32
def compile_and_test_linalg_matmul(M: int, N: int, K: int, ITERS: int,
np_type: np.dtype, transform: Callable):
A = np.random.rand(M, K).astype(np_type)
B = np.random.rand(K, N).astype(np_type)
C = np.random.rand(M, N).astype(np_type)
C.fill(0.)
# Arguments must be passed as pointers.
A_memref_ptr = ctypes.pointer(ctypes.pointer(get_ranked_memref_descriptor(A)))
B_memref_ptr = ctypes.pointer(ctypes.pointer(get_ranked_memref_descriptor(B)))
C_memref_ptr = ctypes.pointer(ctypes.pointer(get_ranked_memref_descriptor(C)))
index_ptr_t = ctypes.c_longlong * 1
def callback(execution_engine):
def execute(m, n, k, iters):
execution_engine.invoke('main', A_memref_ptr, B_memref_ptr, C_memref_ptr,
index_ptr_t(iters))
# Dry-run.
n_iters_dry_run = 1
elapsed_s_per_iter, gflop_per_s_per_iter = timed_invoke(
execute, n_iters_dry_run, M, N, K, n_iters_dry_run)
print(f'dry_run in {elapsed_s_per_iter:.{4}}s per iter '
f'sec ({gflop_per_s_per_iter:.{4}} GFlop/s) ')
# Run for ITERS and report timing.
elapsed_s_per_iter, gflop_per_s_per_iter = timed_invoke(
execute, ITERS, M, N, K, ITERS)
print(f'run in {elapsed_s_per_iter:.{4}}s per iter '
f'sec ({gflop_per_s_per_iter:.{4}} GFlop/s) ')
# Check results vs NP and print timings.
success = 'SUCCESS' if np.allclose(C, np.dot(A, B)) else 'FAILURE'
if success == 'SUCCESS':
print(f'{success} ')
else:
delta = C - np.dot(A, B)
max_abs_delta = max(delta.max(), delta.min(), key=abs)
print(f'max_abs_delta: {max_abs_delta} -> {success} ')
compile_and_callback(
linalg.matmul, transform, callback, M=M, N=N, K=K, T1=f32, T2=f32, U=f32)
def test_numpy_matmul(M: int, N: int, K: int, ITERS, np_type):
A = np.random.rand(M, K).astype(np_type)
B = np.random.rand(K, N).astype(np_type)
C = np.random.rand(M, N).astype(np_type)
C.fill(0.)
def execute(m, n, k, iters):
for iters in range(iters):
# TODO: True GEMM semantics ?
C.fill(0.)
np.dot(A, B, out=C)
# Dry-run.
n_iters_dry_run = 1
elapsed_s_per_iter, gflop_per_s_per_iter = timed_invoke(
execute, n_iters_dry_run, M, N, K, n_iters_dry_run)
print(f'xxxxxxxxxx : numpy dry_run time on {1} threads '
f'in {elapsed_s_per_iter:.{4}}s per iter '
f'sec ({gflop_per_s_per_iter:.{4}} GFlop/s) ')
# Run for ITERS and report timing.
elapsed_s_per_iter, gflop_per_s_per_iter = timed_invoke(
execute, ITERS, M, N, K, ITERS)
print(f'xxxxxxxxxx : numpy time on {1} threads '
f'in {elapsed_s_per_iter:.{4}}s per iter '
f'sec ({gflop_per_s_per_iter:.{4}} GFlop/s) ')
def test_torch_matmul(M: int, N: int, K: int, ITERS: int, np_type,
num_threads: int):
import torch
torch.set_num_threads(num_threads)
A = torch.rand(M, K)
B = torch.rand(K, N)
C = torch.rand(M, N)
C.fill_(0.)
def execute(m, n, k, iters):
for iters in range(iters):
# TODO: True GEMM semantics ?
C.fill_(0.)
torch.mm(A, B, out=C)
# Dry-run.
n_iters_dry_run = 1
elapsed_s_per_iter, gflop_per_s_per_iter = timed_invoke(
execute, n_iters_dry_run, M, N, K, n_iters_dry_run)
print(f'xxxxxxxxxx : torch dry_run time on {torch.get_num_threads()} threads '
f'in {elapsed_s_per_iter:.{4}}s per iter '
f'sec ({gflop_per_s_per_iter:.{4}} GFlop/s) ')
# Run for ITERS and report timing.
elapsed_s_per_iter, gflop_per_s_per_iter = timed_invoke(
execute, ITERS, M, N, K, ITERS)
print(f'xxxxxxxxxx : torch time on {torch.get_num_threads()} threads '
f'in {elapsed_s_per_iter:.{4}}s per iter '
f'sec ({gflop_per_s_per_iter:.{4}} GFlop/s) ')
# CHECK-NOT: FAILURE
n_iters = 10
benchmark_torch = False
problem_size_list = [[128, 192, 256], [256, 256, 256], [1024, 1024, 1024]]
for np_type in [np.float32]:
for problem_sizes in problem_size_list:
M, N, K = problem_sizes
# Init printing.
print(f'\n###############################################################\n'
f'Problem size {M}x{N}x{K}')
for expert in [expert_compilerr_1, expert_compilerr_2, expert_compilerr_3]:
compile_and_test_linalg_matmul(M, N, K, n_iters, np_type, expert)
# For single-threaded apples-to-apples comparisons, run with:
# MKL_NUM_THREADS=1 ATEN_NUM_THREADS=1 OMP_NUM_THREADS=1 TBB_NUM_THREADS=1
import os
if os.environ.get('BENCHMARK_NUMPY'):
test_numpy_matmul(M, N, K, n_iters, np_type)
if os.environ.get('BENCHMARK_TORCH'):
test_torch_matmul(M, N, K, n_iters, np_type, 1)
| [
"numpy.random.rand",
"compilation.compile_and_callback",
"os.environ.get",
"torch.set_num_threads",
"torch.mm",
"numpy.dot",
"torch.get_num_threads",
"torch.rand"
] | [((2078, 2177), 'compilation.compile_and_callback', 'compile_and_callback', (['linalg.matmul', 'transform', 'callback'], {'M': 'M', 'N': 'N', 'K': 'K', 'T1': 'f32', 'T2': 'f32', 'U': 'f32'}), '(linalg.matmul, transform, callback, M=M, N=N, K=K, T1=\n f32, T2=f32, U=f32)\n', (2098, 2177), False, 'from compilation import compile_and_callback, f32\n'), ((3266, 3300), 'torch.set_num_threads', 'torch.set_num_threads', (['num_threads'], {}), '(num_threads)\n', (3287, 3300), False, 'import torch\n'), ((3307, 3323), 'torch.rand', 'torch.rand', (['M', 'K'], {}), '(M, K)\n', (3317, 3323), False, 'import torch\n'), ((3330, 3346), 'torch.rand', 'torch.rand', (['K', 'N'], {}), '(K, N)\n', (3340, 3346), False, 'import torch\n'), ((3353, 3369), 'torch.rand', 'torch.rand', (['M', 'N'], {}), '(M, N)\n', (3363, 3369), False, 'import torch\n'), ((4876, 4909), 'os.environ.get', 'os.environ.get', (['"""BENCHMARK_NUMPY"""'], {}), "('BENCHMARK_NUMPY')\n", (4890, 4909), False, 'import os\n'), ((4969, 5002), 'os.environ.get', 'os.environ.get', (['"""BENCHMARK_TORCH"""'], {}), "('BENCHMARK_TORCH')\n", (4983, 5002), False, 'import os\n'), ((546, 566), 'numpy.random.rand', 'np.random.rand', (['M', 'K'], {}), '(M, K)\n', (560, 566), True, 'import numpy as np\n'), ((589, 609), 'numpy.random.rand', 'np.random.rand', (['K', 'N'], {}), '(K, N)\n', (603, 609), True, 'import numpy as np\n'), ((632, 652), 'numpy.random.rand', 'np.random.rand', (['M', 'N'], {}), '(M, N)\n', (646, 652), True, 'import numpy as np\n'), ((2251, 2271), 'numpy.random.rand', 'np.random.rand', (['M', 'K'], {}), '(M, K)\n', (2265, 2271), True, 'import numpy as np\n'), ((2294, 2314), 'numpy.random.rand', 'np.random.rand', (['K', 'N'], {}), '(K, N)\n', (2308, 2314), True, 'import numpy as np\n'), ((2337, 2357), 'numpy.random.rand', 'np.random.rand', (['M', 'N'], {}), '(M, N)\n', (2351, 2357), True, 'import numpy as np\n'), ((2509, 2528), 'numpy.dot', 'np.dot', (['A', 'B'], {'out': 'C'}), '(A, B, out=C)\n', (2515, 2528), True, 'import numpy as np\n'), ((3507, 3528), 'torch.mm', 'torch.mm', (['A', 'B'], {'out': 'C'}), '(A, B, out=C)\n', (3515, 3528), False, 'import torch\n'), ((1827, 1839), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (1833, 1839), True, 'import numpy as np\n'), ((1940, 1952), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (1946, 1952), True, 'import numpy as np\n'), ((3728, 3751), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (3749, 3751), False, 'import torch\n'), ((4042, 4065), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (4063, 4065), False, 'import torch\n')] |
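# timed_invoke comes from the local harness module and is not shown in this
# file; a stand-in with the call signature used above might look like the
# sketch below (an assumption, not the actual harness code). The GFlop/s figure
# assumes 2*M*N*K floating-point ops per matmul iteration.
import time

def timed_invoke(run, n_iters, M, N, K, *args):
    start = time.time()
    run(M, N, K, *args)
    elapsed_s = time.time() - start
    elapsed_s_per_iter = elapsed_s / n_iters
    gflop_per_s_per_iter = 2.0 * M * N * K / elapsed_s_per_iter / 1e9
    return elapsed_s_per_iter, gflop_per_s_per_iter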
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import data_helpers
from tensorflow.contrib import learn
import csv
from sklearn import metrics
import yaml
import itertools
preps = ['at', 'on', 'in', 'by', 'for', 'against', 'to', 'from', 'between', 'during', 'with', 'about', 'of']
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
if x.ndim == 1:
x = x.reshape((1, -1))
max_x = np.max(x, axis=1).reshape((-1, 1))
exp_x = np.exp(x - max_x)
return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))
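# Quick illustration (added; not part of the original script): softmax normalizes raw
# scores row-wise into probabilities, e.g. softmax(np.array([1.0, 2.0, 3.0])) returns a
# (1, 3) array of roughly [[0.09, 0.24, 0.67]].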
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
# Parameters
# ==================================================
# Data Parameters
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "/u/a/n/anant/539_project/runs/2017-12-10 17:11:50.923482,glove,baseline,fc-3-layer,quadruple-hidden-neurons/best_checkpoints", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
datasets = None
# CHANGE THIS: Load data. Load your own data here
dataset_name = cfg["datasets"]["default"]
if FLAGS.eval_train:
if dataset_name == "mrpolarity":
datasets = data_helpers.get_datasets_mrpolarity(cfg["datasets"][dataset_name]["positive_data_file"]["path"],
cfg["datasets"][dataset_name]["negative_data_file"]["path"])
elif dataset_name == "20newsgroup":
datasets = data_helpers.get_datasets_20newsgroup(subset="test",
categories=cfg["datasets"][dataset_name]["categories"],
shuffle=cfg["datasets"][dataset_name]["shuffle"],
random_state=cfg["datasets"][dataset_name]["random_state"])
x_raw, y_test = data_helpers.load_data_labels(datasets)
y_test = np.argmax(y_test, axis=1)
print("Total number of test examples: {}".format(len(y_test)))
else:
if dataset_name == "mrpolarity":
datasets = {"target_names": ['positive_examples', 'negative_examples']}
x_raw = ["a masterpiece four years in the making", "everything is off."]
y_test = [1, 0]
else:
datasets = {"target_names": ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']}
x_raw = ["The number of reported cases of gonorrhea in Colorado increased",
"I am in the market for a 24-bit graphics card for a PC"]
y_test = [2, 1]
x_words_raw, x_tags, x_labels, x_trees, x_indices, y, y_labels = data_helpers.load_data_labels('/u/a/n/anant/Dropbox/539_project/generated_test_data/')
x_words = x_words_raw
# x_words = x_words[1:1000]
# x_tags = x_tags[1:1000]
# x_labels = x_labels[1:1000]
# x_trees = x_trees[1:1000]
# x_indices = x_indices[1:1000]
# y_labels = y_labels[1:1000]
max_document_length = 50
valid_indices = []
for i in range(len(x_words)):
if len(x_words[i].split(" ")) <= max_document_length:
valid_indices.append(i)
# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_words = np.array(list(vocab_processor.transform(x_words)))
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "tags_vocab")
tags_vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_tags = np.array(list(tags_vocab_processor.transform(x_tags)))
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "labels_vocab")
labels_vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_labels = np.array(list(labels_vocab_processor.transform(x_labels)))
for i in range(len(x_words)):  # zero out the target-word position in each example (when it fits in the window)
if x_indices[i] < max_document_length:
x_words[i][int(x_indices[i])] = 0
x_indices = np.array(x_indices)
x_trees = np.array(x_trees)
# x_trees = x_trees.reshape(len(x_words), -1)
x_feats = (list(zip(x_words, x_tags, x_labels, x_indices, x_trees)))
x_feats = np.array([x_feats[i] for i in valid_indices])
x_words = np.array([x_words[i] for i in valid_indices])
x_words_raw = np.array([x_words_raw[i] for i in valid_indices])
y_labels = np.array([y_labels[i] for i in valid_indices])
y_test = y_labels
print("\nEvaluating...\n")
# Evaluation
# ==================================================
# checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir[:-11] + "best_checkpoints")
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
# checkpoint_file = "/u/a/n/anant/539_project/runs/2017-12-10 19:01:26.103352,glove,init-embeddings-curr-weight-nontrainable,fancy-inits,label-weights-per-edge-per-dim,6-layers,word-plus-label-plus-tag-embedding,p-c-p-child-label-p-tag-embedding,fc/best_checkpoints/model-9700"
print("checkpoint file: " + checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_words = graph.get_operation_by_name("input_words").outputs[0]
input_tags = graph.get_operation_by_name("input_tags").outputs[0]
input_labels = graph.get_operation_by_name("input_labels").outputs[0]
input_indices = graph.get_operation_by_name("input_indices").outputs[0]
input_trees = graph.get_operation_by_name("input_trees").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/scores").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(zip(x_feats, y)), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
all_probabilities = None
for x_test_batch in batches:
x_batch, y_batch = zip(*x_test_batch)
x_words_batch, x_tags_batch, x_labels_batch, x_indices_batch, x_trees_batch = zip(*x_batch)
# print(np.shape(x_trees_batch))
x_words_batch = np.array(x_words_batch)
# print(np.shape(x_words_batch))
x_tags_batch = np.array(x_tags_batch)
# print(np.shape(x_tags_batch))
x_labels_batch = np.array(x_labels_batch)
# print(np.shape(x_labels_batch))
x_indices_batch = np.array(x_indices_batch)
# print(np.shape(x_indices_batch))
x_trees_batch = list(x_trees_batch)
x_trees_batch2 = np.zeros([x_words_batch.shape[0], x_words_batch.shape[1], x_words_batch.shape[1]])
for i in range(len(x_trees_batch)):
                bla = eval(x_trees_batch[i])  # parse the string-encoded adjacency matrix for example i
x_trees_batch2[i,0:len(bla),0:len(bla)] = bla
# x_trees_batch = np.array(x_trees_batch)
x_trees_batch = x_trees_batch2
feed_dict = {
input_words: x_words_batch,
input_tags: x_tags_batch,
input_labels: x_labels_batch,
input_indices: x_indices_batch,
input_trees: x_trees_batch,
input_y: y_batch,
dropout_keep_prob: 1.0
# cnn.seq: x_words_batch.shape[1]
}
batch_predictions_scores = sess.run([predictions, scores], feed_dict)
all_predictions = np.concatenate([all_predictions, batch_predictions_scores[0]])
probabilities = softmax(batch_predictions_scores[1])
if all_probabilities is not None:
all_probabilities = np.concatenate([all_probabilities, probabilities])
else:
all_probabilities = probabilities
# Print accuracy if y_test is defined
if y_test is not None:
print(y_test)
print(all_predictions)
correct_predictions = float(sum(all_predictions == y_test))
print("Total number of test examples: {}".format(len(y_test)))
print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
print(metrics.classification_report(y_test, all_predictions, target_names=['at', 'on', 'in', 'by', 'for', 'against', 'to', 'from', 'between', 'during', 'with', 'about', 'of']))
print(metrics.confusion_matrix(y_test, all_predictions))
# Save the evaluation to a csv
# print(x_words.shape)
# print(len(all_predictions))
predictions_human_readable = np.column_stack((x_words_raw,
[preps[int(prediction)] for prediction in all_predictions],
[ "{}".format(probability) for probability in all_probabilities]))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
csv.writer(f).writerows(predictions_human_readable)
| [
"sklearn.metrics.classification_report",
"yaml.load",
"numpy.array",
"tensorflow.flags.DEFINE_string",
"tensorflow.Graph",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.Session",
"numpy.max",
"numpy.exp",
"data_helpers.get_datasets_20newsgroup",
"numpy.concatenate",
"tensorflow.ConfigProto",
"sklearn.metrics.confusion_matrix",
"csv.writer",
"numpy.argmax",
"tensorflow.train.latest_checkpoint",
"os.path.join",
"data_helpers.load_data_labels",
"data_helpers.get_datasets_mrpolarity",
"numpy.sum",
"numpy.zeros",
"tensorflow.flags.DEFINE_integer",
"tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore"
] | [((754, 823), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch Size (default: 64)"""'], {}), "('batch_size', 64, 'Batch Size (default: 64)')\n", (777, 823), True, 'import tensorflow as tf\n'), ((824, 1043), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""checkpoint_dir"""', '"""/u/a/n/anant/539_project/runs/2017-12-10 17:11:50.923482,glove,baseline,fc-3-layer,quadruple-hidden-neurons/best_checkpoints"""', '"""Checkpoint directory from training run"""'], {}), "('checkpoint_dir',\n '/u/a/n/anant/539_project/runs/2017-12-10 17:11:50.923482,glove,baseline,fc-3-layer,quadruple-hidden-neurons/best_checkpoints'\n , 'Checkpoint directory from training run')\n", (846, 1043), True, 'import tensorflow as tf\n'), ((1035, 1112), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""eval_train"""', '(False)', '"""Evaluate on all training data"""'], {}), "('eval_train', False, 'Evaluate on all training data')\n", (1058, 1112), True, 'import tensorflow as tf\n'), ((1132, 1227), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""allow_soft_placement"""', '(True)', '"""Allow device soft device placement"""'], {}), "('allow_soft_placement', True,\n 'Allow device soft device placement')\n", (1155, 1227), True, 'import tensorflow as tf\n'), ((1224, 1317), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Log placement of ops on devices"""'], {}), "('log_device_placement', False,\n 'Log placement of ops on devices')\n", (1247, 1317), True, 'import tensorflow as tf\n'), ((3057, 3148), 'data_helpers.load_data_labels', 'data_helpers.load_data_labels', (['"""/u/a/n/anant/Dropbox/539_project/generated_test_data/"""'], {}), "(\n '/u/a/n/anant/Dropbox/539_project/generated_test_data/')\n", (3086, 3148), False, 'import data_helpers\n'), ((3545, 3594), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_dir', '""".."""', '"""vocab"""'], {}), "(FLAGS.checkpoint_dir, '..', 'vocab')\n", (3557, 3594), False, 'import os\n'), ((3613, 3672), 'tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore', 'learn.preprocessing.VocabularyProcessor.restore', (['vocab_path'], {}), '(vocab_path)\n', (3660, 3672), False, 'from tensorflow.contrib import learn\n'), ((3748, 3802), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_dir', '""".."""', '"""tags_vocab"""'], {}), "(FLAGS.checkpoint_dir, '..', 'tags_vocab')\n", (3760, 3802), False, 'import os\n'), ((3826, 3885), 'tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore', 'learn.preprocessing.VocabularyProcessor.restore', (['vocab_path'], {}), '(vocab_path)\n', (3873, 3885), False, 'from tensorflow.contrib import learn\n'), ((3964, 4020), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_dir', '""".."""', '"""labels_vocab"""'], {}), "(FLAGS.checkpoint_dir, '..', 'labels_vocab')\n", (3976, 4020), False, 'import os\n'), ((4046, 4105), 'tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore', 'learn.preprocessing.VocabularyProcessor.restore', (['vocab_path'], {}), '(vocab_path)\n', (4093, 4105), False, 'from tensorflow.contrib import learn\n'), ((4338, 4357), 'numpy.array', 'np.array', (['x_indices'], {}), '(x_indices)\n', (4346, 4357), True, 'import numpy as np\n'), ((4368, 4385), 'numpy.array', 'np.array', (['x_trees'], {}), '(x_trees)\n', (4376, 4385), True, 'import numpy as np\n'), ((4512, 4557), 'numpy.array', 'np.array', (['[x_feats[i] for i in valid_indices]'], {}), '([x_feats[i] 
for i in valid_indices])\n', (4520, 4557), True, 'import numpy as np\n'), ((4568, 4613), 'numpy.array', 'np.array', (['[x_words[i] for i in valid_indices]'], {}), '([x_words[i] for i in valid_indices])\n', (4576, 4613), True, 'import numpy as np\n'), ((4628, 4677), 'numpy.array', 'np.array', (['[x_words_raw[i] for i in valid_indices]'], {}), '([x_words_raw[i] for i in valid_indices])\n', (4636, 4677), True, 'import numpy as np\n'), ((4689, 4735), 'numpy.array', 'np.array', (['[y_labels[i] for i in valid_indices]'], {}), '([y_labels[i] for i in valid_indices])\n', (4697, 4735), True, 'import numpy as np\n'), ((4963, 5011), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (4989, 5011), True, 'import tensorflow as tf\n'), ((5343, 5353), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5351, 5353), True, 'import tensorflow as tf\n'), ((9635, 9693), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_dir', '""".."""', '"""prediction.csv"""'], {}), "(FLAGS.checkpoint_dir, '..', 'prediction.csv')\n", (9647, 9693), False, 'import os\n'), ((502, 519), 'numpy.exp', 'np.exp', (['(x - max_x)'], {}), '(x - max_x)\n', (508, 519), True, 'import numpy as np\n'), ((630, 648), 'yaml.load', 'yaml.load', (['ymlfile'], {}), '(ymlfile)\n', (639, 648), False, 'import yaml\n'), ((2317, 2356), 'data_helpers.load_data_labels', 'data_helpers.load_data_labels', (['datasets'], {}), '(datasets)\n', (2346, 2356), False, 'import data_helpers\n'), ((2370, 2395), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (2379, 2395), True, 'import numpy as np\n'), ((5398, 5514), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'FLAGS.allow_soft_placement', 'log_device_placement': 'FLAGS.log_device_placement'}), '(allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n', (5412, 5514), True, 'import tensorflow as tf\n'), ((5535, 5566), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (5545, 5566), True, 'import tensorflow as tf\n'), ((1677, 1845), 'data_helpers.get_datasets_mrpolarity', 'data_helpers.get_datasets_mrpolarity', (["cfg['datasets'][dataset_name]['positive_data_file']['path']", "cfg['datasets'][dataset_name]['negative_data_file']['path']"], {}), "(cfg['datasets'][dataset_name][\n 'positive_data_file']['path'], cfg['datasets'][dataset_name][\n 'negative_data_file']['path'])\n", (1713, 1845), False, 'import data_helpers\n'), ((9029, 9206), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['y_test', 'all_predictions'], {'target_names': "['at', 'on', 'in', 'by', 'for', 'against', 'to', 'from', 'between',\n 'during', 'with', 'about', 'of']"}), "(y_test, all_predictions, target_names=['at',\n 'on', 'in', 'by', 'for', 'against', 'to', 'from', 'between', 'during',\n 'with', 'about', 'of'])\n", (9058, 9206), False, 'from sklearn import metrics\n'), ((9210, 9259), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_test', 'all_predictions'], {}), '(y_test, all_predictions)\n', (9234, 9259), False, 'from sklearn import metrics\n'), ((455, 472), 'numpy.max', 'np.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (461, 472), True, 'import numpy as np\n'), ((1940, 2173), 'data_helpers.get_datasets_20newsgroup', 'data_helpers.get_datasets_20newsgroup', ([], {'subset': '"""test"""', 'categories': "cfg['datasets'][dataset_name]['categories']", 'shuffle': 
"cfg['datasets'][dataset_name]['shuffle']", 'random_state': "cfg['datasets'][dataset_name]['random_state']"}), "(subset='test', categories=cfg[\n 'datasets'][dataset_name]['categories'], shuffle=cfg['datasets'][\n dataset_name]['shuffle'], random_state=cfg['datasets'][dataset_name][\n 'random_state'])\n", (1977, 2173), False, 'import data_helpers\n'), ((7115, 7138), 'numpy.array', 'np.array', (['x_words_batch'], {}), '(x_words_batch)\n', (7123, 7138), True, 'import numpy as np\n'), ((7211, 7233), 'numpy.array', 'np.array', (['x_tags_batch'], {}), '(x_tags_batch)\n', (7219, 7233), True, 'import numpy as np\n'), ((7307, 7331), 'numpy.array', 'np.array', (['x_labels_batch'], {}), '(x_labels_batch)\n', (7315, 7331), True, 'import numpy as np\n'), ((7408, 7433), 'numpy.array', 'np.array', (['x_indices_batch'], {}), '(x_indices_batch)\n', (7416, 7433), True, 'import numpy as np\n'), ((7558, 7645), 'numpy.zeros', 'np.zeros', (['[x_words_batch.shape[0], x_words_batch.shape[1], x_words_batch.shape[1]]'], {}), '([x_words_batch.shape[0], x_words_batch.shape[1], x_words_batch.\n shape[1]])\n', (7566, 7645), True, 'import numpy as np\n'), ((8377, 8439), 'numpy.concatenate', 'np.concatenate', (['[all_predictions, batch_predictions_scores[0]]'], {}), '([all_predictions, batch_predictions_scores[0]])\n', (8391, 8439), True, 'import numpy as np\n'), ((9780, 9793), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (9790, 9793), False, 'import csv\n'), ((539, 560), 'numpy.sum', 'np.sum', (['exp_x'], {'axis': '(1)'}), '(exp_x, axis=1)\n', (545, 560), True, 'import numpy as np\n'), ((8587, 8637), 'numpy.concatenate', 'np.concatenate', (['[all_probabilities, probabilities]'], {}), '([all_probabilities, probabilities])\n', (8601, 8637), True, 'import numpy as np\n')] |
"""
A non-deterministic finite automaton (NFA)
References:
-> https://stackoverflow.com/questions/30551731/data-structure-in-python-for-nfa-regex
-> https://github.com/caleb531/automata/blob/master/automata/fa/nfa.py
"""
from automata import Automata, EstadoAutomata
class AFN(Automata):
"""
    Class defining a non-deterministic finite automaton (AFN)
"""
def __init__(self, inital):
        # Call the Automata base-class initializer
super().__init__(inital)
def to_dfa(self):
"""Convert this NFA to an equivalent DFA."""
from subconjuntos import Subconjuntos
return Subconjuntos(self)()
class EstadoAFN(EstadoAutomata):
"""
    A single state of a non-deterministic finite automaton
"""
def __init__(self, accept=None):
super().__init__(accept)
def all_transitions(self):
        # Initialize the set of transitions
transitions = set()
for symbol, targets in self.transitions.items():
            # set union
transitions |= {(symbol, target) for target in targets}
return transitions
def add_transition(self, symbol, state):
"""
        Add a transition out of this state on the given symbol
"""
self._ensure_not_numbered()
try:
self.transitions[symbol].add(state)
except KeyError:
self.transitions[symbol] = {state}
def e_closure(self):
"""
        Compute the epsilon-closure of this state
"""
ephsilon = {self}
stack = [self]
while stack:
state = stack.pop()
for target in state.transitions.get(None, set()):
if target not in ephsilon:
ephsilon.add(target)
stack.append(target)
self.inmutable_ephsilon = frozenset(ephsilon)
return self.inmutable_ephsilon
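# Illustrative sketch (not part of the original module): exercising the epsilon-closure,
# assuming the EstadoAutomata base class initializes `transitions` and provides
# `_ensure_not_numbered()` so that `add_transition` works as written above.
if __name__ == '__main__':
    a, b, c = EstadoAFN(), EstadoAFN(), EstadoAFN(accept='ok')
    a.add_transition(None, b)   # epsilon transition a -> b
    b.add_transition(None, c)   # epsilon transition b -> c
    a.add_transition('x', c)    # ordinary transition on symbol 'x'
    print(a.e_closure())        # frozenset containing the three states a, b and c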
| [
"subconjuntos.Subconjuntos"
] | [((537, 555), 'subconjuntos.Subconjuntos', 'Subconjuntos', (['self'], {}), '(self)\n', (549, 555), False, 'from subconjuntos import Subconjuntos\n')] |
import openstack
from getpass import getpass
username = input("Enter your username: ")
password = getpass("Enter your password: ")
conn = openstack.connect(cloud='ovh', username=username, password=password)
servers = conn.list_servers()
for server in servers:
print(server.name)
| [
"openstack.connect",
"getpass.getpass"
] | [((99, 131), 'getpass.getpass', 'getpass', (['"""Enter your password: """'], {}), "('Enter your password: ')\n", (106, 131), False, 'from getpass import getpass\n'), ((140, 208), 'openstack.connect', 'openstack.connect', ([], {'cloud': '"""ovh"""', 'username': 'username', 'password': 'password'}), "(cloud='ovh', username=username, password=password)\n", (157, 208), False, 'import openstack\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: split_doi.py
# Project: DOI_Auslesen
# Created Date: Monday 25.02.2019, 12:12
# Author: Apop85
# -----
# Last Modified: Monday 25.02.2019, 12:24
# -----
# Copyright (c) 2019 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
# -----
# Description: Split doi_results.txt into smaller pieces
###
import os
os.chdir(os.path.dirname(__file__))
source_file=r'.\doi_results.txt'
file_reader=open(source_file, 'r', encoding='UTF-8')
file_content=file_reader.readlines()
file_reader.close()
split_amount=63
counter=0
splits=1
for line in file_content:
target_folder='.\\doi_split_'+str(splits)
if not os.path.exists(target_folder):
os.mkdir(target_folder)
    # Append so earlier lines of the same split are not overwritten: reopening with
    # 'w' truncates the split file on every iteration, leaving only its last line.
    file_writer=open(target_folder+'\\doi_results.txt', 'a', encoding='UTF-8')
    file_writer.write(line)
    file_writer.close()
counter+=1
if counter == split_amount:
counter=0
splits+=1
file_writer.close()
| [
"os.path.dirname",
"os.path.exists",
"os.mkdir"
] | [((453, 478), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (468, 478), False, 'import os\n'), ((743, 772), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (757, 772), False, 'import os\n'), ((782, 805), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (790, 805), False, 'import os\n')] |
#!/usr/bin/env python3
# See: https://github.com/pr3d4t0r/COVIDvu/blob/master/LICENSE
# vim: set fileencoding=utf-8:
import json
import numpy as np
import os
import pandas as pd
import re
from numpy import ndarray
from os.path import join
from pandas.core.frame import DataFrame
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.series import Series
from pystan.model import StanModel
from covidvu.predict import _castPredictionsAsTS
from covidvu.predict import _dumpRegionPrediction
from covidvu.predict import _dumpPredictionCollectionAsJSON
from covidvu.predict import _dumpTimeSeriesAsJSON
from covidvu.predict import _getPredictionsFromPosteriorSamples
from covidvu.predict import buildLogisticModel
from covidvu.predict import getSavedShortCountryNames
from covidvu.predict import load
from covidvu.predict import loadAll
from covidvu.predict import MIN_CASES_FILTER
from covidvu.predict import predictRegions
from covidvu.predict import PREDICTIONS_PERCENTILES
from covidvu.predict import predictLogisticGrowth
from covidvu.predict import PRIOR_GROWTH_RATE
from covidvu.predict import PRIOR_LOG_CARRYING_CAPACITY
from covidvu.predict import PRIOR_MID_POINT
from covidvu.predict import PRIOR_SIGMA
# *** constants ***
TEST_JH_CSSE_PATH = os.path.join(os.getcwd(), 'resources', 'test_COVID-19',)
TEST_JH_CSSE_FILE_CONFIRMED = os.path.join(TEST_JH_CSSE_PATH, 'csse_covid_19_data',
'csse_covid_19_time_series',
'time_series_covid19_confirmed_global.csv')
TEST_JH_CSSE_FILE_DEATHS = os.path.join(TEST_JH_CSSE_PATH, 'csse_covid_19_data',
'csse_covid_19_time_series',
'time_series_covid19_deaths_global.csv')
TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED = os.path.join(TEST_JH_CSSE_PATH, 'archived_data', 'archived_time_series',
'time_series_19-covid-Confirmed_archived_0325.csv')
TEST_JH_CSSE_FILE_DEATHS_DEPRECATED = os.path.join(TEST_JH_CSSE_PATH, 'archived_data', 'archived_time_series',
'time_series_19-covid-Deaths_archived_0325.csv')
TEST_STATE_CODES_PATH = os.path.join(os.getcwd(), 'stateCodesUS.csv')
TEST_SITE_DATA = os.path.join(os.getcwd(), 'resources', 'test_site_data')
TEST_JH_CSSE_REPORT_PATH = os.path.join(os.getcwd(), 'resources', 'test_COVID-19', 'csse_covid_19_data',
'csse_covid_19_daily_reports')
TEST_JH_CSSE_FILE_CONFIRMED_SMALL = os.path.join(TEST_JH_CSSE_PATH, 'csse_covid_19_data',
'csse_covid_19_time_series',
'time_series_covid19_confirmed_global_small.csv')
TEST_N_SAMPLES = 1000
TEST_N_CHAINS = 2
# *** functions ***
def _purge(purgeDirectory, pattern):
for f in os.listdir(purgeDirectory):
if re.search(pattern, f):
os.remove(join(purgeDirectory, f))
def _assertValidJSON(fname):
assert os.path.exists(fname)
with open(fname) as f:
jsonObject = json.load(f)
assert isinstance(jsonObject, dict)
assert len(jsonObject.keys()) > 0
# *** tests ***
def test__dumpTimeSeriesAsJSON():
lenTS = 10
startDate = '2020-01-01'
startDate = pd.to_datetime(startDate).date()
endDate = startDate + pd.Timedelta(lenTS - 1, 'D')
data = np.arange(lenTS)
ts = pd.Series(index = pd.date_range(start=startDate,
end=endDate,
),
data = data,
)
try:
_dumpTimeSeriesAsJSON(ts, target=join(TEST_SITE_DATA, 'test-ts.json'))
_assertValidJSON(join(TEST_SITE_DATA,'test-ts.json'))
except Exception as e:
raise e
finally:
_purge(TEST_SITE_DATA, '.json')
# ----------------------------------------------------------------
# THESE TESTS MUST BE RUN IN ORDER
logRegModel = None
def test_buildLogisticModel():
global logRegModel
logRegModel = buildLogisticModel(priorLogCarryingCapacity=PRIOR_LOG_CARRYING_CAPACITY,
priorMidPoint=PRIOR_MID_POINT,
priorGrowthRate=PRIOR_GROWTH_RATE,
priorSigma=PRIOR_SIGMA, )
assert isinstance(logRegModel, StanModel)
def test_predictLogisticGrowth():
nDaysPredict = 10
prediction = predictLogisticGrowth(logRegModel,
regionName = 'US',
siteData = TEST_SITE_DATA,
jhCSSEFileConfirmed = TEST_JH_CSSE_FILE_CONFIRMED,
jhCSSEFileDeaths = TEST_JH_CSSE_FILE_DEATHS_DEPRECATED,
jhCSSEFileConfirmedDeprecated = TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED,
jsCSSEReportPath = TEST_JH_CSSE_REPORT_PATH,
nSamples = TEST_N_SAMPLES,
nChains = TEST_N_CHAINS,
nDaysPredict = nDaysPredict,
)
predictionIndex = pd.date_range(start = prediction['regionTSClean'].index[0],
end = prediction['regionTSClean'].index[-1] + pd.Timedelta(nDaysPredict, 'D'),
)
assert (prediction['predictionsMeanTS'].index == predictionIndex).all()
assert (prediction['predictionsPercentilesTS'][0][0].index == predictionIndex).all()
assert isinstance(prediction['predictionsMeanTS'], Series)
assert isinstance(prediction['predictionsPercentilesTS'][0][0], Series)
assert (prediction['predictionsMeanTS'].isnull().values).sum() == 0
assert (prediction['predictionsPercentilesTS'][0][0].isnull().values).sum() == 0
assert isinstance(prediction['trace'], DataFrame)
assert (prediction['regionTSClean'] > MIN_CASES_FILTER).all()
return prediction
def test__dumpCountryPrediction():
prediction = test_predictLogisticGrowth()
try:
_dumpRegionPrediction(prediction, TEST_SITE_DATA, PREDICTIONS_PERCENTILES)
_assertValidJSON(join(TEST_SITE_DATA,'prediction-world-mean-US.json'))
_assertValidJSON(join(TEST_SITE_DATA,'prediction-world-conf-int-US.json'))
except Exception as e:
raise e
finally:
_purge(TEST_SITE_DATA, '.json')
def test__getPredictionsFromPosteriorSamples():
nDaysPredict = 14
prediction = test_predictLogisticGrowth()
predictionsMean, predictionsPercentiles = _getPredictionsFromPosteriorSamples(prediction['t'],
prediction['trace'],
nDaysPredict,
PREDICTIONS_PERCENTILES,
)
assert isinstance(predictionsMean, ndarray)
assert len(predictionsMean) == prediction['regionTSClean'].shape[0] + nDaysPredict
assert len(predictionsPercentiles) == len(PREDICTIONS_PERCENTILES)
assert isinstance(predictionsPercentiles[0][0], ndarray)
assert len(predictionsPercentiles[0][0]) == prediction['regionTSClean'].shape[0] + nDaysPredict
prediction['predictionsMean'] = predictionsMean
prediction['predictionsPercentiles'] = predictionsPercentiles
prediction['nDaysPredict'] = nDaysPredict
return prediction
def test__castPredictionsAsTS():
predictions = test__getPredictionsFromPosteriorSamples()
startDate = '2020-01-01'
startDate = pd.to_datetime(startDate).date()
endDate = startDate + pd.Timedelta(len(predictions['regionTSClean'])-1, 'D')
predictionIndex = pd.date_range(start = startDate,
end = endDate,
)
regionTSClean = pd.Series(index = predictionIndex, data = predictions['regionTSClean'])
predictionsMeanTS, predictionsPercentilesTS = _castPredictionsAsTS(regionTSClean,
predictions['nDaysPredict'],
predictions['predictionsMean'],
predictions['predictionsPercentiles'],
)
assert isinstance(predictionsMeanTS, Series)
assert predictionsMeanTS.shape[0] == len(predictions['regionTSClean']) + predictions['nDaysPredict']
assert isinstance(predictionsMeanTS.index, DatetimeIndex)
assert len(predictionsPercentilesTS) == len(PREDICTIONS_PERCENTILES)
assert isinstance(predictionsPercentilesTS[0][0], Series)
assert isinstance(predictionsPercentilesTS[0][0].index, DatetimeIndex)
assert predictionsPercentilesTS[0][0].shape[0] == len(predictions['regionTSClean']) + predictions['nDaysPredict']
return predictionsMeanTS, predictionsPercentilesTS, predictions
def test__dumpPredictionCollectionAsJSON():
predictionsMeanTS, predictionsPercentilesTS, predictions = test__castPredictionsAsTS()
try:
_dumpPredictionCollectionAsJSON(predictionsPercentilesTS,
'US',
PREDICTIONS_PERCENTILES,
join(TEST_SITE_DATA,'test-ts-collection.json'),
)
_assertValidJSON(join(TEST_SITE_DATA, 'test-ts-collection.json'))
except Exception as e:
raise e
finally:
_purge(TEST_SITE_DATA, '.json')
def test_predictCountries():
try:
predictRegions(0,
nDaysPredict = 10,
siteData=TEST_SITE_DATA,
jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED,
jhCSSEFileDeaths=TEST_JH_CSSE_FILE_DEATHS_DEPRECATED,
jhCSSEFileConfirmedDeprecated=TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED,
jsCSSEReportPath=TEST_JH_CSSE_REPORT_PATH,
logRegModel = logRegModel,
nSamples=TEST_N_SAMPLES,
nChains=TEST_N_CHAINS,
)
_assertValidJSON(join(TEST_SITE_DATA,'prediction-world-mean-China.json'))
_assertValidJSON(join(TEST_SITE_DATA, 'prediction-world-conf-int-China.json'))
predictRegions('all',
nDaysPredict=10,
siteData=TEST_SITE_DATA,
jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED_SMALL,
jhCSSEFileDeaths=TEST_JH_CSSE_FILE_DEATHS_DEPRECATED,
jhCSSEFileConfirmedDeprecated=TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED,
jsCSSEReportPath=TEST_JH_CSSE_REPORT_PATH,
logRegModel=logRegModel,
nSamples=TEST_N_SAMPLES,
nChains=TEST_N_CHAINS,
)
_assertValidJSON(join(TEST_SITE_DATA, 'prediction-world-mean-Italy.json'))
_assertValidJSON(join(TEST_SITE_DATA, 'prediction-world-conf-int-Italy.json'))
_assertValidJSON(join(TEST_SITE_DATA, 'prediction-world-mean-US.json'))
_assertValidJSON(join(TEST_SITE_DATA, 'prediction-world-conf-int-US.json'))
except Exception as e:
raise e
finally:
_purge(TEST_SITE_DATA, '.json')
def test_load():
try:
predictRegions('all',
siteData=TEST_SITE_DATA,
nDaysPredict=10,
jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED_SMALL,
jhCSSEFileDeaths=TEST_JH_CSSE_FILE_DEATHS_DEPRECATED,
jhCSSEFileConfirmedDeprecated=TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED,
jsCSSEReportPath=TEST_JH_CSSE_REPORT_PATH,
logRegModel=logRegModel,
nSamples=TEST_N_SAMPLES,
nChains=TEST_N_CHAINS,
)
meanPredictionTS, percentilesTS, regionName = load(0, siteData=TEST_SITE_DATA)
assert isinstance(meanPredictionTS, Series)
assert isinstance(percentilesTS, DataFrame)
assert isinstance(regionName, str)
assert (percentilesTS.columns.isin(['97.5', '2.5', '25', '75'])).all()
except Exception as e:
raise e
finally:
_purge(TEST_SITE_DATA, '.json')
def test_getSavedShortCountryNames():
try:
predictRegions('all',
siteData=TEST_SITE_DATA,
jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED_SMALL,
jhCSSEFileDeaths=TEST_JH_CSSE_FILE_DEATHS_DEPRECATED,
jhCSSEFileConfirmedDeprecated=TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED,
jsCSSEReportPath=TEST_JH_CSSE_REPORT_PATH,
logRegModel=logRegModel,
nSamples=TEST_N_SAMPLES,
nChains=TEST_N_CHAINS,
)
regionNameShortAll = getSavedShortCountryNames(siteData=TEST_SITE_DATA)
assert isinstance(regionNameShortAll, list)
assert len(regionNameShortAll) == 3
except Exception as e:
raise e
finally:
_purge(TEST_SITE_DATA, '.json')
def test_loadAll():
try:
confirmedCasesAll, meanPredictionTSAll, percentilesTSAll, = loadAll(siteData=join(TEST_SITE_DATA,'test-predictions'),
jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED_SMALL,
jhCSSEFileDeaths=TEST_JH_CSSE_FILE_DEATHS_DEPRECATED,
jhCSSEFileConfirmedDeprecated=TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED,
jsCSSEReportPath=TEST_JH_CSSE_REPORT_PATH,
)
assert isinstance(confirmedCasesAll, DataFrame)
assert isinstance(meanPredictionTSAll, DataFrame)
assert isinstance(percentilesTSAll, DataFrame)
except Exception as e:
raise e
finally:
_purge(TEST_SITE_DATA, '.json')
# test__dumpTimeSeriesAsJSON()
# test_buildLogisticModel()
# test_predictLogisticGrowth()
# test__dumpCountryPrediction()
# test__getPredictionsFromPosteriorSamples()
# test__castPredictionsAsTS()
# test__dumpPredictionCollectionAsJSON()
# test_predictCountries()
# test_load()
# test_getSavedShortCountryNames()
test_loadAll()
| [
"covidvu.predict.getSavedShortCountryNames",
"pandas.date_range",
"pandas.to_datetime",
"numpy.arange",
"os.path.exists",
"re.search",
"os.listdir",
"covidvu.predict._getPredictionsFromPosteriorSamples",
"covidvu.predict._dumpRegionPrediction",
"covidvu.predict._castPredictionsAsTS",
"covidvu.predict.buildLogisticModel",
"covidvu.predict.load",
"covidvu.predict.predictLogisticGrowth",
"pandas.Series",
"pandas.Timedelta",
"os.path.join",
"covidvu.predict.predictRegions",
"os.getcwd",
"json.load"
] | [((1372, 1502), 'os.path.join', 'os.path.join', (['TEST_JH_CSSE_PATH', '"""csse_covid_19_data"""', '"""csse_covid_19_time_series"""', '"""time_series_covid19_confirmed_global.csv"""'], {}), "(TEST_JH_CSSE_PATH, 'csse_covid_19_data',\n 'csse_covid_19_time_series', 'time_series_covid19_confirmed_global.csv')\n", (1384, 1502), False, 'import os\n'), ((1652, 1779), 'os.path.join', 'os.path.join', (['TEST_JH_CSSE_PATH', '"""csse_covid_19_data"""', '"""csse_covid_19_time_series"""', '"""time_series_covid19_deaths_global.csv"""'], {}), "(TEST_JH_CSSE_PATH, 'csse_covid_19_data',\n 'csse_covid_19_time_series', 'time_series_covid19_deaths_global.csv')\n", (1664, 1779), False, 'import os\n'), ((1929, 2057), 'os.path.join', 'os.path.join', (['TEST_JH_CSSE_PATH', '"""archived_data"""', '"""archived_time_series"""', '"""time_series_19-covid-Confirmed_archived_0325.csv"""'], {}), "(TEST_JH_CSSE_PATH, 'archived_data', 'archived_time_series',\n 'time_series_19-covid-Confirmed_archived_0325.csv')\n", (1941, 2057), False, 'import os\n'), ((2152, 2277), 'os.path.join', 'os.path.join', (['TEST_JH_CSSE_PATH', '"""archived_data"""', '"""archived_time_series"""', '"""time_series_19-covid-Deaths_archived_0325.csv"""'], {}), "(TEST_JH_CSSE_PATH, 'archived_data', 'archived_time_series',\n 'time_series_19-covid-Deaths_archived_0325.csv')\n", (2164, 2277), False, 'import os\n'), ((2713, 2853), 'os.path.join', 'os.path.join', (['TEST_JH_CSSE_PATH', '"""csse_covid_19_data"""', '"""csse_covid_19_time_series"""', '"""time_series_covid19_confirmed_global_small.csv"""'], {}), "(TEST_JH_CSSE_PATH, 'csse_covid_19_data',\n 'csse_covid_19_time_series',\n 'time_series_covid19_confirmed_global_small.csv')\n", (2725, 2853), False, 'import os\n'), ((1285, 1296), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1294, 1296), False, 'import os\n'), ((2374, 2385), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2383, 2385), False, 'import os\n'), ((2450, 2461), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2459, 2461), False, 'import os\n'), ((2537, 2548), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2546, 2548), False, 'import os\n'), ((3067, 3093), 'os.listdir', 'os.listdir', (['purgeDirectory'], {}), '(purgeDirectory)\n', (3077, 3093), False, 'import os\n'), ((3218, 3239), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (3232, 3239), False, 'import os\n'), ((3601, 3617), 'numpy.arange', 'np.arange', (['lenTS'], {}), '(lenTS)\n', (3610, 3617), True, 'import numpy as np\n'), ((4274, 4444), 'covidvu.predict.buildLogisticModel', 'buildLogisticModel', ([], {'priorLogCarryingCapacity': 'PRIOR_LOG_CARRYING_CAPACITY', 'priorMidPoint': 'PRIOR_MID_POINT', 'priorGrowthRate': 'PRIOR_GROWTH_RATE', 'priorSigma': 'PRIOR_SIGMA'}), '(priorLogCarryingCapacity=PRIOR_LOG_CARRYING_CAPACITY,\n priorMidPoint=PRIOR_MID_POINT, priorGrowthRate=PRIOR_GROWTH_RATE,\n priorSigma=PRIOR_SIGMA)\n', (4292, 4444), False, 'from covidvu.predict import buildLogisticModel\n'), ((4671, 5062), 'covidvu.predict.predictLogisticGrowth', 'predictLogisticGrowth', (['logRegModel'], {'regionName': '"""US"""', 'siteData': 'TEST_SITE_DATA', 'jhCSSEFileConfirmed': 'TEST_JH_CSSE_FILE_CONFIRMED', 'jhCSSEFileDeaths': 'TEST_JH_CSSE_FILE_DEATHS_DEPRECATED', 'jhCSSEFileConfirmedDeprecated': 'TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED', 'jsCSSEReportPath': 'TEST_JH_CSSE_REPORT_PATH', 'nSamples': 'TEST_N_SAMPLES', 'nChains': 'TEST_N_CHAINS', 'nDaysPredict': 'nDaysPredict'}), "(logRegModel, regionName='US', siteData=TEST_SITE_DATA,\n jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED, 
jhCSSEFileDeaths=\n TEST_JH_CSSE_FILE_DEATHS_DEPRECATED, jhCSSEFileConfirmedDeprecated=\n TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED, jsCSSEReportPath=\n TEST_JH_CSSE_REPORT_PATH, nSamples=TEST_N_SAMPLES, nChains=\n TEST_N_CHAINS, nDaysPredict=nDaysPredict)\n", (4692, 5062), False, 'from covidvu.predict import predictLogisticGrowth\n'), ((7027, 7143), 'covidvu.predict._getPredictionsFromPosteriorSamples', '_getPredictionsFromPosteriorSamples', (["prediction['t']", "prediction['trace']", 'nDaysPredict', 'PREDICTIONS_PERCENTILES'], {}), "(prediction['t'], prediction['trace'],\n nDaysPredict, PREDICTIONS_PERCENTILES)\n", (7062, 7143), False, 'from covidvu.predict import _getPredictionsFromPosteriorSamples\n'), ((8309, 8352), 'pandas.date_range', 'pd.date_range', ([], {'start': 'startDate', 'end': 'endDate'}), '(start=startDate, end=endDate)\n', (8322, 8352), True, 'import pandas as pd\n'), ((8453, 8520), 'pandas.Series', 'pd.Series', ([], {'index': 'predictionIndex', 'data': "predictions['regionTSClean']"}), "(index=predictionIndex, data=predictions['regionTSClean'])\n", (8462, 8520), True, 'import pandas as pd\n'), ((8576, 8715), 'covidvu.predict._castPredictionsAsTS', '_castPredictionsAsTS', (['regionTSClean', "predictions['nDaysPredict']", "predictions['predictionsMean']", "predictions['predictionsPercentiles']"], {}), "(regionTSClean, predictions['nDaysPredict'],\n predictions['predictionsMean'], predictions['predictionsPercentiles'])\n", (8596, 8715), False, 'from covidvu.predict import _castPredictionsAsTS\n'), ((3106, 3127), 're.search', 're.search', (['pattern', 'f'], {}), '(pattern, f)\n', (3115, 3127), False, 'import re\n'), ((3288, 3300), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3297, 3300), False, 'import json\n'), ((3556, 3584), 'pandas.Timedelta', 'pd.Timedelta', (['(lenTS - 1)', '"""D"""'], {}), "(lenTS - 1, 'D')\n", (3568, 3584), True, 'import pandas as pd\n'), ((6526, 6600), 'covidvu.predict._dumpRegionPrediction', '_dumpRegionPrediction', (['prediction', 'TEST_SITE_DATA', 'PREDICTIONS_PERCENTILES'], {}), '(prediction, TEST_SITE_DATA, PREDICTIONS_PERCENTILES)\n', (6547, 6600), False, 'from covidvu.predict import _dumpRegionPrediction\n'), ((10281, 10653), 'covidvu.predict.predictRegions', 'predictRegions', (['(0)'], {'nDaysPredict': '(10)', 'siteData': 'TEST_SITE_DATA', 'jhCSSEFileConfirmed': 'TEST_JH_CSSE_FILE_CONFIRMED', 'jhCSSEFileDeaths': 'TEST_JH_CSSE_FILE_DEATHS_DEPRECATED', 'jhCSSEFileConfirmedDeprecated': 'TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED', 'jsCSSEReportPath': 'TEST_JH_CSSE_REPORT_PATH', 'logRegModel': 'logRegModel', 'nSamples': 'TEST_N_SAMPLES', 'nChains': 'TEST_N_CHAINS'}), '(0, nDaysPredict=10, siteData=TEST_SITE_DATA,\n jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED, jhCSSEFileDeaths=\n TEST_JH_CSSE_FILE_DEATHS_DEPRECATED, jhCSSEFileConfirmedDeprecated=\n TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED, jsCSSEReportPath=\n TEST_JH_CSSE_REPORT_PATH, logRegModel=logRegModel, nSamples=\n TEST_N_SAMPLES, nChains=TEST_N_CHAINS)\n', (10295, 10653), False, 'from covidvu.predict import predictRegions\n'), ((10961, 11343), 'covidvu.predict.predictRegions', 'predictRegions', (['"""all"""'], {'nDaysPredict': '(10)', 'siteData': 'TEST_SITE_DATA', 'jhCSSEFileConfirmed': 'TEST_JH_CSSE_FILE_CONFIRMED_SMALL', 'jhCSSEFileDeaths': 'TEST_JH_CSSE_FILE_DEATHS_DEPRECATED', 'jhCSSEFileConfirmedDeprecated': 'TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED', 'jsCSSEReportPath': 'TEST_JH_CSSE_REPORT_PATH', 'logRegModel': 'logRegModel', 'nSamples': 'TEST_N_SAMPLES', 'nChains': 'TEST_N_CHAINS'}), 
"('all', nDaysPredict=10, siteData=TEST_SITE_DATA,\n jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED_SMALL, jhCSSEFileDeaths\n =TEST_JH_CSSE_FILE_DEATHS_DEPRECATED, jhCSSEFileConfirmedDeprecated=\n TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED, jsCSSEReportPath=\n TEST_JH_CSSE_REPORT_PATH, logRegModel=logRegModel, nSamples=\n TEST_N_SAMPLES, nChains=TEST_N_CHAINS)\n", (10975, 11343), False, 'from covidvu.predict import predictRegions\n'), ((11952, 12334), 'covidvu.predict.predictRegions', 'predictRegions', (['"""all"""'], {'siteData': 'TEST_SITE_DATA', 'nDaysPredict': '(10)', 'jhCSSEFileConfirmed': 'TEST_JH_CSSE_FILE_CONFIRMED_SMALL', 'jhCSSEFileDeaths': 'TEST_JH_CSSE_FILE_DEATHS_DEPRECATED', 'jhCSSEFileConfirmedDeprecated': 'TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED', 'jsCSSEReportPath': 'TEST_JH_CSSE_REPORT_PATH', 'logRegModel': 'logRegModel', 'nSamples': 'TEST_N_SAMPLES', 'nChains': 'TEST_N_CHAINS'}), "('all', siteData=TEST_SITE_DATA, nDaysPredict=10,\n jhCSSEFileConfirmed=TEST_JH_CSSE_FILE_CONFIRMED_SMALL, jhCSSEFileDeaths\n =TEST_JH_CSSE_FILE_DEATHS_DEPRECATED, jhCSSEFileConfirmedDeprecated=\n TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED, jsCSSEReportPath=\n TEST_JH_CSSE_REPORT_PATH, logRegModel=logRegModel, nSamples=\n TEST_N_SAMPLES, nChains=TEST_N_CHAINS)\n", (11966, 12334), False, 'from covidvu.predict import predictRegions\n'), ((12530, 12562), 'covidvu.predict.load', 'load', (['(0)'], {'siteData': 'TEST_SITE_DATA'}), '(0, siteData=TEST_SITE_DATA)\n', (12534, 12562), False, 'from covidvu.predict import load\n'), ((12942, 13308), 'covidvu.predict.predictRegions', 'predictRegions', (['"""all"""'], {'siteData': 'TEST_SITE_DATA', 'jhCSSEFileConfirmed': 'TEST_JH_CSSE_FILE_CONFIRMED_SMALL', 'jhCSSEFileDeaths': 'TEST_JH_CSSE_FILE_DEATHS_DEPRECATED', 'jhCSSEFileConfirmedDeprecated': 'TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED', 'jsCSSEReportPath': 'TEST_JH_CSSE_REPORT_PATH', 'logRegModel': 'logRegModel', 'nSamples': 'TEST_N_SAMPLES', 'nChains': 'TEST_N_CHAINS'}), "('all', siteData=TEST_SITE_DATA, jhCSSEFileConfirmed=\n TEST_JH_CSSE_FILE_CONFIRMED_SMALL, jhCSSEFileDeaths=\n TEST_JH_CSSE_FILE_DEATHS_DEPRECATED, jhCSSEFileConfirmedDeprecated=\n TEST_JH_CSSE_FILE_CONFIRMED_DEPRECATED, jsCSSEReportPath=\n TEST_JH_CSSE_REPORT_PATH, logRegModel=logRegModel, nSamples=\n TEST_N_SAMPLES, nChains=TEST_N_CHAINS)\n", (12956, 13308), False, 'from covidvu.predict import predictRegions\n'), ((13463, 13513), 'covidvu.predict.getSavedShortCountryNames', 'getSavedShortCountryNames', ([], {'siteData': 'TEST_SITE_DATA'}), '(siteData=TEST_SITE_DATA)\n', (13488, 13513), False, 'from covidvu.predict import getSavedShortCountryNames\n'), ((3495, 3520), 'pandas.to_datetime', 'pd.to_datetime', (['startDate'], {}), '(startDate)\n', (3509, 3520), True, 'import pandas as pd\n'), ((3650, 3693), 'pandas.date_range', 'pd.date_range', ([], {'start': 'startDate', 'end': 'endDate'}), '(start=startDate, end=endDate)\n', (3663, 3693), True, 'import pandas as pd\n'), ((3945, 3981), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""test-ts.json"""'], {}), "(TEST_SITE_DATA, 'test-ts.json')\n", (3949, 3981), False, 'from os.path import join\n'), ((6626, 6679), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""prediction-world-mean-US.json"""'], {}), "(TEST_SITE_DATA, 'prediction-world-mean-US.json')\n", (6630, 6679), False, 'from os.path import join\n'), ((6705, 6762), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""prediction-world-conf-int-US.json"""'], {}), "(TEST_SITE_DATA, 'prediction-world-conf-int-US.json')\n", (6709, 6762), False, 'from os.path 
import join\n'), ((8169, 8194), 'pandas.to_datetime', 'pd.to_datetime', (['startDate'], {}), '(startDate)\n', (8183, 8194), True, 'import pandas as pd\n'), ((9973, 10020), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""test-ts-collection.json"""'], {}), "(TEST_SITE_DATA, 'test-ts-collection.json')\n", (9977, 10020), False, 'from os.path import join\n'), ((10088, 10135), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""test-ts-collection.json"""'], {}), "(TEST_SITE_DATA, 'test-ts-collection.json')\n", (10092, 10135), False, 'from os.path import join\n'), ((10808, 10864), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""prediction-world-mean-China.json"""'], {}), "(TEST_SITE_DATA, 'prediction-world-mean-China.json')\n", (10812, 10864), False, 'from os.path import join\n'), ((10890, 10950), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""prediction-world-conf-int-China.json"""'], {}), "(TEST_SITE_DATA, 'prediction-world-conf-int-China.json')\n", (10894, 10950), False, 'from os.path import join\n'), ((11510, 11566), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""prediction-world-mean-Italy.json"""'], {}), "(TEST_SITE_DATA, 'prediction-world-mean-Italy.json')\n", (11514, 11566), False, 'from os.path import join\n'), ((11593, 11653), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""prediction-world-conf-int-Italy.json"""'], {}), "(TEST_SITE_DATA, 'prediction-world-conf-int-Italy.json')\n", (11597, 11653), False, 'from os.path import join\n'), ((11680, 11733), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""prediction-world-mean-US.json"""'], {}), "(TEST_SITE_DATA, 'prediction-world-mean-US.json')\n", (11684, 11733), False, 'from os.path import join\n'), ((11760, 11817), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""prediction-world-conf-int-US.json"""'], {}), "(TEST_SITE_DATA, 'prediction-world-conf-int-US.json')\n", (11764, 11817), False, 'from os.path import join\n'), ((3151, 3174), 'os.path.join', 'join', (['purgeDirectory', 'f'], {}), '(purgeDirectory, f)\n', (3155, 3174), False, 'from os.path import join\n'), ((3882, 3918), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""test-ts.json"""'], {}), "(TEST_SITE_DATA, 'test-ts.json')\n", (3886, 3918), False, 'from os.path import join\n'), ((5751, 5782), 'pandas.Timedelta', 'pd.Timedelta', (['nDaysPredict', '"""D"""'], {}), "(nDaysPredict, 'D')\n", (5763, 5782), True, 'import pandas as pd\n'), ((13823, 13863), 'os.path.join', 'join', (['TEST_SITE_DATA', '"""test-predictions"""'], {}), "(TEST_SITE_DATA, 'test-predictions')\n", (13827, 13863), False, 'from os.path import join\n')] |
# Copyright (c) 2016, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Scheduler."""
import logging
import threading
from zoe_lib.state import Execution
from zoe_master.backends.interface import start_all, terminate_execution
from zoe_master.scheduler.base_scheduler import ZoeBaseScheduler
from zoe_master.exceptions import UnsupportedSchedulerPolicyError
log = logging.getLogger(__name__)
class ZoeSimpleScheduler(ZoeBaseScheduler):
"""The Scheduler class."""
def __init__(self, state, policy):
super().__init__(state)
if policy != 'FIFO':
raise UnsupportedSchedulerPolicyError
self.fifo_queue = []
self.trigger_semaphore = threading.Semaphore(0)
self.async_threads = []
self.loop_quit = False
self.loop_th = threading.Thread(target=self.loop_start_th, name='scheduler')
self.loop_th.start()
def trigger(self):
"""Trigger a scheduler run."""
self.trigger_semaphore.release()
def incoming(self, execution: Execution):
"""
This method adds the execution to the end of the FIFO queue and triggers the scheduler.
:param execution: The execution
:return:
"""
self.fifo_queue.append(execution)
self.trigger()
def terminate(self, execution: Execution) -> None:
"""
Inform the master that an execution has been terminated. This can be done asynchronously.
:param execution: the terminated execution
:return: None
"""
def async_termination():
"""Actual termination run in a thread."""
terminate_execution(execution)
self.trigger()
try:
self.fifo_queue.remove(execution)
except ValueError:
pass
th = threading.Thread(target=async_termination, name='termination_{}'.format(execution.id))
th.start()
self.async_threads.append(th)
def loop_start_th(self):
"""The Scheduler thread loop."""
auto_trigger_base = 60 # seconds
auto_trigger = auto_trigger_base
while True:
ret = self.trigger_semaphore.acquire(timeout=1)
if not ret: # Semaphore timeout, do some thread cleanup
counter = len(self.async_threads)
while counter > 0:
if len(self.async_threads) == 0:
break
th = self.async_threads.pop(0)
th.join(0.1)
                    if th.is_alive(): # join failed
log.debug('Thread {} join failed'.format(th.name))
self.async_threads.append(th)
counter -= 1
auto_trigger -= 1
if auto_trigger == 0:
auto_trigger = auto_trigger_base
self.trigger()
continue
if self.loop_quit:
break
log.debug("Scheduler start loop has been triggered")
if len(self.fifo_queue) == 0:
continue
e = self.fifo_queue[0]
assert isinstance(e, Execution)
e.set_starting()
self.fifo_queue.pop(0) # remove the execution form the queue
ret = start_all(e)
if ret == 'requeue':
self.fifo_queue.append(e)
else:
e.set_running()
def quit(self):
"""Stop the scheduler thread."""
self.loop_quit = True
self.trigger()
self.loop_th.join()
def stats(self):
"""Scheduler statistics."""
return {
'queue_length': len(self.fifo_queue),
'termination_threads_count': len(self.async_threads)
}
| [
"logging.getLogger",
"zoe_master.backends.interface.terminate_execution",
"threading.Semaphore",
"threading.Thread",
"zoe_master.backends.interface.start_all"
] | [((879, 906), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (896, 906), False, 'import logging\n'), ((1196, 1218), 'threading.Semaphore', 'threading.Semaphore', (['(0)'], {}), '(0)\n', (1215, 1218), False, 'import threading\n'), ((1305, 1366), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.loop_start_th', 'name': '"""scheduler"""'}), "(target=self.loop_start_th, name='scheduler')\n", (1321, 1366), False, 'import threading\n'), ((2139, 2169), 'zoe_master.backends.interface.terminate_execution', 'terminate_execution', (['execution'], {}), '(execution)\n', (2158, 2169), False, 'from zoe_master.backends.interface import start_all, terminate_execution\n'), ((3800, 3812), 'zoe_master.backends.interface.start_all', 'start_all', (['e'], {}), '(e)\n', (3809, 3812), False, 'from zoe_master.backends.interface import start_all, terminate_execution\n')] |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import json
from argparse import ArgumentParser
from configparser import ConfigParser
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
import logging
import socket
# TODO: move to config file
URL_IP_EXTERNAL = [
"http://ifconfig.me/ip",
"http://ipecho.net/plain",
"http://myexternalip.com/raw"]
def get_external_ip(url_list, index=0):
""" get the external IP address by querying web providers """
try:
response = urlopen(url_list[index], None, 3)
except (URLError, socket.timeout) as e:
#on error, try the next url
ip = get_external_ip(url_list, index + 1)
else:
data = response.read().decode('utf-8')
ip = re.findall(r'[0-9]+(?:\.[0-9]+){3}', data)[0]
return ip
class RecordManager(object):
""" manage (create/update) DNS records from config_file """
def __init__(self):
self.config = None
def load_config(self, config_file):
self.config = ConfigParser()
self.config.default_section = "general"
if os.path.isfile(config_file) and os.access(config_file, os.R_OK):
self.config.read(config_file)
def _get_record(self, url, headers):
try:
response = urlopen(Request(url, headers=headers))
except HTTPError as e:
# we have to handle http return codes in the 400-599 range (errors)
return None
if (response.getcode() != 200):
return None
encoding = response.info().get_content_charset('utf-8')
return json.loads(response.read().decode(encoding))
def update_records(self):
ip = get_external_ip(URL_IP_EXTERNAL)
for r in self.config.sections():
headers = { 'Content-Type': 'application/json',
'X-Api-Key': self.config[r]["api_key"]}
logging.info("Record {} ({}) for domain {}".format(self.config[r]["name"],
self.config[r]["type"],
self.config[r]["domain"]))
url = '{}domains/{}/records/{}/{}'.format(self.config[r]["api"],
self.config[r]["domain"],
self.config[r]["name"],
self.config[r]["type"])
data = {'rrset_ttl': self.config[r]["ttl"],
'rrset_values': [ip]}
current_record = self._get_record(url, headers)
if current_record is None:
logging.info(" Record does not exist. Let's create it...")
method = 'POST'
else:
if current_record['rrset_values'][0] == ip:
logging.info(" No IP change. Nothing to do...")
continue
logging.info(" IP change detected. Updating...")
method = 'PUT'
json_data = json.dumps(data).encode('utf-8')
req = Request(url, data=json_data, headers=headers, method=method)
try:
response = urlopen(req)
except HTTPError as e:
# something has gone wrong
logging.info(" Record update failed with error code: {}".format(e.code))
continue
if response.getcode() != 201:
logging.info(" Record update failed with status code: {}".format(response.getcode()))
continue
            logging.info("  Zone record updated successfully")
if __name__ == "__main__":
parser = ArgumentParser(description='Update Gandi DNS records.')
parser.add_argument('-c', '--config-file', help="configuration file", dest='config_file', required=True)
parser.add_argument('-v', '--verbose', help="increase output verbosity", action='store_true')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
rm = RecordManager()
rm.load_config(args.config_file)
rm.update_records()
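    # Illustrative config layout (the concrete values are assumptions; the keys are
    # the ones read by load_config()/update_records()):
    #
    #   [general]
    #   api = https://dns.api.gandi.net/api/v5/
    #   ttl = 300
    #
    #   [myrecord]
    #   api_key = <your-api-key>
    #   domain = example.com
    #   name = www
    #   type = A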
| [
"logging.basicConfig",
"configparser.ConfigParser",
"argparse.ArgumentParser",
"urllib.request.Request",
"os.access",
"json.dumps",
"os.path.isfile",
"re.findall",
"logging.info",
"urllib.request.urlopen"
] | [((4810, 4865), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Update Gandi DNS records."""'}), "(description='Update Gandi DNS records.')\n", (4824, 4865), False, 'from argparse import ArgumentParser\n'), ((1631, 1664), 'urllib.request.urlopen', 'urlopen', (['url_list[index]', 'None', '(3)'], {}), '(url_list[index], None, 3)\n', (1638, 1664), False, 'from urllib.request import Request, urlopen\n'), ((2139, 2153), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (2151, 2153), False, 'from configparser import ConfigParser\n'), ((5133, 5210), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(levelname)s: %(message)s', level=logging.DEBUG)\n", (5152, 5210), False, 'import logging\n'), ((1865, 1907), 're.findall', 're.findall', (['"""[0-9]+(?:\\\\.[0-9]+){3}"""', 'data'], {}), "('[0-9]+(?:\\\\.[0-9]+){3}', data)\n", (1875, 1907), False, 'import re\n'), ((2213, 2240), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (2227, 2240), False, 'import os\n'), ((2245, 2276), 'os.access', 'os.access', (['config_file', 'os.R_OK'], {}), '(config_file, os.R_OK)\n', (2254, 2276), False, 'import os\n'), ((4229, 4289), 'urllib.request.Request', 'Request', (['url'], {'data': 'json_data', 'headers': 'headers', 'method': 'method'}), '(url, data=json_data, headers=headers, method=method)\n', (4236, 4289), False, 'from urllib.request import Request, urlopen\n'), ((4720, 4767), 'logging.info', 'logging.info', (['""" Zone record updated succesfuly"""'], {}), "(' Zone record updated succesfuly')\n", (4732, 4767), False, 'import logging\n'), ((2406, 2435), 'urllib.request.Request', 'Request', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2413, 2435), False, 'from urllib.request import Request, urlopen\n'), ((3792, 3850), 'logging.info', 'logging.info', (['""" Record does not exist. Let\'s create it..."""'], {}), '(" Record does not exist. Let\'s create it...")\n', (3804, 3850), False, 'import logging\n'), ((4074, 4122), 'logging.info', 'logging.info', (['""" IP change detected. Updating..."""'], {}), "(' IP change detected. Updating...')\n", (4086, 4122), False, 'import logging\n'), ((4334, 4346), 'urllib.request.urlopen', 'urlopen', (['req'], {}), '(req)\n', (4341, 4346), False, 'from urllib.request import Request, urlopen\n'), ((3981, 4028), 'logging.info', 'logging.info', (['""" No IP change. Nothing to do..."""'], {}), "(' No IP change. Nothing to do...')\n", (3993, 4028), False, 'import logging\n'), ((4178, 4194), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4188, 4194), False, 'import json\n')] |
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Descriptors
def Raise(ErrorType=Exception,*args): raise ErrorType(*args)
class ClassProperty(property):
fget = lambda *args:Raise(AttributeError,"unreadable attribute")
def __init__(self, fget=None, doc=None):
if fget: self.fget = fget
        self.__doc__ = doc if doc else (fget.__doc__ if fget else None)
def __get__(self, inst, cls=None):
return self.fget(cls or type(inst))
class TestClass:
testVal = 5
@ClassProperty
def testGet(cls):return cls.testVal
print(TestClass.testVal)
print(TestClass.testGet)
TestClass.testGet = 54
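# Note: this assignment simply rebinds the class attribute, replacing the ClassProperty
# descriptor (there is no metaclass, so class-level assignment is not intercepted).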
from typing import Callable,TypeVar,Generic
Return = TypeVar('Return')
class Action(Generic[Return]):
def __init__(self, actionFunc:Callable[...,Return], *args,**kwds):
self.computeFunc,self.args,self.kwds = actionFunc,args,kwds
def compute(self):return self.computeFunc(*self.args,**self.kwds)
def __call__(self):return self.compute()
| [
"typing.TypeVar"
] | [((750, 767), 'typing.TypeVar', 'TypeVar', (['"""Return"""'], {}), "('Return')\n", (757, 767), False, 'from typing import Callable, TypeVar, Generic\n')] |
'''
Main module for "modeling" endpoints
'''
__author__ = '<NAME>'
from quart import request, render_template, flash, redirect, url_for
from captioner.database.models import ModelHistory
from simpleml.utils import PersistableLoader
import base64
import pandas as pd
import numpy as np
import tensorflow as tf
import requests
class ModelWrapper(object):
'''
    A lot of hackery to get the model to load in parallel when the service
    starts up.
    Had trouble getting asyncio to actually execute in parallel, so the workaround is:
    1) Load in a thread
    2) Create a new event loop for that thread
    3) Save the graph from the thread to use in the main thread at predict time
'''
def __init__(self):
self._image_model = None
self._text_model = None
self._graph = None
# self.concurrent_load_model()
@property
def image_model(self):
if self._image_model is None:
self.load_image_model()
return self._image_model
@property
def text_model(self):
if self._text_model is None:
self.load_text_model()
return self._text_model
@property
def graph(self):
if self._graph is None:
self.load_image_model()
return self._graph
def predict(self, image_source):
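        # Pair the raw image with the text model's seed caption in a one-row frame,
        # let the image model generate token ids, then decode the first sequence
        # back into words with the text model.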
with self.graph.as_default():
X = pd.DataFrame({'image': image_source, 'caption': [self.text_model.initial_response]})
tokens = self.image_model.predict(X, end_index=self.text_model.external_model.end_index, max_length=15)
return self.text_model.inverse_transform(tokens[0])
def load_image_model(self):
self._image_model = PersistableLoader.load_model('image_model')
self._image_model.load(load_externals=True)
self._graph = tf.get_default_graph()
def load_text_model(self):
self._text_model = PersistableLoader.load_model('text_model')
self._text_model.load(load_externals=True)
MODEL = ModelWrapper()
async def upload():
if request.method == 'POST': # For inputs with a binary image file
files = await request.files
if not 'photo' in files:
raise ValueError('Missing photo')
filename = files['photo'].filename
image_stream = files['photo'].stream.read()
elif request.method == 'GET': # For inputs with an image url
filename = request.args.get('url')
image_stream = requests.get(filename, stream=True).raw.read()
prediction = await predict(filename, image_stream)
# .decode is necessary on python 3 for bytes to str conversion
return await render_template(
'pages/prediction.html',
prediction=prediction.caption,
image=base64.b64encode(image_stream).decode(),
prediction_id=prediction.id
)
async def predict(filename, image_stream):
caption = MODEL.predict(image_stream)
# DB
history = ModelHistory.create(
filename=filename,
caption=caption
)
return history
async def model_feedback():
form = await request.form
prediction_id = form['prediction_id']
user_rank = form['user_rank']
user_caption = form['user_caption']
history = ModelHistory.find(prediction_id)
history.update(user_rank=user_rank, user_caption=user_caption)
await flash("Thank you for making caption-bot smarter!")
return redirect(url_for('home'))
| [
"simpleml.utils.PersistableLoader.load_model",
"quart.flash",
"captioner.database.models.ModelHistory.find",
"pandas.DataFrame",
"quart.request.args.get",
"base64.b64encode",
"requests.get",
"quart.url_for",
"captioner.database.models.ModelHistory.create",
"tensorflow.get_default_graph"
] | [((2919, 2974), 'captioner.database.models.ModelHistory.create', 'ModelHistory.create', ([], {'filename': 'filename', 'caption': 'caption'}), '(filename=filename, caption=caption)\n', (2938, 2974), False, 'from captioner.database.models import ModelHistory\n'), ((3207, 3239), 'captioner.database.models.ModelHistory.find', 'ModelHistory.find', (['prediction_id'], {}), '(prediction_id)\n', (3224, 3239), False, 'from captioner.database.models import ModelHistory\n'), ((1680, 1723), 'simpleml.utils.PersistableLoader.load_model', 'PersistableLoader.load_model', (['"""image_model"""'], {}), "('image_model')\n", (1708, 1723), False, 'from simpleml.utils import PersistableLoader\n'), ((1798, 1820), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1818, 1820), True, 'import tensorflow as tf\n'), ((1880, 1922), 'simpleml.utils.PersistableLoader.load_model', 'PersistableLoader.load_model', (['"""text_model"""'], {}), "('text_model')\n", (1908, 1922), False, 'from simpleml.utils import PersistableLoader\n'), ((3317, 3367), 'quart.flash', 'flash', (['"""Thank you for making caption-bot smarter!"""'], {}), "('Thank you for making caption-bot smarter!')\n", (3322, 3367), False, 'from quart import request, render_template, flash, redirect, url_for\n'), ((3388, 3403), 'quart.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (3395, 3403), False, 'from quart import request, render_template, flash, redirect, url_for\n'), ((1354, 1443), 'pandas.DataFrame', 'pd.DataFrame', (["{'image': image_source, 'caption': [self.text_model.initial_response]}"], {}), "({'image': image_source, 'caption': [self.text_model.\n initial_response]})\n", (1366, 1443), True, 'import pandas as pd\n'), ((2388, 2411), 'quart.request.args.get', 'request.args.get', (['"""url"""'], {}), "('url')\n", (2404, 2411), False, 'from quart import request, render_template, flash, redirect, url_for\n'), ((2435, 2470), 'requests.get', 'requests.get', (['filename'], {'stream': '(True)'}), '(filename, stream=True)\n', (2447, 2470), False, 'import requests\n'), ((2725, 2755), 'base64.b64encode', 'base64.b64encode', (['image_stream'], {}), '(image_stream)\n', (2741, 2755), False, 'import base64\n')] |
# Day 12 of <NAME>'s "100 Days of Python" on udemy.
from random import randint
from art import logo
import os
# Game Setup
print(logo)
print("Welcome to the Number Guessing Game!\nI am thinking of a number between 1 and 100")
EASY_MODE = 10
HARD_MODE = 5
def difficulty():
level = input("Choose a difficulty. Type 'easy' or 'hard':\n").upper()
if level == "EASY":
return EASY_MODE
    else:
        return HARD_MODE
# Game Logic
def game():
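    # Pick a secret number, then keep prompting until it is guessed or the attempts run out.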
guesses = difficulty()
answer = randint(1, 100)
print(f"You have {guesses} attempts remaining.")
attempt = int(input("Make a guess:\n"))
while not attempt == answer and guesses > 1:
if attempt < answer:
guesses -= 1
print(f"Too low.\nYou have {guesses} attempts remaining.")
attempt = int(input("Make a guess:\n"))
elif attempt > answer:
guesses -= 1
print(f"Too high.\nYou have {guesses} attempts remaining.")
attempt = int(input("Make a guess:\n"))
if not attempt == answer and guesses == 1:
print(f"You lose!\nThe answer was {answer}")
reset()
else:
print(f"You got it! The answer was {answer}")
reset()
def reset():
play_again = input("Would you like to play again? 'Y' or 'N':\n").upper()
if play_again == "Y":
game()
else:
os.system("clear")
game() | [
"os.system",
"random.randint"
] | [((525, 540), 'random.randint', 'randint', (['(1)', '(100)'], {}), '(1, 100)\n', (532, 540), False, 'from random import randint\n'), ((1393, 1411), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1402, 1411), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import os, re
from urllib import parse
failcnt = 0
successcnt = 0
path = os.path.abspath('.')
for htmlfile in os.listdir(path+'/html/'):
flag = True
with open(path+'/html/'+htmlfile,'r',errors='ignore') as f:
for line in f.readlines():
p = re.search('<title> SexInSex! Board </title>', line)
if p:
print(parse.unquote_plus(htmlfile))
flag = False
failcnt += 1
break
if flag:
print(parse.unquote_plus(htmlfile),'OK')
successcnt += 1
print('Failed:',failcnt)
print('Succeeded:',successcnt)
| [
"os.path.abspath",
"os.listdir",
"urllib.parse.unquote_plus",
"re.search"
] | [((99, 119), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (114, 119), False, 'import os, re\n'), ((137, 164), 'os.listdir', 'os.listdir', (["(path + '/html/')"], {}), "(path + '/html/')\n", (147, 164), False, 'import os, re\n'), ((295, 346), 're.search', 're.search', (['"""<title> SexInSex! Board </title>"""', 'line'], {}), "('<title> SexInSex! Board </title>', line)\n", (304, 346), False, 'import os, re\n'), ((524, 552), 'urllib.parse.unquote_plus', 'parse.unquote_plus', (['htmlfile'], {}), '(htmlfile)\n', (542, 552), False, 'from urllib import parse\n'), ((387, 415), 'urllib.parse.unquote_plus', 'parse.unquote_plus', (['htmlfile'], {}), '(htmlfile)\n', (405, 415), False, 'from urllib import parse\n')] |
import os
os.system("docker push urodoz/sailfish-git-puller:1.0") | [
"os.system"
] | [((10, 65), 'os.system', 'os.system', (['"""docker push urodoz/sailfish-git-puller:1.0"""'], {}), "('docker push urodoz/sailfish-git-puller:1.0')\n", (19, 65), False, 'import os\n')] |
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
import os
from stat import S_IXUSR, S_IXGRP, S_IXOTH, S_IRUSR, S_IRGRP, S_IROTH
from jinja2 import Environment, PackageLoader, StrictUndefined
S_IXALL = S_IXUSR | S_IXGRP | S_IXOTH
S_IRALL = S_IRUSR | S_IRGRP | S_IROTH
def make_executable(fh):
"""make open file fh executable"""
fileno = fh.fileno()
mode = os.fstat(fileno).st_mode
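    # Grant execute wherever read is already granted: mask out the read bits and
    # shift them right by two so each r bit (4) becomes the matching x bit (1).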
mode_r = mode & S_IRALL
mode_x = mode_r >> 2
mode = mode | mode_x
os.fchmod(fileno, mode)
def get_templates_environment(templates_dir):
"""Create and return a Jinja environment to deal with the templates."""
env = Environment(
loader=PackageLoader('charmcraft', 'templates/{}'.format(templates_dir)),
autoescape=False, # no need to escape things here :-)
keep_trailing_newline=True, # they're not text files if they don't end in newline!
optimized=False, # optimization doesn't make sense for one-offs
undefined=StrictUndefined) # fail on undefined
return env
| [
"os.fchmod",
"os.fstat"
] | [((1073, 1096), 'os.fchmod', 'os.fchmod', (['fileno', 'mode'], {}), '(fileno, mode)\n', (1082, 1096), False, 'import os\n'), ((966, 982), 'os.fstat', 'os.fstat', (['fileno'], {}), '(fileno)\n', (974, 982), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# import scipy as sp
from scipy import signal
import time
from acconeer_utils.clients.reg.client import RegClient
from acconeer_utils.clients.json.client import JSONClient
from acconeer_utils.clients import configs
from acconeer_utils import example_utils
from acconeer_utils.mpl_process import PlotProcess, PlotProccessDiedException, FigureUpdater
def main():
args = example_utils.ExampleArgumentParser(num_sens=1).parse_args()
example_utils.config_logging(args)
if args.socket_addr:
client = JSONClient(args.socket_addr)
else:
port = args.serial_port or example_utils.autodetect_serial_port()
client = RegClient(port)
config = config_setup()
config.sensor = args.sensors
tid = 10
sekvenser = tid * config.sweep_rate
filename = "Reflektor_2.csv"
info = client.setup_session(config)
num_points = info["data_length"]
amplitude_y_max = 22000
N_avg = 10
tracking = Tracking(num_points, config.range_interval, N_avg)
print("numpoints: ", num_points)
fig, (amplitude_ax) = plt.subplots(1)
fig.set_size_inches(12, 6)
fig.canvas.set_window_title(filename)
for ax in [amplitude_ax]:
# ax.set_xlabel("Depth (m)")
# ax.set_xlim(config.range_interval)
ax.set_xlabel("Time (s)")
ax.set_xlim(0, 100)
# amplitude_ax.set_ylabel("Amplitude")
# amplitude_ax.set_ylim(0, 1.1 * amplitude_y_max)
amplitude_ax.set_ylabel("tracked distance (m)")
amplitude_ax.set_ylim(config.range_interval)
xs = np.linspace(0, 100, num=100)
amplitude_line = amplitude_ax.plot(xs, np.zeros_like(xs))[0]
fig.tight_layout()
plt.ion()
plt.show()
list = np.zeros(100)
i = 0
interrupt_handler = example_utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
client.start_streaming()
matris = np.zeros((sekvenser, 2))
counter = 0
while not interrupt_handler.got_signal:
# for i in range(0, sekvenser):
info, sweep = client.get_next()
start = round(time.time()*1000)/1000
track = tracking.tracking(sweep)
end = round(time.time()*1000)/1000
print("Time for tracking loop {}".format(end-start))
list[i] = track
amplitude_line.set_ydata(list)
i += 1
if i == 100:
i = 0
list = np.zeros(100)
if not plt.fignum_exists(1): # Simple way to check if plot is closed
break
fig.canvas.flush_events()
# annotate.remove()
# matris = np.mean(matris, axis=0)
# np.savetxt(filename, matris, delimiter=",")
print("Disconnecting...")
plt.close()
client.disconnect()
def config_setup():
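    """Return an Envelope service configuration for the 0.4-0.8 m range at 10 Hz."""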
config = configs.EnvelopeServiceConfig()
# config = configs.IQServiceConfig()
config.range_interval = [0.4, 0.8]
config.sweep_rate = 10
config.gain = 1
config.session_profile = configs.EnvelopeServiceConfig.MAX_SNR
return config
class Tracking:
def __init__(self, num_points, range_interval, N_avg):
self.N_avg = N_avg
self.num_points = num_points
self.config_range_interval = range_interval
self.I_peaks = np.zeros(self.N_avg)
self.locs = np.zeros(self.N_avg)
self.I_peaks_filtered = np.zeros(self.N_avg)
self.tracked_distance = np.zeros(self.N_avg)
self.tracked_amplitude = np.zeros(self.N_avg)
self.tracked_phase = np.zeros(self.N_avg)
self.threshold = 0 # variable for finding peaks above threshold
self.data_idx = 0
# converts index to real length
self.real_dist = np.linspace(
self.config_range_interval[0], self.config_range_interval[1], num=self.num_points)
self.counter = 0 # Used only for if statement only for first iteration and not when data_idx goes back to zero
def tracking(self, data):
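        """Track the strongest envelope peak in the sweep.

        Each new peak index is stored in a rolling buffer of the last N_avg sweeps;
        the rounded mean of that buffer (I_peaks_filtered) is the tracked index,
        which is then converted to distance, amplitude and phase.
        """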
self.data = data
if self.data_idx == 0 and self.counter == 0: # things that only happens first time
I = np.argmax(np.abs(self.data))
self.I_peaks[:] = I
self.I_peaks_filtered[0] = self.I_peaks[0]
self.tracked_distance[0] = self.real_dist[int(self.I_peaks_filtered[0])]
self.tracked_amplitude[0] = np.abs(self.data[int(self.I_peaks_filtered[0])])
self.tracked_phase[0] = np.angle(self.data[int(self.I_peaks_filtered[0])])
# After first seq continous tracking
else:
            self.locs, _ = signal.find_peaks(np.abs(self.data))  # find local maxima in the data
            # discard local maxima that fall below the threshold
self.locs = [x for x in self.locs if(np.abs(self.data[x]) > self.threshold)]
difference = np.subtract(self.locs, self.I_peaks_filtered[self.data_idx])
print("locks: ", self.locs)
print("Last I_peaks_filtered: ", self.I_peaks_filtered[self.data_idx])
print("difference: ", difference)
abs = np.abs(difference)
argmin = np.argmin(abs)
Index_in_locks = argmin # index of closest peak in locs
# Index_in_locks = np.argmin(np.abs(self.locks - self.I_peaks_filtered[self.data_idx - 1])) # difference between current peak index and last peak index
if len(self.locs) == 0: # if no peak is found
self.I_peaks[self.data_idx] = self.I_peaks[self.data_idx - 1]
print("Last peak value. Not updated.")
else:
I = self.locs[int(Index_in_locks)]
self.I_peaks[self.data_idx] = I
print("I_peaks: ", self.I_peaks)
# if self.counter == 0: # Questions about this part.
# self.i_avg_start = 0 # this will be 0 as long as counter == 0
# if self.data_idx == self.N_avg - 1: # change dist to nmbr of sequences later
# self.counter = 1
# else:
# self.i_avg_start = self.data_idx - (self.N_avg - 1)
self.I_peaks_filtered[self.data_idx] = np.round(
np.mean(self.I_peaks)) # mean value of N_avg latest peaks
# determines threshold
self.threshold = np.abs(self.data[int(self.I_peaks_filtered[self.data_idx])])*0.5
self.tracked_distance[self.data_idx] = self.real_dist[int(
self.I_peaks_filtered[self.data_idx])]
self.tracked_amplitude[self.data_idx] = np.abs(
self.data[int(self.I_peaks_filtered[self.data_idx])])
self.tracked_phase[self.data_idx] = np.angle(
self.data[int(self.I_peaks_filtered[self.data_idx])])
# print("I_peaks_filtered: ", self.I_peaks_filtered)
self.data_idx += 1
if self.data_idx == self.N_avg:
self.data_idx = 0
return self.tracked_distance[self.data_idx - 1]
if __name__ == "__main__":
main()
| [
"acconeer_utils.clients.json.client.JSONClient",
"acconeer_utils.example_utils.config_logging",
"acconeer_utils.clients.configs.EnvelopeServiceConfig",
"numpy.mean",
"acconeer_utils.example_utils.ExampleInterruptHandler",
"numpy.subtract",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.argmin",
"acconeer_utils.example_utils.autodetect_serial_port",
"numpy.abs",
"acconeer_utils.clients.reg.client.RegClient",
"matplotlib.pyplot.ion",
"time.time",
"matplotlib.pyplot.show",
"acconeer_utils.example_utils.ExampleArgumentParser",
"matplotlib.pyplot.fignum_exists",
"numpy.zeros",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] | [((532, 566), 'acconeer_utils.example_utils.config_logging', 'example_utils.config_logging', (['args'], {}), '(args)\n', (560, 566), False, 'from acconeer_utils import example_utils\n'), ((1156, 1171), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (1168, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1658), 'numpy.linspace', 'np.linspace', (['(0)', '(100)'], {'num': '(100)'}), '(0, 100, num=100)\n', (1641, 1658), True, 'import numpy as np\n'), ((1752, 1761), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1759, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1766, 1776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1774, 1776), True, 'import matplotlib.pyplot as plt\n'), ((1788, 1801), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (1796, 1801), True, 'import numpy as np\n'), ((1836, 1875), 'acconeer_utils.example_utils.ExampleInterruptHandler', 'example_utils.ExampleInterruptHandler', ([], {}), '()\n', (1873, 1875), False, 'from acconeer_utils import example_utils\n'), ((1960, 1984), 'numpy.zeros', 'np.zeros', (['(sekvenser, 2)'], {}), '((sekvenser, 2))\n', (1968, 1984), True, 'import numpy as np\n'), ((2748, 2759), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2757, 2759), True, 'import matplotlib.pyplot as plt\n'), ((2819, 2850), 'acconeer_utils.clients.configs.EnvelopeServiceConfig', 'configs.EnvelopeServiceConfig', ([], {}), '()\n', (2848, 2850), False, 'from acconeer_utils.clients import configs\n'), ((610, 638), 'acconeer_utils.clients.json.client.JSONClient', 'JSONClient', (['args.socket_addr'], {}), '(args.socket_addr)\n', (620, 638), False, 'from acconeer_utils.clients.json.client import JSONClient\n'), ((740, 755), 'acconeer_utils.clients.reg.client.RegClient', 'RegClient', (['port'], {}), '(port)\n', (749, 755), False, 'from acconeer_utils.clients.reg.client import RegClient\n'), ((3280, 3300), 'numpy.zeros', 'np.zeros', (['self.N_avg'], {}), '(self.N_avg)\n', (3288, 3300), True, 'import numpy as np\n'), ((3321, 3341), 'numpy.zeros', 'np.zeros', (['self.N_avg'], {}), '(self.N_avg)\n', (3329, 3341), True, 'import numpy as np\n'), ((3374, 3394), 'numpy.zeros', 'np.zeros', (['self.N_avg'], {}), '(self.N_avg)\n', (3382, 3394), True, 'import numpy as np\n'), ((3427, 3447), 'numpy.zeros', 'np.zeros', (['self.N_avg'], {}), '(self.N_avg)\n', (3435, 3447), True, 'import numpy as np\n'), ((3481, 3501), 'numpy.zeros', 'np.zeros', (['self.N_avg'], {}), '(self.N_avg)\n', (3489, 3501), True, 'import numpy as np\n'), ((3531, 3551), 'numpy.zeros', 'np.zeros', (['self.N_avg'], {}), '(self.N_avg)\n', (3539, 3551), True, 'import numpy as np\n'), ((3716, 3814), 'numpy.linspace', 'np.linspace', (['self.config_range_interval[0]', 'self.config_range_interval[1]'], {'num': 'self.num_points'}), '(self.config_range_interval[0], self.config_range_interval[1],\n num=self.num_points)\n', (3727, 3814), True, 'import numpy as np\n'), ((467, 514), 'acconeer_utils.example_utils.ExampleArgumentParser', 'example_utils.ExampleArgumentParser', ([], {'num_sens': '(1)'}), '(num_sens=1)\n', (502, 514), False, 'from acconeer_utils import example_utils\n'), ((684, 722), 'acconeer_utils.example_utils.autodetect_serial_port', 'example_utils.autodetect_serial_port', ([], {}), '()\n', (720, 722), False, 'from acconeer_utils import example_utils\n'), ((1702, 1719), 'numpy.zeros_like', 'np.zeros_like', (['xs'], {}), '(xs)\n', (1715, 1719), True, 'import numpy as np\n'), ((2452, 2465), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', 
(2460, 2465), True, 'import numpy as np\n'), ((2481, 2501), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['(1)'], {}), '(1)\n', (2498, 2501), True, 'import matplotlib.pyplot as plt\n'), ((4819, 4879), 'numpy.subtract', 'np.subtract', (['self.locs', 'self.I_peaks_filtered[self.data_idx]'], {}), '(self.locs, self.I_peaks_filtered[self.data_idx])\n', (4830, 4879), True, 'import numpy as np\n'), ((5067, 5085), 'numpy.abs', 'np.abs', (['difference'], {}), '(difference)\n', (5073, 5085), True, 'import numpy as np\n'), ((5107, 5121), 'numpy.argmin', 'np.argmin', (['abs'], {}), '(abs)\n', (5116, 5121), True, 'import numpy as np\n'), ((4123, 4140), 'numpy.abs', 'np.abs', (['self.data'], {}), '(self.data)\n', (4129, 4140), True, 'import numpy as np\n'), ((4595, 4612), 'numpy.abs', 'np.abs', (['self.data'], {}), '(self.data)\n', (4601, 4612), True, 'import numpy as np\n'), ((6180, 6201), 'numpy.mean', 'np.mean', (['self.I_peaks'], {}), '(self.I_peaks)\n', (6187, 6201), True, 'import numpy as np\n'), ((2147, 2158), 'time.time', 'time.time', ([], {}), '()\n', (2156, 2158), False, 'import time\n'), ((2231, 2242), 'time.time', 'time.time', ([], {}), '()\n', (2240, 2242), False, 'import time\n'), ((4754, 4774), 'numpy.abs', 'np.abs', (['self.data[x]'], {}), '(self.data[x])\n', (4760, 4774), True, 'import numpy as np\n')] |
import os, sys
sys.path.append(os.path.dirname(__file__))
from auth_required import auth_required
from db_required import db_required | [
"os.path.dirname"
] | [((31, 56), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (46, 56), False, 'import os, sys\n')] |
'''
Created on Aug 6, 2020
@author: <NAME>
'''
#===========
# IMPORTS
#===========
import tkinter as tk
from tkinter import Menu
from tkinter import ttk
#============
# FUNCTIONS
#============
# Exit GUI Cleanly
def _quit():
win.quit()
win.destroy()
exit()
#============
# PROCEDURAL
#============
# Create instance:
win = tk.Tk()
# Add a title:
win.title("Simple GUI")
# ---------------------
# Creating a Menu Bar
menu_bar = Menu()
win.config(menu=menu_bar)
# Add Menu items
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label="New")
file_menu.add_separator()
file_menu.add_command(
label="Exit", command=_quit)
menu_bar.add_cascade(
label="File", menu=file_menu)
# Add a Secondary Menu
help_menu = Menu(menu_bar, tearoff=0)
help_menu.add_command(label="About")
menu_bar.add_cascade(
label="Help", menu=help_menu)
# ---------------------
# Tab Control / Notebook
tab_control = ttk.Notebook(win) # Create Tab Control
tab_1 = ttk.Frame(tab_control) # Create 1st Tab
tab_control.add(tab_1, text="Tab 1") # Add 1st Tab
tab_2 = ttk.Frame(tab_control) # Create 2nd Tab
tab_control.add(tab_2, text="Tab 2") # Add 2nd Tab
tab_control.pack(expand=1, fill="both")
# ---------------------
# Container frame to hold all other widgets:
test_frame = ttk.LabelFrame(tab_1, text=' Test Frame 1 ')
# Tkinter grid layout manager:
test_frame.grid(column=0, row=0, padx=8, pady=4)
# Adding a label:
ttk.Label(test_frame, text="LABEL: ").grid(
column=0, row=0, sticky='W')
# ---------------------
test_label = tk.StringVar()
test_selected = ttk.Combobox(
test_frame, width=12, textvariable=test_label)
# Create dictionary of values:
test_selected['values'] = ('Selection 1', 'Selection 2', 'Selection 3')
test_selected.grid(column=1, row=0)
test_selected.current(0)
# ---------------------
# Increase combobox to longest text
max_width = max([len(x) for x in test_selected['values']])
# Adjust for extra spacing:
new_width = max_width - 2
test_selected.config(width=new_width)
#==========================
ENTRY_WIDTH = max_width + 3
#==========================
# Adding Label and
# Textbox Entry Widgets
#==========================
ttk.Label(test_frame, text="Last Updated: ").grid(
column=0,
row=1,
sticky='E')
updated = tk.StringVar()
updated_entry = ttk.Entry(
test_frame,
width=ENTRY_WIDTH,
textvariable=updated,
state='readonly')
updated_entry.grid(
column=1,
row=1,
sticky='W')
ttk.Label(test_frame, text="Weather: ").grid(
column=0, row=2, sticky='E')
weather = tk.StringVar()
weather_entry = ttk.Entry(
test_frame,
width=ENTRY_WIDTH,
textvariable=weather,
state='readonly')
weather_entry.grid(
column=1,
row=2,
sticky='W')
ttk.Label(test_frame, text="Temperature: ").grid(
column=0, row=3, sticky='E')
temperature = tk.StringVar()
temperature_entry = ttk.Entry(
test_frame,
width=ENTRY_WIDTH,
textvariable=temperature,
state='readonly')
temperature_entry.grid(
column=1,
row=3,
sticky='W')
ttk.Label(test_frame, text="Dew Point: ").grid(
column=0, row=4, sticky='E')
dew_point = tk.StringVar()
dew_point_entry = ttk.Entry(
test_frame,
width=ENTRY_WIDTH,
textvariable=dew_point,
state='readonly')
dew_point_entry.grid(
column=1,
row=4,
sticky='W')
ttk.Label(test_frame, text="Relative Humidity: ").grid(
column=0, row=5, sticky='E')
humidity = tk.StringVar()
humidity_entry = ttk.Entry(
test_frame,
width=ENTRY_WIDTH,
textvariable=humidity,
state='readonly')
humidity_entry.grid(
column=1,
row=5,
sticky='W')
# Spacing around labels:
for child in test_frame.winfo_children():
child.grid_configure(padx=4, pady=2)
#============
# START GUI
#============
win.mainloop()
| [
"tkinter.Menu",
"tkinter.ttk.Combobox",
"tkinter.ttk.Entry",
"tkinter.ttk.Frame",
"tkinter.ttk.Label",
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.ttk.LabelFrame",
"tkinter.ttk.Notebook"
] | [((342, 349), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (347, 349), True, 'import tkinter as tk\n'), ((448, 454), 'tkinter.Menu', 'Menu', ([], {}), '()\n', (452, 454), False, 'from tkinter import Menu\n'), ((511, 536), 'tkinter.Menu', 'Menu', (['menu_bar'], {'tearoff': '(0)'}), '(menu_bar, tearoff=0)\n', (515, 536), False, 'from tkinter import Menu\n'), ((746, 771), 'tkinter.Menu', 'Menu', (['menu_bar'], {'tearoff': '(0)'}), '(menu_bar, tearoff=0)\n', (750, 771), False, 'from tkinter import Menu\n'), ((929, 946), 'tkinter.ttk.Notebook', 'ttk.Notebook', (['win'], {}), '(win)\n', (941, 946), False, 'from tkinter import ttk\n'), ((985, 1007), 'tkinter.ttk.Frame', 'ttk.Frame', (['tab_control'], {}), '(tab_control)\n', (994, 1007), False, 'from tkinter import ttk\n'), ((1096, 1118), 'tkinter.ttk.Frame', 'ttk.Frame', (['tab_control'], {}), '(tab_control)\n', (1105, 1118), False, 'from tkinter import ttk\n'), ((1322, 1366), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['tab_1'], {'text': '""" Test Frame 1 """'}), "(tab_1, text=' Test Frame 1 ')\n", (1336, 1366), False, 'from tkinter import ttk\n'), ((1582, 1596), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (1594, 1596), True, 'import tkinter as tk\n'), ((1613, 1672), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['test_frame'], {'width': '(12)', 'textvariable': 'test_label'}), '(test_frame, width=12, textvariable=test_label)\n', (1625, 1672), False, 'from tkinter import ttk\n'), ((2315, 2329), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2327, 2329), True, 'import tkinter as tk\n'), ((2346, 2431), 'tkinter.ttk.Entry', 'ttk.Entry', (['test_frame'], {'width': 'ENTRY_WIDTH', 'textvariable': 'updated', 'state': '"""readonly"""'}), "(test_frame, width=ENTRY_WIDTH, textvariable=updated, state='readonly'\n )\n", (2355, 2431), False, 'from tkinter import ttk\n'), ((2598, 2612), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2610, 2612), True, 'import tkinter as tk\n'), ((2629, 2714), 'tkinter.ttk.Entry', 'ttk.Entry', (['test_frame'], {'width': 'ENTRY_WIDTH', 'textvariable': 'weather', 'state': '"""readonly"""'}), "(test_frame, width=ENTRY_WIDTH, textvariable=weather, state='readonly'\n )\n", (2638, 2714), False, 'from tkinter import ttk\n'), ((2887, 2901), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2899, 2901), True, 'import tkinter as tk\n'), ((2922, 3011), 'tkinter.ttk.Entry', 'ttk.Entry', (['test_frame'], {'width': 'ENTRY_WIDTH', 'textvariable': 'temperature', 'state': '"""readonly"""'}), "(test_frame, width=ENTRY_WIDTH, textvariable=temperature, state=\n 'readonly')\n", (2931, 3011), False, 'from tkinter import ttk\n'), ((3184, 3198), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3196, 3198), True, 'import tkinter as tk\n'), ((3217, 3304), 'tkinter.ttk.Entry', 'ttk.Entry', (['test_frame'], {'width': 'ENTRY_WIDTH', 'textvariable': 'dew_point', 'state': '"""readonly"""'}), "(test_frame, width=ENTRY_WIDTH, textvariable=dew_point, state=\n 'readonly')\n", (3226, 3304), False, 'from tkinter import ttk\n'), ((3482, 3496), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3494, 3496), True, 'import tkinter as tk\n'), ((3514, 3600), 'tkinter.ttk.Entry', 'ttk.Entry', (['test_frame'], {'width': 'ENTRY_WIDTH', 'textvariable': 'humidity', 'state': '"""readonly"""'}), "(test_frame, width=ENTRY_WIDTH, textvariable=humidity, state=\n 'readonly')\n", (3523, 3600), False, 'from tkinter import ttk\n'), ((1467, 1504), 'tkinter.ttk.Label', 'ttk.Label', (['test_frame'], {'text': '"""LABEL: """'}), "(test_frame, 
text='LABEL: ')\n", (1476, 1504), False, 'from tkinter import ttk\n'), ((2211, 2255), 'tkinter.ttk.Label', 'ttk.Label', (['test_frame'], {'text': '"""Last Updated: """'}), "(test_frame, text='Last Updated: ')\n", (2220, 2255), False, 'from tkinter import ttk\n'), ((2509, 2548), 'tkinter.ttk.Label', 'ttk.Label', (['test_frame'], {'text': '"""Weather: """'}), "(test_frame, text='Weather: ')\n", (2518, 2548), False, 'from tkinter import ttk\n'), ((2790, 2833), 'tkinter.ttk.Label', 'ttk.Label', (['test_frame'], {'text': '"""Temperature: """'}), "(test_frame, text='Temperature: ')\n", (2799, 2833), False, 'from tkinter import ttk\n'), ((3091, 3132), 'tkinter.ttk.Label', 'ttk.Label', (['test_frame'], {'text': '"""Dew Point: """'}), "(test_frame, text='Dew Point: ')\n", (3100, 3132), False, 'from tkinter import ttk\n'), ((3382, 3431), 'tkinter.ttk.Label', 'ttk.Label', (['test_frame'], {'text': '"""Relative Humidity: """'}), "(test_frame, text='Relative Humidity: ')\n", (3391, 3431), False, 'from tkinter import ttk\n')] |
from fr.ortec.dsi.dao.ConnectionManager import ConnectionManager
class MongoDao(object):
__database = None
__connection = None
def __init__(self, database):
connection = ConnectionManager("localhost", "27017")
self.__database = connection.get_database(database)
def create(self, collection, json_data):
coll = self.__database.get_collection(collection)
result = coll.insert_one(json_data)
print(result.inserted_id)
def update(self):
raise NotImplementedError
def read(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
if __name__ == '__main__':
mongo_dao = MongoDao("logs")
| [
"fr.ortec.dsi.dao.ConnectionManager.ConnectionManager"
] | [((194, 233), 'fr.ortec.dsi.dao.ConnectionManager.ConnectionManager', 'ConnectionManager', (['"""localhost"""', '"""27017"""'], {}), "('localhost', '27017')\n", (211, 233), False, 'from fr.ortec.dsi.dao.ConnectionManager import ConnectionManager\n')] |
# this use play store twitter corpus
# http://textblob.readthedocs.io/en/dev/classifiers.html#evaluating-classifiers
# http://streamhacker.com/2010/05/24/text-classification-sentiment-analysis-stopwords-collocations/
from sklearn.externals import joblib
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import stopwords
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import *
from textblob import TextBlob
from textblob.classifiers import NaiveBayesClassifier
from textblob.sentiments import NaiveBayesAnalyzer
import random
import collections
from nltk.stem.snowball import EnglishStemmer
from nltk.tokenize import word_tokenize
import itertools
import _pickle as cPickle
import os
# variables
stopset = set(stopwords.words('english')) - {'over', 'under', 'below', 'more', 'most', 'no', 'not', 'only', 'such',
'few', 'so', 'too', 'very', 'just', 'any', 'once'}
path = os.path.expanduser("~/Python/SamplePython3/com/radityalabs/")
stemmer = EnglishStemmer()
def end_word_extractor(document):
tokens = document.split()
first_word, last_word = tokens[0], tokens[-1]
feats = {}
feats["first({0})".format(first_word)] = True
feats["last({0})".format(last_word)] = False
return feats
def train():
with open(path + "/Python/bimbingan_data/twitter_train_23536_1.pickle", "rb") as handle:
return cPickle.load(handle)
def test():
with open(path + "/Python/bimbingan_data/twitter_test_15691_1.pickle", "rb") as handle:
return cPickle.load(handle)
def is_string_not_empty(string):
if string == "":
return False
return True
def preprocessing(sentences):
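    # For each sentence: tokenize, keep only tokens longer than three characters,
    # lowercase and stem them, and drop English stopwords before rebuilding the sentence.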
documents = []
for sentence in sentences:
tokens = word_tokenize(sentence, language="english")
new_sentence = ""
for token in tokens:
if len(token) > 3:
t = token.lower()
t = stemmer.stem(t)
if is_string_not_empty(t):
valid = t not in set(stopwords.words('english'))
if valid:
new_sentence += t + " "
documents.append(new_sentence)
return documents
def precision_recall(classifier):
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
print('pos precision:', classifier.metrics.precision(refsets['pos'], testsets['pos']))
print('pos recall:', classifier.metrics.recall(refsets['pos'], testsets['pos']))
print('neg precision:', classifier.metrics.precision(refsets['neg'], testsets['neg']))
print('neg recall:', classifier.metrics.recall(refsets['neg'], testsets['neg']))
def testing(sentence):
# cl = joblib.load(path + '/Python/bimbingan_data/sklearn-joblib-train-twitter-1.pkl')
classifier = NaiveBayesClassifier(train(), feature_extractor=end_word_extractor)
blob = TextBlob(sentence, classifier=classifier)
print(sentence + " label : ", blob.classify())
print("polarity", blob.sentiment.polarity) # polarity and subjectivity
print("subjectivity", blob.sentiment.subjectivity)
## calc neg and pos
sentiment = TextBlob(sentence, classifier=classifier, analyzer=NaiveBayesAnalyzer())
print("positive", sentiment.sentiment.p_pos)
print("negative", sentiment.sentiment.p_neg)
# print("Accuracy: {0}".format(classifier.accuracy(test())))
# test_result = []
# gold_result = []
# for i in range(len(test())):
# test_result.append(classifier.classify(test()[i][0]))
# gold_result.append(test()[i][1])
# print('Clasification report:\n', classification_report(gold_result, test_result))
# print('Confussion matrix:\n', confusion_matrix(gold_result, test_result))
def collection():
datas = []
datas.append(
"'It''s a very nice app to use, when it''s working correctly. Been having trouble with it as of late. Trying to get my notifications turned on. It keeps telling me twitter has stopped (Report) (OK). I''ve had to uninstall it and then reinstall it a number of times. If it starts working correctly I''d be very happy to give it a higher rating. Like i mentioned it is a very nice app, when working correctly. After just UNISTALLING the APP & then REINSTALLING it. It seems to be working better. I''ve had to do that about 4 or 5 times so far. ?? '")
datas.append(
"'So here is what I hate: when I go to someone''s profile neither on tweets nor on tweets and replies I can't see all of their tweets!! I feel like it''s selected or something but I WANNA SEE ALL TWEETS PLEASE!! Lately I have also noticed that I do not see all the tweets of people I follow on my time line but I WANNA SEE ALL!!! also I wish I could download not only pictures in tweets but also gifs. Oh and sometimes I don''t get any notifications and sometimes I do I don''t know why but it''s annoying, please fix! I love the concept and the fact that it''s the only social media staying itself and not going Facebook or Snapchat with the stories and stuff '")
datas.append(
"'Every time they so-called update the app, they add more problems than they solve. Matter how of fact, you don''t even know how they made better. So now, the search button which usually shows trending topics isn''t doing that anymore. And a tweet with a 1000 rt only shows 1 when seen among other tweets. '")
datas.append(
"'Trending topics revision sucks with latest update... not as accessible and less robust. Likes and retweets counters are cut off if over 99 so 100 shows as 1 unless you isolate the tweet. Other than that, I''m mostly fine with newer changes. Showing who is replying to whom is helpful but obnoxious. Can''t think of a better option, but it''s unpleasant as is. '")
return datas
#for doc in collection():
# testing(sentence=doc)
documents = preprocessing(collection())
for i in range(0, len(documents)):
print(documents[i])
| [
"textblob.TextBlob",
"nltk.corpus.stopwords.words",
"nltk.stem.snowball.EnglishStemmer",
"_pickle.load",
"nltk.tokenize.word_tokenize",
"collections.defaultdict",
"textblob.sentiments.NaiveBayesAnalyzer",
"os.path.expanduser"
] | [((1131, 1192), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Python/SamplePython3/com/radityalabs/"""'], {}), "('~/Python/SamplePython3/com/radityalabs/')\n", (1149, 1192), False, 'import os\n'), ((1203, 1219), 'nltk.stem.snowball.EnglishStemmer', 'EnglishStemmer', ([], {}), '()\n', (1217, 1219), False, 'from nltk.stem.snowball import EnglishStemmer\n'), ((2439, 2467), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (2462, 2467), False, 'import collections\n'), ((2483, 2511), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (2506, 2511), False, 'import collections\n'), ((3076, 3117), 'textblob.TextBlob', 'TextBlob', (['sentence'], {'classifier': 'classifier'}), '(sentence, classifier=classifier)\n', (3084, 3117), False, 'from textblob import TextBlob\n'), ((926, 952), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (941, 952), False, 'from nltk.corpus import stopwords\n'), ((1588, 1608), '_pickle.load', 'cPickle.load', (['handle'], {}), '(handle)\n', (1600, 1608), True, 'import _pickle as cPickle\n'), ((1729, 1749), '_pickle.load', 'cPickle.load', (['handle'], {}), '(handle)\n', (1741, 1749), True, 'import _pickle as cPickle\n'), ((1940, 1983), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['sentence'], {'language': '"""english"""'}), "(sentence, language='english')\n", (1953, 1983), False, 'from nltk.tokenize import word_tokenize\n'), ((3392, 3412), 'textblob.sentiments.NaiveBayesAnalyzer', 'NaiveBayesAnalyzer', ([], {}), '()\n', (3410, 3412), False, 'from textblob.sentiments import NaiveBayesAnalyzer\n'), ((2224, 2250), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2239, 2250), False, 'from nltk.corpus import stopwords\n')] |
import pygame
pygame.init()
pygame.mixer.music.load('ex021.mp3')
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(1)
| [
"pygame.init",
"pygame.mixer.music.get_busy",
"pygame.mixer.music.load",
"pygame.time.Clock",
"pygame.mixer.music.play"
] | [((14, 27), 'pygame.init', 'pygame.init', ([], {}), '()\n', (25, 27), False, 'import pygame\n'), ((28, 64), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""ex021.mp3"""'], {}), "('ex021.mp3')\n", (51, 64), False, 'import pygame\n'), ((65, 90), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (88, 90), False, 'import pygame\n'), ((97, 126), 'pygame.mixer.music.get_busy', 'pygame.mixer.music.get_busy', ([], {}), '()\n', (124, 126), False, 'import pygame\n'), ((132, 151), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (149, 151), False, 'import pygame\n')] |
#!/usr/bin/env python3
"""
loss
"""
import tensorflow as tf
def calculate_loss(y, y_pred):
"""
calculate loss function
"""
return tf.losses.softmax_cross_entropy(y, y_pred)
| [
"tensorflow.losses.softmax_cross_entropy"
] | [((151, 193), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['y', 'y_pred'], {}), '(y, y_pred)\n', (182, 193), True, 'import tensorflow as tf\n')] |
from rest_framework import permissions, generics, filters, status, views
from policy import serializers
from rest_framework.views import APIView
from rest_framework.response import Response
from policy.models import DcPolicy, DcPolicyHistory
from django.shortcuts import get_object_or_404, get_list_or_404
class PolicyQuoteView(APIView):
    ''' API view for handling the policy quote endpoint.
    Overrides the post and patch methods.
'''
serializer_class = serializers.DcPolicySerializer
permission_classes = (permissions.AllowAny,)
def post(self, request):
serializer = self.serializer_class(context={'request':request}, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
def patch(self, request):
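        # Accept a quote (status 'accepted') or activate a previously accepted policy
        # (status 'active'); requests without a quote_id get a 400 response.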
quote_id = request.data.get('quote_id', None)
policy_status = request.data.get('status', None)
if quote_id:
policy = get_object_or_404(DcPolicy, id=quote_id)
if policy_status == 'accepted':
policy.state = DcPolicy.STATE_CHOICES[1][0]
policy.save()
# check to be sure the status is active and its previously accepted
elif policy_status == 'active' and policy.state == DcPolicy.STATE_CHOICES[1][0]:
policy.state = DcPolicy.STATE_CHOICES[2][0]
policy.save()
return Response(data=serializers.DcPolicySerializer(policy).data,
status=status.HTTP_200_OK)
return Response(data={}, status=status.HTTP_400_BAD_REQUEST)
class PolicyListView(generics.ListAPIView):
serializer_class = serializers.DcPolicySerializer
permission_classes = (permissions.AllowAny,)
def get_queryset(self):
qs = DcPolicy.objects.all()
customer_id = self.request.GET.get('customer_id', None)
if customer_id:
qs = qs.filter(customer_id=customer_id)
return qs
class PolicyDetailView(generics.RetrieveAPIView):
serializer_class = serializers.DcPolicySerializer
permission_classes = (permissions.AllowAny,)
def get_object(self):
id = self.kwargs.get('id')
if id:
policy = get_object_or_404(DcPolicy, id=id)
return policy
class PolicyHistoryDetailView(generics.ListAPIView):
serializer_class = serializers.DcPolicyStateHistorySerializer
permission_classes = (permissions.AllowAny,)
def get_queryset(self):
id = self.kwargs.get('id')
policy = get_object_or_404(DcPolicy, id=id)
return policy.histories.all() | [
"policy.serializers.DcPolicySerializer",
"rest_framework.response.Response",
"django.shortcuts.get_object_or_404",
"policy.models.DcPolicy.objects.all"
] | [((756, 818), 'rest_framework.response.Response', 'Response', ([], {'data': 'serializer.data', 'status': 'status.HTTP_201_CREATED'}), '(data=serializer.data, status=status.HTTP_201_CREATED)\n', (764, 818), False, 'from rest_framework.response import Response\n'), ((1603, 1656), 'rest_framework.response.Response', 'Response', ([], {'data': '{}', 'status': 'status.HTTP_400_BAD_REQUEST'}), '(data={}, status=status.HTTP_400_BAD_REQUEST)\n', (1611, 1656), False, 'from rest_framework.response import Response\n'), ((1853, 1875), 'policy.models.DcPolicy.objects.all', 'DcPolicy.objects.all', ([], {}), '()\n', (1873, 1875), False, 'from policy.models import DcPolicy, DcPolicyHistory\n'), ((2599, 2633), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DcPolicy'], {'id': 'id'}), '(DcPolicy, id=id)\n', (2616, 2633), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((1008, 1048), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DcPolicy'], {'id': 'quote_id'}), '(DcPolicy, id=quote_id)\n', (1025, 1048), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((2287, 2321), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DcPolicy'], {'id': 'id'}), '(DcPolicy, id=id)\n', (2304, 2321), False, 'from django.shortcuts import get_object_or_404, get_list_or_404\n'), ((1482, 1520), 'policy.serializers.DcPolicySerializer', 'serializers.DcPolicySerializer', (['policy'], {}), '(policy)\n', (1512, 1520), False, 'from policy import serializers\n')] |
#!/usr/bin/env python
# encoding: utf-8
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import TranslationDataset, Multi30k
from torchtext.data import Field, BucketIterator
import spacy
import random
import math
import os
spacy_de = spacy.load("de")
spacy_en = spacy.load("en")
def tokenize_de(text):
return [tok.text for tok in spacy_de.tokenizer(text)][::-1]
def tokenize_en(text):
return [tok.text for tok in spacy_en.tokenizer(text)]
SRC = Field(tokenize=tokenize_de, init_token="<sos>", eos_token="<eos>", lower=True)
TGT = Field(tokenize=tokenize_en, init_token="<sos>", eos_token="<eos>", lower=True)
train_data, valid_data, test_data = Multi30k.splits(exts=(".de", ".en"), fields=(SRC, TGT))
print("Number of training examples: {}".format(len(train_data.examples)))
print("Number of validation examples: {}".format(len(valid_data.examples)))
print("Number of testing examples: {}".format(len(test_data.examples)))
print(vars(train_data.examples[0]))
SRC.build_vocab(train_data, min_freq=2)
TGT.build_vocab(train_data, min_freq=2)
BATCH_SIZE = 128
device="cuda"
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data), batch_size=BATCH_SIZE, device=device)
| [
"spacy.load",
"torchtext.data.BucketIterator.splits",
"torchtext.datasets.Multi30k.splits",
"torchtext.data.Field"
] | [((276, 292), 'spacy.load', 'spacy.load', (['"""de"""'], {}), "('de')\n", (286, 292), False, 'import spacy\n'), ((304, 320), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (314, 320), False, 'import spacy\n'), ((499, 577), 'torchtext.data.Field', 'Field', ([], {'tokenize': 'tokenize_de', 'init_token': '"""<sos>"""', 'eos_token': '"""<eos>"""', 'lower': '(True)'}), "(tokenize=tokenize_de, init_token='<sos>', eos_token='<eos>', lower=True)\n", (504, 577), False, 'from torchtext.data import Field, BucketIterator\n'), ((584, 662), 'torchtext.data.Field', 'Field', ([], {'tokenize': 'tokenize_en', 'init_token': '"""<sos>"""', 'eos_token': '"""<eos>"""', 'lower': '(True)'}), "(tokenize=tokenize_en, init_token='<sos>', eos_token='<eos>', lower=True)\n", (589, 662), False, 'from torchtext.data import Field, BucketIterator\n'), ((700, 755), 'torchtext.datasets.Multi30k.splits', 'Multi30k.splits', ([], {'exts': "('.de', '.en')", 'fields': '(SRC, TGT)'}), "(exts=('.de', '.en'), fields=(SRC, TGT))\n", (715, 755), False, 'from torchtext.datasets import TranslationDataset, Multi30k\n'), ((1179, 1280), 'torchtext.data.BucketIterator.splits', 'BucketIterator.splits', (['(train_data, valid_data, test_data)'], {'batch_size': 'BATCH_SIZE', 'device': 'device'}), '((train_data, valid_data, test_data), batch_size=\n BATCH_SIZE, device=device)\n', (1200, 1280), False, 'from torchtext.data import Field, BucketIterator\n')] |
import imageio
import matplotlib.pyplot as plt
import numpy as np
import sys
link_line_style = {'color': 'gray', 'linestyle': 'dashed', 'linewidth': 1, 'zorder': 1}
def progress(count, total):
bar_len = 60
filled_len = int(round(bar_len * count / total))
percents = round(100 * count / total, 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] (%s%s)\r' % (bar, percents, '%'))
sys.stdout.flush()
def affine(a, b, t):
return a + (b - a) * t
def init_figure(fig_size):
fig = plt.figure(figsize=fig_size)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.gca().set_aspect('equal')
return fig
def draw_base(degree, points, lerp_0, center):
for i in range(degree):
plt.plot(points[i:i + 2, 0], points[i:i + 2, 1], **link_line_style)
for i in range(degree + 1):
plt.plot([points[i][0], lerp_0[i][0]], [points[i][1], lerp_0[i][1]], **link_line_style)
plt.scatter(points[:, 0], points[:, 1], c='blue', zorder=2)
plt.scatter(center[0], center[1], c='#ec407a', marker='+', zorder=2)
def draw_lerp(lerp, i):
colors = ['c', 'm']
plt.scatter(lerp[:, 0], lerp[:, 1], c='green', s=10, zorder=2)
for j in range(lerp.shape[0] - 1):
plt.plot(lerp[j:j + 2, 0], lerp[j:j + 2, 1], color=colors[i % 2], linewidth=1, zorder=1)
def eval_point(degree, lerp, t):
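    # De Casteljau-style evaluation: repeatedly interpolate between neighbouring
    # points (carried in their weighted/projected form) until a single point remains,
    # drawing each intermediate control polygon along the way.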
for i in range(1, degree + 1):
draw_lerp(lerp, i)
lerp = np.array([affine(*lerp[j:j + 2], t) for j in range(0, degree - i + 1)])
return lerp[0]
def draw_eval_points(eval_points, new_point):
plt.scatter(new_point[0], new_point[1], c='y', s=10, zorder=2)
plt.plot(eval_points[:, 0], eval_points[:, 1], 'k-', zorder=1)
plt.scatter(eval_points[-1][0], eval_points[-1][1], s=15, edgecolors='k', facecolors='none', zorder=2)
plt.plot([eval_points[-1][0], new_point[0]], [eval_points[-1][1], new_point[1]], **link_line_style)
def fig2img(fig):
fig.canvas.draw()
w, h = fig.canvas.get_width_height()
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
return img.reshape((h, w, 3))
def main(points, center, delta, outfile, out_size, fps):
out_mp4 = imageio.get_writer(outfile + '.mp4', fps=fps)
out_gif = imageio.get_writer(outfile + '.gif', fps=fps)
degree = points.shape[0] - 1
lerp_0 = np.array([np.concatenate((affine(center, _[:2], _[2]), _[2:])) for _ in points])
ts = np.concatenate((np.arange(0, 1, delta), [1]))
total = ts.shape[0]
eval_points = np.array([]).reshape((0, 2))
for i, t in enumerate(ts):
progress(i, total)
fig = init_figure((out_size[0] / 100, out_size[1] / 100))
draw_base(degree, points, lerp_0, center)
new_point = eval_point(degree, lerp_0, t)
eval_points = np.concatenate((eval_points, [affine(center, new_point[:2], 1. / new_point[2])]))
draw_eval_points(eval_points, new_point)
img = fig2img(fig)
out_mp4.append_data(img)
out_gif.append_data(img)
plt.close(fig)
progress(total, total)
out_mp4.close()
out_gif.close()
if __name__ == '__main__':
# x, y, weight
points = np.array([
[0, 2, 1],
[0, 5.5, 1.5],
[2.5, 8, 0.5],
[6, 8, 1.5],
[8, 8, 0.5],
[8, 3, 1.5],
[12, 3, 1]
])
center = np.array([6, 2])
delta = 0.01
outfile = 'output'
# width, height
out_size = (1280, 1024)
fps = 10
main(points, center, delta, outfile, out_size, fps)
| [
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.Axes",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"sys.stdout.flush",
"imageio.get_writer",
"sys.stdout.write"
] | [((374, 430), 'sys.stdout.write', 'sys.stdout.write', (["('[%s] (%s%s)\\r' % (bar, percents, '%'))"], {}), "('[%s] (%s%s)\\r' % (bar, percents, '%'))\n", (390, 430), False, 'import sys\n'), ((435, 453), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (451, 453), False, 'import sys\n'), ((543, 571), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_size'}), '(figsize=fig_size)\n', (553, 571), True, 'import matplotlib.pyplot as plt\n'), ((581, 616), 'matplotlib.pyplot.Axes', 'plt.Axes', (['fig', '[0.0, 0.0, 1.0, 1.0]'], {}), '(fig, [0.0, 0.0, 1.0, 1.0])\n', (589, 616), True, 'import matplotlib.pyplot as plt\n'), ((990, 1049), 'matplotlib.pyplot.scatter', 'plt.scatter', (['points[:, 0]', 'points[:, 1]'], {'c': '"""blue"""', 'zorder': '(2)'}), "(points[:, 0], points[:, 1], c='blue', zorder=2)\n", (1001, 1049), True, 'import matplotlib.pyplot as plt\n'), ((1054, 1122), 'matplotlib.pyplot.scatter', 'plt.scatter', (['center[0]', 'center[1]'], {'c': '"""#ec407a"""', 'marker': '"""+"""', 'zorder': '(2)'}), "(center[0], center[1], c='#ec407a', marker='+', zorder=2)\n", (1065, 1122), True, 'import matplotlib.pyplot as plt\n'), ((1177, 1239), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lerp[:, 0]', 'lerp[:, 1]'], {'c': '"""green"""', 's': '(10)', 'zorder': '(2)'}), "(lerp[:, 0], lerp[:, 1], c='green', s=10, zorder=2)\n", (1188, 1239), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1693), 'matplotlib.pyplot.scatter', 'plt.scatter', (['new_point[0]', 'new_point[1]'], {'c': '"""y"""', 's': '(10)', 'zorder': '(2)'}), "(new_point[0], new_point[1], c='y', s=10, zorder=2)\n", (1642, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1760), 'matplotlib.pyplot.plot', 'plt.plot', (['eval_points[:, 0]', 'eval_points[:, 1]', '"""k-"""'], {'zorder': '(1)'}), "(eval_points[:, 0], eval_points[:, 1], 'k-', zorder=1)\n", (1706, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1871), 'matplotlib.pyplot.scatter', 'plt.scatter', (['eval_points[-1][0]', 'eval_points[-1][1]'], {'s': '(15)', 'edgecolors': '"""k"""', 'facecolors': '"""none"""', 'zorder': '(2)'}), "(eval_points[-1][0], eval_points[-1][1], s=15, edgecolors='k',\n facecolors='none', zorder=2)\n", (1776, 1871), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1976), 'matplotlib.pyplot.plot', 'plt.plot', (['[eval_points[-1][0], new_point[0]]', '[eval_points[-1][1], new_point[1]]'], {}), '([eval_points[-1][0], new_point[0]], [eval_points[-1][1], new_point\n [1]], **link_line_style)\n', (1880, 1976), True, 'import matplotlib.pyplot as plt\n'), ((2229, 2274), 'imageio.get_writer', 'imageio.get_writer', (["(outfile + '.mp4')"], {'fps': 'fps'}), "(outfile + '.mp4', fps=fps)\n", (2247, 2274), False, 'import imageio\n'), ((2289, 2334), 'imageio.get_writer', 'imageio.get_writer', (["(outfile + '.gif')"], {'fps': 'fps'}), "(outfile + '.gif', fps=fps)\n", (2307, 2334), False, 'import imageio\n'), ((3215, 3321), 'numpy.array', 'np.array', (['[[0, 2, 1], [0, 5.5, 1.5], [2.5, 8, 0.5], [6, 8, 1.5], [8, 8, 0.5], [8, 3, \n 1.5], [12, 3, 1]]'], {}), '([[0, 2, 1], [0, 5.5, 1.5], [2.5, 8, 0.5], [6, 8, 1.5], [8, 8, 0.5],\n [8, 3, 1.5], [12, 3, 1]])\n', (3223, 3321), True, 'import numpy as np\n'), ((3393, 3409), 'numpy.array', 'np.array', (['[6, 2]'], {}), '([6, 2])\n', (3401, 3409), True, 'import numpy as np\n'), ((790, 857), 'matplotlib.pyplot.plot', 'plt.plot', (['points[i:i + 2, 0]', 'points[i:i + 2, 1]'], {}), '(points[i:i + 2, 0], points[i:i + 2, 1], **link_line_style)\n', (798, 857), True, 'import matplotlib.pyplot as 
plt\n'), ((898, 990), 'matplotlib.pyplot.plot', 'plt.plot', (['[points[i][0], lerp_0[i][0]]', '[points[i][1], lerp_0[i][1]]'], {}), '([points[i][0], lerp_0[i][0]], [points[i][1], lerp_0[i][1]], **\n link_line_style)\n', (906, 990), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1380), 'matplotlib.pyplot.plot', 'plt.plot', (['lerp[j:j + 2, 0]', 'lerp[j:j + 2, 1]'], {'color': 'colors[i % 2]', 'linewidth': '(1)', 'zorder': '(1)'}), '(lerp[j:j + 2, 0], lerp[j:j + 2, 1], color=colors[i % 2], linewidth\n =1, zorder=1)\n', (1295, 1380), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3085), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3080, 3085), True, 'import matplotlib.pyplot as plt\n'), ((660, 669), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (667, 669), True, 'import matplotlib.pyplot as plt\n'), ((2489, 2511), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'delta'], {}), '(0, 1, delta)\n', (2498, 2511), True, 'import numpy as np\n'), ((2562, 2574), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2570, 2574), True, 'import numpy as np\n')] |
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
from flask import Flask,request, jsonify
import GetUserTextNoun
import pandas as pd
app = Flask(__name__)
@app.route('/')
def hello():
return "Hello Wor치ld!"
# Install Python 3
# sudo pip3 install flask - installs Flask for Python 3
@app.route('/info')
def info():
return 'Info'
@app.route('/data', methods = ['GET']) # endpoint where data is sent, from the client's perspective
def userLogin():
print("python flask server")
str = request.args.get('str',"test")
print(str)
obj = GetUserTextNoun.get_tokens(str)
print(obj)
return "str"
@app.route('/adsf', methods = ['POST']) # receives the self-introduction (cover letter) sentences sent from iOS
def test():
print(request.get_json())
if __name__ == '__main__':
app.run()
# def get_tokens(x):
# mecab = Mecab()
# try:
# return [i for i in mecab.nouns(x) if len(i) > 1] if x else []
# except Exception as e:
# if str(x) == 'nan':
# return []
# print(e)
# print(str(x))
# raise e
#
#
# def getNonunsData():
# df = pd.read_csv('../../../Desktop/RelayA/dummy_users.tsv', sep='\t')
# df['user_mecab'] = df['user.description'].map(get_tokens)
# df['user_mecab_len'] = df['user_mecab'].map(len)
# return df
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| [
"flask.request.args.get",
"flask.request.get_json",
"GetUserTextNoun.get_tokens",
"flask.Flask"
] | [((277, 292), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (282, 292), False, 'from flask import Flask, request, jsonify\n'), ((579, 610), 'flask.request.args.get', 'request.args.get', (['"""str"""', '"""test"""'], {}), "('str', 'test')\n", (595, 610), False, 'from flask import Flask, request, jsonify\n'), ((635, 666), 'GetUserTextNoun.get_tokens', 'GetUserTextNoun.get_tokens', (['str'], {}), '(str)\n', (661, 666), False, 'import GetUserTextNoun\n'), ((790, 808), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (806, 808), False, 'from flask import Flask, request, jsonify\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 11:01:44 2020
@author: german
"""
import numpy as np
import matplotlib.pyplot as plt
N_x = 1024
x = np.linspace(-8.0,8.0,N_x)
V = np.empty([N_x,2],dtype=np.complex)
V[:,0] = np.cos(2*np.pi*x+np.pi/3.314) + np.cos(2.0*2*np.pi*x+np.pi/5.314)
V[:,1] = np.cos(3.75*2*np.pi*x+np.pi/3.314) + np.cos(4.0*2*np.pi*x+np.pi/5.314)
#plt.plot(x,V)
#plt.show()
for i in [0,1]:
V_F = np.fft.fft(V[:,i])/N_x
k = np.fft.fftfreq(N_x)/(16.0/N_x)
#plt.plot(k,np.real(V_F),k,np.imag(V_F))
#plt.show()
V_F_shifted = np.fft.fftshift(V_F)
k_shifted = (np.linspace(-N_x/2,N_x/2-1,N_x))*(64.0/N_x)
plt.plot(k_shifted[int(N_x/2)-100:int(N_x/2)+100],np.real(V_F_shifted)[int(N_x/2)-100:int(N_x/2)+100],k_shifted[int(N_x/2)-100:int(N_x/2)+100],np.imag(V_F_shifted)[int(N_x/2)-100:int(N_x/2)+100])
plt.show()
#H_m[,i]
| [
"numpy.fft.fftfreq",
"numpy.fft.fft",
"numpy.real",
"numpy.linspace",
"numpy.empty",
"numpy.cos",
"numpy.fft.fftshift",
"numpy.imag",
"matplotlib.pyplot.show"
] | [((176, 203), 'numpy.linspace', 'np.linspace', (['(-8.0)', '(8.0)', 'N_x'], {}), '(-8.0, 8.0, N_x)\n', (187, 203), True, 'import numpy as np\n'), ((206, 242), 'numpy.empty', 'np.empty', (['[N_x, 2]'], {'dtype': 'np.complex'}), '([N_x, 2], dtype=np.complex)\n', (214, 242), True, 'import numpy as np\n'), ((251, 288), 'numpy.cos', 'np.cos', (['(2 * np.pi * x + np.pi / 3.314)'], {}), '(2 * np.pi * x + np.pi / 3.314)\n', (257, 288), True, 'import numpy as np\n'), ((283, 326), 'numpy.cos', 'np.cos', (['(2.0 * 2 * np.pi * x + np.pi / 5.314)'], {}), '(2.0 * 2 * np.pi * x + np.pi / 5.314)\n', (289, 326), True, 'import numpy as np\n'), ((326, 370), 'numpy.cos', 'np.cos', (['(3.75 * 2 * np.pi * x + np.pi / 3.314)'], {}), '(3.75 * 2 * np.pi * x + np.pi / 3.314)\n', (332, 370), True, 'import numpy as np\n'), ((363, 406), 'numpy.cos', 'np.cos', (['(4.0 * 2 * np.pi * x + np.pi / 5.314)'], {}), '(4.0 * 2 * np.pi * x + np.pi / 5.314)\n', (369, 406), True, 'import numpy as np\n'), ((595, 615), 'numpy.fft.fftshift', 'np.fft.fftshift', (['V_F'], {}), '(V_F)\n', (610, 615), True, 'import numpy as np\n'), ((882, 892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (890, 892), True, 'import matplotlib.pyplot as plt\n'), ((451, 470), 'numpy.fft.fft', 'np.fft.fft', (['V[:, i]'], {}), '(V[:, i])\n', (461, 470), True, 'import numpy as np\n'), ((484, 503), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['N_x'], {}), '(N_x)\n', (498, 503), True, 'import numpy as np\n'), ((633, 672), 'numpy.linspace', 'np.linspace', (['(-N_x / 2)', '(N_x / 2 - 1)', 'N_x'], {}), '(-N_x / 2, N_x / 2 - 1, N_x)\n', (644, 672), True, 'import numpy as np\n'), ((732, 752), 'numpy.real', 'np.real', (['V_F_shifted'], {}), '(V_F_shifted)\n', (739, 752), True, 'import numpy as np\n'), ((825, 845), 'numpy.imag', 'np.imag', (['V_F_shifted'], {}), '(V_F_shifted)\n', (832, 845), True, 'import numpy as np\n')] |
from setuptools import setup
setup(name='decode',
version='0.1.0',
packages=['decode'],
entry_points={
'console_scripts': [
'decode = decode.__main__:main'
]
},
) | [
"setuptools.setup"
] | [((30, 162), 'setuptools.setup', 'setup', ([], {'name': '"""decode"""', 'version': '"""0.1.0"""', 'packages': "['decode']", 'entry_points': "{'console_scripts': ['decode = decode.__main__:main']}"}), "(name='decode', version='0.1.0', packages=['decode'], entry_points={\n 'console_scripts': ['decode = decode.__main__:main']})\n", (35, 162), False, 'from setuptools import setup\n')] |
from django.db import models
# Create your models here.
class App(models.Model):
'''
Represents an application to be submitted to the store
'''
name = models.CharField(max_length=100)
version = models.CharField(max_length=20)
mini_description = models.CharField(max_length=120)
description = models.TextField(max_length=300)
copyright = models.CharField(max_length=120)
publisher = models.ForeignKey(
'Publisher', on_delete=models.CASCADE, related_name='apps')
categories = models.ManyToManyField('Category')
class Publisher(models.Model):
'''
Represents a publisher of an app. Publisher can publish
many apps
'''
name = models.CharField(max_length=100, help_text='Name of publisher')
website = models.URLField(help_text='Official website of app')
support_url = models.URLField(help_text='Link to useful app resources')
privacy_policy_url = models.URLField(help_text='Privacy policy link')
class Category(models.Model):
'''
    Represents various app categories
'''
name = models.CharField(max_length=100, help_text='Category name')
description = models.TextField(
max_length=200, help_text='Category description')
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((170, 202), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (186, 202), False, 'from django.db import models\n'), ((217, 248), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (233, 248), False, 'from django.db import models\n'), ((272, 304), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)'}), '(max_length=120)\n', (288, 304), False, 'from django.db import models\n'), ((323, 355), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (339, 355), False, 'from django.db import models\n'), ((372, 404), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)'}), '(max_length=120)\n', (388, 404), False, 'from django.db import models\n'), ((421, 498), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Publisher"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""apps"""'}), "('Publisher', on_delete=models.CASCADE, related_name='apps')\n", (438, 498), False, 'from django.db import models\n'), ((525, 559), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Category"""'], {}), "('Category')\n", (547, 559), False, 'from django.db import models\n'), ((694, 757), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'help_text': '"""Name of publisher"""'}), "(max_length=100, help_text='Name of publisher')\n", (710, 757), False, 'from django.db import models\n'), ((772, 824), 'django.db.models.URLField', 'models.URLField', ([], {'help_text': '"""Official website of app"""'}), "(help_text='Official website of app')\n", (787, 824), False, 'from django.db import models\n'), ((843, 900), 'django.db.models.URLField', 'models.URLField', ([], {'help_text': '"""Link to useful app resources"""'}), "(help_text='Link to useful app resources')\n", (858, 900), False, 'from django.db import models\n'), ((926, 974), 'django.db.models.URLField', 'models.URLField', ([], {'help_text': '"""Privacy policy link"""'}), "(help_text='Privacy policy link')\n", (941, 974), False, 'from django.db import models\n'), ((1073, 1132), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'help_text': '"""Category name"""'}), "(max_length=100, help_text='Category name')\n", (1089, 1132), False, 'from django.db import models\n'), ((1151, 1217), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)', 'help_text': '"""Category description"""'}), "(max_length=200, help_text='Category description')\n", (1167, 1217), False, 'from django.db import models\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEngineView
if __name__ == '__main__':
app = QApplication([])
view = QWebEngineView()
view.show()
view.setHtml("""\
<html>
<body>
<iframe width="560" height="315" src="https://www.youtube.com/embed/Cb-srOfRqNc" frameborder="0" allowfullscreen></iframe>
</body>
</html>
""")
app.exec()
| [
"PyQt5.QtWebEngineWidgets.QWebEngineView",
"PyQt5.QtWidgets.QApplication"
] | [((206, 222), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['[]'], {}), '([])\n', (218, 222), False, 'from PyQt5.QtWidgets import QApplication\n'), ((235, 251), 'PyQt5.QtWebEngineWidgets.QWebEngineView', 'QWebEngineView', ([], {}), '()\n', (249, 251), False, 'from PyQt5.QtWebEngineWidgets import QWebEngineView\n')] |
#!/usr/bin/env python
"""
This is a working literal translation of the jq based moby-download frozen
image tool.
Could be done far smaller.
"""
import os
import signal
import sys
import time
from devapp.app import flag, run_app
flag.string('dir', './images', 'Exisiting target dir', short_name='d')
flag.string('repo', 'busybox:latest', 'repo')
def cleanup(*args):
print('Exiting')
os.system('touch /root/foooo')
sys.exit(0)
signal.signal(signal.SIGINT, cleanup)
signal.signal(signal.SIGTERM, cleanup)
def main():
for i in range(1, 100):
        print('starting')
while True:
time.sleep(60)
run = lambda: run_app(main)
if __name__ == '__main__':
run()
| [
"signal.signal",
"devapp.app.flag.string",
"devapp.app.run_app",
"time.sleep",
"sys.exit",
"os.system"
] | [((234, 304), 'devapp.app.flag.string', 'flag.string', (['"""dir"""', '"""./images"""', '"""Exisiting target dir"""'], {'short_name': '"""d"""'}), "('dir', './images', 'Exisiting target dir', short_name='d')\n", (245, 304), False, 'from devapp.app import flag, run_app\n'), ((305, 350), 'devapp.app.flag.string', 'flag.string', (['"""repo"""', '"""busybox:latest"""', '"""repo"""'], {}), "('repo', 'busybox:latest', 'repo')\n", (316, 350), False, 'from devapp.app import flag, run_app\n'), ((447, 484), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'cleanup'], {}), '(signal.SIGINT, cleanup)\n', (460, 484), False, 'import signal\n'), ((485, 523), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'cleanup'], {}), '(signal.SIGTERM, cleanup)\n', (498, 523), False, 'import signal\n'), ((398, 428), 'os.system', 'os.system', (['"""touch /root/foooo"""'], {}), "('touch /root/foooo')\n", (407, 428), False, 'import os\n'), ((433, 444), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (441, 444), False, 'import sys\n'), ((647, 660), 'devapp.app.run_app', 'run_app', (['main'], {}), '(main)\n', (654, 660), False, 'from devapp.app import flag, run_app\n'), ((616, 630), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (626, 630), False, 'import time\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import Dict, TypeVar
from azure.cli.command_modules.acs._client_factory import cf_agent_pools
from azure.cli.command_modules.acs._consts import DecoratorMode
from azure.cli.command_modules.acs.decorator import validate_decorator_mode
from azure.cli.core import AzCommandsLoader
from azure.cli.core.azclierror import (
CLIInternalError,
InvalidArgumentValueError,
)
from azure.cli.command_modules.acs._validators import (
extract_comma_separated_string,
)
from azure.cli.core.util import sdk_no_wait
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.profiles import ResourceType
from knack.log import get_logger
logger = get_logger(__name__)
# type variables
AgentPool = TypeVar("AgentPool")
AgentPoolsOperations = TypeVar("AgentPoolsOperations")
# pylint: disable=too-many-instance-attributes, too-few-public-methods
class AKSAgentPoolModels:
"""Store the models used in aks_agentpool_add and aks_agentpool_update.
The api version of the class corresponding to a model is determined by resource_type.
"""
def __init__(
self,
cmd: AzCommandsLoader,
resource_type: ResourceType,
):
self.__cmd = cmd
self.resource_type = resource_type
self.AgentPool = self.__cmd.get_models(
"AgentPool",
resource_type=self.resource_type,
operation_group="agent_pools",
)
self.AgentPoolUpgradeSettings = self.__cmd.get_models(
"AgentPoolUpgradeSettings",
resource_type=self.resource_type,
operation_group="agent_pools",
)
# pylint: disable=too-many-public-methods
class AKSAgentPoolContext:
"""Implement getter functions for all parameters in aks_agentpool_add and aks_agentpool_update.
"""
def __init__(
self,
cmd: AzCliCommand,
raw_parameters: Dict,
models: AKSAgentPoolModels,
decorator_mode: DecoratorMode,
):
if not isinstance(raw_parameters, dict):
raise CLIInternalError(
"Unexpected raw_parameters object with type '{}'.".format(
type(raw_parameters)
)
)
if not validate_decorator_mode(decorator_mode):
raise CLIInternalError(
"Unexpected decorator_mode '{}' with type '{}'.".format(
decorator_mode, type(decorator_mode)
)
)
self.cmd = cmd
self.raw_param = raw_parameters
self.models = models
self.decorator_mode = decorator_mode
self.intermediates = dict()
self.agentpool = None
def attach_agentpool(self, agentpool: AgentPool) -> None:
"""Attach the AgentPool object to the context.
The `agentpool` object is only allowed to be attached once, and attaching again will raise a CLIInternalError.
:return: None
"""
if self.agentpool is None:
self.agentpool = agentpool
else:
msg = "the same" if self.agentpool == agentpool else "different"
raise CLIInternalError(
"Attempting to attach the `agentpool` object again, the two objects are {}.".format(
msg
)
)
def get_resource_group_name(self) -> str:
"""Obtain the value of resource_group_name.
Note: resource_group_name will not be decorated into the `agentpool` object.
This is a required parameter and its value should be provided by user explicitly.
:return: string
"""
# read the original value passed by the command
resource_group_name = self.raw_param.get("resource_group_name")
# this parameter does not need dynamic completion
# this parameter does not need validation
return resource_group_name
def get_cluster_name(self) -> str:
"""Obtain the value of cluster_name.
Note: cluster_name will not be decorated into the `agentpool` object.
This is a required parameter and its value should be provided by user explicitly.
:return: string
"""
# read the original value passed by the command
cluster_name = self.raw_param.get("cluster_name")
# this parameter does not need dynamic completion
# this parameter does not need validation
return cluster_name
def _get_nodepool_name(self, enable_validation: bool = False) -> str:
"""Internal function to obtain the value of nodepool_name.
Note: SDK performs the following validation {'required': True, 'pattern': r'^[a-z][a-z0-9]{0,11}$'}.
This is a required parameter and its value should be provided by user explicitly.
This function supports the option of enable_validation. When enabled, it will check if the given nodepool name
is used by any nodepool of the cluster, if so, raise the InvalidArgumentValueError. This verification operation
will send a get request, skip the validation appropriately to avoid multiple api calls.
:return: string
"""
# read the original value passed by the command
nodepool_name = self.raw_param.get("nodepool_name")
# try to read the property value corresponding to the parameter from the `agentpool` object
if self.agentpool and self.agentpool.name is not None:
nodepool_name = self.agentpool.name
# this parameter does not need dynamic completion
# validation
if enable_validation:
instances = cf_agent_pools.list(self.get_resource_group_name, self.get_cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise InvalidArgumentValueError(
"Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(
nodepool_name
)
)
return nodepool_name
def get_nodepool_name(self) -> str:
"""Obtain the value of nodepool_name.
Note: SDK performs the following validation {'required': True, 'pattern': r'^[a-z][a-z0-9]{0,11}$'}.
This is a required parameter and its value should be provided by user explicitly.
This function will verify the parameter by default. It will check if the given nodepool name is used by any
nodepool of the cluster, if so, raise the InvalidArgumentValueError. This verification operation will send a
get request, may use the internal function to skip the validation appropriately and avoid multiple api calls.
:return: string
"""
return self._get_nodepool_name(enable_validation=True)
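    # Illustrative note (added; the example names are hypothetical): given the
    # SDK pattern r'^[a-z][a-z0-9]{0,11}$' quoted above, a name such as
    # "nodepool1" is acceptable, while "Nodepool-1" (uppercase/hyphen) or any
    # name longer than 12 characters would be rejected by the SDK validation.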
def get_max_surge(self):
"""Obtain the value of max_surge.
:return: string
"""
# read the original value passed by the command
max_surge = self.raw_param.get("max_surge")
# try to read the property value corresponding to the parameter from the `mc` object.
if (
self.agentpool and
self.agentpool.upgrade_settings and
self.agentpool.upgrade_settings.max_surge is not None
):
max_surge = self.agentpool.upgrade_settings.max_surge
# this parameter does not need dynamic completion
# this parameter does not need validation
return max_surge
def get_aks_custom_headers(self) -> Dict[str, str]:
"""Obtain the value of aks_custom_headers.
Note: aks_custom_headers will not be decorated into the `agentpool` object.
This function will normalize the parameter by default. It will call "extract_comma_separated_string" to extract
comma-separated key value pairs from the string.
:return: dictionary
"""
# read the original value passed by the command
aks_custom_headers = self.raw_param.get("aks_custom_headers")
# normalize user-provided header
# usually the purpose is to enable (preview) features through AKSHTTPCustomFeatures
aks_custom_headers = extract_comma_separated_string(
aks_custom_headers,
enable_strip=True,
extract_kv=True,
default_value={},
)
# this parameter does not need validation
return aks_custom_headers
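    # Illustrative note (added; the header value is an assumption based on the
    # docstring above, not taken from this file): a raw value such as
    # "AKSHTTPCustomFeatures=Microsoft.ContainerService/SomeFeaturePreview"
    # would be parsed by extract_comma_separated_string into
    # {"AKSHTTPCustomFeatures": "Microsoft.ContainerService/SomeFeaturePreview"}.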
def get_no_wait(self) -> bool:
"""Obtain the value of no_wait.
Note: no_wait will not be decorated into the `agentpool` object.
:return: bool
"""
# read the original value passed by the command
no_wait = self.raw_param.get("no_wait")
# this parameter does not need dynamic completion
# this parameter does not need validation
return no_wait
class AKSAgentPoolAddDecorator:
def __init__(
self,
cmd: AzCliCommand,
client: AgentPoolsOperations,
raw_parameters: Dict,
resource_type: ResourceType,
):
"""Internal controller of aks_agentpool_add.
Break down the all-in-one aks_agentpool_add function into several relatively independent functions (some of
them have a certain order dependency) that only focus on a specific profile or process a specific piece of
logic. In addition, an overall control function is provided. By calling the aforementioned independent functions
one by one, a complete AgentPool object is gradually decorated and finally requests are sent to create a node
pool.
"""
self.cmd = cmd
self.client = client
self.models = AKSAgentPoolModels(cmd, resource_type)
# store the context in the process of assemble the AgentPool object
self.context = AKSAgentPoolContext(cmd, raw_parameters, self.models, decorator_mode=DecoratorMode.CREATE)
def _ensure_agentpool(self, agentpool: AgentPool) -> None:
"""Internal function to ensure that the incoming `agentpool` object is valid and the same as the attached
`agentpool` object in the context.
If the incoming `agentpool` is not valid or is inconsistent with the `agentpool` in the context, raise a
CLIInternalError.
:return: None
"""
if not isinstance(agentpool, self.models.AgentPool):
raise CLIInternalError(
"Unexpected agentpool object with type '{}'.".format(type(agentpool))
)
if self.context.agentpool != agentpool:
raise CLIInternalError(
"Inconsistent state detected. The incoming `agentpool` "
"is not the same as the `agentpool` in the context."
)
def init_agentpool(self) -> AgentPool:
"""Initialize an AgentPool object with name and attach it to internal context.
Note: As a read only property, name would be ignored when serialized.
:return: the AgentPool object
"""
# Initialize a AgentPool object with name.
agentpool = self.models.AgentPool()
# Note: As a read only property, name would be ignored when serialized.
# Set the name property by explicit assignment, otherwise it will be ignored by initialization.
agentpool.name = self.context.get_nodepool_name()
# attach mc to AKSContext
self.context.attach_agentpool(agentpool)
return agentpool
def set_up_upgrade_settings(self, agentpool: AgentPool) -> AgentPool:
"""Set up upgrade settings for the AgentPool object.
:return: the AgentPool object
"""
self._ensure_agentpool(agentpool)
upgrade_settings = self.models.AgentPoolUpgradeSettings()
max_surge = self.context.get_max_surge()
if max_surge:
upgrade_settings.max_surge = max_surge
agentpool.upgrade_settings = upgrade_settings
return agentpool
def construct_default_agentpool_profile(self) -> AgentPool:
"""The overall controller used to construct the default AgentPool profile.
The completely constructed AgentPool object will later be passed as a parameter to the underlying SDK
(mgmt-containerservice) to send the actual request.
:return: the AgentPool object
"""
# initialize the AgentPool object
agentpool = self.init_agentpool()
# set up upgrade settings
agentpool = self.set_up_upgrade_settings(agentpool)
return agentpool
# pylint: disable=protected-access
def add_agentpool(self, agentpool: AgentPool) -> AgentPool:
"""Send request to add a new agentpool.
The function "sdk_no_wait" will be called to use the ContainerServiceClient to send a reqeust to add a new agent
pool to the cluster.
:return: the ManagedCluster object
"""
self._ensure_agentpool(agentpool)
return sdk_no_wait(
self.context.get_no_wait(),
self.client.begin_create_or_update,
self.context.get_resource_group_name(),
self.context.get_cluster_name(),
# validated in "init_agentpool", skip to avoid duplicate api calls
self.context._get_nodepool_name(enable_validation=False),
agentpool,
headers=self.context.get_aks_custom_headers(),
)
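# Hedged usage sketch (added for illustration; `cmd`, `client` and
# `raw_parameters` are assumed to be supplied by the CLI command layer, as
# described in the AKSAgentPoolAddDecorator docstring):
#
#   decorator = AKSAgentPoolAddDecorator(
#       cmd, client, raw_parameters, ResourceType.MGMT_CONTAINERSERVICE
#   )
#   agentpool = decorator.construct_default_agentpool_profile()
#   poller = decorator.add_agentpool(agentpool)
#
# i.e. the AgentPool profile is assembled step by step and then submitted.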
class AKSAgentPoolUpdateDecorator:
def __init__(
self,
cmd: AzCliCommand,
client: AgentPoolsOperations,
raw_parameters: Dict,
resource_type: ResourceType,
):
"""Internal controller of aks_agentpool_update.
Break down the all-in-one aks_agentpool_update function into several relatively independent functions (some of
them have a certain order dependency) that only focus on a specific profile or process a specific piece of
logic. In addition, an overall control function is provided. By calling the aforementioned independent functions
one by one, a complete AgentPool object is gradually decorated and finally requests are sent to update an
existing node pool.
"""
self.cmd = cmd
self.client = client
self.models = AKSAgentPoolModels(cmd, resource_type)
# store the context in the process of assemble the AgentPool object
self.context = AKSAgentPoolContext(cmd, raw_parameters, self.models, decorator_mode=DecoratorMode.UPDATE)
| [
"azure.cli.command_modules.acs._validators.extract_comma_separated_string",
"azure.cli.core.azclierror.CLIInternalError",
"knack.log.get_logger",
"azure.cli.command_modules.acs.decorator.validate_decorator_mode",
"azure.cli.command_modules.acs._client_factory.cf_agent_pools.list",
"typing.TypeVar"
] | [((1011, 1031), 'knack.log.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1021, 1031), False, 'from knack.log import get_logger\n'), ((1062, 1082), 'typing.TypeVar', 'TypeVar', (['"""AgentPool"""'], {}), "('AgentPool')\n", (1069, 1082), False, 'from typing import Dict, TypeVar\n'), ((1106, 1137), 'typing.TypeVar', 'TypeVar', (['"""AgentPoolsOperations"""'], {}), "('AgentPoolsOperations')\n", (1113, 1137), False, 'from typing import Dict, TypeVar\n'), ((8587, 8695), 'azure.cli.command_modules.acs._validators.extract_comma_separated_string', 'extract_comma_separated_string', (['aks_custom_headers'], {'enable_strip': '(True)', 'extract_kv': '(True)', 'default_value': '{}'}), '(aks_custom_headers, enable_strip=True,\n extract_kv=True, default_value={})\n', (8617, 8695), False, 'from azure.cli.command_modules.acs._validators import extract_comma_separated_string\n'), ((2560, 2599), 'azure.cli.command_modules.acs.decorator.validate_decorator_mode', 'validate_decorator_mode', (['decorator_mode'], {}), '(decorator_mode)\n', (2583, 2599), False, 'from azure.cli.command_modules.acs.decorator import validate_decorator_mode\n'), ((5938, 6010), 'azure.cli.command_modules.acs._client_factory.cf_agent_pools.list', 'cf_agent_pools.list', (['self.get_resource_group_name', 'self.get_cluster_name'], {}), '(self.get_resource_group_name, self.get_cluster_name)\n', (5957, 6010), False, 'from azure.cli.command_modules.acs._client_factory import cf_agent_pools\n'), ((10975, 11109), 'azure.cli.core.azclierror.CLIInternalError', 'CLIInternalError', (['"""Inconsistent state detected. The incoming `agentpool` is not the same as the `agentpool` in the context."""'], {}), "(\n 'Inconsistent state detected. The incoming `agentpool` is not the same as the `agentpool` in the context.'\n )\n", (10991, 11109), False, 'from azure.cli.core.azclierror import CLIInternalError, InvalidArgumentValueError\n')] |
from django.contrib import admin
from django.urls import path
from API import views
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token
urlpatterns = [
path('', views.index),
path('token-auth/', obtain_jwt_token),
path('token-refresh/', refresh_jwt_token),
path('token-verify/', verify_jwt_token),
path('listarCategorias', views.retornarCategorias),
path('categoriasList/', views.CategoriasList.as_view()),
path('categoriasList/<int:pk>', views.CategoriasGet.as_view()),
path('subcategoriasList/', views.SubCategoriasList.as_view()),
path('subcategoriasList/<int:pk>', views.SubCategoriasGet.as_view()),
path('bannerspublicitarios/',views.BannersPublicitariosGet.as_view()),
path('clienteRegister/',views.ClienteCreate.as_view()),
path('clienteRetrieve/',views.ClienteRetrieve.as_view()),
path('solicitudes/',views.SolicitudCreate.as_view()),
path('respuestassolicitud/', views.RespuestaSolicitudList.as_view()),
] | [
"API.views.SolicitudCreate.as_view",
"API.views.ClienteRetrieve.as_view",
"API.views.CategoriasList.as_view",
"API.views.CategoriasGet.as_view",
"API.views.SubCategoriasGet.as_view",
"API.views.SubCategoriasList.as_view",
"API.views.ClienteCreate.as_view",
"API.views.RespuestaSolicitudList.as_view",
"API.views.BannersPublicitariosGet.as_view",
"django.urls.path"
] | [((196, 217), 'django.urls.path', 'path', (['""""""', 'views.index'], {}), "('', views.index)\n", (200, 217), False, 'from django.urls import path\n'), ((223, 260), 'django.urls.path', 'path', (['"""token-auth/"""', 'obtain_jwt_token'], {}), "('token-auth/', obtain_jwt_token)\n", (227, 260), False, 'from django.urls import path\n'), ((266, 307), 'django.urls.path', 'path', (['"""token-refresh/"""', 'refresh_jwt_token'], {}), "('token-refresh/', refresh_jwt_token)\n", (270, 307), False, 'from django.urls import path\n'), ((313, 352), 'django.urls.path', 'path', (['"""token-verify/"""', 'verify_jwt_token'], {}), "('token-verify/', verify_jwt_token)\n", (317, 352), False, 'from django.urls import path\n'), ((358, 408), 'django.urls.path', 'path', (['"""listarCategorias"""', 'views.retornarCategorias'], {}), "('listarCategorias', views.retornarCategorias)\n", (362, 408), False, 'from django.urls import path\n'), ((438, 468), 'API.views.CategoriasList.as_view', 'views.CategoriasList.as_view', ([], {}), '()\n', (466, 468), False, 'from API import views\n'), ((507, 536), 'API.views.CategoriasGet.as_view', 'views.CategoriasGet.as_view', ([], {}), '()\n', (534, 536), False, 'from API import views\n'), ((570, 603), 'API.views.SubCategoriasList.as_view', 'views.SubCategoriasList.as_view', ([], {}), '()\n', (601, 603), False, 'from API import views\n'), ((645, 677), 'API.views.SubCategoriasGet.as_view', 'views.SubCategoriasGet.as_view', ([], {}), '()\n', (675, 677), False, 'from API import views\n'), ((713, 752), 'API.views.BannersPublicitariosGet.as_view', 'views.BannersPublicitariosGet.as_view', ([], {}), '()\n', (750, 752), False, 'from API import views\n'), ((783, 812), 'API.views.ClienteCreate.as_view', 'views.ClienteCreate.as_view', ([], {}), '()\n', (810, 812), False, 'from API import views\n'), ((843, 874), 'API.views.ClienteRetrieve.as_view', 'views.ClienteRetrieve.as_view', ([], {}), '()\n', (872, 874), False, 'from API import views\n'), ((901, 932), 'API.views.SolicitudCreate.as_view', 'views.SolicitudCreate.as_view', ([], {}), '()\n', (930, 932), False, 'from API import views\n'), ((968, 1006), 'API.views.RespuestaSolicitudList.as_view', 'views.RespuestaSolicitudList.as_view', ([], {}), '()\n', (1004, 1006), False, 'from API import views\n')] |
from __future__ import print_function
import matplotlib
matplotlib.use('agg')
import argparse
import os
import shutil
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
import models.wideresnet as models
import dataset.freesound_X as dataset
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig, lwlrap_accumulator, load_checkpoint
from tensorboardX import SummaryWriter
from fastai.basic_data import *
from fastai.basic_train import *
from fastai.train import *
from train import SemiLoss
model = models.WideResNet(num_classes=80)
train_labeled_set, train_unlabeled_set, val_set, test_set, train_unlabeled_warmstart_set, num_classes, pos_weights = dataset.get_freesound()
labeled_trainloader = data.DataLoader(train_labeled_set, batch_size=4, shuffle=True, num_workers=0, drop_last=True)
val_loader = data.DataLoader(val_set, batch_size=4, shuffle=False, num_workers=0)
train_criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters())
bunch = DataBunch(labeled_trainloader, val_loader, collate_fn=dataset.collate_fn, device=torch.device('cpu'))
learner = Learner(data=bunch, model=model, loss_func=train_criterion)
lr_find(learner)
fig = learner.recorder.plot(return_fig=True, suggestion=True)
fig.savefig('lr.png')
"matplotlib.use",
"dataset.freesound_X.get_freesound",
"models.wideresnet.WideResNet",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"torch.device"
] | [((57, 78), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (71, 78), False, 'import matplotlib\n'), ((806, 839), 'models.wideresnet.WideResNet', 'models.WideResNet', ([], {'num_classes': '(80)'}), '(num_classes=80)\n', (823, 839), True, 'import models.wideresnet as models\n'), ((957, 980), 'dataset.freesound_X.get_freesound', 'dataset.get_freesound', ([], {}), '()\n', (978, 980), True, 'import dataset.freesound_X as dataset\n'), ((1004, 1102), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_labeled_set'], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(0)', 'drop_last': '(True)'}), '(train_labeled_set, batch_size=4, shuffle=True, num_workers=\n 0, drop_last=True)\n', (1019, 1102), True, 'import torch.utils.data as data\n'), ((1111, 1179), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_set'], {'batch_size': '(4)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(val_set, batch_size=4, shuffle=False, num_workers=0)\n', (1126, 1179), True, 'import torch.utils.data as data\n'), ((1199, 1221), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (1219, 1221), True, 'import torch.nn as nn\n'), ((1355, 1374), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1367, 1374), False, 'import torch\n')] |
from tools.checks import field_quality_check
from tools.codelists import get_language_codelist
name = "language"
def test(value):
return value in get_language_codelist(), "not in codelist"
calculate = field_quality_check(name, test)
| [
"tools.codelists.get_language_codelist",
"tools.checks.field_quality_check"
] | [((210, 241), 'tools.checks.field_quality_check', 'field_quality_check', (['name', 'test'], {}), '(name, test)\n', (229, 241), False, 'from tools.checks import field_quality_check\n'), ((153, 176), 'tools.codelists.get_language_codelist', 'get_language_codelist', ([], {}), '()\n', (174, 176), False, 'from tools.codelists import get_language_codelist\n')] |
"""Unittests for metrics."""
from unittest import TestCase
import numpy as np
import torch
from sklearn.metrics import f1_score
from robustnessgym.core.metrics import accuracy, f1
from tests.testbeds import MockTestBedv0
class TestSlice(TestCase):
def setUp(self):
self.testbed = MockTestBedv0()
def test_accuracy_1(self):
# Create some data
predictions = [0, 1, 1, 0, 1, 2, 3, 7]
labels = [1, 0, 0, 0, 1, 2, 4, 8]
# Ground-truth score
gt_score = np.mean([(p == l) for p, l in zip(predictions, labels)])
# Accuracy using lists
score = accuracy(predictions, labels)
self.assertEqual(score, gt_score)
# Accuracy using np.ndarray
score = accuracy(np.array(predictions), np.array(labels))
self.assertEqual(score, gt_score)
# Accuracy using torch.tensor
score = accuracy(torch.tensor(predictions), torch.tensor(labels))
self.assertEqual(score, gt_score)
def test_accuracy_2(self):
# Create some data
predictions = []
labels = []
# Accuracy using lists
score = accuracy(predictions, labels)
self.assertTrue(np.isnan(score))
# Accuracy using np.ndarray
score = accuracy(np.array(predictions), np.array(labels))
self.assertTrue(np.isnan(score))
# Accuracy using torch.tensor
score = accuracy(torch.tensor(predictions), torch.tensor(labels))
self.assertTrue(np.isnan(score))
def test_accuracy_3(self):
# Create some data
predictions = [1, 2]
labels = [1]
# Mismatched lengths
with self.assertRaises(ValueError):
accuracy(predictions, labels)
def test_f1_1(self):
# Create some data
predictions = [0, 1, 1, 0, 1, 2, 3, 7]
labels = [1, 0, 0, 0, 1, 2, 4, 8]
with self.assertRaises(ValueError):
# F1 using lists
f1(predictions, labels)
with self.assertRaises(ValueError):
# F1 using np.ndarray
f1(np.array(predictions), np.array(labels))
with self.assertRaises(ValueError):
# F1 using torch.tensor
f1(torch.tensor(predictions), torch.tensor(labels))
def test_f1_2(self):
# Create some data
predictions = []
labels = []
# Ground-truth score
gt_score = f1_score(y_true=labels, y_pred=predictions)
# F1 using lists
score = f1(predictions, labels)
self.assertEqual(score, gt_score)
# F1 using np.ndarray
score = f1(np.array(predictions), np.array(labels))
self.assertEqual(score, gt_score)
# F1 using torch.tensor
score = f1(torch.tensor(predictions), torch.tensor(labels))
self.assertEqual(score, gt_score)
def test_f1_3(self):
# Create some data
predictions = [1, 2]
labels = [1]
# Mismatched lengths
with self.assertRaises(ValueError):
f1(predictions, labels)
| [
"tests.testbeds.MockTestBedv0",
"sklearn.metrics.f1_score",
"robustnessgym.core.metrics.accuracy",
"robustnessgym.core.metrics.f1",
"numpy.array",
"torch.tensor",
"numpy.isnan"
] | [((296, 311), 'tests.testbeds.MockTestBedv0', 'MockTestBedv0', ([], {}), '()\n', (309, 311), False, 'from tests.testbeds import MockTestBedv0\n'), ((614, 643), 'robustnessgym.core.metrics.accuracy', 'accuracy', (['predictions', 'labels'], {}), '(predictions, labels)\n', (622, 643), False, 'from robustnessgym.core.metrics import accuracy, f1\n'), ((1138, 1167), 'robustnessgym.core.metrics.accuracy', 'accuracy', (['predictions', 'labels'], {}), '(predictions, labels)\n', (1146, 1167), False, 'from robustnessgym.core.metrics import accuracy, f1\n'), ((2411, 2454), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'labels', 'y_pred': 'predictions'}), '(y_true=labels, y_pred=predictions)\n', (2419, 2454), False, 'from sklearn.metrics import f1_score\n'), ((2497, 2520), 'robustnessgym.core.metrics.f1', 'f1', (['predictions', 'labels'], {}), '(predictions, labels)\n', (2499, 2520), False, 'from robustnessgym.core.metrics import accuracy, f1\n'), ((748, 769), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (756, 769), True, 'import numpy as np\n'), ((771, 787), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (779, 787), True, 'import numpy as np\n'), ((895, 920), 'torch.tensor', 'torch.tensor', (['predictions'], {}), '(predictions)\n', (907, 920), False, 'import torch\n'), ((922, 942), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (934, 942), False, 'import torch\n'), ((1192, 1207), 'numpy.isnan', 'np.isnan', (['score'], {}), '(score)\n', (1200, 1207), True, 'import numpy as np\n'), ((1271, 1292), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (1279, 1292), True, 'import numpy as np\n'), ((1294, 1310), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1302, 1310), True, 'import numpy as np\n'), ((1336, 1351), 'numpy.isnan', 'np.isnan', (['score'], {}), '(score)\n', (1344, 1351), True, 'import numpy as np\n'), ((1417, 1442), 'torch.tensor', 'torch.tensor', (['predictions'], {}), '(predictions)\n', (1429, 1442), False, 'import torch\n'), ((1444, 1464), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (1456, 1464), False, 'import torch\n'), ((1490, 1505), 'numpy.isnan', 'np.isnan', (['score'], {}), '(score)\n', (1498, 1505), True, 'import numpy as np\n'), ((1702, 1731), 'robustnessgym.core.metrics.accuracy', 'accuracy', (['predictions', 'labels'], {}), '(predictions, labels)\n', (1710, 1731), False, 'from robustnessgym.core.metrics import accuracy, f1\n'), ((1960, 1983), 'robustnessgym.core.metrics.f1', 'f1', (['predictions', 'labels'], {}), '(predictions, labels)\n', (1962, 1983), False, 'from robustnessgym.core.metrics import accuracy, f1\n'), ((2613, 2634), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (2621, 2634), True, 'import numpy as np\n'), ((2636, 2652), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2644, 2652), True, 'import numpy as np\n'), ((2748, 2773), 'torch.tensor', 'torch.tensor', (['predictions'], {}), '(predictions)\n', (2760, 2773), False, 'import torch\n'), ((2775, 2795), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (2787, 2795), False, 'import torch\n'), ((3028, 3051), 'robustnessgym.core.metrics.f1', 'f1', (['predictions', 'labels'], {}), '(predictions, labels)\n', (3030, 3051), False, 'from robustnessgym.core.metrics import accuracy, f1\n'), ((2078, 2099), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (2086, 2099), True, 'import numpy as np\n'), ((2101, 2117), 'numpy.array', 'np.array', 
(['labels'], {}), '(labels)\n', (2109, 2117), True, 'import numpy as np\n'), ((2215, 2240), 'torch.tensor', 'torch.tensor', (['predictions'], {}), '(predictions)\n', (2227, 2240), False, 'import torch\n'), ((2242, 2262), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (2254, 2262), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : utils.py
@Desc : 工具模块
@Project : orfd-platform
@Contact : <EMAIL>
@License : (C)Copyright 2018-2019, TheFreer.NET
@WebSite : www.thefreer.net
@Modify Time @Author @Version
------------ ------- --------
2019/05/29 0:44 the freer 2.1
'''
import re
import pandas as pd
from collections import Counter
from setting import PATTERNS, AVG_SEGMENT_LENGTH, AVG_SEGMENT_NUMBER, AVG_DOC_LENGTH
# def is_valid_email(string):
# if re.match(PATTERNS["email"], string):
# return 1
# return 0
#
# def is_valid_contact(string):
# if re.match(PATTERNS["contact"], string):
# return 1
# return 0
#
# def is_valid_url(string):
# if re.match(PATTERNS["url"], string):
# return 1
# return 0
#
# def is_valid_time(string):
# if re.match(PATTERNS["work_time"], string):
# return 1
# return 0
#
def is_fresh(string):
'''
	Check whether the input job requirement includes "接受应届生" (accepts fresh graduates).
:param string:
:return:
'''
if len(re.split(",", string)) > 1:
return 1
return 0
def split_require(input_list):
'''
	Split job requirements.
	:param input_list: list of input requirement strings
	:return: list of education requirements, list of work-experience requirements
'''
edu_requires = []
work_requires = []
for inp in input_list:
try:
inp = re.sub(r",.*", "", inp)
r_list = re.split("_", inp)
edu_requires.append(r_list[0])
work_requires.append(r_list[1])
except:
edu_requires.append(inp)
work_requires.append(inp)
return edu_requires, work_requires
def split_welfare(string):
'''
	Split the welfare text; the scraped welfare info is stored in the unified format: w1_w2_w3
	:param string: input welfare string
	:return: list of welfare items
'''
try:
tmp_list = re.split(",", string)
welfare = re.split(r"_", tmp_list[0])
except:
welfare = ["None"]
return welfare
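# Illustrative example (added; the welfare strings are hypothetical): per the
# implementation above, split_welfare("insurance_bonus_annual leave,extra")
# returns ["insurance", "bonus", "annual leave"], and a non-string input
# (e.g. NaN) falls back to ["None"].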
def welfare_map(w_list, dic):
'''
	Map the input welfare categories to labels.
	:param w_list: list of categories
	:param dic: category-to-label dictionary
	:return: list of encoded labels
'''
new_welfare = []
for w in w_list:
if w in dic.keys():
new_welfare.append(dic[w])
else:
new_welfare.append(dic["others"])
return new_welfare
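# Illustrative example (added; the mapping dictionary is hypothetical):
# welfare_map(["insurance", "unknown perk"], {"insurance": 3, "others": 0})
# returns [3, 0] -- categories missing from the dictionary map to dic["others"].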
def welfare_count(input_list):
'''
	Count the category frequencies over the input category lists.
	:param input_list: list of category lists
	:return: Counter object holding the category frequency ranking
'''
welfare_list = []
for inp in input_list:
welfare_list += inp
return Counter(welfare_list)
def split_doc(doc):
'''
	Process the input paragraph text and output a segment with length < 168.
	:param doc: input paragraph
	:return: segment
'''
seg_list = re.split(PATTERNS["segment"], doc)
segment = ""
for seg in seg_list:
if len(seg) > AVG_SEGMENT_LENGTH:
segment += seg
if len(segment) > AVG_DOC_LENGTH:
segment = segment[:AVG_DOC_LENGTH]
if len(segment) < AVG_DOC_LENGTH and len(seg_list) < AVG_SEGMENT_NUMBER:
segment = "".join(seg_list)
if len(segment) < AVG_SEGMENT_LENGTH:
segment = "".join(seg_list)
print(len(segment))
return segment
def split_doc_2(doc):
'''
	Return the length of the input paragraph after segmentation and filtering.
	:param doc: input paragraph
	:return: length after processing
'''
seg_list = re.split(PATTERNS["segment"], doc)
segment = ""
for seg in seg_list:
if len(seg) > AVG_SEGMENT_LENGTH:
segment += seg
return len(segment)
def split_dataset(ori, tri, tes, frac=0.9216):
'''
	Split the original dataset into a training set and a test set.
	:param ori: path of the original dataset
	:param tri: output path of the training set
	:param tes: output path of the test set
	:param frac: split ratio (tes:tri)
:return:
'''
origin_data = pd.read_csv(ori) # frac=0.9216
fake = origin_data[origin_data[list(origin_data.columns)[-1]] == 0].sample(frac=frac, random_state=0, axis=0)
real = origin_data[origin_data[list(origin_data.columns)[-1]] == 1].sample(len(fake), random_state=0, axis=0)
train_data = pd.concat([fake, real], axis=0, join="outer")
train_data = train_data.sample(frac=1)
test_data = origin_data[~origin_data.index.isin(train_data.index)]
print(len(origin_data))
print(len(train_data))
print(len(test_data))
train_data.to_csv(tri, index=False)
test_data.to_csv(tes, index=False)
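# Hedged usage sketch (added; file paths are hypothetical, not from this project):
# split_dataset("data/origin.csv", "data/train.csv", "data/test.csv", frac=0.9216)
# samples a balanced set of fake/real rows, shuffles it into the training CSV,
# and writes every remaining row of the original dataset to the test CSV.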
| [
"re.split",
"pandas.read_csv",
"collections.Counter",
"re.sub",
"pandas.concat"
] | [((2152, 2173), 'collections.Counter', 'Counter', (['welfare_list'], {}), '(welfare_list)\n', (2159, 2173), False, 'from collections import Counter\n'), ((2272, 2306), 're.split', 're.split', (["PATTERNS['segment']", 'doc'], {}), "(PATTERNS['segment'], doc)\n", (2280, 2306), False, 'import re\n'), ((2781, 2815), 're.split', 're.split', (["PATTERNS['segment']", 'doc'], {}), "(PATTERNS['segment'], doc)\n", (2789, 2815), False, 'import re\n'), ((3117, 3133), 'pandas.read_csv', 'pd.read_csv', (['ori'], {}), '(ori)\n', (3128, 3133), True, 'import pandas as pd\n'), ((3385, 3430), 'pandas.concat', 'pd.concat', (['[fake, real]'], {'axis': '(0)', 'join': '"""outer"""'}), "([fake, real], axis=0, join='outer')\n", (3394, 3430), True, 'import pandas as pd\n'), ((1584, 1605), 're.split', 're.split', (['""","""', 'string'], {}), "(',', string)\n", (1592, 1605), False, 'import re\n'), ((1618, 1644), 're.split', 're.split', (['"""_"""', 'tmp_list[0]'], {}), "('_', tmp_list[0])\n", (1626, 1644), False, 'import re\n'), ((1005, 1026), 're.split', 're.split', (['""","""', 'string'], {}), "(',', string)\n", (1013, 1026), False, 'import re\n'), ((1227, 1249), 're.sub', 're.sub', (['""",.*"""', '""""""', 'inp'], {}), "(',.*', '', inp)\n", (1233, 1249), False, 'import re\n'), ((1263, 1281), 're.split', 're.split', (['"""_"""', 'inp'], {}), "('_', inp)\n", (1271, 1281), False, 'import re\n')] |
"""How to customize your task class"""
import pytodotxt
class MyTask(pytodotxt.Task): pass
todotxt = pytodotxt.TodoTxt("todo.txt",
parser=pytodotxt.TodoTxtParser(task_type=MyTask))
for task in todotxt.parse():
assert isinstance(task, MyTask)
| [
"pytodotxt.TodoTxtParser"
] | [((170, 211), 'pytodotxt.TodoTxtParser', 'pytodotxt.TodoTxtParser', ([], {'task_type': 'MyTask'}), '(task_type=MyTask)\n', (193, 211), False, 'import pytodotxt\n')] |
""" This is the ai_db module containing the class AiDatabase """
from datetime import datetime
import logging
from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData
#--------------------------------------------------------------------------------------------------#
class AiDatabase:
"All methods relating to the database connection"
def __init__(self, sql_connection):
# Initialize sqlAlchemy
# CONN = create_engine( \
# 'sqlite:////home/jetson/Desktop/ampel2go_code/104_user_display/db.sqlite3')
self.logger = logging.getLogger('ai_db_logger')
        self.logger.info('creating an instance of AiDatabase')
self.CONN = create_engine(sql_connection)
self.META_DATA = MetaData(bind=self.CONN)
self.MAIN_OCCUPANCY = Table(
'main_occupancy', self.META_DATA,
Column('id', Integer, primary_key=True),
Column('capacity', Integer), Column('date', DateTime),
Column('person_count', Integer),
Column('direction', Integer),
)
self.MAIN_AREATHRESHOLD = Table(
'main_areathreshold', self.META_DATA,
Column('id', Integer, primary_key=True),
Column('area_threshold', Integer),
)
self.logger.info( "Current occupancy: %s", str(self.get_occupancy()) )
def get_max_id(self):
"gets the last entry of the db (i.e. the one with the highest id"
with self.CONN.connect() as connection:
result = connection.execute("select max(id) as maxid from main_occupancy")
for row in result:
max_id = row['maxid']
return max_id
def clean_db(self):
"Removes all entries in DB, except the last one"
with self.CONN.connect() as connection:
with connection.begin():
result = connection.execute( \
"select max(id) as maxid, count(*) as cnt from main_occupancy")
for row in result:
max_id = row['maxid']
row_cnt = row['cnt']
self.logger.info("clean_db: rows %s , max_id: %s ", row_cnt, max_id)
result = connection.execute( \
"delete from main_occupancy where id <>'" + str(max_id)+"' ")
return
def get_occupancy(self):
"gets the value for the current occupancy"
with self.CONN.connect() as connection:
max_id = self.get_max_id()
person_count = 0
result = connection.execute( \
"select person_count from main_occupancy where id ='" + str(max_id) + "' ")
for row in result:
person_count = row['person_count']
return person_count
def set_occupancy(self, person_count):
"sets the value for the occupancy "
with self.CONN.connect() as connection:
max_id = self.get_max_id()
# placeholder for result b/c pylint
_ = connection.execute( "update main_occupancy set person_count = " \
+ str(person_count) + " where id ='" + str(max_id) + "' ")
self.logger.info("DB-set occupancy: ", person_count)
return
def get_area_threshold(self):
"gets area threshold parameter from db"
with self.CONN.connect() as connection:
result = connection.execute( \
"select area_threshold from main_areathreshold")
for row in result:
area_threshold = row['area_threshold']
return area_threshold
def get_current_data(self):
"get latest values of all three fields from main_occupancy table"
with self.CONN.connect() as connection:
max_id = self.get_max_id()
result = connection.execute( \
"select capacity,person_count, direction from main_occupancy where id ='" \
+ str(max_id)+"' ")
for row in result:
capacity = row['capacity']
latest_person_count = row['person_count']
direction = row['direction']
return capacity, latest_person_count, direction
def set_current_data(self, capacity, person_count, direction):
"write values to all three fields of the main_occupancy table"
with self.CONN.connect() as connection: #klaus: why is connection here unused?
now = datetime.now()
now = now.replace(microsecond=0)
insert = self.MAIN_OCCUPANCY.insert().values(capacity=capacity \
, date=now, person_count=person_count, direction=direction)
self.CONN.execute(insert)
#self.logger.info("DB-set current data: ", person_count)
return
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
if __name__ == "__main__":
SQL_CONNECTION = 'sqlite:///../104_user_display/db.sqlite3'
ai_db = AiDatabase(SQL_CONNECTION)
logger = logging.getLogger('ai_db_logger')
def add_occupancy(num):
"small test for occupancy"
oc1 = ai_db.get_occupancy()
oc2 = oc1 + num
ai_db.set_occupancy(oc2)
oc3 = ai_db.get_occupancy()
passed = oc2 == oc3
logger.info("Occupancy Pass: %s , Add/Before/Calc/After: %s %s %s %s" , str(passed) \
, str(num), str(oc1),str( oc2 ), str(oc3 ))
return passed
for i in [1, 10, -5, -6]:
add_occupancy(i)
capacity, latest_person_count, direction = ai_db.get_current_data()
logger.info("capacity: %s, latest_person_count: %s , direction: %s", capacity \
, latest_person_count, direction)
ai_db.set_current_data(capacity+1, latest_person_count+1, direction+1)
capacity, latest_person_count, direction = ai_db.get_current_data()
logger.info("capacity: %s, latest_person_count: %s , direction: %s", capacity \
, latest_person_count, direction)
ai_db.set_current_data(capacity-1, latest_person_count-1, direction-1)
capacity, latest_person_count, direction = ai_db.get_current_data()
logger.info("capacity: %s, latest_person_count: %s , direction: %s", capacity \
, latest_person_count, direction)
logger.info("Test ended.")
| [
"logging.getLogger",
"sqlalchemy.create_engine",
"sqlalchemy.MetaData",
"datetime.datetime.now",
"sqlalchemy.Column"
] | [((5165, 5198), 'logging.getLogger', 'logging.getLogger', (['"""ai_db_logger"""'], {}), "('ai_db_logger')\n", (5182, 5198), False, 'import logging\n'), ((587, 620), 'logging.getLogger', 'logging.getLogger', (['"""ai_db_logger"""'], {}), "('ai_db_logger')\n", (604, 620), False, 'import logging\n'), ((703, 732), 'sqlalchemy.create_engine', 'create_engine', (['sql_connection'], {}), '(sql_connection)\n', (716, 732), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((758, 782), 'sqlalchemy.MetaData', 'MetaData', ([], {'bind': 'self.CONN'}), '(bind=self.CONN)\n', (766, 782), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((878, 917), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (884, 917), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((931, 958), 'sqlalchemy.Column', 'Column', (['"""capacity"""', 'Integer'], {}), "('capacity', Integer)\n", (937, 958), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((960, 984), 'sqlalchemy.Column', 'Column', (['"""date"""', 'DateTime'], {}), "('date', DateTime)\n", (966, 984), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((998, 1029), 'sqlalchemy.Column', 'Column', (['"""person_count"""', 'Integer'], {}), "('person_count', Integer)\n", (1004, 1029), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((1043, 1071), 'sqlalchemy.Column', 'Column', (['"""direction"""', 'Integer'], {}), "('direction', Integer)\n", (1049, 1071), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((1191, 1230), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (1197, 1230), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((1244, 1277), 'sqlalchemy.Column', 'Column', (['"""area_threshold"""', 'Integer'], {}), "('area_threshold', Integer)\n", (1250, 1277), False, 'from sqlalchemy import create_engine, Table, Column, Integer, DateTime, MetaData\n'), ((4479, 4493), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4491, 4493), False, 'from datetime import datetime\n')] |
import setuptools
from setuptools import version
setuptools.setup(
name='the_pitch',
version="0.0.1.7",
description='',
packages=setuptools.find_packages('src'),
package_dir={'': 'src'},
install_requires=[
'pytest',
'pandas',
'numpy',
'pandas_datareader',
'pandas_ta',
]
) | [
"setuptools.find_packages"
] | [((138, 169), 'setuptools.find_packages', 'setuptools.find_packages', (['"""src"""'], {}), "('src')\n", (162, 169), False, 'import setuptools\n')] |
#!/usr/bin/env python
import utoken
from pathlib import Path
from setuptools import setup, find_namespace_packages
long_description = Path('README.md').read_text(encoding='utf-8', errors='ignore')
classifiers = [ # copied from https://pypi.org/classifiers/
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Utilities',
'Topic :: Text Processing',
'Topic :: Text Processing :: General',
'Topic :: Text Processing :: Filters',
'Topic :: Text Processing :: Linguistic',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
]
setup(
name='utoken',
version=utoken.__version__,
description=utoken.__description__,
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=classifiers,
python_requires='>=3.8',
url='https://github.com/uhermjakob/utoken',
download_url='https://github.com/uhermjakob/utoken',
platforms=['any'],
author='<NAME>',
author_email='<EMAIL>',
packages=find_namespace_packages(exclude=['aux']),
    keywords=['machine translation', 'datasets', 'NLP', 'natural language processing',
              'computational linguistics'],
entry_points={
'console_scripts': [
'utokenize=utoken.utokenize:main',
'detokenize=utoken.detokenize:main'
],
},
install_requires=[
'regex>=2021.8.3',
'tqdm>=4.40',
],
include_package_data=True,
zip_safe=False,
)
| [
"setuptools.find_namespace_packages",
"pathlib.Path"
] | [((137, 154), 'pathlib.Path', 'Path', (['"""README.md"""'], {}), "('README.md')\n", (141, 154), False, 'from pathlib import Path\n'), ((1090, 1130), 'setuptools.find_namespace_packages', 'find_namespace_packages', ([], {'exclude': "['aux']"}), "(exclude=['aux'])\n", (1113, 1130), False, 'from setuptools import setup, find_namespace_packages\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from test.bigdl.test_utils import BigDLTestCase
import bigdl.dllib.nn.keras.layers.layer as BLayer
import keras.layers as KLayer
import keras.backend as K
from bigdl.dllib.keras.converter import WeightsConverter
from bigdl.dllib.feature.dataset.dataset import *
from bigdl.dllib.nn.keras.layers.topology import Model as BModel
from bigdl.dllib.nn.keras.layers.topology import Sequential as BSequential
from keras.engine import merge as kmerge, Model as KModel
from keras.models import Sequential as KSequential
np.random.seed(1337) # for reproducibility
class TestKerasAPI(BigDLTestCase):
def test_embedding(self):
input_data = np.random.randint(1000, size=(32, 10))
blayer = BLayer.Embedding(1000, 64, input_shape=(10, ))
klayer = KLayer.Embedding(1000, 64, input_length=10)
self.compare_newapi(klayer, blayer, input_data,
WeightsConverter.convert_embedding)
def test_batchnormalization(self):
K.set_image_dim_ordering("th")
input_data = np.random.random_sample([2, 5, 32, 32])
blayer = BLayer.BatchNormalization(axis=1, input_shape=(5, 32, 32))
klayer = KLayer.BatchNormalization(axis=1, input_shape=(5, 32, 32))
self.compare_newapi(klayer, blayer, input_data,
WeightsConverter.convert_batchnormalization)
K.set_image_dim_ordering("tf")
input_data2 = np.random.random_sample([2, 32, 32, 4])
blayer = BLayer.BatchNormalization(axis=-1, dim_ordering="tf", input_shape=(32, 32, 4))
klayer = KLayer.BatchNormalization(axis=-1, input_shape=(32, 32, 4))
self.compare_newapi(klayer, blayer, input_data2,
WeightsConverter.convert_batchnormalization)
def test_merge_sum(self):
b1 = BLayer.InputLayer(input_shape=(3, 5))
b2 = BLayer.InputLayer(input_shape=(3, 5))
blayer = BLayer.Merge(layers=[b1, b2], mode="sum")
k1 = KLayer.InputLayer(input_shape=(3, 5))
k2 = KLayer.InputLayer(input_shape=(3, 5))
klayer = KLayer.Merge(layers=[k1, k2], mode="sum")
input_data = [np.random.random([2, 3, 5]), np.random.random([2, 3, 5])]
self.compare_newapi(klayer, blayer, input_data)
def test_merge_mul(self):
b1 = BLayer.InputLayer(input_shape=(3, 5))
b2 = BLayer.InputLayer(input_shape=(3, 5))
blayer = BLayer.Merge(layers=[b1, b2], mode="mul")
k1 = KLayer.InputLayer(input_shape=(3, 5))
k2 = KLayer.InputLayer(input_shape=(3, 5))
klayer = KLayer.Merge(layers=[k1, k2], mode="mul")
input_data = [np.random.random([2, 3, 5]), np.random.random([2, 3, 5])]
self.compare_newapi(klayer, blayer, input_data)
def test_merge_ave(self):
b1 = BLayer.InputLayer(input_shape=(2, 5, 8))
b2 = BLayer.InputLayer(input_shape=(2, 5, 8))
blayer = BLayer.Merge(layers=[b1, b2], mode="ave")
k1 = KLayer.InputLayer(input_shape=(2, 5, 8))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode="ave")
input_data = [np.random.random([3, 2, 5, 8]), np.random.random([3, 2, 5, 8])]
self.compare_newapi(klayer, blayer, input_data)
def test_merge_max(self):
b1 = BLayer.InputLayer(input_shape=(2, 5, 8))
b2 = BLayer.InputLayer(input_shape=(2, 5, 8))
blayer = BLayer.Merge(layers=[b1, b2], mode="max")
k1 = KLayer.InputLayer(input_shape=(2, 5, 8))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode="max")
input_data = [np.random.random([3, 2, 5, 8]), np.random.random([3, 2, 5, 8])]
self.compare_newapi(klayer, blayer, input_data)
def test_merge_concat(self):
b1 = BLayer.InputLayer(input_shape=(2, 5, 11))
b2 = BLayer.InputLayer(input_shape=(2, 5, 8))
blayer = BLayer.Merge(layers=[b1, b2], mode="concat")
k1 = KLayer.InputLayer(input_shape=(2, 5, 11))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode="concat")
input_data = [np.random.random([3, 2, 5, 11]), np.random.random([3, 2, 5, 8])]
self.compare_newapi(klayer, blayer, input_data)
def test_merge_dot(self):
b1 = BLayer.InputLayer(input_shape=(4, ))
b2 = BLayer.InputLayer(input_shape=(4, ))
blayer = BLayer.Merge(layers=[b1, b2], mode="dot")
k1 = KLayer.InputLayer(input_shape=(4, ))
k2 = KLayer.InputLayer(input_shape=(4, ))
klayer = KLayer.Merge(layers=[k1, k2], mode="dot")
input_data = [np.random.random([2, 4]), np.random.random([2, 4])]
self.compare_newapi(klayer, blayer, input_data)
def test_merge_cos(self):
b1 = BLayer.InputLayer(input_shape=(3, ))
b2 = BLayer.InputLayer(input_shape=(3, ))
blayer = BLayer.Merge(layers=[b1, b2], mode="cos")
k1 = KLayer.InputLayer(input_shape=(3, ))
k2 = KLayer.InputLayer(input_shape=(3, ))
klayer = KLayer.Merge(layers=[k1, k2], mode="cos")
input_data = [np.random.random([2, 3]), np.random.random([2, 3])]
self.compare_newapi(klayer, blayer, input_data)
def test_lenet_shape(self):
from bigdl.dllib.models.lenet.lenet import build_model
model = build_model(10)
input_shape = model.get_input_shape()
np.testing.assert_allclose((28, 28, 1), input_shape[1:])
output_shape = model.get_output_shape()
np.testing.assert_allclose((10, ), output_shape[1:])
def test_graph(self):
x1 = BLayer.Input(shape=(8, ))
x2 = BLayer.Input(shape=(6, ))
y1 = BLayer.Dense(10)(x1)
y2 = BLayer.Dense(10)(x2)
model = BModel([x1, x2], [y1, y2])
input_shapes = model.get_input_shape()
output_shapes = model.get_output_shape()
np.testing.assert_allclose((8, ), input_shapes[0][1:])
np.testing.assert_allclose((6, ), input_shapes[1][1:])
np.testing.assert_allclose((10, ), output_shapes[0][1:])
np.testing.assert_allclose((10, ), output_shapes[1][1:])
def test_train(self):
x = np.random.random([32, 10])
y = np.random.random([32, ])
model = BSequential()
model.add(BLayer.Dense(5, input_shape=(10, )))
model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"])
model.fit(x, y, batch_size=8, nb_epoch=2, validation_data=(x, y))
model.evaluate(x, y, batch_size=8)
model.predict(x)
def test_train_dataset(self):
images = []
labels = []
for i in range(0, 8):
features = np.random.uniform(0, 1, (200, 200, 3))
label = np.array([2])
images.append(features)
labels.append(label)
image_frame = DistributedImageFrame(self.sc.parallelize(images),
self.sc.parallelize(labels))
transformer = Pipeline([BytesToMat(), Resize(256, 256), CenterCrop(224, 224),
ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
MatToTensor(), ImageFrameToSample(target_keys=['label'])])
data_set = DataSet.image_frame(image_frame).transform(transformer)
model = BSequential()
model.add(BLayer.Convolution2D(1, 5, 5, input_shape=(3, 224, 224)))
model.add(BLayer.Reshape((1*220*220, )))
model.add(BLayer.Dense(20, activation="softmax"))
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.fit(data_set, batch_size=8, nb_epoch=2, validation_data=data_set)
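    # Reorder the flattened Keras weights of the two Dense layers (and transpose the
    # kernels) into the layout the BigDL graphs built in the tests below expect.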
def convert_two_dense_model(self, kmodel, weights):
return [weights[2].T, weights[3], weights[0].T, weights[1]]
def test_merge_method_sum(self):
bx1 = BLayer.Input(shape=(8, ))
bx2 = BLayer.Input(shape=(6, ))
by1 = BLayer.Dense(10)(bx1)
by2 = BLayer.Dense(10)(bx2)
bz = BLayer.merge([by1, by2], mode="sum")
bmodel = BModel([bx1, bx2], bz, name="graph1")
kx1 = KLayer.Input(shape=(8, ))
kx2 = KLayer.Input(shape=(6, ))
ky1 = KLayer.Dense(10)(kx1)
ky2 = KLayer.Dense(10)(kx2)
kz = kmerge([ky1, ky2], mode="sum")
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 8]), np.random.random([2, 6])]
self.compare_newapi(kmodel, bmodel, input_data, self.convert_two_dense_model)
def test_merge_method_model_concat(self):
bx1 = BLayer.Input(shape=(4, ))
bx2 = BLayer.Input(shape=(5, ))
by1 = BLayer.Dense(6, activation="sigmoid")(bx1)
bbranch1 = BModel(bx1, by1)(bx1)
bbranch2 = BLayer.Dense(8)(bx2)
bz = BLayer.merge([bbranch1, bbranch2], mode="concat")
bmodel = BModel([bx1, bx2], bz)
kx1 = KLayer.Input(shape=(4, ))
kx2 = KLayer.Input(shape=(5, ))
ky1 = KLayer.Dense(6, activation="sigmoid")(kx1)
kbranch1 = KModel(kx1, ky1)(kx1)
kbranch2 = KLayer.Dense(8)(kx2)
kz = KLayer.merge([kbranch1, kbranch2], mode="concat")
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 4]), np.random.random([2, 5])]
self.compare_newapi(kmodel, bmodel, input_data, self.convert_two_dense_model)
def test_merge_method_seq_concat(self):
bx1 = BLayer.Input(shape=(10, ))
bx2 = BLayer.Input(shape=(10, ))
by1 = BLayer.Dense(12, activation="sigmoid")(bx1)
bbranch1_node = BModel(bx1, by1)(bx1)
bbranch2 = BSequential()
bbranch2.add(BLayer.Dense(12, input_dim=10))
bbranch2_node = bbranch2(bx2)
bz = BLayer.merge([bbranch1_node, bbranch2_node], mode="concat")
bmodel = BModel([bx1, bx2], bz)
kx1 = KLayer.Input(shape=(10, ))
kx2 = KLayer.Input(shape=(10, ))
ky1 = KLayer.Dense(12, activation="sigmoid")(kx1)
kbranch1_node = KModel(kx1, ky1)(kx1)
kbranch2 = KSequential()
kbranch2.add(KLayer.Dense(12, input_dim=10))
kbranch2_node = kbranch2(kx2)
kz = KLayer.merge([kbranch1_node, kbranch2_node], mode="concat")
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 10]), np.random.random([2, 10])]
self.compare_newapi(kmodel, bmodel, input_data, self.convert_two_dense_model)
if __name__ == "__main__":
pytest.main([__file__])
| [
"bigdl.dllib.nn.keras.layers.layer.InputLayer",
"bigdl.dllib.nn.keras.layers.layer.Reshape",
"bigdl.dllib.nn.keras.layers.layer.merge",
"keras.layers.Dense",
"bigdl.dllib.models.lenet.lenet.build_model",
"bigdl.dllib.nn.keras.layers.layer.Merge",
"keras.engine.merge",
"pytest.main",
"keras.layers.merge",
"bigdl.dllib.nn.keras.layers.topology.Model",
"keras.engine.Model",
"keras.layers.InputLayer",
"keras.models.Sequential",
"bigdl.dllib.nn.keras.layers.layer.Convolution2D",
"bigdl.dllib.nn.keras.layers.layer.BatchNormalization",
"keras.layers.BatchNormalization",
"keras.backend.set_image_dim_ordering",
"bigdl.dllib.nn.keras.layers.layer.Dense",
"keras.layers.Merge",
"bigdl.dllib.nn.keras.layers.layer.Input",
"bigdl.dllib.nn.keras.layers.layer.Embedding",
"keras.layers.Input",
"keras.layers.Embedding",
"bigdl.dllib.nn.keras.layers.topology.Sequential"
] | [((11057, 11080), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (11068, 11080), False, 'import pytest\n'), ((1303, 1348), 'bigdl.dllib.nn.keras.layers.layer.Embedding', 'BLayer.Embedding', (['(1000)', '(64)'], {'input_shape': '(10,)'}), '(1000, 64, input_shape=(10,))\n', (1319, 1348), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((1367, 1410), 'keras.layers.Embedding', 'KLayer.Embedding', (['(1000)', '(64)'], {'input_length': '(10)'}), '(1000, 64, input_length=10)\n', (1383, 1410), True, 'import keras.layers as KLayer\n'), ((1579, 1609), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""th"""'], {}), "('th')\n", (1603, 1609), True, 'import keras.backend as K\n'), ((1688, 1746), 'bigdl.dllib.nn.keras.layers.layer.BatchNormalization', 'BLayer.BatchNormalization', ([], {'axis': '(1)', 'input_shape': '(5, 32, 32)'}), '(axis=1, input_shape=(5, 32, 32))\n', (1713, 1746), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((1764, 1822), 'keras.layers.BatchNormalization', 'KLayer.BatchNormalization', ([], {'axis': '(1)', 'input_shape': '(5, 32, 32)'}), '(axis=1, input_shape=(5, 32, 32))\n', (1789, 1822), True, 'import keras.layers as KLayer\n'), ((1960, 1990), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (1984, 1990), True, 'import keras.backend as K\n'), ((2070, 2148), 'bigdl.dllib.nn.keras.layers.layer.BatchNormalization', 'BLayer.BatchNormalization', ([], {'axis': '(-1)', 'dim_ordering': '"""tf"""', 'input_shape': '(32, 32, 4)'}), "(axis=-1, dim_ordering='tf', input_shape=(32, 32, 4))\n", (2095, 2148), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((2166, 2225), 'keras.layers.BatchNormalization', 'KLayer.BatchNormalization', ([], {'axis': '(-1)', 'input_shape': '(32, 32, 4)'}), '(axis=-1, input_shape=(32, 32, 4))\n', (2191, 2225), True, 'import keras.layers as KLayer\n'), ((2400, 2437), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(3, 5)'}), '(input_shape=(3, 5))\n', (2417, 2437), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((2451, 2488), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(3, 5)'}), '(input_shape=(3, 5))\n', (2468, 2488), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((2506, 2547), 'bigdl.dllib.nn.keras.layers.layer.Merge', 'BLayer.Merge', ([], {'layers': '[b1, b2]', 'mode': '"""sum"""'}), "(layers=[b1, b2], mode='sum')\n", (2518, 2547), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((2561, 2598), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(3, 5)'}), '(input_shape=(3, 5))\n', (2578, 2598), True, 'import keras.layers as KLayer\n'), ((2612, 2649), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(3, 5)'}), '(input_shape=(3, 5))\n', (2629, 2649), True, 'import keras.layers as KLayer\n'), ((2667, 2708), 'keras.layers.Merge', 'KLayer.Merge', ([], {'layers': '[k1, k2]', 'mode': '"""sum"""'}), "(layers=[k1, k2], mode='sum')\n", (2679, 2708), True, 'import keras.layers as KLayer\n'), ((2889, 2926), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(3, 5)'}), '(input_shape=(3, 5))\n', (2906, 2926), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((2940, 2977), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(3, 5)'}), '(input_shape=(3, 5))\n', (2957, 
2977), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((2995, 3036), 'bigdl.dllib.nn.keras.layers.layer.Merge', 'BLayer.Merge', ([], {'layers': '[b1, b2]', 'mode': '"""mul"""'}), "(layers=[b1, b2], mode='mul')\n", (3007, 3036), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((3050, 3087), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(3, 5)'}), '(input_shape=(3, 5))\n', (3067, 3087), True, 'import keras.layers as KLayer\n'), ((3101, 3138), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(3, 5)'}), '(input_shape=(3, 5))\n', (3118, 3138), True, 'import keras.layers as KLayer\n'), ((3156, 3197), 'keras.layers.Merge', 'KLayer.Merge', ([], {'layers': '[k1, k2]', 'mode': '"""mul"""'}), "(layers=[k1, k2], mode='mul')\n", (3168, 3197), True, 'import keras.layers as KLayer\n'), ((3378, 3418), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (3395, 3418), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((3432, 3472), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (3449, 3472), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((3490, 3531), 'bigdl.dllib.nn.keras.layers.layer.Merge', 'BLayer.Merge', ([], {'layers': '[b1, b2]', 'mode': '"""ave"""'}), "(layers=[b1, b2], mode='ave')\n", (3502, 3531), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((3545, 3585), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (3562, 3585), True, 'import keras.layers as KLayer\n'), ((3599, 3639), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (3616, 3639), True, 'import keras.layers as KLayer\n'), ((3657, 3698), 'keras.layers.Merge', 'KLayer.Merge', ([], {'layers': '[k1, k2]', 'mode': '"""ave"""'}), "(layers=[k1, k2], mode='ave')\n", (3669, 3698), True, 'import keras.layers as KLayer\n'), ((3885, 3925), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (3902, 3925), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((3939, 3979), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (3956, 3979), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((3997, 4038), 'bigdl.dllib.nn.keras.layers.layer.Merge', 'BLayer.Merge', ([], {'layers': '[b1, b2]', 'mode': '"""max"""'}), "(layers=[b1, b2], mode='max')\n", (4009, 4038), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((4052, 4092), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (4069, 4092), True, 'import keras.layers as KLayer\n'), ((4106, 4146), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (4123, 4146), True, 'import keras.layers as KLayer\n'), ((4164, 4205), 'keras.layers.Merge', 'KLayer.Merge', ([], {'layers': '[k1, k2]', 'mode': '"""max"""'}), "(layers=[k1, k2], mode='max')\n", (4176, 4205), True, 'import keras.layers as KLayer\n'), ((4395, 4436), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(2, 5, 11)'}), '(input_shape=(2, 5, 11))\n', (4412, 4436), True, 'import 
bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((4450, 4490), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (4467, 4490), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((4508, 4552), 'bigdl.dllib.nn.keras.layers.layer.Merge', 'BLayer.Merge', ([], {'layers': '[b1, b2]', 'mode': '"""concat"""'}), "(layers=[b1, b2], mode='concat')\n", (4520, 4552), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((4566, 4607), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(2, 5, 11)'}), '(input_shape=(2, 5, 11))\n', (4583, 4607), True, 'import keras.layers as KLayer\n'), ((4621, 4661), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(2, 5, 8)'}), '(input_shape=(2, 5, 8))\n', (4638, 4661), True, 'import keras.layers as KLayer\n'), ((4679, 4723), 'keras.layers.Merge', 'KLayer.Merge', ([], {'layers': '[k1, k2]', 'mode': '"""concat"""'}), "(layers=[k1, k2], mode='concat')\n", (4691, 4723), True, 'import keras.layers as KLayer\n'), ((4911, 4946), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(4,)'}), '(input_shape=(4,))\n', (4928, 4946), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((4961, 4996), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(4,)'}), '(input_shape=(4,))\n', (4978, 4996), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((5015, 5056), 'bigdl.dllib.nn.keras.layers.layer.Merge', 'BLayer.Merge', ([], {'layers': '[b1, b2]', 'mode': '"""dot"""'}), "(layers=[b1, b2], mode='dot')\n", (5027, 5056), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((5070, 5105), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(4,)'}), '(input_shape=(4,))\n', (5087, 5105), True, 'import keras.layers as KLayer\n'), ((5120, 5155), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(4,)'}), '(input_shape=(4,))\n', (5137, 5155), True, 'import keras.layers as KLayer\n'), ((5174, 5215), 'keras.layers.Merge', 'KLayer.Merge', ([], {'layers': '[k1, k2]', 'mode': '"""dot"""'}), "(layers=[k1, k2], mode='dot')\n", (5186, 5215), True, 'import keras.layers as KLayer\n'), ((5390, 5425), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(3,)'}), '(input_shape=(3,))\n', (5407, 5425), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((5440, 5475), 'bigdl.dllib.nn.keras.layers.layer.InputLayer', 'BLayer.InputLayer', ([], {'input_shape': '(3,)'}), '(input_shape=(3,))\n', (5457, 5475), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((5494, 5535), 'bigdl.dllib.nn.keras.layers.layer.Merge', 'BLayer.Merge', ([], {'layers': '[b1, b2]', 'mode': '"""cos"""'}), "(layers=[b1, b2], mode='cos')\n", (5506, 5535), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((5549, 5584), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(3,)'}), '(input_shape=(3,))\n', (5566, 5584), True, 'import keras.layers as KLayer\n'), ((5599, 5634), 'keras.layers.InputLayer', 'KLayer.InputLayer', ([], {'input_shape': '(3,)'}), '(input_shape=(3,))\n', (5616, 5634), True, 'import keras.layers as KLayer\n'), ((5653, 5694), 'keras.layers.Merge', 'KLayer.Merge', ([], {'layers': '[k1, k2]', 'mode': '"""cos"""'}), "(layers=[k1, k2], mode='cos')\n", (5665, 5694), True, 'import keras.layers as KLayer\n'), ((5937, 5952), 
'bigdl.dllib.models.lenet.lenet.build_model', 'build_model', (['(10)'], {}), '(10)\n', (5948, 5952), False, 'from bigdl.dllib.models.lenet.lenet import build_model\n'), ((6213, 6237), 'bigdl.dllib.nn.keras.layers.layer.Input', 'BLayer.Input', ([], {'shape': '(8,)'}), '(shape=(8,))\n', (6225, 6237), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((6252, 6276), 'bigdl.dllib.nn.keras.layers.layer.Input', 'BLayer.Input', ([], {'shape': '(6,)'}), '(shape=(6,))\n', (6264, 6276), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((6362, 6388), 'bigdl.dllib.nn.keras.layers.topology.Model', 'BModel', (['[x1, x2]', '[y1, y2]'], {}), '([x1, x2], [y1, y2])\n', (6368, 6388), True, 'from bigdl.dllib.nn.keras.layers.topology import Model as BModel\n'), ((6860, 6873), 'bigdl.dllib.nn.keras.layers.topology.Sequential', 'BSequential', ([], {}), '()\n', (6871, 6873), True, 'from bigdl.dllib.nn.keras.layers.topology import Sequential as BSequential\n'), ((7922, 7935), 'bigdl.dllib.nn.keras.layers.topology.Sequential', 'BSequential', ([], {}), '()\n', (7933, 7935), True, 'from bigdl.dllib.nn.keras.layers.topology import Sequential as BSequential\n'), ((8477, 8501), 'bigdl.dllib.nn.keras.layers.layer.Input', 'BLayer.Input', ([], {'shape': '(8,)'}), '(shape=(8,))\n', (8489, 8501), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((8517, 8541), 'bigdl.dllib.nn.keras.layers.layer.Input', 'BLayer.Input', ([], {'shape': '(6,)'}), '(shape=(6,))\n', (8529, 8541), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((8628, 8664), 'bigdl.dllib.nn.keras.layers.layer.merge', 'BLayer.merge', (['[by1, by2]'], {'mode': '"""sum"""'}), "([by1, by2], mode='sum')\n", (8640, 8664), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((8682, 8719), 'bigdl.dllib.nn.keras.layers.topology.Model', 'BModel', (['[bx1, bx2]', 'bz'], {'name': '"""graph1"""'}), "([bx1, bx2], bz, name='graph1')\n", (8688, 8719), True, 'from bigdl.dllib.nn.keras.layers.topology import Model as BModel\n'), ((8735, 8759), 'keras.layers.Input', 'KLayer.Input', ([], {'shape': '(8,)'}), '(shape=(8,))\n', (8747, 8759), True, 'import keras.layers as KLayer\n'), ((8775, 8799), 'keras.layers.Input', 'KLayer.Input', ([], {'shape': '(6,)'}), '(shape=(6,))\n', (8787, 8799), True, 'import keras.layers as KLayer\n'), ((8886, 8916), 'keras.engine.merge', 'kmerge', (['[ky1, ky2]'], {'mode': '"""sum"""'}), "([ky1, ky2], mode='sum')\n", (8892, 8916), True, 'from keras.engine import merge as kmerge, Model as KModel\n'), ((8934, 8956), 'keras.engine.Model', 'KModel', (['[kx1, kx2]', 'kz'], {}), '([kx1, kx2], kz)\n', (8940, 8956), True, 'from keras.engine import merge as kmerge, Model as KModel\n'), ((9179, 9203), 'bigdl.dllib.nn.keras.layers.layer.Input', 'BLayer.Input', ([], {'shape': '(4,)'}), '(shape=(4,))\n', (9191, 9203), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((9219, 9243), 'bigdl.dllib.nn.keras.layers.layer.Input', 'BLayer.Input', ([], {'shape': '(5,)'}), '(shape=(5,))\n', (9231, 9243), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((9396, 9445), 'bigdl.dllib.nn.keras.layers.layer.merge', 'BLayer.merge', (['[bbranch1, bbranch2]'], {'mode': '"""concat"""'}), "([bbranch1, bbranch2], mode='concat')\n", (9408, 9445), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((9463, 9485), 'bigdl.dllib.nn.keras.layers.topology.Model', 'BModel', (['[bx1, bx2]', 'bz'], {}), '([bx1, bx2], bz)\n', (9469, 9485), True, 'from bigdl.dllib.nn.keras.layers.topology import 
Model as BModel\n'), ((9501, 9525), 'keras.layers.Input', 'KLayer.Input', ([], {'shape': '(4,)'}), '(shape=(4,))\n', (9513, 9525), True, 'import keras.layers as KLayer\n'), ((9541, 9565), 'keras.layers.Input', 'KLayer.Input', ([], {'shape': '(5,)'}), '(shape=(5,))\n', (9553, 9565), True, 'import keras.layers as KLayer\n'), ((9718, 9767), 'keras.layers.merge', 'KLayer.merge', (['[kbranch1, kbranch2]'], {'mode': '"""concat"""'}), "([kbranch1, kbranch2], mode='concat')\n", (9730, 9767), True, 'import keras.layers as KLayer\n'), ((9785, 9807), 'keras.engine.Model', 'KModel', (['[kx1, kx2]', 'kz'], {}), '([kx1, kx2], kz)\n', (9791, 9807), True, 'from keras.engine import merge as kmerge, Model as KModel\n'), ((10028, 10053), 'bigdl.dllib.nn.keras.layers.layer.Input', 'BLayer.Input', ([], {'shape': '(10,)'}), '(shape=(10,))\n', (10040, 10053), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((10069, 10094), 'bigdl.dllib.nn.keras.layers.layer.Input', 'BLayer.Input', ([], {'shape': '(10,)'}), '(shape=(10,))\n', (10081, 10094), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((10219, 10232), 'bigdl.dllib.nn.keras.layers.topology.Sequential', 'BSequential', ([], {}), '()\n', (10230, 10232), True, 'from bigdl.dllib.nn.keras.layers.topology import Sequential as BSequential\n'), ((10337, 10396), 'bigdl.dllib.nn.keras.layers.layer.merge', 'BLayer.merge', (['[bbranch1_node, bbranch2_node]'], {'mode': '"""concat"""'}), "([bbranch1_node, bbranch2_node], mode='concat')\n", (10349, 10396), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((10414, 10436), 'bigdl.dllib.nn.keras.layers.topology.Model', 'BModel', (['[bx1, bx2]', 'bz'], {}), '([bx1, bx2], bz)\n', (10420, 10436), True, 'from bigdl.dllib.nn.keras.layers.topology import Model as BModel\n'), ((10452, 10477), 'keras.layers.Input', 'KLayer.Input', ([], {'shape': '(10,)'}), '(shape=(10,))\n', (10464, 10477), True, 'import keras.layers as KLayer\n'), ((10493, 10518), 'keras.layers.Input', 'KLayer.Input', ([], {'shape': '(10,)'}), '(shape=(10,))\n', (10505, 10518), True, 'import keras.layers as KLayer\n'), ((10643, 10656), 'keras.models.Sequential', 'KSequential', ([], {}), '()\n', (10654, 10656), True, 'from keras.models import Sequential as KSequential\n'), ((10761, 10820), 'keras.layers.merge', 'KLayer.merge', (['[kbranch1_node, kbranch2_node]'], {'mode': '"""concat"""'}), "([kbranch1_node, kbranch2_node], mode='concat')\n", (10773, 10820), True, 'import keras.layers as KLayer\n'), ((10838, 10860), 'keras.engine.Model', 'KModel', (['[kx1, kx2]', 'kz'], {}), '([kx1, kx2], kz)\n', (10844, 10860), True, 'from keras.engine import merge as kmerge, Model as KModel\n'), ((6291, 6307), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(10)'], {}), '(10)\n', (6303, 6307), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((6325, 6341), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(10)'], {}), '(10)\n', (6337, 6341), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((6892, 6926), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(5)'], {'input_shape': '(10,)'}), '(5, input_shape=(10,))\n', (6904, 6926), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((7954, 8010), 'bigdl.dllib.nn.keras.layers.layer.Convolution2D', 'BLayer.Convolution2D', (['(1)', '(5)', '(5)'], {'input_shape': '(3, 224, 224)'}), '(1, 5, 5, input_shape=(3, 224, 224))\n', (7974, 8010), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((8030, 8062), 
'bigdl.dllib.nn.keras.layers.layer.Reshape', 'BLayer.Reshape', (['(1 * 220 * 220,)'], {}), '((1 * 220 * 220,))\n', (8044, 8062), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((8079, 8117), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(20)'], {'activation': '"""softmax"""'}), "(20, activation='softmax')\n", (8091, 8117), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((8557, 8573), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(10)'], {}), '(10)\n', (8569, 8573), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((8593, 8609), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(10)'], {}), '(10)\n', (8605, 8609), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((8815, 8831), 'keras.layers.Dense', 'KLayer.Dense', (['(10)'], {}), '(10)\n', (8827, 8831), True, 'import keras.layers as KLayer\n'), ((8851, 8867), 'keras.layers.Dense', 'KLayer.Dense', (['(10)'], {}), '(10)\n', (8863, 8867), True, 'import keras.layers as KLayer\n'), ((9259, 9296), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(6)'], {'activation': '"""sigmoid"""'}), "(6, activation='sigmoid')\n", (9271, 9296), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((9321, 9337), 'bigdl.dllib.nn.keras.layers.topology.Model', 'BModel', (['bx1', 'by1'], {}), '(bx1, by1)\n', (9327, 9337), True, 'from bigdl.dllib.nn.keras.layers.topology import Model as BModel\n'), ((9362, 9377), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(8)'], {}), '(8)\n', (9374, 9377), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((9581, 9618), 'keras.layers.Dense', 'KLayer.Dense', (['(6)'], {'activation': '"""sigmoid"""'}), "(6, activation='sigmoid')\n", (9593, 9618), True, 'import keras.layers as KLayer\n'), ((9643, 9659), 'keras.engine.Model', 'KModel', (['kx1', 'ky1'], {}), '(kx1, ky1)\n', (9649, 9659), True, 'from keras.engine import merge as kmerge, Model as KModel\n'), ((9684, 9699), 'keras.layers.Dense', 'KLayer.Dense', (['(8)'], {}), '(8)\n', (9696, 9699), True, 'import keras.layers as KLayer\n'), ((10110, 10148), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(12)'], {'activation': '"""sigmoid"""'}), "(12, activation='sigmoid')\n", (10122, 10148), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((10178, 10194), 'bigdl.dllib.nn.keras.layers.topology.Model', 'BModel', (['bx1', 'by1'], {}), '(bx1, by1)\n', (10184, 10194), True, 'from bigdl.dllib.nn.keras.layers.topology import Model as BModel\n'), ((10254, 10284), 'bigdl.dllib.nn.keras.layers.layer.Dense', 'BLayer.Dense', (['(12)'], {'input_dim': '(10)'}), '(12, input_dim=10)\n', (10266, 10284), True, 'import bigdl.dllib.nn.keras.layers.layer as BLayer\n'), ((10534, 10572), 'keras.layers.Dense', 'KLayer.Dense', (['(12)'], {'activation': '"""sigmoid"""'}), "(12, activation='sigmoid')\n", (10546, 10572), True, 'import keras.layers as KLayer\n'), ((10602, 10618), 'keras.engine.Model', 'KModel', (['kx1', 'ky1'], {}), '(kx1, ky1)\n', (10608, 10618), True, 'from keras.engine import merge as kmerge, Model as KModel\n'), ((10678, 10708), 'keras.layers.Dense', 'KLayer.Dense', (['(12)'], {'input_dim': '(10)'}), '(12, input_dim=10)\n', (10690, 10708), True, 'import keras.layers as KLayer\n')] |
import sys
sys.path.append('./arxiv')
from flask import Flask
from arxiv.users import auth, legacy
app = Flask('test')
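# Bind the legacy auth store to a throwaway Flask app, then create its tables.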
legacy.init_app(app)
legacy.create_all()
| [
"arxiv.users.legacy.init_app",
"arxiv.users.legacy.create_all",
"sys.path.append",
"flask.Flask"
] | [((11, 37), 'sys.path.append', 'sys.path.append', (['"""./arxiv"""'], {}), "('./arxiv')\n", (26, 37), False, 'import sys\n'), ((107, 120), 'flask.Flask', 'Flask', (['"""test"""'], {}), "('test')\n", (112, 120), False, 'from flask import Flask\n'), ((121, 141), 'arxiv.users.legacy.init_app', 'legacy.init_app', (['app'], {}), '(app)\n', (136, 141), False, 'from arxiv.users import auth, legacy\n'), ((142, 161), 'arxiv.users.legacy.create_all', 'legacy.create_all', ([], {}), '()\n', (159, 161), False, 'from arxiv.users import auth, legacy\n')] |
# Try to load dateutil if it is installed, otherwise use strptime
try:
import dateutil.parser
def custom_date(date_string):
return dateutil.parser.parse(date_string, fuzzy=True)
except ImportError:
import datetime
def custom_date(date_string):
return datetime.datetime.strptime(date_string, '%Y-%m-%d')
| [
"datetime.datetime.strptime"
] | [((272, 323), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_string', '"""%Y-%m-%d"""'], {}), "(date_string, '%Y-%m-%d')\n", (298, 323), False, 'import datetime\n')] |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, division, print_function
import os
import yaml
from tree.tree import Tree
# Inits the logging system. Only shell logging, and exception and warning catching.
# File logging can be started by calling log.start_file_logger(name).
from .misc import log
NAME = 'tree'
# Loads config
with open(os.path.dirname(__file__) + '/etc/{0}.cfg'.format(NAME)) as ff:
config = yaml.load(ff)
__version__ = '2.15.5dev'
| [
"os.path.dirname",
"yaml.load"
] | [((454, 467), 'yaml.load', 'yaml.load', (['ff'], {}), '(ff)\n', (463, 467), False, 'import yaml\n'), ((377, 402), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (392, 402), False, 'import os\n')] |
#!/usr/bin/env python
from generator.actions import Actions
import random
import struct
import ctypes
import string
MAX_IMG_SIZE=(1024 * 1024 * 128)
GET_BIT = lambda bit_idx: ((1<<(bit_idx)) - 1)
READONLY_FLAG = GET_BIT(1)
HIDDEN_FLAG = GET_BIT(2)
SYSTEM_FLAG = GET_BIT(3)
VOLUME_LBL_FLAG = GET_BIT(4)
SUBDIRECTORY_FLAG = GET_BIT(5)
ARCHIVE_FLAG = GET_BIT(6)
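# Monkey-patch this Actions instance with a seeded byte-substitution (xlat) table and
# wrap read()/write() so all traffic is translated through it.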
def kaprica_mixin(self):
if hasattr(self, 'xlat_seed'):
return
def xlat_seed(seed):
def hash_string(seed):
H = 0x314abc86
for c in seed:
H = (H * 37) & 0xffffffff
H ^= ord(c)
H = ((H << 13) ^ (H >> 19)) & 0xffffffff
return H
def hash_iterate(H):
H = (H * 3) & 0xffffffff
H = ((H << 13) ^ (H >> 19) ^ (H >> 21)) & 0xffffffff
return H
xmap = list(xrange(256))
xmap_inv = list(xrange(256))
state = hash_string(seed)
for i in xrange(255, 0, -1):
j = state % i
state = hash_iterate(state)
xmap[i], xmap[j] = xmap[j], xmap[i]
for i in xrange(256):
xmap_inv[xmap[i]] = i
self.xlat_map = xmap
self.xlat_map_inv = xmap_inv
self.xlat_seed = xlat_seed
self.xlat_map = None
self.xlat_map_inv = None
def xlat_string(s, inverse=False):
if inverse:
return ''.join([chr(self.xlat_map_inv[ord(c)]) for c in s])
return ''.join([chr(self.xlat_map[ord(c)]) for c in s])
self.xlat_string = xlat_string
def read(delim=None, length=None, expect=None):
if self.xlat_map:
if delim:
delim = self.xlat_string(delim)
if expect:
expect = self.xlat_string(expect)
return self._original_read(delim=delim, length=length, expect=expect)
self._original_read = self.read
self.read = read
def write(s):
if self.xlat_map:
if s:
s = self.xlat_string(s)
return self._original_write(s)
self._original_write = self.write
self.write = write
pack_bytes = lambda x: struct.pack("{0}B".format(len(x)), *x)
byte_str = lambda x: "\\x%02x" % int(x)
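# Trim the trailing 0x20 padding from an 11-byte directory-entry name.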
def hfname(name):
for x in reversed(xrange(len(name))):
if ord(name[x]) != 0x20:
return name[:x+1]
return None
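# Parent of a path, always ending in '/'; the root '/' has no parent.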
def parent_dir(path):
if path == '/':
return None
path = path if path[-1] != '/' else path[:-1]
return path.rpartition('/')[0] + '/'
def random_filename(max_size=11, min_size=3):
if random.randint(0,4):
characters = string.letters + string.digits
else:
characters = string.letters
max_size = max_size if max_size >= min_size else min_size
max_size = random.randint(min_size, max_size)
word = ("".join(random.choice(characters) for c in range(max_size))).upper()
while (len(word) < 11):
word += ' '
return word
def random_text(max_words=10, min_words=3):
max_words = max_words if max_words >= min_words else min_words
text = ''
for x in xrange(random.randint(min_words, max_words)):
        text += random_filename() + ' '
return text
def random_data(size):
buf = random_text()
while len(buf) < size:
buf += random_text()
buf = buf[:size]
return buf
def random_bytes(size):
return [random.randint(0,255) for x in xrange(size)]
def random_file_name():
filename = ''
for x in xrange(random.randint(0,5)):
filename += '/' + random_filename()
return filename
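# BPB-style boot header; serialize() emits the 512-byte boot sector of the image.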
class Bhdr(object):
def __init__(self):
self.bytes_per_sector = 512
self.sectors_per_cluster = 1
self.num_reserved_sectors = 32
self.num_cgfst = 1
self.root_cluster_idx = 2
self.total_sectors_small = 0
self.total_sectors_large = 0
self.sectors_per_cgfst = 0
def serialize(self):
fs_ser = struct.pack('3s', 3*'\x00')
fs_ser += struct.pack('8s', 'CGC_CB_FS')
fs_ser += struct.pack('B', self.sectors_per_cluster)
fs_ser += struct.pack('H', self.num_reserved_sectors)
fs_ser += struct.pack('H', self.bytes_per_sector)
fs_ser += struct.pack('B', self.num_cgfst)
fs_ser += struct.pack('H', 0)
fs_ser += struct.pack('H', 0)
fs_ser += struct.pack('B', 2)
fs_ser += struct.pack('H', 0)
fs_ser += struct.pack('H', 0)
fs_ser += struct.pack('H', 0)
fs_ser += struct.pack('I', 0)
fs_ser += struct.pack('I', self.total_sectors_large)
fs_ser += struct.pack('I', 2)
fs_ser += struct.pack('H', 0)
fs_ser += struct.pack('H', 0)
fs_ser += struct.pack('I', self.sectors_per_cgfst)
fs_ser += struct.pack('H', 1)
fs_ser += struct.pack('H', 6)
fs_ser += struct.pack('12s', 12*'\x00')
fs_ser += struct.pack('18s', 18*'\x00')
fs_ser += struct.pack('8s', 'CGFSPOLL')
fs_ser += struct.pack('419s', 419*'\x00')
fs_ser += struct.pack('B', 8)
fs_ser += struct.pack('2s', '\x44\x88')
return fs_ser
def size(self):
return 512
@classmethod
def random(cls):
hdr = cls()
hdr.sectors_per_cgfst = random.randint(8,16)
hdr.sectors_per_cgfst = 3
hdr.total_sectors_large = (hdr.num_reserved_sectors + hdr.sectors_per_cgfst * hdr.num_cgfst +
(hdr.sectors_per_cgfst * hdr.bytes_per_sector / 4) - 2)
return hdr
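# One directory entry: 11-byte space-padded name, attribute flags, starting cluster
# and size, plus a poller-side copy of the file's data.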
class FsFile(object):
def __init__(self, name=None):
if name:
self.name = name[0:11]
if len(name) < 11:
self.name += (11 - len(name)) * '\x20'
self.attrib = 0
self.reserved = 12 * '\x00'
self.starting_cluster = 0
self.size = 0
self.data = None
def is_free(self):
return ord(self.name[0]) == 0x00 or ord(self.name[0]) == 0xE5
def skip_entry(self):
return self.attrib == 0x0F or ord(self.name[0]) == 0x00 or ord(self.name[0]) == 0xE5
def is_directory(self):
return (self.attrib & SUBDIRECTORY_FLAG)
def is_file(self):
return not (self.attrib & SUBDIRECTORY_FLAG)
def compare_name(self, name):
if len(name) > 11:
return False
return hfname(self.name) == hfname(name)
def hname(self):
return hfname(self.name)
def read_from_file(self, read_fn, offset, num_bytes_to_read):
if self.skip_entry():
return
for x in self.data[offset:offset+num_bytes_to_read]:
read_fn(byte_str(x))
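    # Writes never extend the file: they are clamped at EOF when the offset lies in
    # EOF's 512-byte sector, otherwise at the end of the offset's sector.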
def write_to_file(self, offset, bytes_to_write):
if offset == 0:
bytes_to_write = bytes_to_write if bytes_to_write <= self.size else self.size
else:
size_offset_delta = self.size - offset if self.size / 512 == offset / 512 else 0
bytes_to_write = 512 - (offset % 512) if size_offset_delta <= 0 else size_offset_delta
bytes_to_write = 0 if self.size <= offset else bytes_to_write
write_buf = random_bytes(bytes_to_write)
self.data = self.data[0:offset] + write_buf + self.data[offset+bytes_to_write:]
return bytes_to_write, write_buf
def delete_entry(self):
self.name = chr(0xE5) + self.name[1:]
def read_entry(self, read_fn, pwd, recursive=False):
if self.skip_entry():
return
if self.is_file():
read_fn("File Name: {0}\x0e".format(self.name))
else:
read_fn("Directory Name: {0}\x0e".format(pwd + self.name + '/'))
@classmethod
def random(cls, is_file=None):
fs_file = cls(random_filename())
if is_file == None:
if (random.randint(0,5) == 0):
fs_file.attrib |= SUBDIRECTORY_FLAG
fs_file.size = 0
else:
fs_file.size = random.randint(100, 3000)
elif is_file == True:
fs_file.size = random.randint(100, 3000)
else:
fs_file.attrib |= SUBDIRECTORY_FLAG
fs_file.size = 0
return fs_file
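# In-memory mirror of the directory hierarchy; used both to pick random targets and
# to replay the listing output the service is expected to produce.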
class DirectoryTree(object):
def __init__(self, entry=None):
self.entry = entry
self.file_list = []
self.dir_list = []
self.print_list = []
def add_to_print_list(self, fs_entry):
for i, entry in enumerate(self.print_list):
if entry.is_free():
self.print_list[i] = fs_entry
return
self.print_list.append(fs_entry)
def length(self):
if self.entry.skip_entry():
return None
length = 0
for dir_ in self.dir_list:
if dir_.entry.skip_entry():
continue
length += 1
for file_ in self.file_list:
if file_.skip_entry():
continue
length += 1
return length
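    # Depth-first search for a random (path, entry) file pair; higher odds make a hit
    # less likely per file, and None is returned if nothing was picked.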
def get_random_file(self, pwd, odds=20):
if self.entry.skip_entry():
return None
pwd = pwd + self.entry.hname() + '/' if pwd and self.entry.hname()!= '/' else self.entry.hname()
for dir_ in self.dir_list:
if dir_.entry.skip_entry():
continue
random_file = dir_.get_random_file(pwd)
if random_file:
return random_file
for file_ in self.file_list:
if file_.skip_entry():
continue
if random.randint(1,odds) == 1:
return (pwd + file_.hname()), file_
return None
def get_random_dir(self, pwd, odds=20):
if self.entry.skip_entry():
return None
pwd = pwd + self.entry.hname() + '/' if pwd and self.entry.hname()!= '/' else self.entry.hname()
for dir_ in self.dir_list:
if dir_.entry.skip_entry():
continue
random_dir = dir_.get_random_dir(pwd)
if random_dir:
return random_dir
if random.randint(1,odds) == 1:
return pwd, self
return None
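    # Attach fs_entry under parent_dir, reusing the first freed slot before appending.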
def add_entry(self, pwd, parent_dir, fs_entry):
if self.entry.skip_entry():
return None
pwd = pwd + self.entry.hname() + '/' if pwd and self.entry.hname()!= '/' else self.entry.hname()
parent_dir = parent_dir if parent_dir[-1] == '/' else parent_dir + '/'
if pwd == parent_dir:
self.add_to_print_list(fs_entry)
if fs_entry.is_file():
for i, file_ in enumerate(self.file_list):
if file_.is_free():
self.file_list[i] = fs_entry
return True
self.file_list.append(fs_entry)
return True
else:
for i, dir_ in enumerate(self.dir_list):
if dir_.entry.is_free():
self.dir_list[i] = DirectoryTree(fs_entry)
return True
self.dir_list.append(DirectoryTree(fs_entry))
return True
for dir_ in self.dir_list:
if dir_.entry.skip_entry():
continue
if dir_.add_entry(pwd, parent_dir, fs_entry):
return True
return False
def find_entry(self, pwd, path):
if self.entry.skip_entry():
return None
pwd = pwd + self.entry.hname() + '/' if pwd and self.entry.hname()!= '/' else self.entry.hname()
is_directory = False
dirpath = path
if path[-1] == '/':
is_directory = True
dirpath = path[:-1]
for dir_ in self.dir_list:
if dir_.entry.skip_entry():
continue
if dirpath == pwd + self.entry.hname():
return self.entry
found_entry = dir_.find_entry(pwd, path)
if found_entry:
return found_entry
if not is_directory:
for file_ in self.file_list:
if file_.skip_entry():
continue
if path == pwd + file_.hname():
return file_
return None
def read_entry(self, read_fn, pwd=None, recursive=False):
if self.entry.skip_entry():
return
pwd = pwd + self.entry.hname() + '/' if pwd and self.entry.hname()!= '/' else self.entry.hname()
read_fn("Directory Name: {0}\x0e".format(pwd))
if self.length() == 0:
read_fn(" --Empty Directory--\x0e\x0e");
else:
for entry in self.print_list:
if not entry.is_free() and entry.is_directory():
read_fn(" Subdirectory Name: {0}\x0e".format(entry.name))
for entry in self.print_list:
if not entry.is_free() and entry.is_file():
read_fn(" File Name: {0}\x0e".format(entry.name))
read_fn("\x0e")
if recursive:
for entry in self.print_list:
if not entry.is_free() and entry.is_directory():
for dir_ in self.dir_list:
if dir_.entry == entry:
dir_.read_entry(read_fn, pwd, recursive)
break
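# Poller-side model of a freshly formatted filesystem image; serialize() yields the raw
# bytes sent to the service when mounting.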
class CgFs(object):
def __init__(self):
self.hdr = Bhdr.random()
self.fs_info_sector = struct.pack('512s', 512 * '\x00')
total_sectors = self.hdr.total_sectors_small if self.hdr.total_sectors_small else self.hdr.total_sectors_large;
self.raw_data_size = (total_sectors * self.hdr.bytes_per_sector) - self.hdr.size() - len(self.fs_info_sector)
self.num_clusters = (self.hdr.sectors_per_cgfst * self.hdr.bytes_per_sector) / 4
self.raw_data = list(struct.pack('{0}s'.format(self.raw_data_size), self.raw_data_size * '\x00'))
self.cluster_map_idx_0 = self.hdr.num_reserved_sectors * self.hdr.bytes_per_sector - self.hdr.size() - len(self.fs_info_sector)
self.cluster_region_idx_0 = (self.cluster_map_idx_0 +
((self.hdr.sectors_per_cgfst * self.hdr.num_cgfst) * self.hdr.bytes_per_sector))
self.cluster_size = self.hdr.bytes_per_sector * self.hdr.sectors_per_cluster
root_fs = FsFile()
root_fs.name = '/' + 10 * '\x20'
root_fs.starting_cluster = 2
self.root_dir = DirectoryTree(root_fs)
self.all_files = {}
self.all_dirs = {}
def serialize(self):
fs_ser = self.hdr.serialize()
fs_ser += self.fs_info_sector
fs_ser += ''.join(self.raw_data)
return fs_ser
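# The poller proper: each method below drives one menu option of the service's
# unmounted/mounted state machines.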
class FileMountTool(Actions):
def _find_file(self):
if not self.is_mounted or not self.fs:
return None
odds = 10
entry = self.fs.root_dir.get_random_file('', odds)
while not entry and odds:
odds -= 1
entry = self.fs.root_dir.get_random_file('', odds)
return entry
def _find_dir(self):
if not self.is_mounted or not self.fs:
return None
odds = 10
entry = self.fs.root_dir.get_random_dir(None, odds)
while not entry and odds:
odds -= 1
entry = self.fs.root_dir.get_random_dir(None, odds)
return entry
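    # Two 16-bit multiply-with-carry streams seeded from the magic page; lets the poller
    # predict the bytes the service uses for "Fill With Random Data".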
def _get_random_byte(self):
self.p1 = ctypes.c_uint32(ctypes.c_uint32(47070 * (self.p1 & 65535)).value + ctypes.c_uint32((self.p1 >> 16)).value).value
self.p2 = ctypes.c_uint32(ctypes.c_uint32(84818 * (self.p2 & 65535)).value + ctypes.c_uint32((self.p2 >> 16)).value).value
byte_idx = (ctypes.c_uint32((self.p1 << 16)).value + self.p2) % 4096;
return struct.unpack('<B', self.magic_page[byte_idx])[0]
def _read(self, data):
self.read(length=len(data), expect=data)
def start(self):
#self.delay(100)
self.is_mounted = False
kaprica_mixin(self)
self.xlat_seed('2281771')
self.p1 = ctypes.c_uint32(struct.unpack('<I', self.magic_page[400:404])[0]).value
self.p2 = ctypes.c_uint32(struct.unpack('<I', self.magic_page[404:408])[0]).value
self._read("MOUNT FILEMORE v 1.0\x0e");
self._read("\x0e\x0e");
self.fs = None
self.is_mounted = False
def unmounted(self):
self._read("Options:\x0e");
self._read("1. Mount File\x0e");
self._read("2. Exit\x0e");
self._read(":> ");
def mount(self):
self.write('1\x0e')
self.fs = CgFs()
self.is_mounted = True
self.write(self.fs.serialize())
self._read("Successfully mounted file system\x0e");
def mounted(self):
self._read("Options:\x0e");
self._read("1. List File/Directory\x0e");
self._read("2. Recursively List Files/Directories\x0e");
self._read("3. Preview File\x0e");
self._read("4. Read From File\x0e");
self._read("5. Write To File\x0e");
self._read("6. Update File Size\x0e");
self._read("7. Add File\x0e");
self._read("8. Add Directory\x0e");
self._read("9. Delete File\x0e");
self._read("10. Delete Directory\x0e");
self._read("11. View Mounted Filesystem Metadata\x0e");
self._read("12. Unmount Filesystem\x0e");
self._read("13. Exit\x0e");
self._read(":> ");
def list_files(self):
self.write('1\x0e')
self._read("Enter Path\x0e");
self._read(":> ");
if random.randint(1,2) == 2:
entry = self._find_file()
else:
entry = self._find_dir()
if not entry:
path = random_file_name()
self.write('\\' + path + '\x0e')
else:
path,fs_entry = entry
self.write(path + '\x0e')
fs_entry.read_entry(self._read, parent_dir(path))
def recursively_list_files(self):
self.write('2\x0e')
self._read("Enter Path To Recurse\x0e");
self._read(":> ");
entry = self._find_dir()
path,fs_entry = entry
self.write(path + '\x0e')
fs_entry.read_entry(self._read, parent_dir(path), True)
def preview_file(self):
self.write('3\x0e')
self._read("Enter Path Of File To Preview\x0e");
self._read(":> ");
entry = self._find_file()
if not entry:
path = random_file_name()
self.write('\\' + path + '\x0e')
else:
path,fs_entry = entry
self.write(path + '\x0e')
read_size = fs_entry.size if fs_entry.size < 512 else 512
fs_entry.read_from_file(self._read, 0, read_size)
self._read('\x0e')
def read_file(self):
self.write('4\x0e')
self._read("Enter Path Of File To Read From\x0e");
self._read(":> ");
entry = self._find_file()
if not entry:
path = random_file_name()
self.write('\\' + path + '\x0e')
offset = random.randint(1,1000)
read_size = random.randint(1,1000)
else:
path,fs_entry = entry
self.write(path + '\x0e')
if random.randint(1,5) == 1:
offset = random.randint(0,fs_entry.size / 2)
else:
offset = 0
read_size = random.randint(1, fs_entry.size)
self._read("Enter Offset\x0e");
self._read(":> ");
self.write("{0}\x0e".format(offset))
self._read("Enter Number Of Bytes To Read\x0e");
self._read(":> ");
self.write("{0}\x0e".format(read_size))
if entry:
fs_entry.read_from_file(self._read, offset, read_size)
self._read('\x0e')
def write_file(self):
self.write('5\x0e')
self._read("Enter Path Of File To Write To\x0e");
self._read(":> ");
entry = self._find_file()
write_buf = None
if not entry:
path = random_file_name()
self.write('\\' + path + '\x0e')
offset = random.randint(20,400)
write_size = random.randint(20,400)
else:
path,fs_entry = entry
self.write(path + '\x0e')
if random.randint(1,5) == 1:
offset = random.randint(0,fs_entry.size / 2)
else:
offset = 0
write_size = random.randint(1, fs_entry.size)
write_size, write_buf = fs_entry.write_to_file(offset, write_size)
self._read("Enter Offset\x0e");
self._read(":> ");
self.write("{0}\x0e".format(offset))
self._read("Enter Number Of Bytes To Write\x0e");
self._read(":> ");
self.write("{0}\x0e".format(write_size))
if write_size:
self._read("Enter File Data To Be Written: [%d bytes]\x0e" % write_size);
if write_buf:
self.write(pack_bytes(write_buf))
self._read("Successfully wrote: \x0e")
for x in write_buf:
self._read(byte_str(x))
self._read("\x0e")
else:
write_buf = write_size * [0]
self.write(pack_bytes(write_buf))
def update_file(self):
self.write('6\x0e')
self._read("Enter Path Of File To Update\x0e");
self._read(":> ");
entry = self._find_file()
write_buf = None
if not entry:
path = random_file_name()
self.write('\\' + path + '\x0e')
new_size = random.randint(1,1000)
else:
path,fs_entry = entry
self.write(path + '\x0e')
new_size = random.randint(fs_entry.size /2, fs_entry.size)
self._read("Enter New Size\x0e");
self._read(":> ");
self.write("{0}\x0e".format(new_size))
if entry:
self._read("File %s has a new file size of: %d\x0e" % (path, new_size))
fs_entry.size = new_size
fs_entry.data= fs_entry.data[:new_size]
else:
self._read("Could not update file size\x0e");
def add_file(self):
self.write('7\x0e')
self._read("Enter Parent Directory Of New File\x0e");
self._read(":> ");
entry = self._find_dir()
path,fs_entry = entry
new_file = FsFile.random(True)
self.write(path + '\x0e')
self._read("Enter Name Of New File\x0e");
self._read(":> ");
self.write(new_file.name + '\x0e')
self._read("Enter Size Of New File\x0e");
self._read(":> ");
self.write('{0}\x0e'.format(new_file.size))
self._read("Input File Data?\x0e");
self._read("1. Yes\x0e");
self._read("2. No\x0e");
self._read("3. Fill With Random Data\x0e")
self._read(":> ");
choice = random.randint(1,3)
self.write("{0}\x0e".format(choice))
if choice == 1:
self._read("Enter File Data To Be Written: [%d bytes]\x0e" % new_file.size);
new_file.data = random_bytes(new_file.size)
self.write(pack_bytes(new_file.data))
elif choice == 2:
new_file.data = new_file.size *[0x00]
elif choice == 3:
new_file.data = []
for x in xrange(new_file.size):
new_file.data.append(self._get_random_byte())
self._read("Successfully added file\x0e");
self._read("Parent dir: {0}\x0e".format(path))
self._read("New file name: {0}\x0e".format(new_file.name))
self.fs.root_dir.add_entry(None, path, new_file)
if choice != 2:
self._read("Data written to disk: \x0e")
for x in new_file.data:
self._read(byte_str(x))
self._read('\x0e')
def add_directory(self):
self.write('8\x0e')
self._read("Enter Parent Directory Of New Directory\x0e");
self._read(":> ");
entry = self._find_dir()
path,fs_entry = entry
new_file = FsFile.random(False)
self.write(path + '\x0e')
self._read("Enter Name Of New Directory\x0e");
self._read(":> ");
self.write(new_file.name + '\x0e')
self._read("Successfully added directory\x0e");
self._read("Parent dir: {0}\x0e".format(path))
self._read("New directory name: {0}\x0e".format(new_file.name))
self.fs.root_dir.add_entry(None, path, new_file)
def delete_file(self):
self.write('9\x0e')
self._read("Enter Path Of File To Delete\x0e");
self._read(":> ");
entry = self._find_file()
if not entry:
path = random_file_name()
self.write('\\' + path + '\x0e')
else:
path,fs_entry = entry
self.write(path + '\x0e')
fs_entry.delete_entry()
self._read("Successfully deleted file\x0e");
self._read("Deleted file: %s\x0e" % path);
def delete_directory(self):
entry = self._find_dir()
if not entry:
            path = random_file_name()
else:
path,fs_entry = entry
if path == '/':
self.write("99\x0e")
return
self.write('10\x0e')
self._read("Enter Path Of Directory To Delete\x0e");
self._read(":> ");
if not entry:
            path = random_file_name()
self.write('\\' + path + '\x0e')
else:
path,fs_entry = entry
self.write(path + '\x0e')
fs_entry.entry.delete_entry()
fs_entry.file_list = []
fs_entry.dir_list = []
fs_entry.print_list = []
self._read("Successfully deleted directory\x0e");
self._read("Deleted directory: %s\x0e" % path);
def view_metadata(self):
self.write('11\x0e')
def unmount(self):
self.write('12\x0e')
self._read("Successfully unmounted file system\x0e");
self._read("Could not unmount file system\x0e");
def exit(self):
#self.comment('nothing left to test')
if not self.is_mounted:
self.write('2\x0e')
else:
self.write('13\x0e')
self._read("Exiting....\x0e");
| [
"ctypes.c_uint32",
"random.choice",
"struct.pack",
"struct.unpack",
"random.randint"
] | [((2561, 2581), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (2575, 2581), False, 'import random\n'), ((2765, 2799), 'random.randint', 'random.randint', (['min_size', 'max_size'], {}), '(min_size, max_size)\n', (2779, 2799), False, 'import random\n'), ((3092, 3128), 'random.randint', 'random.randint', (['min_words', 'max_words'], {}), '(min_words, max_words)\n', (3106, 3128), False, 'import random\n'), ((3364, 3386), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3378, 3386), False, 'import random\n'), ((3472, 3492), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (3486, 3492), False, 'import random\n'), ((3929, 3958), 'struct.pack', 'struct.pack', (['"""3s"""', "(3 * '\\x00')"], {}), "('3s', 3 * '\\x00')\n", (3940, 3958), False, 'import struct\n'), ((3975, 4005), 'struct.pack', 'struct.pack', (['"""8s"""', '"""CGC_CB_FS"""'], {}), "('8s', 'CGC_CB_FS')\n", (3986, 4005), False, 'import struct\n'), ((4024, 4066), 'struct.pack', 'struct.pack', (['"""B"""', 'self.sectors_per_cluster'], {}), "('B', self.sectors_per_cluster)\n", (4035, 4066), False, 'import struct\n'), ((4085, 4128), 'struct.pack', 'struct.pack', (['"""H"""', 'self.num_reserved_sectors'], {}), "('H', self.num_reserved_sectors)\n", (4096, 4128), False, 'import struct\n'), ((4147, 4186), 'struct.pack', 'struct.pack', (['"""H"""', 'self.bytes_per_sector'], {}), "('H', self.bytes_per_sector)\n", (4158, 4186), False, 'import struct\n'), ((4205, 4237), 'struct.pack', 'struct.pack', (['"""B"""', 'self.num_cgfst'], {}), "('B', self.num_cgfst)\n", (4216, 4237), False, 'import struct\n'), ((4256, 4275), 'struct.pack', 'struct.pack', (['"""H"""', '(0)'], {}), "('H', 0)\n", (4267, 4275), False, 'import struct\n'), ((4294, 4313), 'struct.pack', 'struct.pack', (['"""H"""', '(0)'], {}), "('H', 0)\n", (4305, 4313), False, 'import struct\n'), ((4332, 4351), 'struct.pack', 'struct.pack', (['"""B"""', '(2)'], {}), "('B', 2)\n", (4343, 4351), False, 'import struct\n'), ((4370, 4389), 'struct.pack', 'struct.pack', (['"""H"""', '(0)'], {}), "('H', 0)\n", (4381, 4389), False, 'import struct\n'), ((4408, 4427), 'struct.pack', 'struct.pack', (['"""H"""', '(0)'], {}), "('H', 0)\n", (4419, 4427), False, 'import struct\n'), ((4446, 4465), 'struct.pack', 'struct.pack', (['"""H"""', '(0)'], {}), "('H', 0)\n", (4457, 4465), False, 'import struct\n'), ((4484, 4503), 'struct.pack', 'struct.pack', (['"""I"""', '(0)'], {}), "('I', 0)\n", (4495, 4503), False, 'import struct\n'), ((4522, 4564), 'struct.pack', 'struct.pack', (['"""I"""', 'self.total_sectors_large'], {}), "('I', self.total_sectors_large)\n", (4533, 4564), False, 'import struct\n'), ((4583, 4602), 'struct.pack', 'struct.pack', (['"""I"""', '(2)'], {}), "('I', 2)\n", (4594, 4602), False, 'import struct\n'), ((4621, 4640), 'struct.pack', 'struct.pack', (['"""H"""', '(0)'], {}), "('H', 0)\n", (4632, 4640), False, 'import struct\n'), ((4659, 4678), 'struct.pack', 'struct.pack', (['"""H"""', '(0)'], {}), "('H', 0)\n", (4670, 4678), False, 'import struct\n'), ((4697, 4737), 'struct.pack', 'struct.pack', (['"""I"""', 'self.sectors_per_cgfst'], {}), "('I', self.sectors_per_cgfst)\n", (4708, 4737), False, 'import struct\n'), ((4756, 4775), 'struct.pack', 'struct.pack', (['"""H"""', '(1)'], {}), "('H', 1)\n", (4767, 4775), False, 'import struct\n'), ((4794, 4813), 'struct.pack', 'struct.pack', (['"""H"""', '(6)'], {}), "('H', 6)\n", (4805, 4813), False, 'import struct\n'), ((4832, 4863), 'struct.pack', 'struct.pack', (['"""12s"""', 
"(12 * '\\x00')"], {}), "('12s', 12 * '\\x00')\n", (4843, 4863), False, 'import struct\n'), ((4880, 4911), 'struct.pack', 'struct.pack', (['"""18s"""', "(18 * '\\x00')"], {}), "('18s', 18 * '\\x00')\n", (4891, 4911), False, 'import struct\n'), ((4928, 4957), 'struct.pack', 'struct.pack', (['"""8s"""', '"""CGFSPOLL"""'], {}), "('8s', 'CGFSPOLL')\n", (4939, 4957), False, 'import struct\n'), ((4976, 5009), 'struct.pack', 'struct.pack', (['"""419s"""', "(419 * '\\x00')"], {}), "('419s', 419 * '\\x00')\n", (4987, 5009), False, 'import struct\n'), ((5026, 5045), 'struct.pack', 'struct.pack', (['"""B"""', '(8)'], {}), "('B', 8)\n", (5037, 5045), False, 'import struct\n'), ((5064, 5090), 'struct.pack', 'struct.pack', (['"""2s"""', '"""D\x88"""'], {}), "('2s', 'D\\x88')\n", (5075, 5090), False, 'import struct\n'), ((5247, 5268), 'random.randint', 'random.randint', (['(8)', '(16)'], {}), '(8, 16)\n', (5261, 5268), False, 'import random\n'), ((13358, 13391), 'struct.pack', 'struct.pack', (['"""512s"""', "(512 * '\\x00')"], {}), "('512s', 512 * '\\x00')\n", (13369, 13391), False, 'import struct\n'), ((22788, 22808), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (22802, 22808), False, 'import random\n'), ((9988, 10011), 'random.randint', 'random.randint', (['(1)', 'odds'], {}), '(1, odds)\n', (10002, 10011), False, 'import random\n'), ((15658, 15704), 'struct.unpack', 'struct.unpack', (['"""<B"""', 'self.magic_page[byte_idx]'], {}), "('<B', self.magic_page[byte_idx])\n", (15671, 15704), False, 'import struct\n'), ((17451, 17471), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (17465, 17471), False, 'import random\n'), ((18958, 18981), 'random.randint', 'random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (18972, 18981), False, 'import random\n'), ((19005, 19028), 'random.randint', 'random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (19019, 19028), False, 'import random\n'), ((19285, 19317), 'random.randint', 'random.randint', (['(1)', 'fs_entry.size'], {}), '(1, fs_entry.size)\n', (19299, 19317), False, 'import random\n'), ((20005, 20028), 'random.randint', 'random.randint', (['(20)', '(400)'], {}), '(20, 400)\n', (20019, 20028), False, 'import random\n'), ((20053, 20076), 'random.randint', 'random.randint', (['(20)', '(400)'], {}), '(20, 400)\n', (20067, 20076), False, 'import random\n'), ((20334, 20366), 'random.randint', 'random.randint', (['(1)', 'fs_entry.size'], {}), '(1, fs_entry.size)\n', (20348, 20366), False, 'import random\n'), ((21489, 21512), 'random.randint', 'random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (21503, 21512), False, 'import random\n'), ((21621, 21669), 'random.randint', 'random.randint', (['(fs_entry.size / 2)', 'fs_entry.size'], {}), '(fs_entry.size / 2, fs_entry.size)\n', (21635, 21669), False, 'import random\n'), ((7746, 7766), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (7760, 7766), False, 'import random\n'), ((7907, 7932), 'random.randint', 'random.randint', (['(100)', '(3000)'], {}), '(100, 3000)\n', (7921, 7932), False, 'import random\n'), ((7990, 8015), 'random.randint', 'random.randint', (['(100)', '(3000)'], {}), '(100, 3000)\n', (8004, 8015), False, 'import random\n'), ((9454, 9477), 'random.randint', 'random.randint', (['(1)', 'odds'], {}), '(1, odds)\n', (9468, 9477), False, 'import random\n'), ((19129, 19149), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (19143, 19149), False, 'import random\n'), ((19180, 19216), 'random.randint', 
'random.randint', (['(0)', '(fs_entry.size / 2)'], {}), '(0, fs_entry.size / 2)\n', (19194, 19216), False, 'import random\n'), ((20177, 20197), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (20191, 20197), False, 'import random\n'), ((20228, 20264), 'random.randint', 'random.randint', (['(0)', '(fs_entry.size / 2)'], {}), '(0, fs_entry.size / 2)\n', (20242, 20264), False, 'import random\n'), ((2820, 2845), 'random.choice', 'random.choice', (['characters'], {}), '(characters)\n', (2833, 2845), False, 'import random\n'), ((15585, 15615), 'ctypes.c_uint32', 'ctypes.c_uint32', (['(self.p1 << 16)'], {}), '(self.p1 << 16)\n', (15600, 15615), False, 'import ctypes\n'), ((15961, 16006), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'self.magic_page[400:404]'], {}), "('<I', self.magic_page[400:404])\n", (15974, 16006), False, 'import struct\n'), ((16051, 16096), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'self.magic_page[404:408]'], {}), "('<I', self.magic_page[404:408])\n", (16064, 16096), False, 'import struct\n'), ((15336, 15378), 'ctypes.c_uint32', 'ctypes.c_uint32', (['(47070 * (self.p1 & 65535))'], {}), '(47070 * (self.p1 & 65535))\n', (15351, 15378), False, 'import ctypes\n'), ((15387, 15417), 'ctypes.c_uint32', 'ctypes.c_uint32', (['(self.p1 >> 16)'], {}), '(self.p1 >> 16)\n', (15402, 15417), False, 'import ctypes\n'), ((15467, 15509), 'ctypes.c_uint32', 'ctypes.c_uint32', (['(84818 * (self.p2 & 65535))'], {}), '(84818 * (self.p2 & 65535))\n', (15482, 15509), False, 'import ctypes\n'), ((15518, 15548), 'ctypes.c_uint32', 'ctypes.c_uint32', (['(self.p2 >> 16)'], {}), '(self.p2 >> 16)\n', (15533, 15548), False, 'import ctypes\n')] |
# This sample tests the case where an assignment expression target
# is found within a function decorator or a function default value expression.
from typing import Any, Callable, List, TypeVar
_T = TypeVar("_T")
def decorator(*args: Any, **kwargs: Any) -> Callable[[_T], _T]:
...
@decorator(
[
walrus_target_1
for combination in [[1]]
if None not in (walrus_target_1 := set(combination))
],
)
def decorated(
x: List[str] = [x for x in ["a", "b"] if x in (walrus_target_2 := ["a", "b"])]
):
pass
reveal_type(walrus_target_1, expected_text="set[int]")
reveal_type(walrus_target_2, expected_text="list[str]")
| [
"typing.TypeVar"
] | [((202, 215), 'typing.TypeVar', 'TypeVar', (['"""_T"""'], {}), "('_T')\n", (209, 215), False, 'from typing import Any, Callable, List, TypeVar\n')] |
from time import time, sleep
from sys import stdout
from uuid import uuid4
from math import ceil
from lab.master.worker_info import WorkerInfoCollection, WorkerInfo
from lab.util.distributed_graph import DistributedGraph
from lab.util import message, sockets
from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, \
append_edge, get_number_of_lines, write_to_file, read_file
from lab.util.file_transfer import FileSender, UnexpectedChunkIndex, FileReceiver
from lab.util.server import Server
from lab.util.meta_data import MetaData
# You should create this file yourself in order to run the program using ssh
# by default, let shared_filesystem = 0
from lab.util.ssh_connection_info import shared_filesystem
class Master(Server):
def __init__(self, worker_hostnames: list, graph_path: str, worker_script: str, split_graph: bool, output_file: str,
scale: float, method: str = '', random_walkers_per_worker: int = 1, backup_size: int = 0, walking_iterations: int = 1,
show_debug_messages: bool = True):
started_at = time()
super().__init__()
self.worker_script = worker_script
self.worker_hostnames = worker_hostnames
self.output_file = output_file
self.method = method
self.graph_path = graph_path
self.scale = scale
self.random_walkers_per_worker = random_walkers_per_worker
self.backup_size = backup_size
self.walking_iterations = walking_iterations
self.show_debug_messages = show_debug_messages
self.random_walker_counts_received = 0
# Split graph into sub graphs and send them to the workers
self.worker_info_collection = WorkerInfoCollection()
self.create_workers(graph_path, split_graph)
# Can be used to handle incoming messages from the server
self.message_interface = {
message.ALIVE: self.handle_alive,
message.REGISTER: self.handle_register,
message.DEBUG: self.handle_debug,
message.JOB_COMPLETE: self.handle_job_complete,
message.RANDOM_WALKER_COUNT: self.handle_random_walker_count,
message.MISSING_CHUNK: self.handle_missing_chunk,
message.RECEIVED_FILE: self.handle_received_file,
message.START_SEND_FILE: self.handle_start_send_file,
message.END_SEND_FILE: self.handle_end_send_file,
message.FILE_CHUNK: self.handle_file_chunk,
message.PROGRESS: self.handle_progress
}
self.register_workers()
self.send_meta_data_to_workers()
if not shared_filesystem:
self.send_graphs_to_workers()
self.goal_size = self.get_goal_size()
print(f"Master setup time: {time() - started_at:0.5f}")
self.print_params()
# Run master until stopped
try:
self.run()
except KeyboardInterrupt:
self.terminate_workers()
self.server.terminate()
def print_params(self):
print(f"Method: {self.method}")
print(f"Scale: {self.scale}")
if self.method == "random_walk":
print(f"Number of workers: {len(self.worker_hostnames)}")
print(
f"Random walker per worker: {self.random_walkers_per_worker}")
print(f"Backup size: {self.backup_size}")
print(f"Walking iterations: {self.walking_iterations}")
print(f"Output file: {self.output_file}")
print(f"Goal size: {self.goal_size:0.5f}")
print()
def debug(self, message: str):
if self.show_debug_messages:
print(message)
def get_goal_size(self):
return self.worker_info_collection.get_total_number_of_edges() * self.scale
def send_graph_to_worker(self, worker_id):
data = read_file(
self.worker_info_collection[worker_id].input_sub_graph_path)
self.send_data_to_worker(worker_id, data, message.GRAPH)
self.debug(f'Worker {worker_id} received graph')
def send_graphs_to_workers(self):
for worker_id in self.worker_info_collection.keys():
self.send_graph_to_worker(worker_id)
def process_graph(self, graph_path: str, split_graph: bool) -> [MetaData]:
"""
Divides the graph into `number_of_workers` sub graphs and writes each chunk to a separate file
:param graph_path: Path to the file containing the entire graph
:param split_graph: Should the graph be split
:return: List of paths to the created chunks
"""
if split_graph:
# Split graph into self.n_workers sub graphs
self.split_graph(graph_path)
# Add reverse edges to sub graphs
self.make_sub_graphs_bidirectional(graph_path)
# Sort sub graphs
self.worker_info_collection.sort_sub_graphs()
# Update meta data
self.worker_info_collection.update_meta_data()
else:
# TODO do not duplicate data
for worker_id, hostname in enumerate(self.worker_hostnames):
self.worker_info_collection[worker_id] = WorkerInfo(
hostname=hostname,
worker_id=worker_id,
input_sub_graph_path=graph_path,
meta_data=MetaData(
worker_id=worker_id,
number_of_edges=get_number_of_lines(graph_path),
min_vertex=get_start_vertex(
get_first_line(graph_path)),
max_vertex=get_start_vertex(get_last_line(graph_path))
)
)
def split_graph(self, graph_path):
f = open(graph_path, "r")
for worker_id, sub_graph in enumerate(read_in_chunks(f, len(self.worker_hostnames))):
sub_graph_path = self.random_temp_file(f'input-worker-{worker_id}')
write_to_file(sub_graph_path, sub_graph)
self.worker_info_collection[worker_id] = WorkerInfo(
hostname=self.worker_hostnames[worker_id],
worker_id=worker_id,
input_sub_graph_path=sub_graph_path,
meta_data=MetaData(
worker_id=worker_id,
number_of_edges=get_number_of_lines(sub_graph_path),
min_vertex=get_start_vertex(
get_first_line(sub_graph_path)),
max_vertex=get_start_vertex(get_last_line(sub_graph_path))
)
)
f.close()
def make_sub_graphs_bidirectional(self, graph_path: str):
combined_meta_data = self.worker_info_collection.get_combined_meta_data()
f = open(graph_path, "r")
for edge in read_as_reversed_edges(f):
start_vertex = get_start_vertex(edge)
if start_vertex < combined_meta_data.bottom_layer.min_vertex:
worker_id = combined_meta_data.bottom_layer.worker_id
elif start_vertex > combined_meta_data.top_layer.max_vertex:
worker_id = combined_meta_data.top_layer.worker_id
else:
worker_id = combined_meta_data.get_worker_id_that_has_vertex(
start_vertex)
append_edge(
path=self.worker_info_collection[worker_id].input_sub_graph_path,
edge=edge
)
f.close()
def create_workers(self, graph_path, split_graph):
"""
Creates `self.n_workers` workers
:return: Dictionary containing info about each worker
"""
self.process_graph(graph_path, split_graph)
self.worker_info_collection.start_workers(
self.worker_script,
self.hostname,
self.port,
self.scale,
self.method,
self.random_walkers_per_worker,
self.backup_size,
self.walking_iterations
)
@staticmethod
def random_temp_file(prefix: str):
return f'/tmp/{prefix}-{str(uuid4())}.txt'
def terminate_workers(self):
"""
Terminates the alive workers
"""
self.broadcast(message.write(status=message.TERMINATE))
self.handle_queue()
# Wait for workers to shutdown their child-processes
# TODO use confirmation msg
sleep(0.5)
self.handle_queue()
self.worker_info_collection.terminate_workers()
def handle_alive(self, worker_id):
"""
Updates the last-alive value of the worker
:param worker_id: Id of worker
"""
self.worker_info_collection[worker_id].last_alive = time()
def handle_register(self, worker_id, host, port):
"""
Handles the registration of a worker
:param worker_id: Id of worker
:param host: Host of worker
:param port: Port of worker
"""
self.worker_info_collection[worker_id].meta_data.set_connection_info(
host, port)
self.handle_alive(worker_id)
self.debug(f"Registered worker {worker_id} on {host}:{port}")
@staticmethod
def handle_debug(worker_id, debug_message):
print(f"Worker {worker_id}: {debug_message}")
def handle_progress(self, worker_id, count):
self.worker_info_collection[worker_id].progress = count
def handle_job_complete(self, worker_id):
self.worker_info_collection[worker_id].job_complete = True
def handle_random_walker_count(self, worker_id, count):
self.debug(f'Worker {worker_id} has {count} random walkers')
self.worker_info_collection[worker_id].random_walker_count = count
self.random_walker_counts_received += 1
def register_workers(self):
for i in range(len(self.worker_info_collection)):
status, *args = self.get_message_from_queue()
self.handle_register(*args)
def send_message_to_worker(self, worker_id, message: bytes):
sockets.send_message(
*self.worker_info_collection[worker_id].meta_data.get_connection_info(),
message
)
def handle_missing_chunk(self, worker_id, file_type, index):
self.worker_info_collection[worker_id].file_senders[file_type].index = index
def handle_received_file(self, worker_id, file_type):
if self.worker_info_collection[worker_id].file_senders[file_type] is not None:
self.worker_info_collection[worker_id].file_senders[file_type].target_received_file = True
def send_data_to_worker(self, worker_id: int, data: list, file_type: int):
self.worker_info_collection[worker_id].file_senders[file_type] = FileSender(
worker_id, file_type, data)
file_sender = self.worker_info_collection[worker_id].file_senders[file_type]
self.send_message_to_worker(worker_id, message.write_start_send_file(
worker_id, file_type, len(file_sender.messages)))
while not file_sender.target_received_file or not file_sender.complete_file_send:
if file_sender.complete_file_send:
self.send_message_to_worker(
worker_id, message.write_end_send_file(worker_id, file_type))
sleep(0.1)
else:
self.send_message_to_worker(
worker_id, file_sender.get_next_message())
self.handle_queue()
self.worker_info_collection[worker_id].file_senders[file_type] = None
def handle_start_send_file(self, worker_id, file_type, number_of_chunks):
self.worker_info_collection[worker_id].file_receivers[file_type] = FileReceiver(
number_of_chunks)
def handle_file_chunk(self, worker_id, file_type, index, chunk):
if self.worker_info_collection[worker_id].file_receivers[file_type] is None:
return
try:
self.worker_info_collection[worker_id].file_receivers[file_type].receive_chunk(
index, chunk)
except UnexpectedChunkIndex as e:
self.send_message_to_worker(worker_id, message.write_missing_chunk(
worker_id, file_type, e.expected_index))
def handle_end_send_file(self, worker_id, file_type):
try:
self.worker_info_collection[worker_id].file_receivers[file_type].handle_end_send_file(
)
self.send_message_to_worker(
worker_id, message.write_received_file(worker_id, file_type))
if file_type == message.BACKUP:
self.handle_backup(worker_id)
except UnexpectedChunkIndex as e:
self.send_message_to_worker(worker_id, message.write_missing_chunk(
worker_id, file_type, e.expected_index))
except AttributeError:
return
def handle_backup(self, worker_id):
new_edges = self.worker_info_collection[worker_id].file_receivers[message.BACKUP].file
self.worker_info_collection[worker_id].backup += new_edges
self.worker_info_collection[worker_id].file_receivers[message.BACKUP] = None
def broadcast(self, message, allow_connection_refused: bool = False):
for worker_info in self.worker_info_collection.values():
if worker_info.is_registered():
if allow_connection_refused:
try:
sockets.send_message(
*worker_info.meta_data.get_connection_info(), message)
except ConnectionRefusedError:
continue
else:
sockets.send_message(
*worker_info.meta_data.get_connection_info(), message)
def send_meta_data_to_workers(self, allow_connection_refused: bool = False):
self.broadcast(message.write_meta_data([
worker_info.meta_data.to_dict() for worker_info in self.worker_info_collection.values()
]), allow_connection_refused)
def total_progress(self):
return self.worker_info_collection.get_progress()
def total_edges_received(self):
return self.worker_info_collection.get_total_edges_received()
def print_progress(self):
stdout.write('\r')
stdout.write(
f"{self.total_progress() / self.goal_size * 100:0.5f}% \t")
stdout.flush()
def wait_for_workers_to_complete(self):
while not self.worker_info_collection.all_workers_done():
sleep(0.01)
self.handle_queue()
def create_graph(self):
graph = DistributedGraph(distributed=False)
for worker_info in self.worker_info_collection.values():
graph.load_from_list(worker_info.backup)
return graph
def wait_for_random_walker_counts(self, expected_number: int):
while self.random_walker_counts_received < expected_number:
self.handle_queue()
sleep(0.01)
self.random_walker_counts_received = 0
def wait_for_worker_to_register(self, worker_id):
while not self.worker_info_collection[worker_id].is_registered():
self.handle_queue()
sleep(0.01)
def pause_workers(self):
self.broadcast(message.write_worker_failed(),
allow_connection_refused=True)
def continue_workers(self):
self.broadcast(message.write_continue(), allow_connection_refused=True)
def get_failed_workers(self):
if len([worker_id for worker_id in self.worker_info_collection.keys() if not self.worker_info_collection[worker_id].is_alive()]) == 0:
return []
return [worker_id for worker_id, worker_info in self.worker_info_collection.items() if not sockets.is_alive(*worker_info.meta_data.get_connection_info())]
def control_workers(self):
started_at = time()
failed_workers = self.get_failed_workers()
if len(failed_workers) == 0:
return
self.debug("\n\n")
print("ERROR: A WORKER CRASHED")
# Update connection info
for worker_id in failed_workers:
self.debug(f"Worker {worker_id} died")
self.worker_info_collection[worker_id].meta_data.set_connection_info(
None, None)
self.worker_info_collection[worker_id].file_senders[message.GRAPH] = None
self.worker_info_collection[worker_id].file_senders[message.BACKUP] = None
self.worker_info_collection[worker_id].file_receivers[message.GRAPH] = None
self.worker_info_collection[worker_id].file_receivers[message.BACKUP] = None
self.worker_info_collection[worker_id].process = None
self.worker_info_collection[worker_id].random_walker_count = 0
self.debug(f"Pausing workers")
self.pause_workers()
self.debug("Waiting for random walker counts")
self.wait_for_random_walker_counts(
len(self.worker_info_collection) - len(failed_workers))
random_walkers_to_restart = len(
self.worker_info_collection) * self.random_walkers_per_worker - self.worker_info_collection.random_walker_count()
for worker_id in failed_workers:
self.debug(f"Restarting worker {worker_id}")
number_of_random_walkers = ceil(
random_walkers_to_restart / len(failed_workers))
if number_of_random_walkers < 0:
number_of_random_walkers = 0
self.worker_info_collection[worker_id].start_worker(
worker_script=self.worker_script,
hostname_master=self.hostname,
port_master=self.port,
scale=self.scale,
method=self.method,
number_of_random_walkers=number_of_random_walkers,
load_backup=1,
backup_size=self.backup_size,
walking_iterations=self.walking_iterations
)
random_walkers_to_restart -= number_of_random_walkers
for worker_id in failed_workers:
self.debug(f"Waiting for worker {worker_id} to register")
self.wait_for_worker_to_register(worker_id)
self.debug(f"Sending updated meta-data to workers")
self.send_meta_data_to_workers(allow_connection_refused=True)
for worker_id in failed_workers:
self.send_graph_to_worker(worker_id)
for worker_id in failed_workers:
if len(self.worker_info_collection[worker_id].backup) > 0:
self.send_data_to_worker(
worker_id, self.worker_info_collection[worker_id].backup[:], message.BACKUP)
self.debug(f'Worker {worker_id} received backup')
self.continue_workers()
self.debug(f"Restart successful\n")
print(f"Restarted workers after {time() - started_at} seconds")
def run(self):
"""
Runs the master
"""
self.broadcast(message.write_continue())
started_at = time()
if self.method == "random_walk":
while self.total_progress() < self.goal_size:
sleep(0.01)
self.handle_queue()
if self.show_debug_messages:
self.print_progress()
self.control_workers()
self.broadcast(message.write_job(message.FINISH_JOB))
self.wait_for_workers_to_complete()
print(f"\nEdges received: {self.total_edges_received()}")
print(f"Job complete after {time() - started_at:0.5f}")
self.terminate_workers()
self.server.terminate()
graph = self.create_graph()
graph.write_to_file(self.output_file)
print(f"Master runtime: {time() - started_at:0.5f}")
| [
"time.sleep",
"lab.master.worker_info.WorkerInfoCollection",
"lab.util.file_io.get_last_line",
"lab.util.message.write_end_send_file",
"lab.util.message.write_job",
"lab.util.message.write_worker_failed",
"lab.util.message.write_continue",
"lab.util.file_io.write_to_file",
"lab.util.file_io.read_file",
"sys.stdout.flush",
"lab.util.message.write_missing_chunk",
"lab.util.file_io.get_first_line",
"lab.util.file_io.get_number_of_lines",
"lab.util.distributed_graph.DistributedGraph",
"uuid.uuid4",
"lab.util.file_io.append_edge",
"lab.util.message.write",
"time.time",
"lab.util.file_io.read_as_reversed_edges",
"lab.util.file_transfer.FileSender",
"lab.util.file_transfer.FileReceiver",
"lab.util.message.write_received_file",
"lab.util.file_io.get_start_vertex",
"sys.stdout.write"
] | [((1131, 1137), 'time.time', 'time', ([], {}), '()\n', (1135, 1137), False, 'from time import time, sleep\n'), ((1757, 1779), 'lab.master.worker_info.WorkerInfoCollection', 'WorkerInfoCollection', ([], {}), '()\n', (1777, 1779), False, 'from lab.master.worker_info import WorkerInfoCollection, WorkerInfo\n'), ((3884, 3954), 'lab.util.file_io.read_file', 'read_file', (['self.worker_info_collection[worker_id].input_sub_graph_path'], {}), '(self.worker_info_collection[worker_id].input_sub_graph_path)\n', (3893, 3954), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((6854, 6879), 'lab.util.file_io.read_as_reversed_edges', 'read_as_reversed_edges', (['f'], {}), '(f)\n', (6876, 6879), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((8451, 8461), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (8456, 8461), False, 'from time import time, sleep\n'), ((8762, 8768), 'time.time', 'time', ([], {}), '()\n', (8766, 8768), False, 'from time import time, sleep\n'), ((10770, 10808), 'lab.util.file_transfer.FileSender', 'FileSender', (['worker_id', 'file_type', 'data'], {}), '(worker_id, file_type, data)\n', (10780, 10808), False, 'from lab.util.file_transfer import FileSender, UnexpectedChunkIndex, FileReceiver\n'), ((11731, 11761), 'lab.util.file_transfer.FileReceiver', 'FileReceiver', (['number_of_chunks'], {}), '(number_of_chunks)\n', (11743, 11761), False, 'from lab.util.file_transfer import FileSender, UnexpectedChunkIndex, FileReceiver\n'), ((14292, 14310), 'sys.stdout.write', 'stdout.write', (["'\\r'"], {}), "('\\r')\n", (14304, 14310), False, 'from sys import stdout\n'), ((14413, 14427), 'sys.stdout.flush', 'stdout.flush', ([], {}), '()\n', (14425, 14427), False, 'from sys import stdout\n'), ((14640, 14675), 'lab.util.distributed_graph.DistributedGraph', 'DistributedGraph', ([], {'distributed': '(False)'}), '(distributed=False)\n', (14656, 14675), False, 'from lab.util.distributed_graph import DistributedGraph\n'), ((15908, 15914), 'time.time', 'time', ([], {}), '()\n', (15912, 15914), False, 'from time import time, sleep\n'), ((19076, 19082), 'time.time', 'time', ([], {}), '()\n', (19080, 19082), False, 'from time import time, sleep\n'), ((6013, 6053), 'lab.util.file_io.write_to_file', 'write_to_file', (['sub_graph_path', 'sub_graph'], {}), '(sub_graph_path, sub_graph)\n', (6026, 6053), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((6908, 6930), 'lab.util.file_io.get_start_vertex', 'get_start_vertex', (['edge'], {}), '(edge)\n', (6924, 6930), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((7359, 7452), 'lab.util.file_io.append_edge', 'append_edge', ([], {'path': 'self.worker_info_collection[worker_id].input_sub_graph_path', 'edge': 'edge'}), '(path=self.worker_info_collection[worker_id].\n input_sub_graph_path, edge=edge)\n', (7370, 7452), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), 
((8277, 8316), 'lab.util.message.write', 'message.write', ([], {'status': 'message.TERMINATE'}), '(status=message.TERMINATE)\n', (8290, 8316), False, 'from lab.util import message, sockets\n'), ((14551, 14562), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (14556, 14562), False, 'from time import time, sleep\n'), ((14996, 15007), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (15001, 15007), False, 'from time import time, sleep\n'), ((15228, 15239), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (15233, 15239), False, 'from time import time, sleep\n'), ((15293, 15322), 'lab.util.message.write_worker_failed', 'message.write_worker_failed', ([], {}), '()\n', (15320, 15322), False, 'from lab.util import message, sockets\n'), ((15434, 15458), 'lab.util.message.write_continue', 'message.write_continue', ([], {}), '()\n', (15456, 15458), False, 'from lab.util import message, sockets\n'), ((19029, 19053), 'lab.util.message.write_continue', 'message.write_continue', ([], {}), '()\n', (19051, 19053), False, 'from lab.util import message, sockets\n'), ((11328, 11338), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (11333, 11338), False, 'from time import time, sleep\n'), ((12517, 12566), 'lab.util.message.write_received_file', 'message.write_received_file', (['worker_id', 'file_type'], {}), '(worker_id, file_type)\n', (12544, 12566), False, 'from lab.util import message, sockets\n'), ((19199, 19210), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (19204, 19210), False, 'from time import time, sleep\n'), ((19402, 19439), 'lab.util.message.write_job', 'message.write_job', (['message.FINISH_JOB'], {}), '(message.FINISH_JOB)\n', (19419, 19439), False, 'from lab.util import message, sockets\n'), ((8143, 8150), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (8148, 8150), False, 'from uuid import uuid4\n'), ((11261, 11310), 'lab.util.message.write_end_send_file', 'message.write_end_send_file', (['worker_id', 'file_type'], {}), '(worker_id, file_type)\n', (11288, 11310), False, 'from lab.util import message, sockets\n'), ((12178, 12245), 'lab.util.message.write_missing_chunk', 'message.write_missing_chunk', (['worker_id', 'file_type', 'e.expected_index'], {}), '(worker_id, file_type, e.expected_index)\n', (12205, 12245), False, 'from lab.util import message, sockets\n'), ((12753, 12820), 'lab.util.message.write_missing_chunk', 'message.write_missing_chunk', (['worker_id', 'file_type', 'e.expected_index'], {}), '(worker_id, file_type, e.expected_index)\n', (12780, 12820), False, 'from lab.util import message, sockets\n'), ((2815, 2821), 'time.time', 'time', ([], {}), '()\n', (2819, 2821), False, 'from time import time, sleep\n'), ((18907, 18913), 'time.time', 'time', ([], {}), '()\n', (18911, 18913), False, 'from time import time, sleep\n'), ((19588, 19594), 'time.time', 'time', ([], {}), '()\n', (19592, 19594), False, 'from time import time, sleep\n'), ((19798, 19804), 'time.time', 'time', ([], {}), '()\n', (19802, 19804), False, 'from time import time, sleep\n'), ((6382, 6417), 'lab.util.file_io.get_number_of_lines', 'get_number_of_lines', (['sub_graph_path'], {}), '(sub_graph_path)\n', (6401, 6417), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((5491, 5522), 'lab.util.file_io.get_number_of_lines', 'get_number_of_lines', (['graph_path'], {}), '(graph_path)\n', (5510, 5522), False, 'from lab.util.file_io import read_in_chunks, 
get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((6492, 6522), 'lab.util.file_io.get_first_line', 'get_first_line', (['sub_graph_path'], {}), '(sub_graph_path)\n', (6506, 6522), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((6573, 6602), 'lab.util.file_io.get_last_line', 'get_last_line', (['sub_graph_path'], {}), '(sub_graph_path)\n', (6586, 6602), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((5605, 5631), 'lab.util.file_io.get_first_line', 'get_first_line', (['graph_path'], {}), '(graph_path)\n', (5619, 5631), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n'), ((5686, 5711), 'lab.util.file_io.get_last_line', 'get_last_line', (['graph_path'], {}), '(graph_path)\n', (5699, 5711), False, 'from lab.util.file_io import read_in_chunks, get_start_vertex, get_first_line, get_last_line, read_as_reversed_edges, append_edge, get_number_of_lines, write_to_file, read_file\n')] |
#!/usr/bin/env python3
import os
import pathlib
import subprocess
import platform
import sys
from colors import prGreen, prCyan, prRed
from exceptions import CommandException, CompileException
# --------------------------------------------------------------------------- #
# --- Classes --------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
class Command:
def __init__(self, cmd):
self.name = 'nvcc'
self.parameters = cmd[1:]
def executeOriginalCommand(self):
try:
cmd = [self.name] + self.parameters
subprocess.run(' '.join(cmd), shell=True, check=True)
except subprocess.CalledProcessError as e:
prRed(e)
if __name__ == '__main__':
cmd = Command(sys.argv)
cmd.executeOriginalCommand()
| [
"colors.prRed"
] | [((733, 741), 'colors.prRed', 'prRed', (['e'], {}), '(e)\n', (738, 741), False, 'from colors import prGreen, prCyan, prRed\n')] |
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, output_file, show
output_file("hbar_stack.html")
source = ColumnDataSource(data=dict(
y=[1, 2, 3, 4, 5],
x1=[1, 2, 4, 3, 4],
x2=[1, 4, 2, 2, 3],
))
p = figure(width=400, height=400)
p.hbar_stack(['x1', 'x2'], y='y', height=0.8, color=("grey", "lightgrey"), source=source)
show(p)
| [
"bokeh.plotting.show",
"bokeh.plotting.figure",
"bokeh.plotting.output_file"
] | [((96, 126), 'bokeh.plotting.output_file', 'output_file', (['"""hbar_stack.html"""'], {}), "('hbar_stack.html')\n", (107, 126), False, 'from bokeh.plotting import figure, output_file, show\n'), ((243, 272), 'bokeh.plotting.figure', 'figure', ([], {'width': '(400)', 'height': '(400)'}), '(width=400, height=400)\n', (249, 272), False, 'from bokeh.plotting import figure, output_file, show\n'), ((365, 372), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (369, 372), False, 'from bokeh.plotting import figure, output_file, show\n')] |
#!/usr/bin/env python
#coding:utf-8
# Standard Library
import time
from itertools import izip
# Third Party
from scipy.stats import multinomial
# Self-made Modules
from __init__ import *
from modules.spconavi_math import *
from modules import dataset, converter
import rospy
import roslib.packages
convert_func = converter.Converter()
dataset_func = dataset.DataSet()
trialname = "3LDK_01"
iteration = 1
sample = 0
init_position_num = 0
speech_num = 3 #0, 1, 2, 3
class GeneratePathWeightMap():
def __init__(self):
pass
def calculate_path_weight_map(self):
##FullPath of folder
#filename = outputfolder_SIG + trialname #+ "/"
filename = "/root/RULO/catkin_ws/src/spco2_mlda_problog/spconavi_ros/data/3LDK_01"
#print(str(roslib.packages.get_pkg_dir("spconavi_ros")))
print (filename, iteration, sample)
outputfile = filename + navigation_folder #outputfolder + trialname + navigation_folder
outputname = outputfile + "T"+str(T_horizon)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
gridmap = dataset_func.ReadMap(outputfile)
##Read the cost map file
costmap = dataset_func.ReadCostMap(outputfile)
#Change the costmap to the probabilistic costmap
CostMapProb = convert_func.CostMapProb_jit(gridmap, costmap)
THETA = dataset_func.ReadParameters(iteration, sample, filename, trialname)
W_index = THETA[1]
##Read the speech file
#speech_file = ReadSpeech(int(speech_num))
BoW = [Goal_Word[int(speech_num)]]
if ( "AND" in BoW ):
BoW = Example_AND
elif ( "OR" in BoW ):
BoW = Example_OR
Otb_B = [int(W_index[i] in BoW) * N_best for i in xrange(len(W_index))]
print ("BoW:", Otb_B)
while (sum(Otb_B) == 0):
print("[ERROR] BoW is all zero.", W_index)
word_temp = raw_input("Please word?>")
Otb_B = [int(W_index[i] == word_temp) * N_best for i in xrange(len(W_index))]
print("BoW (NEW):", Otb_B)
S_Nbest = Otb_B
        #Unpack THETA
W, W_index, Mu, Sig, Pi, Phi_l, K, L = THETA
#length and width of the MAP cells
map_length = len(CostMapProb) #len(costmap)
map_width = len(CostMapProb[0]) #len(costmap[0])
print ("MAP[length][width]:",map_length,map_width)
        #Pre-calculation: compute whatever can be computed in advance
        LookupTable_ProbCt = np.array([multinomial.pmf(S_Nbest, sum(S_Nbest), W[c])*Pi[c] for c in xrange(L)]) #Probability values of the distribution p(St|W_Ct)×p(Ct|Pi) for each Ct
###SaveLookupTable(LookupTable_ProbCt, outputfile)
        ###LookupTable_ProbCt = ReadLookupTable(outputfile) #Read the result from the Pre-calculation file (may not differ much from recomputing)
print ("Please wait for PostProbMap")
output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
        #if (os.path.isfile(output) == False) or (UPDATE_PostProbMap == 1): #Do not create it if the file already exists
        #PathWeightMap = PostProbMap_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) #Could possibly be sped up with multiple CPUs #CostMapProb * PostProbMap #For later processing, do not take the log at this point
PathWeightMap = convert_func.PostProbMap_nparray_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) #,IndexMap)
        #[TEST] Save the calculation result first
dataset_func.SaveProbMap(PathWeightMap, outputfile, speech_num)
print ("[Done] PathWeightMap.")
if __name__ == '__main__':
print ("Ctrl-C is the end of process.")
rospy.init_node('generate_path_weight_map', anonymous=True)
calculate_path_weight = GeneratePathWeightMap()
calculate_path_weight.calculate_path_weight_map()
#rospy.spin() | [
"rospy.init_node",
"modules.converter.Converter",
"modules.dataset.DataSet"
] | [((317, 338), 'modules.converter.Converter', 'converter.Converter', ([], {}), '()\n', (336, 338), False, 'from modules import dataset, converter\n'), ((354, 371), 'modules.dataset.DataSet', 'dataset.DataSet', ([], {}), '()\n', (369, 371), False, 'from modules import dataset, converter\n'), ((3550, 3609), 'rospy.init_node', 'rospy.init_node', (['"""generate_path_weight_map"""'], {'anonymous': '(True)'}), "('generate_path_weight_map', anonymous=True)\n", (3565, 3609), False, 'import rospy\n')] |
import argparse
import json
import logging
from datetime import datetime
from src.server.cea_608_encoder.byte_pair_generator import consume
logging.basicConfig(format='%(levelname)s: %(asctime)s: %(message)s')
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-f",
"--file_path",
help='Path to JSON file',
type=str,
required=True)
args = parser.parse_args()
try:
now = datetime.now()
time_stamp = now.strftime("%m.%d.%Y_%H-%M-%S")
with open(args.file_path, 'r') as file:
caption_data = json.load(file)
optional_errors = consume(caption_data,time_stamp)
if optional_errors is not None:
print('\n')
for err in optional_errors:
print(err + '\n')
except IOError as err:
logging.error('Error trying to read in the file.', exc_info=err)
except json.decoder.JSONDecodeError as err:
logging.error('Error trying to parse the JSON file.', exc_info=err)
except Exception as err:
logging.error('Error trying to encode caption data.', exc_info=err)
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"src.server.cea_608_encoder.byte_pair_generator.consume",
"argparse.ArgumentParser",
"datetime.datetime.now",
"json.load",
"logging.error"
] | [((142, 211), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(asctime)s: %(message)s"""'}), "(format='%(levelname)s: %(asctime)s: %(message)s')\n", (161, 211), False, 'import logging\n'), ((239, 264), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (262, 264), False, 'import argparse\n'), ((514, 528), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (526, 528), False, 'from datetime import datetime\n'), ((659, 674), 'json.load', 'json.load', (['file'], {}), '(file)\n', (668, 674), False, 'import json\n'), ((705, 738), 'src.server.cea_608_encoder.byte_pair_generator.consume', 'consume', (['caption_data', 'time_stamp'], {}), '(caption_data, time_stamp)\n', (712, 738), False, 'from src.server.cea_608_encoder.byte_pair_generator import consume\n'), ((934, 998), 'logging.error', 'logging.error', (['"""Error trying to read in the file."""'], {'exc_info': 'err'}), "('Error trying to read in the file.', exc_info=err)\n", (947, 998), False, 'import logging\n'), ((1055, 1122), 'logging.error', 'logging.error', (['"""Error trying to parse the JSON file."""'], {'exc_info': 'err'}), "('Error trying to parse the JSON file.', exc_info=err)\n", (1068, 1122), False, 'import logging\n'), ((1160, 1227), 'logging.error', 'logging.error', (['"""Error trying to encode caption data."""'], {'exc_info': 'err'}), "('Error trying to encode caption data.', exc_info=err)\n", (1173, 1227), False, 'import logging\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Prefix DAG permissions.
Revision ID: 849da589634d
Revises: 45ba3f1493b9
Create Date: 2020-10-01 17:25:10.006322
"""
from flask_appbuilder import SQLA
from flask_appbuilder.security.sqla.models import Permission, PermissionView, ViewMenu
from airflow import settings
from airflow.security import permissions
# revision identifiers, used by Alembic.
revision = '849da589634d'
down_revision = '45ba3f1493b9'
branch_labels = None
depends_on = None
def prefix_individual_dag_permissions(session):
dag_perms = ['can_dag_read', 'can_dag_edit']
prefix = "DAG:"
permission_view_menus = (
session.query(PermissionView)
.join(Permission)
.filter(Permission.name.in_(dag_perms))
.join(ViewMenu)
.filter(ViewMenu.name != 'all_dags')
.filter(ViewMenu.name.notlike(prefix + '%'))
.all()
)
view_menu_ids = {pvm.view_menu.id for pvm in permission_view_menus}
vm_query = session.query(ViewMenu).filter(ViewMenu.id.in_(view_menu_ids))
vm_query.update({ViewMenu.name: prefix + ViewMenu.name}, synchronize_session=False)
session.commit()
def get_or_create_dag_resource(session):
dag_resource = get_resource_query(session, permissions.RESOURCE_DAG).first()
if dag_resource:
return dag_resource
dag_resource = ViewMenu()
dag_resource.name = permissions.RESOURCE_DAG
session.add(dag_resource)
session.commit()
return dag_resource
def get_or_create_action(session, action_name):
action = get_action_query(session, action_name).first()
if action:
return action
action = Permission()
action.name = action_name
session.add(action)
session.commit()
return action
def get_resource_query(session, resource_name):
return session.query(ViewMenu).filter(ViewMenu.name == resource_name)
def get_action_query(session, action_name):
return session.query(Permission).filter(Permission.name == action_name)
def get_pv_with_action_query(session, action):
return session.query(PermissionView).filter(PermissionView.permission == action)
def get_pv_with_resource_query(session, resource):
return session.query(PermissionView).filter(PermissionView.view_menu_id == resource.id)
def update_pv_action(session, pv_query, action):
pv_query.update({PermissionView.permission_id: action.id}, synchronize_session=False)
session.commit()
def get_pv(session, resource, action):
return (
session.query(PermissionView)
.filter(PermissionView.view_menu == resource)
.filter(PermissionView.permission == action)
.first()
)
def update_pv_resource(session, pv_query, resource):
for pv in pv_query.all():
if not get_pv(session, resource, pv.permission):
pv.view_menu = resource
else:
session.delete(pv)
session.commit()
def migrate_to_new_dag_permissions(db):
# Prefix individual dag perms with `DAG:`
prefix_individual_dag_permissions(db.session)
# Update existing PVs to use `can_read` instead of `can_dag_read`
can_dag_read_action = get_action_query(db.session, 'can_dag_read').first()
old_can_dag_read_pvs = get_pv_with_action_query(db.session, can_dag_read_action)
can_read_action = get_or_create_action(db.session, 'can_read')
update_pv_action(db.session, old_can_dag_read_pvs, can_read_action)
# Update existing PVs to use `can_edit` instead of `can_dag_edit`
can_dag_edit_action = get_action_query(db.session, 'can_dag_edit').first()
old_can_dag_edit_pvs = get_pv_with_action_query(db.session, can_dag_edit_action)
can_edit_action = get_or_create_action(db.session, 'can_edit')
update_pv_action(db.session, old_can_dag_edit_pvs, can_edit_action)
# Update existing PVs for `all_dags` resource to use `DAGs` resource.
all_dags_resource = get_resource_query(db.session, 'all_dags').first()
if all_dags_resource:
old_all_dags_pv = get_pv_with_resource_query(db.session, all_dags_resource)
dag_resource = get_or_create_dag_resource(db.session)
update_pv_resource(db.session, old_all_dags_pv, dag_resource)
# Delete the `all_dags` resource
db.session.delete(all_dags_resource)
# Delete `can_dag_read` action
if can_dag_read_action:
db.session.delete(can_dag_read_action)
# Delete `can_dag_edit` action
if can_dag_edit_action:
db.session.delete(can_dag_edit_action)
db.session.commit()
def upgrade():
db = SQLA()
db.session = settings.Session
migrate_to_new_dag_permissions(db)
db.session.commit()
db.session.close()
def downgrade():
pass
| [
"flask_appbuilder.security.sqla.models.Permission",
"flask_appbuilder.security.sqla.models.Permission.name.in_",
"flask_appbuilder.security.sqla.models.ViewMenu.name.notlike",
"flask_appbuilder.security.sqla.models.ViewMenu.id.in_",
"flask_appbuilder.security.sqla.models.ViewMenu",
"flask_appbuilder.SQLA"
] | [((2096, 2106), 'flask_appbuilder.security.sqla.models.ViewMenu', 'ViewMenu', ([], {}), '()\n', (2104, 2106), False, 'from flask_appbuilder.security.sqla.models import Permission, PermissionView, ViewMenu\n'), ((2393, 2405), 'flask_appbuilder.security.sqla.models.Permission', 'Permission', ([], {}), '()\n', (2403, 2405), False, 'from flask_appbuilder.security.sqla.models import Permission, PermissionView, ViewMenu\n'), ((5292, 5298), 'flask_appbuilder.SQLA', 'SQLA', ([], {}), '()\n', (5296, 5298), False, 'from flask_appbuilder import SQLA\n'), ((1762, 1792), 'flask_appbuilder.security.sqla.models.ViewMenu.id.in_', 'ViewMenu.id.in_', (['view_menu_ids'], {}), '(view_menu_ids)\n', (1777, 1792), False, 'from flask_appbuilder.security.sqla.models import Permission, PermissionView, ViewMenu\n'), ((1586, 1621), 'flask_appbuilder.security.sqla.models.ViewMenu.name.notlike', 'ViewMenu.name.notlike', (["(prefix + '%')"], {}), "(prefix + '%')\n", (1607, 1621), False, 'from flask_appbuilder.security.sqla.models import Permission, PermissionView, ViewMenu\n'), ((1469, 1499), 'flask_appbuilder.security.sqla.models.Permission.name.in_', 'Permission.name.in_', (['dag_perms'], {}), '(dag_perms)\n', (1488, 1499), False, 'from flask_appbuilder.security.sqla.models import Permission, PermissionView, ViewMenu\n')] |