from setuptools import setup
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setup(
name="wordfilter",
version="0.2.7",
license="MIT",
author="<NAME>",
description="""A small module meant for use in text generators that lets
you filter strings for bad words.""",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dariusk/wordfilter",
packages=["wordfilter"],
package_dir={"wordfilter": "lib"},
package_data={"wordfilter": ["badwords.json"]},
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"Topic :: Communications",
"Topic :: Text Processing :: Linguistic",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Intended Audience :: Developers"
],
python_requires=">=3"
)
APIs used: setuptools.setup
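
Because package_dir maps the "wordfilter" package onto lib/ and package_data ships badwords.json inside it, the installed word list can be located without hard-coded paths. A minimal sketch of reading it at runtime (an illustration under the layout declared above, not part of the package):

import importlib.resources
import json

def load_badwords():
    # importlib.resources resolves files shipped via package_data for an
    # installed package; badwords.json sits at the package root per setup().
    raw = importlib.resources.read_text("wordfilter", "badwords.json")
    return json.loads(raw)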
#!/usr/bin/env python3
"""Tests for ray class."""
import sys
import unittest
import numpy as np
import math
from pyproj import CRS
from evtech import Ray
from evtech import Camera
class TestRay(unittest.TestCase):
"""Tests for `evtech.ray` package."""
def setUp(self):
self.proj = np.array([[-234.48497951320869, -11689.146112537686, -3420.9549093694854, 54967162069.77626],
[-11527.74509904331, 527.9966478964207, -3108.9307732776556, 2267432568.205459],
[0.07731721986909759, 0.01342309733163904, -0.996916676327768, -93150.24955090503]
])
self.bounds = [4405, 655, 5587, 1420]
self.cen = [411228.51669897616, 4693677.177776167, 1653.5802147550032]
self.geo_bounds = [-88.07607063663191, 42.387928513288855, -88.07499236028416, 42.38917669615173]
self.elev = 250.522
self.crs = CRS.from_user_input(32616)
self.path = "foo.jpg"
self.cam = Camera(self.proj, self.bounds, self.cen, self.geo_bounds, self.elev, self.crs, self.path)
def test_construct(self):
ray = Ray([0,0,0],[1,1,1], None)
# Direction should be normalized
self.assertEqual(ray.origin[0],0)
self.assertEqual(ray.origin[1],0)
self.assertEqual(ray.origin[2],0)
self.assertEqual(ray.direction[0],1.0/math.sqrt(3))
self.assertEqual(ray.direction[1],1.0/math.sqrt(3))
self.assertEqual(ray.direction[2],1.0/math.sqrt(3))
def test_elevation_intersect(self):
ray = self.cam.project_from_camera(880,443)
pt = ray.intersect_at_elevation(self.elev)
self.assertAlmostEqual(pt[2], self.elev)
APIs used: math.sqrt, evtech.Camera, pyproj.CRS.from_user_input, numpy.array, evtech.Ray
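
The two tests pin down the Ray contract: the constructor normalizes direction to unit length, and intersect_at_elevation returns the point where the ray crosses a horizontal plane. A sketch of those invariants (illustrative only, not the actual evtech implementation):

import numpy as np

class RaySketch:
    """Stand-in for evtech.Ray showing the behavior the tests assert."""
    def __init__(self, origin, direction, camera=None):
        self.origin = np.asarray(origin, dtype=float)
        d = np.asarray(direction, dtype=float)
        self.direction = d / np.linalg.norm(d)  # unit length, as tested
        self.camera = camera

    def intersect_at_elevation(self, elev):
        # Solve origin.z + t * direction.z == elev, then step t along the ray.
        t = (elev - self.origin[2]) / self.direction[2]
        return self.origin + t * self.direction

r = RaySketch([0, 0, 0], [1, 1, 1])
assert np.isclose(r.direction[0], 1.0 / np.sqrt(3))
assert np.isclose(r.intersect_at_elevation(250.522)[2], 250.522)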
#!/usr/bin/env python
#-------------------------------------------------------------------
# The MIT License
#
# Copyright (c) 2009 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#-------------------------------------------------------------------
import os
import sys
lib_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../lib"))
if lib_path not in sys.path: sys.path.insert(0, lib_path)
import unittest
from nitro_pie import *
#-------------------------------------------------------------------
class Test(unittest.TestCase):
#---------------------------------------------------------------
def setUp(self):
pass
def tearDown(self):
pass
#---------------------------------------------------------------
def test_loadLibrary(self):
#-----------------------------------------------------------
origLibraryName = JSLibrary.libraryName
JSLibrary.libraryName = "JavaScriptCoreX"
passed = False
try:
context = JSGlobalContextRef.create()
except:
passed = True
self.assertTrue(passed)
#-----------------------------------------------------------
JSLibrary.libraryName = origLibraryName
JSLibrary.libraryPath = "/tmp/JavaScriptCore"
passed = False
try:
context = JSGlobalContextRef.create()
except:
passed = True
self.assertTrue(passed)
#-----------------------------------------------------------
JSLibrary.libraryName = origLibraryName
JSLibrary.libraryPath = None
context = JSGlobalContextRef.create()
context.garbageCollect()
context.release()
#-------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
APIs used: unittest.main, os.path.dirname, sys.path.insert
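
The passed-flag pattern above predates assertRaises as a context manager (available in unittest since Python 2.7); the same failure checks can be written more tightly. A sketch, reusing the nitro_pie names from the test:

class TestLoadFailuresTighter(unittest.TestCase):
    def test_bad_library_name(self):
        JSLibrary.libraryName = "JavaScriptCoreX"  # nonexistent library
        with self.assertRaises(Exception):         # creation must fail
            JSGlobalContextRef.create()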
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stem detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from moonlight import structure
from moonlight.protobuf import musicscore_pb2
from moonlight.staves import base as staves_base
from moonlight.structure import barlines as barlines_module
from moonlight.structure import beams
from moonlight.structure import components
from moonlight.structure import verticals
Point = musicscore_pb2.Point # pylint: disable=invalid-name
class BarlinesTest(absltest.TestCase):
def testDummy(self):
    # Create four staves and a set of candidate verticals at known x
    # positions; the cases below exercise the barline spacing, length, and
    # stem-proximity filters.
struct = structure.Structure(
staff_detector=staves_base.ComputedStaves(
staves=[[[10, 50], [90, 50]], [[11, 150], [91, 150]],
[[10, 250], [90, 250]], [[10, 350], [90, 350]]],
staffline_distance=[12] * 4,
staffline_thickness=2,
staves_interpolated_y=[[50] * 100, [150] * 100, [250] * 100,
[350] * 100]),
beams=beams.ComputedBeams(np.zeros((0, 2, 2))),
connected_components=components.ComputedComponents(np.zeros((0, 5))),
verticals=verticals.ComputedVerticals(lines=[
# Joins the first 2 staves.
[[10, 50 - 12 * 2], [10, 150 + 12 * 2]],
# Another barline, too close to the first one.
[[12, 50 - 12 * 2], [12, 150 + 12 * 2]],
# This barline is far enough, because the second barline was
# skipped.
[[13, 50 - 12 * 2], [13, 150 + 12 * 2]],
# Single staff barlines are skipped.
[[30, 50 - 12 * 2], [30, 50 + 12 * 2]],
[[31, 150 - 12 * 2], [31, 150 + 12 * 2]],
# Too close to a stem.
[[70, 50 - 12 * 2], [70, 50 + 12 * 2]],
# Too short.
[[90, 50 - 12 * 2], [90, 50 + 12 * 2]],
# Another barline which is kept.
[[90, 50 - 12 * 2], [90, 150 + 12 * 2]],
# Staff 1 has no barlines.
# Staff 2 has 2 barlines.
[[11, 350 - 12 * 2], [11, 350 + 12 * 2]],
[[90, 350 - 12 * 2], [90, 350 + 12 * 2]],
]))
barlines = barlines_module.Barlines(struct, close_barline_threshold=3)
# Create a Page with Glyphs.
input_page = musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(staff=[
musicscore_pb2.Staff(
staffline_distance=12,
center_line=[
musicscore_pb2.Point(x=10, y=50),
musicscore_pb2.Point(x=90, y=50)
],
glyph=[
# Stem is close to the last vertical on the first staff, so
# a barline will not be detected there.
musicscore_pb2.Glyph(
type=musicscore_pb2.Glyph.NOTEHEAD_FILLED,
x=60,
y_position=2,
stem=musicscore_pb2.LineSegment(
start=musicscore_pb2.Point(x=72, y=40),
end=musicscore_pb2.Point(x=72, y=80))),
]),
musicscore_pb2.Staff(
staffline_distance=12,
center_line=[
musicscore_pb2.Point(x=10, y=150),
musicscore_pb2.Point(x=90, y=150)
]),
musicscore_pb2.Staff(
staffline_distance=12,
center_line=[
musicscore_pb2.Point(x=10, y=250),
musicscore_pb2.Point(x=90, y=250)
]),
musicscore_pb2.Staff(
staffline_distance=12,
center_line=[
musicscore_pb2.Point(x=10, y=350),
musicscore_pb2.Point(x=90, y=350)
]),
])
])
page = barlines.apply(input_page)
self.assertEqual(3, len(page.system))
self.assertEqual(2, len(page.system[0].staff))
self.assertItemsEqual([10, 13, 90], (bar.x for bar in page.system[0].bar))
self.assertEqual(1, len(page.system[1].staff))
self.assertEqual(0, len(page.system[1].bar))
self.assertEqual(1, len(page.system[2].staff))
self.assertEqual(2, len(page.system[2].bar))
self.assertItemsEqual([11, 90], (bar.x for bar in page.system[2].bar))
if __name__ == "__main__":
absltest.main()
APIs used: moonlight.structure.barlines.Barlines, moonlight.protobuf.musicscore_pb2.Point, absl.testing.absltest.main, numpy.zeros, moonlight.structure.verticals.ComputedVerticals, moonlight.staves.base.ComputedStaves
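
The close_barline_threshold=3 case is the subtlest filter in the test: x=12 is dropped for sitting within 3px of x=10, after which x=13 clears the threshold because the dropped candidate no longer counts. A toy sketch of that greedy rule (the semantics are inferred from the expected [10, 13, 90], not copied from the moonlight source):

def filter_close_barlines(xs, threshold=3):
    # Greedily keep a candidate only if it is at least `threshold` pixels
    # to the right of the last *kept* barline.
    kept = []
    for x in sorted(xs):
        if not kept or x - kept[-1] >= threshold:
            kept.append(x)
    return kept

assert filter_close_barlines([10, 12, 13, 90]) == [10, 13, 90]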
import json
from pydantic import Json, BaseModel
from datamodel_code_generator.parser.jsonschema import JsonSchemaParser
from genson import SchemaBuilder
import streamlit as st
import streamlit_pydantic as sp
class FormGeneratorModel(BaseModel):
model_schema: Json
def main() -> None:
st.header("Python Form Generator")
st.subheader(
"Enter your JSON and get a free Pydantic model + Streamlit Input Form using it!"
)
data = sp.pydantic_form(key="json_input", model=FormGeneratorModel)
if data:
show_generated_code(data.model_schema)
def show_generated_code(schema: Json) -> None:
model_code = json_to_pydantic(schema)
if not model_code:
st.error("Models not found in the input data")
else:
with st.expander("Original Converted Model"):
st.code(model_code, language="python")
st.download_button("Download Generated Model Only", data=model_code, file_name="model.py", mime="text/plain")
show_generated_form(model_code)
MAIN_TEMPLATE = """\
def main() -> None:
st.header("Model Form Submission")
data = sp.pydantic_form(key="my_model", model=Model)
if data:
st.json(data.json())
if __name__ == "__main__":
main()
"""
def show_generated_form(model_code: str) -> None:
code_lines = model_code.split('\n')
code_lines.insert(2, "import streamlit_pydantic as sp")
code_lines.insert(2, "import streamlit as st")
code_lines.insert(-1, MAIN_TEMPLATE)
full_code = '\n'.join(code_lines)
st.subheader("Generated Streamlit Pydantic App")
st.caption("Download it and run with `streamlit run model_form.py`")
st.download_button("Download Generated Form!", data=full_code, file_name="model_form.py", mime="text/plain")
st.code(full_code, language="python")
def json_to_pydantic(input_text: Json) -> str:
builder = SchemaBuilder()
builder.add_object(input_text)
schema = builder.to_schema()
parser = JsonSchemaParser(
source=json.dumps(schema),
base_class="pydantic.BaseModel",
)
return parser.parse()
if __name__ == "__main__":
main()
APIs used: streamlit.caption, genson.SchemaBuilder, streamlit.expander, json.dumps, streamlit_pydantic.pydantic_form, streamlit.code, streamlit.download_button, streamlit.error, streamlit.subheader, streamlit.header
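
The conversion itself is independent of Streamlit: genson infers a JSON Schema from a sample object, and datamodel-code-generator turns that schema into Pydantic model source. The same pipeline as a minimal command-line sketch:

import json
from genson import SchemaBuilder
from datamodel_code_generator.parser.jsonschema import JsonSchemaParser

sample = {"name": "Ada", "age": 36}          # any representative JSON object
builder = SchemaBuilder()
builder.add_object(sample)
parser = JsonSchemaParser(
    source=json.dumps(builder.to_schema()),
    base_class="pydantic.BaseModel",
)
print(parser.parse())  # emits the generated Pydantic model source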
import os
import json
import logging
import string
from random import randrange
from collections import OrderedDict
import torch
import torchvision.transforms as transforms
from torchvision.datasets.folder import default_loader
from PIL import ImageFile
from seq2seq.tools.tokenizer import Tokenizer, BPETokenizer, CharTokenizer
from seq2seq.tools import batch_sequences
from seq2seq.tools.config import EOS, BOS, PAD, LANGUAGE_TOKENS
from seq2seq.datasets import LinedTextDataset
from seq2seq.datasets.vision import create_padded_caption_batch, imagenet_transform
def get_defected_list(item_list, callback):
    """Return the indices of items for which `callback` raises."""
    defected = []
    for i, item in enumerate(item_list):
        try:
            callback(item)
        except Exception:  # any failure marks the item as defective
            defected.append(i)
    return defected
class ConceptCaptions(object):
"""docstring for Dataset."""
__tokenizers = {
'word': Tokenizer,
'char': CharTokenizer,
'bpe': BPETokenizer
}
def __init__(self, root, image_transform=imagenet_transform,
split='train',
tokenization='word',
num_symbols=32000,
shared_vocab=True,
code_file=None,
vocab_file=None,
insert_start=[BOS], insert_end=[EOS],
mark_language=False,
tokenizer=None,
pre_tokenize=None,
vocab_limit=None,
vocab_min_count=2,
loader=default_loader):
super(ConceptCaptions, self).__init__()
self.split = split
self.shared_vocab = shared_vocab
self.num_symbols = num_symbols
self.tokenizer = tokenizer
self.tokenization = tokenization
self.insert_start = insert_start
self.insert_end = insert_end
self.mark_language = mark_language
self.code_file = code_file
self.vocab_file = vocab_file
self.vocab_limit = vocab_limit
self.vocab_min_count = vocab_min_count
if image_transform is not None:
self.transform = image_transform(train=(split == 'train'))
else:
self.transform = None
self.pre_tokenize = pre_tokenize
self.loader = loader
if split == 'train':
path = {'root': os.path.join(root, 'training'),
'annFile': os.path.join(root, 'training.txt'),
'filtered': os.path.join(root, 'defected_training.json')
}
else:
path = {'root': os.path.join(root, 'validation'),
'annFile': os.path.join(root, 'validation.txt'),
'filtered': os.path.join(root, 'defected_validation.json')
}
self.image_path = path['root']
self.captions = LinedTextDataset(path['annFile'])
if os.path.isfile(path['filtered']):
with open(path['filtered'], 'r') as f:
filtered = json.loads(f.read())
else:
filtered = get_defected_list(range(len(self.captions)),
lambda idx: self._load_image(idx))
with open(path['filtered'], 'w') as f:
f.write(json.dumps(filtered))
self.indexes = list(set(range(len(self.captions))) - set(filtered))
if self.tokenizer is None:
prefix = os.path.join(root, 'captions')
if tokenization not in ['bpe', 'char', 'word']:
raise ValueError("An invalid option for tokenization was used, options are {0}".format(
','.join(['bpe', 'char', 'word'])))
if tokenization == 'bpe':
self.code_file = code_file or '{prefix}.{lang}.{tok}.codes_{num_symbols}'.format(
prefix=prefix, lang='en', tok=tokenization, num_symbols=num_symbols)
else:
num_symbols = ''
self.vocab_file = vocab_file or '{prefix}.{lang}.{tok}.vocab{num_symbols}'.format(
prefix=prefix, lang='en', tok=tokenization, num_symbols=num_symbols)
self.generate_tokenizer()
def generate_tokenizer(self):
additional_tokens = None
if self.mark_language:
additional_tokens = [LANGUAGE_TOKENS('en')]
if self.tokenization == 'bpe':
tokz = BPETokenizer(self.code_file,
vocab_file=self.vocab_file,
num_symbols=self.num_symbols,
additional_tokens=additional_tokens,
pre_tokenize=self.pre_tokenize)
if not hasattr(tokz, 'bpe'):
sentences = (self.captions[i] for i in self.indexes)
tokz.learn_bpe(sentences, from_filenames=False)
else:
tokz = self.__tokenizers[self.tokenization](
vocab_file=self.vocab_file,
additional_tokens=additional_tokens,
pre_tokenize=self.pre_tokenize)
if not hasattr(tokz, 'vocab'):
assert self.split == 'train', "better generate vocab for training split"
sentences = (self.captions[i] for i in self.indexes)
logging.info('generating vocabulary. saving to %s' %
self.vocab_file)
tokz.get_vocab(sentences, from_filenames=False)
tokz.save_vocab(self.vocab_file)
tokz.load_vocab(self.vocab_file, limit=self.vocab_limit,
min_count=self.vocab_min_count)
self.tokenizer = tokz
def _load_image(self, index):
return self.loader('{}/{}.jpg'.format(self.image_path, str(index)))
def __getitem__(self, index):
if isinstance(index, slice):
return [self[idx] for idx in range(index.start or 0, index.stop or len(self), index.step or 1)]
index = self.indexes[index]
img = self._load_image(index)
if self.transform is not None:
img = self.transform(img)
caption = self.tokenizer.tokenize(self.captions[index],
insert_start=self.insert_start,
insert_end=self.insert_end)
return (img, caption)
def __len__(self):
return len(self.indexes)
def get_loader(self, batch_size=1, shuffle=False, pack=False, sampler=None, num_workers=0,
max_length=100, max_tokens=None, batch_first=False,
pin_memory=False, drop_last=False, augment=False):
collate_fn = create_padded_caption_batch(
max_length=max_length, max_tokens=max_tokens,
pack=pack, batch_first=batch_first, augment=augment)
return torch.utils.data.DataLoader(self,
batch_size=batch_size,
collate_fn=collate_fn,
sampler=sampler,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=drop_last)
@property
def tokenizers(self):
return OrderedDict(img=self.transform, en=self.tokenizer)
if __name__ == '__main__':
data = ConceptCaptions('/media/drive/Datasets/concept_captions', split='train', image_transform=None)
# #Now read the file back into a Python list object
# with open('test.txt', 'r') as f:
# a = json.loads(f.read())
APIs used: collections.OrderedDict, seq2seq.datasets.vision.create_padded_caption_batch, json.dumps, os.path.join, logging.info, os.path.isfile, seq2seq.tools.tokenizer.BPETokenizer, torch.utils.data.DataLoader, seq2seq.tools.config.LANGUAGE_TOKENS, seq2seq.datasets.LinedTextDataset
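
get_defected_list is a generic probe, returning the indices of items whose callback raises; the dataset caches those indices to JSON so the slow image scan runs only once. A small usage sketch with hypothetical paths:

from PIL import Image

paths = ["imgs/0.jpg", "imgs/1.jpg", "imgs/2.jpg"]  # hypothetical files
broken = get_defected_list(paths, lambda p: Image.open(p).verify())
print(broken)  # e.g. [1] if imgs/1.jpg is truncated or unreadable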
#!/usr/bin/env python3
import os
import pandas as pd
import sqlalchemy
import yaml
script_dir = os.path.dirname(os.path.realpath(__file__))
with open('../../cfg.yml', 'r') as cfg_file:
data = cfg_file.read()
tuner_cfg = yaml.safe_load(data)
database = tuner_cfg['database'].replace('mysql', 'mysql+pymysql')
engine = sqlalchemy.create_engine(database)
query = 'select search_time, test.name as test, tuning_run.name as tuning_run, tuning_run.start_date,' \
' collection_date from result' \
' inner join desired_result on desired_result.result_id = result.id' \
' inner join tuning_run on tuning_run.id = result.tuning_run_id' \
' inner join program_version on program_version.id = program_version_id' \
' inner join program on program.id = program_version.program_id' \
' inner join test on test.id = test_id' \
' where (test.name = "bayes_zcu102" or test.name = "no_samp") and program.name = "bnn"'
data = pd.read_sql_query(query, engine)
# Set the end date of each tuning run to the time the last result was collected.
data['end_date'] = data.groupby(['test', 'tuning_run'])['collection_date'].transform('max')
data['duration'] = data['end_date'] - data['start_date']
data = data.drop(columns=['end_date', 'tuning_run'])
# Determine the duration of the shortest tuning run.
min_duration = data['duration'].min()
data = data.drop(columns=['duration'])
# Give all tuning runs the same duration.
data['time'] = data['collection_date'] - data['start_date']
data = data[data['time'] <= min_duration]
data = data.drop(columns=['start_date', 'collection_date', 'time'])
# Determine the average search time for each test.
data = data.groupby(['test']).mean().reset_index()
# These names must match the test names selected in the SQL query above.
samp_search_time = data.loc[data['test'] == 'bayes_zcu102', 'search_time'].iloc[0]
opt_search_time = data.loc[data['test'] == 'no_samp', 'search_time'].iloc[0]
# Show the results.
print('Search time with random sampling:', samp_search_time)
print('Search time with numerical optimization:', opt_search_time)
# Output search times to file.
with open('../callouts/search_time.tex', 'w') as output_file:
output_file.write('\\def \\sampsearchtime {%.0f}\n' % samp_search_time)
output_file.write('\\def \\optsearchtime {%.0f}\n' % opt_search_time)
APIs used: pandas.read_sql_query, yaml.safe_load, sqlalchemy.create_engine, os.path.realpath
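
The one non-obvious pandas step above is groupby(...).transform('max'): it broadcasts each group's latest collection_date back onto every row of that group, which is what makes the per-row duration subtraction line up. A toy illustration:

import pandas as pd

df = pd.DataFrame({
    "run": ["a", "a", "b"],
    "collection_date": pd.to_datetime(
        ["2021-01-01", "2021-01-03", "2021-01-02"]),
})
df["end_date"] = df.groupby("run")["collection_date"].transform("max")
print(df)  # both rows of run "a" receive 2021-01-03 as end_date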
#!/usr/bin/env python2
from __future__ import print_function
import httplib2
import base64
from apiclient import discovery
from oauth2client import client as oauth2client
import datetime
import time
import os
import myauth
(PUB_CREDENTIALS,PUB_SCOPE,SUBSCRIPT,TOPIC)=myauth.setPubSubConfirm()
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = PUB_CREDENTIALS
PUBSUB_SCOPES = PUB_SCOPE
subscription=SUBSCRIPT
def create_pubsub_client(http=None):
credentials = oauth2client.GoogleCredentials.get_application_default()
if credentials.create_scoped_required():
credentials = credentials.create_scoped(PUBSUB_SCOPES)
if not http:
http = httplib2.Http()
credentials.authorize(http)
return discovery.build('pubsub', 'v1', http=http)
client=create_pubsub_client(http=None)
def checkForMessage():
data=None
batch_size = 100
body = {
        # returnImmediately=True makes the pull return at once with whatever
        # messages are available (up to maxMessages) instead of blocking.
        'returnImmediately': True,
'maxMessages': batch_size,
}
resp = client.projects().subscriptions().pull(
subscription=subscription, body=body).execute()
received_messages = resp.get('receivedMessages')
if received_messages is not None:
ack_ids = []
for received_message in received_messages:
pubsub_message = received_message.get('message')
if pubsub_message:
# Process messages
data = base64.b64decode(str(pubsub_message.get('data')))
# print(data)
# process(data)
# Get the message's ack ID
ack_ids.append(received_message.get('ackId'))
# Create a POST body for the acknowledge request
ack_body = {'ackIds': ack_ids}
# print ack_body
# Acknowledge the message.
client.projects().subscriptions().acknowledge(
subscription=subscription, body=ack_body).execute()
return data
def sendHeartBeat(id1):
message1 = {}
    message1['HeartBeat']=str(id1)
message1['accounting']=[id1,1,1,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')]
message1['timeStamp']=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
message1 = base64.b64encode(str(message1))
# Create a POST body for the Pub/Sub request
body = {
'messages': [
{'data': message1},
]
}
resp = client.projects().topics().publish(
topic=TOPIC, body=body).execute()
message_ids = resp.get('messageIds')
if message_ids:
for message_id in message_ids:
# Process each message ID
pass
#print(message_id)
def sendMsg(msg):
message1 = base64.b64encode(str(msg))
# Create a POST body for the Pub/Sub request
body = {
'messages': [
{'data': message1},
]
}
resp = client.projects().topics().publish(
topic=TOPIC, body=body).execute()
message_ids = resp.get('messageIds')
if message_ids:
for message_id in message_ids:
# Process each message ID
print(message_id)
def timeCheck(msg,num=3,sec=3):
print("waiting for confirmation")
for i in range(0,num):
sendHeartBeat('timeCheck'+str(i)+':'+str(msg))
return_msd = checkForMessage()
if return_msd != None:
if msg.find(return_msd) >= 0:
print("CONFIRMED!!....")
return return_msd
time.sleep(sec)
sendHeartBeat('timeCheck_2'+str(i)+':'+str(msg))
APIs used: myauth.setPubSubConfirm, oauth2client.client.GoogleCredentials.get_application_default, time.sleep, datetime.datetime.now, httplib2.Http, apiclient.discovery.build
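
Both helpers lean on the Pub/Sub v1 REST convention that message bodies travel as base64 strings: sendMsg encodes before publishing and checkForMessage decodes after pulling. The round trip in isolation:

import base64

payload = b'{"status": "ok"}'                # hypothetical message body
wire = base64.b64encode(payload)           # what publish() puts on the wire
assert base64.b64decode(wire) == payload  # what the subscriber recovers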
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression, Ridge, Lasso
from sklearn.ensemble import RandomForestClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.svm import SVC
from .portfolios.portfolio_base import portfolio_base
from .portfolios.portfolio_mixed import portfolio_mixed
from .portfolios.portfolio_hgb import portfolio_hgb
from .portfolios.portfolio_svc import portfolio_svc
from .portfolios.portfolio_rf import portfolio_rf
from .portfolios.portfolio_lr import portfolio_lr
# Referenced so linters keep the experimental import above, which is what
# makes HistGradientBoostingClassifier importable.
enable_hist_gradient_boosting
def get_fast_classifiers(n_classes):
"""Get a list of very fast classifiers.
Parameters
----------
n_classes : int
Number of classes in the dataset. Used to decide on the complexity
of some of the classifiers.
Returns
-------
fast_classifiers : list of sklearn estimators
List of classification models that can be fitted and evaluated very
quickly.
"""
return [
# These are sorted by approximate speed
DummyClassifier(strategy="prior"),
GaussianNB(),
make_pipeline(MinMaxScaler(), MultinomialNB()),
DecisionTreeClassifier(max_depth=1, class_weight="balanced"),
DecisionTreeClassifier(max_depth=max(5, n_classes),
class_weight="balanced"),
DecisionTreeClassifier(class_weight="balanced",
min_impurity_decrease=.01),
LogisticRegression(C=.1,
solver='lbfgs',
multi_class='auto',
class_weight='balanced',
max_iter=1000),
# FIXME Add warm starting here?
LogisticRegression(C=1,
solver='lbfgs',
multi_class='auto',
class_weight='balanced',
max_iter=1000)
]
def get_fast_regressors():
"""Get a list of very fast regressors.
Returns
-------
fast_regressors : list of sklearn estimators
List of regression models that can be fitted and evaluated very
quickly.
"""
return [
DummyRegressor(),
DecisionTreeRegressor(max_depth=1),
DecisionTreeRegressor(max_depth=5),
Ridge(alpha=10),
Lasso(alpha=10)
]
def get_any_classifiers(portfolio='baseline'):
"""Return a portfolio of classifiers.
Returns
-------
classifiers : list of sklearn estimators
List of classification models.
"""
baseline = portfolio_base()
mixed = portfolio_mixed()
hgb = portfolio_hgb()
svc = portfolio_svc()
rf = portfolio_rf()
lr = portfolio_lr()
portfolios = {
'baseline': baseline,
'mixed': mixed,
'svc': svc,
'hgb': hgb,
'rf': rf,
'lr': lr
}
return (portfolios[portfolio])
APIs used: sklearn.tree.DecisionTreeRegressor, sklearn.linear_model.Lasso, sklearn.tree.DecisionTreeClassifier, sklearn.linear_model.Ridge, sklearn.linear_model.LogisticRegression, sklearn.dummy.DummyRegressor, sklearn.naive_bayes.MultinomialNB, sklearn.dummy.DummyClassifier, sklearn.naive_bayes.GaussianNB, sklearn.preprocessing.MinMaxScaler
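
A sketch of how the fast-classifier list is meant to be consumed: fit each cheap model and rank them by cross-validated score (toy data for illustration; get_fast_classifiers is the function defined above):

from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=200, n_classes=3, n_informative=4,
                    random_state=0)
for est in get_fast_classifiers(n_classes=3):
    score = cross_val_score(est, X, y, cv=3).mean()
    print(type(est).__name__, round(score, 3))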
"""
Authors:
<NAME> (<EMAIL>)
<NAME>, <NAME>, <NAME>, <NAME>
Dr. <NAME> (<EMAIL>)
--- Versions ---
0.1 - initial version
"""
import sys
from PySide6 import QtCore, QtGui
from PySide6.QtWidgets import *
from PySide6.QtGui import QDoubleValidator
class CellDef(QWidget):
def __init__(self):
super().__init__()
# global self.params_cell_def
self.current_cell_def = None
self.label_width = 210
self.units_width = 70
self.idx_current_cell_def = 1 # 1-offset for XML
self.xml_root = None
self.custom_data_count = 0
self.custom_data_units_width = 90
self.cycle_duration_flag = False
self.stacked_cycle = QStackedWidget()
self.stack_idx_t00 = -1
self.stack_idx_t01 = -1
self.stack_idx_t02 = -1
self.stack_idx_t03 = -1
self.stack_idx_d00 = -1
self.stack_idx_d01 = -1
self.stack_idx_d02 = -1
self.stack_idx_d03 = -1
# <substrate name="virus">
# <secretion_rate units="1/min">0</secretion_rate>
# <secretion_target units="substrate density">1</secretion_target>
# <uptake_rate units="1/min">10</uptake_rate>
# <net_export_rate units="total substrate/min">0</net_export_rate>
# </substrate>
# Create lists for cell type secretion values, for each substrate (index by substrate index)
self.secretion_rate_val = [] # .setText(uep.find(secretion_sub1_path+"secretion_rate").text)
self.secretion_target_val = []
self.secretion_uptake_rate_val = []
self.secretion_net_export_rate_val = []
# self.cell_defs = CellDefInstances()
self.cell_def_horiz_layout = QHBoxLayout()
self.splitter = QSplitter()
tree_widget_width = 160
tree_widget_height = 400
# tree_widget_height = 1200
self.tree = QTreeWidget()
# self.tree.setStyleSheet("background-color: lightgray")
self.tree.setFixedWidth(tree_widget_width)
self.tree.setFixedHeight(tree_widget_height)
# self.tree.setColumnCount(1)
self.tree.itemClicked.connect(self.tree_item_changed_cb)
header = QTreeWidgetItem(["--- Cell Type ---"])
self.tree.setHeaderItem(header)
# cellname = QTreeWidgetItem(["epi cell"])
# self.tree.insertTopLevelItem(0,cellname)
# cellname = QTreeWidgetItem(["macrophage"])
# self.tree.insertTopLevelItem(1,cellname)
# cities = QTreeWidgetItem(treeWidget)
# titem = QTreeWidgetItem
# titem.setText(0,'ttt')
# header.setText(0,"epithelial cell")
# header.setText(1,"macrophage")
# self.tree.addTopLevelItem(QTreeWidgetItem("foo"))
items = []
model = QtCore.QStringListModel()
model.setStringList(["aaa","bbb"])
# self.tree.insertTopLevelItems(None, model)
# slist = QtCore.QStringList()
# for i in range(10):
# items.append(QTreeWidgetItem(None, QtGui.QStringList(QString("item: %1").arg(i))))
# self.tree.insertTopLevelItems(None, items)
# self.log_widget.setHeaderItem(QTreeWidgetItem(["date", "origin", "type", "message"]))
self.cell_def_horiz_layout.addWidget(self.tree)
self.scroll_cell_def_tree = QScrollArea()
self.scroll_cell_def_tree.setWidget(self.tree)
# splitter.addWidget(self.tree)
self.splitter.addWidget(self.scroll_cell_def_tree)
#------------------
self.controls_hbox = QHBoxLayout()
self.new_button = QPushButton("New")
self.controls_hbox.addWidget(self.new_button)
self.copy_button = QPushButton("Copy")
self.controls_hbox.addWidget(self.copy_button)
self.delete_button = QPushButton("Delete")
self.controls_hbox.addWidget(self.delete_button)
#------------------
self.cycle_tab = QWidget()
self.death_tab = QWidget()
self.volume_tab = QWidget()
self.mechanics_tab = QWidget()
self.motility_tab = QWidget()
self.secretion_tab = QWidget()
self.custom_data_tab = QWidget()
self.scroll_params = QScrollArea()
self.tab_widget = QTabWidget()
self.splitter.addWidget(self.scroll_params)
# self.tab_widget.setStyleSheet('''
# QTabWidget {
# background: magenta;
# border: none;
# }
# QTabBar::tab {
# background: green;
# }
# ''')
self.tab_widget.addTab(self.create_cycle_tab(),"Cycle")
self.tab_widget.addTab(self.create_death_tab(),"Death")
self.tab_widget.addTab(self.create_volume_tab(),"Volume")
self.tab_widget.addTab(self.create_mechanics_tab(),"Mechanics")
        self.tab_widget.addTab(self.create_motility_tab(),"Motility")
self.tab_widget.addTab(self.create_secretion_tab(),"Secretion")
# self.tab_widget.addTab(self.custom_data_tab,"Custom Data")
# self.tab_widget.tabBarClicked.connect(self.tabbar_clicked_cb)
# lay = QVBoxLayout(self)
# lay.setContentsMargins(5, 35, 5, 5)
self.cell_types_tabs_layout = QGridLayout()
self.cell_types_tabs_layout.addWidget(self.tab_widget, 0,0,1,1) # w, row, column, rowspan, colspan
# self.setLayout(lay)
# self.setMinimumSize(400, 320)
# self.tab_widget.addTab(self.celldef_tab,"Cell Types")
# self.tab_widget.addTab(self.user_params_tab,"User Params")
# self.cell_types_tabs_hbox.addWidget(self.tab_widget)
# self.vbox.addLayout(hbox)
# self.vbox.addWidget(QHLine())
#------------------
# hbox = QHBoxLayout()
# label = QLabel("Name of cell type:")
# label.setFixedWidth(110)
# label.setAlignment(QtCore.Qt.AlignRight)
# hbox.addWidget(label)
# self.cell_type_name = QLineEdit()
# # Want to validate name, e.g., starts with alpha, no special chars, etc.
# # self.cycle_trate0_0.setValidator(QtGui.QDoubleValidator())
# # self.cycle_trate0_1.enter.connect(self.save_xml)
# hbox.addWidget(self.cell_type_name)
# self.vbox.addLayout(hbox)
# self.create_cycle_tab()
# self.create_death_tab()
# self.create_volume_tab()
# self.create_mechanics_tab()
# self.create_motility_tab()
# self.create_secretion_tab()
self.create_custom_data_tab()
# # self.vbox.hide()
# self.show_cycle_tab()
#--------------------------------------------------------
def tabbar_clicked_cb(self,idx):
print('tabbar_clicked_cb: idx=',idx) # 0-indexed
if idx==0:
self.show_cycle_tab()
elif idx==1:
self.show_death_tab()
elif idx==2:
self.show_volume_tab()
elif idx==3:
self.show_mechanics_tab()
elif idx==4:
self.show_motility_tab()
elif idx==5:
self.show_secretion_tab()
elif idx==6:
self.show_custom_data_tab()
#--------------------------------------------------------
def create_cycle_tab(self):
# self.group_cycle = QGroupBox()
self.params_cycle = QWidget()
self.vbox_cycle = QVBoxLayout()
# glayout = QGridLayout()
self.cycle_dropdown = QComboBox()
self.cycle_dropdown.setFixedWidth(300)
# self.cycle_dropdown.currentIndex.connect(self.cycle_changed_cb)
self.cycle_dropdown.currentIndexChanged.connect(self.cycle_changed_cb)
# self.cycle_dropdown.currentIndexChanged.connect(self.cycle_phase_transition_cb)
# Rf. Section 17 of User Guide and core/PhysiCell_constants.{h,cpp}
# static const int advanced_Ki67_cycle_model= 0;
# static const int basic_Ki67_cycle_model=1;
# static const int flow_cytometry_cycle_model=2;
# static const int live_apoptotic_cycle_model=3;
# static const int total_cells_cycle_model=4;
# static const int live_cells_cycle_model = 5;
# static const int flow_cytometry_separated_cycle_model = 6;
# static const int cycling_quiescent_model = 7;
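        # For reference, the dropdown order below corresponds to these
        # PhysiCell model codes (a hypothetical mapping, if one were ever
        # needed programmatically; values taken from the constants above):
        # cycle_model_code = {
        #     "live cells": 5, "basic Ki67": 1, "advanced Ki67": 0,
        #     "flow cytometry": 2, "flow cytometry separated": 6,
        #     "cycling quiescent": 7,
        # }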
self.cycle_dropdown.addItem("live cells") # 0 -> 0
self.cycle_dropdown.addItem("basic Ki67") # 0 -> 1, 1 -> 0
self.cycle_dropdown.addItem("advanced Ki67") # 0 -> 1, 1 -> 2, 2 -> 0
self.cycle_dropdown.addItem("flow cytometry") # 0 -> 1, 1 -> 2, 2 -> 0
self.cycle_dropdown.addItem("flow cytometry separated") # 0->1, 1->2, 2->3, 3->0
self.cycle_dropdown.addItem("cycling quiescent") # 0 -> 1, 1 -> 0
# self.cycle_dropdown.addItem("live apoptotic")
# self.cycle_dropdown.addItem("total cells")
# self.vbox.addWidget(self.cycle_dropdown)
# self.group_cycle.addWidget(self.cycle_dropdown)
self.vbox_cycle.addWidget(self.cycle_dropdown)
self.cycle_label = QLabel("Phenotype: cycle")
self.cycle_label.setStyleSheet("background-color: orange")
self.cycle_label.setAlignment(QtCore.Qt.AlignCenter)
# self.vbox.addWidget(self.cycle_label)
#----------------------------
# self.cycle_rate_duration_hbox = QHBoxLayout()
# self.rb1 = QRadioButton("transition rate(s)", self)
# self.rb1.clicked.connect(self.cycle_phase_transition_cb)
# self.cycle_rate_duration_hbox.addWidget(self.rb1)
# self.rb2 = QRadioButton("duration(s)", self)
# self.rb2.clicked.connect(self.cycle_phase_transition_cb)
# self.cycle_rate_duration_hbox.addWidget(self.rb2)
# self.cycle_rate_duration_hbox.addStretch(1) # not sure about this, but keeps buttons shoved to left
# self.vbox.addLayout(self.cycle_rate_duration_hbox)
#-----------------------------
# We'll create a unique widget to hold different rates or durations, depending
# on which cycle and method of defining it (transition rates or duration times) is chosen.
# Then we will only display the relevant one, based on these choices.
# self.stacked_cycle = QStackedWidget()
# transition rates
self.stack_t00 = QWidget()
self.stack_t01 = QWidget()
self.stack_t02 = QWidget()
self.stack_t03 = QWidget()
# duration times
self.stack_d00 = QWidget()
self.stack_d01 = QWidget()
self.stack_d02 = QWidget()
self.stack_d03 = QWidget()
#------ Cycle transition rate (1 node) ----------------------
# self.cycle_dropdown.addItem("live cells") # 0 -> 0
glayout = QGridLayout()
label = QLabel("phase 0->0 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# glayout.addWidget(*Widget, row, column, rowspan, colspan)
glayout.addWidget(label, 0,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate00 = QLineEdit()
self.cycle_trate00.setValidator(QtGui.QDoubleValidator())
# self.cycle_trate0_0.enter.connect(self.save_xml)
glayout.addWidget(self.cycle_trate00, 0,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate00_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate00_fixed, 0,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 0,4,1,1) # w, row, column, rowspan, colspan
# hbox.addWidget(units_1min)
self.stack_t00.setLayout(glayout)
idx_stacked_widget = 0
self.stack_idx_t00 = idx_stacked_widget
print(" new stacked widget: t00 -------------> ",idx_stacked_widget)
self.stacked_cycle.addWidget(self.stack_t00) # <------------- stack widget 0
#------ Cycle transition rates (2 nodes) ----------------------
# self.cycle_dropdown.addItem("basic Ki67") # 0 -> 1, 1 -> 0
# self.cycle_dropdown.addItem("cycling quiescent") # 0 -> 1, 1 -> 0
glayout = QGridLayout()
label = QLabel("phase 0->1 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 0,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate01 = QLineEdit()
self.cycle_trate01.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate01, 0,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate01_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate01_fixed, 0,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 0,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 1->0 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 1,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate10 = QLineEdit()
self.cycle_trate10.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate10, 1,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate10_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate10_fixed, 1,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 1,4,1,1) # w, row, column, rowspan, colspan
#-------
# glayout.addWidget(QLabel("rwh-------------------------------AAAAAAAAAAAAAAAAAAAAAaa"), 2,0,4,4) # w, row, column, rowspan, colspan
# glayout.addWidget(QLabel(""), 2,0,3,4) # w, row, column, rowspan, colspan
# glayout.addStretch(0)
#---
self.stack_t01.setLayout(glayout)
idx_stacked_widget += 1
self.stack_idx_t01 = idx_stacked_widget
print(" new stacked widget: t01 -------------> ",idx_stacked_widget)
self.stacked_cycle.addWidget(self.stack_t01) # <------------- stack widget 1
#------ Cycle transition rates (3 nodes) ----------------------
# self.cycle_dropdown.addItem("advanced Ki67") # 0 -> 1, 1 -> 2, 2 -> 0
# self.cycle_dropdown.addItem("flow cytometry") # 0 -> 1, 1 -> 2, 2 -> 0
glayout = QGridLayout()
label = QLabel("phase 0->1 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 0,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate_02_01 = QLineEdit()
self.cycle_trate_02_01.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate_02_01, 0,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate_02_01_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate_02_01_fixed, 0,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 0,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 1->2 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 1,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate_02_12 = QLineEdit()
self.cycle_trate_02_12.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate_02_12, 1,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate_02_12_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate_02_12_fixed, 1,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 1,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 2->0 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 2,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate_02_20 = QLineEdit()
self.cycle_trate_02_20.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate_02_20, 2,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate_02_20_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate_02_20_fixed, 2,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 2,4,1,1) # w, row, column, rowspan, colspan
#-----
self.stack_t02.setLayout(glayout)
idx_stacked_widget += 1
print(" new stacked widget: t02 -------------> ",idx_stacked_widget)
self.stack_idx_t02 = idx_stacked_widget
self.stacked_cycle.addWidget(self.stack_t02)
#------ Cycle transition rates (4 nodes) ----------------------
# self.cycle_dropdown.addItem("flow cytometry separated") # 0->1, 1->2, 2->3, 3->0
glayout = QGridLayout()
label = QLabel("phase 0->1 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 0,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate_03_01 = QLineEdit()
self.cycle_trate_03_01.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate_03_01, 0,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate_03_01_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate_03_01_fixed, 0,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 0,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 1->2 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 1,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate_03_12 = QLineEdit()
self.cycle_trate_03_12.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate_03_12, 1,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate_03_12_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate_03_12_fixed, 1,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 1,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 2->3 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 2,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate_03_23 = QLineEdit()
self.cycle_trate_03_23.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate_03_23, 2,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate_03_23_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate_03_23_fixed, 2,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 2,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 3->0 transition rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 3,0,1,1) # w, row, column, rowspan, colspan
self.cycle_trate_03_30 = QLineEdit()
self.cycle_trate_03_30.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_trate_03_30, 3,1,1,2) # w, row, column, rowspan, colspan
self.cycle_trate_03_30_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_trate_03_30_fixed, 3,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 3,4,1,1) # w, row, column, rowspan, colspan
#-----
self.stack_t03.setLayout(glayout)
idx_stacked_widget += 1
print(" new stacked widget: t03 -------------> ",idx_stacked_widget)
self.stack_idx_t03 = idx_stacked_widget
self.stacked_cycle.addWidget(self.stack_t03)
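# stack_idx_t00..t03 now record the stacked-widget page indices for the
# transition-rate variants (1, 2, 3, and 4 phase transitions).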
#===========================================================================
#------ Cycle duration times (1 node) ----------------------
# self.cycle_dropdown.addItem("live cells") # 0 -> 0
glayout = QGridLayout()
label = QLabel("phase 0 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 0,0,1,1)
# glayout.addWidget(*Widget, row, column, rowspan, colspan)
self.cycle_duration00 = QLineEdit()
self.cycle_duration00.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration00, 0,1,1,2)
self.cycle_duration00_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration00_fixed, 0,3,1,1)
units = QLabel("min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignCenter)
glayout.addWidget(units, 0,4,1,1)
#-----
self.stack_d00.setLayout(glayout)
idx_stacked_widget += 1
print(" new stacked widget: d00 -------------> ",idx_stacked_widget)
self.stack_idx_d00 = idx_stacked_widget
self.stacked_cycle.addWidget(self.stack_d00)
#------ Cycle duration times (2 nodes) ----------------------
# self.cycle_dropdown.addItem("basic Ki67") # 0 -> 1, 1 -> 0
# self.cycle_dropdown.addItem("cycling quiescent") # 0 -> 1, 1 -> 0
glayout = QGridLayout()
label = QLabel("phase 0 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 0,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration01 = QLineEdit()
self.cycle_duration01.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration01, 0,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration01_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration01_fixed, 0,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 0,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 1 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 1,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration10 = QLineEdit()
self.cycle_duration10.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration10, 1,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration10_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration10_fixed, 1,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 1,4,1,1) # w, row, column, rowspan, colspan
# glayout.addWidget(QLabel(""), 2,0,1,1) # w, row, column, rowspan, colspan
#-------
self.stack_d01.setLayout(glayout)
idx_stacked_widget += 1
print(" new stacked widget: d01 -------------> ",idx_stacked_widget)
self.stack_idx_d01 = idx_stacked_widget
self.stacked_cycle.addWidget(self.stack_d01)
#------ Cycle duration (3 nodes) ----------------------
# self.cycle_dropdown.addItem("advanced Ki67") # 0 -> 1, 1 -> 2, 2 -> 0
# self.cycle_dropdown.addItem("flow cytometry") # 0 -> 1, 1 -> 2, 2 -> 0
glayout = QGridLayout()
label = QLabel("phase 0 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 0,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration_02_01 = QLineEdit()
self.cycle_duration_02_01.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration_02_01, 0,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration_02_01_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration_02_01_fixed, 0,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 0,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 1 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 1,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration_02_12 = QLineEdit()
self.cycle_duration_02_12.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration_02_12, 1,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration_02_12_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration_02_12_fixed, 1,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 1,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 2 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 2,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration_02_20 = QLineEdit()
self.cycle_duration_02_20.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration_02_20, 2,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration_02_20_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration_02_20_fixed, 2,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 2,4,1,1) # w, row, column, rowspan, colspan
#-----
self.stack_d02.setLayout(glayout)
idx_stacked_widget += 1
print(" new stacked widget: d02 -------------> ",idx_stacked_widget)
self.stack_idx_d02 = idx_stacked_widget
self.stacked_cycle.addWidget(self.stack_d02)
#------ Cycle duration (4 nodes) ----------------------
# self.cycle_dropdown.addItem("flow cytometry separated") # 0->1, 1->2, 2->3, 3->0
glayout = QGridLayout()
label = QLabel("phase 0 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 0,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration_03_01 = QLineEdit()
self.cycle_duration_03_01.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration_03_01, 0,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration_03_01_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration_03_01_fixed, 0,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 0,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 1 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 1,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration_03_12 = QLineEdit()
self.cycle_duration_03_12.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration_03_12, 1,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration_03_12_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration_03_12_fixed, 1,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 1,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 2 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 2,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration_03_23 = QLineEdit()
self.cycle_duration_03_23.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration_03_23, 2,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration_03_23_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration_03_23_fixed, 2,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 2,4,1,1) # w, row, column, rowspan, colspan
#-------
label = QLabel("phase 3 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(label, 3,0,1,1) # w, row, column, rowspan, colspan
self.cycle_duration_03_30 = QLineEdit()
self.cycle_duration_03_30.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cycle_duration_03_30, 3,1,1,2) # w, row, column, rowspan, colspan
self.cycle_duration_03_30_fixed = QCheckBox("Fixed")
glayout.addWidget(self.cycle_duration_03_30_fixed, 3,3,1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setAlignment(QtCore.Qt.AlignCenter)
units.setFixedWidth(self.units_width)
glayout.addWidget(units, 3,4,1,1) # w, row, column, rowspan, colspan
#-----
self.stack_d03.setLayout(glayout)
idx_stacked_widget += 1
print(" new stacked widget: d03 -------------> ",idx_stacked_widget)
self.stack_idx_d03 = idx_stacked_widget
self.stacked_cycle.addWidget(self.stack_d03)
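# stack_idx_d00..d03 mirror the transition-rate pages but hold phase
# durations; customize_cycle_choices() presumably uses these indices to
# switch between a t* and d* page (see cycle_phase_transition_cb below).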
#---------------------------------------------
# After adding all combos of cycle widgets (groups) to the stacked widget,
# add it to this panel.
# self.vbox.addWidget(self.stacked)
self.vbox_cycle.addWidget(self.stacked_cycle)
# spacerItem = QSpacerItem(100,500)
# self.vbox.addItem(spacerItem)
self.vbox_cycle.addStretch()
self.params_cycle.setLayout(self.vbox_cycle)
return self.params_cycle
# return cycle_tab
#--------------------------------------------------------
def create_death_tab(self):
death_tab = QWidget()
# layout = QVBoxLayout()
glayout = QGridLayout()
# label = QLabel("Phenotype: death")
# label.setStyleSheet("background-color: orange")
# label.setAlignment(QtCore.Qt.AlignCenter)
# self.vbox.addWidget(label)
# self.vbox.addWidget(QHLine())
#----------------
label = QLabel("Apoptosis")
label.setAlignment(QtCore.Qt.AlignCenter)
label.setStyleSheet('background-color: yellow')
# layout.addWidget(apoptosis_label)
idr = 0
glayout.addWidget(label, idr,0, 1,4) # w, row, column, rowspan, colspan
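# idr is a running grid-row counter: each parameter row increments it so
# rows stay contiguous no matter how many widgets a section adds.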
# hbox = QHBoxLayout()
label = QLabel("death rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.apoptosis_death_rate = QLineEdit()
self.apoptosis_death_rate.setValidator(QtGui.QDoubleValidator())
# hbox.addWidget(self.apoptosis_death_rate)
glayout.addWidget(self.apoptosis_death_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# hbox.addWidget(units)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# layout.addLayout(hbox)
# <cycle code="6" name="Flow cytometry model (separated)">
# <phase_durations units="min">
# <duration index="0" fixed_duration="false">300.0</duration>
# <duration index="1" fixed_duration="true">480</duration>
# <duration index="2" fixed_duration="true">240</duration>
# <duration index="3" fixed_duration="true">60</duration>
# </phase_durations>
# self.apoptosis_phase0_duration_hbox = QHBoxLayout()
label = QLabel("phase 0 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# self.apoptosis_phase0_duration_hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.apoptosis_phase0_duration = QLineEdit()
self.apoptosis_phase0_duration.setValidator(QtGui.QDoubleValidator())
# self.apoptosis_phase0_duration_hbox.addWidget(self.apoptosis_phase0_duration)
glayout.addWidget(self.apoptosis_phase0_duration, idr,1, 1,1) # w, row, column, rowspan, colspan
self.apoptosis_phase0_duration_fixed = QCheckBox("Fixed")
# self.apoptosis_phase0_duration_hbox.addWidget(self.apoptosis_phase0_duration_fixed)
glayout.addWidget(self.apoptosis_phase0_duration_fixed, idr,2, 1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignCenter)
glayout.addWidget(units, idr,3, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_phase0_duration_hbox.addWidget(units)
#-------
# <phase_durations units="min">
# <duration index="0" fixed_duration="true">516</duration>
# <unlysed_fluid_change_rate units="1/min">0.05</unlysed_fluid_change_rate>
# <lysed_fluid_change_rate units="1/min">0</lysed_fluid_change_rate>
# <cytoplasmic_biomass_change_rate units="1/min">1.66667e-02</cytoplasmic_biomass_change_rate>
# <nuclear_biomass_change_rate units="1/min">5.83333e-03</nuclear_biomass_change_rate>
# <calcification_rate units="1/min">0</calcification_rate>
# <relative_rupture_volume units="dimensionless">2.0</relative_rupture_volume>
# self.apoptosis_unlysed_rate_hbox = QHBoxLayout()
label = QLabel("unlysed fluid change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_unlysed_rate_hbox.addWidget(label)
self.apoptosis_unlysed_rate = QLineEdit()
self.apoptosis_unlysed_rate.setValidator(QtGui.QDoubleValidator())
# self.apoptosis_unlysed_rate_hbox.addWidget(self.apoptosis_unlysed_rate)
glayout.addWidget(self.apoptosis_unlysed_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_unlysed_rate_hbox.addWidget(units)
# self.vbox.addLayout(self.apoptosis_unlysed_rate_hbox)
# self.apoptosis_lysed_rate_hbox = QHBoxLayout()
label = QLabel("lysed fluid change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_lysed_rate_hbox.addWidget(label)
self.apoptosis_lysed_rate = QLineEdit()
self.apoptosis_lysed_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.apoptosis_lysed_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_lysed_rate_hbox.addWidget(self.apoptosis_lysed_rate)
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_lysed_rate_hbox.addWidget(units)
# self.vbox.addLayout(self.apoptosis_lysed_rate_hbox)
# self.apoptosis_cytoplasmic_hbox = QHBoxLayout()
label = QLabel("cytoplasmic biomass change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_cytoplasmic_hbox.addWidget(label)
self.apoptosis_cytoplasmic_biomass_change_rate = QLineEdit()
self.apoptosis_cytoplasmic_biomass_change_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.apoptosis_cytoplasmic_biomass_change_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_cytoplasmic_hbox.addWidget(self.apoptosis_cytoplasmic_biomass_change_rate)
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.apoptosis_cytoplasmic_hbox.addWidget(units)
# self.vbox.addLayout(self.apoptosis_cytoplasmic_biomass_change_rate_hbox)
# <nuclear_biomass_change_rate units="1/min">5.83333e-03</nuclear_biomass_change_rate>
# <calcification_rate units="1/min">0</calcification_rate>
# <relative_rupture_volume units="dimensionless">2.0</relative_rupture_volume>
# self.apoptosis_nuclear_hbox = QHBoxLayout()
label = QLabel("nuclear biomass change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# self.apoptosis_nuclear_hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.apoptosis_nuclear_biomass_change_rate = QLineEdit()
self.apoptosis_nuclear_biomass_change_rate.setValidator(QtGui.QDoubleValidator())
# self.apoptosis_nuclear_hbox.addWidget(self.apoptosis_nuclear_biomass_change_rate)
glayout.addWidget(self.apoptosis_nuclear_biomass_change_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# self.apoptosis_nuclear_hbox.addWidget(units)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.vbox.addLayout(hbox)
# self.apoptosis_calcification_hbox = QHBoxLayout()
label = QLabel("calcification rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# self.apoptosis_calcification_hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.apoptosis_calcification_rate = QLineEdit()
self.apoptosis_calcification_rate.setValidator(QtGui.QDoubleValidator())
# self.apoptosis_calcification_hbox.addWidget(self.apoptosis_calcification_rate)
glayout.addWidget(self.apoptosis_calcification_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# self.apoptosis_calcification_hbox.addWidget(units)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.vbox.addLayout(hbox)
# self.apoptosis_rel_rupture_volume_hbox = QHBoxLayout()
label = QLabel("relative rupture volume")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# self.apoptosis_rel_rupture_volume_hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.apoptosis_relative_rupture_volume = QLineEdit()
self.apoptosis_relative_rupture_volume.setValidator(QtGui.QDoubleValidator())
# self.apoptosis_rel_rupture_volume_hbox.addWidget(self.apoptosis_relative_rupture_volume)
glayout.addWidget(self.apoptosis_relative_rupture_volume, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# self.apoptosis_rel_rupture_volume_hbox.addWidget(units)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.vbox.addLayout(hbox)
#----------------
label = QLabel("Necrosis")
label.setAlignment(QtCore.Qt.AlignCenter)
label.setStyleSheet('background-color: yellow')
idr += 1
glayout.addWidget(label, idr,0, 1,4) # w, row, column, rowspan, colspan
label = QLabel("death rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.necrosis_death_rate = QLineEdit()
self.necrosis_death_rate.setValidator(QtGui.QDoubleValidator())
# hbox.addWidget(self.necrosis_death_rate)
glayout.addWidget(self.necrosis_death_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# hbox.addWidget(units)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# layout.addLayout(hbox)
# <cycle code="6" name="Flow cytometry model (separated)">
# <phase_durations units="min">
# <duration index="0" fixed_duration="false">300.0</duration>
# <duration index="1" fixed_duration="true">480</duration>
# <duration index="2" fixed_duration="true">240</duration>
# <duration index="3" fixed_duration="true">60</duration>
# </phase_durations>
# self.necrosis_phase0_duration_hbox = QHBoxLayout()
label = QLabel("phase 0 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# self.necrosis_phase0_duration_hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.necrosis_phase0_duration = QLineEdit()
self.necrosis_phase0_duration.setValidator(QtGui.QDoubleValidator())
# self.necrosis_phase0_duration_hbox.addWidget(self.necrosis_phase0_duration)
glayout.addWidget(self.necrosis_phase0_duration, idr,1, 1,1) # w, row, column, rowspan, colspan
self.necrosis_phase0_duration_fixed = QCheckBox("Fixed")
# self.necrosis_phase0_duration_hbox.addWidget(self.necrosis_phase0_duration_fixed)
glayout.addWidget(self.necrosis_phase0_duration_fixed, idr,2, 1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignCenter)
glayout.addWidget(units, idr,3, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_phase0_duration_hbox.addWidget(units)
#----
label = QLabel("phase 1 duration")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.necrosis_phase1_duration = QLineEdit()
self.necrosis_phase1_duration.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.necrosis_phase1_duration, idr,1, 1,1) # w, row, column, rowspan, colspan
self.necrosis_phase1_duration_fixed = QCheckBox("Fixed")
glayout.addWidget(self.necrosis_phase1_duration_fixed, idr,2, 1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignCenter)
glayout.addWidget(units, idr,3, 1,1) # w, row, column, rowspan, colspan
#-------
# <phase_durations units="min">
# <duration index="0" fixed_duration="true">516</duration>
# <unlysed_fluid_change_rate units="1/min">0.05</unlysed_fluid_change_rate>
# <lysed_fluid_change_rate units="1/min">0</lysed_fluid_change_rate>
# <cytoplasmic_biomass_change_rate units="1/min">1.66667e-02</cytoplasmic_biomass_change_rate>
# <nuclear_biomass_change_rate units="1/min">5.83333e-03</nuclear_biomass_change_rate>
# <calcification_rate units="1/min">0</calcification_rate>
# <relative_rupture_volume units="dimensionless">2.0</relative_rupture_volume>
# self.necrosis_unlysed_rate_hbox = QHBoxLayout()
label = QLabel("unlysed fluid change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_unlysed_rate_hbox.addWidget(label)
self.necrosis_unlysed_rate = QLineEdit()
self.necrosis_unlysed_rate.setValidator(QtGui.QDoubleValidator())
# self.necrosis_unlysed_rate_hbox.addWidget(self.necrosis_unlysed_rate)
glayout.addWidget(self.necrosis_unlysed_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_unlysed_rate_hbox.addWidget(units)
# self.vbox.addLayout(self.necrosis_unlysed_rate_hbox)
# self.necrosis_lysed_rate_hbox = QHBoxLayout()
label = QLabel("lysed fluid change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_lysed_rate_hbox.addWidget(label)
self.necrosis_lysed_rate = QLineEdit()
self.necrosis_lysed_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.necrosis_lysed_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_lysed_rate_hbox.addWidget(self.necrosis_lysed_rate)
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_lysed_rate_hbox.addWidget(units)
# self.vbox.addLayout(self.necrosis_lysed_rate_hbox)
# self.necrosis_cytoplasmic_hbox = QHBoxLayout()
label = QLabel("cytoplasmic biomass change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_cytoplasmic_hbox.addWidget(label)
self.necrosis_cytoplasmic_biomass_change_rate = QLineEdit()
self.necrosis_cytoplasmic_biomass_change_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.necrosis_cytoplasmic_biomass_change_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_cytoplasmic_hbox.addWidget(self.necrosis_cytoplasmic_biomass_change_rate)
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.necrosis_cytoplasmic_hbox.addWidget(units)
# self.vbox.addLayout(self.necrosis_cytoplasmic_biomass_change_rate_hbox)
# <nuclear_biomass_change_rate units="1/min">5.83333e-03</nuclear_biomass_change_rate>
# <calcification_rate units="1/min">0</calcification_rate>
# <relative_rupture_volume units="dimensionless">2.0</relative_rupture_volume>
# self.necrosis_nuclear_hbox = QHBoxLayout()
label = QLabel("nuclear biomass change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# self.necrosis_nuclear_hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.necrosis_nuclear_biomass_change_rate = QLineEdit()
self.necrosis_nuclear_biomass_change_rate.setValidator(QtGui.QDoubleValidator())
# self.necrosis_nuclear_hbox.addWidget(self.necrosis_nuclear_biomass_change_rate)
glayout.addWidget(self.necrosis_nuclear_biomass_change_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# self.necrosis_nuclear_hbox.addWidget(units)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.vbox.addLayout(hbox)
# self.necrosis_calcification_hbox = QHBoxLayout()
label = QLabel("calcification rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# self.necrosis_calcification_hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.necrosis_calcification_rate = QLineEdit()
self.necrosis_calcification_rate.setValidator(QtGui.QDoubleValidator())
# self.necrosis_calcification_hbox.addWidget(self.necrosis_calcification_rate)
glayout.addWidget(self.necrosis_calcification_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# self.necrosis_calcification_hbox.addWidget(units)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# self.vbox.addLayout(hbox)
# self.necrosis_rel_rupture_volume_hbox = QHBoxLayout()
label = QLabel("relative rupture volume")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# self.necrosis_rel_rupture_volume_hbox.addWidget(label)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.necrosis_relative_rupture_volume = QLineEdit()
self.necrosis_relative_rupture_volume.setValidator(QtGui.QDoubleValidator())
# self.necrosis_rel_rupture_volume_hbox.addWidget(self.necrosis_relative_rupture_volume)
glayout.addWidget(self.necrosis_relative_rupture_volume, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# self.necrosis_rel_rupture_volume_hbox.addWidget(units)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
glayout.setVerticalSpacing(10) # rwh - argh
death_tab.setLayout(glayout)
return death_tab
#--------------------------------------------------------
def create_volume_tab(self):
volume_tab = QWidget()
glayout = QGridLayout()
# vlayout = QVBoxLayout()
label = QLabel("Phenotype: volume")
label.setStyleSheet("background-color: orange")
label.setAlignment(QtCore.Qt.AlignCenter)
# self.vbox.addWidget(label)
# <total units="micron^3">2494</total>
# <fluid_fraction units="dimensionless">0.75</fluid_fraction>
# <nuclear units="micron^3">540</nuclear>
label = QLabel("total")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr = 0
# self.volume_total_hbox.addWidget(label)
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.volume_total = QLineEdit()
self.volume_total.setValidator(QtGui.QDoubleValidator())
# self.volume_total_hbox.addWidget(self.volume_total)
glayout.addWidget(self.volume_total, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("micron^3")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# self.volume_total_hbox.addWidget(units)
# vlayout.addLayout(self.volume_total_hbox)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("fluid fraction")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.volume_fluid_fraction = QLineEdit()
self.volume_fluid_fraction.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.volume_fluid_fraction, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("nuclear")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.volume_nuclear = QLineEdit()
self.volume_nuclear.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.volume_nuclear, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("micron^3")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# <fluid_change_rate units="1/min">0.05</fluid_change_rate>
# <cytoplasmic_biomass_change_rate units="1/min">0.0045</cytoplasmic_biomass_change_rate>
# <nuclear_biomass_change_rate units="1/min">0.0055</nuclear_biomass_change_rate>
#---
label = QLabel("fluid change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.volume_fluid_change_rate = QLineEdit()
self.volume_fluid_change_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.volume_fluid_change_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("cytoplasmic biomass change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.volume_cytoplasmic_biomass_change_rate = QLineEdit()
self.volume_cytoplasmic_biomass_change_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.volume_cytoplasmic_biomass_change_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("nuclear biomass change rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.volume_nuclear_biomass_change_rate = QLineEdit()
self.volume_nuclear_biomass_change_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.volume_nuclear_biomass_change_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
# <calcified_fraction units="dimensionless">0</calcified_fraction>
# <calcification_rate units="1/min">0</calcification_rate>
label = QLabel("calcification fraction")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.volume_calcified_fraction = QLineEdit()
self.volume_calcified_fraction.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.volume_calcified_fraction, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("calcified rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.volume_calcification_rate = QLineEdit()
self.volume_calcification_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.volume_calcification_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
# <relative_rupture_volume units="dimensionless">2.0</relative_rupture_volume>
label = QLabel("relative rupture volume")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.relative_rupture_volume = QLineEdit()
self.relative_rupture_volume.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.relative_rupture_volume, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#------
for idx in range(5): # rwh: hack solution to align rows
blank_line = QLabel("")
idr += 1
glayout.addWidget(blank_line, idr,0, 1,1) # w, row, column, rowspan, colspan
#------
# vlayout.setVerticalSpacing(10) # rwh - argh
volume_tab.setLayout(glayout)
return volume_tab
#--------------------------------------------------------
def create_mechanics_tab(self):
mechanics_tab = QWidget()
glayout = QGridLayout()
label = QLabel("Phenotype: mechanics")
label.setStyleSheet("background-color: orange")
label.setAlignment(QtCore.Qt.AlignCenter)
# self.vbox.addWidget(label)
# <cell_cell_adhesion_strength units="micron/min">0.4</cell_cell_adhesion_strength>
# <cell_cell_repulsion_strength units="micron/min">10.0</cell_cell_repulsion_strength>
# <relative_maximum_adhesion_distance units="dimensionless">1.25</relative_maximum_adhesion_distance>
label = QLabel("cell-cell adhesion strength")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr = 0
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.cell_cell_adhesion_strength = QLineEdit()
self.cell_cell_adhesion_strength.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cell_cell_adhesion_strength, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("micron/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("cell-cell repulsion strength")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.cell_cell_repulsion_strength = QLineEdit()
self.cell_cell_repulsion_strength.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.cell_cell_repulsion_strength, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("micron/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("relative max adhesion distance")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.relative_maximum_adhesion_distance = QLineEdit()
self.relative_maximum_adhesion_distance.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.relative_maximum_adhesion_distance, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
# <options>
# <set_relative_equilibrium_distance enabled="false" units="dimensionless">1.8</set_relative_equilibrium_distance>
# <set_absolute_equilibrium_distance enabled="false" units="micron">15.12</set_absolute_equilibrium_distance>
# </options>
label = QLabel("Options:")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignLeft)
idr += 1
glayout.addWidget(label, idr,0, 1,3) # w, row, column, rowspan, colspan
#--------
label = QLabel("relative equilibrium distance")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.set_relative_equilibrium_distance = QLineEdit()
self.set_relative_equilibrium_distance.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.set_relative_equilibrium_distance, idr,1, 1,1) # w, row, column, rowspan, colspan
self.set_relative_equilibrium_distance_enabled = QCheckBox("enable")
glayout.addWidget(self.set_relative_equilibrium_distance_enabled, idr,2, 1,1) # w, row, column, rowspan, colspan
# units = QLabel("")
# units.setFixedWidth(self.units_width)
# units.setAlignment(QtCore.Qt.AlignLeft)
# glayout.addWidget(units, idr,3, 1,1) # w, row, column, rowspan, colspan
#--------
label = QLabel("absolute equilibrium distance")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.set_absolute_equilibrium_distance = QLineEdit()
self.set_absolute_equilibrium_distance.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.set_absolute_equilibrium_distance, idr,1, 1,1) # w, row, column, rowspan, colspan
self.set_absolute_equilibrium_distance_enabled = QCheckBox("enable")
glayout.addWidget(self.set_absolute_equilibrium_distance_enabled, idr,2, 1,1) # w, row, column, rowspan, colspan
units = QLabel("micron")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignCenter)
glayout.addWidget(units, idr,3, 1,1) # w, row, column, rowspan, colspan
#------
for idx in range(11): # rwh: hack solution to align rows
blank_line = QLabel("")
idr += 1
glayout.addWidget(blank_line, idr,0, 1,1) # w, row, column, rowspan, colspan
#------
# vlayout.setVerticalSpacing(10) # rwh - argh
mechanics_tab.setLayout(glayout)
return mechanics_tab
#--------------------------------------------------------
def create_motility_tab(self):
motility_tab = QWidget()
glayout = QGridLayout()
label = QLabel("Phenotype: motility")
label.setStyleSheet("background-color: orange")
label.setAlignment(QtCore.Qt.AlignCenter)
# self.vbox.addWidget(label)
# self.vbox.addWidget(QHLine())
#---
# <speed units="micron/min">1</speed>
# <persistence_time units="min">1</persistence_time>
# <migration_bias units="dimensionless">.75</migration_bias>
label = QLabel("speed")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# label.setStyleSheet("border: 1px solid black;")
idr = 0
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.speed = QLineEdit()
self.speed.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.speed, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("micron/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# units.setStyleSheet("border: 1px solid black;")
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("persistence time")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.persistence_time = QLineEdit()
self.persistence_time.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.persistence_time, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("migration bias")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.migration_bias = QLineEdit()
self.migration_bias.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.migration_bias, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
# <options>
# <enabled>false</enabled>
# <use_2D>true</use_2D>
# <chemotaxis>
# <enabled>false</enabled>
# <substrate>virus</substrate>
# <direction>1</direction>
# </chemotaxis>
# </options>
#---
self.motility_enabled = QCheckBox("enable")
# self.motility_enabled.setAlignment(QtCore.Qt.AlignRight)
# label.setFixedWidth(self.label_width)
idr += 1
glayout.addWidget(self.motility_enabled, idr,0, 1,1) # w, row, column, rowspan, colspan
self.motility_2D = QCheckBox("2D")
# self.motility_2D.setAlignment(QtCore.Qt.AlignRight)
glayout.addWidget(self.motility_2D, idr,1, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("Chemotaxis")
label.setFixedWidth(200)
label.setAlignment(QtCore.Qt.AlignCenter)
label.setStyleSheet('background-color: yellow')
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.chemotaxis_enabled = QCheckBox("enabled")
glayout.addWidget(self.chemotaxis_enabled, idr,1, 1,1) # w, row, column, rowspan, colspan
self.motility_substrate_dropdown = QComboBox()
# self.motility_substrate_dropdown.setFixedWidth(240)
idr += 1
glayout.addWidget(self.motility_substrate_dropdown, idr,0, 1,1) # w, row, column, rowspan, colspan
# self.cycle_dropdown.currentIndex.connect(self.cycle_changed_cb)
self.motility_substrate_dropdown.currentIndexChanged.connect(self.motility_substrate_changed_cb) # beware: will be triggered on a ".clear" too
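# (editor note) Qt emits currentIndexChanged(-1) when the combo box is
# cleared, so motility_substrate_changed_cb() returns early on idx == -1
# (see the callback below).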
# self.motility_substrate_dropdown.addItem("oxygen")
self.chemotaxis_direction_positive = QCheckBox("up direction")
glayout.addWidget(self.chemotaxis_direction_positive, idr,1, 1,1) # w, row, column, rowspan, colspan
#------
for idx in range(11): # rwh: hack solution to align rows
blank_line = QLabel("")
idr += 1
glayout.addWidget(blank_line, idr,0, 1,1) # w, row, column, rowspan, colspan
#------
# vlayout.setVerticalSpacing(10) # rwh - argh
motility_tab.setLayout(glayout)
return motility_tab
#--------------------------------------------------------
def create_secretion_tab(self):
secretion_tab = QWidget()
glayout = QGridLayout()
label = QLabel("Phenotype: secretion")
label.setStyleSheet("background-color: orange")
label.setAlignment(QtCore.Qt.AlignCenter)
# <substrate name="virus">
# <secretion_rate units="1/min">0</secretion_rate>
# <secretion_target units="substrate density">1</secretion_target>
# <uptake_rate units="1/min">10</uptake_rate>
# <net_export_rate units="total substrate/min">0</net_export_rate>
# </substrate>
# <substrate name="interferon">
# <secretion_rate units="1/min">0</secretion_rate>
# <secretion_target units="substrate density">1</secretion_target>
# <uptake_rate units="1/min">0</uptake_rate>
# <net_export_rate units="total substrate/min">0</net_export_rate>
# </substrate>
# cycle_path = ".//cell_definition[" + str(idx_current_cell_def) + "]//phenotype//cycle"
# phase_transition_path = cycle_path + "//phase_transition_rates"
# print(' >> phase_transition_path ')
# pt_uep = uep.find(phase_transition_path)
self.secretion_substrate_dropdown = QComboBox()
self.secretion_substrate_dropdown.setFixedWidth(300)
self.secretion_substrate_dropdown.currentIndexChanged.connect(self.secretion_substrate_changed_cb) # beware: will be triggered on a ".clear" too
# self.uep_cell_defs = self.xml_root.find(".//cell_definitions")
# print('self.uep_cell_defs= ',self.uep_cell_defs)
# # secretion_path = ".//cell_definition[" + str(idx_current_cell_def) + "]//phenotype//secretion//"
# uep_secretion = self.xml_root.find(".//cell_definitions//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//secretion")
# print('uep_secretion = ',uep_secretion )
# # vp = [] # pointers to <variable> nodes
# if self.uep_cell_defs:
# # uep = self.xml_root.find('.//secretion') # find unique entry point
# idx = 0
# for sub in uep_secretion.findall('substrate'):
# # vp.append(var)
# print(idx,") -- secretion substrate = ",sub.attrib['name'])
# idx += 1
# label = QLabel("oxygen")
# label.setStyleSheet('background-color: lightgreen')
# label.setFixedWidth(150)
# self.vbox.addWidget(label)
label = QLabel("secretion rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# label.setStyleSheet("border: 1px solid black;")
idr = 0
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.secretion_rate = QLineEdit()
self.secretion_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.secretion_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
# units.setStyleSheet("border: 1px solid black;")
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("target")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
# label.setStyleSheet("border: 1px solid black;")
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.secretion_target = QLineEdit()
self.secretion_target.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.secretion_target, idr,1, 1,1) # w, row, column, rowspan, colspan
# units = QLabel("substrate density")
units = QLabel("sub density")
# units.setFixedWidth(self.units_width+5)
# units.setFixedWidth(110)
units.setAlignment(QtCore.Qt.AlignLeft)
# units.setStyleSheet("border: 1px solid black;")
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("uptake rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.uptake_rate = QLineEdit()
self.uptake_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.uptake_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("1/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#---
label = QLabel("net export rate")
label.setFixedWidth(self.label_width)
label.setAlignment(QtCore.Qt.AlignRight)
idr += 1
glayout.addWidget(label, idr,0, 1,1) # w, row, column, rowspan, colspan
self.secretion_net_export_rate = QLineEdit()
self.secretion_net_export_rate.setValidator(QtGui.QDoubleValidator())
glayout.addWidget(self.secretion_net_export_rate, idr,1, 1,1) # w, row, column, rowspan, colspan
units = QLabel("total/min")
units.setFixedWidth(self.units_width)
units.setAlignment(QtCore.Qt.AlignLeft)
glayout.addWidget(units, idr,2, 1,1) # w, row, column, rowspan, colspan
#------
for idx in range(11): # rwh: hack solution to align rows
blank_line = QLabel("")
idr += 1
glayout.addWidget(blank_line, idr,0, 1,1) # w, row, column, rowspan, colspan
#------
# vlayout.setVerticalSpacing(10) # rwh - argh
secretion_tab.setLayout(glayout)
return secretion_tab
#--------------------------------------------------------
def create_molecular_tab(self):
label = QLabel("Phenotype: molecular")
label.setStyleSheet("background-color: orange")
label.setAlignment(QtCore.Qt.AlignCenter)
self.vbox.addWidget(label)
#--------------------------------------------------------
def create_custom_data_tab(self):
#===== Custom data
label = QLabel("Custom data")
label.setStyleSheet("background-color: cyan")
#-------------------------
self.custom_data_controls_hbox = QHBoxLayout()
# self.new_button = QPushButton("New")
self.new_button = QPushButton("Append 5 more rows")
self.custom_data_controls_hbox.addWidget(self.new_button)
self.new_button.clicked.connect(self.append_more_cb)
self.clear_button = QPushButton("Clear selected rows")
self.custom_data_controls_hbox.addWidget(self.clear_button)
self.clear_button.clicked.connect(self.clear_rows_cb)
#-------------------------
# Fixed names for columns:
hbox = QHBoxLayout()
# self.select = QtWidgets.QCheckBox("")
w = QLabel("Name")
w.setAlignment(QtCore.Qt.AlignCenter)
hbox.addWidget(w)
# col2 = QtWidgets.QLabel("Type")
# col2.setAlignment(QtCore.Qt.AlignCenter)
# hbox.addWidget(col2)
w = QLabel("Value (double)")
w.setAlignment(QtCore.Qt.AlignCenter)
hbox.addWidget(w)
w = QLabel("Units")
w.setAlignment(QtCore.Qt.AlignCenter)
hbox.addWidget(w)
# label.setFixedWidth(180)
# self.vbox.addWidget(label)
# self.vbox.addLayout(self.custom_data_controls_hbox)
# self.vbox.addLayout(hbox)
# Create lists for the various input boxes
self.custom_data_select = []
self.custom_data_name = []
self.custom_data_value = []
self.custom_data_units = []
for idx in range(10): # rwh/TODO - this should depend on how many in the .xml
# self.main_layout.addLayout(NewUserParam(self))
hbox = QHBoxLayout()
w = QCheckBox("")
self.custom_data_select.append(w)
hbox.addWidget(w)
w = QLineEdit()
self.custom_data_name.append(w)
# self.name.setValidator(QtGui.QDoubleValidator())
# self.diffusion_coef.enter.connect(self.save_xml)
hbox.addWidget(w)
# if idx == 0:
# w.setText("random_seed")
w = QLineEdit()
self.custom_data_value.append(w)
# w.setValidator(QtGui.QDoubleValidator())
# if idx == 0:
# w.setText("0")
hbox.addWidget(w)
w = QLineEdit()
w.setFixedWidth(self.custom_data_units_width)
self.custom_data_units.append(w)
hbox.addWidget(w)
# units = QtWidgets.QLabel("micron^2/min")
# units.setFixedWidth(self.units_width)
# hbox.addWidget(units)
# self.vbox.addLayout(hbox)
# self.vbox.addLayout(hbox)
# self.vbox.addLayout(hbox)
self.custom_data_count += 1
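# (editor note) custom_data_count appears to track the number of
# custom-data rows so append_more_cb() can extend these parallel lists
# consistently (an assumption based on the button wiring above).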
#==================================================================
# compare with config_tab.py
# self.config_params.setLayout(self.vbox)
# self.scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
# self.scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
# self.scroll.setWidgetResizable(True)
# self.scroll.setWidget(self.config_params) # self.config_params = QWidget()
# self.layout = QVBoxLayout(self)
# self.layout.addWidget(self.scroll)
#===============
# self.params_cell_def.setLayout(self.vbox)
self.scroll_params.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_params.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_params.setWidgetResizable(True)
# self.scroll_params.setWidget(self.params_cell_def)
self.scroll_params.setWidget(self.tab_widget) # self.tab_widget = QTabWidget()
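# The scroll area wraps the whole QTabWidget so all phenotype sub-tabs
# share one scrollable viewport; both scrollbars are forced always-on
# via ScrollBarAlwaysOn above.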
# self.save_button = QPushButton("Save")
# self.text = QLabel("Hello World",alignment=QtCore.Qt.AlignCenter)
self.layout = QVBoxLayout(self)
# self.layout.addStretch(1)
# self.layout.addWidget(self.tabs)
# self.layout.addWidget(self.params)
self.layout.addLayout(self.controls_hbox)
# self.layout.addLayout(self.cell_types_tabs_layout)
# self.layout.addWidget(self.tab_widget)
# self.layout.addWidget(self.scroll)
self.layout.addWidget(self.splitter)
# self.layout.addWidget(self.vbox)
# self.layout.addWidget(self.text)
# self.layout.addWidget(self.save_button)
# self.save_button.clicked.connect(self.save_xml)
# @QtCore.Slot()
# def save_xml(self):
# # self.text.setText(random.choice(self.hello))
# pass
#--------------------------------------------------------
@QtCore.Slot()
def cycle_changed_cb(self, idx):
# pass
print('------ cycle_changed_cb(): idx = ',idx)
self.customize_cycle_choices()
# QMessageBox.information(self, "Cycle Changed:",
# "Current Cycle Index: %d" % idx )
@QtCore.Slot()
def motility_substrate_changed_cb(self, idx):
print('------ motility_substrate_changed_cb(): idx = ',idx)
print(self.motility_substrate_dropdown.currentText())
if idx == -1:
return
@QtCore.Slot()
def secretion_substrate_changed_cb(self, idx):
print('------ secretion_substrate_changed_cb(): idx = ',idx)
print(self.secretion_substrate_dropdown.currentText())
if idx == -1:
return
# uep = self.xml_root.find('.//microenvironment_setup') # find unique entry point
secretion_substrate_path = self.xml_root.find(".//cell_definitions//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//secretion//substrate[" + str(idx+1) + "]")
if (secretion_substrate_path):
print(secretion_substrate_path)
# <substrate name="virus">
# <secretion_rate units="1/min">0</secretion_rate>
# <secretion_target units="substrate density">1</secretion_target>
# <uptake_rate units="1/min">10</uptake_rate>
# <net_export_rate units="total substrate/min">0</net_export_rate>
# </substrate>
# uep = self.xml_root.find(".//cell_definitions//cell_definition")
# print(" secretion_rate=", secretion_substrate_path.find('.//secretion_rate').text )
self.secretion_rate.setText(secretion_substrate_path.find(".//secretion_rate").text)
self.secretion_target.setText(secretion_substrate_path.find(".//secretion_target").text)
self.uptake_rate.setText(secretion_substrate_path.find(".//uptake_rate").text)
self.secretion_net_export_rate.setText(secretion_substrate_path.find(".//net_export_rate").text)
# self.cycle_dropdown.addItem("live cells") # 0 -> 0
# self.cycle_dropdown.addItem("basic Ki67") # 0 -> 1, 1 -> 0
# self.cycle_dropdown.addItem("advanced Ki67") # 0 -> 1, 1 -> 2, 2 -> 0
# self.cycle_dropdown.addItem("flow cytometry") # 0 -> 1, 1 -> 2, 2 -> 0
# self.cycle_dropdown.addItem("flow cytometry separated") # 0->1, 1->2, 2->3, 3->0
# self.cycle_dropdown.addItem("cycling quiescent") # 0 -> 1, 1 -> 0
def cycle_phase_transition_cb(self):
# rb1.toggled.connect(self.updateLabel)(self, idx_choice):
# print('self.cycle_rows_vbox.count()=', self.cycle_rows_vbox.count())
print('cycle_phase_transition_cb: self.stacked_cycle.count()=', self.stacked_cycle.count())
radioBtn = self.sender()
if radioBtn.isChecked():
print("--------- ",radioBtn.text())
print("self.cycle_dropdown.currentText() = ",self.cycle_dropdown.currentText())
print("self.cycle_dropdown.currentIndex() = ",self.cycle_dropdown.currentIndex())
# self.cycle_rows_vbox.clear()
# if radioBtn.text().find("duration"):
if "duration" in radioBtn.text():
print('cycle_phase_transition_cb: --> duration')
self.cycle_duration_flag = True
self.customize_cycle_choices()
else: # transition rates
print('cycle_phase_transition_cb: NOT duration')
self.cycle_duration_flag = False
self.customize_cycle_choices()
# pass
# self.cycle_dropdown.addItem("live cells") # 0 -> 0
# self.cycle_dropdown.addItem("basic Ki67") # 0 -> 1, 1 -> 0
# self.cycle_dropdown.addItem("advanced Ki67") # 0 -> 1, 1 -> 2, 2 -> 0
# self.cycle_dropdown.addItem("flow cytometry") # 0 -> 1, 1 -> 2, 2 -> 0
# self.cycle_dropdown.addItem("flow cytometry separated") # 0->1, 1->2, 2->3, 3->0
# self.cycle_dropdown.addItem("cycling quiescent") # 0 -> 1, 1 -> 0
def customize_cycle_choices(self):
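        # Map the chosen cycle model (dropdown index) and the duration-vs-transition-rate
        # radio choice onto the matching page of the stacked cycle-parameters widget.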
if self.cycle_duration_flag: # specifying duration times (radio button)
if self.cycle_dropdown.currentIndex() == 0: # live
print("customize_cycle_choices(): idx = ",self.stack_idx_d00)
self.stacked_cycle.setCurrentIndex(self.stack_idx_d00)
elif (self.cycle_dropdown.currentIndex() == 1) or (self.cycle_dropdown.currentIndex() == 5): # basic Ki67 or cycling quiescent
print("customize_cycle_choices(): idx = ",self.stack_idx_d01)
self.stacked_cycle.setCurrentIndex(self.stack_idx_d01)
elif (self.cycle_dropdown.currentIndex() == 2) or (self.cycle_dropdown.currentIndex() == 3): # advanced Ki67 or flow cytometry
print("customize_cycle_choices(): idx = ",self.stack_idx_d02)
self.stacked_cycle.setCurrentIndex(self.stack_idx_d02)
elif (self.cycle_dropdown.currentIndex() == 4): # flow cytometry separated
print("customize_cycle_choices(): idx = ",self.stack_idx_d03)
self.stacked_cycle.setCurrentIndex(self.stack_idx_d03)
else: # specifying transition rates (radio button)
if self.cycle_dropdown.currentIndex() == 0: # live
print("customize_cycle_choices(): idx = ",self.stack_idx_t00)
self.stacked_cycle.setCurrentIndex(self.stack_idx_t00)
elif (self.cycle_dropdown.currentIndex() == 1) or (self.cycle_dropdown.currentIndex() == 5): # basic Ki67 or cycling quiescent
print("customize_cycle_choices(): idx = ",self.stack_idx_t01)
self.stacked_cycle.setCurrentIndex(self.stack_idx_t01)
elif (self.cycle_dropdown.currentIndex() == 2) or (self.cycle_dropdown.currentIndex() == 3): # advanced Ki67 or flow cytometry
print("customize_cycle_choices(): idx = ",self.stack_idx_t02)
self.stacked_cycle.setCurrentIndex(self.stack_idx_t02)
elif (self.cycle_dropdown.currentIndex() == 4): # flow cytometry separated
print("customize_cycle_choices(): idx = ",self.stack_idx_t03)
self.stacked_cycle.setCurrentIndex(self.stack_idx_t03)
@QtCore.Slot()
def clear_rows_cb(self):
print("----- clearing all selected rows")
@QtCore.Slot()
def append_more_cb(self):
for idx in range(5):
# self.main_layout.addLayout(NewUserParam(self))
hbox = QHBoxLayout()
w = QCheckBox("")
self.custom_data_select.append(w)
hbox.addWidget(w)
w = QLineEdit()
self.custom_data_name.append(w)
hbox.addWidget(w)
w = QLineEdit()
self.custom_data_value.append(w)
# w.setValidator(QtGui.QDoubleValidator())
hbox.addWidget(w)
w = QLineEdit()
w.setFixedWidth(self.custom_data_units_width)
self.custom_data_units.append(w)
hbox.addWidget(w)
self.vbox.addLayout(hbox)
# self.main_layout.addLayout(hbox)
self.custom_data_count = self.custom_data_count + 1
print(self.custom_data_count)
#---------------------------------
# def fill_motility_substrates(self):
def fill_substrates_comboboxes(self):
print("cell_def_tab.py: ------- fill_substrates_comboboxes")
self.motility_substrate_dropdown.clear()
self.secretion_substrate_dropdown.clear()
uep = self.xml_root.find('.//microenvironment_setup') # find unique entry point
# vp = [] # pointers to <variable> nodes
if uep:
idx = 0
for var in uep.findall('variable'):
# vp.append(var)
print(" --> ",var.attrib['name'])
name = var.attrib['name']
self.motility_substrate_dropdown.addItem(name)
self.secretion_substrate_dropdown.addItem(name)
# def delete_substrate_from_comboboxes(self, name):
def delete_substrate_from_comboboxes(self, item_idx):
# print("------- delete_substrate_from_comboboxes: name=",name)
print("------- delete_substrate_from_comboboxes: name=",item_idx)
self.motility_substrate_dropdown.removeItem(item_idx)
self.secretion_substrate_dropdown.removeItem(item_idx)
# self.motility_substrate_dropdown.clear()
# self.secretion_substrate_dropdown.clear()
def tree_item_changed_cb(self, it,col):
print('--- tree_item_changed:', it, col, it.text(col) )
self.current_cell_def = it.text(col)
print('--- self.current_cell_def= ',self.current_cell_def )
# fill in the GUI with this one's params
self.fill_gui(self.current_cell_def)
def populate_tree(self):
uep = self.xml_root.find(".//cell_definitions")
if uep:
self.tree.clear()
idx = 0
for cell_def in uep:
# print(cell_def.attrib['name'])
cd_name = cell_def.attrib['name']
cellname = QTreeWidgetItem([cd_name])
self.tree.insertTopLevelItem(idx,cellname)
idx += 1
def first_cell_def_name(self):
uep = self.xml_root.find(".//cell_definitions//cell_definition")
if uep:
return(uep.attrib['name'])
#-------------------------------------------------------------------
def fill_gui(self, cell_def_name):
# <cell_definitions>
# <cell_definition name="default" ID="0">
# <cell_definition name="motile tumor cell" ID="1">
if cell_def_name == None:
cell_def_name = self.xml_root.find(".//cell_definitions//cell_definition").attrib['name']
print('--------- fill_gui: cell_def_name=',cell_def_name)
# self.cell_type_name.setText(cell_def_name)
uep = self.xml_root.find(".//cell_definitions")
if uep:
# self.tree.clear()
idx = 0
for cell_def in uep:
# print(cell_def.attrib['name'])
cd_name = cell_def.attrib['name']
# cd_cycle_code = cell_def.attrib['name']
cellname = QTreeWidgetItem([cd_name])
print('cellname.text(0)=',cellname.text(0))
cellidx = QTreeWidgetItem([cd_name]).indexOfChild
print('cellidx=',cellidx)
print('cell_def_name=',cell_def_name)
if cellname.text(0) == cell_def_name:
print("break out of cell_def loop with idx=",idx)
break
# self.tree.insertTopLevelItem(idx,cellname)
idx += 1
self.idx_current_cell_def = idx + 1 # we use 1-offset indices below
cycle_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//cycle"
cycle_code = int(uep.find(cycle_path).attrib['code'])
print(' >> cycle_path=',cycle_path, ", code=",cycle_code)
# static const int advanced_Ki67_cycle_model= 0;
# static const int basic_Ki67_cycle_model=1;
# static const int flow_cytometry_cycle_model=2;
# static const int live_apoptotic_cycle_model=3;
# static const int total_cells_cycle_model=4;
# static const int live_cells_cycle_model = 5;
# static const int flow_cytometry_separated_cycle_model = 6;
# static const int cycling_quiescent_model = 7;
# self.cycle_dropdown.addItem("live cells")
# self.cycle_dropdown.addItem("basic Ki67")
# self.cycle_dropdown.addItem("advanced Ki67")
# self.cycle_dropdown.addItem("flow cytometry")
# self.cycle_dropdown.addItem("flow cytometry separated")
# self.cycle_dropdown.addItem("cycling quiescent")
if cycle_code == 0:
self.cycle_dropdown.setCurrentIndex(2)
elif cycle_code == 1:
self.cycle_dropdown.setCurrentIndex(1)
elif cycle_code == 2:
self.cycle_dropdown.setCurrentIndex(3)
elif cycle_code == 5:
self.cycle_dropdown.setCurrentIndex(0)
elif cycle_code == 6:
self.cycle_dropdown.setCurrentIndex(4)
elif cycle_code == 7:
self.cycle_dropdown.setCurrentIndex(5)
# <cell_definition name="cargo cell" ID="2" visible="true">
# <phenotype>
# <cycle code="5" name="live">
# <phase_transition_rates units="1/min">
# <rate start_index="0" end_index="0" fixed_duration="false">0.0</rate>
# </phase_transition_rates>
phase_transition_path = cycle_path + "//phase_transition_rates"
print(' >> phase_transition_path ')
pt_uep = uep.find(phase_transition_path)
# if pt_uep:
# # self.rb1 = QRadioButton("transition rate(s)", self)
# self.rb1.setChecked(True)
# for rate in pt_uep:
# print(rate)
# print("start_index=",rate.attrib["start_index"])
# if (rate.attrib['start_index'] == "0") and (rate.attrib['end_index'] == "0"):
# self.cycle_trate00.setText(rate.text)
# elif (rate.attrib['start_index'] == "0") and (rate.attrib['end_index'] == "1"):
# self.cycle_trate01.setText(rate.text)
# elif (rate.attrib['start_index'] == "1") and (rate.attrib['end_index'] == "2"):
# self.cycle_trate12.setText(rate.text)
# elif (rate.attrib['start_index'] == "2") and (rate.attrib['end_index'] == "3"):
# self.cycle_trate23.setText(rate.text)
# elif (rate.attrib['start_index'] == "3") and (rate.attrib['end_index'] == "0"):
# self.cycle_trate30.setText(rate.text)
# <cycle code="6" name="Flow cytometry model (separated)">
# <phase_durations units="min">
# <duration index="0" fixed_duration="false">300.0</duration>
# <duration index="1" fixed_duration="true">480</duration>
# <duration index="2" fixed_duration="true">240</duration>
# <duration index="3" fixed_duration="true">60</duration>
# </phase_durations>
#
# self.phase0_duration = QLineEdit()
phase_durations_path = cycle_path + "//phase_durations"
print(' >> phase_durations_path =',phase_durations_path )
pd_uep = uep.find(phase_durations_path)
print(' >> pd_uep =',pd_uep )
# if pd_uep:
# self.rb2.setChecked(True)
# for pd in pd_uep:
# print(pd)
# print("index=",pd.attrib["index"])
# if pd.attrib['index'] == "0":
# self.cycle_duration00.setText(pd.text)
# self.cycle_duration01.setText(pd.text)
# elif pd.attrib['index'] == "1":
# self.cycle_duration_02_01.setText(pd.text)
# self.cycle_duration_03_01.setText(pd.text)
# elif pd.attrib['index'] == "2":
# self.cycle_duration_02_20.setText(pd.text)
# self.cycle_duration_03_23.setText(pd.text)
# elif pd.attrib['index'] == "3":
# self.cycle_duration_03_30.setText(pd.text)
# rf. microenv:
# self.cell_type_name.setText(var.attrib['name'])
# self.diffusion_coef.setText(vp[0].find('.//diffusion_coefficient').text)
# ------------------ cell_definition: default
# --------- cycle (live)
# self.float0.value = float(uep.find('.//cell_definition[1]//phenotype//cycle//phase_transition_rates//rate[1]').text)
# <death>
# <model code="100" name="apoptosis">
# ...
# <model code="101" name="necrosis">
# --------- death
death_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//death//"
print('death_path=',death_path)
# rwh/TODO: validate we've got apoptosis or necrosis since order is not required in XML.
apoptosis_path = death_path + "model[1]//"
# self.apoptosis_death_rate.setText(uep.find('.//cell_definition[1]//phenotype//death//model[1]//death_rate').text)
self.apoptosis_death_rate.setText(uep.find(apoptosis_path + 'death_rate').text)
phase_durations_path = apoptosis_path + "phase_durations"
print(' >> phase_durations_path =',phase_durations_path )
pd_uep = uep.find(phase_durations_path)
print(' >> pd_uep =',pd_uep )
if pd_uep:
for pd in pd_uep:
print(pd)
print("index=",pd.attrib["index"])
if pd.attrib['index'] == "0":
self.apoptosis_phase0_duration.setText(pd.text)
# elif pd.attrib['index'] == "1":
# self.apoptosis_phase1_duration.setText(pd.text)
# elif pd.attrib['index'] == "2":
# self.apoptosis_phase2_duration.setText(pd.text)
# elif pd.attrib['index'] == "3":
# self.apoptosis_phase3_duration.setText(pd.text)
#-----
necrosis_path = death_path + "model[2]//"
self.necrosis_death_rate.setText(uep.find(necrosis_path + 'death_rate').text)
phase_durations_path = necrosis_path + "phase_durations"
print(' >> necrosis phase_durations_path =',phase_durations_path )
pd_uep = uep.find(phase_durations_path)
print(' >> pd_uep =',pd_uep )
if pd_uep:
for pd in pd_uep:
print(pd)
print("index=",pd.attrib["index"])
if pd.attrib['index'] == "0":
self.necrosis_phase0_duration.setText(pd.text)
elif pd.attrib['index'] == "1":
self.necrosis_phase1_duration.setText(pd.text)
# elif pd.attrib['index'] == "2":
# self.necrosis_phase2_duration.setText(pd.text)
# elif pd.attrib['index'] == "3":
# self.necrosis_phase3_duration.setText(pd.text)
#-----
apoptosis_params_path = apoptosis_path + "parameters//"
necrosis_params_path = necrosis_path + "parameters//"
# necrosis_path = ".//cell_definition[" + str(idx) + "]//phenotype//death//"
# self.apoptosis_unlysed_rate.setText(uep.find('.//cell_definition[1]//phenotype//death//model[1]//unlysed_fluid_change_rate').text)
# full_str = death_path + "model[1]//unlysed_fluid_change_rate"
# print('full_str=',full_str)
# self.apoptosis_unlysed_rate.setText(uep.find(full_str).text)
# <parameters>
# <unlysed_fluid_change_rate units="1/min">0.07</unlysed_fluid_change_rate>
# <lysed_fluid_change_rate units="1/min">0</lysed_fluid_change_rate>
# <cytoplasmic_biomass_change_rate units="1/min">1.66667e-02</cytoplasmic_biomass_change_rate>
# <nuclear_biomass_change_rate units="1/min">5.83333e-03</nuclear_biomass_change_rate>
# <calcification_rate units="1/min">0</calcification_rate>
# <relative_rupture_volume units="dimensionless">2.0</relative_rupture_volume>
#---- apoptosis
self.apoptosis_unlysed_rate.setText(uep.find(apoptosis_params_path+"unlysed_fluid_change_rate").text)
self.apoptosis_lysed_rate.setText(uep.find(apoptosis_params_path+"lysed_fluid_change_rate").text)
self.apoptosis_cytoplasmic_biomass_change_rate.setText(uep.find(apoptosis_params_path+"cytoplasmic_biomass_change_rate").text)
self.apoptosis_nuclear_biomass_change_rate.setText(uep.find(apoptosis_params_path+"nuclear_biomass_change_rate").text)
        self.apoptosis_calcification_rate.setText(uep.find(apoptosis_params_path+"calcification_rate").text)
self.apoptosis_relative_rupture_volume.setText(uep.find(apoptosis_params_path+"relative_rupture_volume").text)
#---- necrosis
self.necrosis_unlysed_rate.setText(uep.find(necrosis_params_path+"unlysed_fluid_change_rate").text)
self.necrosis_lysed_rate.setText(uep.find(necrosis_params_path+"lysed_fluid_change_rate").text)
self.necrosis_cytoplasmic_biomass_change_rate.setText(uep.find(necrosis_params_path+"cytoplasmic_biomass_change_rate").text)
self.necrosis_nuclear_biomass_change_rate.setText(uep.find(necrosis_params_path+"nuclear_biomass_change_rate").text)
        self.necrosis_calcification_rate.setText(uep.find(necrosis_params_path+"calcification_rate").text)
self.necrosis_relative_rupture_volume.setText(uep.find(necrosis_params_path+"relative_rupture_volume").text)
# self.apoptosis_unlysed_rate.setText(uep.find("'" + death_path + "model[1]//unlysed_fluid_change_rate'" + ").text)"
# self.float3.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[1]//parameters//lysed_fluid_change_rate').text)
# self.float4.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[1]//parameters//cytoplasmic_biomass_change_rate').text)
# self.float5.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[1]//parameters//nuclear_biomass_change_rate').text)
# self.float6.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[1]//parameters//calcification_rate').text)
# self.float7.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[1]//parameters//relative_rupture_volume').text)
# self.float8.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[2]//death_rate').text)
# self.float9.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[2]//parameters//unlysed_fluid_change_rate').text)
# self.float10.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[2]//parameters//lysed_fluid_change_rate').text)
# self.float11.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[2]//parameters//cytoplasmic_biomass_change_rate').text)
# self.float12.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[2]//parameters//nuclear_biomass_change_rate').text)
# self.float13.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[2]//parameters//calcification_rate').text)
# self.float14.value = float(uep.find('.//cell_definition[1]//phenotype//death//model[2]//parameters//relative_rupture_volume').text)
# # --------- volume
# <volume>
# <total units="micron^3">2494</total>
# <fluid_fraction units="dimensionless">0.75</fluid_fraction>
# <nuclear units="micron^3">540</nuclear>
# <fluid_change_rate units="1/min">0.05</fluid_change_rate>
# <cytoplasmic_biomass_change_rate units="1/min">0.0045</cytoplasmic_biomass_change_rate>
# <nuclear_biomass_change_rate units="1/min">0.0055</nuclear_biomass_change_rate>
# <calcified_fraction units="dimensionless">0</calcified_fraction>
# <calcification_rate units="1/min">0</calcification_rate>
# <relative_rupture_volume units="dimensionless">2.0</relative_rupture_volume>
volume_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//volume//"
print('volume_path=',volume_path)
self.volume_total.setText(uep.find(volume_path+"total").text)
self.volume_fluid_fraction.setText(uep.find(volume_path+"fluid_fraction").text)
self.volume_nuclear.setText(uep.find(volume_path+"nuclear").text)
self.volume_fluid_change_rate.setText(uep.find(volume_path+"fluid_change_rate").text)
self.volume_cytoplasmic_biomass_change_rate.setText(uep.find(volume_path+"cytoplasmic_biomass_change_rate").text)
self.volume_nuclear_biomass_change_rate.setText(uep.find(volume_path+"nuclear_biomass_change_rate").text)
self.volume_calcified_fraction.setText(uep.find(volume_path+"calcified_fraction").text)
self.volume_calcification_rate.setText(uep.find(volume_path+"calcification_rate").text)
self.relative_rupture_volume.setText(uep.find(volume_path+"relative_rupture_volume").text)
# self.necrosis_relative_rupture_volume.setText(uep.find(necrosis_params_path+"relative_rupture_volume").text)
# self.float15.value = float(uep.find('.//cell_definition[1]//phenotype//volume//total').text)
# self.float16.value = float(uep.find('.//cell_definition[1]//phenotype//volume//fluid_fraction').text)
# self.float17.value = float(uep.find('.//cell_definition[1]//phenotype//volume//nuclear').text)
# self.float18.value = float(uep.find('.//cell_definition[1]//phenotype//volume//fluid_change_rate').text)
# self.float19.value = float(uep.find('.//cell_definition[1]//phenotype//volume//cytoplasmic_biomass_change_rate').text)
# self.float20.value = float(uep.find('.//cell_definition[1]//phenotype//volume//nuclear_biomass_change_rate').text)
# self.float21.value = float(uep.find('.//cell_definition[1]//phenotype//volume//calcified_fraction').text)
# self.float22.value = float(uep.find('.//cell_definition[1]//phenotype//volume//calcification_rate').text)
# self.float23.value = float(uep.find('.//cell_definition[1]//phenotype//volume//relative_rupture_volume').text)
# <mechanics>
# <cell_cell_adhesion_strength units="micron/min">0.4</cell_cell_adhesion_strength>
# <cell_cell_repulsion_strength units="micron/min">10.0</cell_cell_repulsion_strength>
# <relative_maximum_adhesion_distance units="dimensionless">1.25</relative_maximum_adhesion_distance>
# <options>
# <set_relative_equilibrium_distance enabled="false" units="dimensionless">1.8</set_relative_equilibrium_distance>
# <set_absolute_equilibrium_distance enabled="false" units="micron">15.12</set_absolute_equilibrium_distance>
# </options>
# # --------- mechanics
mechanics_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//mechanics//"
print('mechanics_path=',mechanics_path)
self.cell_cell_adhesion_strength.setText(uep.find(mechanics_path+"cell_cell_adhesion_strength").text)
self.cell_cell_repulsion_strength.setText(uep.find(mechanics_path+"cell_cell_repulsion_strength").text)
self.relative_maximum_adhesion_distance.setText(uep.find(mechanics_path+"relative_maximum_adhesion_distance").text)
mechanics_options_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//mechanics//options//"
self.set_relative_equilibrium_distance.setText(uep.find(mechanics_options_path+"set_relative_equilibrium_distance").text)
self.set_absolute_equilibrium_distance.setText(uep.find(mechanics_options_path+"set_absolute_equilibrium_distance").text)
if uep.find(mechanics_options_path+"set_relative_equilibrium_distance").attrib['enabled'].lower() == 'true':
self.set_relative_equilibrium_distance_enabled.setChecked(True)
else:
self.set_relative_equilibrium_distance_enabled.setChecked(False)
if uep.find(mechanics_options_path+"set_absolute_equilibrium_distance").attrib['enabled'].lower() == 'true':
self.set_absolute_equilibrium_distance_enabled.setChecked(True)
else:
self.set_absolute_equilibrium_distance_enabled.setChecked(False)
# self.float24.value = float(uep.find('.//cell_definition[1]//phenotype//mechanics//cell_cell_adhesion_strength').text)
# self.float25.value = float(uep.find('.//cell_definition[1]//phenotype//mechanics//cell_cell_repulsion_strength').text)
# self.float26.value = float(uep.find('.//cell_definition[1]//phenotype//mechanics//relative_maximum_adhesion_distance').text)
# self.bool0.value = ('true' == (uep.find('.//cell_definition[1]//phenotype//mechanics//options//set_relative_equilibrium_distance').attrib['enabled'].lower()))
# self.bool1.value = ('true' == (uep.find('.//cell_definition[1]//phenotype//mechanics//options//set_absolute_equilibrium_distance').attrib['enabled'].lower()))
# <motility>
# <speed units="micron/min">5.0</speed>
# <persistence_time units="min">5.0</persistence_time>
# <migration_bias units="dimensionless">0.5</migration_bias>
# <options>
# <enabled>true</enabled>
# <use_2D>true</use_2D>
# <chemotaxis>
# <enabled>false</enabled>
# <substrate>director signal</substrate>
# <direction>1</direction>
# </chemotaxis>
# </options>
# # --------- motility
motility_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//motility//"
print('motility_path=',motility_path)
self.speed.setText(uep.find(motility_path+"speed").text)
self.persistence_time.setText(uep.find(motility_path+"persistence_time").text)
self.migration_bias.setText(uep.find(motility_path+"migration_bias").text)
motility_options_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//motility//options//"
# print(' motility options enabled', uep.find(motility_options_path +'enabled').text)
if uep.find(motility_options_path +'enabled').text.lower() == 'true':
self.motility_enabled.setChecked(True)
else:
self.motility_enabled.setChecked(False)
if uep.find(motility_options_path +'use_2D').text.lower() == 'true':
self.motility_2D.setChecked(True)
else:
self.motility_2D.setChecked(False)
# # --------- secretion
# <substrate name="virus">
# <secretion_rate units="1/min">0</secretion_rate>
# <secretion_target units="substrate density">1</secretion_target>
# <uptake_rate units="1/min">10</uptake_rate>
# <net_export_rate units="total substrate/min">0</net_export_rate>
# </substrate>
secretion_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//secretion//"
print('secretion_path =',secretion_path)
secretion_sub1_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//secretion//substrate[1]//"
# if self.uep_cell_defs:
# self.uep_cell_defs = self.xml_root.find(".//cell_definitions")
# print('self.uep_cell_defs= ',self.uep_cell_defs)
# # secretion_path = ".//cell_definition[" + str(idx_current_cell_def) + "]//phenotype//secretion//"
uep_secretion = self.xml_root.find(".//cell_definitions//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//secretion")
print('uep_secretion = ',uep_secretion )
self.secretion_rate_val.clear()
self.secretion_target_val.clear()
self.secretion_uptake_rate_val.clear()
self.secretion_net_export_rate_val.clear()
idx = 0
for sub in uep_secretion.findall('substrate'):
print(idx,") -- secretion substrate = ",sub.attrib['name'])
self.secretion_rate_val.append(sub.find("secretion_rate").text)
self.secretion_target_val.append(sub.find("secretion_target").text)
self.secretion_uptake_rate_val.append(sub.find("uptake_rate").text)
self.secretion_net_export_rate_val.append(sub.find("net_export_rate").text)
idx += 1
self.secretion_rate.setText(self.secretion_rate_val[0])
self.secretion_target.setText(self.secretion_target_val[0])
self.uptake_rate.setText(self.secretion_uptake_rate_val[0])
self.secretion_net_export_rate.setText(self.secretion_net_export_rate_val[0])
# # --------- molecular
# # --------- custom data
# <custom_data>
# <receptor units="dimensionless">0.0</receptor>
# <cargo_release_o2_threshold units="mmHg">10</cargo_release_o2_threshold>
uep_custom_data = self.xml_root.find(".//cell_definitions//cell_definition[" + str(self.idx_current_cell_def) + "]//custom_data")
# custom_data_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//custom_data//"
print('uep_custom_data=',uep_custom_data)
idx = 0
# rwh/TODO: if we have more vars than we initially created rows for, we'll need
# to call 'append_more_cb' for the excess.
for var in uep_custom_data:
print(idx, ") ",var)
self.custom_data_name[idx].setText(var.tag)
print("tag=",var.tag)
self.custom_data_value[idx].setText(var.text)
if 'units' in var.keys():
self.custom_data_units[idx].setText(var.attrib['units'])
idx += 1
# Read values from the GUI widgets and generate/write a new XML
def fill_xml(self):
pass
# TODO: verify valid type (numeric) and range?
# xml_root.find(".//x_min").text = str(self.xmin.value)
# xml_root.find(".//x_max").text = str(self.xmax.value)
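        # A hedged sketch of the intended write-back, mirroring the read paths in
        # fill_gui() above; the widget/XML pairings are assumptions and are kept
        # commented out so this stub's behavior is unchanged:
        # motility_path = ".//cell_definition[" + str(self.idx_current_cell_def) + "]//phenotype//motility//"
        # self.xml_root.find(motility_path + "speed").text = self.speed.text()
        # self.xml_root.find(motility_path + "migration_bias").text = self.migration_bias.text()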
def clear_gui(self):
# self.cell_type_name.setText('')
self.cycle_trate00.setText('')
self.cycle_trate01.setText('')
self.cycle_trate10.setText('')
self.cycle_trate_02_01.setText('')
self.cycle_trate_02_12.setText('')
self.cycle_trate_02_20.setText('')
self.cycle_trate_03_01.setText('')
self.cycle_trate_03_12.setText('')
self.cycle_trate_03_23.setText('')
self.cycle_trate_03_30.setText('')
self.cycle_duration00.setText('')
self.cycle_duration01.setText('')
self.cycle_duration10.setText('')
self.cycle_duration_02_01.setText('')
self.cycle_duration_02_12.setText('')
self.cycle_duration_02_20.setText('')
self.cycle_duration_03_01.setText('')
self.cycle_duration_03_12.setText('')
self.cycle_duration_03_23.setText('')
self.cycle_duration_03_30.setText('')
self.apoptosis_death_rate.setText('')
self.apoptosis_phase0_duration.setText('')
# self.apoptosis_phase1_duration.setText('')
# self.apoptosis_phase2_duration.setText('')
# self.apoptosis_phase3_duration.setText('')
self.apoptosis_unlysed_rate.setText('')
self.apoptosis_lysed_rate.setText('')
self.apoptosis_cytoplasmic_biomass_change_rate.setText('')
self.apoptosis_nuclear_biomass_change_rate.setText('')
self.apoptosis_calcification_rate.setText('')
self.apoptosis_relative_rupture_volume.setText('')
self.necrosis_death_rate.setText('')
self.necrosis_phase0_duration.setText('')
self.necrosis_phase1_duration.setText('')
# self.necrosis_phase2_duration.setText('')
# self.necrosis_phase3_duration.setText('')
self.necrosis_unlysed_rate.setText('')
self.necrosis_lysed_rate.setText('')
self.necrosis_cytoplasmic_biomass_change_rate.setText('')
self.necrosis_nuclear_biomass_change_rate.setText('')
self.necrosis_calcification_rate.setText('')
self.necrosis_relative_rupture_volume.setText('')
self.volume_total.setText('')
self.volume_fluid_fraction.setText('')
self.volume_nuclear.setText('')
self.volume_fluid_change_rate.setText('')
self.volume_cytoplasmic_biomass_change_rate.setText('')
self.volume_nuclear_biomass_change_rate.setText('')
self.volume_calcified_fraction.setText('')
self.volume_calcification_rate.setText('')
self.relative_rupture_volume.setText('')
self.cell_cell_adhesion_strength.setText('')
self.cell_cell_repulsion_strength.setText('')
self.relative_maximum_adhesion_distance.setText('')
self.set_relative_equilibrium_distance.setText('')
self.set_absolute_equilibrium_distance.setText('')
self.speed.setText('')
self.persistence_time.setText('')
self.migration_bias.setText('')
self.secretion_rate.setText('')
self.secretion_target.setText('')
self.uptake_rate.setText('')
self.secretion_net_export_rate.setText('')
| [
"PySide6.QtGui.QDoubleValidator",
"PySide6.QtCore.QStringListModel",
"PySide6.QtCore.Slot"
] | [((82039, 82052), 'PySide6.QtCore.Slot', 'QtCore.Slot', ([], {}), '()\n', (82050, 82052), False, 'from PySide6 import QtCore, QtGui\n'), ((82317, 82330), 'PySide6.QtCore.Slot', 'QtCore.Slot', ([], {}), '()\n', (82328, 82330), False, 'from PySide6 import QtCore, QtGui\n'), ((82558, 82571), 'PySide6.QtCore.Slot', 'QtCore.Slot', ([], {}), '()\n', (82569, 82571), False, 'from PySide6 import QtCore, QtGui\n'), ((88300, 88313), 'PySide6.QtCore.Slot', 'QtCore.Slot', ([], {}), '()\n', (88311, 88313), False, 'from PySide6 import QtCore, QtGui\n'), ((88399, 88412), 'PySide6.QtCore.Slot', 'QtCore.Slot', ([], {}), '()\n', (88410, 88412), False, 'from PySide6 import QtCore, QtGui\n'), ((2796, 2821), 'PySide6.QtCore.QStringListModel', 'QtCore.QStringListModel', ([], {}), '()\n', (2819, 2821), False, 'from PySide6 import QtCore, QtGui\n'), ((11056, 11080), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (11078, 11080), False, 'from PySide6 import QtCore, QtGui\n'), ((12476, 12500), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (12498, 12500), False, 'from PySide6 import QtCore, QtGui\n'), ((13274, 13298), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (13296, 13298), False, 'from PySide6 import QtCore, QtGui\n'), ((14907, 14931), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (14929, 14931), False, 'from PySide6 import QtCore, QtGui\n'), ((15725, 15749), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (15747, 15749), False, 'from PySide6 import QtCore, QtGui\n'), ((16543, 16567), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (16565, 16567), False, 'from PySide6 import QtCore, QtGui\n'), ((17811, 17835), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (17833, 17835), False, 'from PySide6 import QtCore, QtGui\n'), ((18629, 18653), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (18651, 18653), False, 'from PySide6 import QtCore, QtGui\n'), ((19447, 19471), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (19469, 19471), False, 'from PySide6 import QtCore, QtGui\n'), ((20265, 20289), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (20287, 20289), False, 'from PySide6 import QtCore, QtGui\n'), ((21599, 21623), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (21621, 21623), False, 'from PySide6 import QtCore, QtGui\n'), ((22799, 22823), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (22821, 22823), False, 'from PySide6 import QtCore, QtGui\n'), ((23600, 23624), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (23622, 23624), False, 'from PySide6 import QtCore, QtGui\n'), ((25010, 25034), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (25032, 25034), False, 'from PySide6 import QtCore, QtGui\n'), ((25831, 25855), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (25853, 25855), False, 'from PySide6 import QtCore, QtGui\n'), ((26652, 26676), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (26674, 26676), False, 'from PySide6 import QtCore, QtGui\n'), ((27917, 27941), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (27939, 27941), False, 'from PySide6 import QtCore, QtGui\n'), ((28738, 28762), 
'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (28760, 28762), False, 'from PySide6 import QtCore, QtGui\n'), ((29559, 29583), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (29581, 29583), False, 'from PySide6 import QtCore, QtGui\n'), ((30380, 30404), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (30402, 30404), False, 'from PySide6 import QtCore, QtGui\n'), ((32770, 32794), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (32792, 32794), False, 'from PySide6 import QtCore, QtGui\n'), ((34136, 34160), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (34158, 34160), False, 'from PySide6 import QtCore, QtGui\n'), ((36002, 36026), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (36024, 36026), False, 'from PySide6 import QtCore, QtGui\n'), ((36997, 37021), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (37019, 37021), False, 'from PySide6 import QtCore, QtGui\n'), ((38033, 38057), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (38055, 38057), False, 'from PySide6 import QtCore, QtGui\n'), ((39365, 39389), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (39387, 39389), False, 'from PySide6 import QtCore, QtGui\n'), ((40369, 40393), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (40391, 40393), False, 'from PySide6 import QtCore, QtGui\n'), ((41392, 41416), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (41414, 41416), False, 'from PySide6 import QtCore, QtGui\n'), ((42555, 42579), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (42577, 42579), False, 'from PySide6 import QtCore, QtGui\n'), ((43915, 43939), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (43937, 43939), False, 'from PySide6 import QtCore, QtGui\n'), ((45022, 45046), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (45044, 45046), False, 'from PySide6 import QtCore, QtGui\n'), ((46637, 46661), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (46659, 46661), False, 'from PySide6 import QtCore, QtGui\n'), ((47623, 47647), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (47645, 47647), False, 'from PySide6 import QtCore, QtGui\n'), ((48650, 48674), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (48672, 48674), False, 'from PySide6 import QtCore, QtGui\n'), ((49973, 49997), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (49995, 49997), False, 'from PySide6 import QtCore, QtGui\n'), ((50969, 50993), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (50991, 50993), False, 'from PySide6 import QtCore, QtGui\n'), ((51984, 52008), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (52006, 52008), False, 'from PySide6 import QtCore, QtGui\n'), ((53508, 53532), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (53530, 53532), False, 'from PySide6 import QtCore, QtGui\n'), ((54345, 54369), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (54367, 54369), False, 'from PySide6 import QtCore, QtGui\n'), ((54998, 55022), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', 
(55020, 55022), False, 'from PySide6 import QtCore, QtGui\n'), ((55947, 55971), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (55969, 55971), False, 'from PySide6 import QtCore, QtGui\n'), ((56680, 56704), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (56702, 56704), False, 'from PySide6 import QtCore, QtGui\n'), ((57415, 57439), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (57437, 57439), False, 'from PySide6 import QtCore, QtGui\n'), ((58273, 58297), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (58295, 58297), False, 'from PySide6 import QtCore, QtGui\n'), ((58959, 58983), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (58981, 58983), False, 'from PySide6 import QtCore, QtGui\n'), ((59750, 59774), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (59772, 59774), False, 'from PySide6 import QtCore, QtGui\n'), ((61442, 61466), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (61464, 61466), False, 'from PySide6 import QtCore, QtGui\n'), ((62160, 62184), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (62182, 62184), False, 'from PySide6 import QtCore, QtGui\n'), ((62893, 62917), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (62915, 62917), False, 'from PySide6 import QtCore, QtGui\n'), ((64142, 64166), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (64164, 64166), False, 'from PySide6 import QtCore, QtGui\n'), ((65079, 65103), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (65101, 65103), False, 'from PySide6 import QtCore, QtGui\n'), ((66928, 66952), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (66950, 66952), False, 'from PySide6 import QtCore, QtGui\n'), ((67646, 67670), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (67668, 67670), False, 'from PySide6 import QtCore, QtGui\n'), ((68304, 68328), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (68326, 68328), False, 'from PySide6 import QtCore, QtGui\n'), ((73879, 73903), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (73901, 73903), False, 'from PySide6 import QtCore, QtGui\n'), ((74649, 74673), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (74671, 74673), False, 'from PySide6 import QtCore, QtGui\n'), ((75449, 75473), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (75471, 75473), False, 'from PySide6 import QtCore, QtGui\n'), ((76127, 76151), 'PySide6.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (76149, 76151), False, 'from PySide6 import QtCore, QtGui\n')] |
#!/usr/bin/env python
# coding: utf-8
# Uncomment the following if only the CPU should be used
#import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import tensorflow.compat.v1 as tf
import cv2
import urllib.request
import json
import boto3
from datetime import datetime
# Borrowed from https://gist.github.com/madhawav/1546a4b99c8313f06c0b2d7d7b4a09e2
class DetectorAPI:
def __init__(self, path_to_ckpt):
self.path_to_ckpt = path_to_ckpt
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.default_graph = self.detection_graph.as_default()
self.sess = tf.Session(graph=self.detection_graph)
# Definite input and output Tensors for detection_graph
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
def processFrame(self, image):
# Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image, axis=0)
# Actual detection.
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: image_np_expanded})
im_height, im_width,_ = image.shape
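        # The detector returns boxes normalized to [0, 1] as (ymin, xmin, ymax, xmax);
        # scale them to pixel coordinates for drawing.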
boxes_list = [None for i in range(boxes.shape[1])]
for i in range(boxes.shape[1]):
boxes_list[i] = (int(boxes[0,i,0] * im_height),
int(boxes[0,i,1]*im_width),
int(boxes[0,i,2] * im_height),
int(boxes[0,i,3]*im_width))
return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])
def close(self):
self.sess.close()
self.default_graph.close()
class PeopleCounter:
def __init__(self, model_path, threshold=0.7):
self.odapi = DetectorAPI(path_to_ckpt=model_path)
self.threshold = threshold
def get_image(self, url):
resp = urllib.request.urlopen(url)
self.image = np.asarray(bytearray(resp.read()), dtype="uint8")
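        # cv2.imdecode with flag -1 (IMREAD_UNCHANGED) decodes the bytes as-is,
        # keeping any alpha channel.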
self.image = cv2.imdecode(self.image, -1)
def count_people(self, verbose=False):
peoplecount = 0
boxes, scores, classes, num = self.odapi.processFrame(self.image)
for i in range(len(boxes)):
# Class 1 represents human
if classes[i] == 1 and scores[i] > self.threshold:
box = boxes[i]
cv2.rectangle(self.image,(box[1],box[0]),(box[3],box[2]),(255,0,0),2)
peoplecount += 1
if verbose:
cv2.imshow('image', self.image)
cv2.waitKey(0)
return peoplecount
if __name__ == '__main__':
model_path = './faster_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb'
webcams = [{'ID':1,'URL':'http://217.24.53.18/record/current.jpg', 'Lat':'50.258318',"Lon":'10.964798','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':2,'URL':'http://www2.annaberg-buchholz.de/webcam/markt.jpg', 'Lat':'50.580062',"Lon":'13.002370','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':3,'URL':'https://www.konzil-konstanz.de/webcam/hafen.jpg', 'Lat':'47.660951',"Lon":'9.178256','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':4,'URL':'https://www.erfurt.de/webcam/fischmarkt.jpg', 'Lat':'50.978031',"Lon":'11.028691','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':5,'URL':'https://www.juwelier-roller.de/media/webcam/chemnitz_markt.jpg', 'Lat':'50.832587',"Lon":'12.919738','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':6,'URL':'https://www.celle-tourismus.de/webcam/image-640x480.jpg', 'Lat':'52.623973',"Lon":'10.080568','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':7,'URL':'https://webcam.heilbronn.de/current.jpg', 'Lat':'49.142365',"Lon":'9.219044','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':8,'URL':'https://www.verkehrsinfos.ulm.de/webcam/einstein/current.jpg', 'Lat':'48.401848',"Lon":'9.992416','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':9,'URL':'https://achern.de/tools/webcam/webcam/achern-rathaus1.jpg', 'Lat':'48.625454',"Lon":'8.082615','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':10,'URL':'http://www.marktplatzcam.mybiberach.de/MarktplatzCam000M.jpg', 'Lat':'48.097822',"Lon":'9.787595','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':11,'URL':'https://www.radolfzell.de/docs/webcam/radolfzell_640.jpg', 'Lat':' 47.745237',"Lon":'8.966910','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':12,'URL':'http://ftp.kaufhaus.ludwigbeck.de/webcam/webcam.jpg', 'Lat':'48.137079',"Lon":'11.576006','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':13,'URL':'https://cdn01.koeln.de/uploads/webcam/live.jpg', 'Lat':'50.941278',"Lon":'6.958281','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':14,'URL':'http://www.adlerauge1.de/subs/www/current.jpg', 'Lat':'51.513989',"Lon":'7.466483','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':15,'URL':'https://www.hal-oever.de/webcam/schlastshut.jpg', 'Lat':'53.078206',"Lon":'8.799147','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':16,'URL':'https://www.call-mail-call.de/webcam/000M.jpg', 'Lat':'52.376701',"Lon":'9.728407','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':17,'URL':'http://172.16.17.32:19812/record/current.jpg', 'Lat':'50.043667',"Lon":'10.2330092','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':18,'URL':'http://www.lemgo.de/fileadmin/image/webcam/aktuell.jpg', 'Lat':'52.028423',"Lon":'8.901522','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':19,'URL':'https://www.fiwa-forum.de/webcam/fiwa-forum-cam.jpg', 'Lat':'51.630403',"Lon":'13.708284','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':20,'URL':'https://rathaus-hildesheim.de/webcam/webcam.jpg', 'Lat':'52.1527203',"Lon":'9.9515704','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':21,'URL':'https://www.siegen.de/fileadmin/cms/bilder/Webcam/WebCam_Siegen.jpg', 'Lat':'50.8335211',"Lon":'7.9867985','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':22,'URL':'https://lamp01.dortmund.de/webcams/friedensplatz/current.jpg', 'Lat':'51.511543',"Lon":'7.466345','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':23,'URL':'https://lamp01.dortmund.de/webcams/altermarkt_hik/current_TIMING.jpg', 'Lat':'51.513989',"Lon":'7.466483','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':24,'URL':'https://service.ka-news.de/tools/webcams/?cam=27', 'Lat':'49.009220',"Lon":'8.403912','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':25,'URL':'https://www.augsburg.de/fileadmin/user_upload/header/webcam/webcamdachspitz/B_Rathausplatz_Dachspitz_00.jpg', 'Lat':'48.368963',"Lon":'10.898227','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':26,'URL':'https://www2.braunschweig.de/webcam/schloss.jpg', 'Lat':'52.263363',"Lon":'10.527763','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':27,'URL':'http://webcambild-rathaus.aachen.de/webcam_rathaus.jpg', 'Lat':'50.776103',"Lon":'6.083780','Name':'<NAME>', 'Personenzahl':None, 'Stand':None },
{'ID':28,'URL':'http://www.brillen-krille.de/Webcam/foto.jpg', 'Lat':'54.087890',"Lon":'12.134464','Name':'<NAME>', 'Personenzahl':None, 'Stand':None }]
pc = PeopleCounter(model_path)
for cam in webcams:
pc.get_image(cam['URL'])
cam['Personenzahl'] = pc.count_people(verbose=False)
cam['Stand'] = datetime.now().strftime("%Y-%m-%d %H:%M")
print(cam["Name"]+" :"+str(cam["Personenzahl"]))
    client_s3 = boto3.client("s3")
    response = client_s3.put_object(
        Bucket="sdd-s3-basebucket",
        Body=json.dumps(webcams),
        Key="webcamdaten/" + datetime.now().strftime("%Y%m%d%H") + "/webcamdaten.json"
    )
| [
"cv2.rectangle",
"tensorflow.compat.v1.GraphDef",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.gfile.GFile",
"tensorflow.compat.v1.import_graph_def",
"urllib.request.urlopen",
"cv2.imshow",
"datetime.datetime.now",
"cv2.imdecode",
"numpy.expand_dims",
"cv2.waitKey",
"tensorflow.compat.v1.Session"
] | [((467, 477), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (475, 477), True, 'import tensorflow.compat.v1 as tf\n'), ((884, 922), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (894, 922), True, 'import tensorflow.compat.v1 as tf\n'), ((1858, 1887), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1872, 1887), True, 'import numpy as np\n'), ((2895, 2922), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (2917, 2922), False, 'import urllib\n'), ((3015, 3043), 'cv2.imdecode', 'cv2.imdecode', (['self.image', '(-1)'], {}), '(self.image, -1)\n', (3027, 3043), False, 'import cv2\n'), ((553, 566), 'tensorflow.compat.v1.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (564, 566), True, 'import tensorflow.compat.v1 as tf\n'), ((584, 623), 'tensorflow.compat.v1.gfile.GFile', 'tf.gfile.GFile', (['self.path_to_ckpt', '"""rb"""'], {}), "(self.path_to_ckpt, 'rb')\n", (598, 623), True, 'import tensorflow.compat.v1 as tf\n'), ((757, 799), 'tensorflow.compat.v1.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (776, 799), True, 'import tensorflow.compat.v1 as tf\n'), ((3371, 3448), 'cv2.rectangle', 'cv2.rectangle', (['self.image', '(box[1], box[0])', '(box[3], box[2])', '(255, 0, 0)', '(2)'], {}), '(self.image, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)\n', (3384, 3448), False, 'import cv2\n'), ((3514, 3545), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'self.image'], {}), "('image', self.image)\n", (3524, 3545), False, 'import cv2\n'), ((3562, 3576), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3573, 3576), False, 'import cv2\n'), ((8842, 8856), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8854, 8856), False, 'from datetime import datetime\n')] |
from django.apps import AppConfig
class TextSearchVectorConfig(AppConfig):
name = 'tsvector_field'
def ready(self):
"""
This supports two use cases for using tsvector_field:
        1. Configure your Django project to use tsvector_field's DatabaseSchemaEditor
directly by creating your own DatabaseWrapper and referencing
tsvector_field.DatabaseSchemaEditor in the SchemaEditorClass attribute.
See: tsvector_field/schema.py for more info.
2. Just add `tsvector_field` to your project's INSTALLED_APPS setting and this
will use the `pre_migrate` mechanism. Note: `pre_migrate` is not fired for
./manage.py migrate --run-syncdb. So if you are building apps without migrations
you will have to use the more reliable approach in option #1.
"""
from django.db import connection
from . import DatabaseSchemaEditor
if not isinstance(connection.schema_editor(), DatabaseSchemaEditor):
# only register for pre_migrate if we're not already configured
# with the DatabaseSchemaEditor, see option #1 in doc above
from django.db.models.signals import pre_migrate
from .receivers import inject_trigger_operations
pre_migrate.connect(inject_trigger_operations)
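# A minimal sketch of option #1 from the docstring above, kept commented out.
# The PostgreSQL base backend import and the exact module layout are assumptions:
#
# from django.db.backends.postgresql import base
# import tsvector_field
#
# class DatabaseWrapper(base.DatabaseWrapper):
#     SchemaEditorClass = tsvector_field.DatabaseSchemaEditor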
| [
"django.db.connection.schema_editor",
"django.db.models.signals.pre_migrate.connect"
] | [((1296, 1342), 'django.db.models.signals.pre_migrate.connect', 'pre_migrate.connect', (['inject_trigger_operations'], {}), '(inject_trigger_operations)\n', (1315, 1342), False, 'from django.db.models.signals import pre_migrate\n'), ((963, 989), 'django.db.connection.schema_editor', 'connection.schema_editor', ([], {}), '()\n', (987, 989), False, 'from django.db import connection\n')] |
#!/usr/bin/env python
import logging
import increment_lib
def lambda_handler(event, context):
"""Increments a given CountName, possibly derived from api_gateway info. If CountName does not exist,
conditional_get_count will return a zero, so this function will increment and return 1."""
logging.warning("DEBUG: {r}".format(r=repr(event)))
try:
CountName = increment_lib.parse_event(event, "POST")
if CountName is None:
return increment_lib.make_return("event must specify CountName", 400)
ddb, tables = increment_lib.ddb_connect()
count_value = increment_lib.conditional_get_count(CountName, tables)
increment_lib.increment_count(count_value)
increment_lib.set_count(CountName, count_value, tables)
return increment_lib.make_return("count is {c}".format(c=count_value['count']), 200)
except:
logging.exception("Caught unknown error")
return increment_lib.make_return("unknown error", 400)
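
if __name__ == '__main__':
    # Minimal local smoke test; the event shape below is only an assumption about
    # what increment_lib.parse_event expects to receive from API Gateway.
    fake_event = {"httpMethod": "POST", "body": '{"CountName": "demo"}'}
    print(lambda_handler(fake_event, None))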
| [
"increment_lib.ddb_connect",
"increment_lib.set_count",
"logging.exception",
"increment_lib.parse_event",
"increment_lib.make_return",
"increment_lib.increment_count",
"increment_lib.conditional_get_count"
] | [((385, 425), 'increment_lib.parse_event', 'increment_lib.parse_event', (['event', '"""POST"""'], {}), "(event, 'POST')\n", (410, 425), False, 'import increment_lib\n'), ((560, 587), 'increment_lib.ddb_connect', 'increment_lib.ddb_connect', ([], {}), '()\n', (585, 587), False, 'import increment_lib\n'), ((610, 664), 'increment_lib.conditional_get_count', 'increment_lib.conditional_get_count', (['CountName', 'tables'], {}), '(CountName, tables)\n', (645, 664), False, 'import increment_lib\n'), ((673, 715), 'increment_lib.increment_count', 'increment_lib.increment_count', (['count_value'], {}), '(count_value)\n', (702, 715), False, 'import increment_lib\n'), ((724, 779), 'increment_lib.set_count', 'increment_lib.set_count', (['CountName', 'count_value', 'tables'], {}), '(CountName, count_value, tables)\n', (747, 779), False, 'import increment_lib\n'), ((475, 537), 'increment_lib.make_return', 'increment_lib.make_return', (['"""event must specify CountName"""', '(400)'], {}), "('event must specify CountName', 400)\n", (500, 537), False, 'import increment_lib\n'), ((893, 934), 'logging.exception', 'logging.exception', (['"""Caught unknown error"""'], {}), "('Caught unknown error')\n", (910, 934), False, 'import logging\n'), ((950, 997), 'increment_lib.make_return', 'increment_lib.make_return', (['"""unknown error"""', '(400)'], {}), "('unknown error', 400)\n", (975, 997), False, 'import increment_lib\n')] |
"""
Example of a simple source RayPencil with two apertures.
"""
from poptics.ray import RayPencil, RayPath, SourcePoint
from poptics.surface import CircularAperture, IrisAperture
from poptics.vector import Vector3d
import matplotlib.pyplot as plt
def main():
    # Form two apertures, both 20 mm, with the iris closed to a ratio of 0.5
ca = CircularAperture(50,20)
iris = IrisAperture(80,20,0.5)
# source for the rays at (0,10,-50) in global coordinates
source = SourcePoint(Vector3d(0.0,10,-50))
    # Form a pencil into the circular aperture at a wavelength of 0.65 microns
    # and add a RayPath to each ray
pencil = RayPencil().addBeam(ca,source,wavelength = 0.65).addMonitor(RayPath())
    # Propagate through both apertures and a further 30 mm to make the paths visible
pencil *= ca
pencil *= iris
pencil += 30
# Make a diagram
ca.draw()
iris.draw()
pencil.draw()
plt.axis("equal")
plt.show()
main()
| [
"poptics.ray.RayPencil",
"poptics.ray.RayPath",
"poptics.vector.Vector3d",
"poptics.surface.CircularAperture",
"matplotlib.pyplot.axis",
"poptics.surface.IrisAperture",
"matplotlib.pyplot.show"
] | [((356, 380), 'poptics.surface.CircularAperture', 'CircularAperture', (['(50)', '(20)'], {}), '(50, 20)\n', (372, 380), False, 'from poptics.surface import CircularAperture, IrisAperture\n'), ((391, 416), 'poptics.surface.IrisAperture', 'IrisAperture', (['(80)', '(20)', '(0.5)'], {}), '(80, 20, 0.5)\n', (403, 416), False, 'from poptics.surface import CircularAperture, IrisAperture\n'), ((982, 999), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (990, 999), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1014), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1012, 1014), True, 'import matplotlib.pyplot as plt\n'), ((509, 531), 'poptics.vector.Vector3d', 'Vector3d', (['(0.0)', '(10)', '(-50)'], {}), '(0.0, 10, -50)\n', (517, 531), False, 'from poptics.vector import Vector3d\n'), ((733, 742), 'poptics.ray.RayPath', 'RayPath', ([], {}), '()\n', (740, 742), False, 'from poptics.ray import RayPencil, RayPath, SourcePoint\n'), ((673, 684), 'poptics.ray.RayPencil', 'RayPencil', ([], {}), '()\n', (682, 684), False, 'from poptics.ray import RayPencil, RayPath, SourcePoint\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Add `is_in_testset` to raw_datasets in the MySQL database, so that at least
10% of the data online has the flag `is_in_testset`.
"""
import pymysql
import pymysql.cursors
import random
import math
# hwrt modules
from hwrt.handwritten_data import HandwrittenData
import hwrt.utils as utils
import hwrt.filter_dataset as filter_dataset
def main(mysql, symbol_yml_file):
"""Add testset flag to recordings in MySQL database."""
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['<PASSWORD>'],
db=mysql['db'],
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
# Get IDs of symbols we want to create testset for
metadata = filter_dataset.get_metadata()
datasets = filter_dataset.get_symbol_ids(symbol_yml_file, metadata)
for i, data in enumerate(datasets):
fid, formula_in_latex = data['id'], data['formula_in_latex']
print("%i: Create testset for %s (id: %i)..." % (i,
formula_in_latex,
fid))
sql = ("SELECT `id`, `is_in_testset` FROM `wm_raw_draw_data` "
"WHERE `accepted_formula_id` = %i" % fid)
cursor.execute(sql)
raw_datasets = cursor.fetchall()
is_in_testset = 0
raw_candidate_ids = []
for raw_data in raw_datasets:
if raw_data['is_in_testset'] == 1:
is_in_testset += 1
else:
raw_candidate_ids.append(raw_data['id'])
testset_ratio = 0.1
testset_total = int(math.ceil(len(raw_datasets) * testset_ratio))
remaining = testset_total - is_in_testset
if remaining > 0:
print(("\t%i in testset. "
"Add remaining %i datasets to testset...") %
(is_in_testset, remaining))
add_new = random.sample(raw_candidate_ids, remaining)
if len(add_new) < 20:
for el in add_new:
print("\thttp://write-math.com/view/?raw_data_id=%i" % el)
for rid in add_new:
sql = ("UPDATE `wm_raw_draw_data` SET `is_in_testset`=1 "
"WHERE `id` = %i LIMIT 1") % rid
cursor.execute(sql)
connection.commit()
def get_parser():
"""Return the parser object for this script."""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-s", "--symbol",
dest="symbol_filename",
type=lambda x: utils.is_valid_file(parser, x),
required=True,
help="symbol yml file",
metavar="FILE")
return parser
if __name__ == '__main__':
args = get_parser().parse_args()
cfg = utils.get_database_configuration()
if 'mysql_online' in cfg:
main(cfg['mysql_online'], args.symbol_filename)
if 'mysql_local' in cfg:
main(cfg['mysql_local'], args.symbol_filename)
| [
"random.sample",
"argparse.ArgumentParser",
"pymysql.connect",
"hwrt.filter_dataset.get_metadata",
"hwrt.utils.get_database_configuration",
"hwrt.filter_dataset.get_symbol_ids",
"hwrt.utils.is_valid_file"
] | [((496, 640), 'pymysql.connect', 'pymysql.connect', ([], {'host': "mysql['host']", 'user': "mysql['user']", 'passwd': "mysql['<PASSWORD>']", 'db': "mysql['db']", 'cursorclass': 'pymysql.cursors.DictCursor'}), "(host=mysql['host'], user=mysql['user'], passwd=mysql[\n '<PASSWORD>'], db=mysql['db'], cursorclass=pymysql.cursors.DictCursor)\n", (511, 640), False, 'import pymysql\n'), ((872, 901), 'hwrt.filter_dataset.get_metadata', 'filter_dataset.get_metadata', ([], {}), '()\n', (899, 901), True, 'import hwrt.filter_dataset as filter_dataset\n'), ((917, 973), 'hwrt.filter_dataset.get_symbol_ids', 'filter_dataset.get_symbol_ids', (['symbol_yml_file', 'metadata'], {}), '(symbol_yml_file, metadata)\n', (946, 973), True, 'import hwrt.filter_dataset as filter_dataset\n'), ((2659, 2746), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'ArgumentDefaultsHelpFormatter'}), '(description=__doc__, formatter_class=\n ArgumentDefaultsHelpFormatter)\n', (2673, 2746), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((3152, 3186), 'hwrt.utils.get_database_configuration', 'utils.get_database_configuration', ([], {}), '()\n', (3184, 3186), True, 'import hwrt.utils as utils\n'), ((2081, 2124), 'random.sample', 'random.sample', (['raw_candidate_ids', 'remaining'], {}), '(raw_candidate_ids, remaining)\n', (2094, 2124), False, 'import random\n'), ((2899, 2929), 'hwrt.utils.is_valid_file', 'utils.is_valid_file', (['parser', 'x'], {}), '(parser, x)\n', (2918, 2929), True, 'import hwrt.utils as utils\n')] |
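The testset sampling in main() reduces to a small calculation; a minimal sketch with made-up counts (none of these numbers come from a real database):

import math
import random

raw_count = 42         # hypothetical recordings for one symbol
already_flagged = 2    # hypothetical recordings already in the testset
testset_total = int(math.ceil(raw_count * 0.1))    # ceil(4.2) == 5
remaining = testset_total - already_flagged       # 3 more to flag
add_new = random.sample(range(raw_count - already_flagged), remaining)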
"""
uqid.py version 0.3.0
https://github.com/denis-ryzhkov/uqidpy
Copyright (C) 2015-2018 by <NAME> <<EMAIL>>
MIT License, see http://opensource.org/licenses/MIT
"""
### import
from datetime import datetime
from random import choice
try:
xrange
except NameError:
xrange = range
### chars
digits = '0123456789'
base62 = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
### uqid
def uqid(length=64, chars=base62):
return ''.join(choice(chars) for i in xrange(length))
### dtid
_dt_format = '%Y%m%d%H%M%S%f'
_dt_length = 20
def dtid(length=24, chars=base62):
s = datetime.utcnow().strftime(_dt_format)
if length > _dt_length:
return s + uqid(length - _dt_length, chars)
return s[:length]
### datetime_from_dtid
def datetime_from_dtid(s):
return datetime.strptime(s[:_dt_length], _dt_format)
### tests
def tests():
### import
from time import time
from uuid import uuid4
try:
from bson import ObjectId
except ImportError:
ObjectId = None
### samples
print('\nSAMPLES:')
length = 24
print('str(uuid4()): {}'.format(uuid4()))
print('len(^): {}'.format(len(str(uuid4()))))
print('len(v): {}'.format(length))
if ObjectId:
print('str(ObjectId()): {}'.format(str(ObjectId())))
print('uqid({}): {}'.format(length, uqid(length)))
print('dtid({}): {}'.format(length, dtid(length)))
assert len(uqid(length)) == length
assert len(dtid(length)) == length
assert (datetime.utcnow() - datetime_from_dtid(dtid())).total_seconds() < 0.1
N = 1000*1000
print('Iterations: {}'.format(N))
### seconds
print('\nSECONDS:')
start = time()
for _ in xrange(N):
str(uuid4())
print('str(uuid4()) seconds: {:.6f}'.format(time() - start))
if ObjectId:
start = time()
for _ in xrange(N):
str(ObjectId())
print('str(ObjectId()) seconds: {:.6f}'.format(time() - start))
start = time()
for _ in xrange(N):
uqid(length)
print('uqid({}) seconds: {:.6f}'.format(length, time() - start))
start = time()
for _ in xrange(N):
dtid(length)
print('dtid({}) seconds: {:.6f}'.format(length, time() - start))
### duplicates
print('\nDUPLICATES:')
U = len(set(str(uuid4()) for _ in xrange(N)))
print('str(uuid4()) duplicates: {}'.format(N - U))
if ObjectId:
U = len(set(str(ObjectId()) for _ in xrange(N)))
print('str(ObjectId()) duplicates: {}'.format(N - U))
U = len(set(uqid(length) for _ in xrange(N)))
print('uqid({}) duplicates: {}'.format(length, N - U))
U = len(set(dtid(length) for _ in xrange(N)))
print('dtid({}) duplicates: {}'.format(length, N - U))
if __name__ == '__main__':
tests()
| [
"random.choice",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"uuid.uuid4",
"bson.ObjectId",
"time.time"
] | [((807, 852), 'datetime.datetime.strptime', 'datetime.strptime', (['s[:_dt_length]', '_dt_format'], {}), '(s[:_dt_length], _dt_format)\n', (824, 852), False, 'from datetime import datetime\n'), ((1697, 1703), 'time.time', 'time', ([], {}), '()\n', (1701, 1703), False, 'from time import time\n'), ((1996, 2002), 'time.time', 'time', ([], {}), '()\n', (2000, 2002), False, 'from time import time\n'), ((2130, 2136), 'time.time', 'time', ([], {}), '()\n', (2134, 2136), False, 'from time import time\n'), ((1848, 1854), 'time.time', 'time', ([], {}), '()\n', (1852, 1854), False, 'from time import time\n'), ((463, 476), 'random.choice', 'choice', (['chars'], {}), '(chars)\n', (469, 476), False, 'from random import choice\n'), ((603, 620), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (618, 620), False, 'from datetime import datetime\n'), ((1135, 1142), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1140, 1142), False, 'from uuid import uuid4\n'), ((1740, 1747), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1745, 1747), False, 'from uuid import uuid4\n'), ((1797, 1803), 'time.time', 'time', ([], {}), '()\n', (1801, 1803), False, 'from time import time\n'), ((1899, 1909), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (1907, 1909), False, 'from bson import ObjectId\n'), ((2100, 2106), 'time.time', 'time', ([], {}), '()\n', (2104, 2106), False, 'from time import time\n'), ((2234, 2240), 'time.time', 'time', ([], {}), '()\n', (2238, 2240), False, 'from time import time\n'), ((1183, 1190), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1188, 1190), False, 'from uuid import uuid4\n'), ((1299, 1309), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (1307, 1309), False, 'from bson import ObjectId\n'), ((1515, 1532), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1530, 1532), False, 'from datetime import datetime\n'), ((1966, 1972), 'time.time', 'time', ([], {}), '()\n', (1970, 1972), False, 'from time import time\n'), ((2320, 2327), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2325, 2327), False, 'from uuid import uuid4\n'), ((2447, 2457), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (2455, 2457), False, 'from bson import ObjectId\n')] |
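A quick round-trip of the two main helpers, assuming the file is importable as uqid (the printed token is illustrative, not a fixed output):

from uqid import dtid, datetime_from_dtid, uqid

token = dtid(24)                    # 20 timestamp chars + 4 random base62 chars
stamp = datetime_from_dtid(token)   # datetime recovered from the first 20 chars
print(token, stamp)
print(uqid(8))                     # 8 random base62 chars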
# built in libraries
from functools import lru_cache
from tamcolors.utils.immutable_cache import ImmutableCache
from tamcolors.utils.object_packer import FastHandObjectPacker
"""
terminal colors supported on all platforms
Color holds all color values for all supported modes
RGBA holds the values for mode rgb
"""
class Color(ImmutableCache, FastHandObjectPacker):
__slots__ = ("_mode_2",
"_mode_16_pal_256",
"_mode_16",
"_mode_256",
"_mode_rgb",
"_has_alpha",
"_byte_cache")
def __init__(self, mode_16, mode_256, mode_rgb, mode_16_pal_256=None, mode_2=None, _color_id=None):
"""
info: Makes a Color object
:param mode_16: int
:param mode_256: int
:param mode_rgb: RGBA
:param mode_16_pal_256: int or None
:param mode_2: int or None
:param _color_id: int: Used ONLY for the default COLORS
"""
if mode_2 is None:
mode_2 = mode_16
if mode_16_pal_256 is None:
mode_16_pal_256 = mode_16
self._mode_2 = mode_2
self._mode_16_pal_256 = mode_16_pal_256
self._mode_16 = mode_16
self._mode_256 = mode_256
self._mode_rgb = mode_rgb
self._has_alpha = -2 in (mode_2, mode_16, mode_256) or (mode_rgb.a != 255 and not mode_rgb.is_default)
if _color_id is None:
self._byte_cache = bytes((0,
*self._int_mode_to_binary(self._mode_2),
*self._int_mode_to_binary(self._mode_16_pal_256),
*self._int_mode_to_binary(self._mode_16),
*self._int_mode_to_binary(self._mode_256),
*self.mode_rgb.to_bytes()))
else:
if abs(_color_id) == _color_id:
self._byte_cache = bytes((1, _color_id))
else:
self._byte_cache = bytes((2, abs(_color_id)))
def __str__(self):
return "(2: {}, 16_pal_256: {}, 16: {}, 256: {}, rgb: {}, has_alpha: {})".format(self.mode_2,
self.mode_16_pal_256,
self.mode_16,
self.mode_256,
self.mode_rgb,
self.has_alpha)
def __hash__(self):
return hash((self._mode_2,
self._mode_16_pal_256,
self._mode_16,
self._mode_256,
self._mode_rgb,
self._mode_16_pal_256,
self._has_alpha))
def __repr__(self):
return str(self)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.mode_2 == other.mode_2 and\
self.mode_16_pal_256 == other.mode_16_pal_256 and\
self.mode_16 == other.mode_16 and\
self.mode_256 == other.mode_256 and\
self.mode_rgb == other.mode_rgb and\
self.has_alpha == other.has_alpha
return False
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def _int_mode_to_binary(mode):
return abs(min(0, mode)), abs(mode)
@staticmethod
def _int_mode_from_binary(binary):
if binary[0] == 0:
return binary[1]
return binary[0]*-1
@property
def mode_2(self):
"""
info: Gets mode 2
:return: int
"""
return self._mode_2
@property
def mode_16_pal_256(self):
"""
info: Gets mode 16 pal 256
:return: int
"""
return self._mode_16_pal_256
@property
def mode_16(self):
"""
info: Gets mode 16
:return: int
"""
return self._mode_16
@property
def mode_256(self):
"""
info: Gets mode 256
:return: int
"""
return self._mode_256
@property
def mode_rgb(self):
"""
info: Gets mode rgb
:return: RGBA
"""
return self._mode_rgb
@property
def has_alpha(self):
"""
info: Checks if color has any alpha
:return: bool
"""
return self._has_alpha
def place_color_over(self, old_color, override_alpha):
"""
info: Will calculate what the new color will be
:param old_color: Color
:param override_alpha: bool
:return: color
"""
if override_alpha:
return self
mode_2 = self.mode_2
if mode_2 == -2:
mode_2 = old_color.mode_2
mode_16_pal_256 = self.mode_16_pal_256
if mode_16_pal_256 == -2:
mode_16_pal_256 = old_color.mode_16_pal_256
mode_16 = self.mode_16
if mode_16 == -2:
mode_16 = old_color.mode_16
mode_256 = self.mode_256
if mode_256 == -2:
mode_256 = old_color.mode_256
mode_rgb = self.mode_rgb
if mode_rgb.a != 255 and not self.mode_rgb.is_default:
if mode_rgb.a == 0:
mode_rgb = old_color.mode_rgb
else:
mode_rgb = RGBA(self.transparent_value(mode_rgb.r, mode_rgb.a, old_color.mode_rgb.r),
self.transparent_value(mode_rgb.g, mode_rgb.a, old_color.mode_rgb.g),
self.transparent_value(mode_rgb.b, mode_rgb.a, old_color.mode_rgb.b),
old_color.mode_rgb.a)
return self.__class__(mode_16, mode_256, mode_rgb, mode_16_pal_256, mode_2)
@staticmethod
@lru_cache(maxsize=5000)
def transparent_value(new, alpha, old):
alpha = alpha/255
return min(255, max(0, round(alpha * new + (1 - alpha) * old)))
def to_bytes(self):
return self._byte_cache
@classmethod
@lru_cache(maxsize=5000)
def _from(cls, other_modes, mode_rgb):
mode_2 = cls._int_mode_from_binary(other_modes[1:3])
mode_16_pal_256 = cls._int_mode_from_binary(other_modes[3:5])
mode_16 = cls._int_mode_from_binary(other_modes[5:7])
mode_256 = cls._int_mode_from_binary(other_modes[7:9])
return cls(mode_16, mode_256, mode_rgb, mode_16_pal_256, mode_2)
@classmethod
@lru_cache(maxsize=5000)
def _from_color_id(cls, color_code):
"""
info: makes color from bytes
:param color_code: bytes: color id
:return: Color
"""
if color_code[0] == 1:
return COLORS[color_code[1]]
return COLOR_MAP[color_code[1]*-1]
@classmethod
def from_bytes(cls, object_byte_array):
if object_byte_array[0] == 0:
other_modes = bytes(object_byte_array[:9])
del object_byte_array[:9]
mode_rgb = RGBA.from_bytes(object_byte_array)
return cls._from(other_modes, mode_rgb)
else:
color_code = bytes(object_byte_array[:2])
del object_byte_array[:2]
return cls._from_color_id(color_code)
class RGBA(ImmutableCache, FastHandObjectPacker):
__slots__ = ("_r", "_g", "_b", "_a", "_is_default", "_byte_cache")
def __init__(self, r, g, b, a=255, is_default=False):
"""
info: Will make a RGBA object
:param r: int
:param g: int
:param b: int
:param a: int
:param is_default: Bool
"""
self._r = r
self._g = g
self._b = b
self._a = a
self._is_default = is_default
self._byte_cache = bytes((self._r, self._g, self._b, self._a, int(self._is_default)))
def __str__(self):
return "(r: {}, g: {}, b: {}, a: {}, is_default: {})".format(self.r,
self.g,
self.b,
self.a,
self.is_default)
def __repr__(self):
return str(self)
def __hash__(self):
return hash((self._r, self._g, self._b, self._a, self._is_default))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.r == other.r and self.g == other.g and self.b == other.b and self.a == other.a \
and self.is_default == other.is_default
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def r(self):
"""
info: Will get the r value
:return: int
"""
return self._r
@property
def g(self):
"""
info: Will get the g value
:return: int
"""
return self._g
@property
def b(self):
"""
info: Will get the b value
:return: int
"""
return self._b
@property
def a(self):
"""
info: Will get the a value
:return: int
"""
return self._a
@property
def is_default(self):
"""
info: See if color is default
:return: bool
"""
return self._is_default
def to_bytes(self):
return self._byte_cache
@classmethod
@lru_cache(maxsize=5000)
def _from(cls, r, g, b, a, is_default):
return cls(r, g, b, a, bool(is_default))
@classmethod
def from_bytes(cls, object_byte_array):
obj = cls._from(*object_byte_array[:5])
del object_byte_array[:5]
return obj
ALPHA = Color(-2, -2, RGBA(0, 0, 0, 0), _color_id=-2)
DEFAULT = Color(-1, -1, RGBA(0, 0, 0, 255, True), _color_id=-1)
BLACK = Color(0, 0, RGBA(0, 0, 0), _color_id=0)
RED = Color(1, 1, RGBA(128, 0, 0), _color_id=1)
GREEN = Color(2, 2, RGBA(0, 128, 0), _color_id=2)
YELLOW = Color(3, 3, RGBA(128, 128, 0), _color_id=3)
BLUE = Color(4, 4, RGBA(0, 0, 128), _color_id=4)
PURPLE = Color(5, 5, RGBA(128, 0, 128), _color_id=5)
AQUA = Color(6, 6, RGBA(0, 128, 128), _color_id=6)
WHITE = Color(7, 7, RGBA(192, 192, 192), _color_id=7)
GRAY = Color(8, 8, RGBA(128, 128, 128), _color_id=8)
LIGHT_RED = Color(9, 9, RGBA(255, 0, 0), _color_id=9)
LIGHT_GREEN = Color(10, 10, RGBA(0, 255, 0), _color_id=10)
LIGHT_YELLOW = Color(11, 11, RGBA(255, 255, 0), _color_id=11)
LIGHT_BLUE = Color(12, 12, RGBA(0, 0, 255), _color_id=12)
LIGHT_PURPLE = Color(13, 13, RGBA(255, 0, 255), _color_id=13)
LIGHT_AQUA = Color(14, 14, RGBA(0, 255, 255), _color_id=14)
LIGHT_WHITE = Color(15, 15, RGBA(255, 255, 255), _color_id=15)
COLOR_0 = BLACK
COLOR_1 = RED
COLOR_2 = GREEN
COLOR_3 = YELLOW
COLOR_4 = BLUE
COLOR_5 = PURPLE
COLOR_6 = AQUA
COLOR_7 = WHITE
COLOR_8 = GRAY
COLOR_9 = LIGHT_RED
COLOR_10 = LIGHT_GREEN
COLOR_11 = LIGHT_YELLOW
COLOR_12 = LIGHT_BLUE
COLOR_13 = LIGHT_PURPLE
COLOR_14 = LIGHT_AQUA
COLOR_15 = LIGHT_WHITE
COLOR_16 = Color(0, 16, RGBA(0, 0, 0))
COLOR_17 = Color(4, 17, RGBA(0, 0, 95))
COLOR_18 = Color(4, 18, RGBA(0, 0, 95))
COLOR_19 = Color(12, 19, RGBA(0, 0, 175))
COLOR_20 = Color(12, 20, RGBA(0, 0, 215))
COLOR_21 = Color(12, 21, RGBA(0, 0, 255))
COLOR_22 = Color(6, 22, RGBA(0, 95, 0))
COLOR_23 = Color(12, 23, RGBA(0, 95, 95))
COLOR_24 = Color(6, 24, RGBA(0, 95, 135))
COLOR_25 = Color(6, 25, RGBA(0, 95, 175))
COLOR_26 = Color(12, 26, RGBA(0, 95, 215))
COLOR_27 = Color(12, 27, RGBA(0, 95, 255))
COLOR_28 = Color(2, 28, RGBA(0, 135, 0))
COLOR_29 = Color(6, 29, RGBA(0, 135, 95))
COLOR_30 = Color(14, 30, RGBA(0, 135, 135))
COLOR_31 = Color(6, 31, RGBA(0, 135, 175))
COLOR_32 = Color(12, 32, RGBA(0, 135, 215))
COLOR_33 = Color(14, 33, RGBA(0, 135, 255))
COLOR_34 = Color(2, 34, RGBA(0, 175, 0))
COLOR_35 = Color(2, 35, RGBA(0, 175, 95))
COLOR_36 = Color(6, 36, RGBA(0, 175, 135))
COLOR_37 = Color(6, 37, RGBA(0, 175, 175))
COLOR_38 = Color(6, 38, RGBA(0, 175, 215))
COLOR_39 = Color(12, 39, RGBA(0, 175, 255))
COLOR_40 = Color(10, 40, RGBA(0, 215, 0))
COLOR_41 = Color(10, 41, RGBA(0, 215, 95))
COLOR_42 = Color(10, 42, RGBA(0, 215, 135))
COLOR_43 = Color(10, 43, RGBA(0, 215, 175))
COLOR_44 = Color(2, 44, RGBA(0, 215, 215))
COLOR_45 = Color(14, 45, RGBA(0, 215, 255))
COLOR_46 = Color(10, 46, RGBA(0, 255, 0))
COLOR_47 = Color(10, 47, RGBA(0, 255, 95))
COLOR_48 = Color(10, 48, RGBA(0, 255, 135))
COLOR_49 = Color(10, 49, RGBA(0, 255, 175))
COLOR_50 = Color(14, 50, RGBA(0, 255, 215))
COLOR_51 = Color(14, 51, RGBA(0, 255, 255))
COLOR_52 = Color(1, 52, RGBA(95, 0, 0))
COLOR_53 = Color(5, 53, RGBA(95, 0, 95))
COLOR_54 = Color(5, 54, RGBA(95, 0, 135))
COLOR_55 = Color(5, 55, RGBA(95, 0, 175))
COLOR_56 = Color(5, 56, RGBA(95, 0, 215))
COLOR_57 = Color(5, 57, RGBA(95, 0, 255))
COLOR_58 = Color(3, 58, RGBA(95, 95, 0))
COLOR_59 = Color(8, 59, RGBA(95, 95, 95))
COLOR_60 = Color(8, 60, RGBA(95, 95, 135))
COLOR_61 = Color(5, 61, RGBA(95, 95, 175))
COLOR_62 = Color(5, 62, RGBA(95, 95, 215))
COLOR_63 = Color(5, 63, RGBA(95, 95, 255))
COLOR_64 = Color(3, 64, RGBA(95, 135, 0))
COLOR_65 = Color(2, 65, RGBA(95, 135, 95))
COLOR_66 = Color(2, 66, RGBA(95, 135, 135))
COLOR_67 = Color(5, 67, RGBA(95, 135, 175))
COLOR_68 = Color(6, 68, RGBA(95, 135, 215))
COLOR_69 = Color(5, 69, RGBA(95, 135, 255))
COLOR_70 = Color(2, 70, RGBA(95, 175, 0))
COLOR_71 = Color(2, 71, RGBA(95, 175, 95))
COLOR_72 = Color(2, 72, RGBA(95, 175, 135))
COLOR_73 = Color(6, 73, RGBA(95, 175, 175))
COLOR_74 = Color(14, 74, RGBA(95, 175, 215))
COLOR_75 = Color(14, 75, RGBA(95, 175, 255))
COLOR_76 = Color(2, 76, RGBA(95, 215, 0))
COLOR_77 = Color(10, 77, RGBA(95, 215, 95))
COLOR_78 = Color(14, 78, RGBA(95, 215, 135))
COLOR_79 = Color(6, 79, RGBA(95, 215, 175))
COLOR_80 = Color(14, 80, RGBA(95, 215, 215))
COLOR_81 = Color(14, 81, RGBA(95, 215, 255))
COLOR_82 = Color(2, 82, RGBA(95, 255, 0))
COLOR_83 = Color(10, 83, RGBA(95, 255, 95))
COLOR_84 = Color(10, 84, RGBA(95, 255, 135))
COLOR_85 = Color(14, 85, RGBA(95, 255, 175))
COLOR_86 = Color(6, 86, RGBA(95, 255, 215))
COLOR_87 = Color(14, 87, RGBA(95, 255, 255))
COLOR_88 = Color(1, 88, RGBA(135, 0, 0))
COLOR_89 = Color(1, 89, RGBA(135, 0, 95))
COLOR_90 = Color(5, 90, RGBA(135, 0, 135))
COLOR_91 = Color(5, 91, RGBA(135, 0, 175))
COLOR_92 = Color(5, 92, RGBA(135, 0, 215))
COLOR_93 = Color(13, 93, RGBA(135, 0, 255))
COLOR_94 = Color(3, 94, RGBA(135, 95, 0))
COLOR_95 = Color(1, 95, RGBA(135, 95, 95))
COLOR_96 = Color(5, 96, RGBA(135, 95, 135))
COLOR_97 = Color(5, 97, RGBA(135, 95, 175))
COLOR_98 = Color(5, 98, RGBA(135, 95, 215))
COLOR_99 = Color(5, 99, RGBA(135, 95, 255))
COLOR_100 = Color(3, 100, RGBA(135, 135, 0))
COLOR_101 = Color(3, 101, RGBA(135, 135, 95))
COLOR_102 = Color(7, 102, RGBA(135, 135, 135))
COLOR_103 = Color(8, 103, RGBA(135, 135, 175))
COLOR_104 = Color(8, 104, RGBA(135, 135, 215))
COLOR_105 = Color(5, 105, RGBA(135, 135, 255))
COLOR_106 = Color(10, 106, RGBA(135, 175, 0))
COLOR_107 = Color(3, 107, RGBA(135, 175, 95))
COLOR_108 = Color(12, 108, RGBA(135, 175, 135))
COLOR_109 = Color(6, 109, RGBA(135, 175, 175))
COLOR_110 = Color(14, 110, RGBA(135, 175, 215))
COLOR_111 = Color(14, 111, RGBA(135, 175, 255))
COLOR_112 = Color(2, 112, RGBA(135, 215, 0))
COLOR_113 = Color(10, 113, RGBA(135, 215, 95))
COLOR_114 = Color(2, 114, RGBA(135, 215, 135))
COLOR_115 = Color(10, 115, RGBA(135, 215, 175))
COLOR_116 = Color(14, 116, RGBA(135, 215, 215))
COLOR_117 = Color(14, 117, RGBA(135, 215, 255))
COLOR_118 = Color(10, 118, RGBA(135, 255, 0))
COLOR_119 = Color(10, 119, RGBA(135, 255, 95))
COLOR_120 = Color(10, 120, RGBA(135, 255, 135))
COLOR_121 = Color(10, 121, RGBA(135, 255, 175))
COLOR_122 = Color(10, 122, RGBA(135, 255, 215))
COLOR_123 = Color(14, 123, RGBA(135, 255, 255))
COLOR_124 = Color(1, 124, RGBA(175, 0, 0))
COLOR_125 = Color(1, 125, RGBA(175, 0, 95))
COLOR_126 = Color(5, 126, RGBA(175, 0, 135))
COLOR_127 = Color(5, 127, RGBA(175, 0, 175))
COLOR_128 = Color(5, 128, RGBA(175, 0, 215))
COLOR_129 = Color(13, 129, RGBA(175, 0, 255))
COLOR_130 = Color(3, 130, RGBA(175, 95, 0))
COLOR_131 = Color(1, 131, RGBA(175, 95, 95))
COLOR_132 = Color(1, 132, RGBA(175, 95, 135))
COLOR_133 = Color(5, 133, RGBA(175, 95, 175))
COLOR_134 = Color(5, 134, RGBA(175, 95, 215))
COLOR_135 = Color(13, 135, RGBA(175, 95, 255))
COLOR_136 = Color(3, 136, RGBA(175, 135, 0))
COLOR_137 = Color(3, 137, RGBA(175, 135, 95))
COLOR_138 = Color(3, 138, RGBA(175, 135, 135))
COLOR_139 = Color(3, 139, RGBA(175, 135, 175))
COLOR_140 = Color(5, 140, RGBA(175, 135, 215))
COLOR_141 = Color(13, 141, RGBA(175, 135, 255))
COLOR_142 = Color(3, 142, RGBA(175, 175, 0))
COLOR_143 = Color(3, 143, RGBA(175, 175, 95))
COLOR_144 = Color(3, 144, RGBA(175, 175, 135))
COLOR_145 = Color(7, 145, RGBA(175, 175, 175))
COLOR_146 = Color(5, 146, RGBA(175, 175, 215))
COLOR_147 = Color(8, 147, RGBA(175, 175, 255))
COLOR_148 = Color(10, 148, RGBA(175, 215, 0))
COLOR_149 = Color(2, 149, RGBA(175, 215, 95))
COLOR_150 = Color(6, 150, RGBA(175, 215, 135))
COLOR_151 = Color(7, 151, RGBA(175, 215, 175))
COLOR_152 = Color(14, 152, RGBA(175, 215, 215))
COLOR_153 = Color(14, 153, RGBA(175, 215, 255))
COLOR_154 = Color(10, 154, RGBA(175, 255, 0))
COLOR_155 = Color(10, 155, RGBA(175, 255, 95))
COLOR_156 = Color(10, 156, RGBA(175, 255, 135))
COLOR_157 = Color(10, 157, RGBA(175, 255, 175))
COLOR_158 = Color(14, 158, RGBA(175, 255, 215))
COLOR_159 = Color(14, 159, RGBA(175, 255, 255))
COLOR_160 = Color(9, 160, RGBA(215, 0, 0))
COLOR_161 = Color(9, 161, RGBA(215, 0, 95))
COLOR_162 = Color(13, 162, RGBA(215, 0, 135))
COLOR_163 = Color(13, 163, RGBA(215, 0, 175))
COLOR_164 = Color(13, 164, RGBA(215, 0, 215))
COLOR_165 = Color(5, 165, RGBA(215, 0, 255))
COLOR_166 = Color(3, 166, RGBA(215, 95, 0))
COLOR_167 = Color(9, 167, RGBA(215, 95, 95))
COLOR_168 = Color(9, 168, RGBA(215, 95, 135))
COLOR_169 = Color(9, 169, RGBA(215, 95, 175))
COLOR_170 = Color(5, 170, RGBA(215, 95, 215))
COLOR_171 = Color(13, 171, RGBA(215, 95, 255))
COLOR_172 = Color(3, 172, RGBA(215, 135, 0))
COLOR_173 = Color(3, 173, RGBA(215, 135, 95))
COLOR_174 = Color(3, 174, RGBA(215, 135, 135))
COLOR_175 = Color(13, 175, RGBA(215, 135, 175))
COLOR_176 = Color(13, 176, RGBA(215, 135, 215))
COLOR_177 = Color(13, 177, RGBA(215, 135, 255))
COLOR_178 = Color(3, 178, RGBA(215, 175, 0))
COLOR_179 = Color(3, 179, RGBA(215, 175, 95))
COLOR_180 = Color(3, 180, RGBA(215, 175, 135))
COLOR_181 = Color(3, 181, RGBA(215, 175, 175))
COLOR_182 = Color(6, 182, RGBA(215, 175, 215))
COLOR_183 = Color(13, 183, RGBA(215, 175, 255))
COLOR_184 = Color(11, 184, RGBA(215, 215, 0))
COLOR_185 = Color(3, 185, RGBA(215, 215, 95))
COLOR_186 = Color(3, 186, RGBA(215, 215, 135))
COLOR_187 = Color(11, 187, RGBA(215, 215, 175))
COLOR_188 = Color(7, 188, RGBA(215, 215, 215))
COLOR_189 = Color(13, 189, RGBA(215, 215, 255))
COLOR_190 = Color(10, 190, RGBA(215, 255, 0))
COLOR_191 = Color(11, 191, RGBA(215, 255, 95))
COLOR_192 = Color(10, 192, RGBA(215, 255, 135))
COLOR_193 = Color(10, 193, RGBA(215, 255, 175))
COLOR_194 = Color(2, 194, RGBA(215, 255, 215))
COLOR_195 = Color(14, 195, RGBA(215, 255, 255))
COLOR_196 = Color(9, 196, RGBA(255, 0, 0))
COLOR_197 = Color(9, 197, RGBA(255, 0, 95))
COLOR_198 = Color(9, 198, RGBA(255, 0, 135))
COLOR_199 = Color(13, 199, RGBA(255, 0, 175))
COLOR_200 = Color(13, 200, RGBA(255, 0, 215))
COLOR_201 = Color(9, 201, RGBA(255, 0, 255))
COLOR_202 = Color(9, 202, RGBA(255, 95, 0))
COLOR_203 = Color(11, 203, RGBA(255, 95, 95))
COLOR_204 = Color(9, 204, RGBA(255, 95, 135))
COLOR_205 = Color(13, 205, RGBA(255, 95, 175))
COLOR_206 = Color(13, 206, RGBA(255, 95, 215))
COLOR_207 = Color(13, 207, RGBA(255, 95, 255))
COLOR_208 = Color(11, 208, RGBA(255, 135, 0))
COLOR_209 = Color(11, 209, RGBA(255, 135, 95))
COLOR_210 = Color(9, 210, RGBA(255, 135, 135))
COLOR_211 = Color(13, 211, RGBA(255, 135, 175))
COLOR_212 = Color(13, 212, RGBA(255, 135, 215))
COLOR_213 = Color(5, 213, RGBA(255, 135, 255))
COLOR_214 = Color(6, 214, RGBA(255, 175, 0))
COLOR_215 = Color(11, 215, RGBA(255, 175, 95))
COLOR_216 = Color(11, 216, RGBA(255, 175, 135))
COLOR_217 = Color(9, 217, RGBA(255, 175, 175))
COLOR_218 = Color(13, 218, RGBA(255, 175, 215))
COLOR_219 = Color(13, 219, RGBA(255, 175, 255))
COLOR_220 = Color(11, 220, RGBA(255, 215, 0))
COLOR_221 = Color(11, 221, RGBA(255, 215, 95))
COLOR_222 = Color(11, 222, RGBA(255, 215, 135))
COLOR_223 = Color(11, 223, RGBA(255, 215, 175))
COLOR_224 = Color(13, 224, RGBA(255, 215, 215))
COLOR_225 = Color(13, 225, RGBA(255, 215, 255))
COLOR_226 = Color(11, 226, RGBA(255, 255, 0))
COLOR_227 = Color(11, 227, RGBA(255, 255, 95))
COLOR_228 = Color(11, 228, RGBA(255, 255, 135))
COLOR_229 = Color(11, 229, RGBA(255, 255, 175))
COLOR_230 = Color(6, 230, RGBA(255, 255, 215))
COLOR_231 = Color(15, 231, RGBA(255, 255, 255))
COLOR_232 = Color(0, 232, RGBA(8, 8, 8))
COLOR_233 = Color(0, 233, RGBA(18, 18, 18))
COLOR_234 = Color(0, 234, RGBA(28, 28, 28))
COLOR_235 = Color(0, 235, RGBA(38, 38, 38))
COLOR_236 = Color(0, 236, RGBA(48, 48, 48))
COLOR_237 = Color(0, 237, RGBA(58, 58, 58))
COLOR_238 = Color(0, 238, RGBA(68, 68, 68))
COLOR_239 = Color(8, 239, RGBA(78, 78, 78))
COLOR_240 = Color(8, 240, RGBA(88, 88, 88))
COLOR_241 = Color(8, 241, RGBA(98, 98, 98))
COLOR_242 = Color(8, 242, RGBA(108, 108, 108))
COLOR_243 = Color(8, 243, RGBA(118, 118, 118))
COLOR_244 = Color(8, 244, RGBA(128, 128, 128))
COLOR_245 = Color(8, 245, RGBA(138, 138, 138))
COLOR_246 = Color(8, 246, RGBA(148, 148, 148))
COLOR_247 = Color(8, 247, RGBA(158, 158, 158))
COLOR_248 = Color(8, 248, RGBA(168, 168, 168))
COLOR_249 = Color(8, 249, RGBA(178, 178, 178))
COLOR_250 = Color(7, 250, RGBA(188, 188, 188))
COLOR_251 = Color(15, 251, RGBA(198, 198, 198))
COLOR_252 = Color(15, 252, RGBA(208, 208, 208))
COLOR_253 = Color(15, 253, RGBA(218, 218, 218))
COLOR_254 = Color(15, 254, RGBA(228, 228, 228))
COLOR_255 = Color(15, 255, RGBA(238, 238, 238))
COLOR_MAP = {-2: ALPHA, -1: DEFAULT}
COLORS = []
for color_id in range(256):
COLORS.append(vars()["COLOR_{}".format(color_id)])
COLOR_MAP[color_id] = COLORS[color_id]
del color_id
COLORS = tuple(COLORS)
| [
"functools.lru_cache"
] | [((6068, 6091), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(5000)'}), '(maxsize=5000)\n', (6077, 6091), False, 'from functools import lru_cache\n'), ((6315, 6338), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(5000)'}), '(maxsize=5000)\n', (6324, 6338), False, 'from functools import lru_cache\n'), ((6734, 6757), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(5000)'}), '(maxsize=5000)\n', (6743, 6757), False, 'from functools import lru_cache\n'), ((9751, 9774), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(5000)'}), '(maxsize=5000)\n', (9760, 9774), False, 'from functools import lru_cache\n')] |
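The alpha blend behind place_color_over is plain linear compositing; a self-contained sketch of the same arithmetic as transparent_value, reimplemented here rather than imported:

def blend(new, alpha, old):
    # round(a * new + (1 - a) * old) with a = alpha/255, clamped to 0..255
    a = alpha / 255
    return min(255, max(0, round(a * new + (1 - a) * old)))

print(blend(255, 128, 0))     # red 255 at alpha 128 over black -> 128
print(blend(255, 128, 255))   # over white the channel stays 255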
import json
from flask import Flask, render_template
import sleipnir
app = Flask(__name__)
app.register_blueprint(sleipnir.v1)
with open('config.json') as f:
settings = json.loads(f.read())
@app.route('/')
def index():
return render_template('index.html', settings=settings)
if __name__ == '__main__':
app.run()
| [
"flask.render_template",
"flask.Flask"
] | [((79, 94), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (84, 94), False, 'from flask import Flask, render_template\n'), ((240, 288), 'flask.render_template', 'render_template', (['"""index.html"""'], {'settings': 'settings'}), "('index.html', settings=settings)\n", (255, 288), False, 'from flask import Flask, render_template\n')] |
import os.path
import shlex
from ..generators.bash_script import BashScriptGenerator
from .._util import get_appdir_path
from . import BuilderBase
class AutotoolsBuilder(BuilderBase):
_script_filename = "build-autotools.sh"
def _get_configure_extra_variables(self) -> list:
default_params = [
"--prefix=/usr",
]
configure_config = self._builder_config.get("configure", None)
if not configure_config:
configure_config = dict()
extra_params = configure_config.get("extra_params", [])
rv = list(default_params)
rv += extra_params
return rv
@staticmethod
def from_dict(data: dict):
# TODO!
raise NotImplementedError()
def _get_source_dir(self, project_root_dir):
source_dir = self._builder_config.get("source_dir", None)
if not source_dir:
return project_root_dir
if not os.path.isabs(source_dir):
source_dir = os.path.join(project_root_dir, source_dir)
return source_dir
def _generate_configure_command(self, project_root_dir: str):
args = [os.path.join(self._get_source_dir(project_root_dir), "configure")]
for param in self._get_configure_extra_variables():
escaped_value = shlex.quote(param)
args.append(escaped_value)
source_dir = self._get_source_dir(project_root_dir)
args.append(source_dir)
return " ".join(args)
def generate_build_script(self, project_root_dir: str, build_dir: str) -> str:
script_path = os.path.join(build_dir, self.__class__._script_filename)
generator = BashScriptGenerator(script_path)
generator.add_lines([
"# make sure we're in the build directory",
"cd {}".format(shlex.quote(build_dir)),
"",
"# build in separate directory to avoid a mess in the build dir",
"mkdir -p autotools-build",
"cd autotools-build",
"",
])
autogen_path = os.path.join(self._get_source_dir(project_root_dir), "autogen.sh")
if self._builder_config.get("allow_insource"):
generator.add_lines([
"# in case the project uses autogen.sh, we have to call that script to generate the configure script",
"[ -f {0} ] && (cd {1} && {0})".format(
shlex.quote(autogen_path),
shlex.quote(os.path.dirname(autogen_path))
),
"",
])
else:
generator.add_lines([
"# the user needs to explicitly allow in source operations in order to be able to auto call autogen.sh",
"if [ -f {0} ]; then",
" echo \"Warning: autogen.sh found, might have to be called by us\""
" echo \"f so please add allow_insource: true to the autotools builder config\"",
"fi",
"",
])
if "configure" in self._builder_config:
generator.add_lines([
"# set up build directory with configure",
self._generate_configure_command(project_root_dir),
""
])
else:
generator.add_lines([
"# configure: section not found, not generating configure call (this might be intentional)"
""
])
generator.add_lines([
"# build project",
"make -j $(nproc)",
"",
"# install binaries into AppDir (requires correct CMake install(...) configuration)",
"make install DESTDIR={}".format(shlex.quote(get_appdir_path(build_dir))),
])
generator.build_file()
return os.path.basename(script_path)
| [
"shlex.quote"
] | [((1302, 1320), 'shlex.quote', 'shlex.quote', (['param'], {}), '(param)\n', (1313, 1320), False, 'import shlex\n'), ((1816, 1838), 'shlex.quote', 'shlex.quote', (['build_dir'], {}), '(build_dir)\n', (1827, 1838), False, 'import shlex\n'), ((2412, 2437), 'shlex.quote', 'shlex.quote', (['autogen_path'], {}), '(autogen_path)\n', (2423, 2437), False, 'import shlex\n')] |
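The configure line the builder generates is easy to reproduce by hand; a standalone sketch with a hypothetical config and project path:

import os
import shlex

builder_config = {"configure": {"extra_params": ["--disable-shared"]}}
params = ["--prefix=/usr"] + builder_config["configure"]["extra_params"]
source_dir = "/src/my-project"   # hypothetical project root, no separate source_dir set
cmd = " ".join([os.path.join(source_dir, "configure")]
               + [shlex.quote(p) for p in params]
               + [source_dir])
print(cmd)   # /src/my-project/configure --prefix=/usr --disable-shared /src/my-project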
"""
"""
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.contrib.auth.tokens import default_token_generator
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from rest_framework.authtoken.models import Token
class User(AbstractUser):
TASKS={
'on_create':['addUserToDefaultTrial','createToken'],
'on_save':[],
'on_delete':['removeUserFromStripe']
}
    dateCreated = models.DateTimeField(auto_now_add=True)
lastModified = models.DateTimeField(auto_now=True)
testUser = models.BooleanField(default=False)
guestUser = models.BooleanField(default=False)
validEmail = models.BooleanField(default=False)
email = models.EmailField(unique=True)
def generateSingleSigninToken(self):
token = default_token_generator.make_token(self)
uidb64 = urlsafe_base64_encode(str(self.pk).encode())
return {"token": token, "uidb64": uidb64}
@property
def name(self):
return self.get_full_name()
class Meta(AbstractUser.Meta):
verbose_name = _("Usuario")
verbose_name_plural = _("Usuarios")
def __str__(self):
return self.get_full_name()
| [
"django.db.models.EmailField",
"django.utils.translation.ugettext_lazy",
"django.db.models.BooleanField",
"django.contrib.auth.tokens.default_token_generator.make_token",
"django.db.models.DateTimeField"
] | [((549, 584), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (569, 584), False, 'from django.db import models\n'), ((604, 639), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (624, 639), False, 'from django.db import models\n'), ((655, 689), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (674, 689), False, 'from django.db import models\n'), ((706, 740), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (725, 740), False, 'from django.db import models\n'), ((758, 792), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (777, 792), False, 'from django.db import models\n'), ((805, 835), 'django.db.models.EmailField', 'models.EmailField', ([], {'unique': '(True)'}), '(unique=True)\n', (822, 835), False, 'from django.db import models\n'), ((894, 934), 'django.contrib.auth.tokens.default_token_generator.make_token', 'default_token_generator.make_token', (['self'], {}), '(self)\n', (928, 934), False, 'from django.contrib.auth.tokens import default_token_generator\n'), ((1178, 1190), 'django.utils.translation.ugettext_lazy', '_', (['"""Usuario"""'], {}), "('Usuario')\n", (1179, 1190), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1221, 1234), 'django.utils.translation.ugettext_lazy', '_', (['"""Usuarios"""'], {}), "('Usuarios')\n", (1222, 1234), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
from __future__ import print_function
###########
# imports #
###########
import numpy as np
from matplotlib.image import imread
#############
# functions #
#############
def construct_target_grid(num_cells):
"""Constructs a rectangular grid. First a grid resolution is randomly
chosen. grid_resolution equal to 1 implies equal number of cells and
locations on the grid. The random parameter beta controls how rectangular
    the grid will be -- beta=1 constructs a square grid.
num_cells -- the number of cells in the single-cell data."""
grid_resolution = int(np.random.randint(1, 2+(num_cells/1000), 1))
    grid_resolution = 2  # fixed override of the random choice above
num_locations = len(range(0, num_cells, grid_resolution))
grid_dim = int(np.ceil(np.sqrt(num_locations)))
beta = round(np.random.uniform(1, 1.5), 1) # controls how rectangular the grid is
# beta = 1 # set this for a square grid
x = np.arange(grid_dim * beta)
y = np.arange(grid_dim / beta)
locations = np.array([(i, j) for i in x for j in y])
return locations
def create_target_space_from_image(image):
"""Create a tissue target space from a given image. The image is assumed to
contain a black-colored tissue space in white background.
image -- the location of the image on the disk."""
img = imread(image)
img_width = img.shape[1]
img_height = img.shape[0]
locations = np.array([(x, y) for x in range(img_width) for y in range(img_height)
if sum(img[y, x, :] == np.array([0, 0, 0]))])
return locations
| [
"numpy.sqrt",
"matplotlib.image.imread",
"numpy.array",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.arange"
] | [((914, 940), 'numpy.arange', 'np.arange', (['(grid_dim * beta)'], {}), '(grid_dim * beta)\n', (923, 940), True, 'import numpy as np\n'), ((949, 975), 'numpy.arange', 'np.arange', (['(grid_dim / beta)'], {}), '(grid_dim / beta)\n', (958, 975), True, 'import numpy as np\n'), ((992, 1032), 'numpy.array', 'np.array', (['[(i, j) for i in x for j in y]'], {}), '([(i, j) for i in x for j in y])\n', (1000, 1032), True, 'import numpy as np\n'), ((1306, 1319), 'matplotlib.image.imread', 'imread', (['image'], {}), '(image)\n', (1312, 1319), False, 'from matplotlib.image import imread\n'), ((592, 637), 'numpy.random.randint', 'np.random.randint', (['(1)', '(2 + num_cells / 1000)', '(1)'], {}), '(1, 2 + num_cells / 1000, 1)\n', (609, 637), True, 'import numpy as np\n'), ((793, 818), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(1.5)'], {}), '(1, 1.5)\n', (810, 818), True, 'import numpy as np\n'), ((750, 772), 'numpy.sqrt', 'np.sqrt', (['num_locations'], {}), '(num_locations)\n', (757, 772), True, 'import numpy as np\n'), ((1515, 1534), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1523, 1534), True, 'import numpy as np\n')] |
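The beta trick is easiest to see with concrete numbers; a short check assuming 5000 cells and the fixed grid_resolution of 2:

import numpy as np

num_locations = len(range(0, 5000, 2))              # 2500 grid locations
grid_dim = int(np.ceil(np.sqrt(num_locations)))     # 50
beta = 1.2
x = np.arange(grid_dim * beta)                      # 60 columns
y = np.arange(grid_dim / beta)                      # 42 rows: 0..41 below the 41.67 stop
assert len(x) * len(y) >= num_locations             # 2520 >= 2500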
import os
from shutil import rmtree, copy
from tempfile import gettempdir
from pathlib import Path
import pytest
from pew._utils import invoke_pew as invoke
@pytest.yield_fixture(scope='session')
def workon_home():
tmpdir = os.environ.get('TMPDIR', gettempdir())
os.environ['WORKON_HOME'] = str(Path(tmpdir) / 'WORKON_HOME')
workon = Path(os.environ['WORKON_HOME'])
rmtree(str(workon), ignore_errors=True)
workon.mkdir(parents=True)
yield workon
rmtree(str(workon))
@pytest.yield_fixture()
def env1(workon_home):
invoke('new', 'env1', '-d')
yield
invoke('rm', 'env1')
@pytest.yield_fixture()
def env2(workon_home):
invoke('new', 'env2', '-d')
yield
invoke('rm', 'env2')
@pytest.yield_fixture()
def testpackageenv(workon_home):
testpackage = str(Path(__file__).parent / 'testpackage')
invoke('new', 'source', '-d')
invoke('in', 'source', 'python', 'setup.py', 'install', cwd=testpackage)
yield
invoke('rm', 'source')
@pytest.yield_fixture()
def testtemplate(workon_home):
sourcetemplate = Path(__file__).parent / 'template_test'
testtemplatefile = workon_home / 'template_test'
copy(str(sourcetemplate), str(testtemplatefile))
testtemplatefile.chmod(0o700)
yield testtemplatefile
testtemplatefile.unlink()
| [
"pew._utils.invoke_pew",
"tempfile.gettempdir",
"pathlib.Path",
"pytest.yield_fixture"
] | [((162, 199), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (182, 199), False, 'import pytest\n'), ((502, 524), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {}), '()\n', (522, 524), False, 'import pytest\n'), ((618, 640), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {}), '()\n', (638, 640), False, 'import pytest\n'), ((734, 756), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {}), '()\n', (754, 756), False, 'import pytest\n'), ((1002, 1024), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {}), '()\n', (1022, 1024), False, 'import pytest\n'), ((351, 382), 'pathlib.Path', 'Path', (["os.environ['WORKON_HOME']"], {}), "(os.environ['WORKON_HOME'])\n", (355, 382), False, 'from pathlib import Path\n'), ((552, 579), 'pew._utils.invoke_pew', 'invoke', (['"""new"""', '"""env1"""', '"""-d"""'], {}), "('new', 'env1', '-d')\n", (558, 579), True, 'from pew._utils import invoke_pew as invoke\n'), ((594, 614), 'pew._utils.invoke_pew', 'invoke', (['"""rm"""', '"""env1"""'], {}), "('rm', 'env1')\n", (600, 614), True, 'from pew._utils import invoke_pew as invoke\n'), ((668, 695), 'pew._utils.invoke_pew', 'invoke', (['"""new"""', '"""env2"""', '"""-d"""'], {}), "('new', 'env2', '-d')\n", (674, 695), True, 'from pew._utils import invoke_pew as invoke\n'), ((710, 730), 'pew._utils.invoke_pew', 'invoke', (['"""rm"""', '"""env2"""'], {}), "('rm', 'env2')\n", (716, 730), True, 'from pew._utils import invoke_pew as invoke\n'), ((855, 884), 'pew._utils.invoke_pew', 'invoke', (['"""new"""', '"""source"""', '"""-d"""'], {}), "('new', 'source', '-d')\n", (861, 884), True, 'from pew._utils import invoke_pew as invoke\n'), ((889, 961), 'pew._utils.invoke_pew', 'invoke', (['"""in"""', '"""source"""', '"""python"""', '"""setup.py"""', '"""install"""'], {'cwd': 'testpackage'}), "('in', 'source', 'python', 'setup.py', 'install', cwd=testpackage)\n", (895, 961), True, 'from pew._utils import invoke_pew as invoke\n'), ((976, 998), 'pew._utils.invoke_pew', 'invoke', (['"""rm"""', '"""source"""'], {}), "('rm', 'source')\n", (982, 998), True, 'from pew._utils import invoke_pew as invoke\n'), ((257, 269), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (267, 269), False, 'from tempfile import gettempdir\n'), ((307, 319), 'pathlib.Path', 'Path', (['tmpdir'], {}), '(tmpdir)\n', (311, 319), False, 'from pathlib import Path\n'), ((1077, 1091), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1081, 1091), False, 'from pathlib import Path\n'), ((812, 826), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (816, 826), False, 'from pathlib import Path\n')] |
import pytest
from hatch.project.core import Project
from hatchling.utils.constants import DEFAULT_BUILD_SCRIPT
@pytest.fixture(autouse=True)
def local_builder(mock_backend_process, mocker):
if mock_backend_process:
mocker.patch('hatch.env.virtual.VirtualEnvironment.build_environment')
yield
def test(hatch, temp_dir, helpers):
project_name = 'My App'
with temp_dir.as_cwd():
result = hatch('new', project_name)
assert result.exit_code == 0, result.output
path = temp_dir / 'my-app'
build_script = path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def clean(self, versions):
if self.target_name == 'wheel':
pathlib.Path('my_app', 'lib.so').unlink()
def initialize(self, version, build_data):
if self.target_name == 'wheel':
pathlib.Path('my_app', 'lib.so').touch()
"""
)
)
project = Project(path)
config = dict(project.raw_config)
config['tool']['hatch']['build']['hooks'] = {'custom': {'path': build_script.name}}
project.save_config(config)
with path.as_cwd():
result = hatch('build')
assert result.exit_code == 0, result.output
build_directory = path / 'dist'
assert build_directory.is_dir()
build_artifact = path / 'my_app' / 'lib.so'
assert build_artifact.is_file()
artifacts = list(build_directory.iterdir())
assert len(artifacts) == 2
with path.as_cwd():
result = hatch('version', 'minor')
assert result.exit_code == 0, result.output
result = hatch('clean')
assert result.exit_code == 0, result.output
artifacts = list(build_directory.iterdir())
assert not artifacts
assert not build_artifact.exists()
assert result.output == helpers.dedent(
"""
Setting up build environment
Setting up build environment
"""
)
| [
"pytest.fixture",
"hatch.project.core.Project"
] | [((116, 144), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (130, 144), False, 'import pytest\n'), ((1201, 1214), 'hatch.project.core.Project', 'Project', (['path'], {}), '(path)\n', (1208, 1214), False, 'from hatch.project.core import Project\n')] |
import re
import time
import datetime
from libraries.utils import coroutine
from libraries.db import get_logdb
REGEX_SPECIAL_CHARS = r'([\.\*\+\?\|\(\)\{\}\[\]])'
REGEX_LOG_FORMAT_VARIABLE = r'\$([a-zA-Z0-9\_]+)'
def build_pattern(log_format):
"""
Build regular expression to parse given format.
:param log_format: format string to parse
:return: regular expression to parse given format
"""
pattern = re.sub(REGEX_SPECIAL_CHARS, r'\\\1', log_format)
pattern = re.sub(REGEX_LOG_FORMAT_VARIABLE, '(?P<\\1>.*)', pattern)
pattern = re.compile(pattern)
# Initialize database with the field parsed from log format
_ = get_logdb(pattern.groupindex.keys())
return pattern
def process_log(log_file, pattern):
f = open(log_file)
db_processer = process_db()
for l in f:
matched = pattern.match(l)
if matched:
db_processer.send(matched.groupdict())
db_processer.close()
@coroutine
def process_db():
logdb = get_logdb()
raws = []
try:
while True:
raw = (yield)
if raw is not None:
raw['time_local'] = int(time.mktime(datetime.datetime.strptime(
raw['time_local'], "%d/%b/%Y:%H:%M:%S %z").timetuple()))
if len(raws) < 1000:
raws.append(raw)
else:
logdb.processmany(raws)
raws.clear()
raws.append(raw)
except GeneratorExit:
if raws:
logdb.processmany(raws)
pass
| [
"datetime.datetime.strptime",
"re.sub",
"libraries.db.get_logdb",
"re.compile"
] | [((429, 479), 're.sub', 're.sub', (['REGEX_SPECIAL_CHARS', '"""\\\\\\\\\\\\1"""', 'log_format'], {}), "(REGEX_SPECIAL_CHARS, '\\\\\\\\\\\\1', log_format)\n", (435, 479), False, 'import re\n'), ((492, 549), 're.sub', 're.sub', (['REGEX_LOG_FORMAT_VARIABLE', '"""(?P<\\\\1>.*)"""', 'pattern'], {}), "(REGEX_LOG_FORMAT_VARIABLE, '(?P<\\\\1>.*)', pattern)\n", (498, 549), False, 'import re\n'), ((564, 583), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (574, 583), False, 'import re\n'), ((996, 1007), 'libraries.db.get_logdb', 'get_logdb', ([], {}), '()\n', (1005, 1007), False, 'from libraries.db import get_logdb\n'), ((1161, 1230), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["raw['time_local']", '"""%d/%b/%Y:%H:%M:%S %z"""'], {}), "(raw['time_local'], '%d/%b/%Y:%H:%M:%S %z')\n", (1187, 1230), False, 'import datetime\n')] |
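The two substitutions in build_pattern can be exercised on their own, reusing the regex constants defined above and skipping the database step; the log line below is invented:

import re

fmt = '$remote_addr - [$time_local] "$request" $status'
pat = re.sub(REGEX_SPECIAL_CHARS, r'\\\1', fmt)     # escape [ and ]
pat = re.compile(re.sub(REGEX_LOG_FORMAT_VARIABLE, '(?P<\\1>.*)', pat))
m = pat.match('127.0.0.1 - [05/Apr/2022:22:05:13 +0000] "GET / HTTP/1.1" 200')
# m.groupdict() -> {'remote_addr': '127.0.0.1',
#                   'time_local': '05/Apr/2022:22:05:13 +0000',
#                   'request': 'GET / HTTP/1.1', 'status': '200'}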
#!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Integration test for branch-day.py"""
import json
import os
import subprocess
import tempfile
import unittest
INFRA_CONFIG_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
BRANCH_DAY_PY = os.path.join(INFRA_CONFIG_DIR, 'scripts', 'branch-day.py')
MOCK_PY = os.path.join(INFRA_CONFIG_DIR, 'scripts', 'tests', 'utils', 'mock.py')
class BranchDayUnitTest(unittest.TestCase):
def setUp(self):
self._temp_dir = tempfile.TemporaryDirectory()
self._invocations_file = os.path.join(self._temp_dir.name,
'invocations.json')
self._milestones_py = os.path.join(self._temp_dir.name, 'milestones.py')
self._branch_py = os.path.join(self._temp_dir.name, 'branch.py')
self._main_star = os.path.join(self._temp_dir.name, 'main.star')
self._dev_star = os.path.join(self._temp_dir.name, 'dev.star')
self._binaries = (self._milestones_py, self._branch_py, self._main_star,
self._dev_star)
for path in self._binaries:
os.symlink(MOCK_PY, path)
def tearDown(self):
self._temp_dir.cleanup()
def _execute_branch_day_py(self, args, mock_details=None):
def details(binary, stdout=None, stderr=None, exit_code=None):
binary = os.path.basename(binary)
d = {
'stdout': stdout or 'fake {} stdout'.format(binary),
'stderr': stderr or 'fake {} stderr'.format(binary),
}
if exit_code:
d['exit_code'] = exit_code
return d
mock_details = mock_details or {}
mock_details = {
b: details(b, **mock_details.get(b, {}))
for b in self._binaries
}
env = os.environ.copy()
env.update({
'INVOCATIONS_FILE': self._invocations_file,
'MOCK_DETAILS': json.dumps(mock_details),
})
cmd = [
BRANCH_DAY_PY, '--milestones-py', self._milestones_py, '--branch-py',
self._branch_py, '--main-star', self._main_star, '--dev-star',
self._dev_star
]
cmd += args or []
return subprocess.run(cmd,
env=env,
text=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def test_branch_day_invocation_fails(self):
result = self._execute_branch_day_py(
['--milestone', 'XX', '--branch', 'YYYY'],
mock_details={
self._milestones_py: {
'stdout': 'FAKE FAILURE STDOUT',
'stderr': 'FAKE FAILURE STDERR',
'exit_code': 1,
}
})
self.assertNotEqual(result.returncode, 0)
expected_output = '\n'.join([
'Executing {} failed'.format([
self._milestones_py, 'activate', '--milestone', 'XX', '--branch',
'YYYY'
]),
'FAKE FAILURE STDOUT',
'FAKE FAILURE STDERR',
'',
])
self.assertEqual(result.stdout, expected_output)
def test_branch_day(self):
result = self._execute_branch_day_py(
['--milestone', 'XX', '--branch', 'YYYY'])
self.assertEqual(result.returncode, 0,
(f'subprocess failed\n***COMMAND***\n{result.args}\n'
f'***OUTPUT***\n{result.stdout}\n'))
self.assertEqual(result.stdout, '')
with open(self._invocations_file) as f:
invocations = json.load(f)
expected_invocations = [
[
self._milestones_py, 'activate', '--milestone', 'XX', '--branch',
'YYYY'
],
[self._main_star],
[self._dev_star],
]
self.assertEqual(invocations, expected_invocations)
def test_branch_day_on_branch(self):
result = self._execute_branch_day_py(
['--on-branch', '--milestone', 'XX', '--branch', 'YYYY'])
self.assertEqual(result.returncode, 0,
(f'subprocess failed\n***COMMAND***\n{result.args}\n'
f'***OUTPUT***\n{result.stdout}\n'))
self.assertEqual(result.stdout, '')
with open(self._invocations_file) as f:
invocations = json.load(f)
expected_invocations = [
[
self._branch_py, 'initialize', '--milestone', 'XX', '--branch',
'YYYY'
],
[self._main_star],
[self._dev_star],
]
self.assertEqual(invocations, expected_invocations)
if __name__ == '__main__':
unittest.main()
| [
"tempfile.TemporaryDirectory",
"subprocess.run",
"os.path.join",
"os.symlink",
"os.environ.copy",
"json.dumps",
"json.load",
"os.path.basename",
"unittest.main"
] | [((394, 452), 'os.path.join', 'os.path.join', (['INFRA_CONFIG_DIR', '"""scripts"""', '"""branch-day.py"""'], {}), "(INFRA_CONFIG_DIR, 'scripts', 'branch-day.py')\n", (406, 452), False, 'import os\n'), ((463, 533), 'os.path.join', 'os.path.join', (['INFRA_CONFIG_DIR', '"""scripts"""', '"""tests"""', '"""utils"""', '"""mock.py"""'], {}), "(INFRA_CONFIG_DIR, 'scripts', 'tests', 'utils', 'mock.py')\n", (475, 533), False, 'import os\n'), ((336, 376), 'os.path.join', 'os.path.join', (['__file__', '""".."""', '""".."""', '""".."""'], {}), "(__file__, '..', '..', '..')\n", (348, 376), False, 'import os\n'), ((4524, 4539), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4537, 4539), False, 'import unittest\n'), ((620, 649), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (647, 649), False, 'import tempfile\n'), ((679, 732), 'os.path.join', 'os.path.join', (['self._temp_dir.name', '"""invocations.json"""'], {}), "(self._temp_dir.name, 'invocations.json')\n", (691, 732), False, 'import os\n'), ((801, 851), 'os.path.join', 'os.path.join', (['self._temp_dir.name', '"""milestones.py"""'], {}), "(self._temp_dir.name, 'milestones.py')\n", (813, 851), False, 'import os\n'), ((874, 920), 'os.path.join', 'os.path.join', (['self._temp_dir.name', '"""branch.py"""'], {}), "(self._temp_dir.name, 'branch.py')\n", (886, 920), False, 'import os\n'), ((943, 989), 'os.path.join', 'os.path.join', (['self._temp_dir.name', '"""main.star"""'], {}), "(self._temp_dir.name, 'main.star')\n", (955, 989), False, 'import os\n'), ((1011, 1056), 'os.path.join', 'os.path.join', (['self._temp_dir.name', '"""dev.star"""'], {}), "(self._temp_dir.name, 'dev.star')\n", (1023, 1056), False, 'import os\n'), ((1833, 1850), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1848, 1850), False, 'import os\n'), ((2201, 2295), 'subprocess.run', 'subprocess.run', (['cmd'], {'env': 'env', 'text': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, env=env, text=True, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n', (2215, 2295), False, 'import subprocess\n'), ((1212, 1237), 'os.symlink', 'os.symlink', (['MOCK_PY', 'path'], {}), '(MOCK_PY, path)\n', (1222, 1237), False, 'import os\n'), ((1434, 1458), 'os.path.basename', 'os.path.basename', (['binary'], {}), '(binary)\n', (1450, 1458), False, 'import os\n'), ((3515, 3527), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3524, 3527), False, 'import json\n'), ((4220, 4232), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4229, 4232), False, 'import json\n'), ((1944, 1968), 'json.dumps', 'json.dumps', (['mock_details'], {}), '(mock_details)\n', (1954, 1968), False, 'import json\n')] |
# Generated by Django 3.2.12 on 2022-04-05 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0018_migrate_organisers_to_entities'),
]
operations = [
migrations.AlterField(
model_name='event',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='location',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='series',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='session',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"django.db.models.BigAutoField"
] | [((343, 439), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (362, 439), False, 'from django.db import migrations, models\n'), ((555, 651), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (574, 651), False, 'from django.db import migrations, models\n'), ((765, 861), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (784, 861), False, 'from django.db import migrations, models\n'), ((976, 1072), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (995, 1072), False, 'from django.db import migrations, models\n')] |
import os
import copy
import torch
import numpy as np
from torch import optim
from torch.nn import functional as F
from torch.distributions.categorical import Categorical
from .networks import ACUnet
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class PPO:
def __init__(self, action_dims, args):
self.clip = args.clip
self.epoch = args.epoch
self.ent_coef = args.ent_coef
self.batch_size = args.batch_size
self.vloss_coef = args.vloss_coef
self.max_grad_norm = args.max_grad_norm
# start to build the network.
if args.actor_net_type == 'unet':
self.actor = ACUnet(action_dims, None, args).to(device)
else:
raise NotImplementedError
self.old_actor = copy.deepcopy(self.actor).to(device)
# define the optimizer...
self.optimizer = optim.Adam(self.actor.parameters(), args.lr, eps=args.eps)
def predict(self, obs, is_training=False, training_mask=False):
if is_training:
self.actor.train()
else:
self.actor.eval()
obs = np.expand_dims(obs, axis=0)
with torch.no_grad():
# get tensors
obs_tensor = torch.tensor(obs, dtype=torch.float32).to(device)
values, acts_logit = self.actor(obs_tensor)
acts_softmax = F.softmax(acts_logit, dim=1)
# select actions
actions = Categorical(acts_softmax).sample()
if training_mask:
return acts_softmax.detach().cpu().numpy().squeeze(), actions.detach().cpu().numpy().squeeze()
else:
return values.detach().cpu().numpy().squeeze(), actions.detach().cpu().numpy().squeeze()
# update the network
def _update_network(self, obs, actions, returns, advantages):
# before update the network, the old network will try to load the weights
self.old_actor.load_state_dict(self.actor.state_dict())
inds = np.arange(obs.shape[0])
nbatch_train = obs.shape[0] // self.batch_size
for _ in range(self.epoch):
np.random.shuffle(inds)
for start in range(0, obs.shape[0], nbatch_train):
# get the mini-batchs
end = start + nbatch_train
mbinds = inds[start:end]
mb_obs = obs[mbinds]
mb_actions = actions[mbinds]
mb_returns = returns[mbinds]
mb_advs = advantages[mbinds]
# convert minibatches to tensor
mb_obs = torch.tensor(mb_obs, dtype=torch.float32).to(device)
mb_actions = torch.tensor(mb_actions, dtype=torch.float32).to(device)
mb_returns = torch.tensor(mb_returns, dtype=torch.float32).to(device).unsqueeze(1)
mb_advs = torch.tensor(mb_advs, dtype=torch.float32).to(device).unsqueeze(1)
# normalize adv
mb_advs = (mb_advs - mb_advs.mean()) / (mb_advs.std() + 1e-8)
# start to get values
mb_values, logits = self.actor(mb_obs)
pis = F.softmax(logits, dim=1)
# start to calculate the value loss...
value_loss = (mb_returns - mb_values).pow(2).mean()
# start to calculate the policy loss
with torch.no_grad():
_, old_logits = self.old_actor(mb_obs)
old_pis = F.softmax(old_logits, dim=1)
# get the old log probs
old_log_prob, _ = self.evaluate_actions(old_pis, mb_actions)
old_log_prob = old_log_prob.detach()
# evaluate the current policy
log_prob, ent_loss = self.evaluate_actions(pis, mb_actions)
prob_ratio = torch.exp(log_prob - old_log_prob)
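                # Added note: the clipped surrogate below takes the pessimistic
                # minimum of the unclipped and clipped ratio terms, bounding how
                # far one update can move the policy away from old_actor.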
# surr1
surr1 = prob_ratio * mb_advs
surr2 = torch.clamp(prob_ratio, 1 - self.clip, 1 + self.clip) * mb_advs
policy_loss = -torch.min(surr1, surr2).mean()
# final total loss
total_loss = policy_loss + self.vloss_coef * value_loss - ent_loss * self.ent_coef
# clear the grad buffer
self.optimizer.zero_grad()
total_loss.backward()
torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.max_grad_norm)
# update
self.optimizer.step()
# convert the numpy array to tensors
# def _get_tensors(self, obs):
# obs_tensor = torch.tensor(np.transpose(obs, (0, 3, 1, 2)), dtype=torch.float32).to(device)
# return obs_tensor
def evaluate_actions(self, pi, actions):
cate_dist = Categorical(pi)
log_prob = cate_dist.log_prob(actions).unsqueeze(-1)
entropy = cate_dist.entropy().mean()
return log_prob, entropy
# adjust the learning rate
def _adjust_learning_rate(self, init_lr, update, num_updates):
lr_frac = 1 - (update / num_updates)
adjust_lr = init_lr * lr_frac
for param_group in self.optimizer.param_groups:
param_group['lr'] = adjust_lr
def save(self, filename, directory):
torch.save(self.actor.state_dict(), directory+'/{}_ACNet.pth'.format(filename))
def load(self, filename, directory):
self.actor.load_state_dict(torch.load(directory+'/{}_ACNet.pth'.format(filename), map_location=device))
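# Added usage sketch (illustrative; the attribute names match the args consumed
# in __init__ above, the concrete values are assumptions):
#   agent = PPO(action_dims=4, args=args)          # args.actor_net_type == 'unet'
#   value, action = agent.predict(obs)             # one rollout step
#   agent._update_network(obs_b, act_b, returns_b, advs_b)
#   agent.save('ppo', './checkpoints')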
| [
"numpy.random.shuffle",
"torch.exp",
"torch.clamp",
"torch.min",
"torch.tensor",
"torch.cuda.is_available",
"numpy.expand_dims",
"copy.deepcopy",
"torch.no_grad",
"torch.nn.functional.softmax",
"numpy.arange",
"torch.distributions.categorical.Categorical"
] | [((233, 258), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (256, 258), False, 'import torch\n'), ((1989, 2012), 'numpy.arange', 'np.arange', (['obs.shape[0]'], {}), '(obs.shape[0])\n', (1998, 2012), True, 'import numpy as np\n'), ((4752, 4767), 'torch.distributions.categorical.Categorical', 'Categorical', (['pi'], {}), '(pi)\n', (4763, 4767), False, 'from torch.distributions.categorical import Categorical\n'), ((1129, 1156), 'numpy.expand_dims', 'np.expand_dims', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (1143, 1156), True, 'import numpy as np\n'), ((1170, 1185), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1183, 1185), False, 'import torch\n'), ((1371, 1399), 'torch.nn.functional.softmax', 'F.softmax', (['acts_logit'], {'dim': '(1)'}), '(acts_logit, dim=1)\n', (1380, 1399), True, 'from torch.nn import functional as F\n'), ((2116, 2139), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (2133, 2139), True, 'import numpy as np\n'), ((784, 809), 'copy.deepcopy', 'copy.deepcopy', (['self.actor'], {}), '(self.actor)\n', (797, 809), False, 'import copy\n'), ((1452, 1477), 'torch.distributions.categorical.Categorical', 'Categorical', (['acts_softmax'], {}), '(acts_softmax)\n', (1463, 1477), False, 'from torch.distributions.categorical import Categorical\n'), ((3126, 3150), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (3135, 3150), True, 'from torch.nn import functional as F\n'), ((3816, 3850), 'torch.exp', 'torch.exp', (['(log_prob - old_log_prob)'], {}), '(log_prob - old_log_prob)\n', (3825, 3850), False, 'import torch\n'), ((1238, 1276), 'torch.tensor', 'torch.tensor', (['obs'], {'dtype': 'torch.float32'}), '(obs, dtype=torch.float32)\n', (1250, 1276), False, 'import torch\n'), ((3348, 3363), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3361, 3363), False, 'import torch\n'), ((3454, 3482), 'torch.nn.functional.softmax', 'F.softmax', (['old_logits'], {'dim': '(1)'}), '(old_logits, dim=1)\n', (3463, 3482), True, 'from torch.nn import functional as F\n'), ((3944, 3997), 'torch.clamp', 'torch.clamp', (['prob_ratio', '(1 - self.clip)', '(1 + self.clip)'], {}), '(prob_ratio, 1 - self.clip, 1 + self.clip)\n', (3955, 3997), False, 'import torch\n'), ((2570, 2611), 'torch.tensor', 'torch.tensor', (['mb_obs'], {'dtype': 'torch.float32'}), '(mb_obs, dtype=torch.float32)\n', (2582, 2611), False, 'import torch\n'), ((2652, 2697), 'torch.tensor', 'torch.tensor', (['mb_actions'], {'dtype': 'torch.float32'}), '(mb_actions, dtype=torch.float32)\n', (2664, 2697), False, 'import torch\n'), ((4039, 4062), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (4048, 4062), False, 'import torch\n'), ((2738, 2783), 'torch.tensor', 'torch.tensor', (['mb_returns'], {'dtype': 'torch.float32'}), '(mb_returns, dtype=torch.float32)\n', (2750, 2783), False, 'import torch\n'), ((2834, 2876), 'torch.tensor', 'torch.tensor', (['mb_advs'], {'dtype': 'torch.float32'}), '(mb_advs, dtype=torch.float32)\n', (2846, 2876), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# wmd_launcher.py
#
# Copyright 2013 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import alsaaudio
import sys
import time
from math import pi, sin
from numpy import arange # like range, but supports floating point
A = 440
D = 293.66
F = 349.23
C = 523.25
C4 = 261.63
C3 = 130.81
B = 493.88
D5 = 587.33
G = 392.00
C4 = 261.63
D4 = 293.66
E4 = 329.63
Gab4 = 415.30
G3 = 196.0
B2 = 123.47
B3_flat = 233.08
A3 = 220.00
D4l = 311.13
song_of_time_notes = [A, A, D, D, D, D, F, F, A, A, D, D, D, D, F, F, A, C, B, B, G, G, F, G, A, A, D, D, C4, E4, D, D, D, D]
class FrequencyGenerator:
def __init__(self, channels = 2, sample_size = 1, frame_rate = 44100, period_size = 11025):
self.channels = channels
self.sample_size = sample_size
self.frame_size = self.channels * self.sample_size
self.frame_rate = frame_rate
self.byte_rate = self.frame_rate * self.frame_size # bytes per second
self.period_size = period_size
self.pcm = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK)
self.pcm.setchannels(self.channels)
self.pcm.setformat(alsaaudio.PCM_FORMAT_U8)
self.pcm.setrate(self.frame_rate)
self.pcm.setperiodsize(self.period_size)
def quantize(self, f): # map (-1..1) -> [0..256)
return int((f+1)*127) # depends on PCM format
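    # e.g. quantize(-1.0) -> 0, quantize(0.0) -> 127, quantize(1.0) -> 254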
def sine_wave(self, freq):
        # PCM_FORMAT_U8 expects raw unsigned bytes; chr() would build str on Python 3
        wave = [bytes([self.quantize(sin(x))]) * self.channels
                for x in arange(0, 2 * pi, 2 * pi / (self.frame_rate / freq))]
        wave_data = b"".join(wave) + b"".join(wave)
(nwaves, extra_bytes) = divmod(self.period_size * self.frame_size, len(wave_data))
self.pcm.write((wave_data * nwaves) + wave_data[:extra_bytes])
def play_zelda(self):
zelda = [C4, C4, G3, G3, G3, G3, C4, C4, D4, D4l, F, G]
for note in zelda:
self.sine_wave(note)
def zelda_secret(self):
G = 783.99
Fs = 739.99
Ds = 622.25
Gs = 415.30
E = 659.26
HGs = 830.61
HC = 1046.50
secret = [G, Fs, Ds, A, Gs, E, HGs, HC]
for note in secret:
self.sine_wave(note)
def main():
t = FrequencyGenerator()
t.zelda_secret()
if __name__ == "__main__":
main()
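# Added note: `song_of_time_notes` above is defined but never used; with the
# existing class it could be played as:
#   gen = FrequencyGenerator()
#   for note in song_of_time_notes:
#       gen.sine_wave(note)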
| [
"math.sin",
"alsaaudio.PCM",
"numpy.arange"
] | [((2460, 2497), 'alsaaudio.PCM', 'alsaaudio.PCM', (['alsaaudio.PCM_PLAYBACK'], {}), '(alsaaudio.PCM_PLAYBACK)\n', (2473, 2497), False, 'import alsaaudio\n'), ((2867, 2919), 'numpy.arange', 'arange', (['(0)', '(2 * pi)', '(2 * pi / (self.frame_rate / freq))'], {}), '(0, 2 * pi, 2 * pi / (self.frame_rate / freq))\n', (2873, 2919), False, 'from numpy import arange\n'), ((2833, 2839), 'math.sin', 'sin', (['x'], {}), '(x)\n', (2836, 2839), False, 'from math import pi, sin\n')] |
import pickle
import requests
class Network:
def __init__(self, cache_file="net_cache.pickle"):
self.cache_file = cache_file
        # Cache file may not exist yet, or may be unreadable; start empty then.
        try:
            with open(self.cache_file, "rb") as f:  # pickle needs binary mode
                self.cache = pickle.load(f)
        except (OSError, pickle.PickleError):
            self.cache = {}
def get_text(self, url):
resp = requests.get(url)
resp.raise_for_status()
return resp.text
def post_text(self, url, params):
params_key = str(params)
if (url, params_key) not in self.cache:
resp = requests.post(url, params=params)
resp.raise_for_status()
self.cache[(url, params_key)] = resp.text
with open(self.cache_file, "w") as f:
pickle.dump(self.cache, f)
return self.cache[(url, params_key)]
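# Added usage sketch (URL is illustrative): GET requests always hit the network,
# while POSTs with an identical (url, params) pair are served from the cache.
#   net = Network()
#   first = net.post_text('https://example.com/api', {'q': '1'})   # network hit
#   again = net.post_text('https://example.com/api', {'q': '1'})   # cached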
| [
"pickle.dump",
"requests.post",
"pickle.load",
"requests.get"
] | [((372, 389), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (384, 389), False, 'import requests\n'), ((588, 621), 'requests.post', 'requests.post', (['url'], {'params': 'params'}), '(url, params=params)\n', (601, 621), False, 'import requests\n'), ((268, 282), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (279, 282), False, 'import pickle\n'), ((780, 806), 'pickle.dump', 'pickle.dump', (['self.cache', 'f'], {}), '(self.cache, f)\n', (791, 806), False, 'import pickle\n')] |
import tempfile
from ..common import check_call
def install(version, target_filename='/usr/local/bin/minikube', with_sudo=False):
with tempfile.TemporaryDirectory() as tempdir:
check_call(['curl', '-Ls', 'https://github.com/kubernetes/minikube/releases/download/{}/minikube-linux-amd64.tar.gz'.format(version), '-ominikube.tar.gz'], cwd=tempdir)
check_call(['tar', '-xzvf', 'minikube.tar.gz'], cwd=tempdir)
check_call([*(['sudo'] if with_sudo else []), 'mv', '-f', 'out/minikube-linux-amd64', target_filename], cwd=tempdir)
check_call(['chmod', '+x', target_filename], cwd=tempdir)
check_call([target_filename, 'version'])
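# Example call (added; the version string is illustrative):
#   install('v1.6.2', with_sudo=True)
# downloads the release tarball, extracts it, moves the binary into place and
# runs `minikube version` as a sanity check.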
| [
"tempfile.TemporaryDirectory"
] | [((141, 170), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (168, 170), False, 'import tempfile\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-06-20 13:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods2', '0014_evallog'),
]
operations = [
migrations.RenameField(
model_name='trainmodel',
old_name='checkpoint_prefix',
new_name='checkpoint_step',
),
migrations.AddField(
model_name='tasklog',
name='ip',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='trainmodel',
name='model_path',
field=models.CharField(default='', max_length=200),
),
]
| [
"django.db.migrations.RenameField",
"django.db.models.CharField"
] | [((288, 398), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""trainmodel"""', 'old_name': '"""checkpoint_prefix"""', 'new_name': '"""checkpoint_step"""'}), "(model_name='trainmodel', old_name=\n 'checkpoint_prefix', new_name='checkpoint_step')\n", (310, 398), False, 'from django.db import migrations, models\n'), ((546, 589), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(50)'}), "(default='', max_length=50)\n", (562, 589), False, 'from django.db import migrations, models\n'), ((719, 763), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(200)'}), "(default='', max_length=200)\n", (735, 763), False, 'from django.db import migrations, models\n')] |
from django.urls import path
from .views import sign_in, sign_up, logout
urlpatterns = [
path('signin', sign_in, name='sign-in'),
path('signup', sign_up, name='sign-up'),
path('logout', logout, name='logout')
] | [
"django.urls.path"
] | [((94, 133), 'django.urls.path', 'path', (['"""signin"""', 'sign_in'], {'name': '"""sign-in"""'}), "('signin', sign_in, name='sign-in')\n", (98, 133), False, 'from django.urls import path\n'), ((139, 178), 'django.urls.path', 'path', (['"""signup"""', 'sign_up'], {'name': '"""sign-up"""'}), "('signup', sign_up, name='sign-up')\n", (143, 178), False, 'from django.urls import path\n'), ((184, 221), 'django.urls.path', 'path', (['"""logout"""', 'logout'], {'name': '"""logout"""'}), "('logout', logout, name='logout')\n", (188, 221), False, 'from django.urls import path\n')] |
import unittest
from solution import solution_part_one, solution_part_two
class TestPartOne(unittest.TestCase):
def test_one(self):
self.assertEqual(solution_part_one([0, 2, 7, 1]), 5)
class TestPartTwo(unittest.TestCase):
def test_one(self):
self.assertEqual(solution_part_two([0, 2, 7, 1]), 4)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"solution.solution_part_one",
"solution.solution_part_two"
] | [((357, 372), 'unittest.main', 'unittest.main', ([], {}), '()\n', (370, 372), False, 'import unittest\n'), ((164, 195), 'solution.solution_part_one', 'solution_part_one', (['[0, 2, 7, 1]'], {}), '([0, 2, 7, 1])\n', (181, 195), False, 'from solution import solution_part_one, solution_part_two\n'), ((289, 320), 'solution.solution_part_two', 'solution_part_two', (['[0, 2, 7, 1]'], {}), '([0, 2, 7, 1])\n', (306, 320), False, 'from solution import solution_part_one, solution_part_two\n')] |
import numpy as np
'''
- Name: format_output
- Parameter(s):
- analysis: SPICE simulation result
- simulation_mode: Type of simulation (operating_pint, transient, etc)
- Description:
Receives a raw SPICE simulation result and creates a dictionary with a key/value pair for each node
'''
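# Added example (the node name 'out' is purely illustrative): after a transient
# run,
#   voltages, currents = format_output(analysis, 'transient')
# every node/branch is keyed by its name and can be plotted against
# voltages['time'].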
def format_output(analysis, simulation_mode):
voltages = {}
currents ={}
# Loop through nodes
for node in analysis.nodes.values():
data_label = str(node) # Extract node name
if simulation_mode == 'operating_point':
voltages[data_label] = float(node)
else:
voltages[data_label] = np.array(node)
# Loop through branches
for branch in analysis.branches.values():
        data_label = str(branch)  # Extract branch name
        if simulation_mode == 'operating_point':
            currents[data_label] = float(branch)  # fixed: was float(node)
else:
currents[data_label] = np.array(branch)
# If the simulation mode is "transient", we also return time
if simulation_mode == 'transient':
t = []
for val in analysis.time:
t.append(val)
voltages['time'] = np.array(t)
currents['time'] = np.array(t)
return voltages, currents | [
"numpy.array"
] | [((1168, 1179), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (1176, 1179), True, 'import numpy as np\n'), ((1207, 1218), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (1215, 1218), True, 'import numpy as np\n'), ((646, 660), 'numpy.array', 'np.array', (['node'], {}), '(node)\n', (654, 660), True, 'import numpy as np\n'), ((939, 955), 'numpy.array', 'np.array', (['branch'], {}), '(branch)\n', (947, 955), True, 'import numpy as np\n')] |
import argparse
import math
from functools import partial
from multiprocessing import Pool, Manager
from os.path import join
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import utils.data_utils as du
import utils.func_utils as fu
from config import MovieQAPath
_mp = MovieQAPath()
dataset_dir = _mp.dataset_dir
class Args(object):
def __init__(self):
pass
def find_max_length(qa):
q_max, a_max = 0, 0
for ins in qa:
if q_max < len(ins['question']):
q_max = len(ins['question'])
for a in ins['answers']:
if a_max < len(a):
a_max = len(a)
return q_max, a_max
def create_one_tfrecord(qa, args, video_data, shard_id):
num_shards = int(math.ceil(len(qa) / float(args.num_per_shards)))
start_ndx = shard_id * args.num_per_shards
end_ndx = min((shard_id + 1) * args.num_per_shards, len(qa))
output_filename = join(dataset_dir, '%s-%d-of-%d.tfrecord' % (args.split, shard_id + 1, num_shards))
fu.safe_remove(output_filename)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
for idx in range(start_ndx, end_ndx):
ins = qa[idx]
ques = du.pad_list_numpy(ins['question'], args.q_max)
ans = du.pad_list_numpy(ins['answers'], args.a_max)
video_list = sorted(list(video_data[ins['imdb_key']].keys()))
num_frame = sum([int(math.ceil(video_data[ins['imdb_key']][v]['real_frames'] / 15))
for v in video_list])
spectrum = np.zeros(num_frame, dtype=np.int64)
index = 0
for v in video_list:
num = int(math.ceil(video_data[ins['imdb_key']][v]['real_frames'] / 15))
if v in ins['video_clips']:
                    spectrum[index:(index + num)] = 1  # spectrum is 1-D; indexing [idx] first was a bug
index += num
feature_lists = tf.train.FeatureLists(feature_list={
"ans": du.feature_list(ans, 'int'),
"spec": du.feature_list(spectrum, 'int')
})
feature = {
"ques": du.feature(ques, 'int'),
"ql": du.feature(len(ins['question']), 'int'),
"al": du.feature([len(a) for a in ins['answers']], 'int'),
"subt": du.feature(join(_mp.encode_dir, ins['imdb_key'] + '.npy').encode(), 'string'),
"feat": du.feature(join(_mp.feature_dir, ins['imdb_key'] + '.npy').encode(), 'string')
}
# if 'subt' in args.mode:
# feature['subt'] = du.feature(join(_mp.encode_dir, ins['imdb_key'] + '.npz').encode(), 'string')
# if 'feat' in args.mode:
# feature['feat'] = du.feature(join(_mp.feature_dir, ins['imdb_key'] + '.npy').encode(), 'string')
if args.split == 'train' or args.split == 'val':
feature['gt'] = du.feature(ins['correct_index'], 'int')
context = tf.train.Features(feature=feature)
example = tf.train.SequenceExample(context=context, feature_lists=feature_lists)
tfrecord_writer.write(example.SerializeToString())
def create_tfrecord(encode_qa, split, mode, num_per_shards):
split_qa = [qa for qa in encode_qa if split in qa['qid']]
fu.make_dirs(dataset_dir)
args = Args()
args.q_max, args.a_max = find_max_length(encode_qa)
manager = Manager()
split_qa = manager.list(split_qa)
video_data = manager.dict(du.json_load(_mp.video_data_file))
args.split = split
args.mode = mode
args.num_per_shards = num_per_shards
func = partial(create_one_tfrecord, split_qa, args, video_data)
num_shards = int(math.ceil(len(split_qa) / float(num_per_shards)))
with Pool(4) as pool, tqdm(total=num_shards, desc='Create %s Tfrecord' % split) as pbar:
for _ in pool.imap_unordered(func, list(range(num_shards))):
pbar.update()
def count(encode_qa):
print(len([qa for qa in encode_qa if 'train' in qa['qid']]))
print(len([qa for qa in encode_qa if 'val' in qa['qid']]))
print(len([qa for qa in encode_qa if 'tests' in qa['qid']]))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--split', default='train/val/tests', help='Which split we want to make.')
parser.add_argument('--num_per_shards', default=32, help='Number of shards.', type=int)
parser.add_argument('--count', action='store_true', help='Count the number of qa.')
parser.add_argument('--mode', default='subt+feat', help='Create records with only subtitle.')
return parser.parse_args()
def main():
args = parse_args()
split = args.split
encode_qa = du.json_load(_mp.encode_qa_file)
if args.count:
count(encode_qa)
else:
if 'train' in split:
create_tfrecord(encode_qa, 'train', args.mode, args.num_per_shards)
if 'val' in split:
create_tfrecord(encode_qa, 'val', args.mode, args.num_per_shards)
if 'tests' in split:
create_tfrecord(encode_qa, 'tests', args.mode, args.num_per_shards)
if __name__ == '__main__':
main()
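# Invocation sketch (added; the script name is not shown in this file):
#   python <this_script>.py --split train/val --num_per_shards 32 --mode subt+feat
# writes <split>-<i>-of-<n>.tfrecord shards into _mp.dataset_dir.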
| [
"math.ceil",
"argparse.ArgumentParser",
"utils.func_utils.make_dirs",
"utils.data_utils.feature_list",
"tqdm.tqdm",
"os.path.join",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.SequenceExample",
"config.MovieQAPath",
"numpy.zeros",
"functools.partial",
"multiprocessing.Pool",
"tensorflow.train.Features",
"utils.data_utils.json_load",
"multiprocessing.Manager",
"utils.data_utils.pad_list_numpy",
"utils.data_utils.feature",
"utils.func_utils.safe_remove"
] | [((290, 303), 'config.MovieQAPath', 'MovieQAPath', ([], {}), '()\n', (301, 303), False, 'from config import MovieQAPath\n'), ((928, 1014), 'os.path.join', 'join', (['dataset_dir', "('%s-%d-of-%d.tfrecord' % (args.split, shard_id + 1, num_shards))"], {}), "(dataset_dir, '%s-%d-of-%d.tfrecord' % (args.split, shard_id + 1,\n num_shards))\n", (932, 1014), False, 'from os.path import join\n'), ((1016, 1047), 'utils.func_utils.safe_remove', 'fu.safe_remove', (['output_filename'], {}), '(output_filename)\n', (1030, 1047), True, 'import utils.func_utils as fu\n'), ((3291, 3316), 'utils.func_utils.make_dirs', 'fu.make_dirs', (['dataset_dir'], {}), '(dataset_dir)\n', (3303, 3316), True, 'import utils.func_utils as fu\n'), ((3408, 3417), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (3415, 3417), False, 'from multiprocessing import Pool, Manager\n'), ((3619, 3675), 'functools.partial', 'partial', (['create_one_tfrecord', 'split_qa', 'args', 'video_data'], {}), '(create_one_tfrecord, split_qa, args, video_data)\n', (3626, 3675), False, 'from functools import partial\n'), ((4186, 4211), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4209, 4211), False, 'import argparse\n'), ((4698, 4730), 'utils.data_utils.json_load', 'du.json_load', (['_mp.encode_qa_file'], {}), '(_mp.encode_qa_file)\n', (4710, 4730), True, 'import utils.data_utils as du\n'), ((1058, 1102), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_filename'], {}), '(output_filename)\n', (1085, 1102), True, 'import tensorflow as tf\n'), ((3487, 3520), 'utils.data_utils.json_load', 'du.json_load', (['_mp.video_data_file'], {}), '(_mp.video_data_file)\n', (3499, 3520), True, 'import utils.data_utils as du\n'), ((3757, 3764), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (3761, 3764), False, 'from multiprocessing import Pool, Manager\n'), ((3774, 3831), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_shards', 'desc': "('Create %s Tfrecord' % split)"}), "(total=num_shards, desc='Create %s Tfrecord' % split)\n", (3778, 3831), False, 'from tqdm import tqdm\n'), ((1214, 1260), 'utils.data_utils.pad_list_numpy', 'du.pad_list_numpy', (["ins['question']", 'args.q_max'], {}), "(ins['question'], args.q_max)\n", (1231, 1260), True, 'import utils.data_utils as du\n'), ((1279, 1324), 'utils.data_utils.pad_list_numpy', 'du.pad_list_numpy', (["ins['answers']", 'args.a_max'], {}), "(ins['answers'], args.a_max)\n", (1296, 1324), True, 'import utils.data_utils as du\n'), ((1571, 1606), 'numpy.zeros', 'np.zeros', (['num_frame'], {'dtype': 'np.int64'}), '(num_frame, dtype=np.int64)\n', (1579, 1606), True, 'import numpy as np\n'), ((2970, 3004), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (2987, 3004), True, 'import tensorflow as tf\n'), ((3027, 3097), 'tensorflow.train.SequenceExample', 'tf.train.SequenceExample', ([], {'context': 'context', 'feature_lists': 'feature_lists'}), '(context=context, feature_lists=feature_lists)\n', (3051, 3097), True, 'import tensorflow as tf\n'), ((2123, 2146), 'utils.data_utils.feature', 'du.feature', (['ques', '"""int"""'], {}), "(ques, 'int')\n", (2133, 2146), True, 'import utils.data_utils as du\n'), ((2907, 2946), 'utils.data_utils.feature', 'du.feature', (["ins['correct_index']", '"""int"""'], {}), "(ins['correct_index'], 'int')\n", (2917, 2946), True, 'import utils.data_utils as du\n'), ((1689, 1750), 'math.ceil', 'math.ceil', (["(video_data[ins['imdb_key']][v]['real_frames'] / 15)"], {}), 
"(video_data[ins['imdb_key']][v]['real_frames'] / 15)\n", (1698, 1750), False, 'import math\n'), ((1434, 1495), 'math.ceil', 'math.ceil', (["(video_data[ins['imdb_key']][v]['real_frames'] / 15)"], {}), "(video_data[ins['imdb_key']][v]['real_frames'] / 15)\n", (1443, 1495), False, 'import math\n'), ((1973, 2000), 'utils.data_utils.feature_list', 'du.feature_list', (['ans', '"""int"""'], {}), "(ans, 'int')\n", (1988, 2000), True, 'import utils.data_utils as du\n'), ((2026, 2058), 'utils.data_utils.feature_list', 'du.feature_list', (['spectrum', '"""int"""'], {}), "(spectrum, 'int')\n", (2041, 2058), True, 'import utils.data_utils as du\n'), ((2321, 2367), 'os.path.join', 'join', (['_mp.encode_dir', "(ins['imdb_key'] + '.npy')"], {}), "(_mp.encode_dir, ins['imdb_key'] + '.npy')\n", (2325, 2367), False, 'from os.path import join\n'), ((2424, 2471), 'os.path.join', 'join', (['_mp.feature_dir', "(ins['imdb_key'] + '.npy')"], {}), "(_mp.feature_dir, ins['imdb_key'] + '.npy')\n", (2428, 2471), False, 'from os.path import join\n')] |
#!/usr/bin/python3
""" Utility for neding hshdumps to http://cracker.offensive-security.com/
"""
import argparse
from html.parser import HTMLParser
import sys
import requests as req
URL = "http://cracker.offensive-security.com/insert.php"
class MLStripper(HTMLParser):
""" Parser class used to strip HTML tags from server response
"""
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def check_hash(priority_code, lm_hash):
""" Function used to send crack requests to cracked app
"""
if not lm_hash:
raise ValueError("Will not submit invalid hash: <%s>" % lm_hash)
data = {"type": "lm", "method": "table"}
data["priority"] = str(priority_code)
data["hash"] = lm_hash
result = req.post(URL, data=data)
if len(result.text) > 512:
raise RuntimeError("Recieved bad response from service (too long)")
s = MLStripper()
s.feed(result.text)
return s.get_data().strip()
def parse_line(line):
""" Function used to parse LM+NT hash(es) from input line
"""
parts = line.strip().split(":")
if len(parts) == 1:
return parts[0]
elif len(parts) == 2:
if parts[0] and parts[1]:
return ":".join(parts)
elif len(parts) >= 4:
return ":".join(parts[2:4])
raise ValueError("Could not parse hash(es) from input: <%s>" % line)
def crack_input(priority_code, line):
""" Function used to coordinate crack requests
"""
try:
hash_val = parse_line(line)
except ValueError as err:
print(err)
return
try:
passwd = check_hash(priority_code, hash_val)
except ValueError as err:
print(err)
return
except RuntimeError as err:
print(err)
return
print_result(line, passwd)
def print_result(hash_in, passwd_out):
""" Funtion userd to print result to console
"""
print("%s\n\t=> %s" % (hash_in, passwd_out))
def main():
""" Main function for handling user arguments
"""
parser = argparse.ArgumentParser(description='Check windows hashdumps against http://cracker.offensive-security.com')
parser.add_argument('priority_code', help='Priority code provided by PWK course console')
parser.add_argument('hash_dump', default='-', nargs='?',
help='LM/NTLM hash to be sent to cracker; default reads from STDIN')
args = parser.parse_args()
if args.hash_dump == "-":
for line in sys.stdin.readlines():
crack_input(args.priority_code, line.strip())
else:
crack_input(args.priority_code, args.hash_dump)
if __name__ == "__main__":
main()
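# Added example: a pwdump-style STDIN line handled by parse_line (values are
# illustrative):
#   Administrator:500:aad3b435b51404ee...:31d6cfe0d16ae931...:::
# fields 3 and 4 (the LM and NT hashes) are kept and re-joined with ':'.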
| [
"sys.stdin.readlines",
"requests.post",
"argparse.ArgumentParser"
] | [((924, 948), 'requests.post', 'req.post', (['URL'], {'data': 'data'}), '(URL, data=data)\n', (932, 948), True, 'import requests as req\n'), ((2206, 2319), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Check windows hashdumps against http://cracker.offensive-security.com"""'}), "(description=\n 'Check windows hashdumps against http://cracker.offensive-security.com')\n", (2229, 2319), False, 'import argparse\n'), ((2633, 2654), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (2652, 2654), False, 'import sys\n')] |
from django.apps import apps
from django.test import Client, RequestFactory, TestCase
from django.urls import reverse
from goutdotcom.users.tests.factories import UserFactory
from goutdotcom.vitals.models import Height, Weight
from goutdotcom.vitals.tests.factories import WeightFactory
from ..views import IndexView, VitalCreate, VitalDetail
class TestDetailView(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = UserFactory()
self.user2 = UserFactory()
self.weight = WeightFactory(user=self.user)
self.detail_url = reverse('vitals:detail', kwargs={'vital':self.weight.name, 'pk':self.weight.pk})
def test_detail(self):
### request detail_url from reverse on fake Weight object above
request = self.factory.get(self.detail_url)
request.user = self.user
### response with fake Weight object's name, pk for VitalDetail view
response = VitalDetail.as_view()(request, vital=self.weight.name, pk=self.weight.pk)
self.assertEqual(response.status_code, 200)
def test_get_object(self):
request = self.factory.get(self.detail_url)
request.user = self.user
        ### response with fake Weight object's name, pk for VitalDetail view
view = VitalDetail(kwargs={'vital':self.weight.name, 'pk':self.weight.pk})
view.model = apps.get_model('vitals', model_name=view.kwargs['vital'])
view.request = request
queryset = view.get_queryset()
self.assertQuerysetEqual(queryset, Weight.objects.filter(pk=self.weight.pk), transform=lambda x: x)
def test_get_404(self):
request = self.factory.get(self.detail_url)
request.user = self.user2
### response with fake Weight object's name, pk for VitalDetail view
        response = VitalDetail.as_view()(request, vital=self.weight.name, pk=self.weight.pk)
self.assertEqual(response.status_code, 404)
def test_get_template_names(self):
request = self.factory.get(self.detail_url)
request.user = self.user
### response with fake Weight object's name, pk for VitalDetail view
view = VitalDetail(kwargs={'vital':self.weight.name, 'pk':self.weight.pk})
view.request = request
template = view.get_template_names()
self.assertEqual(template, 'vitals/vital_detail.html')
class TestIndexView(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = UserFactory()
self.weight = WeightFactory(user=self.user)
def test_get_context_data(self):
request = self.factory.get('/vitals/index')
request.user = self.user
response = IndexView.as_view()(request)
self.assertIsInstance(response.context_data, dict)
self.assertIn('weight_list', response.context_data)
self.assertIn('height_list', response.context_data)
class TestVitalCreate(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = UserFactory()
self.create_url = reverse('vitals:create', kwargs={'vital': 'weight'})
def test_get_form_class(self):
request = self.factory.get(self.create_url)
request.user = self.user
view = VitalCreate(kwargs={'vital':'weight'})
view.request = request
#form_class = view.get_form_class()
response = VitalCreate.as_view()(request, vital='weight')
        self.assertEqual(response.context_data["form"].__class__.__name__, "WeightForm")
def test_get_template_names(self):
request = self.factory.get(self.create_url)
request.user = self.user
### response with fake Weight object's name, pk for VitalDetail view
view = VitalCreate(kwargs={'vital':'weight'})
view.request = request
template = view.get_template_names()
self.assertEqual(template, 'vitals/vital_form.html')
def test_get_context_data(self):
request = self.factory.get('/vitals/weight/create')
request.user = self.user
response = VitalCreate.as_view()(request, vital='weight')
self.assertIsInstance(response.context_data, dict)
self.assertIn('vital', response.context_data)
| [
"django.test.RequestFactory",
"goutdotcom.vitals.models.Weight.objects.filter",
"django.urls.reverse",
"goutdotcom.vitals.tests.factories.WeightFactory",
"goutdotcom.users.tests.factories.UserFactory",
"django.apps.apps.get_model"
] | [((423, 439), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (437, 439), False, 'from django.test import Client, RequestFactory, TestCase\n'), ((460, 473), 'goutdotcom.users.tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (471, 473), False, 'from goutdotcom.users.tests.factories import UserFactory\n'), ((495, 508), 'goutdotcom.users.tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (506, 508), False, 'from goutdotcom.users.tests.factories import UserFactory\n'), ((531, 560), 'goutdotcom.vitals.tests.factories.WeightFactory', 'WeightFactory', ([], {'user': 'self.user'}), '(user=self.user)\n', (544, 560), False, 'from goutdotcom.vitals.tests.factories import WeightFactory\n'), ((587, 674), 'django.urls.reverse', 'reverse', (['"""vitals:detail"""'], {'kwargs': "{'vital': self.weight.name, 'pk': self.weight.pk}"}), "('vitals:detail', kwargs={'vital': self.weight.name, 'pk': self.\n weight.pk})\n", (594, 674), False, 'from django.urls import reverse\n'), ((1384, 1441), 'django.apps.apps.get_model', 'apps.get_model', (['"""vitals"""'], {'model_name': "view.kwargs['vital']"}), "('vitals', model_name=view.kwargs['vital'])\n", (1398, 1441), False, 'from django.apps import apps\n'), ((2484, 2500), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (2498, 2500), False, 'from django.test import Client, RequestFactory, TestCase\n'), ((2521, 2534), 'goutdotcom.users.tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (2532, 2534), False, 'from goutdotcom.users.tests.factories import UserFactory\n'), ((2557, 2586), 'goutdotcom.vitals.tests.factories.WeightFactory', 'WeightFactory', ([], {'user': 'self.user'}), '(user=self.user)\n', (2570, 2586), False, 'from goutdotcom.vitals.tests.factories import WeightFactory\n'), ((3015, 3031), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (3029, 3031), False, 'from django.test import Client, RequestFactory, TestCase\n'), ((3052, 3065), 'goutdotcom.users.tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (3063, 3065), False, 'from goutdotcom.users.tests.factories import UserFactory\n'), ((3092, 3144), 'django.urls.reverse', 'reverse', (['"""vitals:create"""'], {'kwargs': "{'vital': 'weight'}"}), "('vitals:create', kwargs={'vital': 'weight'})\n", (3099, 3144), False, 'from django.urls import reverse\n'), ((1555, 1595), 'goutdotcom.vitals.models.Weight.objects.filter', 'Weight.objects.filter', ([], {'pk': 'self.weight.pk'}), '(pk=self.weight.pk)\n', (1576, 1595), False, 'from goutdotcom.vitals.models import Height, Weight\n')] |
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.7.5
#
# <NAME>
# (c) 2019
#
# Licence APL2.0
#
###########################################################
# standard libraries
import logging
# external packages
import PyQt5
# local imports
class Remote(PyQt5.QtCore.QObject):
"""
The class Remote inherits all information and handling of remotely controlling
mountwizzard 4.
>>> fw = Remote( app=None,
>>> )
"""
__all__ = ['Remote',
'startRemote',
'stopRemote',
]
logger = logging.getLogger(__name__)
def __init__(self,
app=None,
):
super().__init__()
self.app = app
self.clientConnection = None
self.tcpServer = None
def startRemote(self):
"""
startRemote prepares the remote listening by starting a tcp server listening
on localhost and port 3490.
:return: success
"""
if self.tcpServer is not None:
return False
self.tcpServer = PyQt5.QtNetwork.QTcpServer(self)
hostAddress = PyQt5.QtNetwork.QHostAddress('127.0.0.1')
if not self.tcpServer.listen(hostAddress, 3490):
self.logger.warning('Port already in use')
self.tcpServer = None
return False
else:
self.logger.info('Remote access enabled')
self.tcpServer.newConnection.connect(self.addConnection)
return True
def stopRemote(self):
"""
stopRemote kills all connections and stops the tcpServer
:return: true for test purpose
"""
if self.clientConnection is not None:
self.clientConnection.close()
if self.tcpServer is not None:
self.tcpServer = None
return True
def addConnection(self):
"""
addConnection allows a new connection for remote access to mw4 only one connection
is allowed.
:return: success
"""
if self.tcpServer is None:
return False
self.clientConnection = self.tcpServer.nextPendingConnection()
        if self.clientConnection is None:  # nextPendingConnection() yields None when empty
self.logger.error('Cannot establish incoming connection')
return False
self.clientConnection.nextBlockSize = 0
self.clientConnection.readyRead.connect(self.receiveMessage)
self.clientConnection.disconnected.connect(self.removeConnection)
self.clientConnection.error.connect(self.handleError)
connection = self.clientConnection.peerAddress().toString()
self.logger.info(f'Connection to MountWizzard from {connection}')
return True
def receiveMessage(self):
"""
receiveMessage is the command dispatcher for remote access
:return: success
"""
if self.clientConnection.bytesAvailable() == 0:
return False
validCommands = ['shutdown',
'shutdown mount',
'boot mount',
]
connection = self.clientConnection.peerAddress().toString()
command = str(self.clientConnection.read(100), "ascii")
command = command.replace('\n', '')
command = command.replace('\r', '')
self.logger.info(f'Command {command} from {connection} received')
if command in validCommands:
self.app.remoteCommand.emit(command)
else:
self.logger.error(f'Unknown command {command} from {connection} received')
return True
def removeConnection(self):
"""
removeConnection clear the existing connection
:return: true for test purpose
"""
connection = self.clientConnection.peerAddress().toString()
self.clientConnection.close()
self.logger.info(f'Connection from {connection} closed')
return True
def handleError(self, socketError):
"""
handleError does error handling -> writing to log
:param socketError:
:return: true for test purpose
"""
connection = self.clientConnection.peerAddress().toString()
self.logger.error(f'Connection from {connection} failed, error: {socketError}')
return True
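# Added client-side sketch (not part of the original module): once startRemote()
# is listening, any TCP client can send one of validCommands, e.g.:
#   import socket
#   with socket.create_connection(('127.0.0.1', 3490)) as s:
#       s.sendall(b'boot mount\n')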
| [
"logging.getLogger",
"PyQt5.QtNetwork.QHostAddress",
"PyQt5.QtNetwork.QTcpServer"
] | [((870, 897), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (887, 897), False, 'import logging\n'), ((1377, 1409), 'PyQt5.QtNetwork.QTcpServer', 'PyQt5.QtNetwork.QTcpServer', (['self'], {}), '(self)\n', (1403, 1409), False, 'import PyQt5\n'), ((1432, 1473), 'PyQt5.QtNetwork.QHostAddress', 'PyQt5.QtNetwork.QHostAddress', (['"""127.0.0.1"""'], {}), "('127.0.0.1')\n", (1460, 1473), False, 'import PyQt5\n')] |
'''
MIT License
Copyright (c) 2021 Futurewei Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import unittest
import psycopg2
from helper import commitSQL, selectOneRecord, getConn
class TestCompoundKey(unittest.TestCase):
sharedConn = None
@classmethod
def setUpClass(cls):
cls.sharedConn = getConn()
commitSQL(cls.sharedConn, "CREATE TABLE compoundkey (id integer, idstr text, id3 integer, dataA integer, PRIMARY KEY(id, idstr, id3));")
commitSQL(cls.sharedConn, "CREATE TABLE compoundkeyintint (id integer, id2 integer, dataA integer, PRIMARY KEY(id, id2));")
commitSQL(cls.sharedConn, "CREATE TABLE compoundkeytxttxt (id text, id2 text, dataA integer, PRIMARY KEY(id, id2));")
commitSQL(cls.sharedConn, "CREATE TABLE compoundkeyboolint (id bool, id2 integer, dataA integer, PRIMARY KEY(id, id2));")
@classmethod
def tearDownClass(cls):
# TODO delete table
cls.sharedConn.close()
def test_prefixScanThreeKeys(self):
# Populate some records for the tests
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
for i in range(1, 11):
cur.execute("INSERT INTO compoundkey VALUES (1, 'sometext', %s, 1);", (i,))
for i in range(1, 11):
cur.execute("INSERT INTO compoundkey VALUES (2, 'someothertext', %s, 2);", (i,))
for i in range(1, 11):
cur.execute("INSERT INTO compoundkey VALUES (3, 'somemoretext', %s, 3);", (i,))
# Prefix scan with first two keys specified with =
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
cur.execute("SELECT * FROM compoundkey WHERE id = 2 AND idstr = 'someothertext';")
for i in range(1, 11):
record = cur.fetchone()
self.assertNotEqual(record, None)
self.assertEqual(record[0], 2)
self.assertEqual(record[1], "someothertext")
self.assertEqual(record[2], i)
self.assertEqual(record[3], 2)
# Parital Prefix scan with keys specified by inequality
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
cur.execute("SELECT * FROM compoundkey WHERE id >= 3 AND id3 > 1;")
for i in range(2, 11):
record = cur.fetchone()
self.assertNotEqual(record, None)
self.assertEqual(record[0], 3)
self.assertEqual(record[1], "somemoretext")
self.assertEqual(record[2], i)
self.assertEqual(record[3], 3)
# Partial prefix scan with extra filter that is not a prefix
record = selectOneRecord(self.sharedConn, "SELECT * FROM compoundkey WHERE id = 1 AND id3 = 5;")
self.assertEqual(record[0], 1)
self.assertEqual(record[1], "sometext")
self.assertEqual(record[2], 5)
self.assertEqual(record[3], 1)
def test_prefixScanIntInt(self):
# Populate some records for the tests
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
for i in range(1, 11):
cur.execute("INSERT INTO compoundkeyintint VALUES (1, %s, 1);", (i,))
for i in range(1, 11):
cur.execute("INSERT INTO compoundkeyintint VALUES (2, %s, 2);", (i,))
for i in range(1, 11):
cur.execute("INSERT INTO compoundkeyintint VALUES (3, %s, 3);", (i,))
# Prefix scan with first key specified with =
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
cur.execute("SELECT * FROM compoundkeyintint WHERE id = 2;")
for i in range(1, 11):
record = cur.fetchone()
self.assertNotEqual(record, None)
self.assertEqual(record[0], 2)
self.assertEqual(record[1], i)
self.assertEqual(record[2], 2)
def test_prefixScanTxtTxt(self):
# Populate some records for the tests
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
for i in range(1, 11):
cur.execute("INSERT INTO compoundkeytxttxt VALUES ('1', %s, 1);", (str(i),))
for i in range(1, 11):
cur.execute("INSERT INTO compoundkeytxttxt VALUES ('2', %s, 2);", (str(i),))
for i in range(1, 11):
cur.execute("INSERT INTO compoundkeytxttxt VALUES ('3', %s, 3);", (str(i),))
# Prefix scan with first key specified with =
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
cur.execute("SELECT * FROM compoundkeytxttxt WHERE id = '2';")
# The result set is sorted lexographically on the second text key, so here
# just check that each key is present
keys = [str(i) for i in range(1,11)]
for i in range(1, 11):
record = cur.fetchone()
self.assertNotEqual(record, None)
self.assertEqual(record[0], '2')
                    self.assertEqual(record[1] in keys, True)
                    keys.remove(record[1])
self.assertEqual(record[2], 2)
def test_prefixScanBoolInt(self):
# Populate some records for the tests
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
for i in range(1, 11):
cur.execute("INSERT INTO compoundkeyboolint VALUES (TRUE, %s, 1);", (i,))
for i in range(1, 11):
cur.execute("INSERT INTO compoundkeyboolint VALUES (FALSE, %s, 2);", (i,))
# Prefix scan with first key specified with =
with self.sharedConn: # commits at end of context if no errors
with self.sharedConn.cursor() as cur:
cur.execute("SELECT * FROM compoundkeyboolint WHERE id = FALSE;")
for i in range(1, 11):
record = cur.fetchone()
self.assertNotEqual(record, None)
self.assertEqual(record[0], False)
self.assertEqual(record[1], i)
self.assertEqual(record[2], 2)
| [
"helper.commitSQL",
"helper.selectOneRecord",
"helper.getConn"
] | [((1301, 1310), 'helper.getConn', 'getConn', ([], {}), '()\n', (1308, 1310), False, 'from helper import commitSQL, selectOneRecord, getConn\n'), ((1319, 1464), 'helper.commitSQL', 'commitSQL', (['cls.sharedConn', '"""CREATE TABLE compoundkey (id integer, idstr text, id3 integer, dataA integer, PRIMARY KEY(id, idstr, id3));"""'], {}), "(cls.sharedConn,\n 'CREATE TABLE compoundkey (id integer, idstr text, id3 integer, dataA integer, PRIMARY KEY(id, idstr, id3));'\n )\n", (1328, 1464), False, 'from helper import commitSQL, selectOneRecord, getConn\n'), ((1464, 1596), 'helper.commitSQL', 'commitSQL', (['cls.sharedConn', '"""CREATE TABLE compoundkeyintint (id integer, id2 integer, dataA integer, PRIMARY KEY(id, id2));"""'], {}), "(cls.sharedConn,\n 'CREATE TABLE compoundkeyintint (id integer, id2 integer, dataA integer, PRIMARY KEY(id, id2));'\n )\n", (1473, 1596), False, 'from helper import commitSQL, selectOneRecord, getConn\n'), ((1596, 1722), 'helper.commitSQL', 'commitSQL', (['cls.sharedConn', '"""CREATE TABLE compoundkeytxttxt (id text, id2 text, dataA integer, PRIMARY KEY(id, id2));"""'], {}), "(cls.sharedConn,\n 'CREATE TABLE compoundkeytxttxt (id text, id2 text, dataA integer, PRIMARY KEY(id, id2));'\n )\n", (1605, 1722), False, 'from helper import commitSQL, selectOneRecord, getConn\n'), ((1722, 1852), 'helper.commitSQL', 'commitSQL', (['cls.sharedConn', '"""CREATE TABLE compoundkeyboolint (id bool, id2 integer, dataA integer, PRIMARY KEY(id, id2));"""'], {}), "(cls.sharedConn,\n 'CREATE TABLE compoundkeyboolint (id bool, id2 integer, dataA integer, PRIMARY KEY(id, id2));'\n )\n", (1731, 1852), False, 'from helper import commitSQL, selectOneRecord, getConn\n'), ((3917, 4008), 'helper.selectOneRecord', 'selectOneRecord', (['self.sharedConn', '"""SELECT * FROM compoundkey WHERE id = 1 AND id3 = 5;"""'], {}), "(self.sharedConn,\n 'SELECT * FROM compoundkey WHERE id = 1 AND id3 = 5;')\n", (3932, 4008), False, 'from helper import commitSQL, selectOneRecord, getConn\n')] |
# Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
import numpy as np
import pytest
import hoomd
import hoomd.conftest
@pytest.fixture
def filter_list():
return [
hoomd.filter.All(),
hoomd.filter.Tags([1, 2, 3]),
hoomd.filter.Type(["A"])
]
def test_initialization_setting(filter_list):
filter_updater = hoomd.update.FilterUpdater(1, [])
assert filter_updater.trigger == hoomd.trigger.Periodic(1)
assert filter_updater.filters == []
filter_updater.filters.extend(filter_list)
assert len(filter_updater.filters) == 3
assert filter_list == filter_updater.filters
filter_updater = hoomd.update.FilterUpdater(5, filter_list)
assert filter_updater.trigger == hoomd.trigger.Periodic(5)
assert len(filter_updater.filters) == 3
assert filter_list == filter_updater.filters
filter_updater.trigger = hoomd.trigger.After(100)
assert filter_updater.trigger == hoomd.trigger.After(100)
@pytest.fixture
def filter_updater(filter_list):
return hoomd.update.FilterUpdater(1, filter_list)
@pytest.fixture(scope="function")
def simulation(lattice_snapshot_factory, simulation_factory, filter_list):
sim = simulation_factory(
lattice_snapshot_factory(particle_types=["A", "B"]))
# place filters in state list manually to enable updating the particle
# groups.
for filter_ in filter_list:
sim.state._get_group(filter_)
return sim
def test_attaching(simulation, filter_updater):
simulation.operations += filter_updater
trigger = filter_updater.trigger
filters = filter_updater.filters
simulation.run(0)
assert trigger == filter_updater.trigger
assert filters == filter_updater.filters
assert filter_updater._cpp_obj is not None
assert filter_updater._attached
def assert_group_match(filter_, state, mpi=False):
filter_tags = set(filter_(state))
group_tags = set(state._get_group(filter_).member_tags)
# On MPI simulations, the group tags won't exactly match since they include
# particles from every rank, so two checks are necessary. One that no
# particles in the filters tags are not in the groups tags (below), and that
# all local tags in group tags are in filter tags (2nd check).
assert filter_tags - group_tags == set()
if not mpi:
return
NOT_LOCAL = 4294967295
    with state.cpu_local_snapshot as snapshot:
        # sets cannot index numpy arrays, and the check needs an assert to matter
        extra_tags = np.array(sorted(group_tags - filter_tags), dtype=np.uint64)
        assert np.all(snapshot.particles.rtag[extra_tags] == NOT_LOCAL)
def test_updating(simulation, filter_updater, filter_list):
simulation.operations += filter_updater
simulation.run(0)
rng = np.random.default_rng(43)
def modify_typeid(state):
with state.cpu_local_snapshot as snapshot:
Np = len(snapshot.particles.typeid)
indices = rng.choice(Np, max(1, int(Np * 0.1)), replace=False)
values = rng.choice([0, 1], len(indices))
snapshot.particles.typeid[indices] = values
for _ in range(4):
modify_typeid(simulation.state)
simulation.run(1)
for filter_ in filter_list:
assert_group_match(filter_, simulation.state)
def test_pickling(simulation, filter_updater):
hoomd.conftest.operation_pickling_check(filter_updater, simulation)
| [
"hoomd.update.FilterUpdater",
"numpy.random.default_rng",
"hoomd.filter.Type",
"hoomd.filter.Tags",
"hoomd.conftest.operation_pickling_check",
"hoomd.filter.All",
"pytest.fixture",
"numpy.all",
"hoomd.trigger.After",
"hoomd.trigger.Periodic"
] | [((1148, 1180), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1162, 1180), False, 'import pytest\n'), ((426, 459), 'hoomd.update.FilterUpdater', 'hoomd.update.FilterUpdater', (['(1)', '[]'], {}), '(1, [])\n', (452, 459), False, 'import hoomd\n'), ((725, 767), 'hoomd.update.FilterUpdater', 'hoomd.update.FilterUpdater', (['(5)', 'filter_list'], {}), '(5, filter_list)\n', (751, 767), False, 'import hoomd\n'), ((953, 977), 'hoomd.trigger.After', 'hoomd.trigger.After', (['(100)'], {}), '(100)\n', (972, 977), False, 'import hoomd\n'), ((1102, 1144), 'hoomd.update.FilterUpdater', 'hoomd.update.FilterUpdater', (['(1)', 'filter_list'], {}), '(1, filter_list)\n', (1128, 1144), False, 'import hoomd\n'), ((2704, 2729), 'numpy.random.default_rng', 'np.random.default_rng', (['(43)'], {}), '(43)\n', (2725, 2729), True, 'import numpy as np\n'), ((3282, 3349), 'hoomd.conftest.operation_pickling_check', 'hoomd.conftest.operation_pickling_check', (['filter_updater', 'simulation'], {}), '(filter_updater, simulation)\n', (3321, 3349), False, 'import hoomd\n'), ((260, 278), 'hoomd.filter.All', 'hoomd.filter.All', ([], {}), '()\n', (276, 278), False, 'import hoomd\n'), ((288, 316), 'hoomd.filter.Tags', 'hoomd.filter.Tags', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (305, 316), False, 'import hoomd\n'), ((326, 350), 'hoomd.filter.Type', 'hoomd.filter.Type', (["['A']"], {}), "(['A'])\n", (343, 350), False, 'import hoomd\n'), ((497, 522), 'hoomd.trigger.Periodic', 'hoomd.trigger.Periodic', (['(1)'], {}), '(1)\n', (519, 522), False, 'import hoomd\n'), ((805, 830), 'hoomd.trigger.Periodic', 'hoomd.trigger.Periodic', (['(5)'], {}), '(5)\n', (827, 830), False, 'import hoomd\n'), ((1015, 1039), 'hoomd.trigger.After', 'hoomd.trigger.After', (['(100)'], {}), '(100)\n', (1034, 1039), False, 'import hoomd\n'), ((2495, 2565), 'numpy.all', 'np.all', (['(snapshot.particles.rtag[group_tags - filter_tags] == NOT_LOCAL)'], {}), '(snapshot.particles.rtag[group_tags - filter_tags] == NOT_LOCAL)\n', (2501, 2565), True, 'import numpy as np\n')] |
import pytest
import torch
import models.bnn as bnn
@pytest.mark.parametrize("local_reparam", [True, False])
def test_sampling(local_reparam):
"""Tests that the ffg layer samples from the correct distribution."""
torch.manual_seed(24)
layer = bnn.nn.FFGLinear(2, 3, bias=False, init_sd=0.1, local_reparameterization=local_reparam)
x = torch.randn(1, 2)
mu = x.mm(layer.weight_mean.t())
sd = x.pow(2).mm(layer.weight_sd.pow(2).t()).sqrt()
a = torch.stack([layer(x) for _ in range(1000)])
assert torch.allclose(mu, a.mean(0), atol=1e-2)
assert torch.allclose(sd, a.std(0), atol=1e-2)
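# Added note: for a factorized Gaussian posterior W_ij ~ N(mu_ij, sd_ij^2), the
# pre-activation a = x @ W.T is Gaussian with E[a] = x @ mu.T and
# Var[a] = (x ** 2) @ (sd ** 2).T, which is exactly the `mu`/`sd` pair the
# Monte-Carlo estimates above are compared against.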
def test_init_from_deterministic_params():
layer = bnn.nn.FFGLinear(5, 3)
weight = torch.randn(3, 5)
bias = torch.randn(3)
layer.init_from_deterministic_params({"weight": weight, "bias": bias})
assert torch.allclose(weight, layer.weight_mean)
assert torch.allclose(bias, layer.bias_mean)
def test_init_from_deterministic_params_no_bias():
layer = bnn.nn.FFGLinear(5, 3, bias=False)
weight = torch.randn(3, 5)
layer.init_from_deterministic_params({"weight": weight})
assert torch.allclose(weight, layer.weight_mean)
| [
"torch.manual_seed",
"pytest.mark.parametrize",
"models.bnn.nn.FFGLinear",
"torch.allclose",
"torch.randn"
] | [((58, 113), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""local_reparam"""', '[True, False]'], {}), "('local_reparam', [True, False])\n", (81, 113), False, 'import pytest\n'), ((226, 247), 'torch.manual_seed', 'torch.manual_seed', (['(24)'], {}), '(24)\n', (243, 247), False, 'import torch\n'), ((261, 353), 'models.bnn.nn.FFGLinear', 'bnn.nn.FFGLinear', (['(2)', '(3)'], {'bias': '(False)', 'init_sd': '(0.1)', 'local_reparameterization': 'local_reparam'}), '(2, 3, bias=False, init_sd=0.1, local_reparameterization=\n local_reparam)\n', (277, 353), True, 'import models.bnn as bnn\n'), ((357, 374), 'torch.randn', 'torch.randn', (['(1)', '(2)'], {}), '(1, 2)\n', (368, 374), False, 'import torch\n'), ((683, 705), 'models.bnn.nn.FFGLinear', 'bnn.nn.FFGLinear', (['(5)', '(3)'], {}), '(5, 3)\n', (699, 705), True, 'import models.bnn as bnn\n'), ((719, 736), 'torch.randn', 'torch.randn', (['(3)', '(5)'], {}), '(3, 5)\n', (730, 736), False, 'import torch\n'), ((748, 762), 'torch.randn', 'torch.randn', (['(3)'], {}), '(3)\n', (759, 762), False, 'import torch\n'), ((849, 890), 'torch.allclose', 'torch.allclose', (['weight', 'layer.weight_mean'], {}), '(weight, layer.weight_mean)\n', (863, 890), False, 'import torch\n'), ((902, 939), 'torch.allclose', 'torch.allclose', (['bias', 'layer.bias_mean'], {}), '(bias, layer.bias_mean)\n', (916, 939), False, 'import torch\n'), ((1005, 1039), 'models.bnn.nn.FFGLinear', 'bnn.nn.FFGLinear', (['(5)', '(3)'], {'bias': '(False)'}), '(5, 3, bias=False)\n', (1021, 1039), True, 'import models.bnn as bnn\n'), ((1053, 1070), 'torch.randn', 'torch.randn', (['(3)', '(5)'], {}), '(3, 5)\n', (1064, 1070), False, 'import torch\n'), ((1143, 1184), 'torch.allclose', 'torch.allclose', (['weight', 'layer.weight_mean'], {}), '(weight, layer.weight_mean)\n', (1157, 1184), False, 'import torch\n')] |
from pathlib import Path

oppositors = (
    'abs',
    'modular',
    'quasi',
    'quasi_reflect',
    'over',
    'integers_by_order'
)

result = []
for opp in oppositors:
    result.append(
        f"""
#### `{opp}` oppositor

[Code](tests/op_{opp}.py)


"""
    )

Path('res.txt').write_text(
    '\n\n'.join(result)
)
| [
"pathlib.Path"
] | [((307, 322), 'pathlib.Path', 'Path', (['"""res.txt"""'], {}), "('res.txt')\n", (311, 322), False, 'from pathlib import Path\n')] |
"""Groups all data from sc2 -> ability, unit, upgrades, cost"""
from bisect import bisect_left
from functools import lru_cache, reduce
from typing import List, Optional
from .constants import ZERGLING
from .data import ATTRIBUTE, RACE
from .unit_command import UnitCommand
from .ids.ability_id import AbilityId
from .ids.unit_typeid import UnitTypeId
FREE_MORPH_ABILITY_CATEGORIES = ["Lower", "Raise", "Land", "Lift"]


def split_camel_case(text) -> list:
    """Splits words from CamelCase text."""
    return list(reduce(lambda a, b: (a + [b] if b.isupper() else a[:-1] + [a[-1] + b]), text, []))
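
# Illustrative example (added; not part of the original module):
#   split_camel_case("BarracksTechLabResearch") -> ['Barracks', 'Tech', 'Lab', 'Research']
# The reduce starts a new word at every uppercase letter and appends lowercase
# letters to the word currently being built.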


class GameData:
    """It's the main class of this file: it groups and organizes all the others."""

    def __init__(self, data):
        ids = tuple(a.value for a in AbilityId if a.value != 0)
        self.abilities = {a.ability_id: AbilityData(self, a) for a in data.abilities if a.ability_id in ids}
        self.units = {u.unit_id: UnitTypeData(self, u) for u in data.units if u.available}
        self.upgrades = {u.upgrade_id: UpgradeData(self, u) for u in data.upgrades}
        self.effects = {e.effect_id: EffectRawData(self, e) for e in data.effects}

    @lru_cache(maxsize=256)
    def calculate_ability_cost(self, ability) -> "Cost":
        """Returns the resource cost for abilities, units and upgrades."""
        if isinstance(ability, AbilityId):
            ability = self.abilities[ability.value]
        elif isinstance(ability, UnitCommand):
            ability = self.abilities[ability.ability.value]
        assert isinstance(ability, AbilityData), f"C: {ability}"
        for unit in self.units.values():
            if unit.creation_ability is None:
                continue
            if not AbilityData.id_exists(unit.creation_ability.id.value):
                continue
            if unit.creation_ability.is_free_morph:
                continue
            if unit.creation_ability == ability:
                if unit.id == ZERGLING:
                    return Cost(unit.cost.minerals * 2, unit.cost.vespene * 2, unit.cost.time)
                morph_cost = unit.morph_cost
                if morph_cost:  # can be None
                    return morph_cost
                return unit.cost_zerg_corrected
        for upgrade in self.upgrades.values():
            if upgrade.research_ability == ability:
                return upgrade.cost
        return Cost(0, 0)
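
# Minimal usage sketch (added; `raw_proto_data` is an assumed placeholder for
# the raw response of the SC2 client, not something defined in this file):
#   game_data = GameData(raw_proto_data)
#   cost = game_data.calculate_ability_cost(some_ability_id)  # AbilityId or UnitCommand
#   print(cost.minerals, cost.vespene, cost.time)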


class EffectRawData:
    """Groups and works with all data related to effects."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self.proto = proto

    @property
    def id(self) -> int:
        """Return the effect id"""
        return self.proto.effect_id

    @property
    def name(self) -> str:
        """Return the effect name"""
        return self.proto.name

    @property
    def friendly_name(self) -> str:
        """Return the friendly (human-readable) name of the effect"""
        return self.proto.friendly_name

    @property
    def radius(self) -> float:
        """Return the radius of the effect's area"""
        return self.proto.radius


class AbilityData:
    """Groups and works with all data related to abilities."""

    ability_ids: List[int] = []
    for ability_id in AbilityId:
        ability_ids.append(ability_id.value)
    ability_ids.remove(0)
    ability_ids.sort()

    @classmethod
    def id_exists(cls, ability_id):
        """Check if the ability id exists"""
        assert isinstance(ability_id, int), f"Wrong type: {ability_id} is not int"
        if ability_id == 0:
            return False
        i = bisect_left(cls.ability_ids, ability_id)  # quick binary search
        return i != len(cls.ability_ids) and cls.ability_ids[i] == ability_id

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self.proto = proto
        assert self.id != 0

    def __repr__(self) -> str:
        return f"AbilityData(name={self.proto.button_name})"

    @property
    def id(self) -> AbilityId:
        """Returns the id of the ability, remapped if necessary"""
        if self.proto.remaps_to_ability_id:
            return AbilityId(self.proto.remaps_to_ability_id)
        return AbilityId(self.proto.ability_id)

    @property
    def link_name(self) -> str:
        """ For Stimpack this returns 'BarracksTechLabResearch' """
        return self.proto.link_name

    @property
    def button_name(self) -> str:
        """ For Stimpack this returns 'Stimpack' """
        return self.proto.button_name

    @property
    def friendly_name(self) -> str:
        """ For Stimpack this returns 'Research Stimpack' """
        return self.proto.friendly_name

    @property
    def is_free_morph(self) -> bool:
        """If morphing the unit is free it returns True"""
        parts = split_camel_case(self.proto.link_name)
        for part in parts:
            if part in FREE_MORPH_ABILITY_CATEGORIES:
                return True
        return False

    @property
    def cost(self) -> "Cost":
        """Returns the ability cost"""
        return self._game_data.calculate_ability_cost(self.id)


class UnitTypeData:
    """Groups and works with all data related to units."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self.proto = proto

    def __repr__(self) -> str:
        return "UnitTypeData(name={})".format(self.name)

    @property
    def id(self) -> UnitTypeId:
        """Returns the id of the unit"""
        return UnitTypeId(self.proto.unit_id)

    @property
    def name(self) -> str:
        """Returns the name of the unit"""
        return self.proto.name

    @property
    def creation_ability(self) -> Optional[AbilityData]:
        """Return the unit's creation ability, if it has one"""
        if self.proto.ability_id and self.proto.ability_id in self._game_data.abilities:
            return self._game_data.abilities[self.proto.ability_id]
        return None

    @property
    def attributes(self) -> List[ATTRIBUTE]:
        """Return a list of attributes of the unit"""
        return self.proto.attributes

    def has_attribute(self, attr) -> bool:
        """Return True if the unit has the specified attribute"""
        assert isinstance(attr, ATTRIBUTE)
        return attr in self.attributes

    @property
    def has_minerals(self) -> bool:
        """Return True if the unit has minerals (only useful for mineral patches)"""
        return self.proto.has_minerals

    @property
    def has_vespene(self) -> bool:
        """Return True if the unit has vespene (only useful for geysers)"""
        return self.proto.has_vespene

    @property
    def cargo_size(self) -> int:
        """ How much cargo this unit uses up in cargo_space """
        return self.proto.cargo_size

    @property
    def tech_requirement(self) -> Optional[UnitTypeId]:
        """ Tech-building requirement of buildings - may work for units but unreliably """
        if not self.proto.tech_requirement:
            return None
        if self.proto.tech_requirement not in self._game_data.units:
            return None
        return UnitTypeId(self.proto.tech_requirement)

    @property
    def tech_alias(self) -> Optional[List[UnitTypeId]]:
        """ Building tech equality, e.g. OrbitalCommand is the same as CommandCenter,
        and Hive is the same as Lair and Hatchery """
        return_list = []
        for tech_alias in self.proto.tech_alias:
            if tech_alias in self._game_data.units:
                return_list.append(UnitTypeId(tech_alias))
        if return_list:
            return return_list
        return None

    @property
    def unit_alias(self) -> Optional[UnitTypeId]:
        """ Building type equality, e.g. FlyingOrbitalCommand is the same as OrbitalCommand """
        if not self.proto.unit_alias:
            return None
        if self.proto.unit_alias not in self._game_data.units:
            return None
        return UnitTypeId(self.proto.unit_alias)

    @property
    def race(self) -> RACE:
        """Returns the race to which the unit belongs"""
        return RACE(self.proto.race)

    @property
    def cost(self) -> "Cost":
        """Returns the unit cost"""
        return Cost(self.proto.mineral_cost, self.proto.vespene_cost, self.proto.build_time)

    @property
    def cost_zerg_corrected(self) -> "Cost":
        """ This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively """
        if self.race == RACE.Zerg and ATTRIBUTE.Structure.value in self.attributes:
            return Cost(self.proto.mineral_cost - 50, self.proto.vespene_cost, self.proto.build_time)
        return self.cost

    @property
    def morph_cost(self) -> Optional["Cost"]:
        """ This returns 150 minerals for OrbitalCommand instead of 550 """
        if self.tech_alias is None or self.tech_alias[0] in {UnitTypeId.TECHLAB, UnitTypeId.REACTOR}:
            return None
        tech_alias_cost_minerals = max(
            [self._game_data.units[tech_alias.value].cost.minerals for tech_alias in self.tech_alias]
        )
        tech_alias_cost_vespene = max(
            [self._game_data.units[tech_alias.value].cost.vespene for tech_alias in self.tech_alias]
        )
        return Cost(
            self.proto.mineral_cost - tech_alias_cost_minerals,
            self.proto.vespene_cost - tech_alias_cost_vespene,
            self.proto.build_time,
        )


class UpgradeData:
    """Groups and works with all data related to upgrades."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self.proto = proto

    def __repr__(self):
        return "UpgradeData({} - research ability: {}, {})".format(self.name, self.research_ability, self.cost)

    @property
    def name(self) -> str:
        """Returns the name of the upgrade"""
        return self.proto.name

    @property
    def research_ability(self) -> Optional[AbilityData]:
        """Return the ability that researches this upgrade, if available"""
        if self.proto.ability_id and self.proto.ability_id in self._game_data.abilities:
            return self._game_data.abilities[self.proto.ability_id]
        return None

    @property
    def cost(self) -> "Cost":
        """Return the cost of the upgrade"""
        return Cost(self.proto.mineral_cost, self.proto.vespene_cost, self.proto.research_time)


class Cost:
    """Container for the resource and time cost used by the cost functions."""

    def __init__(self, minerals, vespene, time=None):
        self.minerals = minerals
        self.vespene = vespene
        self.time = time

    def __repr__(self) -> str:
        return f"Cost({self.minerals}, {self.vespene})"

    def __eq__(self, other) -> bool:
        return self.minerals == other.minerals and self.vespene == other.vespene

    def __ne__(self, other) -> bool:
        return self.minerals != other.minerals or self.vespene != other.vespene
| [
"functools.lru_cache",
"bisect.bisect_left"
] | [((1173, 1195), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(256)'}), '(maxsize=256)\n', (1182, 1195), False, 'from functools import lru_cache, reduce\n'), ((3589, 3629), 'bisect.bisect_left', 'bisect_left', (['cls.ability_ids', 'ability_id'], {}), '(cls.ability_ids, ability_id)\n', (3600, 3629), False, 'from bisect import bisect_left\n')] |
__author__ = 'rcj1492'
__created__ = '2016.09'
__license__ = 'MIT'
'''
python module for bot api
https://github.com/luckydonald/pytgbot
python wrapper for telegram cli
https://github.com/luckydonald/pytg
telegram cli
https://github.com/vysheng/tg
telegram with OAUTH
http://stackoverflow.com/questions/37264827/telegram-bot-oauth-authorization
haproxy with ssl pass-thru
https://serversforhackers.com/using-ssl-certificates-with-haproxy
http://nginx.2469901.n2.nabble.com/SSL-pass-through-td7583170.html
'''
class TelegramBotError(Exception):
    def __init__(self, message='', error_dict=None):
        # TODO create bad connection diagnostics methods
        text = '\nFailure connecting to Telegram Bot API with %s request.' % message
        self.error = {
            'message': message
        }
        if error_dict:
            if isinstance(error_dict, dict):
                self.error = error_dict
        super(TelegramBotError, self).__init__(text)
# TODO: test all different errors
class telegramBotHandler(object):
    def __init__(self):
        pass

    def handle(self, response):
        # construct default response details
        details = {
            'method': response.request.method,
            'code': response.status_code,
            'url': response.url,
            'error': '',
            'json': None,
            'headers': response.headers,
        }
        # handle different codes
        if details['code'] == 200:
            details['json'] = response.json()
        elif details['code'] == 403 or details['code'] == 400:
            details['error'] = response.json()['description']
        else:
            details['error'] = response.content.decode()
        return details
class telegramBotRegister(object):
    ''' a class of methods to register a new bot with telegram bot api

        currently must be done manually
        https://core.telegram.org/bots#6-botfather

        botfather_url = 'https://web.telegram.org/#/im?p=@BotFather'
        setup_sequence = [
            'tg://bot_command?command=start',
            'tg://bot_command?command=newbot&bot=BotFather',
            'message with name',
            'message with username',
            'tg://bot_command?command=cancel&bot=BotFather'
        ]
    '''
    def __init__(self, bot_settings):
        pass

    def setup(self):
        return self

    def update(self):
        return self
class telegramBotClient(object):
    ''' a class of methods for interacting with telegram bot api '''

    # https://core.telegram.org/bots/api

    _class_fields = {
        'schema': {
            'api_endpoint': 'https://api.telegram.org/bot',
            'file_endpoint': 'https://api.telegram.org/file/bot',
            'bot_id': 0,
            'access_token': '',
            'max_connections': 0,
            'webhook_url': 'https://mydomain.com/secret_token_value',
            'certificate_id': '',
            'certificate_path': 'path/to/cert.pub',
            'certificate_url': '',
            'last_update': 0,
            'user_id': 0,
            'user_name': '',
            'message_text': 'am i too needy?',
            'message_style': 'markdown',
            'button_list': [ 'yes' ],
            'keypad_type': 'phone',
            'photo_id': '',
            'photo_path': '',
            'photo_url': '',
            'caption_text': '',
            'file_id': '',
            'photo_extensions': {
                'jpg': '.+\\.jpg$',
                'jpeg': '.+\\.jpeg$',
                'gif': '.+\\.gif$',
                'png': '.+\\.png$',
                'tif': '.+\\.tif$',
                'bmp': '.+\\.bmp$'
            },
            'certificate_extensions': {
                'pem': '.+\\.pem$'
            }
        },
        'components': {
            '.bot_id': {
                'integer_data': True
            },
            '.last_update': {
                'integer_data': True
            },
            '.user_id': {
                'integer_data': True
            },
            '.message_style': {
                'discrete_values': [ 'markdown' ]
            },
            '.keypad_type': {
                'discrete_values': [ 'phone', 'calculator' ]
            },
            '.button_list[0]': {
                'max_length': 32
            },
            '.caption_text': {
                'max_length': 200
            },
            '.max_connections': {
                'integer_data': True,
                'max_value': 100,
                'min_value': 1
            }
        }
    }

    def __init__(self, bot_id, access_token, requests_handler=None):
        ''' initialization method for telegram bot client class

        :param bot_id: integer with telegram id number for bot
        :param access_token: string with access token for bot provided by telegram botfather
        :param requests_handler: callable that handles requests errors
        '''
        # construct class field model
        from jsonmodel.validators import jsonModel
        self.fields = jsonModel(self._class_fields)

        # construct client attributes
        object_title = '%s.__init__(bot_id=%s)' % (self.__class__.__name__, str(bot_id))
        self.bot_id = self.fields.validate(bot_id, '.bot_id', object_title)
        object_title = '%s.__init__(access_token=%s)' % (self.__class__.__name__, str(access_token))
        self.access_token = self.fields.validate(access_token, '.access_token', object_title)
        self.api_endpoint = '%s%s:%s' % (self.fields.schema['api_endpoint'], self.bot_id, self.access_token)
        self.file_endpoint = '%s%s:%s/' % (self.fields.schema['file_endpoint'], self.bot_id, self.access_token)

        # construct handlers
        self.requests_handler = requests_handler
        self.telegram_handler = telegramBotHandler()
    def _get_data(self, file_url, file_name='', method_title='', argument_title=''):
        ''' a helper method to retrieve data buffer for a file url

        :param file_url: string with url to file
        :param file_name: [optional] string with name to affix to file buffer
        :param method_title: [optional] string with name of class method calling
        :param argument_title: [optional] string with name of method argument key
        :return: byte data buffer with file data
        '''
        # https://docs.python.org/3/library/io.html#io.BytesIO
        import io
        import requests

        # fill empty values
        if not file_name:
            file_name = 'file'
        if not method_title:
            method_title = '%s._get_data' % self.__class__.__name__
        if not argument_title:
            argument_title = 'file_url'

        # request file from url
        try:
            remote_file = requests.get(file_url)
        except requests.exceptions.ConnectionError as err:
            if self.requests_handler:
                return self.requests_handler(err)
            else:
                raise
        except:
            raise ValueError('%s(%s=%s) is not a valid url.' % (method_title, argument_title, file_url))

        # add contents to buffer
        file_buffer = io.BytesIO(remote_file.content)
        file_buffer.name = '%s' % file_name

        return file_buffer
    def _validate_type(self, file_name, extension_map, method_title, argument_title):
        ''' a helper method to validate extension type of file

        :param file_name: string with file name to test
        :param extension_map: dictionary with extensions names and regex patterns
        :param method_title: string with title of feeder method
        :param argument_title: string with title of argument key from feeder method
        :return: string with file extension
        '''
        # validate file extension
        from labpack.parsing.regex import labRegex
        file_extension = ''
        ext_types = labRegex(extension_map)
        file_mapping = ext_types.map(file_name)[0]
        extension_list = []
        for key, value in file_mapping.items():
            if isinstance(value, bool):
                extension_list.append('.%s' % key)
            if value and isinstance(value, bool):
                file_extension = '.%s' % key
        if not file_extension:
            raise ValueError('%s(%s=%s) must be one of %s file types.' % (method_title, argument_title, file_name, extension_list))
        return file_extension
    def _compile_buttons(self, button_list, small_buttons, persist_buttons):
        ''' a helper method to compile buttons to telegram api format

        :param button_list: list of strings with button values
        :param small_buttons: boolean to resize buttons to fit text size
        :param persist_buttons: boolean to keep buttons around after exiting
        :return: string in json serial format
        '''
        import json
        keyboard_list = []
        for item in button_list:
            keyboard_list.append([{'text': item}])
        keyboard_kwargs = {
            'keyboard': keyboard_list,
            'one_time_keyboard': not persist_buttons,
            'resize_keyboard': small_buttons
        }
        json_data = json.dumps(keyboard_kwargs)
        return json_data
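
    # Illustrative output (added comment; not in the original source):
    #   _compile_buttons(['yes'], small_buttons=True, persist_buttons=False)
    # returns the serialized reply markup
    #   '{"keyboard": [[{"text": "yes"}]], "one_time_keyboard": true, "resize_keyboard": true}'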
    def _compile_keypad(self, keypad_type, persist_buttons):
        ''' a helper method to compile keypad buttons to telegram api format

        :param keypad_type: string with type of keypad to emulate
        :param persist_buttons: boolean to keep buttons around after exiting
        :return: string in json serial format
        '''
        import json
        keyboard_list = []
        if keypad_type == 'phone':
            row_list = [ {'text': '1'}, {'text': '2'}, {'text': '3'} ]
            keyboard_list.append(row_list)
            row_list = [ {'text': '4'}, {'text': '5'}, {'text': '6'} ]
            keyboard_list.append(row_list)
            row_list = [ {'text': '7'}, {'text': '8'}, {'text': '9'} ]
            keyboard_list.append(row_list)
            row_list = [ {'text': '*'}, {'text': '0'}, {'text': '#'} ]
            keyboard_list.append(row_list)
        elif keypad_type == 'calculator':
            row_list = [ {'text': '7'}, {'text': '8'}, {'text': '9'}, {'text': '/'} ]
            keyboard_list.append(row_list)
            row_list = [ {'text': '4'}, {'text': '5'}, {'text': '6'}, {'text': '*'} ]
            keyboard_list.append(row_list)
            row_list = [ {'text': '1'}, {'text': '2'}, {'text': '3'}, {'text': '-'} ]
            keyboard_list.append(row_list)
            row_list = [ {'text': '0'}, {'text': '.'}, {'text': '='}, {'text': '+'} ]
            keyboard_list.append(row_list)
        keyboard_kwargs = {
            'keyboard': keyboard_list,
            'one_time_keyboard': not persist_buttons,
            'resize_keyboard': True
        }
        json_data = json.dumps(keyboard_kwargs)
        return json_data
    def _post_request(self, url, data=None, files=None):
        ''' a helper method for sending post requests to telegram api

        https://core.telegram.org/bots/api#making-requests
        https://requests.readthedocs.io/en/master/user/quickstart/

        :param url: string with url for post request
        :param data: [optional] dictionary with data to add to request
        :param files: [optional] byte data to add to request
        :return: dictionary with response details
        '''
        import requests

        # construct request fields
        request_kwargs = {
            'url': url
        }
        if data:
            request_kwargs['data'] = data
        if files:
            request_kwargs['files'] = files

        # send request
        try:
            response = requests.post(**request_kwargs)
        except Exception:
            if self.requests_handler:
                request_kwargs['method'] = 'POST'
                request_object = requests.Request(**request_kwargs)
                return self.requests_handler(request_object)
            else:
                raise

        # handle response
        response_details = self.telegram_handler.handle(response)
        return response_details
    def get_me(self):
        ''' a method to retrieve details about the bot from telegram api

        :return: dictionary of response details with bot details in 'json' key

        {
            'headers': { ... },
            'url': 'https://api.telegram.org/bot.../getMe',
            'code': 200,
            'error': '',
            'json': {
                'ok': True,
                'result': {
                    'id': 1234567890,
                    'first_name': '<NAME>',
                    'username': 'myBot'
                }
            }
        }
        '''
        # construct request fields
        url = '%s/getMe?test=me' % self.api_endpoint

        # send request
        response_details = self._post_request(url)

        return response_details
    def set_webhook(self, webhook_url, certificate_id='', certificate_path='', certificate_url='', max_connections=40):

        # https://core.telegram.org/bots/self-signed

        title = '%s.set_webhook' % self.__class__.__name__

        # validate inputs
        input_fields = {
            'webhook_url': webhook_url,
            'certificate_id': certificate_id,
            'certificate_path': certificate_path,
            'certificate_url': certificate_url,
            'max_connections': max_connections
        }
        for key, value in input_fields.items():
            if value:
                object_title = '%s(%s=%s)' % (title, key, str(value))
                self.fields.validate(value, '.%s' % key, object_title)

        # construct request fields
        request_kwargs = {
            'url': '%s/setWebhook' % self.api_endpoint,
            'data': {
                'url': webhook_url,
                'max_connections': max_connections
            }
        }

        # construct extension map
        extension_map = self.fields.schema['certificate_extensions']

        # add certificate to request keywords
        if certificate_path:
            import os
            self._validate_type(certificate_path, extension_map, title, 'certificate_path')
            if not os.path.exists(certificate_path):
                raise ValueError('%s is not a valid file path.' % certificate_path)
            request_kwargs['files'] = { 'certificate': open(certificate_path, 'rb') }
        elif certificate_id:
            request_kwargs['data']['certificate'] = certificate_id
        elif certificate_url:
            file_extension = self._validate_type(certificate_url, extension_map, title, 'certificate_url')
            file_buffer = self._get_data(certificate_url, 'certificate%s' % file_extension, title, 'certificate_url')
            request_kwargs['files'] = { 'certificate': file_buffer }

        # send request
        response_details = self._post_request(**request_kwargs)

        return response_details
    def delete_webhook(self):

        title = '%s.delete_webhook' % self.__class__.__name__

        # construct request fields
        request_kwargs = {
            'url': '%s/setWebhook' % self.api_endpoint
        }

        # send request
        response_details = self._post_request(**request_kwargs)

        return response_details
    def get_updates(self, last_update=0):
        ''' a method to retrieve messages for bot from telegram api

        :param last_update: integer with update id of last message received
        :return: dictionary of response details with update list in [json][result]

        {
            'headers': { ... },
            'url': 'https://api.telegram.org/bot.../getUpdates',
            'code': 200,
            'error': '',
            'json': {
                'ok': True,
                'result': [
                    {
                        'update_id': 667652176,
                        'message': {
                            'chat': {
                                'first_name': 'First',
                                'type': 'private',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'text': 'Hey',
                            'from': {
                                'first_name': 'First',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'message_id': 173,
                            'date': 1478729313
                        }
                    },
                    {
                        'update_id': 667652176,
                        'message': {
                            'chat': {
                                'first_name': 'First',
                                'type': 'private',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'caption': 'Interesting song',
                            'photo': [
                                {
                                    'file_id': 'AgADAQ...EC',
                                    'width': 51,
                                    'file_size': 1238,
                                    'height': 90
                                },
                                {
                                    'file_id': 'AgADAQ...Ag',
                                    'width': 180,
                                    'file_size': 13151,
                                    'height': 320
                                },
                                {
                                    'file_id': 'AgADAQ...VC',
                                    'width': 449,
                                    'file_size': 51134,
                                    'height': 800
                                },
                                {
                                    'file_id': 'AgADAQ...AC',
                                    'width': 719,
                                    'file_size': 82609,
                                    'height': 1280
                                }
                            ],
                            'from': {
                                'first_name': 'First',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'message_id': 175,
                            'date': 1478729799
                        }
                    },
                    {
                        'update_id': 667652179,
                        'message': {
                            'chat': {
                                'first_name': 'First',
                                'type': 'private',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'caption': 'Snow in slow mo',
                            'document': {
                                'file_name': 'IMG_0010.MOV',
                                'thumb': {
                                    'file_id': 'AAQB...IC',
                                    'file_size': 2547,
                                    'width': 90,
                                    'height': 50
                                },
                                'file_size': 51588899,
                                'file_id': 'BQAD...QI'
                            },
                            'from': {
                                'first_name': 'First',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'message_id': 176,
                            'date': 1478729313
                        }
                    },
                    {
                        'update_id': 667652180,
                        'message': {
                            'chat': {
                                'first_name': 'First',
                                'type': 'private',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'location': {
                                'latitude': 12.345678,
                                'longitude': -23.456789
                            },
                            'venue': {
                                'location': {
                                    'latitude': 12.345678,
                                    'longitude': -23.456789
                                },
                                'address': '1 Laboratory Rd',
                                'title': 'Collective Acuity Labs',
                                'foursquare_id': '4a...e3'
                            },
                            'from': {
                                'first_name': 'First',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'message_id': 177,
                            'date': 1478729313
                        }
                    },
                    {
                        'update_id': 667652191,
                        'message': {
                            'chat': {
                                'first_name': 'First',
                                'type': 'private',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'voice': {
                                'duration': 3,
                                'mime_type': 'audio/ogg',
                                'file_id': 'AwADAQADAgADXGbcC3hOFYsqDDtKAg',
                                'file_size': 7008
                            },
                            'from': {
                                'first_name': 'First',
                                'id': 1234567890,
                                'last_name': 'Last'
                            },
                            'message_id': 224,
                            'date': 1478729313
                        }
                    }
                ]
            }
        }
        '''
        title = '%s.get_updates' % self.__class__.__name__

        # construct request fields
        request_kwargs = {
            'url': '%s/getUpdates' % self.api_endpoint
        }

        # add offset to kwargs
        if last_update:
            object_title = '%s(last_update=%s)' % (title, str(last_update))
            self.fields.validate(last_update, '.last_update', object_title)
            request_kwargs['data'] = {
                'offset': last_update + 1
            }

        # send request
        response_details = self._post_request(**request_kwargs)

        return response_details
    def get_route(self, file_id):
        ''' a method to retrieve route information for file on telegram api

        :param file_id: string with id of file in a message sent to bot
        :return: dictionary of response details with route details in [json][result]
        '''
        title = '%s.get_route' % self.__class__.__name__

        # validate inputs
        input_fields = {
            'file_id': file_id,
        }
        for key, value in input_fields.items():
            if value:
                object_title = '%s(%s=%s)' % (title, key, str(value))
                self.fields.validate(value, '.%s' % key, object_title)

        # construct key word arguments
        request_kwargs = {
            'url': '%s/getFile' % self.api_endpoint,
            'data': {
                'file_id': file_id
            }
        }

        # send request
        response_details = self._post_request(**request_kwargs)

        return response_details
    def get_file(self, file_route, file_name=''):
        ''' a method to retrieve data for a file housed on telegram api

        :param file_route: string with route to file endpoint on telegram api
        :return: byte data stream with file data
        '''
        title = '%s.get_file' % self.__class__.__name__

        # construct file url
        file_url = '%s%s' % (self.file_endpoint, file_route)

        # send request for file data
        data_buffer = self._get_data(file_url, file_name, method_title=title)

        return data_buffer
    def send_message(self, user_id, message_text, message_style='', button_list=None, small_buttons=True, persist_buttons=False, link_preview=True):
        ''' a method to send a message using telegram api

        :param user_id: integer with id of telegram user
        :param message_text: string with message to user
        :param message_style: [optional] string with style to apply to text, only 'markdown'
        :param button_list: [optional] list of strings to include as buttons in message
        :param small_buttons: [optional] boolean to resize buttons to single line
        :param persist_buttons: [optional] boolean to keep buttons around after exiting
        :param link_preview: [optional] boolean to open up a preview window of a link in message
        :return: dictionary of response details with message details in [json][result]

        {
            'headers': { ... },
            'url': 'https://api.telegram.org/bot.../sendMessage',
            'code': 200,
            'error': '',
            'json': {
                'ok': True,
                'result': {
                    'chat': {
                        'first_name': 'First',
                        'type': 'private',
                        'id': 1234567890,
                        'last_name': 'Last'
                    },
                    'text': 'text me again',
                    'from': {
                        'first_name': '<NAME>',
                        'id': 987654310,
                        'username': 'myBot'
                    },
                    'message_id': 178,
                    'date': 1478729313
                }
            }
        }
        '''
        title = '%s.send_message' % self.__class__.__name__

        # validate inputs
        input_fields = {
            'user_id': user_id,
            'message_text': message_text,
            'message_style': message_style,
            'button_list': button_list
        }
        for key, value in input_fields.items():
            if value:
                object_title = '%s(%s=%s)' % (title, key, str(value))
                self.fields.validate(value, '.%s' % key, object_title)

        # construct key word arguments
        request_kwargs = {
            'url': '%s/sendMessage' % self.api_endpoint,
            'data': {
                'chat_id': user_id,
                'text': message_text
            }
        }
        if message_style:
            if message_style == 'markdown':
                request_kwargs['data']['parse_mode'] = 'Markdown'
            elif message_style == 'html':
                request_kwargs['data']['parse_mode'] = 'HTML'
        if button_list:
            request_kwargs['data']['reply_markup'] = self._compile_buttons(button_list, small_buttons, persist_buttons)
        # elif keypad_type:
        #     request_kwargs['data']['reply_markup'] = self._compile_keypad(keypad_type, persist_buttons)
        if not link_preview:
            request_kwargs['data']['disable_web_page_preview'] = True

        # send request
        response_details = self._post_request(**request_kwargs)

        return response_details
    def send_photo(self, user_id, photo_id='', photo_path='', photo_url='', caption_text='', button_list=None, small_buttons=True, persist_buttons=False):
        ''' a method to send a photo using telegram api

        :param user_id: integer with id of telegram user
        :param photo_id: [optional] string with id of file stored with telegram api
        :param photo_path: [optional] string with local path to file
        :param photo_url: [optional] string with url of file
        :param caption_text: [optional] string with caption to add to photo
        :return: dictionary of response details with message detail in [json][result]

        {
            'headers': { ... },
            'url': 'https://api.telegram.org/bot.../sendPhoto',
            'code': 200,
            'error': '',
            'json': {
                'ok': True,
                'result': {
                    'chat': {
                        'first_name': 'First',
                        'type': 'private',
                        'id': 1234567890,
                        'last_name': 'Last'
                    },
                    'caption': 'lab logo',
                    'photo': [
                        {
                            'file_id': 'AgADAQ...EC',
                            'width': 51,
                            'file_size': 1238,
                            'height': 90
                        },
                        {
                            'file_id': 'AgADAQ...Ag',
                            'width': 180,
                            'file_size': 13151,
                            'height': 320
                        },
                        {
                            'file_id': 'AgADAQ...VC',
                            'width': 449,
                            'file_size': 51134,
                            'height': 800
                        },
                        {
                            'file_id': 'AgADAQ...AC',
                            'width': 719,
                            'file_size': 82609,
                            'height': 1280
                        }
                    ],
                    'from': {
                        'first_name': '<NAME>',
                        'id': 987654310,
                        'username': 'myBot'
                    },
                    'message_id': 179,
                    'date': 1478729413
                }
            }
        }
        '''
        title = '%s.send_photo' % self.__class__.__name__

        # validate inputs
        input_fields = {
            'user_id': user_id,
            'caption_text': caption_text,
            'photo_id': photo_id,
            'photo_path': photo_path,
            'photo_url': photo_url,
            'button_list': button_list
        }
        for key, value in input_fields.items():
            if value:
                object_title = '%s(%s=%s)' % (title, key, str(value))
                self.fields.validate(value, '.%s' % key, object_title)

        # construct extension map
        extension_map = self.fields.schema['photo_extensions']

        # construct key word arguments
        request_kwargs = {
            'url': '%s/sendPhoto' % self.api_endpoint,
            'data': {
                'chat_id': user_id
            }
        }
        if caption_text:
            request_kwargs['data']['caption'] = caption_text
        if button_list:
            request_kwargs['data']['reply_markup'] = self._compile_buttons(button_list, small_buttons, persist_buttons)

        # add photo to request keywords
        if photo_path:
            import os
            self._validate_type(photo_path, extension_map, title, 'photo_path')
            if not os.path.exists(photo_path):
                raise ValueError('%s is not a valid file path.' % photo_path)
            request_kwargs['files'] = { 'photo': open(photo_path, 'rb') }
        elif photo_id:
            request_kwargs['data']['photo'] = photo_id
        elif photo_url:
            file_extension = self._validate_type(photo_url, extension_map, title, 'photo_url')
            file_buffer = self._get_data(photo_url, 'photo%s' % file_extension, title, 'photo_url')
            request_kwargs['files'] = { 'photo': file_buffer }
        else:
            raise IndexError('%s(...) requires either a photo_path, photo_id or photo_url argument' % title)

        # send request
        response_details = self._post_request(**request_kwargs)

        return response_details
    def send_voice(self, user_id, voice_id='', voice_path='', voice_url='', caption_text='', button_list=None, small_buttons=True, persist_buttons=False):
        return True


if __name__ == '__main__':

    from labpack.records.settings import load_settings, save_settings
    from labpack.handlers.requests import handle_requests

    telegram_config = load_settings('../../../cred/telegram.yaml')
    photo_url = 'https://pbs.twimg.com/profile_images/479475632158408704/Zelyz-xr_400x400.png'
    photo_id = 'AgADAQADsKcxG4RH3Q85DF_-VgGr___A5y8ABVzwsrRBb8xF-wEAAQI'
    photo_path = '../../data/test_photo.png'
    file_path = '../../data/test_voice.ogg'
    update_path = '../../data/telegram-update.json'
    update_id = load_settings(update_path)['last_update']
    bot_id = telegram_config['telegram_bot_id']
    access_token = telegram_config['telegram_access_token']
    user_id = telegram_config['telegram_admin_id']
    telegram_bot = telegramBotClient(bot_id, access_token, requests_handler=handle_requests)
    details = telegram_bot.get_me()
    assert details['json']['result']['id'] == bot_id
    updates_details = telegram_bot.get_updates()
    if updates_details['json']['result']:
        update_list = sorted(updates_details['json']['result'], key=lambda k: k['update_id'])
        offset_details = { 'last_update': update_list[-1]['update_id'] }
        save_settings(offset_details, update_path, overwrite=True)
    # details = telegram_bot.send_message(user_id, 'text me again')
    # details = telegram_bot.send_photo(user_id, photo_url=photo_url, caption_text='Lab Logo')
    # details = telegram_bot.send_photo(user_id, photo_id=photo_id)
    # details = telegram_bot.send_photo(user_id, photo_path=photo_path)
    # details = telegram_bot.send_message(user_id, '*Select a Number:*\n\t_1_\n\t\t`2`\n\t\t\t[3](http://collectiveacuity.com)', message_style='markdown')
    # details = telegram_bot.send_message(user_id, 'Select a Number:', button_list=['1','2','3'])
    # details = telegram_bot.send_message(user_id, 'Select a Letter:', button_list=['ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEF'], small_buttons=False, persist_buttons=True)
    file_id = 'AwADAQADAwADXGbcCxP7_eEhVMEeAg'
    details = telegram_bot.get_route(file_id)
    file_route = details['json']['result']['file_path']
    file_buffer = telegram_bot.get_file(file_route, file_name='test_voice')
    file_data = file_buffer.getvalue()
    file_name = file_buffer.name
    from labpack.parsing.magic import labMagic
    lab_magic = labMagic('../../data/magic.mgc')
    file_details = lab_magic.analyze(byte_data=file_data)
    save_path = '../../data/%s%s' % (file_name, file_details['extension'])
    with open(save_path, 'wb') as f:
        f.write(file_data)
        f.close()
| [
"os.path.exists",
"requests.post",
"labpack.parsing.regex.labRegex",
"labpack.records.settings.load_settings",
"json.dumps",
"io.BytesIO",
"requests.get",
"requests.Request",
"labpack.records.settings.save_settings",
"jsonmodel.validators.jsonModel",
"labpack.parsing.magic.labMagic"
] | [((33484, 33528), 'labpack.records.settings.load_settings', 'load_settings', (['"""../../../cred/telegram.yaml"""'], {}), "('../../../cred/telegram.yaml')\n", (33497, 33528), False, 'from labpack.records.settings import load_settings, save_settings\n'), ((35673, 35705), 'labpack.parsing.magic.labMagic', 'labMagic', (['"""../../data/magic.mgc"""'], {}), "('../../data/magic.mgc')\n", (35681, 35705), False, 'from labpack.parsing.magic import labMagic\n'), ((5215, 5244), 'jsonmodel.validators.jsonModel', 'jsonModel', (['self._class_fields'], {}), '(self._class_fields)\n', (5224, 5244), False, 'from jsonmodel.validators import jsonModel\n'), ((7353, 7384), 'io.BytesIO', 'io.BytesIO', (['remote_file.content'], {}), '(remote_file.content)\n', (7363, 7384), False, 'import io\n'), ((8100, 8123), 'labpack.parsing.regex.labRegex', 'labRegex', (['extension_map'], {}), '(extension_map)\n', (8108, 8123), False, 'from labpack.parsing.regex import labRegex\n'), ((9412, 9439), 'json.dumps', 'json.dumps', (['keyboard_kwargs'], {}), '(keyboard_kwargs)\n', (9422, 9439), False, 'import json\n'), ((11136, 11163), 'json.dumps', 'json.dumps', (['keyboard_kwargs'], {}), '(keyboard_kwargs)\n', (11146, 11163), False, 'import json\n'), ((33860, 33886), 'labpack.records.settings.load_settings', 'load_settings', (['update_path'], {}), '(update_path)\n', (33873, 33886), False, 'from labpack.records.settings import load_settings, save_settings\n'), ((34519, 34577), 'labpack.records.settings.save_settings', 'save_settings', (['offset_details', 'update_path'], {'overwrite': '(True)'}), '(offset_details, update_path, overwrite=True)\n', (34532, 34577), False, 'from labpack.records.settings import load_settings, save_settings\n'), ((6960, 6982), 'requests.get', 'requests.get', (['file_url'], {}), '(file_url)\n', (6972, 6982), False, 'import requests\n'), ((12016, 12047), 'requests.post', 'requests.post', ([], {}), '(**request_kwargs)\n', (12029, 12047), False, 'import requests\n'), ((14612, 14644), 'os.path.exists', 'os.path.exists', (['certificate_path'], {}), '(certificate_path)\n', (14626, 14644), False, 'import os\n'), ((32323, 32349), 'os.path.exists', 'os.path.exists', (['photo_path'], {}), '(photo_path)\n', (32337, 32349), False, 'import os\n'), ((12199, 12233), 'requests.Request', 'requests.Request', ([], {}), '(**request_kwargs)\n', (12215, 12233), False, 'import requests\n')] |
# Generated by Django 2.2.13 on 2021-02-12 16:35

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("resources_portal", "0010_grant_year"),
    ]

    operations = [
        migrations.RemoveField(model_name="notification", name="text_body",),
    ]
| [
"django.db.migrations.RemoveField"
] | [((229, 296), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""notification"""', 'name': '"""text_body"""'}), "(model_name='notification', name='text_body')\n", (251, 296), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Construct PSF estimation performance comparisons included in the appendix of the extended version (arXiv) of the paper.
"""
import os
import numpy as np
from sporco.interp import interpolation_points
from sporco.metric import snr
from sporco import plot
from cdlpsf.util import interpolate
from cdlpsf.util import translatescale
clrs = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', 'black', '#9467bd']
def get_psf_arrays(noise, pps, shape, M, wp, psfpath, rcapath, cdlpath):
    if shape == 'complex' or shape == 'narrow':
        K = 5
    else:
        K = 10
    rsp = interpolation_points(M)
    g1d = np.linspace(-wp, wp, 2*wp+1)
    grd = (g1d[:, np.newaxis] + rsp[np.newaxis, :] * np.diff(g1d)[0]).ravel()
    psffile = os.path.join(psfpath, '%s.npz' % shape)
    npz = np.load(psffile, allow_pickle=True)
    refpsf = npz['refpsf'].item()[M]
    rcafile = os.path.join(rcapath, '%s_d%03d_n%7.1e.npz' %
                           (shape, int(pps), noise))
    npz = np.load(rcafile, allow_pickle=True)
    rcapsf = np.pad(npz['psf'], (2, 2))
    rcapsfi = interpolate(rcapsf, M, K)
    rcapsfi = translatescale(refpsf, rcapsfi)
    cdlfile = os.path.join(cdlpath, '%s_d%03d_n%7.1e.npz' %
                           (shape, int(pps), noise))
    npz = np.load(cdlfile, allow_pickle=True)
    cdlpsf = npz['psfgrd']
    cdlpsfi = interpolate(cdlpsf, M, K)
    cdlpsfi = translatescale(refpsf, cdlpsfi)
    return grd, refpsf, rcapsfi, cdlpsfi


def plot_psf_sections(ref, rca, cdl, grd, title=None, maxcnt=True):
    if maxcnt:
        gc, gr = np.unravel_index(ref.argmax(), ref.shape)
    else:
        gc = ref.shape[0] // 2
        gr = ref.shape[1] // 2
    fig, ax = plot.subplots(nrows=1, ncols=2, sharex=True, sharey=True,
                            figsize=(16, 5))
    if title is not None:
        fig.suptitle(title, fontsize=14)
    plot.plot(ref[gc], grd, c=clrs[0], lw=2, alpha=0.75, fig=fig, ax=ax[0])
    plot.plot(rca[gc], grd, c=clrs[1], lw=2, alpha=0.75, fig=fig, ax=ax[0])
    plot.plot(cdl[gc], grd, c=clrs[2], lw=2, alpha=0.75, title='Row slice',
              lgnd=('Reference', 'RCA', 'CDL'), fig=fig, ax=ax[0])
    plot.plot(ref[:, gr], grd, c=clrs[0], lw=2, alpha=0.75, fig=fig, ax=ax[1])
    plot.plot(rca[:, gr], grd, c=clrs[1], lw=2, alpha=0.75, fig=fig, ax=ax[1])
    plot.plot(cdl[:, gr], grd, c=clrs[2], lw=2, alpha=0.75,
              title='Column slice', lgnd=('Reference', 'RCA', 'CDL'),
              fig=fig, ax=ax[1])
    fig.show()
    return fig, ax


def plot_psf_section_diffs(ref, rca, cdl, grd, title=None, maxcnt=True):
    if maxcnt:
        gc, gr = np.unravel_index(ref.argmax(), ref.shape)
    else:
        gc = ref.shape[0] // 2
        gr = ref.shape[1] // 2
    fig, ax = plot.subplots(nrows=1, ncols=2, sharex=True, sharey=True,
                            figsize=(16, 5))
    if title is not None:
        fig.suptitle(title, fontsize=14)
    plot.plot(rca[gc] - ref[gc], grd, c=clrs[1], lw=2, alpha=0.75,
              fig=fig, ax=ax[0])
    plot.plot(cdl[gc] - ref[gc], grd, c=clrs[2], lw=2, alpha=0.75,
              title='Row slice', lgnd=('RCA - Ref.', 'CDL - Ref.'),
              fig=fig, ax=ax[0])
    plot.plot(rca[:, gr] - ref[:, gr], grd, c=clrs[1], lw=2, alpha=0.75,
              fig=fig, ax=ax[1])
    plot.plot(cdl[:, gr] - ref[:, gr], grd, c=clrs[2], lw=2, alpha=0.75,
              title='Column slice', lgnd=('RCA - Ref.', 'CDL - Ref.'),
              fig=fig, ax=ax[1])
    fig.show()
    return fig, ax


def plot_psf_contours(ref, rca, cdl, grd, v=5, xrng=None, yrng=None,
                      title=None):
    fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(18.15, 5))
    if title is not None:
        fig.suptitle(title, fontsize=14)
    plot.contour(ref, grd, grd, v=v, title='Reference',
                 fig=fig, ax=ax[0])
    plot.contour(rca, grd, grd, v=v, title='RCA',
                 fig=fig, ax=ax[1])
    plot.contour(cdl, grd, grd, v=v, title='CDL',
                 fig=fig, ax=ax[2])
    if xrng is not None or yrng is not None:
        for x in ax:
            if xrng is not None:
                x.set_xlim(xrng)
            if yrng is not None:
                x.set_ylim(yrng)
    fig.show()
    return fig, ax
# Subpixel estimation factor (common for all runs)
M = 5
# Define standard integer sampling grid -wp ... wp
wp = 7
# Paths to data files
psfpath = 'data/reference_psfs'
rcapath = 'data/rca_results'
cdlpath = 'data/icdl_results'
noise = 1.0
pps = 1.0
shape = 'complex'
grd, refpsf, rcapsf, cdlpsf = get_psf_arrays(
    noise, pps, shape, M, wp, psfpath, rcapath, cdlpath)
# The reference complex PSF uses a different scaling than the other PSFs:
# rescale for plotting
rmax = refpsf.max()
refpsf /= rmax
rcapsf /= rmax
cdlpsf /= rmax
fig, ax = plot_psf_sections(refpsf, rcapsf, cdlpsf, grd)
fig.savefig('complex_d1_n1_section.pdf', bbox_inches='tight')
fig, ax = plot_psf_section_diffs(refpsf, rcapsf, cdlpsf, grd)
fig.savefig('complex_d1_n1_secdiff.pdf', bbox_inches='tight')
fig, ax = plot_psf_contours(refpsf, rcapsf, cdlpsf, grd,
                            v=(0.05, 0.2, 0.4, 0.6, 0.8),
                            xrng=(-5, 4), yrng=(-5, 4))
fig.savefig('complex_d1_n1_contour.pdf', bbox_inches='tight')
noise = 1.0
pps = 1.0
shape = 'elong'
grd, refpsf, rcapsf, cdlpsf = get_psf_arrays(
    noise, pps, shape, M, wp, psfpath, rcapath, cdlpath)
fig, ax = plot_psf_sections(refpsf, rcapsf, cdlpsf, grd)
fig.savefig('elong_d1_n1_section.pdf', bbox_inches='tight')
fig, ax = plot_psf_section_diffs(refpsf, rcapsf, cdlpsf, grd)
fig.savefig('elong_d1_n1_secdiff.pdf', bbox_inches='tight')
fig, ax = plot_psf_contours(refpsf, rcapsf, cdlpsf, grd,
                            v=(0.05, 0.2, 0.4, 0.6, 0.8),
                            xrng=(-4, 4), yrng=(-4, 4))
fig.savefig('elong_d1_n1_contour.pdf', bbox_inches='tight')
noise = 1.0
pps = 1.0
shape = 'narrow'
grd, refpsf, rcapsf, cdlpsf = get_psf_arrays(
    noise, pps, shape, M, wp, psfpath, rcapath, cdlpath)
fig, ax = plot_psf_sections(refpsf, rcapsf, cdlpsf, grd)
fig.savefig('narrow_d1_n1_section.pdf', bbox_inches='tight')
fig, ax = plot_psf_section_diffs(refpsf, rcapsf, cdlpsf, grd)
fig.savefig('narrow_d1_n1_secdiff.pdf', bbox_inches='tight')
fig, ax = plot_psf_contours(refpsf, rcapsf, cdlpsf, grd,
                            v=(0.05, 0.2, 0.4, 0.6, 0.8),
                            xrng=(-4, 4), yrng=(-4, 4))
fig.savefig('narrow_d1_n1_contour.pdf', bbox_inches='tight')
noise = 1.0
pps = 1.0
shape = 'wide'
grd, refpsf, rcapsf, cdlpsf = get_psf_arrays(
    noise, pps, shape, M, wp, psfpath, rcapath, cdlpath)
fig, ax = plot_psf_sections(refpsf, rcapsf, cdlpsf, grd)
fig.savefig('wide_d1_n1_section.pdf', bbox_inches='tight')
fig, ax = plot_psf_section_diffs(refpsf, rcapsf, cdlpsf, grd)
fig.savefig('wide_d1_n1_secdiff.pdf', bbox_inches='tight')
fig, ax = plot_psf_contours(refpsf, rcapsf, cdlpsf, grd,
                            v=(0.05, 0.2, 0.4, 0.6, 0.8))
fig.savefig('wide_d1_n1_contour.pdf', bbox_inches='tight')
input()
| [
"sporco.interp.interpolation_points",
"cdlpsf.util.translatescale",
"os.path.join",
"numpy.diff",
"numpy.linspace",
"sporco.plot.contour",
"cdlpsf.util.interpolate",
"sporco.plot.subplots",
"numpy.pad",
"numpy.load",
"sporco.plot.plot"
] | [((633, 656), 'sporco.interp.interpolation_points', 'interpolation_points', (['M'], {}), '(M)\n', (653, 656), False, 'from sporco.interp import interpolation_points\n'), ((667, 699), 'numpy.linspace', 'np.linspace', (['(-wp)', 'wp', '(2 * wp + 1)'], {}), '(-wp, wp, 2 * wp + 1)\n', (678, 699), True, 'import numpy as np\n'), ((789, 828), 'os.path.join', 'os.path.join', (['psfpath', "('%s.npz' % shape)"], {}), "(psfpath, '%s.npz' % shape)\n", (801, 828), False, 'import os\n'), ((839, 874), 'numpy.load', 'np.load', (['psffile'], {'allow_pickle': '(True)'}), '(psffile, allow_pickle=True)\n', (846, 874), True, 'import numpy as np\n'), ((1036, 1071), 'numpy.load', 'np.load', (['rcafile'], {'allow_pickle': '(True)'}), '(rcafile, allow_pickle=True)\n', (1043, 1071), True, 'import numpy as np\n'), ((1085, 1111), 'numpy.pad', 'np.pad', (["npz['psf']", '(2, 2)'], {}), "(npz['psf'], (2, 2))\n", (1091, 1111), True, 'import numpy as np\n'), ((1126, 1151), 'cdlpsf.util.interpolate', 'interpolate', (['rcapsf', 'M', 'K'], {}), '(rcapsf, M, K)\n', (1137, 1151), False, 'from cdlpsf.util import interpolate\n'), ((1166, 1197), 'cdlpsf.util.translatescale', 'translatescale', (['refpsf', 'rcapsfi'], {}), '(refpsf, rcapsfi)\n', (1180, 1197), False, 'from cdlpsf.util import translatescale\n'), ((1322, 1357), 'numpy.load', 'np.load', (['cdlfile'], {'allow_pickle': '(True)'}), '(cdlfile, allow_pickle=True)\n', (1329, 1357), True, 'import numpy as np\n'), ((1399, 1424), 'cdlpsf.util.interpolate', 'interpolate', (['cdlpsf', 'M', 'K'], {}), '(cdlpsf, M, K)\n', (1410, 1424), False, 'from cdlpsf.util import interpolate\n'), ((1439, 1470), 'cdlpsf.util.translatescale', 'translatescale', (['refpsf', 'cdlpsfi'], {}), '(refpsf, cdlpsfi)\n', (1453, 1470), False, 'from cdlpsf.util import translatescale\n'), ((1745, 1819), 'sporco.plot.subplots', 'plot.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(16, 5)'}), '(nrows=1, ncols=2, sharex=True, sharey=True, figsize=(16, 5))\n', (1758, 1819), False, 'from sporco import plot\n'), ((1919, 1990), 'sporco.plot.plot', 'plot.plot', (['ref[gc]', 'grd'], {'c': 'clrs[0]', 'lw': '(2)', 'alpha': '(0.75)', 'fig': 'fig', 'ax': 'ax[0]'}), '(ref[gc], grd, c=clrs[0], lw=2, alpha=0.75, fig=fig, ax=ax[0])\n', (1928, 1990), False, 'from sporco import plot\n'), ((1995, 2066), 'sporco.plot.plot', 'plot.plot', (['rca[gc]', 'grd'], {'c': 'clrs[1]', 'lw': '(2)', 'alpha': '(0.75)', 'fig': 'fig', 'ax': 'ax[0]'}), '(rca[gc], grd, c=clrs[1], lw=2, alpha=0.75, fig=fig, ax=ax[0])\n', (2004, 2066), False, 'from sporco import plot\n'), ((2071, 2199), 'sporco.plot.plot', 'plot.plot', (['cdl[gc]', 'grd'], {'c': 'clrs[2]', 'lw': '(2)', 'alpha': '(0.75)', 'title': '"""Row slice"""', 'lgnd': "('Reference', 'RCA', 'CDL')", 'fig': 'fig', 'ax': 'ax[0]'}), "(cdl[gc], grd, c=clrs[2], lw=2, alpha=0.75, title='Row slice',\n lgnd=('Reference', 'RCA', 'CDL'), fig=fig, ax=ax[0])\n", (2080, 2199), False, 'from sporco import plot\n'), ((2214, 2288), 'sporco.plot.plot', 'plot.plot', (['ref[:, gr]', 'grd'], {'c': 'clrs[0]', 'lw': '(2)', 'alpha': '(0.75)', 'fig': 'fig', 'ax': 'ax[1]'}), '(ref[:, gr], grd, c=clrs[0], lw=2, alpha=0.75, fig=fig, ax=ax[1])\n', (2223, 2288), False, 'from sporco import plot\n'), ((2293, 2367), 'sporco.plot.plot', 'plot.plot', (['rca[:, gr]', 'grd'], {'c': 'clrs[1]', 'lw': '(2)', 'alpha': '(0.75)', 'fig': 'fig', 'ax': 'ax[1]'}), '(rca[:, gr], grd, c=clrs[1], lw=2, alpha=0.75, fig=fig, ax=ax[1])\n', (2302, 2367), False, 'from sporco import plot\n'), 
((2372, 2507), 'sporco.plot.plot', 'plot.plot', (['cdl[:, gr]', 'grd'], {'c': 'clrs[2]', 'lw': '(2)', 'alpha': '(0.75)', 'title': '"""Column slice"""', 'lgnd': "('Reference', 'RCA', 'CDL')", 'fig': 'fig', 'ax': 'ax[1]'}), "(cdl[:, gr], grd, c=clrs[2], lw=2, alpha=0.75, title=\n 'Column slice', lgnd=('Reference', 'RCA', 'CDL'), fig=fig, ax=ax[1])\n", (2381, 2507), False, 'from sporco import plot\n'), ((2802, 2876), 'sporco.plot.subplots', 'plot.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(16, 5)'}), '(nrows=1, ncols=2, sharex=True, sharey=True, figsize=(16, 5))\n', (2815, 2876), False, 'from sporco import plot\n'), ((2976, 3062), 'sporco.plot.plot', 'plot.plot', (['(rca[gc] - ref[gc])', 'grd'], {'c': 'clrs[1]', 'lw': '(2)', 'alpha': '(0.75)', 'fig': 'fig', 'ax': 'ax[0]'}), '(rca[gc] - ref[gc], grd, c=clrs[1], lw=2, alpha=0.75, fig=fig, ax=\n ax[0])\n', (2985, 3062), False, 'from sporco import plot\n'), ((3076, 3216), 'sporco.plot.plot', 'plot.plot', (['(cdl[gc] - ref[gc])', 'grd'], {'c': 'clrs[2]', 'lw': '(2)', 'alpha': '(0.75)', 'title': '"""Row slice"""', 'lgnd': "('RCA - Ref.', 'CDL - Ref.')", 'fig': 'fig', 'ax': 'ax[0]'}), "(cdl[gc] - ref[gc], grd, c=clrs[2], lw=2, alpha=0.75, title=\n 'Row slice', lgnd=('RCA - Ref.', 'CDL - Ref.'), fig=fig, ax=ax[0])\n", (3085, 3216), False, 'from sporco import plot\n'), ((3244, 3336), 'sporco.plot.plot', 'plot.plot', (['(rca[:, gr] - ref[:, gr])', 'grd'], {'c': 'clrs[1]', 'lw': '(2)', 'alpha': '(0.75)', 'fig': 'fig', 'ax': 'ax[1]'}), '(rca[:, gr] - ref[:, gr], grd, c=clrs[1], lw=2, alpha=0.75, fig=\n fig, ax=ax[1])\n', (3253, 3336), False, 'from sporco import plot\n'), ((3350, 3499), 'sporco.plot.plot', 'plot.plot', (['(cdl[:, gr] - ref[:, gr])', 'grd'], {'c': 'clrs[2]', 'lw': '(2)', 'alpha': '(0.75)', 'title': '"""Column slice"""', 'lgnd': "('RCA - Ref.', 'CDL - Ref.')", 'fig': 'fig', 'ax': 'ax[1]'}), "(cdl[:, gr] - ref[:, gr], grd, c=clrs[2], lw=2, alpha=0.75, title=\n 'Column slice', lgnd=('RCA - Ref.', 'CDL - Ref.'), fig=fig, ax=ax[1])\n", (3359, 3499), False, 'from sporco import plot\n'), ((3680, 3731), 'sporco.plot.subplots', 'plot.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(18.15, 5)'}), '(nrows=1, ncols=3, figsize=(18.15, 5))\n', (3693, 3731), False, 'from sporco import plot\n'), ((3803, 3873), 'sporco.plot.contour', 'plot.contour', (['ref', 'grd', 'grd'], {'v': 'v', 'title': '"""Reference"""', 'fig': 'fig', 'ax': 'ax[0]'}), "(ref, grd, grd, v=v, title='Reference', fig=fig, ax=ax[0])\n", (3815, 3873), False, 'from sporco import plot\n'), ((3895, 3959), 'sporco.plot.contour', 'plot.contour', (['rca', 'grd', 'grd'], {'v': 'v', 'title': '"""RCA"""', 'fig': 'fig', 'ax': 'ax[1]'}), "(rca, grd, grd, v=v, title='RCA', fig=fig, ax=ax[1])\n", (3907, 3959), False, 'from sporco import plot\n'), ((3981, 4045), 'sporco.plot.contour', 'plot.contour', (['cdl', 'grd', 'grd'], {'v': 'v', 'title': '"""CDL"""', 'fig': 'fig', 'ax': 'ax[2]'}), "(cdl, grd, grd, v=v, title='CDL', fig=fig, ax=ax[2])\n", (3993, 4045), False, 'from sporco import plot\n'), ((749, 761), 'numpy.diff', 'np.diff', (['g1d'], {}), '(g1d)\n', (756, 761), True, 'import numpy as np\n')] |
from __future__ import print_function
import time
from collections import defaultdict
import random
import math
import sys
import argparse
import torch
from torch.autograd import Variable
import numpy as np
# format of files: each line is "word1 word2 ..." aligned line-by-line
train_src_file = "../data/parallel/train.ja"
train_trg_file = "../data/parallel/train.en"
dev_src_file = "../data/parallel/dev.ja"
dev_trg_file = "../data/parallel/dev.en"
w2i_src = defaultdict(lambda: len(w2i_src))
w2i_trg = defaultdict(lambda: len(w2i_trg))
def read(fname_src, fname_trg):
    """
    Read parallel files where each line lines up
    """
    with open(fname_src, "r") as f_src, open(fname_trg, "r") as f_trg:
        for line_src, line_trg in zip(f_src, f_trg):
            sent_src = [w2i_src[x] for x in line_src.strip().split()]
            sent_trg = [w2i_trg[x] for x in line_trg.strip().split()]
            yield (sent_src, sent_trg)
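
# Illustrative example (added; not in the original script): for an aligned line
# pair "a b c" / "x y", read() yields
#   ([w2i_src['a'], w2i_src['b'], w2i_src['c']], [w2i_trg['x'], w2i_trg['y']]),
# growing both vocabularies on the fly through the defaultdicts above.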
# Read the data
train = list(read(train_src_file, train_trg_file))
unk_src = w2i_src["<unk>"]
w2i_src = defaultdict(lambda: unk_src, w2i_src)
unk_trg = w2i_trg["<unk>"]
w2i_trg = defaultdict(lambda: unk_trg, w2i_trg)
nwords_src = len(w2i_src)
nwords_trg = len(w2i_trg)
dev = list(read(dev_src_file, dev_trg_file))
# Model parameters
EMB_SIZE = 64
HID_SIZE = 64
BATCH_SIZE = 16
class bilstm(torch.nn.Module):
    def __init__(self, nwords_src, nwords_trg, EMB_SIZE, HID_SIZE, use_cuda):
        super(bilstm, self).__init__()
        self.useCuda = use_cuda
        self.hidSize = HID_SIZE
        self.embeddingSRC = torch.nn.Embedding(nwords_src, EMB_SIZE)
        self.embeddingTRG = torch.nn.Embedding(nwords_trg, EMB_SIZE)
        torch.nn.init.uniform_(self.embeddingSRC.weight, -0.25, 0.25)
        torch.nn.init.uniform_(self.embeddingTRG.weight, -0.25, 0.25)
        self.srcLstm = torch.nn.LSTM(input_size=EMB_SIZE, hidden_size=HID_SIZE, num_layers=1, bidirectional=True, batch_first=True)
        self.trgLstm = torch.nn.LSTM(input_size=EMB_SIZE, hidden_size=HID_SIZE, num_layers=1, bidirectional=True, batch_first=True)

    def forward(self, sent, isSrc):
        if isSrc:
            sentEmb = self.embeddingSRC(sent)
            if use_cuda:
                srch0 = Variable(torch.zeros(2, 1, self.hidSize).cuda())
                srcc0 = Variable(torch.zeros(2, 1, self.hidSize).cuda())
            else:
                srch0 = Variable(torch.zeros(2, 1, self.hidSize))
                srcc0 = Variable(torch.zeros(2, 1, self.hidSize))
            self.srchidden = (srch0, srcc0)
            sentEmb = sentEmb.unsqueeze(0)
            output, _ = self.srcLstm(sentEmb, self.srchidden)
        else:
            sentEmb = self.embeddingTRG(sent)
            if use_cuda:
                trgh0 = Variable(torch.zeros(2, 1, self.hidSize).cuda())
                trgc0 = Variable(torch.zeros(2, 1, self.hidSize).cuda())
            else:
                trgh0 = Variable(torch.zeros(2, 1, self.hidSize))
                trgc0 = Variable(torch.zeros(2, 1, self.hidSize))
            self.trghidden = (trgh0, trgc0)
            sentEmb = sentEmb.unsqueeze(0)
            output, _ = self.trgLstm(sentEmb, self.trghidden)
        return output[:, -1, :]
type = torch.LongTensor
use_cuda = torch.cuda.is_available()
model = bilstm(nwords_src, nwords_trg, EMB_SIZE, HID_SIZE, use_cuda)
criterion = torch.nn.MultiMarginLoss(reduce=False)
optimizer = torch.optim.Adam(model.parameters())
if use_cuda:
    type = torch.cuda.LongTensor
    model.cuda()


def calc_loss(sents):
    srcSent = [torch.tensor(src).type(type) for src, tag in sents]
    trgSent = [torch.tensor(tag).type(type) for src, tag in sents]
    src_reps = [model(sent, True) for sent in srcSent]
    trg_reps = [model(sent, False) for sent in trgSent]
    src_mtx = torch.cat(src_reps)
    trg_mtx = torch.cat(trg_reps)
    sim_mtx = torch.matmul(src_mtx, trg_mtx.transpose(1, 0))
    y = torch.tensor(list(range(len(sents)))).type(type)
    loss = criterion(input=sim_mtx, target=y)
    return torch.sum(loss)
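
# Note (added; not in the original script): this implements a margin loss with
# in-batch negatives. sim_mtx[i][j] is the similarity between source sentence i
# and target sentence j in the minibatch; with target class y[i] = i, the
# MultiMarginLoss pushes the aligned pair's score above every other column in
# row i, so the remaining BATCH_SIZE - 1 targets act as negative examples.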


# Calculate representations for one corpus
def index_corpus(sents):
    # To take advantage of auto-batching, do several at a time
    for sid in range(0, len(sents), BATCH_SIZE):
        srcSent = [torch.tensor(src).type(type) for src, tag in sents[sid:min(sid + BATCH_SIZE, len(sents))]]
        trgSent = [torch.tensor(tag).type(type) for src, tag in sents[sid:min(sid + BATCH_SIZE, len(sents))]]
        src_exprs = [model(sent, True) for sent in srcSent]
        trg_exprs = [model(sent, False) for sent in trgSent]
        for src_expr, trg_expr in zip(src_exprs, trg_exprs):
            # .cpu() keeps this working when the model runs on CUDA
            yield (src_expr.data.cpu().numpy()[0], trg_expr.data.cpu().numpy()[0])


# Perform retrieval, and return both scores and ranked order of candidates
def retrieve(src, db_mtx):
    scores = np.dot(db_mtx, src)
    ranks = np.argsort(-scores)
    return ranks, scores
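
# Illustrative example (added): if db_mtx stacks the representations of three
# candidate targets and src matches the second one best, retrieve(src, db_mtx)
# returns ranks like array([1, 0, 2]) together with the raw dot-product scores.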


# Perform training
start = time.time()
train_mbs = all_time = dev_time = all_tagged = this_sents = this_loss = 0
for ITER in range(100):
    random.shuffle(train)
    for sid in range(0, len(train), BATCH_SIZE):
        my_size = min(BATCH_SIZE, len(train) - sid)
        train_mbs += 1
        if train_mbs % int(1000 / BATCH_SIZE) == 0:
            print("loss/sent=%.4f, sent/sec=%.4f" % (this_loss / this_sents, (train_mbs * BATCH_SIZE) / (time.time() - start - dev_time)), file=sys.stderr)
            this_loss = this_sents = 0
        # train on the minibatch
        loss_exp = calc_loss(train[sid:sid + BATCH_SIZE])
        this_loss += loss_exp.item()
        this_sents += BATCH_SIZE
        optimizer.zero_grad()
        loss_exp.backward()
        optimizer.step()
    # Perform evaluation
    dev_start = time.time()
    rec_at_1, rec_at_5, rec_at_10 = 0, 0, 0
    reps = list(index_corpus(dev))
    trg_mtx = np.stack([trg for src, trg in reps])
    for i, (src, trg) in enumerate(reps):
        ranks, scores = retrieve(src, trg_mtx)
        if ranks[0] == i: rec_at_1 += 1
        if i in ranks[:5]: rec_at_5 += 1
        if i in ranks[:10]: rec_at_10 += 1
    dev_time += time.time() - dev_start
    print("epoch %r: dev recall@1=%.2f%% recall@5=%.2f%% recall@10=%.2f%%" % (ITER, rec_at_1/len(dev)*100, rec_at_5/len(dev)*100, rec_at_10/len(dev)*100))
| [
"torch.nn.Embedding",
"torch.nn.MultiMarginLoss",
"random.shuffle",
"torch.nn.LSTM",
"numpy.argsort",
"numpy.stack",
"numpy.dot",
"torch.cuda.is_available",
"collections.defaultdict",
"torch.sum",
"torch.nn.init.uniform_",
"torch.tensor",
"time.time",
"torch.zeros",
"torch.cat"
] | [((1048, 1086), 'collections.defaultdict', 'defaultdict', (['(lambda : unk_src)', 'w2i_src'], {}), '(lambda : unk_src, w2i_src)\n', (1059, 1086), False, 'from collections import defaultdict\n'), ((1123, 1161), 'collections.defaultdict', 'defaultdict', (['(lambda : unk_trg)', 'w2i_trg'], {}), '(lambda : unk_trg, w2i_trg)\n', (1134, 1161), False, 'from collections import defaultdict\n'), ((3251, 3276), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3274, 3276), False, 'import torch\n'), ((3359, 3397), 'torch.nn.MultiMarginLoss', 'torch.nn.MultiMarginLoss', ([], {'reduce': '(False)'}), '(reduce=False)\n', (3383, 3397), False, 'import torch\n'), ((4924, 4935), 'time.time', 'time.time', ([], {}), '()\n', (4933, 4935), False, 'import time\n'), ((3796, 3815), 'torch.cat', 'torch.cat', (['src_reps'], {}), '(src_reps)\n', (3805, 3815), False, 'import torch\n'), ((3830, 3849), 'torch.cat', 'torch.cat', (['trg_reps'], {}), '(trg_reps)\n', (3839, 3849), False, 'import torch\n'), ((4028, 4043), 'torch.sum', 'torch.sum', (['loss'], {}), '(loss)\n', (4037, 4043), False, 'import torch\n'), ((4818, 4837), 'numpy.dot', 'np.dot', (['db_mtx', 'src'], {}), '(db_mtx, src)\n', (4824, 4837), True, 'import numpy as np\n'), ((4850, 4869), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (4860, 4869), True, 'import numpy as np\n'), ((5038, 5059), 'random.shuffle', 'random.shuffle', (['train'], {}), '(train)\n', (5052, 5059), False, 'import random\n'), ((5710, 5721), 'time.time', 'time.time', ([], {}), '()\n', (5719, 5721), False, 'import time\n'), ((5815, 5851), 'numpy.stack', 'np.stack', (['[trg for src, trg in reps]'], {}), '([trg for src, trg in reps])\n', (5823, 5851), True, 'import numpy as np\n'), ((1565, 1605), 'torch.nn.Embedding', 'torch.nn.Embedding', (['nwords_src', 'EMB_SIZE'], {}), '(nwords_src, EMB_SIZE)\n', (1583, 1605), False, 'import torch\n'), ((1634, 1674), 'torch.nn.Embedding', 'torch.nn.Embedding', (['nwords_trg', 'EMB_SIZE'], {}), '(nwords_trg, EMB_SIZE)\n', (1652, 1674), False, 'import torch\n'), ((1683, 1744), 'torch.nn.init.uniform_', 'torch.nn.init.uniform_', (['self.embeddingSRC.weight', '(-0.25)', '(0.25)'], {}), '(self.embeddingSRC.weight, -0.25, 0.25)\n', (1705, 1744), False, 'import torch\n'), ((1753, 1814), 'torch.nn.init.uniform_', 'torch.nn.init.uniform_', (['self.embeddingTRG.weight', '(-0.25)', '(0.25)'], {}), '(self.embeddingTRG.weight, -0.25, 0.25)\n', (1775, 1814), False, 'import torch\n'), ((1838, 1950), 'torch.nn.LSTM', 'torch.nn.LSTM', ([], {'input_size': 'EMB_SIZE', 'hidden_size': 'HID_SIZE', 'num_layers': '(1)', 'bidirectional': '(True)', 'batch_first': '(True)'}), '(input_size=EMB_SIZE, hidden_size=HID_SIZE, num_layers=1,\n bidirectional=True, batch_first=True)\n', (1851, 1950), False, 'import torch\n'), ((1970, 2082), 'torch.nn.LSTM', 'torch.nn.LSTM', ([], {'input_size': 'EMB_SIZE', 'hidden_size': 'HID_SIZE', 'num_layers': '(1)', 'bidirectional': '(True)', 'batch_first': '(True)'}), '(input_size=EMB_SIZE, hidden_size=HID_SIZE, num_layers=1,\n bidirectional=True, batch_first=True)\n', (1983, 2082), False, 'import torch\n'), ((6081, 6092), 'time.time', 'time.time', ([], {}), '()\n', (6090, 6092), False, 'import time\n'), ((3549, 3566), 'torch.tensor', 'torch.tensor', (['src'], {}), '(src)\n', (3561, 3566), False, 'import torch\n'), ((3616, 3633), 'torch.tensor', 'torch.tensor', (['tag'], {}), '(tag)\n', (3628, 3633), False, 'import torch\n'), ((2403, 2434), 'torch.zeros', 'torch.zeros', (['(2)', '(1)', 'self.hidSize'], {}), '(2, 1, self.hidSize)\n', (2414, 2434), False, 'import torch\n'), ((2469, 2500), 'torch.zeros', 'torch.zeros', (['(2)', '(1)', 'self.hidSize'], {}), '(2, 1, self.hidSize)\n', (2480, 2500), False, 'import torch\n'), ((2934, 2965), 'torch.zeros', 'torch.zeros', (['(2)', '(1)', 'self.hidSize'], {}), '(2, 1, self.hidSize)\n', (2945, 2965), False, 'import torch\n'), ((3000, 3031), 'torch.zeros', 'torch.zeros', (['(2)', '(1)', 'self.hidSize'], {}), '(2, 1, self.hidSize)\n', (3011, 3031), False, 'import torch\n'), ((4245, 4262), 'torch.tensor', 'torch.tensor', (['src'], {}), '(src)\n', (4257, 4262), False, 'import torch\n'), ((4355, 4372), 'torch.tensor', 'torch.tensor', (['tag'], {}), '(tag)\n', (4367, 4372), False, 'import torch\n'), ((2239, 2270), 'torch.zeros', 'torch.zeros', (['(2)', '(1)', 'self.hidSize'], {}), '(2, 1, self.hidSize)\n', (2250, 2270), False, 'import torch\n'), ((2312, 2343), 'torch.zeros', 'torch.zeros', (['(2)', '(1)', 'self.hidSize'], {}), '(2, 1, self.hidSize)\n', (2323, 2343), False, 'import torch\n'), ((2770, 2801), 'torch.zeros', 'torch.zeros', (['(2)', '(1)', 'self.hidSize'], {}), '(2, 1, self.hidSize)\n', (2781, 2801), False, 'import torch\n'), ((2843, 2874), 'torch.zeros', 'torch.zeros', (['(2)', '(1)', 'self.hidSize'], {}), '(2, 1, self.hidSize)\n', (2854, 2874), False, 'import torch\n'), ((5337, 5348), 'time.time', 'time.time', ([], {}), '()\n', (5346, 5348), False, 'import time\n')] |
import RPi.GPIO as GPIO
import time
class Translator:
_tens = [40, 38, 37, 36]
    _lastDigits = [3, 5, 7, 12, 11, 13, 15, 16]
def __init__(self):
GPIO.setmode(GPIO.BOARD)
for pin in self._tens:
GPIO.setup(pin, GPIO.OUT)
for pin in self._lastDigits:
GPIO.setup(pin, GPIO.OUT)
def parse(self, number):
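        # clear every pin, then light one "units" pin per ones digit and one "tens" pin per tens digit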
for pin in self._tens:
GPIO.output(pin, GPIO.LOW)
for pin in self._lastDigits:
GPIO.output(pin, GPIO.LOW)
for x in range(0, number % 10):
GPIO.output(self._lastDigits[x], GPIO.HIGH)
        for x in range(0, number // 10):
GPIO.output(self._tens[x], GPIO.HIGH) | [
"RPi.GPIO.setup",
"RPi.GPIO.output",
"RPi.GPIO.setmode"
] | [((165, 189), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (177, 189), True, 'import RPi.GPIO as GPIO\n'), ((233, 258), 'RPi.GPIO.setup', 'GPIO.setup', (['pin', 'GPIO.OUT'], {}), '(pin, GPIO.OUT)\n', (243, 258), True, 'import RPi.GPIO as GPIO\n'), ((308, 333), 'RPi.GPIO.setup', 'GPIO.setup', (['pin', 'GPIO.OUT'], {}), '(pin, GPIO.OUT)\n', (318, 333), True, 'import RPi.GPIO as GPIO\n'), ((407, 433), 'RPi.GPIO.output', 'GPIO.output', (['pin', 'GPIO.LOW'], {}), '(pin, GPIO.LOW)\n', (418, 433), True, 'import RPi.GPIO as GPIO\n'), ((483, 509), 'RPi.GPIO.output', 'GPIO.output', (['pin', 'GPIO.LOW'], {}), '(pin, GPIO.LOW)\n', (494, 509), True, 'import RPi.GPIO as GPIO\n'), ((562, 605), 'RPi.GPIO.output', 'GPIO.output', (['self._lastDigits[x]', 'GPIO.HIGH'], {}), '(self._lastDigits[x], GPIO.HIGH)\n', (573, 605), True, 'import RPi.GPIO as GPIO\n'), ((658, 695), 'RPi.GPIO.output', 'GPIO.output', (['self._tens[x]', 'GPIO.HIGH'], {}), '(self._tens[x], GPIO.HIGH)\n', (669, 695), True, 'import RPi.GPIO as GPIO\n')] |
import Console_Output
import Trick
import click
class Players:
def __init__(self):
self._players = []
self._start_player_next_round = None
def add_player(self, player):
self._players.append(player)
def num_players(self):
return len(self._players)
def __iter__(self):
return iter(self._players)
def play_round(self, round_nr, trump_color):
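        # the player seated after the current starter opens the next round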
self._start_player_next_round = self._players[1]
self.reset_trick_guesses()
self.request_trick_guesses()
self.play_tricks(round_nr, trump_color)
self.update_scores()
Console_Output.print_current_scores(self._players)
self.rotate_players_to_player(self._start_player_next_round)
def request_trick_guesses(self):
for p in self._players:
p.guess_tricks()
def reset_trick_guesses(self):
for p in self._players:
p.reset_tricks()
def play_tricks(self, nr_hand_cards=0, trump_color=""):
for _ in range(nr_hand_cards):
self.play_one_trick(trump_color)
def play_one_trick(self, trump_color):
t = Trick.Trick(trump_color=trump_color)
for p in self._players:
p.play_card(t)
winner_card_ix = t.determine_winner()
winner = self._players[winner_card_ix]
winner.won_tricks += 1
Console_Output.print_trick_winner(winner.name)
# set winning player as first to act for the next trick
self.rotate_players_to_player(winner)
def rotate_players_to_player(self, player):
ix = self._players.index(player)
self._players = self._players[ix:] + self._players[:ix]
def update_scores(self):
for p in self._players:
p.score += p.won_tricks
if p.guessed_tricks == p.won_tricks:
p.score += 5
| [
"Console_Output.print_trick_winner",
"Console_Output.print_current_scores",
"Trick.Trick"
] | [((619, 669), 'Console_Output.print_current_scores', 'Console_Output.print_current_scores', (['self._players'], {}), '(self._players)\n', (654, 669), False, 'import Console_Output\n'), ((1136, 1172), 'Trick.Trick', 'Trick.Trick', ([], {'trump_color': 'trump_color'}), '(trump_color=trump_color)\n', (1147, 1172), False, 'import Trick\n'), ((1364, 1410), 'Console_Output.print_trick_winner', 'Console_Output.print_trick_winner', (['winner.name'], {}), '(winner.name)\n', (1397, 1410), False, 'import Console_Output\n')] |
#!/usr/bin/env python3
############################################################################################
# #
# Program purpose: Iterates over two lists simultaneously. #
# Program Author : <NAME> <<EMAIL>> #
# Creation Date : November 23, 2019 #
# #
############################################################################################
import random
def random_int_list(low: int, high: int, size: int) -> list:
if size < 0:
raise ValueError('Invalid size of new list')
return [random.randint(low, high) for _ in range(size)]
def display_both_lists(listA: list, listB: list) -> None:
for valA, valB in zip(listA, listB):
print(f'--> {valA} {valB}')
if __name__ == "__main__":
list_A = random_int_list(low=0, high=15, size=10)
list_B = random_int_list(low=0, high=15, size=10)
print(f'Generate list data [A]: {list_A}')
print(f'Generate list data [B]: {list_B}')
display_both_lists(listA=list_A, listB=list_B)
| [
"random.randint"
] | [((814, 839), 'random.randint', 'random.randint', (['low', 'high'], {}), '(low, high)\n', (828, 839), False, 'import random\n')] |
from pyspark.sql import DataFrame
from utils.spark_spawner import SparkSpawner
class DataSourceSql:
def __init__(self, sql_data_object):
self.__spark__ = SparkSpawner().get_spark()
        self.__sql_data_object = sql_data_object
def load_data(self) -> DataFrame:
sql="SELECT * FROM {}".format(self.__sql_data_object)
return self.__spark__.sql(sql)
| [
"utils.spark_spawner.SparkSpawner"
] | [((167, 181), 'utils.spark_spawner.SparkSpawner', 'SparkSpawner', ([], {}), '()\n', (179, 181), False, 'from utils.spark_spawner import SparkSpawner\n')] |
"""Module with utilities to create tests."""
from unittest.mock import Mock, create_autospec
from kytos.core import Controller
from kytos.core.config import KytosConfig
from kytos.core.connection import (Connection, ConnectionProtocol,
ConnectionState)
from kytos.core.events import KytosEvent
from kytos.core.interface import Interface
from kytos.core.link import Link
from kytos.core.switch import Switch
def get_controller_mock(loop=None):
"""Return a controller mock."""
options = KytosConfig().options['daemon']
controller = Controller(options, loop=loop)
controller.log = Mock()
return controller
def get_interface_mock(name, port_number, switch, address="00:00:00:00:00:00"):
"""Return a interface mock."""
interface = create_autospec(Interface)
interface.id = "{}:{}".format(switch.dpid, port_number)
interface.name = name
interface.port_number = port_number
interface.switch = switch
interface.address = address
interface.lldp = True
return interface
def get_link_mock(endpoint_a, endpoint_b):
"""Return a link mock."""
link = create_autospec(Link)
link.endpoint_a = endpoint_a
link.endpoint_b = endpoint_b
link.metadata = {"A": 0, "BB": 0.0, "CCC": "test"}
return link
def get_switch_mock(dpid, of_version=None):
"""Return a switch mock."""
switch = create_autospec(Switch)
switch.dpid = dpid
if of_version:
switch.ofp_version = '0x0' + str(of_version)
switch.connection = get_connection_mock(of_version, switch)
return switch
def get_connection_mock(of_version, switch, address="00:00:00:00:00:00",
state=ConnectionState.NEW):
"""Return a connection mock."""
protocol = create_autospec(ConnectionProtocol)
protocol.version = of_version
connection = create_autospec(Connection)
connection.protocol = protocol
connection.switch = switch
connection.address = address
connection.state = state
return connection
def get_kytos_event_mock(name, content):
"""Return a kytos event mock."""
event = create_autospec(KytosEvent)
event.name = name
event.content = content
event.message = content.get('message')
event.destination = content.get('destination')
event.source = content.get('source')
return event
def get_test_client(controller, napp):
"""Return a flask api test client."""
controller.api_server.register_napp_endpoints(napp)
return controller.api_server.app.test_client()
| [
"kytos.core.Controller",
"kytos.core.config.KytosConfig",
"unittest.mock.Mock",
"unittest.mock.create_autospec"
] | [((580, 610), 'kytos.core.Controller', 'Controller', (['options'], {'loop': 'loop'}), '(options, loop=loop)\n', (590, 610), False, 'from kytos.core import Controller\n'), ((632, 638), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (636, 638), False, 'from unittest.mock import Mock, create_autospec\n'), ((794, 820), 'unittest.mock.create_autospec', 'create_autospec', (['Interface'], {}), '(Interface)\n', (809, 820), False, 'from unittest.mock import Mock, create_autospec\n'), ((1142, 1163), 'unittest.mock.create_autospec', 'create_autospec', (['Link'], {}), '(Link)\n', (1157, 1163), False, 'from unittest.mock import Mock, create_autospec\n'), ((1392, 1415), 'unittest.mock.create_autospec', 'create_autospec', (['Switch'], {}), '(Switch)\n', (1407, 1415), False, 'from unittest.mock import Mock, create_autospec\n'), ((1775, 1810), 'unittest.mock.create_autospec', 'create_autospec', (['ConnectionProtocol'], {}), '(ConnectionProtocol)\n', (1790, 1810), False, 'from unittest.mock import Mock, create_autospec\n'), ((1862, 1889), 'unittest.mock.create_autospec', 'create_autospec', (['Connection'], {}), '(Connection)\n', (1877, 1889), False, 'from unittest.mock import Mock, create_autospec\n'), ((2132, 2159), 'unittest.mock.create_autospec', 'create_autospec', (['KytosEvent'], {}), '(KytosEvent)\n', (2147, 2159), False, 'from unittest.mock import Mock, create_autospec\n'), ((531, 544), 'kytos.core.config.KytosConfig', 'KytosConfig', ([], {}), '()\n', (542, 544), False, 'from kytos.core.config import KytosConfig\n')] |
# Generated by Django 2.2.10 on 2020-06-17 12:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20200515_2232'),
]
operations = [
migrations.AlterField(
model_name='telegrambotlogs',
name='log_type',
field=models.IntegerField(choices=[(0, 'Error'), (1, 'Not Sended'), (2, 'Not Found'), (3, 'Success')], default=0),
),
]
| [
"django.db.models.IntegerField"
] | [((345, 456), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'Error'), (1, 'Not Sended'), (2, 'Not Found'), (3, 'Success')]", 'default': '(0)'}), "(choices=[(0, 'Error'), (1, 'Not Sended'), (2,\n 'Not Found'), (3, 'Success')], default=0)\n", (364, 456), False, 'from django.db import migrations, models\n')] |
import unittest
from model.pasteme_rim import BidirectionalLSTM
class MyModelTestCase(unittest.TestCase):
def test_BidirectionalLSTM(self):
model = BidirectionalLSTM(
host='http://docker:8501',
model_name='PasteMeRIM',
version=1, max_length=128)
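        # '你好,世界!' is Chinese for "Hello, World!"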
prediction = model.predict({'content': ['你好,世界!']})
print(prediction)
self.assertEqual(True, True)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"model.pasteme_rim.BidirectionalLSTM"
] | [((452, 467), 'unittest.main', 'unittest.main', ([], {}), '()\n', (465, 467), False, 'import unittest\n'), ((162, 262), 'model.pasteme_rim.BidirectionalLSTM', 'BidirectionalLSTM', ([], {'host': '"""http://docker:8501"""', 'model_name': '"""PasteMeRIM"""', 'version': '(1)', 'max_length': '(128)'}), "(host='http://docker:8501', model_name='PasteMeRIM',\n version=1, max_length=128)\n", (179, 262), False, 'from model.pasteme_rim import BidirectionalLSTM\n')] |
#!/usr/bin/env python
import rospy
import xavier_command
from std_msgs.msg import String
from geometry_msgs.msg import Twist
class TurtleBot:
def callback(self, data):
print(data)
def listener(self):
print(xavier_command.FORWARD)
rospy.init_node('meuComputador', anonymous=True)
rospy.Subscriber('letterX', String, self.callback)
rospy.spin()
if __name__ == '__main__':
x = TurtleBot()
x.listener()
| [
"rospy.init_node",
"rospy.Subscriber",
"rospy.spin"
] | [((298, 346), 'rospy.init_node', 'rospy.init_node', (['"""meuComputador"""'], {'anonymous': '(True)'}), "('meuComputador', anonymous=True)\n", (313, 346), False, 'import rospy\n'), ((363, 413), 'rospy.Subscriber', 'rospy.Subscriber', (['"""letterX"""', 'String', 'self.callback'], {}), "('letterX', String, self.callback)\n", (379, 413), False, 'import rospy\n'), ((430, 442), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (440, 442), False, 'import rospy\n')] |
#Python Modules
import os
# Neural Network Modules
import torch
#Gym Environment Dependencies
import gym
from ai_invader.util import load_obj
from ai_invader.agent import EvoAgentTrainer
from ai_invader.model import DQNCNN
def main():
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Get the game_actions from the env
action_space = 6
# Get the number of agents per generation
num_agents = 2
# Get the input shape (No of frames, x pixels, y pixels)
# No of frames is to let AI to perceive motion
input_shape = (4, 160, 120)
# Get the Top k scores
elites = 1
# Number of generations to train the AI
generations = 2
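    # make_env (an environment factory callable) is assumed to be defined elsewhere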
    # Start evolution (training)
    ag = EvoAgentTrainer(input_shape, action_space, num_agents, elites, 1, env=make_env)
ag.train(generations)
# Load the model to evaluate
if __name__ == '__main__':
main() | [
"ai_invader.agent.EvoAgentTrainer",
"torch.cuda.is_available"
] | [((739, 818), 'ai_invader.agent.EvoAgentTrainer', 'EvoAgentTrainer', (['input_shape', 'action_space', 'num_agents', 'elites', '(1)'], {'env': 'make_env'}), '(input_shape, action_space, num_agents, elites, 1, env=make_env)\n', (754, 818), False, 'from ai_invader.agent import EvoAgentTrainer\n'), ((259, 284), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (282, 284), False, 'import torch\n')] |
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, User
from versatileimagefield.fields import VersatileImageField, PPOIField
class Category(models.Model):
name = models.CharField(max_length=255)
content = models.TextField()
class_id = models.IntegerField(unique=True)
def __str__(self):
return self.name
class Post(models.Model):
title = models.CharField(max_length=255)
author = models.ForeignKey(User, related_name='posts', on_delete=models.CASCADE)
content = models.TextField()
image = models.ManyToManyField('mona.Image', related_name='posts')
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
public = models.BooleanField(default=True)
category = models.ManyToManyField(Category, related_name='posts')
class Meta:
ordering = ['-created']
def __str__(self):
return self.title
class Like(models.Model):
post = models.ForeignKey(Post, related_name='liked_post', on_delete=models.CASCADE)
user = models.ForeignKey(User, related_name='liker', on_delete=models.CASCADE)
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{} : {}'.format(self.user, self.post)
class Comment(models.Model):
content = models.CharField(max_length=255)
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments', related_query_name='comment')
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments', related_query_name='comment')
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
def __str__(self):
return self.content
class Image(models.Model):
name = models.CharField(max_length=255)
image = VersatileImageField(
'Image',
upload_to='images/',
ppoi_field='image_ppoi',
)
image_ppoi = PPOIField()
public = models.BooleanField(default=True)
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
class Meta:
ordering = ['-created']
def __str__(self):
return self.name
| [
"django.db.models.DateField",
"django.db.models.TextField",
"versatileimagefield.fields.PPOIField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField",
"versatileimagefield.fields.VersatileImageField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((258, 290), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (274, 290), False, 'from django.db import models\n'), ((305, 323), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (321, 323), False, 'from django.db import models\n'), ((339, 371), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'unique': '(True)'}), '(unique=True)\n', (358, 371), False, 'from django.db import models\n'), ((459, 491), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (475, 491), False, 'from django.db import models\n'), ((505, 576), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""posts"""', 'on_delete': 'models.CASCADE'}), "(User, related_name='posts', on_delete=models.CASCADE)\n", (522, 576), False, 'from django.db import models\n'), ((591, 609), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (607, 609), False, 'from django.db import models\n'), ((622, 680), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (["'mona.Image'"], {'related_name': '"""posts"""'}), "('mona.Image', related_name='posts')\n", (644, 680), False, 'from django.db import models\n'), ((695, 730), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (711, 730), False, 'from django.db import models\n'), ((745, 776), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (761, 776), False, 'from django.db import models\n'), ((790, 823), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (809, 823), False, 'from django.db import models\n'), ((839, 893), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Category'], {'related_name': '"""posts"""'}), "(Category, related_name='posts')\n", (861, 893), False, 'from django.db import models\n'), ((1034, 1110), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'related_name': '"""liked_post"""', 'on_delete': 'models.CASCADE'}), "(Post, related_name='liked_post', on_delete=models.CASCADE)\n", (1051, 1110), False, 'from django.db import models\n'), ((1122, 1193), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""liker"""', 'on_delete': 'models.CASCADE'}), "(User, related_name='liker', on_delete=models.CASCADE)\n", (1139, 1193), False, 'from django.db import models\n'), ((1213, 1252), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1233, 1252), False, 'from django.db import models\n'), ((1375, 1407), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1391, 1407), False, 'from django.db import models\n'), ((1419, 1527), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'on_delete': 'models.CASCADE', 'related_name': '"""comments"""', 'related_query_name': '"""comment"""'}), "(Post, on_delete=models.CASCADE, related_name='comments',\n related_query_name='comment')\n", (1436, 1527), False, 'from django.db import models\n'), ((1535, 1643), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""comments"""', 'related_query_name': '"""comment"""'}), "(User, on_delete=models.CASCADE, related_name='comments',\n related_query_name='comment')\n", (1552, 1643), False, 'from django.db import models\n'), ((1654, 1689), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1670, 1689), False, 'from django.db import models\n'), ((1704, 1735), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1720, 1735), False, 'from django.db import models\n'), ((1826, 1858), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1842, 1858), False, 'from django.db import models\n'), ((1871, 1945), 'versatileimagefield.fields.VersatileImageField', 'VersatileImageField', (['"""Image"""'], {'upload_to': '"""images/"""', 'ppoi_field': '"""image_ppoi"""'}), "('Image', upload_to='images/', ppoi_field='image_ppoi')\n", (1890, 1945), False, 'from versatileimagefield.fields import VersatileImageField, PPOIField\n'), ((1994, 2005), 'versatileimagefield.fields.PPOIField', 'PPOIField', ([], {}), '()\n', (2003, 2005), False, 'from versatileimagefield.fields import VersatileImageField, PPOIField\n'), ((2019, 2052), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2038, 2052), False, 'from django.db import models\n'), ((2068, 2103), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2084, 2103), False, 'from django.db import models\n'), ((2118, 2149), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2134, 2149), False, 'from django.db import models\n')] |
import queue
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
'''
The binary tree in this example is shown as follow
1
/ \
2 3
/ \ /
4 5 6
/
7
level-order traversal: 1 2 3 4 5 6 7
'''
def levelorder_traversal(root):
if not root:
return
que = queue.Queue()
que.put(root)
while not que.empty():
node = que.get()
print(node.val, end=" ")
if node.left:
que.put(node.left)
if node.right:
que.put(node.right)
def generate_B_tree(nodes, index):
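    # recursively build a binary tree from a level-order (heap-style) list; None marks a missing node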
node = None
if index < len(nodes) and nodes[index]:
node = TreeNode(nodes[index])
node.left = generate_B_tree(nodes, index * 2 + 1)
node.right = generate_B_tree(nodes, index * 2 + 2)
return node
def main():
nodes = [1, 2, 3, 4, 5, 6, None, None, None, 7]
root = generate_B_tree(nodes, 0)
levelorder_traversal(root)
print()
if __name__ == '__main__':
main()
| [
"queue.Queue"
] | [((378, 391), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (389, 391), False, 'import queue\n')] |
#!/usr/bin/env python
__author__ = '<NAME>'
#============================================================================
import os
import numpy as np
from Utils.utils import ParserJSON, Printer
from ConfigSpace.hyperparameters import UniformFloatHyperparameter
from smac.configspace import ConfigurationSpace
from smac.scenario.scenario import Scenario
from smac.facade.smac_facade import SMAC as SMAC_instance
#============================================================================
class SMAC(Printer):
def __init__(self, config_file, work_dir):
Printer.__init__(self, 'SMAC', color = 'grey')
self.work_dir = work_dir
self._parse_config_file(config_file)
try:
			self.batch_size = self.param_dict['general']['batch_size']
			self.num_batches = self.param_dict['general']['batches_per_round']
		except KeyError:
			self.batch_size = 1
			self.num_batches = 1
self._create_config_space()
def rand_gens(self, var_type = 'float', size = 1):
if var_type == 'float':
return np.random.uniform(low = 0, high = 1, size = size)
else:
raise NotImplementedError
def _parse_config_file(self, config_file):
print(config_file)
self.json_parser = ParserJSON(file_name = config_file)
self.json_parser.parse()
self.param_dict = self.json_parser.param_dict
# now get the total number of variables
# and create a dictionary with the size of each variable
self.total_size = 0
self.var_sizes = []
self.var_names = []
for var_dict in self.param_dict['variables']:
self.total_size += var_dict[list(var_dict)[0]]['size']
self.var_sizes.append(int(var_dict[list(var_dict)[0]]['size']))
self.var_names.append(list(var_dict)[0])
def _create_config_space(self):
self.cs = ConfigurationSpace()
for var_index, var_dict in enumerate(self.param_dict['variables']):
variable = var_dict[self.var_names[var_index]]
if variable['type'] == 'float':
param = UniformFloatHyperparameter('x%d' % var_index, variable['low'], variable['high'])#, default = np.random.uniform(low = variable['low'], high = variable['high'], size = variable['size']))
else:
raise NotImplementedError()
self.cs.add_hyperparameter(param)
def _generate_uniform(self, num_samples = 10):
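		# draw uniform random samples for every variable to seed the search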
self.container, self.sampled_params = {}, {}
values = []
for var_index, var_name in enumerate(self.var_names):
sampled_values = self.rand_gens(var_type = self.param_dict['variables'][var_index][var_name]['type'], size = (self.param_dict['variables'][var_index][var_name]['size'], num_samples))
values.extend(sampled_values)
self.container[var_name] = sampled_values
values = np.array(values)
self.proposed = values.transpose()
def _parse_observations(self, observations):
all_params, all_losses = [], []
for observation in observations:
all_losses.append(observation['loss'])
params = []
for var_name in self.var_names:
params.extend(observation[var_name]['samples'])
all_params.append(params)
return all_params, all_losses
def _create_smac_instance(self):
scenario = Scenario({'run_obj': 'quality',
'runcount-limit': 500,
'cs': self.cs,
'deterministic': 'true'})
self.smac = SMAC_instance(scenario = scenario, rng = np.random.RandomState(np.random.randint(0, 10**5)))
def _sample_parameter_sets(self, num_samples, observations):
all_params, all_losses = self._parse_observations(observations)
self._create_smac_instance()
# get next parameter point
challengers = self.smac.solver.choose_next(np.array(all_params), np.array(all_losses), np.amin(all_losses))
self.proposed = []
for index in range(self.num_batches * self.batch_size):
self.proposed.append(challengers.challengers[index]._vector)
def choose(self, num_samples = None, observations = None):
current_dir = os.getcwd()
os.chdir(self.work_dir)
if not num_samples:
num_samples = self.param_dict['general']['batches_per_round']
if observations:
self._print('proposing samples')
self._sample_parameter_sets(num_samples, observations)
else:
self._print('choosing uniformly')
self._generate_uniform(num_samples)
os.chdir(current_dir)
return self.proposed
| [
"smac.configspace.ConfigurationSpace",
"numpy.amin",
"ConfigSpace.hyperparameters.UniformFloatHyperparameter",
"os.getcwd",
"os.chdir",
"Utils.utils.ParserJSON",
"numpy.array",
"numpy.random.uniform",
"numpy.random.randint",
"smac.scenario.scenario.Scenario",
"Utils.utils.Printer.__init__"
] | [((567, 611), 'Utils.utils.Printer.__init__', 'Printer.__init__', (['self', '"""SMAC"""'], {'color': '"""grey"""'}), "(self, 'SMAC', color='grey')\n", (583, 611), False, 'from Utils.utils import ParserJSON, Printer\n'), ((1160, 1193), 'Utils.utils.ParserJSON', 'ParserJSON', ([], {'file_name': 'config_file'}), '(file_name=config_file)\n', (1170, 1193), False, 'from Utils.utils import ParserJSON, Printer\n'), ((1706, 1726), 'smac.configspace.ConfigurationSpace', 'ConfigurationSpace', ([], {}), '()\n', (1724, 1726), False, 'from smac.configspace import ConfigurationSpace\n'), ((2600, 2616), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (2608, 2616), True, 'import numpy as np\n'), ((3028, 3127), 'smac.scenario.scenario.Scenario', 'Scenario', (["{'run_obj': 'quality', 'runcount-limit': 500, 'cs': self.cs,\n 'deterministic': 'true'}"], {}), "({'run_obj': 'quality', 'runcount-limit': 500, 'cs': self.cs,\n 'deterministic': 'true'})\n", (3036, 3127), False, 'from smac.scenario.scenario import Scenario\n'), ((3779, 3790), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3788, 3790), False, 'import os\n'), ((3793, 3816), 'os.chdir', 'os.chdir', (['self.work_dir'], {}), '(self.work_dir)\n', (3801, 3816), False, 'import os\n'), ((4105, 4126), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (4113, 4126), False, 'import os\n'), ((984, 1027), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': 'size'}), '(low=0, high=1, size=size)\n', (1001, 1027), True, 'import numpy as np\n'), ((3492, 3512), 'numpy.array', 'np.array', (['all_params'], {}), '(all_params)\n', (3500, 3512), True, 'import numpy as np\n'), ((3514, 3534), 'numpy.array', 'np.array', (['all_losses'], {}), '(all_losses)\n', (3522, 3534), True, 'import numpy as np\n'), ((3536, 3555), 'numpy.amin', 'np.amin', (['all_losses'], {}), '(all_losses)\n', (3543, 3555), True, 'import numpy as np\n'), ((1894, 1979), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (["('x%d' % var_index)", "variable['low']", "variable['high']"], {}), "('x%d' % var_index, variable['low'], variable['high']\n )\n", (1920, 1979), False, 'from ConfigSpace.hyperparameters import UniformFloatHyperparameter\n'), ((3225, 3254), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10 ** 5)'], {}), '(0, 10 ** 5)\n', (3242, 3254), True, 'import numpy as np\n')] |
import json
from jwt import base64url_decode
from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework_jwt import utils
class UtilsTests(TestCase):
def setUp(self):
self.username = 'jpueblo'
self.email = '<EMAIL>'
self.user = User.objects.create_user(self.username, self.email)
def test_jwt_payload_handler(self):
payload = utils.jwt_payload_handler(self.user)
self.assertTrue(isinstance(payload, dict))
self.assertEqual(payload['user_id'], self.user.pk)
self.assertEqual(payload['email'], self.email)
self.assertEqual(payload['username'], self.username)
self.assertTrue('exp' in payload)
def test_jwt_encode(self):
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
payload_data = base64url_decode(token.split('.')[1].encode('utf-8'))
payload_from_token = json.loads(payload_data.decode('utf-8'))
self.assertEqual(payload_from_token, payload)
def test_jwt_decode(self):
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
decoded_payload = utils.jwt_decode_handler(token)
self.assertEqual(decoded_payload, payload)
| [
"rest_framework_jwt.utils.jwt_payload_handler",
"rest_framework_jwt.utils.jwt_encode_handler",
"rest_framework_jwt.utils.jwt_decode_handler",
"django.contrib.auth.models.User.objects.create_user"
] | [((296, 347), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (['self.username', 'self.email'], {}), '(self.username, self.email)\n', (320, 347), False, 'from django.contrib.auth.models import User\n'), ((407, 443), 'rest_framework_jwt.utils.jwt_payload_handler', 'utils.jwt_payload_handler', (['self.user'], {}), '(self.user)\n', (432, 443), False, 'from rest_framework_jwt import utils\n'), ((763, 799), 'rest_framework_jwt.utils.jwt_payload_handler', 'utils.jwt_payload_handler', (['self.user'], {}), '(self.user)\n', (788, 799), False, 'from rest_framework_jwt import utils\n'), ((816, 849), 'rest_framework_jwt.utils.jwt_encode_handler', 'utils.jwt_encode_handler', (['payload'], {}), '(payload)\n', (840, 849), False, 'from rest_framework_jwt import utils\n'), ((1103, 1139), 'rest_framework_jwt.utils.jwt_payload_handler', 'utils.jwt_payload_handler', (['self.user'], {}), '(self.user)\n', (1128, 1139), False, 'from rest_framework_jwt import utils\n'), ((1156, 1189), 'rest_framework_jwt.utils.jwt_encode_handler', 'utils.jwt_encode_handler', (['payload'], {}), '(payload)\n', (1180, 1189), False, 'from rest_framework_jwt import utils\n'), ((1216, 1247), 'rest_framework_jwt.utils.jwt_decode_handler', 'utils.jwt_decode_handler', (['token'], {}), '(token)\n', (1240, 1247), False, 'from rest_framework_jwt import utils\n')] |
from . import auth
from app import db
from .security import generate_confirmation_token, confirm_token, send_mail_async
from flask import jsonify, request, redirect, url_for, abort, render_template
from .forms import ResetPasswordForm
import requests
from datetime import datetime
from flask_login import current_user, login_user, logout_user, login_required
from .models import UserProfile, UserAccount, UserAccountStatus, FacebookAccount
@auth.route("register", methods=["GET", "POST"])
def register():
"""
Registers a new user, get request data, parse it and register user accordingly
successful registration of user will store data in db and send back a response to client
informing user to confirm their email account. (An email will be sent for confirmation)
Thus, afterwards, the user will then confirm their email and the client will then
redirect user to login and they can proceed to login with their registered credentials
:return: JSON response of the registering user process
"""
# if the data from request values is available, perform data transaction
if request.method == "POST":
email = request.values.get("email")
user_account = UserAccount.query.filter_by(email=email).first()
# check if the user already exists
if user_account is not None:
# return registration failed message back to client
return jsonify(dict(response=400, message="User already exists"))
else:
# create the new user and store values in dict
email = request.values.get("email")
first_name = request.values.get("first_name")
last_name = request.values.get("last_name")
username = request.values.get("username")
password = request.values.get("password")
# create a new user profile
new_user_profile = UserProfile(
email=email,
first_name=first_name,
last_name=last_name,
accept_tos=True,
)
# add the new user profile and commit
db.session.add(new_user_profile)
db.session.commit()
# now we add the status of this new account and commit it
new_user_account_status = UserAccountStatus(code="0", name="EMAIL_NON_CONFIRMED")
db.session.add(new_user_account_status)
db.session.commit()
# add the new user account and commit it
new_user_account = UserAccount(
email=email,
username=username,
password=password,
user_profile_id=new_user_profile.id,
user_account_status_id=new_user_account_status.id
)
db.session.add(new_user_account)
db.session.commit()
# create a token from the new user account
token = new_user_account.generate_confirm_token()
# _external adds the full absolute URL that includes the hostname and port
confirm_url = url_for("auth.confirm_email", token=token, _external=True)
# send user confirmation email asynchronously
# Todo: fails to send email, why?
send_mail_async.delay(new_user_account.email, "Please Confirm you email",
"auth.confirm_email.html", confirm_url)
# log in the new user
login_user(new_user_account)
# post a success message back to client so that the client can redirect user
# to login
return jsonify(dict(status="success", message="User created",
state="User Logged in", response=200,
confirm_email_sent=True))
elif request.method == "GET":
return jsonify(dict())
return jsonify(dict())
@auth.route('confirm/<token>')
# @login_required
def confirm_email(token):
"""
Confirm email route for the user.
    Checks if the user has already confirmed their account.
    If they have, log them in. If they have not,
    confirm their account and direct them to their dashboard. We call the confirm_token()
function, passing in the token. If successful, we update the user,
changing the email_confirmed attribute to True and setting the datetime for when the
confirmation took place.
Also, in case the user already went through the confirmation process – and is
confirmed – then we alert the user of this.
:param token: Generated in the user registration
:return: A redirect to login
"""
# if the current user had been confirmed, redirect them to login
if current_user.confirmed:
return redirect(url_for('auth.login'))
# else confirm them
# get the email for the confirmed
email = confirm_token(token)
# get the author or throw an error
user = UserAccount.query.filter_by(email=current_user.email).first_or_404()
if user.email == email:
user.confirmed = True
user.confirmed_on = datetime.now()
# update the confirmed_on column
db.session.add(user)
db.session.commit()
# redirect to login
return redirect(url_for('auth.login'))
else:
pass
# redirect to the user's dashboard
return redirect(url_for('auth.login'))
@auth.route("login", methods=["GET", "POST"])
def login():
pass
@auth.route("signup", methods=["GET", "POST"])
def signup():
pass
if request.method == "POST":
# if request is POST, retrieve data and log in the user
user_email = request.values.get("email")
user_password = request.values.get("password")
# get the user object and check if they exist
user = UserAccount.query.filter_by(email=user_email).first()
# if the user exists, check their password
if user is not None:
if user.verify_password(user_password):
# log in the user
login_user(user)
# return response to client
return jsonify(dict(message="Logged in success", success=True, response_code=200))
else:
# wrong password, return error message to client
return jsonify(dict(message="Log in Failure", success=False, response_code=400,
cause="Wrong password"))
else:
# this user does not exist
return jsonify(dict(message="User does not exist", success=False, response_code=400))
return jsonify(dict())
@auth.route("reset", methods=["GET", "POST"])
def reset_password():
"""
Resets the user's password if they have forgotten it. In this case, we shall get the user email
and send a confirmation link to the given email. This, in turn, will let us know that the user
exists, because they will then click on the url in their email and be given instructions on
resetting the user password
:return: Response to user stating that their new password has been sent to their email
"""
if request.method == "POST":
# get email from request
email = request.values.get("email")
# generate token
token = generate_confirmation_token(email)
# create the recover url to be sent in the email
recover_url = url_for("auth.reset_with_token", token=token, _external=True)
# send user confirmation email asynchronously
# Todo: fails to send email, why?
send_mail_async.delay(email, "Please reset requested", "auth.reset_email.html", recover_url)
return jsonify(dict(message="Password reset sent", success=True))
return jsonify(dict())
@auth.route("reset_password/<token>")
def reset_with_token(token):
"""
Resets the user account with the given token they will get from the url given in their email
:param token: random secure token user will get in their email address
:return:
"""
# get the email for user to reset their account
email = confirm_token(token)
if email is None:
abort(404)
form = ResetPasswordForm(request.form)
if form.validate_on_submit():
# get the author or throw an error
user = UserAccount.query.filter_by(email=email).first_or_404()
user.password = form.password.data
user.confirmed = True
user.confirmed_on = datetime.now()
# update the confirmed_on column
db.session.add(user)
db.session.commit()
# todo: redirect to client login page
return redirect(url_for('auth.login'))
# render this template
return render_template('auth.reset_with_token.html', form=form)
@auth.route("facebook", methods=["GET", "POST"])
def login_with_facebook():
"""
Login user with facebook
"""
pass
@auth.route("google", methods=["GET", "POST"])
def login_with_google():
"""
Login user with facebook
"""
pass
@auth.route("twitter", methods=["GET", "POST"])
def login_with_twitter():
"""
Login user with facebook
"""
pass
@auth.route("logout")
@login_required
def logout():
"""
Logs out the user from server session
:return: json response
:rtype: dict
"""
logout_user()
return jsonify(dict(message="User logged out", success=True))
| [
"flask.render_template",
"app.db.session.commit",
"flask_login.login_user",
"flask_login.logout_user",
"flask.url_for",
"datetime.datetime.now",
"flask.request.values.get",
"app.db.session.add",
"flask.abort"
] | [((8725, 8781), 'flask.render_template', 'render_template', (['"""auth.reset_with_token.html"""'], {'form': 'form'}), "('auth.reset_with_token.html', form=form)\n", (8740, 8781), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((9332, 9345), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (9343, 9345), False, 'from flask_login import current_user, login_user, logout_user, login_required\n'), ((1210, 1237), 'flask.request.values.get', 'request.values.get', (['"""email"""'], {}), "('email')\n", (1228, 1237), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((5128, 5142), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5140, 5142), False, 'from datetime import datetime\n'), ((5193, 5213), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (5207, 5213), False, 'from app import db\n'), ((5222, 5241), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5239, 5241), False, 'from app import db\n'), ((5400, 5421), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (5407, 5421), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((5683, 5710), 'flask.request.values.get', 'request.values.get', (['"""email"""'], {}), "('email')\n", (5701, 5710), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((5735, 5765), 'flask.request.values.get', 'request.values.get', (['"""password"""'], {}), "('password')\n", (5753, 5765), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((7238, 7265), 'flask.request.values.get', 'request.values.get', (['"""email"""'], {}), "('email')\n", (7256, 7265), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((7423, 7484), 'flask.url_for', 'url_for', (['"""auth.reset_with_token"""'], {'token': 'token', '_external': '(True)'}), "('auth.reset_with_token', token=token, _external=True)\n", (7430, 7484), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((8172, 8182), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (8177, 8182), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((8479, 8493), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8491, 8493), False, 'from datetime import datetime\n'), ((8544, 8564), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (8558, 8564), False, 'from app import db\n'), ((8573, 8592), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8590, 8592), False, 'from app import db\n'), ((1626, 1653), 'flask.request.values.get', 'request.values.get', (['"""email"""'], {}), "('email')\n", (1644, 1653), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((1679, 1711), 'flask.request.values.get', 'request.values.get', (['"""first_name"""'], {}), "('first_name')\n", (1697, 1711), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((1736, 1767), 'flask.request.values.get', 'request.values.get', (['"""last_name"""'], {}), "('last_name')\n", (1754, 1767), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((1791, 1821), 'flask.request.values.get', 'request.values.get', (['"""username"""'], {}), "('username')\n", (1809, 1821), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((1845, 1875), 'flask.request.values.get', 'request.values.get', (['"""password"""'], {}), "('password')\n", (1863, 1875), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((2176, 2208), 'app.db.session.add', 'db.session.add', (['new_user_profile'], {}), '(new_user_profile)\n', (2190, 2208), False, 'from app import db\n'), ((2221, 2240), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2238, 2240), False, 'from app import db\n'), ((2419, 2458), 'app.db.session.add', 'db.session.add', (['new_user_account_status'], {}), '(new_user_account_status)\n', (2433, 2458), False, 'from app import db\n'), ((2471, 2490), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2488, 2490), False, 'from app import db\n'), ((2834, 2866), 'app.db.session.add', 'db.session.add', (['new_user_account'], {}), '(new_user_account)\n', (2848, 2866), False, 'from app import db\n'), ((2879, 2898), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2896, 2898), False, 'from app import db\n'), ((3131, 3189), 'flask.url_for', 'url_for', (['"""auth.confirm_email"""'], {'token': 'token', '_external': '(True)'}), "('auth.confirm_email', token=token, _external=True)\n", (3138, 3189), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((3502, 3530), 'flask_login.login_user', 'login_user', (['new_user_account'], {}), '(new_user_account)\n', (3512, 3530), False, 'from flask_login import current_user, login_user, logout_user, login_required\n'), ((4802, 4823), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (4809, 4823), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((5295, 5316), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (5302, 5316), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((8664, 8685), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (8671, 8685), False, 'from flask import jsonify, request, redirect, url_for, abort, render_template\n'), ((6073, 6089), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (6083, 6089), False, 'from flask_login import current_user, login_user, logout_user, login_required\n')] |
'''Summarise the content of all the articles in the graph database'''
from transformers import BartTokenizer, BartForConditionalGeneration
from tqdm.auto import tqdm
import warnings
from .graph import Graph
def summarise_articles(start_from_scratch: bool = False):
# Initialise the graph database
graph = Graph()
# Remove all summaries from the graph
if start_from_scratch:
query = '''
MATCH (a:Article)
WHERE exists(a.summary)
REMOVE a.summary
'''
graph.query(query)
# Load the summarisation model and its tokeniser
transformer = 'facebook/bart-large-cnn'
tokeniser = BartTokenizer.from_pretrained(transformer)
model = BartForConditionalGeneration.from_pretrained(transformer).cuda()
# Define the cypher query used to get the next article
get_article_query = '''
MATCH (a:Article)
WHERE exists(a.title_en) AND
exists(a.content_en) AND
NOT exists(a.summary)
RETURN a.url as url, a.title_en as title, a.content_en as content
LIMIT 2
'''
# Define the cypher query used to set the summary on the Article node
set_summary_query = '''
UNWIND $url_summaries as url_summary
MATCH (a:Article {url:url_summary.url})
SET a.summary = url_summary.summary
'''
# Define the cypher query used to count the remaining articles
total_count_query = '''
MATCH (a:Article)
RETURN count(a) as num_articles
'''
summarised_count_query = '''
MATCH (a:Article)
WHERE exists(a.summary)
RETURN count(a) as num_articles
'''
not_summarised_count_query = '''
MATCH (a:Article)
WHERE not exists(a.summary)
RETURN count(a) as num_articles
'''
# Get the total number of articles and define a progress bar
num_urls = graph.query(total_count_query).num_articles[0]
num_summarised = graph.query(summarised_count_query).num_articles[0]
pbar = tqdm(total=num_urls, desc='Summarising articles')
pbar.update(num_summarised)
    # Continue summarising until all articles have been summarised
while graph.query(not_summarised_count_query).num_articles[0] > 0:
# Fetch new articles
article_df = graph.query(get_article_query)
urls = article_df.url.tolist()
docs = [row.title + '\n' + row.content
for _, row in article_df.iterrows()]
# Tokenise the content of the articles
with warnings.catch_warnings():
warnings.simplefilter('ignore')
tokens = tokeniser(docs, return_tensors='pt', padding=True,
truncation=True, max_length=1_000)
# Extract the summary of the articles
summary_ids = model.generate(tokens['input_ids'].cuda(),
num_beams=4,
max_length=512,
early_stopping=True)
summaries = tokeniser.batch_decode(
summary_ids,
skip_special_tokens=True,
clean_up_tokenization_spaces=False
)
# Set the summary as an attribute on the Article nodes
url_summaries = [dict(url=url, summary=summary)
for url, summary in zip(urls, summaries)]
graph.query(set_summary_query, url_summaries=url_summaries)
# Update the progress bar
pbar.update(len(docs))
pbar.total = graph.query(total_count_query).num_articles[0]
pbar.refresh()
# Close the progress bar
pbar.close()
| [
"transformers.BartForConditionalGeneration.from_pretrained",
"warnings.catch_warnings",
"tqdm.auto.tqdm",
"warnings.simplefilter",
"transformers.BartTokenizer.from_pretrained"
] | [((663, 705), 'transformers.BartTokenizer.from_pretrained', 'BartTokenizer.from_pretrained', (['transformer'], {}), '(transformer)\n', (692, 705), False, 'from transformers import BartTokenizer, BartForConditionalGeneration\n'), ((2023, 2072), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'num_urls', 'desc': '"""Summarising articles"""'}), "(total=num_urls, desc='Summarising articles')\n", (2027, 2072), False, 'from tqdm.auto import tqdm\n'), ((718, 775), 'transformers.BartForConditionalGeneration.from_pretrained', 'BartForConditionalGeneration.from_pretrained', (['transformer'], {}), '(transformer)\n', (762, 775), False, 'from transformers import BartTokenizer, BartForConditionalGeneration\n'), ((2527, 2552), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2550, 2552), False, 'import warnings\n'), ((2566, 2597), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2587, 2597), False, 'import warnings\n')] |
#!/usr/bin/python3
# by <NAME>
# watches raspberry pi GPIO pins and translates that
# behavior into midi data. Midi data is accessible to
# other clients through a virtual midi device that is
# created with amidithru, via os.system
import RPi.GPIO as GPIO
import time
import mido
import os
import re
#
# script setup
#
# midi device naming and setup
name = "GuitarRotaryEncoder"
# set up pi GPIO pins for rotary encoder
sw_pin = 19
dt_pin = 21
clk_pin = 23
# button midi info; cc#12 = effect control 1
button_state = 0
button_channel = 0
button_cc_num = 12
# don't let button signals trigger until X ms have passed
button_stagger_time = 220
# knob midi info; cc#7 = volume, default position near half
position = 63
rotary_increment = 1
rotary_channel = 0
rotary_cc_num = 7
# wait some seconds for other software after reboot
init_sleep_secs = 10
#
# subroutines
#
def ret_mili_time():
current_milli_time = int(round(time.time() * 1000))
return current_milli_time
def short_circuit_time(val):
global last_time
myTime = ret_mili_time()
time_diff = myTime - last_time
if (time_diff > val):
last_time = myTime
return 0
else:
return 1
def send_cc(channel, ccnum, val):
msg = mido.Message('control_change', channel=channel, control=ccnum, value=val)
output = mido.open_output(output_name)
output.send(msg)
def rotary_callback(unused):
# rotating clockwise will cause pins to be different
global position
global rotary_increment
# rotary encoder voltages are equal when going counter-clockwise
if (GPIO.input(sw_pin) == GPIO.input(clk_pin)):
position -= rotary_increment
if (position < 0):
position = 0
#print("counterclockwise, pos = %s", position)
else:
position += rotary_increment
if (position > 127):
position = 127
#print("clockwise, pos = %s", position)
send_cc(rotary_channel, rotary_cc_num, position)
def button_push(unused):
global button_state
global button_stagger_time
# do not trigger button actions unless 220 ms have passed
if (short_circuit_time(button_stagger_time)):
return
#print("Button was released!")
if (button_state == 1):
button_state = 0
else:
button_state = 1
midi_state = 127 * button_state
send_cc(button_channel, button_cc_num, midi_state)
#
# main stuff below
# TODO maybe use the pythonic if __name__ == "__main__":
#
# use P1 header pin numbering convention, ignore warnings
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
# Set up the GPIO channels
GPIO.setup(dt_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # dt
GPIO.setup(sw_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # sw
GPIO.setup(clk_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # clk
# wait some seconds, so we don't step on MODEP's toes
time.sleep(init_sleep_secs)
# set up backend
mido.set_backend('mido.backends.rtmidi')
# system command to set up the midi thru port
# TODO would be nice to do this in python, but
# rtmidi has issues seeing ports it has created
runCmd = "amidithru '" + name + "' &"
os.system(runCmd)
# regex to match on rtmidi port name convention
#GuitarRotaryEncoder:GuitarRotaryEncoder 132:0
# TODO is it necessary to write: "\s+(\d+)?:\d+)" instead?
nameRegex = "(" + name + ":" + name + "\s+\d+:\d+)"
matcher = re.compile(nameRegex)
newList = list(filter(matcher.match, mido.get_output_names()))
# all to get the name of the thing we just made
output_name = newList[0]
# starting time
last_time = ret_mili_time()
# button
GPIO.add_event_detect(dt_pin,GPIO.FALLING,callback=button_push)
# rotary encoder
GPIO.add_event_detect(clk_pin,GPIO.BOTH,callback=rotary_callback)
# keep running
while True:
time.sleep(0.1)
| [
"RPi.GPIO.add_event_detect",
"re.compile",
"RPi.GPIO.setup",
"mido.get_output_names",
"RPi.GPIO.setwarnings",
"time.sleep",
"mido.Message",
"mido.set_backend",
"mido.open_output",
"RPi.GPIO.input",
"os.system",
"time.time",
"RPi.GPIO.setmode"
] | [((2434, 2458), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (2446, 2458), True, 'import RPi.GPIO as GPIO\n'), ((2459, 2482), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (2475, 2482), True, 'import RPi.GPIO as GPIO\n'), ((2511, 2566), 'RPi.GPIO.setup', 'GPIO.setup', (['dt_pin', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_DOWN'}), '(dt_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n', (2521, 2566), True, 'import RPi.GPIO as GPIO\n'), ((2572, 2627), 'RPi.GPIO.setup', 'GPIO.setup', (['sw_pin', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_DOWN'}), '(sw_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n', (2582, 2627), True, 'import RPi.GPIO as GPIO\n'), ((2633, 2689), 'RPi.GPIO.setup', 'GPIO.setup', (['clk_pin', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_DOWN'}), '(clk_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n', (2643, 2689), True, 'import RPi.GPIO as GPIO\n'), ((2751, 2778), 'time.sleep', 'time.sleep', (['init_sleep_secs'], {}), '(init_sleep_secs)\n', (2761, 2778), False, 'import time\n'), ((2797, 2837), 'mido.set_backend', 'mido.set_backend', (['"""mido.backends.rtmidi"""'], {}), "('mido.backends.rtmidi')\n", (2813, 2837), False, 'import mido\n'), ((3018, 3035), 'os.system', 'os.system', (['runCmd'], {}), '(runCmd)\n', (3027, 3035), False, 'import os\n'), ((3255, 3276), 're.compile', 're.compile', (['nameRegex'], {}), '(nameRegex)\n', (3265, 3276), False, 'import re\n'), ((3468, 3533), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['dt_pin', 'GPIO.FALLING'], {'callback': 'button_push'}), '(dt_pin, GPIO.FALLING, callback=button_push)\n', (3489, 3533), True, 'import RPi.GPIO as GPIO\n'), ((3550, 3617), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['clk_pin', 'GPIO.BOTH'], {'callback': 'rotary_callback'}), '(clk_pin, GPIO.BOTH, callback=rotary_callback)\n', (3571, 3617), True, 'import RPi.GPIO as GPIO\n'), ((1212, 1285), 'mido.Message', 'mido.Message', (['"""control_change"""'], {'channel': 'channel', 'control': 'ccnum', 'value': 'val'}), "('control_change', channel=channel, control=ccnum, value=val)\n", (1224, 1285), False, 'import mido\n'), ((1297, 1326), 'mido.open_output', 'mido.open_output', (['output_name'], {}), '(output_name)\n', (1313, 1326), False, 'import mido\n'), ((3648, 3663), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3658, 3663), False, 'import time\n'), ((1549, 1567), 'RPi.GPIO.input', 'GPIO.input', (['sw_pin'], {}), '(sw_pin)\n', (1559, 1567), True, 'import RPi.GPIO as GPIO\n'), ((1571, 1590), 'RPi.GPIO.input', 'GPIO.input', (['clk_pin'], {}), '(clk_pin)\n', (1581, 1590), True, 'import RPi.GPIO as GPIO\n'), ((3314, 3337), 'mido.get_output_names', 'mido.get_output_names', ([], {}), '()\n', (3335, 3337), False, 'import mido\n'), ((928, 939), 'time.time', 'time.time', ([], {}), '()\n', (937, 939), False, 'import time\n')] |
import cv2
cap = cv2.VideoCapture(
"rtsp://admin:[email protected]:554/Streaming/Channels/1")
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
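# note: many RTSP/network sources ignore CAP_PROP_FRAME_* requests and keep their native resolution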
ret, frame = cap.read()
while ret:
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    ret, frame = cap.read()  # read the next frame last so a failed read ends the loop before imshow sees None
cv2.destroyAllWindows()
cap.release()
| [
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.imshow"
] | [((17, 96), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""rtsp://admin:[email protected]:554/Streaming/Channels/1"""'], {}), "('rtsp://admin:[email protected]:554/Streaming/Channels/1')\n", (33, 96), False, 'import cv2\n'), ((333, 356), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (354, 356), False, 'import cv2\n'), ((250, 276), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (260, 276), False, 'import cv2\n'), ((284, 298), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (295, 298), False, 'import cv2\n')] |
from dmppl.scripts.beamer_times import entryPoint
from dmppl.base import rdTxt
from dmppl.test import runEntryPoint
import os
import tempfile
import shutil
import sys
import unittest
class Test_BeamerTimes(unittest.TestCase): # {{{
def setUp(self):
self.tstDir = tempfile.mkdtemp()
self.tex0 = r'''
ignore
these lines
1m23s 23m45s also ignored
including lines with the word "frametitle"
\frametitle{Or missing time notation}
\frametitle{SingleWordA} % 1m23s
\frametitle{SingleWordB} More TeX \commands here. % foo bar 1m23s
\frametitle{SingleWordC} % A comment in here 1m23s
\frametitle{SingleWordD} more TeX \commands here % and a comment 1m23s
Some
lines between
the frame titles
\frametitle{A Multi Words} foo bar % foo bar 1m23s
\frametitle{B Just use the last time} % 1m23s 23m45s
\frametitle{C Allow X or x to denote 0 time.} foo % XmXs
\frametitle{D time counter continues afterwards} % 1m23s
\frametitle{E time not in a comment (invalid TeX)} 1m23s
\frametitle{F Some} 0m5s
\frametitle{G weird but allowed time notations} 12m345s
asdf
'''
self.fnamei0 = os.path.join(self.tstDir, "tst0.tex")
with open(self.fnamei0, 'w') as fd:
fd.write(self.tex0)
self.goldenOut0 = '''\
# Slide Finish Title
1 1m23s 1m23s SingleWordA
2 1m23s 2m46s SingleWordB
3 1m23s 4m09s SingleWordC
4 1m23s 5m32s SingleWordD
5 1m23s 6m55s A Multi Words
6 23m45s 30m40s B Just use the last time
7 0m00s 30m40s C Allow X or x to denote 0 time.
8 1m23s 32m03s D time counter continues afterwards
9 1m23s 33m26s E time not in a comment (invalid TeX)
10 0m05s 33m31s F Some
11 17m45s 51m16s G weird but allowed time notations
'''
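        # sanity check on the golden data: 12m345s = 12*60 + 345 = 1065 s = 17m45s,
        # and 33m31s + 17m45s = 51m16s in the cumulative "Finish" column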
def tearDown(self):
shutil.rmtree(self.tstDir)
def test_Basic0(self):
cmd = "beamer-times %s" % (self.fnamei0)
stdout, stderr = runEntryPoint(cmd, entryPoint)
self.assertEqual(stderr, "")
self.assertEqual(self.goldenOut0, stdout)
def test_FileIO(self):
fnameo = self.fnamei0 + ".rpt"
cmd = "beamer-times %s -o %s" % (self.fnamei0, fnameo)
stdout, stderr = runEntryPoint(cmd, entryPoint, stdinput="fubar")
self.assertEqual(stderr, "")
resultTxt = rdTxt(os.path.join(self.tstDir, fnameo))
self.assertEqual(self.goldenOut0, resultTxt)
def test_StdIO(self):
cmd = "beamer-times"
stdout, stderr = runEntryPoint(cmd, entryPoint, stdinput=self.tex0)
self.assertEqual(stderr, "")
self.assertEqual(self.goldenOut0, stdout)
# }}} class Test_BeamerTimes
| [
"os.path.join",
"tempfile.mkdtemp",
"dmppl.test.runEntryPoint",
"shutil.rmtree"
] | [((278, 296), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (294, 296), False, 'import tempfile\n'), ((1094, 1131), 'os.path.join', 'os.path.join', (['self.tstDir', '"""tst0.tex"""'], {}), "(self.tstDir, 'tst0.tex')\n", (1106, 1131), False, 'import os\n'), ((1735, 1761), 'shutil.rmtree', 'shutil.rmtree', (['self.tstDir'], {}), '(self.tstDir)\n', (1748, 1761), False, 'import shutil\n'), ((1864, 1894), 'dmppl.test.runEntryPoint', 'runEntryPoint', (['cmd', 'entryPoint'], {}), '(cmd, entryPoint)\n', (1877, 1894), False, 'from dmppl.test import runEntryPoint\n'), ((2137, 2185), 'dmppl.test.runEntryPoint', 'runEntryPoint', (['cmd', 'entryPoint'], {'stdinput': '"""fubar"""'}), "(cmd, entryPoint, stdinput='fubar')\n", (2150, 2185), False, 'from dmppl.test import runEntryPoint\n'), ((2418, 2468), 'dmppl.test.runEntryPoint', 'runEntryPoint', (['cmd', 'entryPoint'], {'stdinput': 'self.tex0'}), '(cmd, entryPoint, stdinput=self.tex0)\n', (2431, 2468), False, 'from dmppl.test import runEntryPoint\n'), ((2249, 2282), 'os.path.join', 'os.path.join', (['self.tstDir', 'fnameo'], {}), '(self.tstDir, fnameo)\n', (2261, 2282), False, 'import os\n')] |
import glob
import os.path
import numpy as np
import os
# image data folder
INPUT_DATA = 'D:\\scrawl_images\\images2_160\\'
def create_same_pairs():
matched_result = set()
k = 0
    # walk INPUT_DATA for all subdirectories; each x from os.walk is a 3-tuple (root, dirs, files):
    # root is the current directory, dirs are its subdirectories, files are the files it contains
sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]
match_num = 3000
while len(matched_result) < match_num:
for sub_dir in sub_dirs[1:]:
if len(matched_result) >= match_num:
break
            # images pre-generated with MTCNN all have the png extension
            extensions = 'png'
            # collect one person's images in the person_pics list
person_pics = []
dir_name = os.path.basename(sub_dir)
file_glob = os.path.join(INPUT_DATA, dir_name, '*.' + extensions)
person_pics.extend(glob.glob(file_glob))
if not person_pics: continue
            # use the directory name as the class (person) label
label_name = dir_name
length = len(person_pics)
random_number1 = np.random.randint(50)
random_number2 = np.random.randint(50)
            base_name1 = os.path.basename(person_pics[random_number1 % length])  # get the file name
base_name2 = os.path.basename(person_pics[random_number2 % length])
if person_pics[random_number1 % length] != person_pics[random_number2 % length]:
matched_result.add(label_name + '\t' + base_name1[base_name1.rfind('_')+1:base_name1.rfind('.')] + '\t' + base_name2[base_name2.rfind('_')+1:base_name2.rfind('.')])
k += 1
if k % 100 == 0:
print('len(match): %d' % len(matched_result))
print(k)
if len(matched_result) >= match_num:
break
    # return all the assembled data
    return matched_result, match_num
# create pairs.txt
def create_diff_pairs():
    unmatched_result = set()  # matching pairs drawn from different classes
k = 0
sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]
    # sub_dirs[0] is the path of INPUT_DATA itself; skip it and consider only its subdirectories
for sub_dir in sub_dirs[1:]:
        # gather all valid image files under the current directory
        extensions = ['png']
        file_list = []
        # store the images in the file_list list
dir_name = os.path.basename(sub_dir)
for extension in extensions:
file_glob = os.path.join(INPUT_DATA, dir_name, '*.' + extension)
            # glob.glob(file_glob) collects every image in the given directory into file_list
file_list.extend(glob.glob(file_glob))
length_of_dir = len(sub_dirs)
print(length_of_dir)
match_num = 3000
for k in range(1000):
for i in range(length_of_dir):
if len(unmatched_result) >= match_num:
break
class1 = sub_dirs[i]
random_num = np.random.randint(5000000)
i2 = random_num % length_of_dir
if i == i2:
continue
class2 = sub_dirs[i2]
class1_name = os.path.basename(class1)
class2_name = os.path.basename(class2)
            # gather all valid image files for the two classes
            extensions = 'png'
            file_list1 = []
            file_list2 = []
            # store the images in the per-class file lists
file_glob1 = os.path.join(INPUT_DATA, class1_name, '*.' + extension)
file_list1.extend(glob.glob(file_glob1))
file_glob2 = os.path.join(INPUT_DATA, class2_name, '*.' + extension)
file_list2.extend(glob.glob(file_glob2))
if file_list1 and file_list2:
                base_name1 = os.path.basename(file_list1[random_num % len(file_list1)])  # get the file name
base_name2 = os.path.basename(file_list2[random_num % len(file_list2)])
# unmatched_result.add([class1_name, base_name1, class2_name, base_name2])
s = class2_name + '\t' + base_name2 + '\t' + class1_name + '\t' + base_name1
if (s not in unmatched_result):
unmatched_result.add(class1_name + '\t' + base_name1[base_name1.rfind('_')+1:base_name1.rfind('.')] + '\t' + class2_name + '\t' + base_name2[base_name2.rfind('_')+1:base_name2.rfind('.')])
k = k + 1
if k % 100 == 0:
print(k)
if len(unmatched_result) >= match_num:
break
return unmatched_result, match_num
result, k1 = create_same_pairs()
print(len(result))
# print(result)
result_un, k2 = create_diff_pairs()
print(len(result_un))
# print(result_un)
file = open(os.path.join(INPUT_DATA, 'pairs.txt'), 'w', encoding='utf-8')
result1 = list(result)
result2 = list(result_un)
file.write('10 300\n')
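# "10 300" header mirrors the LFW pairs.txt convention: 10 folds, each with 300 matched + 300 mismatched pairs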
j = 0
for i in range(100):
j = 0
print("=============================================第" + str(i) + '次, 相同的')
for pair in result1[i * 300:i * 300 + 300]:
j = j + 1
print(str(j) + ': ' + pair)
file.write(pair + '\n')
print("=============================================第" + str(i) + '次, 不同的')
for pair in result2[i * 300:i * 300 + 300]:
j = j + 1
print(str(j) + ': ' + pair)
        file.write(pair + '\n')
file.close()
"os.walk",
"os.path.join",
"numpy.random.randint",
"os.path.basename",
"glob.glob"
] | [((4363, 4400), 'os.path.join', 'os.path.join', (['INPUT_DATA', '"""pairs.txt"""'], {}), "(INPUT_DATA, 'pairs.txt')\n", (4375, 4400), False, 'import os\n'), ((2134, 2159), 'os.path.basename', 'os.path.basename', (['sub_dir'], {}), '(sub_dir)\n', (2150, 2159), False, 'import os\n'), ((324, 343), 'os.walk', 'os.walk', (['INPUT_DATA'], {}), '(INPUT_DATA)\n', (331, 343), False, 'import os\n'), ((673, 698), 'os.path.basename', 'os.path.basename', (['sub_dir'], {}), '(sub_dir)\n', (689, 698), False, 'import os\n'), ((723, 776), 'os.path.join', 'os.path.join', (['INPUT_DATA', 'dir_name', "('*.' + extensions)"], {}), "(INPUT_DATA, dir_name, '*.' + extensions)\n", (735, 776), False, 'import os\n'), ((999, 1020), 'numpy.random.randint', 'np.random.randint', (['(50)'], {}), '(50)\n', (1016, 1020), True, 'import numpy as np\n'), ((1050, 1071), 'numpy.random.randint', 'np.random.randint', (['(50)'], {}), '(50)\n', (1067, 1071), True, 'import numpy as np\n'), ((1097, 1151), 'os.path.basename', 'os.path.basename', (['person_pics[random_number1 % length]'], {}), '(person_pics[random_number1 % length])\n', (1113, 1151), False, 'import os\n'), ((1188, 1242), 'os.path.basename', 'os.path.basename', (['person_pics[random_number2 % length]'], {}), '(person_pics[random_number2 % length])\n', (1204, 1242), False, 'import os\n'), ((1908, 1927), 'os.walk', 'os.walk', (['INPUT_DATA'], {}), '(INPUT_DATA)\n', (1915, 1927), False, 'import os\n'), ((2221, 2273), 'os.path.join', 'os.path.join', (['INPUT_DATA', 'dir_name', "('*.' + extension)"], {}), "(INPUT_DATA, dir_name, '*.' + extension)\n", (2233, 2273), False, 'import os\n'), ((2663, 2689), 'numpy.random.randint', 'np.random.randint', (['(5000000)'], {}), '(5000000)\n', (2680, 2689), True, 'import numpy as np\n'), ((2844, 2868), 'os.path.basename', 'os.path.basename', (['class1'], {}), '(class1)\n', (2860, 2868), False, 'import os\n'), ((2895, 2919), 'os.path.basename', 'os.path.basename', (['class2'], {}), '(class2)\n', (2911, 2919), False, 'import os\n'), ((3096, 3151), 'os.path.join', 'os.path.join', (['INPUT_DATA', 'class1_name', "('*.' + extension)"], {}), "(INPUT_DATA, class1_name, '*.' + extension)\n", (3108, 3151), False, 'import os\n'), ((3230, 3285), 'os.path.join', 'os.path.join', (['INPUT_DATA', 'class2_name', "('*.' + extension)"], {}), "(INPUT_DATA, class2_name, '*.' + extension)\n", (3242, 3285), False, 'import os\n'), ((808, 828), 'glob.glob', 'glob.glob', (['file_glob'], {}), '(file_glob)\n', (817, 828), False, 'import glob\n'), ((2364, 2384), 'glob.glob', 'glob.glob', (['file_glob'], {}), '(file_glob)\n', (2373, 2384), False, 'import glob\n'), ((3182, 3203), 'glob.glob', 'glob.glob', (['file_glob1'], {}), '(file_glob1)\n', (3191, 3203), False, 'import glob\n'), ((3316, 3337), 'glob.glob', 'glob.glob', (['file_glob2'], {}), '(file_glob2)\n', (3325, 3337), False, 'import glob\n')] |
# -*- coding: utf-8 -*-
from os import urandom
from secrets import token_bytes
from hashlib import sha384
from flask import Blueprint
from flask import abort
from flask import request
from sqlalchemy.exc import IntegrityError
from app import db
from models import Client
from app.module import aes256, mail, send
bp = Blueprint(
name=__name__.split(".")[-1],
import_name=__name__,
url_prefix=f"/{__name__.split('.')[-1]}"
)
def add_to_database(email: str, secret: str):
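    # idx is 18 random bytes rendered as 36 hex chars; a unique-constraint
    # collision triggers a recursive retry with a fresh id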
idx = urandom(18).hex()
try:
db.session.add(Client(
idx=idx,
email=email,
secret=sha384(secret.encode()).hexdigest()
))
db.session.commit()
return idx
except IntegrityError:
return add_to_database(email, secret)
@bp.route("", methods=['POST'])
def add_client():
secret = token_bytes(20).hex()
email = request.form.get("email")
if email is None or "@" not in email:
abort(400)
worker = aes256.AESCipher()
    idx = add_to_database(
        email=worker.encrypt(plaintext=email),
        secret=secret  # add_to_database hashes the secret itself; hashing here too would store a double hash
    )
mail.send(
to_address=email,
message=f"{secret}",
title="API 사용시에 사용하는 API Secret 키 입니다"
)
return send.send(
code=200,
response=dict(
idx=idx,
message="Check your email",
)
)
| [
"app.db.session.commit",
"os.urandom",
"app.module.mail.send",
"secrets.token_bytes",
"flask.request.form.get",
"flask.abort",
"app.module.aes256.AESCipher"
] | [((890, 915), 'flask.request.form.get', 'request.form.get', (['"""email"""'], {}), "('email')\n", (906, 915), False, 'from flask import request\n'), ((991, 1009), 'app.module.aes256.AESCipher', 'aes256.AESCipher', ([], {}), '()\n', (1007, 1009), False, 'from app.module import aes256, mail, send\n'), ((1146, 1239), 'app.module.mail.send', 'mail.send', ([], {'to_address': 'email', 'message': 'f"""{secret}"""', 'title': '"""API 사용시에 사용하는 API Secret 키 입니다"""'}), "(to_address=email, message=f'{secret}', title=\n 'API 사용시에 사용하는 API Secret 키 입니다')\n", (1155, 1239), False, 'from app.module import aes256, mail, send\n'), ((677, 696), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (694, 696), False, 'from app import db\n'), ((966, 976), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (971, 976), False, 'from flask import abort\n'), ((498, 509), 'os.urandom', 'urandom', (['(18)'], {}), '(18)\n', (505, 509), False, 'from os import urandom\n'), ((855, 870), 'secrets.token_bytes', 'token_bytes', (['(20)'], {}), '(20)\n', (866, 870), False, 'from secrets import token_bytes\n')] |
from os import pipe
import time
from socket import timeout  # socket-level timeout exception (imported but not used below)
from selenium import webdriver  # the holy webdriver to control all the commands
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# print("\n\nHello, welcome to the automated speed scanner:\n\n")
''' <NAME>(abhi00o7)
'''
# this must point to the chromedriver executable
PATH = r"D:\Installed_Programs\chromedriver.exe"  # raw string so the backslashes stay literal
driver = webdriver.Chrome(PATH)  # storing the webdriver in a variable
def autospeedtest(url):
    driver.get(url)  # load the speed-test page
print("\n\n")
wait = WebDriverWait(driver, 40)
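    # explicit wait: poll the page for up to 40 s for each expected condition instead of a fixed sleep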
try:
infoLink = wait.until(
EC.element_to_be_clickable(
(By.XPATH, '/html/body/div/div[2]/div[1]/div[4]/div[1]/a'))
)
status = infoLink.is_displayed()
#CHECK THE STATUS FOR PEACE OF MIND
# print(status)
        if status:
#the speed values be it in 100 or thousands
speedvalue = wait.until(
EC.presence_of_element_located(
(By.ID, 'speed-value'))
)
#for the speed test units be it in Mpbs or Kbps acc. to the browser
speedunits = wait.until(
EC.presence_of_element_located(
(By.ID, 'speed-units'))
)
print("Your connection speed is :")
print (speedvalue.text ,speedunits.text)
# driver.quit()
    except Exception:
print("You DO NOT have a working internet connection.")
# driver.quit()
url = "https://fast.com/"
# for index in range(15):
# print("test: ", index+1)
# autospeedtest(url)
def main():
autospeedtest(url)
#closing options
# driver.close() #to close just the current tab of the browser
driver.quit() #to completely force close the browser
if __name__ == '__main__':
main() | [
"selenium.webdriver.Chrome",
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"selenium.webdriver.support.expected_conditions.element_to_be_clickable"
] | [((661, 683), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['PATH'], {}), '(PATH)\n', (677, 683), False, 'from selenium import webdriver\n'), ((856, 881), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(40)'], {}), '(driver, 40)\n', (869, 881), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((938, 1028), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (["(By.XPATH, '/html/body/div/div[2]/div[1]/div[4]/div[1]/a')"], {}), "((By.XPATH,\n '/html/body/div/div[2]/div[1]/div[4]/div[1]/a'))\n", (964, 1028), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1312, 1366), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'speed-value')"], {}), "((By.ID, 'speed-value'))\n", (1342, 1366), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1537, 1591), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'speed-units')"], {}), "((By.ID, 'speed-units'))\n", (1567, 1591), True, 'from selenium.webdriver.support import expected_conditions as EC\n')] |
from django.core.exceptions import ValidationError
from django.test import TestCase
import ipaddress
import pytest
from django_peeringdb.models import (
URLField,
)
from tests.models import FieldModel
class FieldTests(TestCase):
""" test model functionality """
def test_init(self):
new = URLField()
def test_url(self):
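        # Django's stock URLValidator rejects telnet://, so these asserts exercise the
        # extra schemes this custom URLField is expected to accept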
model = FieldModel()
model.url = 'telnet://example.com'
model.full_clean()
model.url = 'http://example.com'
model.full_clean()
model.url = 'https://example.com'
model.full_clean()
model.url = 'ftp://example.com'
model.full_clean()
model.url = 'ftps://example.com'
model.full_clean()
with pytest.raises(ValidationError):
model.url = 'invalid'
model.full_clean()
| [
"tests.models.FieldModel",
"django_peeringdb.models.URLField",
"pytest.raises"
] | [((314, 324), 'django_peeringdb.models.URLField', 'URLField', ([], {}), '()\n', (322, 324), False, 'from django_peeringdb.models import URLField\n'), ((367, 379), 'tests.models.FieldModel', 'FieldModel', ([], {}), '()\n', (377, 379), False, 'from tests.models import FieldModel\n'), ((741, 771), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (754, 771), False, 'import pytest\n')] |
# Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import os
import random
import socket
import sys
import tempfile
import time
import paramiko
try:
import shade
except ImportError:
sys.exit('ERROR: Driver missing, install shade!')
from molecule import util
from molecule.driver import basedriver
LOG = util.get_logger(__name__)
class OpenstackDriver(basedriver.BaseDriver):
def __init__(self, molecule):
super(OpenstackDriver, self).__init__(molecule)
self._provider = self._get_provider()
self._platform = self._get_platform()
self._openstack = shade.openstack_cloud()
self._keypair_name = None
self._molecule_generated_ssh_key = False
@property
def name(self):
return 'openstack'
@property
def instances(self):
return self.molecule.config.config['openstack']['instances']
@property
def default_provider(self):
return self._provider
@property
def default_platform(self):
return self._platform
@property
def provider(self):
return self._provider
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, val):
self._platform = val
@property
def valid_providers(self):
return [{'name': self.provider}]
@property
def valid_platforms(self):
return [{'name': self.platform}]
@property
def ssh_config_file(self):
return
@property
def ansible_connection_params(self):
return {'connection': 'ssh'}
@property
def testinfra_args(self):
return {
'ansible_inventory':
self.molecule.config.config['ansible']['inventory_file'],
'connection': 'ansible'
}
@property
def serverspec_args(self):
return {}
def up(self, no_provision=True):
self.molecule.state.change_state('driver', self.name)
self._set_keypair()
active_instances = self._openstack.list_servers()
active_instance_names = {
instance['name']: instance['status']
for instance in active_instances
}
LOG.warning("Creating openstack instances ...")
for instance in self.instances:
if instance['name'] not in active_instance_names:
LOG.info("\tBringing up {}".format(instance['name']))
server = self._openstack.create_server(
name=instance['name'],
image=self._openstack.get_image(instance['image']),
flavor=self._openstack.get_flavor(instance['flavor']),
auto_ip=True,
wait=True,
key_name=self._keypair_name,
security_groups=instance['security_groups']
if 'security_groups' in instance else None)
self._reset_known_host_key(server['interface_ip'])
instance['created'] = True
                num_retries = 0
                # retry until ssh answers, giving up after five attempts
                while not self._check_ssh_availability(
                        server['interface_ip'],
                        instance['sshuser'],
                        timeout=6,
                        sshkey_filename=self._get_keyfile(
                        )) and num_retries < 5:
                    LOG.info("\t Waiting for ssh availability ...")
                    num_retries += 1
def destroy(self):
LOG.info("Deleting openstack instances ...")
active_instances = self._openstack.list_servers()
active_instance_names = {
instance['name']: instance['id']
for instance in active_instances
}
for instance in self.instances:
LOG.warning("\tRemoving {} ...".format(instance['name']))
if instance['name'] in active_instance_names:
if not self._openstack.delete_server(
active_instance_names[instance['name']], wait=True):
LOG.error("Unable to remove {}!".format(instance['name']))
else:
util.print_success('\tRemoved {}'.format(instance['name']))
instance['created'] = False
# cleanup any molecule generated files
if self._molecule_generated_keypair() and self._keypair_name:
self._openstack.delete_keypair(self._keypair_name)
if self._molecule_generated_ssh_key:
self._remove_temp_ssh_key()
def status(self):
Status = collections.namedtuple('Status',
['name', 'state', 'provider'])
status_list = []
for instance in self.instances:
if self._instance_is_accessible(instance):
status_list.append(
Status(
name=instance['name'],
state='UP',
provider=self.provider))
else:
status_list.append(
Status(
name=instance['name'],
state='not_created',
provider=self.provider))
return status_list
def conf(self, name=None, ssh_config=False):
inventory_file = self.molecule.config.config['ansible'][
'inventory_file']
if os.path.exists(inventory_file):
with open(inventory_file) as stream:
for line in stream:
if len(line.split()) > 0 and line.split()[0] == name:
ansible_host = line.split()[1]
host_address = ansible_host.split('=')[1]
return host_address
return
def inventory_entry(self, instance):
template = self._host_template()
for server in self._openstack.list_servers(detailed=False):
if server['name'] == instance['name']:
server_config = {
'hostname': instance['name'],
'interface_ip_address': server['interface_ip'],
'ssh_username': instance['sshuser']
}
if self._molecule_generated_ssh_key:
server_config[
'ssh_key_filename'] = \
'ansible_ssh_private_key_file={}'.format(
self._generated_ssh_key_location())
else:
ssh_line = 'ansible_ssh_private_key_file={}'.format(
self._get_keyfile())
server_config['ssh_key_filename'] = ssh_line
return template.format(**server_config)
return ''
def login_cmd(self, instance_name):
return 'ssh {} -l {}'
def login_args(self, instance_name):
# Try to retrieve the SSH configuration of the host.
conf = self.conf(name=instance_name)
user = ''
for instance in self.instances:
if instance_name == instance['name']:
user = instance['sshuser']
return [conf, user]
def _get_provider(self):
return 'openstack'
def _get_platform(self):
return 'openstack'
def _set_keypair(self):
self._keypair_name = self._get_keypair_name()
kpn = self._keypair_name
pub_key_file = self._get_keyfile() + ".pub"
if self._openstack.search_keypairs(kpn):
LOG.info('Keypair already exists. Skipping import.')
else:
LOG.info('Adding keypair ... ' + kpn)
self._openstack.create_keypair(kpn,
open(pub_key_file,
'r').read().strip())
def _reset_known_host_key(self, hostname):
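        # floating IPs are recycled between instances, so drop any stale known_hosts
        # entry up front to avoid "host key changed" failures on first connect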
return os.system('ssh-keygen -R {}'.format(hostname))
def _check_ssh_availability(self, hostip, user, timeout, sshkey_filename):
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostip, username=user, key_filename=sshkey_filename)
return True
except (paramiko.BadHostKeyException, paramiko.AuthenticationException,
paramiko.SSHException, socket.error):
time.sleep(timeout)
return False
def _generate_temp_ssh_key(self):
fileloc = self._generated_ssh_key_location()
# create the private key
k = paramiko.RSAKey.generate(2048)
k.write_private_key_file(fileloc)
# write the public key too
pub = paramiko.RSAKey(filename=fileloc)
with open("%s.pub" % fileloc, 'w') as f:
f.write("%s %s" % (pub.get_name(), pub.get_base64()))
return fileloc
def _remove_temp_ssh_key(self):
fileloc = self._generated_ssh_key_location()
os.remove(fileloc)
os.remove(fileloc + ".pub")
def _generate_random_keypair_name(self, prefix, length):
r = "".join(
[random.choice('<KEY>') for n in xrange(length)])
return prefix + "_" + r
def _host_template(self):
return ('{hostname} ansible_ssh_host={interface_ip_address} '
'ansible_ssh_user={ssh_username} {ssh_key_filename} '
'ansible_ssh_extra_args="-o ConnectionAttempts=5"\n')
def _generated_ssh_key_location(self):
return tempfile.gettempdir() + '/molecule_rsa'
def _instance_is_accessible(self, instance):
instance_ip = self.conf(instance['name'])
if instance_ip is not None:
return self._check_ssh_availability(
instance_ip,
instance['sshuser'],
timeout=0,
sshkey_filename=self._get_keyfile())
return False
def _get_keyfile(self):
if ('keyfile' in self.molecule.config.config['openstack']):
return self.molecule.config.config['openstack']['keyfile']
elif self._molecule_generated_ssh_key:
return self._generated_ssh_key_location()
else:
LOG.info('Keyfile not specified. molecule will generate a '
'temporary one.')
self._molecule_generated_ssh_key = True
return self._generate_temp_ssh_key()
def _get_keypair_name(self):
if ('keypair' in self.molecule.config.config['openstack']):
return self.molecule.config.config['openstack']['keypair']
else:
LOG.info('Keypair not specified. molecule will generate one.')
return self._generate_random_keypair_name('molecule', 10)
def _molecule_generated_keypair(self):
return 'keypair' not in self.molecule.config.config['openstack']
| [
"os.path.exists",
"collections.namedtuple",
"random.choice",
"paramiko.AutoAddPolicy",
"paramiko.RSAKey",
"paramiko.RSAKey.generate",
"time.sleep",
"molecule.util.get_logger",
"tempfile.gettempdir",
"sys.exit",
"paramiko.SSHClient",
"shade.openstack_cloud",
"os.remove"
] | [((1402, 1427), 'molecule.util.get_logger', 'util.get_logger', (['__name__'], {}), '(__name__)\n', (1417, 1427), False, 'from molecule import util\n'), ((1279, 1328), 'sys.exit', 'sys.exit', (['"""ERROR: Driver missing, install shade!"""'], {}), "('ERROR: Driver missing, install shade!')\n", (1287, 1328), False, 'import sys\n'), ((1684, 1707), 'shade.openstack_cloud', 'shade.openstack_cloud', ([], {}), '()\n', (1705, 1707), False, 'import shade\n'), ((5623, 5686), 'collections.namedtuple', 'collections.namedtuple', (['"""Status"""', "['name', 'state', 'provider']"], {}), "('Status', ['name', 'state', 'provider'])\n", (5645, 5686), False, 'import collections\n'), ((6450, 6480), 'os.path.exists', 'os.path.exists', (['inventory_file'], {}), '(inventory_file)\n', (6464, 6480), False, 'import os\n'), ((9040, 9060), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (9058, 9060), False, 'import paramiko\n'), ((9606, 9636), 'paramiko.RSAKey.generate', 'paramiko.RSAKey.generate', (['(2048)'], {}), '(2048)\n', (9630, 9636), False, 'import paramiko\n'), ((9729, 9762), 'paramiko.RSAKey', 'paramiko.RSAKey', ([], {'filename': 'fileloc'}), '(filename=fileloc)\n', (9744, 9762), False, 'import paramiko\n'), ((10000, 10018), 'os.remove', 'os.remove', (['fileloc'], {}), '(fileloc)\n', (10009, 10018), False, 'import os\n'), ((10027, 10054), 'os.remove', 'os.remove', (["(fileloc + '.pub')"], {}), "(fileloc + '.pub')\n", (10036, 10054), False, 'import os\n'), ((9137, 9161), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (9159, 9161), False, 'import paramiko\n'), ((10532, 10553), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (10551, 10553), False, 'import tempfile\n'), ((9423, 9442), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (9433, 9442), False, 'import time\n'), ((10151, 10173), 'random.choice', 'random.choice', (['"""<KEY>"""'], {}), "('<KEY>')\n", (10164, 10173), False, 'import random\n')] |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tools import float_is_zero
from odoo.addons.sale.tests.test_sale_common import TestSale
class TestSaleTimesheet(TestSale):
def test_timesheet_order(self):
""" Test timesheet invoicing with 'invoice on order' timetracked products """
# intial so
prod_ts = self.env.ref('product.service_order_01')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': prod_ts.name, 'product_id': prod_ts.id, 'product_uom_qty': 50, 'product_uom': prod_ts.uom_id.id, 'price_unit': prod_ts.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
so = self.env['sale.order'].create(so_vals)
so.action_confirm()
so.action_invoice_create()
# let's log some timesheets
self.env['account.analytic.line'].create({
'name': 'Test Line',
'project_id': so.project_project_id.id,
'unit_amount': 10.5,
'user_id': self.manager.id,
})
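        # an analytic line logged on the SO's project counts as delivered time on the timetracked line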
self.assertEqual(so.order_line.qty_delivered, 10.5, 'Sale Timesheet: timesheet does not increase delivered quantity on so line')
self.assertEqual(so.invoice_status, 'invoiced', 'Sale Timesheet: "invoice on order" timesheets should not modify the invoice_status of the so')
self.env['account.analytic.line'].create({
'name': 'Test Line',
'project_id': so.project_project_id.id,
'unit_amount': 39.5,
'user_id': self.user.id,
})
self.assertEqual(so.order_line.qty_delivered, 50, 'Sale Timesheet: timesheet does not increase delivered quantity on so line')
self.assertEqual(so.invoice_status, 'invoiced', 'Sale Timesheet: "invoice on order" timesheets should not modify the invoice_status of the so')
self.env['account.analytic.line'].create({
'name': 'Test Line',
'project_id': so.project_project_id.id,
'unit_amount': 10,
'user_id': self.user.id,
})
self.assertEqual(so.order_line.qty_delivered, 60, 'Sale Timesheet: timesheet does not increase delivered quantity on so line')
self.assertEqual(so.invoice_status, 'upselling', 'Sale Timesheet: "invoice on order" timesheets should not modify the invoice_status of the so')
def test_timesheet_delivery(self):
""" Test timesheet invoicing with 'invoice on delivery' timetracked products """
inv_obj = self.env['account.invoice']
# intial so
prod_ts = self.env.ref('product.product_product_2')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': prod_ts.name, 'product_id': prod_ts.id, 'product_uom_qty': 50, 'product_uom': prod_ts.uom_id.id, 'price_unit': prod_ts.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
so = self.env['sale.order'].create(so_vals)
so.action_confirm()
self.assertEqual(so.invoice_status, 'no', 'Sale Timesheet: "invoice on delivery" should not need to be invoiced on so confirmation')
# let's log some timesheets
self.env['account.analytic.line'].create({
'name': '<NAME>',
'project_id': so.project_project_id.id,
'unit_amount': 10.5,
'user_id': self.manager.id,
})
self.assertEqual(so.invoice_status, 'to invoice', 'Sale Timesheet: "invoice on delivery" timesheets should set the so in "to invoice" status when logged')
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertTrue(float_is_zero(inv.amount_total - so.order_line.price_unit * 10.5, precision_digits=2), 'Sale: invoice generation on timesheets product is wrong')
self.env['account.analytic.line'].create({
'name': '<NAME>',
'project_id': so.project_project_id.id,
'unit_amount': 39.5,
'user_id': self.user.id,
})
self.assertEqual(so.invoice_status, 'to invoice', 'Sale Timesheet: "invoice on delivery" timesheets should not modify the invoice_status of the so')
so.action_invoice_create()
self.assertEqual(so.invoice_status, 'invoiced', 'Sale Timesheet: "invoice on delivery" timesheets should be invoiced completely by now')
self.env['account.analytic.line'].create({
'name': '<NAME>',
'project_id': so.project_project_id.id,
'unit_amount': 10,
'user_id': self.user.id,
})
self.assertEqual(so.invoice_status, 'to invoice', 'Sale Timesheet: supplementary timesheets do not change the status of the SO')
def test_timesheet_uom(self):
""" Test timesheet invoicing and uom conversion """
# intial so
prod_ts = self.env.ref('product.product_product_2')
uom_days = self.env.ref('product.product_uom_day')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': prod_ts.name, 'product_id': prod_ts.id, 'product_uom_qty': 5, 'product_uom': uom_days.id, 'price_unit': prod_ts.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
so = self.env['sale.order'].create(so_vals)
so.action_confirm()
# let's log some timesheets
self.env['account.analytic.line'].create({
'name': '<NAME>',
'project_id': so.project_project_id.id,
'unit_amount': 16,
'user_id': self.manager.id,
})
self.assertEqual(so.order_line.qty_delivered, 2, 'Sale: uom conversion of timesheets is wrong')
self.env['account.analytic.line'].create({
'name': '<NAME>',
'project_id': so.project_project_id.id,
'unit_amount': 24,
'user_id': self.user.id,
})
so.action_invoice_create()
self.assertEqual(so.invoice_status, 'invoiced', 'Sale Timesheet: "invoice on delivery" timesheets should not modify the invoice_status of the so')
| [
"odoo.tools.float_is_zero"
] | [((3933, 4022), 'odoo.tools.float_is_zero', 'float_is_zero', (['(inv.amount_total - so.order_line.price_unit * 10.5)'], {'precision_digits': '(2)'}), '(inv.amount_total - so.order_line.price_unit * 10.5,\n precision_digits=2)\n', (3946, 4022), False, 'from odoo.tools import float_is_zero\n')] |
# Made by Taguar258 | Licence: MIT #
import argparse
import os
import subprocess
import sys
from os import path
from .src.check import check_for_app_injection, check_for_pkg_injection
from .src.inject import inject_app, inject_pkg
from .src.remove_injection import remove_app_injection, remove_pkg_injection
class Main:
def __init__(self):
# Colors
self.C_None = "\x1b[0;39m"
self.C_Blink = "\x1b[5;39m"
self.C_Bold = "\x1b[1;39m"
self.C_Red = "\x1b[31m"
self.C_Green = "\x1b[32m"
self.C_Yellow = "\x1b[33m"
self.C_Blue = "\x1b[34m"
self.C_BRed = "\x1b[1;31m"
self.C_BGreen = "\x1b[1;32m"
self.C_BYellow = "\x1b[1;33m"
self.C_BBlue = "\x1b[1;34m"
# self.C_Cyan = "\x1b[36m"
# self.C_Magenta = "\x1b[35m"
# self.C_BCyan = "\x1b[1;36m"
# self.C_BMagenta = "\x1b[1;35m"
self.mode = None
self.target_mode = None
self.include_files = False
def parse_args(self):
if '-r' in sys.argv or '--reset' in sys.argv:
self.parser = argparse.ArgumentParser()
self.parser.add_argument('-r', '--reset', required=False, help='Remove the injection from an infected application', action='store_true')
self.parser.add_argument('-a', '--app', type=str, nargs=1, required=False, help='Target OSX application')
self.parser.add_argument('-p', '--pkg', type=str, nargs=1, required=False, help='Target package')
self.mode = "Reset"
elif '-c' in sys.argv or '--check' in sys.argv:
self.parser = argparse.ArgumentParser()
self.parser.add_argument('-c', '--check', required=False, help='Check if application was injected by injectra', action='store_true')
self.parser.add_argument('-a', '--app', type=str, nargs=1, required=False, help='Target OSX application')
self.parser.add_argument('-p', '--pkg', type=str, nargs=1, required=False, help='Target package')
self.mode = "Check"
else:
self.parser = argparse.ArgumentParser()
self.parser.add_argument('-c', '--check', required=False, help='Check if application was injected by injectra', action='store_true')
self.parser.add_argument('-r', '--reset', required=False, help='Remove the injection of an application', action='store_true')
self.parser.add_argument('-a', '--app', type=str, nargs=1, required=False, help='Target OSX application')
self.parser.add_argument('-p', '--pkg', type=str, nargs=1, required=False, help='Target package')
self.parser.add_argument('-i', '--inject', type=str, nargs=1, required=True, help='Bash/Shell script to inject')
self.parser.add_argument('-o', '--output', type=str, nargs=1, required=True, help='Output for the infected application')
self.parser.add_argument('-in', '--include', type=str, nargs=1, required=False, help='Add files of a given folder to the application')
self.mode = "Inject"
self.args = self.parser.parse_args()
def banner(self):
bannertxt = """
----------------------------------------------------------------
@@@ @@@ @@@ @@@ @@@@@@@@ @@@@@@@ @@@@@@@ @@@@@@@ @@@@@@
@@! @@!@!@@@ @@! @@! !@@ @!! @@! @@@ @@! @@@
!!@ @!@@!!@! !!@ @!!!:! !@! @!! @!@!!@! @!@!@!@!
!!: !!: !!! . .!! !!: :!! !!: !!: :!! !!: !!!
: :: : ::.:: : :: :: :: :: : : : : : : : :
----------------------------------------------------------------
Made by Taguar258 | MIT 2020
"""
bannertxt = bannertxt.replace("@", self.C_BRed + "@")
bannertxt = bannertxt.replace(":", self.C_BGreen + ":")
bannertxt = bannertxt.replace("!", self.C_BYellow + "!")
bannertxt = bannertxt.replace(".", self.C_BYellow + ".")
bannertxt = bannertxt.replace(" ", self.C_None + " ")
bannertxt = bannertxt.replace("-", self.C_Bold + "-")
print(bannertxt)
def get_abs_path(self):
calc_check = 0
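        # count the supplied target arguments; exactly one of --app / --pkg must be given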
if self.args.app is not None: calc_check += 1
if self.args.pkg is not None: calc_check += 1
if calc_check == 0:
print(self.C_BRed + "[!] Missing target argument." + self.C_None)
print(self.C_BRed + "[!] Please provide the app or pkg argument." + self.C_None)
quit()
elif calc_check == 2:
print(self.C_BRed + "[!] Recived two target arguments where as only one is needed." + self.C_None)
print(self.C_BRed + "[!] Please provide only the app or pkg argument." + self.C_None)
quit()
try:
if self.mode == "Inject":
if not self.args.output[0].endswith(".app"):
if self.args.app is not None:
self.args.output[0] = self.args.output[0] + ".app"
elif self.args.pkg is not None:
self.args.output[0] = self.args.output[0] + ".pkg"
self.args.inject[0] = path.abspath(self.args.inject[0])
self.args.output[0] = path.abspath(self.args.output[0])
if self.args.app is not None:
self.args.app[0] = path.abspath(self.args.app[0])
self.target_mode = "app"
elif self.args.pkg is not None:
self.args.pkg[0] = path.abspath(self.args.pkg[0])
self.target_mode = "pkg"
except Exception as e:
print(self.C_BRed + "[!] Cannot get the full path of a given argument." + self.C_None)
print(e)
quit()
if self.mode == "Inject":
try:
if self.args.include[0] != "":
self.args.include[0] = path.abspath(self.args.include[0])
                if self.args.include[0][-1:] == "/":
                    self.args.include[0] = self.args.include[0][:-1]  # strip the trailing slash
self.include_files = True
except Exception:
pass
def main_logic(self):
if self.mode == "Reset" and self.target_mode == "app":
remove_app_injection(self.args)
elif self.mode == "Reset" and self.target_mode == "pkg":
remove_pkg_injection(self.args)
elif self.mode == "Check" and self.target_mode == "app":
check_for_app_injection(self.args)
elif self.mode == "Check" and self.target_mode == "pkg":
check_for_pkg_injection(self.args)
elif self.mode == "Inject" and self.target_mode == "app":
inject_app(self.args, self.include_files)
elif self.mode == "Inject" and self.target_mode == "pkg":
inject_pkg(self.args, self.include_files)
def run(self):
self.banner()
self.parse_args()
self.get_abs_path()
self.main_logic()
def exec_main():
main = Main()
main.run()
if __name__ == '__main__':
exec_main()
| [
"os.path.abspath",
"argparse.ArgumentParser"
] | [((1106, 1131), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1129, 1131), False, 'import argparse\n'), ((1624, 1649), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1647, 1649), False, 'import argparse\n'), ((2096, 2121), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2119, 2121), False, 'import argparse\n'), ((5145, 5178), 'os.path.abspath', 'path.abspath', (['self.args.inject[0]'], {}), '(self.args.inject[0])\n', (5157, 5178), False, 'from os import path\n'), ((5217, 5250), 'os.path.abspath', 'path.abspath', (['self.args.output[0]'], {}), '(self.args.output[0])\n', (5229, 5250), False, 'from os import path\n'), ((5329, 5359), 'os.path.abspath', 'path.abspath', (['self.args.app[0]'], {}), '(self.args.app[0])\n', (5341, 5359), False, 'from os import path\n'), ((5481, 5511), 'os.path.abspath', 'path.abspath', (['self.args.pkg[0]'], {}), '(self.args.pkg[0])\n', (5493, 5511), False, 'from os import path\n'), ((5870, 5904), 'os.path.abspath', 'path.abspath', (['self.args.include[0]'], {}), '(self.args.include[0])\n', (5882, 5904), False, 'from os import path\n')] |
import numpy as np
import random
import cPickle as cp
import gzip
import zipfile
import tarfile
import os
import theano
#from sklearn import datasets
def _split_data(data, split):
starts = np.cumsum(np.r_[0, split[:-1]])
ends = np.cumsum(split)
splits = [data[s:e] for s, e in zip(starts, ends)]
return splits
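# e.g. _split_data(np.arange(6), (3, 2, 1)) -> [array([0, 1, 2]), array([3, 4]), array([5])]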
def _shared_dataset(data_xy):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX))
shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX))
return shared_x, shared_y
def load_mnist(path, target_as_one_hot=False, flatten=False, split=(50000, 10000, 10000), drop_percentage=0.):
''' Loads the MNIST dataset.
Input examples are 28x28 pixels grayscaled images. Each input example is represented
as a ndarray of shape (28*28), i.e. (height*width).
Example labels are integers between [0,9] respresenting one of the ten classes.
Parameters
----------
path : str
The path to the dataset file (.npz).
target_as_one_hot : {True, False}, optional
If True, represent targets as one hot vectors.
flatten : {True, False}, optional
If True, represents each individual example as a vector.
split : tuple of int, optional
Numbers of examples in each split of the dataset. Default: (50000, 10000, 10000)
References
----------
This dataset comes from http://www.iro.umontreal.ca/~lisa/deep/data/mnist/
'''
if not os.path.isfile(path):
# Download the dataset.
data_dir, data_file = os.path.split(path)
mnist_picklefile = os.path.join(data_dir, 'mnist.pkl.gz')
if not os.path.isfile(mnist_picklefile):
import urllib
origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
print("Downloading data (16 Mb) from {} ...".format(origin))
urllib.urlretrieve(origin, mnist_picklefile)
# Load the dataset and process it.
inputs = []
labels = []
print("Processing data ...")
with gzip.open(mnist_picklefile, 'rb') as f:
trainset, validset, testset = cp.load(f)
inputs = np.concatenate([trainset[0], validset[0], testset[0]], axis=0).reshape((-1, 1, 28, 28))
labels = np.concatenate([trainset[1], validset[1], testset[1]], axis=0).astype(np.int8)
np.savez(path, inputs=inputs, labels=labels)
print("Loading data ...")
data = np.load(path)
inputs, labels = data['inputs'], data['labels']
if flatten:
inputs = inputs.reshape((len(inputs), -1))
#shuffle
idxs = range(inputs.shape[0])
random.shuffle(idxs)
inputs = inputs[idxs,:]
labels = labels[idxs]
if target_as_one_hot:
one_hot_vectors = np.zeros((labels.shape[0], 10), dtype=theano.config.floatX)
one_hot_vectors[np.arange(labels.shape[0]), labels] = 1
labels = one_hot_vectors
datasets_inputs = _split_data(inputs, split)
datasets_labels = _split_data(labels, split)
if drop_percentage > 0.:
N_train = split[0]
N_wo_label = int(drop_percentage * N_train)
# split inputs
labeled_data = datasets_inputs[0][N_wo_label:,:]
unlabeled_data = datasets_inputs[0][:N_wo_label,:]
datasets_inputs[0] = labeled_data
datasets_inputs.insert(2, unlabeled_data)
# split labels
labeled_data = datasets_labels[0][N_wo_label:]
unlabeled_data = datasets_labels[0][:N_wo_label]
datasets_labels[0] = labeled_data
datasets_labels.insert(2, unlabeled_data)
datasets = [_shared_dataset((i, l)) for i, l in zip(datasets_inputs, datasets_labels)]
return datasets
def load_mnist_w_rotations(path, target_as_one_hot=False, flatten=False, split=(70000, 10000, 20000), drop_percentage=0.):
''' Loads the augmented MNIST dataset containing 50k regular MNIST digits and 50k rotated MNIST digits
Input examples are 28x28 pixels grayscaled images. Each input example is represented
as a ndarray of shape (28*28), i.e. (height*width).
Example labels are integers between [0,9] respresenting one of the ten classes.
Parameters
----------
path : str
The path to the dataset file (.npz).
target_as_one_hot : {True, False}, optional
If True, represent targets as one hot vectors.
flatten : {True, False}, optional
If True, represents each individual example as a vector.
split : tuple of int, optional
Numbers of examples in each split of the dataset. Default: (70000, 10000, 20000)
References
----------
The regular MNIST portion of this dataset comes from http://www.iro.umontreal.ca/~lisa/deep/data/mnist/
The rotated MNIST portion comes from http://www.iro.umontreal.ca/~lisa/twiki/bin/view.cgi/Public/MnistVariations
'''
if not os.path.isfile(path):
# Download the dataset.
data_dir, data_file = os.path.split(path)
mnist_picklefile = os.path.join(data_dir, 'mnist_plus_rot.pkl.gz')
if not os.path.isfile(mnist_picklefile):
import urllib
origin = 'http://www.ics.uci.edu/~enalisni/mnist_plus_rot.pkl.gz'
print("Downloading data (100 Mb) from {} ...".format(origin))
urllib.urlretrieve(origin, mnist_picklefile)
with gzip.open(mnist_picklefile, 'rb') as f:
data = cp.load(f)
cp.dump(data, open(os.path.join(data_dir, 'mnist_plus_rot.pkl'), 'wb'), protocol=cp.HIGHEST_PROTOCOL)
else:
data = np.load(path)
inputs, labels = data['inputs'], data['labels']
if flatten:
inputs = inputs.reshape((len(inputs), -1))
#shuffle
idxs = range(inputs.shape[0])
random.shuffle(idxs)
inputs = inputs[idxs,:]
labels = labels[idxs]
if target_as_one_hot:
one_hot_vectors = np.zeros((labels.shape[0], 10), dtype=theano.config.floatX)
one_hot_vectors[np.arange(labels.shape[0]), labels.astype(int)] = 1
labels = one_hot_vectors
datasets_inputs = _split_data(inputs, split)
datasets_labels = _split_data(labels, split)
if drop_percentage > 0.:
N_train = split[0]
N_wo_label = int(drop_percentage * N_train)
# split inputs
labeled_data = datasets_inputs[0][N_wo_label:,:]
unlabeled_data = datasets_inputs[0][:N_wo_label,:]
datasets_inputs[0] = labeled_data
datasets_inputs.insert(2, unlabeled_data)
# split labels
labeled_data = datasets_labels[0][N_wo_label:]
unlabeled_data = datasets_labels[0][:N_wo_label]
datasets_labels[0] = labeled_data
datasets_labels.insert(2, unlabeled_data)
datasets = [_shared_dataset((i, l)) for i, l in zip(datasets_inputs, datasets_labels)]
return datasets
def load_svhn_pca(path, target_as_one_hot=True, train_valid_split=(65000, 8254), drop_percentage=0.):
''' Loads the Street View House Numbers (SVHN) dataset pre-processed with PCA, reduced to 500 dimensions.
Example labels are integers between [0,9] respresenting one of the ten classes.
Parameters
----------
path : str
The path to the dataset file (.pkl).
target_as_one_hot : {True, False}, optional
If True, represent targets as one hot vectors.
flatten : {True, False}, optional
If True, represents each individual example as a vector.
split : tuple of int, optional
Numbers of examples in each split of the dataset. Default: (65000, 8254)
References
----------
The original dataset can be attained at http://ufldl.stanford.edu/housenumbers/
'''
if not os.path.isfile(path):
# Download the dataset.
data_dir, data_file = os.path.split(path)
svhn_picklefile = os.path.join(data_dir, 'svhn_pca.pkl.gz')
if not os.path.isfile(svhn_picklefile):
import urllib
origin = 'http://www.ics.uci.edu/~enalisni/svhn_pca.pkl.gz'
print("Downloading data (370 Mb) from {} ...".format(origin))
urllib.urlretrieve(origin, svhn_picklefile)
with gzip.open(svhn_picklefile, 'rb') as f:
data = cp.load(f)
cp.dump(data, open(os.path.join(data_dir, 'svhn_pca.pkl'), 'wb'), protocol=cp.HIGHEST_PROTOCOL)
else:
data = cp.load(open(path,'rb'))
train_inputs = data['train_data']
test_inputs = data['test_data']
train_labels = data['train_labels']
test_labels = data['test_labels']
#shuffle
idxs = range(train_inputs.shape[0])
random.shuffle(idxs)
train_inputs = train_inputs[idxs,:]
train_labels = train_labels[idxs]
if target_as_one_hot:
one_hot_vectors_train = np.zeros((train_labels.shape[0], 10), dtype=theano.config.floatX)
for idx in xrange(train_labels.shape[0]):
one_hot_vectors_train[idx, train_labels[idx]] = 1.
train_labels = one_hot_vectors_train
one_hot_vectors_test = np.zeros((test_labels.shape[0], 10), dtype=theano.config.floatX)
for idx in xrange(test_labels.shape[0]):
one_hot_vectors_test[idx, test_labels[idx]] = 1.
test_labels = one_hot_vectors_test
datasets_inputs = [ train_inputs[:train_valid_split[0],:], train_inputs[-1*train_valid_split[1]:,:], test_inputs ]
datasets_labels = [ train_labels[:train_valid_split[0]], train_labels[-1*train_valid_split[1]:], test_labels ]
if drop_percentage > 0.:
N_train = train_valid_split[0]
N_wo_label = int(drop_percentage * N_train)
# split inputs
labeled_input_data = datasets_inputs[0][N_wo_label:,:]
unlabeled_input_data = datasets_inputs[0][:N_wo_label,:]
datasets_inputs[0] = labeled_input_data
datasets_inputs.insert(2, unlabeled_input_data)
# split labels
labeled_label_data = datasets_labels[0][N_wo_label:]
unlabeled_label_data = datasets_labels[0][:N_wo_label]
datasets_labels[0] = labeled_label_data
datasets_labels.insert(2, unlabeled_label_data)
datasets = [_shared_dataset((i, l)) for i, l in zip(datasets_inputs, datasets_labels)]
return datasets
| [
"numpy.savez",
"random.shuffle",
"gzip.open",
"urllib.urlretrieve",
"numpy.asarray",
"os.path.join",
"os.path.split",
"os.path.isfile",
"numpy.zeros",
"numpy.concatenate",
"numpy.cumsum",
"numpy.load",
"cPickle.load",
"numpy.arange"
  ] | [((195, 226), 'numpy.cumsum', 'np.cumsum', (['np.r_[0, split[:-1]]'], {}), '(np.r_[0, split[:-1]])\n', (204, 226), True, 'import numpy as np\n'), ((238, 254), 'numpy.cumsum', 'np.cumsum', (['split'], {}), '(split)\n', (247, 254), True, 'import numpy as np\n'), ((2892, 2905), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (2899, 2905), True, 'import numpy as np\n'), ((3248, 3268), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (3262, 3268), False, 'import random\n'), ((6541, 6561), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (6555, 6561), False, 'import random\n'), ((9570, 9590), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (9584, 9590), False, 'import random\n'), ((829, 875), 'numpy.asarray', 'np.asarray', (['data_x'], {'dtype': 'theano.config.floatX'}), '(data_x, dtype=theano.config.floatX)\n', (839, 875), True, 'import numpy as np\n'), ((906, 952), 'numpy.asarray', 'np.asarray', (['data_y'], {'dtype': 'theano.config.floatX'}), '(data_y, dtype=theano.config.floatX)\n', (916, 952), True, 'import numpy as np\n'), ((1906, 1926), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1920, 1926), False, 'import os\n'), ((1990, 2009), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (2003, 2009), False, 'import os\n'), ((2037, 2075), 'os.path.join', 'os.path.join', (['data_dir', '"""mnist.pkl.gz"""'], {}), "(data_dir, 'mnist.pkl.gz')\n", (2049, 2075), False, 'import os\n'), ((2805, 2849), 'numpy.savez', 'np.savez', (['path'], {'inputs': 'inputs', 'labels': 'labels'}), '(path, inputs=inputs, labels=labels)\n', (2813, 2849), True, 'import numpy as np\n'), ((3376, 3435), 'numpy.zeros', 'np.zeros', (['(labels.shape[0], 10)'], {'dtype': 'theano.config.floatX'}), '((labels.shape[0], 10), dtype=theano.config.floatX)\n', (3384, 3435), True, 'import numpy as np\n'), ((5466, 5486), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (5480, 5486), False, 'import os\n'), ((5550, 5569), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (5563, 5569), False, 'import os\n'), ((5597, 5644), 'os.path.join', 'os.path.join', (['data_dir', '"""mnist_plus_rot.pkl.gz"""'], {}), "(data_dir, 'mnist_plus_rot.pkl.gz')\n", (5609, 5644), False, 'import os\n'), ((6150, 6163), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (6157, 6163), True, 'import numpy as np\n'), ((6669, 6728), 'numpy.zeros', 'np.zeros', (['(labels.shape[0], 10)'], {'dtype': 'theano.config.floatX'}), '((labels.shape[0], 10), dtype=theano.config.floatX)\n', (6677, 6728), True, 'import numpy as np\n'), ((8648, 8668), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (8662, 8668), False, 'import os\n'), ((8732, 8751), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (8745, 8751), False, 'import os\n'), ((8778, 8819), 'os.path.join', 'os.path.join', (['data_dir', '"""svhn_pca.pkl.gz"""'], {}), "(data_dir, 'svhn_pca.pkl.gz')\n", (8790, 8819), False, 'import os\n'), ((9732, 9797), 'numpy.zeros', 'np.zeros', (['(train_labels.shape[0], 10)'], {'dtype': 'theano.config.floatX'}), '((train_labels.shape[0], 10), dtype=theano.config.floatX)\n', (9740, 9797), True, 'import numpy as np\n'), ((9988, 10052), 'numpy.zeros', 'np.zeros', (['(test_labels.shape[0], 10)'], {'dtype': 'theano.config.floatX'}), '((test_labels.shape[0], 10), dtype=theano.config.floatX)\n', (9996, 10052), True, 'import numpy as np\n'), ((2092, 2124), 'os.path.isfile', 'os.path.isfile', (['mnist_picklefile'], {}), '(mnist_picklefile)\n', (2106, 2124), False, 'import os\n'), ((2323, 2367), 'urllib.urlretrieve', 'urllib.urlretrieve', (['origin', 'mnist_picklefile'], {}), '(origin, mnist_picklefile)\n', (2341, 2367), False, 'import urllib\n'), ((2502, 2535), 'gzip.open', 'gzip.open', (['mnist_picklefile', '"""rb"""'], {}), "(mnist_picklefile, 'rb')\n", (2511, 2535), False, 'import gzip\n'), ((2584, 2594), 'cPickle.load', 'cp.load', (['f'], {}), '(f)\n', (2591, 2594), True, 'import cPickle as cp\n'), ((5661, 5693), 'os.path.isfile', 'os.path.isfile', (['mnist_picklefile'], {}), '(mnist_picklefile)\n', (5675, 5693), False, 'import os\n'), ((5885, 5929), 'urllib.urlretrieve', 'urllib.urlretrieve', (['origin', 'mnist_picklefile'], {}), '(origin, mnist_picklefile)\n', (5903, 5929), False, 'import urllib\n'), ((5944, 5977), 'gzip.open', 'gzip.open', (['mnist_picklefile', '"""rb"""'], {}), "(mnist_picklefile, 'rb')\n", (5953, 5977), False, 'import gzip\n'), ((6003, 6013), 'cPickle.load', 'cp.load', (['f'], {}), '(f)\n', (6010, 6013), True, 'import cPickle as cp\n'), ((8844, 8875), 'os.path.isfile', 'os.path.isfile', (['svhn_picklefile'], {}), '(svhn_picklefile)\n', (8858, 8875), False, 'import os\n'), ((9061, 9104), 'urllib.urlretrieve', 'urllib.urlretrieve', (['origin', 'svhn_picklefile'], {}), '(origin, svhn_picklefile)\n', (9079, 9104), False, 'import urllib\n'), ((9127, 9159), 'gzip.open', 'gzip.open', (['svhn_picklefile', '"""rb"""'], {}), "(svhn_picklefile, 'rb')\n", (9136, 9159), False, 'import gzip\n'), ((9185, 9195), 'cPickle.load', 'cp.load', (['f'], {}), '(f)\n', (9192, 9195), True, 'import cPickle as cp\n'), ((2613, 2675), 'numpy.concatenate', 'np.concatenate', (['[trainset[0], validset[0], testset[0]]'], {'axis': '(0)'}), '([trainset[0], validset[0], testset[0]], axis=0)\n', (2627, 2675), True, 'import numpy as np\n'), ((2718, 2780), 'numpy.concatenate', 'np.concatenate', (['[trainset[1], validset[1], testset[1]]'], {'axis': '(0)'}), '([trainset[1], validset[1], testset[1]], axis=0)\n', (2732, 2780), True, 'import numpy as np\n'), ((3460, 3486), 'numpy.arange', 'np.arange', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (3469, 3486), True, 'import numpy as np\n'), ((6041, 6085), 'os.path.join', 'os.path.join', (['data_dir', '"""mnist_plus_rot.pkl"""'], {}), "(data_dir, 'mnist_plus_rot.pkl')\n", (6053, 6085), False, 'import os\n'), ((6753, 6779), 'numpy.arange', 'np.arange', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (6762, 6779), True, 'import numpy as np\n'), ((9223, 9261), 'os.path.join', 'os.path.join', (['data_dir', '"""svhn_pca.pkl"""'], {}), "(data_dir, 'svhn_pca.pkl')\n", (9235, 9261), False, 'import os\n')]
from itertools import cycle
from toolz.itertoolz import concatv, take
import numpy as np
import pytest
from tensorforce.environments import Environment
from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements
def test_initialization():
bad_seeds_03_env = Environment.create(
environment=BadSeeds03, seed_count=10, bad_seed_count=3, max_episode_length=100
)
assert bad_seeds_03_env.history_array.shape == (100, 10)
assert bad_seeds_03_env.state.shape == (7, 10)
assert len(bad_seeds_03_env.bad_seeds) == 3
assert len(bad_seeds_03_env.good_seeds) == 7
measurement_count_per_seed, measurement_count = count_measurements(
bad_seeds_03_env.history_array
)
assert np.all(measurement_count_per_seed == 3 * np.ones((1, 10)))
# all seeds have been measured
assert measurement_count == 10
def test_bad_initialization():
with pytest.raises(ValueError):
BadSeeds03(seed_count=3, bad_seed_count=10, max_episode_length=100)
def test_count_measurements():
history = np.array(
[
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, -0.5, 0.0],
[0.5, 0.0, 0.0, 0.0],
[0.0, -0.5, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.0],
[0.0, 0.5, 0.0, 0.0],
]
)
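    # count_measurements treats every nonzero entry as one measurement of that
    # seed: columns are seeds, rows are time steps, so seed 1 has 3 nonzero
    # entries and seed 3 has none.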
measurement_counts, measurement_count = count_measurements(
time_steps_by_seeds_state=history
)
assert np.all(measurement_counts == np.array([1, 3, 2, 0]))
assert measurement_count == 3
def test_play_the_game_badly():
bad_seeds_03_env = BadSeeds03(
seed_count=5, bad_seed_count=3, max_episode_length=3 + 5
)
measurement_counts, measured_seed_count = count_measurements(
bad_seeds_03_env.history_array
)
assert np.all(measurement_counts == np.array([3, 3, 3, 3, 3]))
# all seeds were measured at reset()
assert measured_seed_count == 5
# print(f"history before start:\n{bad_seeds_03_env.history}")
# measure all seeds but the last seed
for time_i, seed_i in enumerate(range(len(bad_seeds_03_env.all_seeds) - 1)):
time_i += 3
# print(f"time_i: {time_i}")
# print(f"turn before execute: {bad_seeds_03_env.turn}")
next_state, terminal, reward = bad_seeds_03_env.execute(actions=seed_i)
# print(f"turn after execute: {bad_seeds_03_env.turn}")
# print(f"history:\n{bad_seeds_03_env.history}")
assert bad_seeds_03_env.history_array[time_i, seed_i] != 0.0
assert terminal is False
assert reward == 0.0
# measurement_counts looks like this
# time_i = 0: [4 3 3 3 3 ]
# time_i = 1: [4 4 3 3 3 ]
# ...
# time_i = 3: [4 4 4 4 3 ]
measurement_counts, measured_seed_counts = count_measurements(
bad_seeds_03_env.history_array
)
for seed_j in range(seed_i):
# print(seed_j)
# print(measurement_counts)
assert measurement_counts[0, seed_j] == 4
assert measured_seed_counts == len(bad_seeds_03_env.all_seeds)
# measure the first seed again
# no reward because the last seed is never measured
next_state, terminal, reward = bad_seeds_03_env.execute(actions=4)
# print(f"bad_seed_measured_counts: {bad_seed_measured_counts}")
# print(f"least_measured_bad_seed_count: {least_measured_bad_seed_count}")
assert next_state[len(bad_seeds_03_env.all_seeds) - 1, 0] != 0.0
assert terminal is True
assert reward == 4.0
measurement_counts, measured_seed_counts = count_measurements(
bad_seeds_03_env.state
)
assert np.all(measurement_counts == np.array([[7, 7, 7, 7, 7]]))
assert measured_seed_counts == 5
def test_play_the_game_less_badly():
bad_seeds_03_env = BadSeeds03(
seed_count=5, bad_seed_count=3, max_episode_length=3 + 2 * 2 + 3 * 3 + 1
)
# measure the good seeds twice
# measure the bad seeds three times
for time_i, seed_i in enumerate(
concatv(
take(
n=2 * len(bad_seeds_03_env.good_seeds),
seq=cycle(bad_seeds_03_env.good_seed_indices),
),
take(
n=3 * len(bad_seeds_03_env.bad_seeds),
seq=cycle(bad_seeds_03_env.bad_seed_indices),
),
)
):
time_i += 3
next_state, terminal, reward = bad_seeds_03_env.execute(actions=seed_i)
assert bad_seeds_03_env.history_array[time_i, seed_i] != 0.0
assert terminal is False
assert reward == 0.0
measurement_counts, measured_seed_counts = count_measurements(
bad_seeds_03_env.history_array
)
expected_measurement_counts = np.zeros_like(measurement_counts)
expected_measurement_counts[0, bad_seeds_03_env.good_seed_indices] = 5
expected_measurement_counts[0, bad_seeds_03_env.bad_seed_indices] = 6
assert np.all(measurement_counts == expected_measurement_counts)
# measure the first good seed again
next_state, terminal, reward = bad_seeds_03_env.execute(
actions=bad_seeds_03_env.good_seed_indices[0]
)
print(f"history:\n{bad_seeds_03_env.history_array}")
measurement_counts, measured_seed_counts = count_measurements(
bad_seeds_03_env.history_array
)
print(f"measurement_counts: {measurement_counts}")
assert next_state[-1, bad_seeds_03_env.good_seed_indices[0]] != 0.0
assert terminal is True
# reward is the number of times the least-measured seed was measured
assert reward == 6.0
expected_measurement_counts[0, bad_seeds_03_env.good_seed_indices[0]] += 1
assert np.all(measurement_counts == expected_measurement_counts)
| [
"tensorforce.environments.Environment.create",
"itertools.cycle",
"numpy.ones",
"bad_seeds.simple.bad_seeds_03.BadSeeds03",
"numpy.array",
"bad_seeds.simple.bad_seeds_03.count_measurements",
"pytest.raises",
"numpy.all",
"numpy.zeros_like"
] | [((279, 382), 'tensorforce.environments.Environment.create', 'Environment.create', ([], {'environment': 'BadSeeds03', 'seed_count': '(10)', 'bad_seed_count': '(3)', 'max_episode_length': '(100)'}), '(environment=BadSeeds03, seed_count=10, bad_seed_count=3,\n max_episode_length=100)\n', (297, 382), False, 'from tensorforce.environments import Environment\n'), ((656, 706), 'bad_seeds.simple.bad_seeds_03.count_measurements', 'count_measurements', (['bad_seeds_03_env.history_array'], {}), '(bad_seeds_03_env.history_array)\n', (674, 706), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((1053, 1201), 'numpy.array', 'np.array', (['[[0.0, 0.5, 0.0, 0.0], [0.0, 0.0, -0.5, 0.0], [0.5, 0.0, 0.0, 0.0], [0.0, -\n 0.5, 0.0, 0.0], [0.0, 0.0, 0.5, 0.0], [0.0, 0.5, 0.0, 0.0]]'], {}), '([[0.0, 0.5, 0.0, 0.0], [0.0, 0.0, -0.5, 0.0], [0.5, 0.0, 0.0, 0.0],\n [0.0, -0.5, 0.0, 0.0], [0.0, 0.0, 0.5, 0.0], [0.0, 0.5, 0.0, 0.0]])\n', (1061, 1201), True, 'import numpy as np\n'), ((1340, 1393), 'bad_seeds.simple.bad_seeds_03.count_measurements', 'count_measurements', ([], {'time_steps_by_seeds_state': 'history'}), '(time_steps_by_seeds_state=history)\n', (1358, 1393), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((1563, 1631), 'bad_seeds.simple.bad_seeds_03.BadSeeds03', 'BadSeeds03', ([], {'seed_count': '(5)', 'bad_seed_count': '(3)', 'max_episode_length': '(3 + 5)'}), '(seed_count=5, bad_seed_count=3, max_episode_length=3 + 5)\n', (1573, 1631), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((1693, 1743), 'bad_seeds.simple.bad_seeds_03.count_measurements', 'count_measurements', (['bad_seeds_03_env.history_array'], {}), '(bad_seeds_03_env.history_array)\n', (1711, 1743), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((3557, 3599), 'bad_seeds.simple.bad_seeds_03.count_measurements', 'count_measurements', (['bad_seeds_03_env.state'], {}), '(bad_seeds_03_env.state)\n', (3575, 3599), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((3782, 3870), 'bad_seeds.simple.bad_seeds_03.BadSeeds03', 'BadSeeds03', ([], {'seed_count': '(5)', 'bad_seed_count': '(3)', 'max_episode_length': '(3 + 2 * 2 + 3 * 3 + 1)'}), '(seed_count=5, bad_seed_count=3, max_episode_length=3 + 2 * 2 + 3 *\n 3 + 1)\n', (3792, 3870), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((4609, 4659), 'bad_seeds.simple.bad_seeds_03.count_measurements', 'count_measurements', (['bad_seeds_03_env.history_array'], {}), '(bad_seeds_03_env.history_array)\n', (4627, 4659), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((4708, 4741), 'numpy.zeros_like', 'np.zeros_like', (['measurement_counts'], {}), '(measurement_counts)\n', (4721, 4741), True, 'import numpy as np\n'), ((4902, 4959), 'numpy.all', 'np.all', (['(measurement_counts == expected_measurement_counts)'], {}), '(measurement_counts == expected_measurement_counts)\n', (4908, 4959), True, 'import numpy as np\n'), ((5227, 5277), 'bad_seeds.simple.bad_seeds_03.count_measurements', 'count_measurements', (['bad_seeds_03_env.history_array'], {}), '(bad_seeds_03_env.history_array)\n', (5245, 5277), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((5637, 5694), 'numpy.all', 'np.all', (['(measurement_counts == expected_measurement_counts)'], {}), '(measurement_counts == expected_measurement_counts)\n', (5643, 
5694), True, 'import numpy as np\n'), ((903, 928), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (916, 928), False, 'import pytest\n'), ((938, 1005), 'bad_seeds.simple.bad_seeds_03.BadSeeds03', 'BadSeeds03', ([], {'seed_count': '(3)', 'bad_seed_count': '(10)', 'max_episode_length': '(100)'}), '(seed_count=3, bad_seed_count=10, max_episode_length=100)\n', (948, 1005), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((2771, 2821), 'bad_seeds.simple.bad_seeds_03.count_measurements', 'count_measurements', (['bad_seeds_03_env.history_array'], {}), '(bad_seeds_03_env.history_array)\n', (2789, 2821), False, 'from bad_seeds.simple.bad_seeds_03 import BadSeeds03, count_measurements\n'), ((1448, 1470), 'numpy.array', 'np.array', (['[1, 3, 2, 0]'], {}), '([1, 3, 2, 0])\n', (1456, 1470), True, 'import numpy as np\n'), ((1798, 1823), 'numpy.array', 'np.array', (['[3, 3, 3, 3, 3]'], {}), '([3, 3, 3, 3, 3])\n', (1806, 1823), True, 'import numpy as np\n'), ((3654, 3681), 'numpy.array', 'np.array', (['[[7, 7, 7, 7, 7]]'], {}), '([[7, 7, 7, 7, 7]])\n', (3662, 3681), True, 'import numpy as np\n'), ((773, 789), 'numpy.ones', 'np.ones', (['(1, 10)'], {}), '((1, 10))\n', (780, 789), True, 'import numpy as np\n'), ((4105, 4146), 'itertools.cycle', 'cycle', (['bad_seeds_03_env.good_seed_indices'], {}), '(bad_seeds_03_env.good_seed_indices)\n', (4110, 4146), False, 'from itertools import cycle\n'), ((4256, 4296), 'itertools.cycle', 'cycle', (['bad_seeds_03_env.bad_seed_indices'], {}), '(bad_seeds_03_env.bad_seed_indices)\n', (4261, 4296), False, 'from itertools import cycle\n')] |
import setuptools
setuptools.setup(
name='trackdays',
version='0.1.9',
description='An RL environment and training code for a car on a racetrack.',
url='http://github.com/hr0nix/trackdays',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=setuptools.find_packages(),
zip_safe=False,
python_requires='>=3.6',
install_requires=[
'tensorflow',
'tf-agents',
'imageio',
'imageio-ffmpeg',
'highway-env @ git+https://github.com/eleurent/highway-env'
],
)
| [
"setuptools.find_packages"
] | [((304, 330), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (328, 330), False, 'import setuptools\n')] |
"""
Django settings for conf project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(&(&mcuz4ks_7+)eluza_n3%)_8r$o@vol+e5$o@f3cnyk*qfs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', 'sugar.mansonsolutions.hk',]
#SECURE_SSL_REDIRECT=True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
INSTALLED_APPS += ['webframe', 'method_override', 'sugar', 'django_tables2']
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
#file: settings.py
MIDDLEWARE += [
'webframe.methodoverridemiddleware.MethodOverrideMiddleware', #django 1.10 or aboves
'webframe.LangMiddleware.LangMiddleware',
'webframe.CurrentUserMiddleware.CurrentUserMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
#URL
ROOT_URLCONF = 'conf.urls'
LOGIN_URL = 'webframe/login'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'webframe.providers.absolute_path', 'webframe.providers.fmt_injection', 'webframe.providers.template_injection',
],
},
},
]
WSGI_APPLICATION = 'conf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': 'dbhost',
'NAME': 'sugar',
'USER': 'sugar',
'PASSWORD': '<PASSWORD>',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hant'
TIME_ZONE = 'Asia/Hong_Kong'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/sugar/'
STATIC_ROOT= 'static/sugar'
#LOGGING
if not os.path.isdir('logs'):
    os.mkdir('logs')
LOGGING={
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': { 'format':'[%(asctime)s] %(levelname)s [%(name)s:%(filename)s:%(lineno)s] %(message)s', 'datefmt':'%d/%b/%Y %H:%M:%S' },
'simple': { 'format':'%(levelname)s <%(filename)s:%(lineno)d> %(message)s' },
},
'handlers': {
'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' },
'file': {'level': 'INFO', 'class': 'logging.handlers.TimedRotatingFileHandler', 'formatter': 'verbose', 'filename': './logs/runtime.log', 'when':'midnight'},
},
'loggers':{
'django': {'handlers':['console', 'file'], 'propagate': True, 'level': 'WARNING'},
'webframe': { 'handlers': ['console', ], 'level': 'INFO'},
'sugar': {'handlers': ['console', ], 'level':'DEBUG'},
},
}
#Template
TMPL_HEADER='sugar/header.html'
#Version
VERSION='v0.1.0'
| [
"os.path.abspath",
"os.path.isdir",
"os.mkdir"
] | [((3798, 3819), 'os.path.isdir', 'os.path.isdir', (['"""logs"""'], {}), "('logs')\n", (3811, 3819), False, 'import os\n'), ((3820, 3836), 'os.mkdir', 'os.mkdir', (['"""logs"""'], {}), "('logs')\n", (3828, 3836), False, 'import os\n'), ((433, 458), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (448, 458), False, 'import os\n')] |
#!/usr/bin/env python
import pickle
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from seq_model import CoinModel
import os
from tensorflow import keras
from image_processing import automatic_brightness_and_contrast, image_blurring, contour
import numpy as np
import cv2
import sys
batch_size = 32
img_height = 150
img_width = 150
data_dir = 'project/data/CoinsDataset/images'
# Checks if dataset was downloaded
if not os.path.isdir(data_dir):
sys.exit('Error. Please download dataset in folder project/data/CoinsDataset/images before continuing')
# Applies preprocessing functions to dataset images
if not os.path.isdir('project/dataset'):
print("Preprocessing dataset; it may take a while...")
for subdir, dirs, files in os.walk(data_dir):
for file in files:
img_dir = os.path.join(subdir, file)
classs = os.path.basename((os.path.dirname(os.path.join(subdir, file))))
img = keras.preprocessing.image.load_img(
img_dir, target_size=(img_height, img_width))
img = np.array(img)
im_auto, alpha, beta = automatic_brightness_and_contrast(img)
im_blur = image_blurring(im_auto)
im_contour = contour(im_blur)
# if the class folder doesn't exist, it creates a new folder
new_img_dir = os.path.join('project/dataset', classs)
if not os.path.exists(new_img_dir):
os.makedirs(new_img_dir)
new_img_dir = os.path.join(new_img_dir, file)
# saves the processed picture in the new dataset
cv2.imwrite(new_img_dir, im_contour)
# Divides dataset into train test and validation set
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
'project/dataset',
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
'project/dataset',
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
# Collects all classes labels
class_names = train_ds.class_names
f = open('project/coin_labels.pickle', "wb")
f.write(pickle.dumps(class_names))
f.close()
# Saves data in cache for speeding up training process
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
normalization_layer = layers.experimental.preprocessing.Rescaling(1. / 255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
# Trains dataset using model
model = CoinModel(len(class_names))
epochs = 20
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# Prints accuracy and loss
metrics_train = model.evaluate_generator(train_ds, steps=batch_size)
metrics_test = model.evaluate_generator(val_ds, steps=batch_size)
print("Train Accuracy = %.4f - Train Loss = %.4f" % (metrics_train[1], metrics_train[0]))
print("Validation Accuracy = %.4f - Validation Loss = %.4f" % (metrics_test[1], metrics_test[0]))
# Creates accuracy and loss graphs
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# Saves model
model.save("project/coin_generator.h5")
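
# A quick inference sketch (an addition, not part of the original script): reload
# the saved model and the pickled label list, then classify a single image.
# "sample.jpg" is a placeholder path; if CoinModel is a subclassed model,
# load_model may also need custom_objects={"CoinModel": CoinModel}. For best
# results, the same brightness/blur/contour preprocessing used above should be
# applied to the image first.
def predict_coin(image_path="sample.jpg"):
    trained = keras.models.load_model("project/coin_generator.h5")
    with open("project/coin_labels.pickle", "rb") as fh:
        labels = pickle.load(fh)
    img = keras.preprocessing.image.load_img(image_path, target_size=(img_height, img_width))
    batch = np.expand_dims(np.array(img), axis=0)  # add a batch dimension: (1, H, W, 3)
    scores = trained.predict(batch)[0]
    return labels[int(np.argmax(scores))]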
| [
"pickle.dumps",
"tensorflow.keras.layers.experimental.preprocessing.Rescaling",
"numpy.array",
"sys.exit",
"image_processing.contour",
"os.walk",
"os.path.exists",
"matplotlib.pyplot.plot",
"os.path.isdir",
"tensorflow.keras.preprocessing.image_dataset_from_directory",
"tensorflow.keras.preprocessing.image.load_img",
"image_processing.image_blurring",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"cv2.imwrite",
"image_processing.automatic_brightness_and_contrast",
"os.makedirs",
"os.path.join",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot"
] | [((1736, 1925), 'tensorflow.keras.preprocessing.image_dataset_from_directory', 'tf.keras.preprocessing.image_dataset_from_directory', (['"""project/dataset"""'], {'validation_split': '(0.2)', 'subset': '"""training"""', 'seed': '(123)', 'image_size': '(img_height, img_width)', 'batch_size': 'batch_size'}), "('project/dataset',\n validation_split=0.2, subset='training', seed=123, image_size=(\n img_height, img_width), batch_size=batch_size)\n", (1787, 1925), True, 'import tensorflow as tf\n'), ((1952, 2143), 'tensorflow.keras.preprocessing.image_dataset_from_directory', 'tf.keras.preprocessing.image_dataset_from_directory', (['"""project/dataset"""'], {'validation_split': '(0.2)', 'subset': '"""validation"""', 'seed': '(123)', 'image_size': '(img_height, img_width)', 'batch_size': 'batch_size'}), "('project/dataset',\n validation_split=0.2, subset='validation', seed=123, image_size=(\n img_height, img_width), batch_size=batch_size)\n", (2003, 2143), True, 'import tensorflow as tf\n'), ((2552, 2606), 'tensorflow.keras.layers.experimental.preprocessing.Rescaling', 'layers.experimental.preprocessing.Rescaling', (['(1.0 / 255)'], {}), '(1.0 / 255)\n', (2595, 2606), False, 'from tensorflow.keras import layers\n'), ((3462, 3488), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3472, 3488), True, 'import matplotlib.pyplot as plt\n'), ((3489, 3509), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3500, 3509), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3564), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""'}), "(epochs_range, acc, label='Training Accuracy')\n", (3518, 3564), True, 'import matplotlib.pyplot as plt\n'), ((3565, 3625), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation Accuracy"""'}), "(epochs_range, val_acc, label='Validation Accuracy')\n", (3573, 3625), True, 'import matplotlib.pyplot as plt\n'), ((3626, 3655), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3636, 3655), True, 'import matplotlib.pyplot as plt\n'), ((3656, 3701), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (3665, 3701), True, 'import matplotlib.pyplot as plt\n'), ((3703, 3723), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3714, 3723), True, 'import matplotlib.pyplot as plt\n'), ((3724, 3775), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), "(epochs_range, loss, label='Training Loss')\n", (3732, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3776, 3833), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], {'label': '"""Validation Loss"""'}), "(epochs_range, val_loss, label='Validation Loss')\n", (3784, 3833), True, 'import matplotlib.pyplot as plt\n'), ((3834, 3863), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3844, 3863), True, 'import matplotlib.pyplot as plt\n'), ((3864, 3905), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (3873, 3905), True, 'import matplotlib.pyplot as plt\n'), ((3907, 3917), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3915, 3917), True, 'import matplotlib.pyplot as plt\n'), ((468, 491), 
'os.path.isdir', 'os.path.isdir', (['data_dir'], {}), '(data_dir)\n', (481, 491), False, 'import os\n'), ((497, 610), 'sys.exit', 'sys.exit', (['"""Error. Please download dataset in folder project/data/CoinsDataset/images before continuing"""'], {}), "(\n 'Error. Please download dataset in folder project/data/CoinsDataset/images before continuing'\n )\n", (505, 610), False, 'import sys\n'), ((660, 692), 'os.path.isdir', 'os.path.isdir', (['"""project/dataset"""'], {}), "('project/dataset')\n", (673, 692), False, 'import os\n'), ((784, 801), 'os.walk', 'os.walk', (['data_dir'], {}), '(data_dir)\n', (791, 801), False, 'import os\n'), ((2279, 2304), 'pickle.dumps', 'pickle.dumps', (['class_names'], {}), '(class_names)\n', (2291, 2304), False, 'import pickle\n'), ((852, 878), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (864, 878), False, 'import os\n'), ((982, 1067), 'tensorflow.keras.preprocessing.image.load_img', 'keras.preprocessing.image.load_img', (['img_dir'], {'target_size': '(img_height, img_width)'}), '(img_dir, target_size=(img_height, img_width)\n )\n', (1016, 1067), False, 'from tensorflow import keras\n'), ((1098, 1111), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1106, 1111), True, 'import numpy as np\n'), ((1147, 1185), 'image_processing.automatic_brightness_and_contrast', 'automatic_brightness_and_contrast', (['img'], {}), '(img)\n', (1180, 1185), False, 'from image_processing import automatic_brightness_and_contrast, image_blurring, contour\n'), ((1208, 1231), 'image_processing.image_blurring', 'image_blurring', (['im_auto'], {}), '(im_auto)\n', (1222, 1231), False, 'from image_processing import automatic_brightness_and_contrast, image_blurring, contour\n'), ((1257, 1273), 'image_processing.contour', 'contour', (['im_blur'], {}), '(im_blur)\n', (1264, 1273), False, 'from image_processing import automatic_brightness_and_contrast, image_blurring, contour\n'), ((1373, 1412), 'os.path.join', 'os.path.join', (['"""project/dataset"""', 'classs'], {}), "('project/dataset', classs)\n", (1385, 1412), False, 'import os\n'), ((1528, 1559), 'os.path.join', 'os.path.join', (['new_img_dir', 'file'], {}), '(new_img_dir, file)\n', (1540, 1559), False, 'import os\n'), ((1633, 1669), 'cv2.imwrite', 'cv2.imwrite', (['new_img_dir', 'im_contour'], {}), '(new_img_dir, im_contour)\n', (1644, 1669), False, 'import cv2\n'), ((1432, 1459), 'os.path.exists', 'os.path.exists', (['new_img_dir'], {}), '(new_img_dir)\n', (1446, 1459), False, 'import os\n'), ((1477, 1501), 'os.makedirs', 'os.makedirs', (['new_img_dir'], {}), '(new_img_dir)\n', (1488, 1501), False, 'import os\n'), ((934, 960), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (946, 960), False, 'import os\n')] |
#!/usr/bin/env python
from builtins import input
from .fizzbuzz import FizzBuzzer
def main():
"""Run Fizzbuzz program"""
fizzbuzzer = FizzBuzzer()
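    # FizzBuzzer instances are callable: each call maps the entered value to
    # its fizz/buzz string, and falsy results are simply skipped below.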
while True:
output = fizzbuzzer(input("Provide a value> "))
if output:
print("%s" % (output))
if __name__ == "__main__":
main()
| [
"builtins.input"
] | [((204, 230), 'builtins.input', 'input', (['"""Provide a value> """'], {}), "('Provide a value> ')\n", (209, 230), False, 'from builtins import input\n')] |
#!/usr/bin/env python3
"""This module helps to inspect an executable.
In particular, it checks whether an executable has been compiled with
the debug option `-g`.
"""
import subprocess as sbp
import os.path
def is_debug(root=b"./", execname=b"lppic", verbose=True):
    """Return True if the executable has been compiled with debug symbols."""
fname = root+execname
if verbose:
print("Inspecting the debug option of the executable")
print("File name:", fname)
print("=======================")
    if not os.path.isfile(fname):
if verbose:
print("the executable do not existe ! Check your location or arguments")
return None
p = sbp.run(["gdb", root+execname], input=b"q", stdout=sbp.PIPE)
    iout = p.stdout.strip()
lines = iout.decode('ascii').splitlines()
#print(iout.decode())
line = lines[-3]
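    # gdb's banner ends with a line like
    # "Reading symbols from ./lppic...(no debugging symbols found)...done.";
    # the fixed slice below pulls the 26-character phrase
    # "no debugging symbols found" out of that line when it is present.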
info = line[-35:-9]
if info == "no debugging symbols found":
if verbose:
print("The executable is runing without the debug option")
return False
else:
if verbose:
print("This executable is running with the debug option `-g`")
return True
if __name__ == "__main__":
    if is_debug(verbose=True):
print("True")
else:
print("False")
| [
"subprocess.run"
] | [((670, 732), 'subprocess.run', 'sbp.run', (["['gdb', root + execname]"], {'input': "b'q'", 'stdout': 'sbp.PIPE'}), "(['gdb', root + execname], input=b'q', stdout=sbp.PIPE)\n", (677, 732), True, 'import subprocess as sbp\n')] |
import cv2
#import os
#dataset = "dataset"
#name = "champ"
#path = os.path.join(dataset,name)
#if not os.path.isdir(path):
# os.mkdir(path)
#(width,height) = (130,100)
alg = "haarcascade_frontalface_default.xml"
haar_cascade = cv2.CascadeClassifier(alg)
cam = cv2.VideoCapture(0)
#count = 1
while True:
# print(count)
_,img = cam.read()
text="face not detected"
grayImg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
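    # detectMultiScale(image, scaleFactor=1.3, minNeighbors=4): the image
    # pyramid shrinks by 30% per level, and a region needs 4 overlapping
    # detections before it is accepted as a face.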
face = haar_cascade.detectMultiScale(grayImg,1.3,4)
for (x,y,w,h) in face:
text="Face Detected"
cv2.rectangle(img,(x,y),(x+w,y+h), (0,255,0),2)
# faceOnly = grayImg[y:y+h,x:x+w]
# resizeImg = cv2.resize(faceOnly,(width,height))
# cv2.imwrite("%s/%s.jpg" %(path,count),resizeImg)
# count+=1
print(text)
cv2.imshow("FaceDetection",img)
key = cv2.waitKey(10)
if key == 27:
break
print("Image Captured successfully")
cam.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.waitKey"
] | [((243, 269), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['alg'], {}), '(alg)\n', (264, 269), False, 'import cv2\n'), ((277, 296), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (293, 296), False, 'import cv2\n'), ((968, 991), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (989, 991), False, 'import cv2\n'), ((412, 449), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (424, 449), False, 'import cv2\n'), ((821, 853), 'cv2.imshow', 'cv2.imshow', (['"""FaceDetection"""', 'img'], {}), "('FaceDetection', img)\n", (831, 853), False, 'import cv2\n'), ((864, 879), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (875, 879), False, 'import cv2\n'), ((573, 631), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (586, 631), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
#
# ramstk.views.gtk3.assistants.fmea.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright 2007 - 2020 <NAME> doyle.rowland <AT> reliaqual <DOT> com
"""The RAMSTK (D)FME(C)A Assistants Module."""
# RAMSTK Package Imports
from ramstk.views.gtk3 import Gtk, _
from ramstk.views.gtk3.widgets import RAMSTKDialog, RAMSTKLabel
class AddControlAction(RAMSTKDialog):
"""Assistant to walk user through process of adding control or action."""
def __init__(self, parent=None):
"""Initialize on instance of the Add Control or Action Assistant."""
super().__init__(
_("RAMSTK FMEA/FMECA Design Control and " "Action Addition Assistant"),
dlgparent=parent,
)
# Initialize private dictionary attributes.
# Initialize private list attributes.
# Initialize private scalar attributes.
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.rdoControl = Gtk.RadioButton.new_with_label_from_widget(
None, _("Add control")
)
self.rdoAction = Gtk.RadioButton.new_from_widget(self.rdoControl)
self.rdoAction.set_label(_("Add action"))
self.__make_ui()
def __make_ui(self):
"""Build the user interface.
:return: None
:rtype: None
"""
self.set_default_size(250, -1)
_fixed = Gtk.Fixed()
self.vbox.pack_start(_fixed, True, True, 0)
_label = RAMSTKLabel(
_(
"This is the RAMSTK Design Control and Action "
"Addition Assistant. Enter the information "
"requested below and then press 'OK' to add "
"a new design control or action to the RAMSTK "
"Program database."
)
)
_label.do_set_properties(width=600, height=-1, wrap=True)
_fixed.put(_label, 5, 10)
_y_pos: int = _label.get_preferred_size()[0].height + 50
self.rdoControl.set_tooltip_text(
_("Select to add a design control " "to the selected failure cause.")
)
self.rdoAction.set_tooltip_text(
_("Select to add an action to the selected failure cause.")
)
_fixed.put(self.rdoControl, 10, _y_pos)
_fixed.put(self.rdoAction, 10, _y_pos + 35)
_fixed.show_all()
def _cancel(self, __button):
"""Destroy the assistant when the 'Cancel' button is pressed.
:param gtk.Button __button: the gtk.Button() that called this method.
"""
self.destroy()
| [
"ramstk.views.gtk3.Gtk.Fixed",
"ramstk.views.gtk3.Gtk.RadioButton.new_from_widget",
"ramstk.views.gtk3._"
] | [((1188, 1236), 'ramstk.views.gtk3.Gtk.RadioButton.new_from_widget', 'Gtk.RadioButton.new_from_widget', (['self.rdoControl'], {}), '(self.rdoControl)\n', (1219, 1236), False, 'from ramstk.views.gtk3 import Gtk, _\n'), ((1489, 1500), 'ramstk.views.gtk3.Gtk.Fixed', 'Gtk.Fixed', ([], {}), '()\n', (1498, 1500), False, 'from ramstk.views.gtk3 import Gtk, _\n'), ((641, 708), 'ramstk.views.gtk3._', '_', (['"""RAMSTK FMEA/FMECA Design Control and Action Addition Assistant"""'], {}), "('RAMSTK FMEA/FMECA Design Control and Action Addition Assistant')\n", (642, 708), False, 'from ramstk.views.gtk3 import Gtk, _\n'), ((1136, 1152), 'ramstk.views.gtk3._', '_', (['"""Add control"""'], {}), "('Add control')\n", (1137, 1152), False, 'from ramstk.views.gtk3 import Gtk, _\n'), ((1270, 1285), 'ramstk.views.gtk3._', '_', (['"""Add action"""'], {}), "('Add action')\n", (1271, 1285), False, 'from ramstk.views.gtk3 import Gtk, _\n'), ((1596, 1799), 'ramstk.views.gtk3._', '_', (['"""This is the RAMSTK Design Control and Action Addition Assistant. Enter the information requested below and then press \'OK\' to add a new design control or action to the RAMSTK Program database."""'], {}), '("This is the RAMSTK Design Control and Action Addition Assistant. Enter the information requested below and then press \'OK\' to add a new design control or action to the RAMSTK Program database."\n )\n', (1597, 1799), False, 'from ramstk.views.gtk3 import Gtk, _\n'), ((2132, 2198), 'ramstk.views.gtk3._', '_', (['"""Select to add a design control to the selected failure cause."""'], {}), "('Select to add a design control to the selected failure cause.')\n", (2133, 2198), False, 'from ramstk.views.gtk3 import Gtk, _\n'), ((2265, 2324), 'ramstk.views.gtk3._', '_', (['"""Select to add an action to the selected failure cause."""'], {}), "('Select to add an action to the selected failure cause.')\n", (2266, 2324), False, 'from ramstk.views.gtk3 import Gtk, _\n')] |
""" This code is modified from pyclustertend
https://github.com/lachhebo/pyclustertend/blob/master/LICENSE
"""
import numpy as np
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
def ordered_dissimilarity_matrix(X):
"""The ordered dissimilarity matrix is used by visual assesement of tendency. It is a just a a reordering
of the dissimilarity matrix.
Parameters
----------
X : matrix
numpy array
Return
-------
    ODM : matrix
        the ordered dissimilarity matrix.
    P : array
        the permutation used to reorder the observations.
"""
# Step 1 :
I = []
R = pairwise_distances(X)
P = np.zeros(R.shape[0], dtype="int")
argmax = np.argmax(R)
j = argmax % R.shape[1]
i = argmax // R.shape[1]
P[0] = i
I.append(i)
K = np.linspace(0, R.shape[0] - 1, R.shape[0], dtype="int")
J = np.delete(K, i)
# Step 2 :
# for each row
total_ticks = np.sum(
[i * j for i, j in zip(range(1, R.shape[0] + 1), range(R.shape[0])[::-1])]
)
pbar = tqdm(total=total_ticks, desc="candidates")
for r in tqdm(range(1, R.shape[0]), desc="row"):
p, q = (-1, -1)
mini = np.max(R)
for candidate_p in I:
for candidate_j in J:
if R[candidate_p, candidate_j] < mini:
p = candidate_p
q = candidate_j
mini = R[p, q]
pbar.update(len(J))
P[r] = q
I.append(q)
ind_q = np.where(np.array(J) == q)[0][0]
J = np.delete(J, ind_q)
# Step 3
ODM = np.zeros(R.shape)
for i in range(ODM.shape[0]):
for j in range(ODM.shape[1]):
ODM[i, j] = R[P[i], P[j]]
# Step 4 :
return ODM, P
def ivat_ordered_dissimilarity_matrix(D):
"""The ordered dissimilarity matrix is used by ivat. It is a just a a reordering
of the dissimilarity matrix.
Parameters
----------
X : matrix
numpy array
Return
-------
D_prim : matrix
        the ordered dissimilarity matrix.
"""
D_prim = np.zeros((D.shape[0], D.shape[0]))
for r in range(1, D.shape[0]):
# Step 1 : find j for which D[r,j] is minimum and j in [1:r-1]
j = np.argmin(D[r, 0:r])
# Step 2 :
D_prim[r, j] = D[r, j]
        # Step 3: for c in 0..r-1 with c != j
c_tab = np.array(range(0, r))
c_tab = c_tab[c_tab != j]
for c in c_tab:
D_prim[r, c] = max(D[r, j], D_prim[j, c])
D_prim[c, r] = D_prim[r, c]
return D_prim
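

# A minimal usage sketch (an addition for illustration): build a small blobbed
# dataset, compute the VAT image and its iVAT refinement, and display both with
# the matplotlib import above. ``make_blobs`` from scikit-learn is assumed to be
# available; any (n_samples, n_features) array works in its place.
def _vat_ivat_demo():
    from sklearn.datasets import make_blobs

    X, _ = make_blobs(n_samples=60, centers=3, random_state=0)
    odm, _perm = ordered_dissimilarity_matrix(X)
    ivat_img = ivat_ordered_dissimilarity_matrix(odm)
    _fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.imshow(odm, cmap="gray")       # dark diagonal blocks hint at clusters
    ax2.imshow(ivat_img, cmap="gray")  # iVAT sharpens the block structure
    plt.show()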
| [
"numpy.delete",
"numpy.argmax",
"sklearn.metrics.pairwise_distances",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"tqdm.autonotebook.tqdm",
"numpy.argmin"
] | [((620, 641), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X'], {}), '(X)\n', (638, 641), False, 'from sklearn.metrics import pairwise_distances\n'), ((650, 683), 'numpy.zeros', 'np.zeros', (['R.shape[0]'], {'dtype': '"""int"""'}), "(R.shape[0], dtype='int')\n", (658, 683), True, 'import numpy as np\n'), ((698, 710), 'numpy.argmax', 'np.argmax', (['R'], {}), '(R)\n', (707, 710), True, 'import numpy as np\n'), ((808, 863), 'numpy.linspace', 'np.linspace', (['(0)', '(R.shape[0] - 1)', 'R.shape[0]'], {'dtype': '"""int"""'}), "(0, R.shape[0] - 1, R.shape[0], dtype='int')\n", (819, 863), True, 'import numpy as np\n'), ((872, 887), 'numpy.delete', 'np.delete', (['K', 'i'], {}), '(K, i)\n', (881, 887), True, 'import numpy as np\n'), ((1050, 1092), 'tqdm.autonotebook.tqdm', 'tqdm', ([], {'total': 'total_ticks', 'desc': '"""candidates"""'}), "(total=total_ticks, desc='candidates')\n", (1054, 1092), False, 'from tqdm.autonotebook import tqdm\n'), ((1601, 1618), 'numpy.zeros', 'np.zeros', (['R.shape'], {}), '(R.shape)\n', (1609, 1618), True, 'import numpy as np\n'), ((2101, 2135), 'numpy.zeros', 'np.zeros', (['(D.shape[0], D.shape[0])'], {}), '((D.shape[0], D.shape[0]))\n', (2109, 2135), True, 'import numpy as np\n'), ((1187, 1196), 'numpy.max', 'np.max', (['R'], {}), '(R)\n', (1193, 1196), True, 'import numpy as np\n'), ((1556, 1575), 'numpy.delete', 'np.delete', (['J', 'ind_q'], {}), '(J, ind_q)\n', (1565, 1575), True, 'import numpy as np\n'), ((2256, 2276), 'numpy.argmin', 'np.argmin', (['D[r, 0:r]'], {}), '(D[r, 0:r])\n', (2265, 2276), True, 'import numpy as np\n'), ((1520, 1531), 'numpy.array', 'np.array', (['J'], {}), '(J)\n', (1528, 1531), True, 'import numpy as np\n')] |
import numpy as np
from ..utils import uwa
from .calibrate_base import CAL_PARAMS
from .calibrate_ek import CalibrateBase
class CalibrateAZFP(CalibrateBase):
def __init__(self, echodata, env_params, cal_params, **kwargs):
super().__init__(echodata, env_params)
# initialize cal params
self.cal_params = dict.fromkeys(CAL_PARAMS["AZFP"])
# load env and cal parameters
self.get_env_params()
if cal_params is None:
cal_params = {}
self.get_cal_params(cal_params)
# self.range_meter computed under self._cal_power()
# because the implementation is different for Sv and TS
def get_cal_params(self, cal_params):
"""Get cal params using user inputs or values from data file.
Parameters
----------
cal_params : dict
"""
# Get params from Beam_group1
self.cal_params["equivalent_beam_angle"] = (
cal_params["equivalent_beam_angle"]
if "equivalent_beam_angle" in cal_params
else self.echodata.beam["equivalent_beam_angle"]
)
# Get params from the Vendor_specific group
for p in ["EL", "DS", "TVR", "VTX", "Sv_offset"]:
# substitute if None in user input
self.cal_params[p] = cal_params[p] if p in cal_params else self.echodata.vendor[p]
def get_env_params(self):
"""Get env params using user inputs or values from data file.
Parameters
----------
env_params : dict
"""
# Temperature comes from either user input or data file
# Below, renaming time1 to ping_time is necessary because we are performing
# calculations with the beam groups that use ping_time
self.env_params["temperature"] = (
self.env_params["temperature"]
if "temperature" in self.env_params
else self.echodata.environment["temperature"].rename({"time1": "ping_time"})
)
# Salinity and pressure always come from user input
if ("salinity" not in self.env_params) or ("pressure" not in self.env_params):
raise ReferenceError("Please supply both salinity and pressure in env_params.")
else:
self.env_params["salinity"] = self.env_params["salinity"]
self.env_params["pressure"] = self.env_params["pressure"]
# Always calculate sound speed and absorption
self.env_params["sound_speed"] = uwa.calc_sound_speed(
temperature=self.env_params["temperature"],
salinity=self.env_params["salinity"],
pressure=self.env_params["pressure"],
formula_source="AZFP",
)
self.env_params["sound_absorption"] = uwa.calc_absorption(
frequency=self.echodata.beam["frequency_nominal"],
temperature=self.env_params["temperature"],
salinity=self.env_params["salinity"],
pressure=self.env_params["pressure"],
formula_source="AZFP",
)
def compute_range_meter(self, cal_type):
"""Calculate range (``echo_range``) in meter using AZFP formula.
Note the range calculation differs for Sv and TS per AZFP matlab code.
Parameters
----------
cal_type : str
'Sv' for calculating volume backscattering strength, or
'TS' for calculating target strength
"""
self.range_meter = self.echodata.compute_range(self.env_params, azfp_cal_type=cal_type)
def _cal_power(self, cal_type, **kwargs):
"""Calibrate to get volume backscattering strength (Sv) from AZFP power data.
The calibration formulae used here is based on Appendix G in
the GU-100-AZFP-01-R50 Operator's Manual.
Note a Sv_offset factor that varies depending on frequency is used
in the calibration as documented on p.90.
See calc_Sv_offset() in convert/azfp.py
"""
# Compute range in meters
self.compute_range_meter(
cal_type=cal_type
) # range computation different for Sv and TS per AZFP matlab code
# Compute various params
# TODO: take care of dividing by zero encountered in log10
spreading_loss = 20 * np.log10(self.range_meter)
absorption_loss = 2 * self.env_params["sound_absorption"] * self.range_meter
SL = self.cal_params["TVR"] + 20 * np.log10(self.cal_params["VTX"]) # eq.(2)
# scaling factor (slope) in Fig.G-1, units Volts/dB], see p.84
a = self.cal_params["DS"]
EL = (
self.cal_params["EL"] - 2.5 / a + self.echodata.beam.backscatter_r / (26214 * a)
) # eq.(5) # has beam dim due to backscatter_r
if cal_type == "Sv":
# eq.(9)
out = (
EL
- SL
+ spreading_loss
+ absorption_loss
- 10
* np.log10(
0.5
* self.env_params["sound_speed"]
* self.echodata.beam["transmit_duration_nominal"]
* self.cal_params["equivalent_beam_angle"]
)
+ self.cal_params["Sv_offset"]
) # see p.90-91 for this correction to Sv
out.name = "Sv"
elif cal_type == "TS":
# eq.(10)
out = EL - SL + 2 * spreading_loss + absorption_loss
out.name = "TS"
else:
raise ValueError("cal_type not recognized!")
# Attach calculated range (with units meter) into data set
out = out.to_dataset()
out = out.merge(self.range_meter)
# Add frequency_nominal to data set
out["frequency_nominal"] = self.echodata.beam["frequency_nominal"]
# Add env and cal parameters
out = self._add_params_to_output(out)
# Squeeze out the beam dim
# doing it here because both out and self.cal_params["equivalent_beam_angle"] has beam dim
return out.squeeze("beam", drop=True)
def compute_Sv(self, **kwargs):
return self._cal_power(cal_type="Sv")
def compute_TS(self, **kwargs):
return self._cal_power(cal_type="TS")
| [
"numpy.log10"
] | [((4265, 4291), 'numpy.log10', 'np.log10', (['self.range_meter'], {}), '(self.range_meter)\n', (4273, 4291), True, 'import numpy as np\n'), ((4420, 4452), 'numpy.log10', 'np.log10', (["self.cal_params['VTX']"], {}), "(self.cal_params['VTX'])\n", (4428, 4452), True, 'import numpy as np\n'), ((4951, 5095), 'numpy.log10', 'np.log10', (["(0.5 * self.env_params['sound_speed'] * self.echodata.beam[\n 'transmit_duration_nominal'] * self.cal_params['equivalent_beam_angle'])"], {}), "(0.5 * self.env_params['sound_speed'] * self.echodata.beam[\n 'transmit_duration_nominal'] * self.cal_params['equivalent_beam_angle'])\n", (4959, 5095), True, 'import numpy as np\n')] |
import unittest
import adn as f
class pruebas(unittest.TestCase):
def test_obtener_complemento(self):
self.assertEqual(f.obtener_complemento('A'), 'T')
self.assertEqual(f.obtener_complemento('G'), 'C')
self.assertEqual(f.obtener_complemento('T'), 'A')
self.assertRaises(ValueError, f.obtener_complemento, 'Z')
def test_generar_cadena_complementaria(self):
self.assertEqual(f.generar_cadena_complementaria('ATGC'), 'TACG')
self.assertEqual(f.generar_cadena_complementaria('GATC'), 'CTAG')
self.assertEqual(f.generar_cadena_complementaria('CA'), 'GT')
def test_calcular_correspondencia(self):
self.assertEqual(f.calcular_correspondencia('ATATTACGGC', 'TATAATGCCG'), 100.0)
self.assertEqual(f.calcular_correspondencia('ATATATCGGC', 'TATAATGCCG'), 80.0)
self.assertEqual(f.calcular_correspondencia('ATATATCGGC', 'CGATTTACGA'), 20.0)
self.assertEqual(f.calcular_correspondencia('TTGGAACC', 'ACTA'), 'Las cadenas no tienen la misma longitud')
def test_corresponden(self):
self.assertTrue(f.corresponden('A', 'T'), True)
self.assertFalse(f.corresponden('G', 'T'), False)
def test_es_cadena_valida(self):
self.assertFalse(f.es_cadena_valida('FTATTACGGC'), False)
self.assertTrue(f.es_cadena_valida('ATATTACGGC'), True)
def test_es_base(self):
self.assertTrue(f.es_base('A'), True)
self.assertTrue(f.es_base('T'), True)
self.assertTrue(f.es_base('G'), True)
self.assertTrue(f.es_base('C'), True)
self.assertFalse(f.es_base('B'), False)
def test_es_subcadena(self):
self.assertTrue(f.es_subcadena('ATCTTA', 'ATC'), True)
self.assertFalse(f.es_subcadena('TCGA', 'AAT'), False)
def test_reparar_dano(self):
self.assertEqual(f.reparar_dano('ATGPPP', 'C'), 'ATGPPP')
self.assertEqual(f.reparar_dano('ATGCCC', 'G'), 'ATGCCC')
def test_obtener_secciones(self):
self.assertEqual(f.obtener_secciones('atata', 3), ['a', 't', 'ata'])
self.assertEqual(f.obtener_secciones('ATGCTACAG', 2), ['ATGC', 'TACAG'])
def test_obtener_complementos(self):
self.assertEqual(f.obtener_complementos(['AAA', 'CGC']), ['TTT', 'GCG'])
self.assertEqual(f.obtener_complementos(['AGT', 'GTA']), ['TCA', 'CAT'])
def test_unir_cadena(self):
self.assertEqual(f.unir_cadena(['CGTA', 'ATTA']), 'CGTAATTA')
self.assertEqual(f.unir_cadena(['GC', 'GCATTT']), 'GCGCATTT')
def test_complementar_cadenas(self):
self.assertEqual(f.complementar_cadenas(['GCC', 'CGG']), 'CGGGCC')
self.assertEqual(f.complementar_cadenas(['AT', 'GTA', 'CC']), 'TACATGG')
| [
"adn.obtener_complementos",
"adn.corresponden",
"adn.es_cadena_valida",
"adn.reparar_dano",
"adn.complementar_cadenas",
"adn.obtener_complemento",
"adn.es_base",
"adn.obtener_secciones",
"adn.calcular_correspondencia",
"adn.generar_cadena_complementaria",
"adn.unir_cadena",
"adn.es_subcadena"
] | [((133, 159), 'adn.obtener_complemento', 'f.obtener_complemento', (['"""A"""'], {}), "('A')\n", (154, 159), True, 'import adn as f\n'), ((191, 217), 'adn.obtener_complemento', 'f.obtener_complemento', (['"""G"""'], {}), "('G')\n", (212, 217), True, 'import adn as f\n'), ((249, 275), 'adn.obtener_complemento', 'f.obtener_complemento', (['"""T"""'], {}), "('T')\n", (270, 275), True, 'import adn as f\n'), ((423, 462), 'adn.generar_cadena_complementaria', 'f.generar_cadena_complementaria', (['"""ATGC"""'], {}), "('ATGC')\n", (454, 462), True, 'import adn as f\n'), ((497, 536), 'adn.generar_cadena_complementaria', 'f.generar_cadena_complementaria', (['"""GATC"""'], {}), "('GATC')\n", (528, 536), True, 'import adn as f\n'), ((571, 608), 'adn.generar_cadena_complementaria', 'f.generar_cadena_complementaria', (['"""CA"""'], {}), "('CA')\n", (602, 608), True, 'import adn as f\n'), ((686, 740), 'adn.calcular_correspondencia', 'f.calcular_correspondencia', (['"""ATATTACGGC"""', '"""TATAATGCCG"""'], {}), "('ATATTACGGC', 'TATAATGCCG')\n", (712, 740), True, 'import adn as f\n'), ((774, 828), 'adn.calcular_correspondencia', 'f.calcular_correspondencia', (['"""ATATATCGGC"""', '"""TATAATGCCG"""'], {}), "('ATATATCGGC', 'TATAATGCCG')\n", (800, 828), True, 'import adn as f\n'), ((862, 916), 'adn.calcular_correspondencia', 'f.calcular_correspondencia', (['"""ATATATCGGC"""', '"""CGATTTACGA"""'], {}), "('ATATATCGGC', 'CGATTTACGA')\n", (888, 916), True, 'import adn as f\n'), ((949, 995), 'adn.calcular_correspondencia', 'f.calcular_correspondencia', (['"""TTGGAACC"""', '"""ACTA"""'], {}), "('TTGGAACC', 'ACTA')\n", (975, 995), True, 'import adn as f\n'), ((1097, 1121), 'adn.corresponden', 'f.corresponden', (['"""A"""', '"""T"""'], {}), "('A', 'T')\n", (1111, 1121), True, 'import adn as f\n'), ((1154, 1178), 'adn.corresponden', 'f.corresponden', (['"""G"""', '"""T"""'], {}), "('G', 'T')\n", (1168, 1178), True, 'import adn as f\n'), ((1249, 1281), 'adn.es_cadena_valida', 'f.es_cadena_valida', (['"""FTATTACGGC"""'], {}), "('FTATTACGGC')\n", (1267, 1281), True, 'import adn as f\n'), ((1314, 1346), 'adn.es_cadena_valida', 'f.es_cadena_valida', (['"""ATATTACGGC"""'], {}), "('ATATTACGGC')\n", (1332, 1346), True, 'import adn as f\n'), ((1406, 1420), 'adn.es_base', 'f.es_base', (['"""A"""'], {}), "('A')\n", (1415, 1420), True, 'import adn as f\n'), ((1452, 1466), 'adn.es_base', 'f.es_base', (['"""T"""'], {}), "('T')\n", (1461, 1466), True, 'import adn as f\n'), ((1498, 1512), 'adn.es_base', 'f.es_base', (['"""G"""'], {}), "('G')\n", (1507, 1512), True, 'import adn as f\n'), ((1544, 1558), 'adn.es_base', 'f.es_base', (['"""C"""'], {}), "('C')\n", (1553, 1558), True, 'import adn as f\n'), ((1591, 1605), 'adn.es_base', 'f.es_base', (['"""B"""'], {}), "('B')\n", (1600, 1605), True, 'import adn as f\n'), ((1671, 1702), 'adn.es_subcadena', 'f.es_subcadena', (['"""ATCTTA"""', '"""ATC"""'], {}), "('ATCTTA', 'ATC')\n", (1685, 1702), True, 'import adn as f\n'), ((1735, 1764), 'adn.es_subcadena', 'f.es_subcadena', (['"""TCGA"""', '"""AAT"""'], {}), "('TCGA', 'AAT')\n", (1749, 1764), True, 'import adn as f\n'), ((1831, 1860), 'adn.reparar_dano', 'f.reparar_dano', (['"""ATGPPP"""', '"""C"""'], {}), "('ATGPPP', 'C')\n", (1845, 1860), True, 'import adn as f\n'), ((1897, 1926), 'adn.reparar_dano', 'f.reparar_dano', (['"""ATGCCC"""', '"""G"""'], {}), "('ATGCCC', 'G')\n", (1911, 1926), True, 'import adn as f\n'), ((2001, 2032), 'adn.obtener_secciones', 'f.obtener_secciones', (['"""atata"""', '(3)'], {}), "('atata', 3)\n", (2020, 2032), 
True, 'import adn as f\n'), ((2078, 2113), 'adn.obtener_secciones', 'f.obtener_secciones', (['"""ATGCTACAG"""', '(2)'], {}), "('ATGCTACAG', 2)\n", (2097, 2113), True, 'import adn as f\n'), ((2201, 2239), 'adn.obtener_complementos', 'f.obtener_complementos', (["['AAA', 'CGC']"], {}), "(['AAA', 'CGC'])\n", (2223, 2239), True, 'import adn as f\n'), ((2282, 2320), 'adn.obtener_complementos', 'f.obtener_complementos', (["['AGT', 'GTA']"], {}), "(['AGT', 'GTA'])\n", (2304, 2320), True, 'import adn as f\n'), ((2395, 2426), 'adn.unir_cadena', 'f.unir_cadena', (["['CGTA', 'ATTA']"], {}), "(['CGTA', 'ATTA'])\n", (2408, 2426), True, 'import adn as f\n'), ((2465, 2496), 'adn.unir_cadena', 'f.unir_cadena', (["['GC', 'GCATTT']"], {}), "(['GC', 'GCATTT'])\n", (2478, 2496), True, 'import adn as f\n'), ((2576, 2614), 'adn.complementar_cadenas', 'f.complementar_cadenas', (["['GCC', 'CGG']"], {}), "(['GCC', 'CGG'])\n", (2598, 2614), True, 'import adn as f\n'), ((2651, 2694), 'adn.complementar_cadenas', 'f.complementar_cadenas', (["['AT', 'GTA', 'CC']"], {}), "(['AT', 'GTA', 'CC'])\n", (2673, 2694), True, 'import adn as f\n')] |
import os
print("1")
try:
os.remove("filename")
except Exception:
pass
print("2")
| [
"os.remove"
] | [((31, 52), 'os.remove', 'os.remove', (['"""filename"""'], {}), "('filename')\n", (40, 52), False, 'import os\n')] |
import os
# Limit to this many copies. Mainly for formatting siapaths cleanly.
MAX_DATASET_COPIES = 100000000
class Error(Exception):
pass
class InvalidCopyCountError(Error):
pass
def from_dataset(input_dataset, dataset_copies):
"""Converts a Dataset to a list of Job instances.
Args:
input_dataset: The Dataset of files to upload to Sia.
dataset_copies: The number of times each file in the dataset should be
uploaded to Sia.
Returns:
A list of upload jobs.
"""
jobs = []
if dataset_copies < 1 or dataset_copies > MAX_DATASET_COPIES:
raise InvalidCopyCountError(
'dataset_copies must be an integer between 1 and %d. got: %d' %
(MAX_DATASET_COPIES, dataset_copies))
    for copy_index in range(dataset_copies):  # range (not xrange) so this runs on Python 3
for local_path in input_dataset.paths:
sia_path = _local_path_to_sia_path(local_path,
input_dataset.root_dir)
if dataset_copies != 1:
sia_path = _append_file_index(sia_path, copy_index)
jobs.append(Job(local_path, sia_path))
return jobs
def _local_path_to_sia_path(local_path, dataset_root_dir):
sia_path = os.path.relpath(local_path, dataset_root_dir)
path_separator = os.path.sep
# Normalize to forward slash path separators.
return sia_path.replace(path_separator, '/')
def _append_file_index(sia_path, copy_index):
"""Appends a file index to a Sia path to represent which copy this is.
Args:
sia_path: The original Sia path before the copy index is added.
copy_index: An index of which copy number this file is.
Returns:
An indexed path, for example ('foo/bar.txt', 5) returns:
foo/bar-00000005.txt
"""
base_path, extension = os.path.splitext(sia_path)
return '%s-%08d%s' % (base_path, copy_index, extension)
class Job(object):
"""A job upload task.
Represents the information needed to perform a single file upload from the
local system to the Sia network.
"""
def __init__(self, local_path, sia_path):
self._local_path = local_path
self._sia_path = sia_path
self._failure_count = 0
def __eq__(self, other):
return ((self.local_path == other.local_path) and
(self.sia_path == other.sia_path) and
(self.failure_count == other.failure_count))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(%s -> %s)' % (self.__class__.__name__, self._local_path,
self._sia_path)
def increment_failure_count(self):
self._failure_count += 1
@property
def local_path(self):
return self._local_path
@property
def sia_path(self):
return self._sia_path
@property
def failure_count(self):
return self._failure_count
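

# A minimal usage sketch (illustrative, not part of the original module): any
# object exposing `paths` and `root_dir` can stand in for the Dataset type this
# module expects, so a namedtuple is enough to demonstrate the job expansion.
# Sia paths shown assume a POSIX path layout.
if __name__ == '__main__':
    import collections

    FakeDataset = collections.namedtuple('FakeDataset', ['paths', 'root_dir'])
    demo = FakeDataset(paths=['/data/foo/bar.txt'], root_dir='/data')
    for job in from_dataset(demo, dataset_copies=2):
        # Job(/data/foo/bar.txt -> foo/bar-00000000.txt), then ...-00000001.txt
        print(job)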
| [
"os.path.splitext",
"os.path.relpath"
] | [((1245, 1290), 'os.path.relpath', 'os.path.relpath', (['local_path', 'dataset_root_dir'], {}), '(local_path, dataset_root_dir)\n', (1260, 1290), False, 'import os\n'), ((1841, 1867), 'os.path.splitext', 'os.path.splitext', (['sia_path'], {}), '(sia_path)\n', (1857, 1867), False, 'import os\n')] |
import os
import pickle as pkl
import numpy as np
import scipy.misc
import scipy.signal
import scipy.ndimage
from PIL import Image
FLOAT_X = 'float32'
def preprocess_and_finalise_visual_data(vidpath, vid_save_path, final_save_path,
seq_length=50, tensorize_clips=False, max_examples_per_vid=1000,
filter_type='whiten', filetype='.png', clip_edges=False,
max_examples=20000, random_order=True, verbose=True):
"""
    Convenience function that first preprocesses the individual videos, saving them one
    at a time, and then compiles them into a finalised dataset.
"""
    if verbose:
        print('Preprocessing individual videos...')
    preprocess_vids(vidpath, vid_save_path,
                    seq_length=seq_length, tensorize_clips=tensorize_clips,
                    max_examples_per_vid=max_examples_per_vid,
                    filter_type=filter_type, filetype=filetype,
                    clip_edges=clip_edges)
    if verbose:
        print('Compiling into final dataset...')
    finalise_dataset(vid_save_path, final_save_path, max_examples=max_examples,
                     random_order=random_order)
    if verbose:
        print('Done')
    return
def preprocess_vids(vidpath, save_path, n_pixels=180,
seq_length=50, tensorize_clips=False,
max_examples_per_vid=1000,
filter_type='whiten', filetype='.png',
clip_edges=False):
"""
    Loop through all subfolders in directory, where each subfolder contains a separate movie clip to
be preprocessed. Each frame of the movie clip should be a .png or .jpeg image.
"""
if not os.path.isdir(save_path):
os.makedirs(save_path)
folder_names = [x[0] for x in os.walk(vidpath)]
folder_names = folder_names[1:]
print(folder_names)
for folder_name in folder_names:
print(folder_name)
filenames = os.listdir(folder_name)
filenames.sort()
imlist = []
for filename in filenames:
            if filename.lower().endswith(filetype.lower()):  # case-insensitive extension match
im = Image.open(os.path.join(folder_name, filename))
im = im.convert('L', (0.2989, 0.5870, 0.1140, 0)) #Convert to grayscale
im = np.array(im)
im = clip_longest_dim(im)
im = scipy.misc.imresize(im,(n_pixels,n_pixels))
if filter_type is not None:
if filter_type=='whiten':
imw = whiten_and_filter_image(im)
elif filter_type=='lowpass':
sigma= 2
imw = scipy.ndimage.filters.gaussian_filter(im, sigma)
else:
imw = im
else:
imw = im
if clip_edges:
start_x = 45
end_x = -35
start_y = 10
end_y = -10
imw = imw[start_x:end_x,start_y:end_y]
imlist.append(imw.astype(FLOAT_X))
if imlist:
[d1,d2] = imlist[0].shape
n_images = len(imlist)
print(n_images)
imarr = np.dstack(imlist) #This will give an array of size [d1xd2xn_images]
print(imarr.shape)
if tensorize_clips:
imarr = np.reshape(imarr, [d1*d2, n_images], order='f') #collapse d1 and d2, starting with first axis first
print(imarr.shape)
tensarr = tensorize(imarr,seq_length)
n_stacks = tensarr.shape[-1]
tensarr = np.reshape(tensarr, [d1,d2,seq_length,n_stacks], order = 'f') #want the first ix to be changed first
else:
# n_stacks = int(np.floor(imarr.shape[-1]/seq_length))
n_stacks = int(np.floor(n_images/seq_length))
print(n_images)
print(n_stacks)
print(n_stacks*seq_length)
imarr = imarr[:,:,:int(n_stacks*seq_length)]
#tensarr = np.reshape(imarr, [d1, d2, seq_length, n_stacks])
tensarr = np.reshape(imarr, [d1, d2, n_stacks, seq_length])
tensarr = np.rollaxis(tensarr, -1, -2)
tensarr = np.rollaxis(tensarr,-1) #bring the n_stacks examples to the front
print(tensarr.shape)
            #Sometimes you might have some disproportionately long videos and you only want to
            #save a limited number of frames from each one to prevent a single video from
            #dominating the training set.
if tensarr.shape[0]>max_examples_per_vid:
tensarr = tensarr[:max_examples_per_vid,:,:,:]
#Save preprocessed array
            pickle_data(tensarr, os.path.join(save_path, os.path.split(folder_name)[-1] + '.pkl'))
def finalise_dataset(file_path, full_save_path, max_examples = 20000, random_order=True):
"""
    Compile the individually preprocessed movie clips saved by the preprocess_vids function
    into a single array with a given maximum number of examples.
Arguments:
file_path {string} -- path to folder where individually preprocessed movie clips are saved
        full_save_path {string} -- path to the location where the final dataset will be saved, ending in .pkl
Keyword Arguments:
        max_examples {int} -- the maximum number of training examples to include in the compiled dataset.
            If there are fewer examples than this, all of the examples from the preprocessed
            clips are included; otherwise only the first max_examples are kept. (default: {20000})
        random_order {bool} -- shuffle the example order before saving (default: {True})
"""
pickled_arr_paths = os.listdir(file_path)
n_pickled_arrs = len(pickled_arr_paths)
n_arrs_parsed = 0
n_examples = 0
example_filename = pickled_arr_paths[0]
example_arr = load_pickled_data(os.path.join(file_path, example_filename))
concattrain = np.zeros([max_examples, *example_arr.shape[1:]])
while n_examples < max_examples and n_arrs_parsed < n_pickled_arrs:
this_filename = pickled_arr_paths[n_arrs_parsed]
this_arr = load_pickled_data(os.path.join(file_path, this_filename))
#randomly select example sequences from each movie
        # clamp so the final clip cannot overflow the preallocated array
        n_entries = min(this_arr.shape[0], max_examples - n_examples)
        select_ix = np.random.permutation(this_arr.shape[0])
        concattrain[n_examples:n_examples+n_entries,:,:,:] = this_arr[select_ix[:n_entries],:,:,:]
n_examples += n_entries
n_arrs_parsed +=1
if random_order:
perm_seq = np.random.permutation(np.arange(n_examples))
else:
perm_seq = np.arange(n_examples)
concattrain = concattrain[perm_seq,...]
#normalize by subtracting the mean and dividing by the standard deviation of the whole dataset
normalized_concattrain = (concattrain - np.mean(concattrain[:]))/np.std(concattrain[:])
#save the dataset
pickle_data(normalized_concattrain, full_save_path)
return
def load_pickled_data(load_path):
load_path = os.path.expanduser(load_path)
with open(load_path, "rb") as f:
dat = pkl.load(f)
return dat
def pickle_data(data, save_path, protocol=4, create_par_dirs=True):
save_path = os.path.expanduser(save_path)
if not os.path.exists(os.path.dirname(os.path.abspath(save_path))) and create_par_dirs:
os.makedirs(os.path.dirname(os.path.abspath(save_path)))
with open(save_path, "wb") as f:
pkl.dump(data, f, protocol=protocol)
return
def clip_longest_dim(frame):
[h, w] = frame.shape
# print('h: %i' %h)
# print('w: %i' %w)
shortest_dim = np.minimum(h, w)
longest_dim = np.maximum(h, w)
# print(shortest_dim)
# print(longest_dim)
start_clip = int(np.round(longest_dim/2) - np.round(shortest_dim/2))
end_clip = int(start_clip + shortest_dim)
# print(start_clip)
# print(end_clip)
if longest_dim == h:
clip_frame = frame[start_clip:end_clip, :]
else:
clip_frame = frame[:, start_clip:end_clip]
# print(clip_frame.shape)
return clip_frame
def whiten_and_filter_image(im_to_filt):
N = im_to_filt.shape[0]
imf=np.fft.fftshift(np.fft.fft2(im_to_filt))
f=np.arange(-N/2,N/2)
[fx, fy] = np.meshgrid(f,f)
[rho,theta]=cart2pol(fx,fy)
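    # ramp (|f|) whitening filter with a Gaussian roll-off near Nyquist: boosting high
    # spatial frequencies approximately flattens the ~1/f amplitude spectrum of natural images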
filtf = rho*np.exp(-0.5*(rho/(0.7*N/2))**2)
imwf = filtf*imf
imw = np.real(np.fft.ifft2(np.fft.fftshift(imwf)))
return imw
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
def tensorize(X_arr, n_h, lag=0, remove_zeros = True, float_X = 'float32'):
#Modified from Benware
# Add a history dimension to a 2D stimulus grid
# Inputs:
# X_arr -- stimulus, freq x time
# n_h -- number of history steps
# lag -- minimum lag
#
# Outputs:
# X_tens -- stimulus, freq x history x time
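    #   e.g. X_arr of shape (F, T) with n_h=3 and lag=0 yields, after zero-removal, X_tens of shape (F, 3, T-2)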
n_d1 = np.shape(X_arr)[0]
n_d2 = np.shape(X_arr)[1]
# pad with zeros
# X_arr = np.concatenate(np.zeros((n_d1, n_h)), X_arr)
n_d2_pad = np.shape(X_arr)[1]
# preallocate
    X_tens = np.zeros((n_d1, n_h, n_d2_pad), dtype=float_X)
for ii in range(n_h):
        # integer history shift with zero padding (scipy.ndimage.shift with order=0 is exact for integer offsets)
        X_tens[:,ii,:] = scipy.ndimage.shift(X_arr, (0, lag+n_h-ii-1), order=0, cval=0)
if remove_zeros:
# X_tens = X_tens[:, :, n_h+1:]
X_tens = X_tens[:, :, n_h-1:]
return X_tens
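# Example usage (paths here are hypothetical):
#   preprocess_and_finalise_visual_data('raw_videos/', 'preprocessed_clips/',
#                                       'dataset/final_dataset.pkl',
#                                       seq_length=50, filter_type='whiten')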
| [
"numpy.sqrt",
"numpy.rollaxis",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"numpy.arange",
"os.walk",
"numpy.mean",
"os.listdir",
"numpy.reshape",
"numpy.fft.fft2",
"numpy.exp",
"os.path.split",
"os.path.isdir",
"numpy.meshgrid",
"numpy.maximum",
"os.path.expanduser",
"numpy.random.permutation",
"numpy.round",
"pickle.load",
"numpy.floor",
"numpy.cos",
"numpy.std",
"numpy.shape",
"numpy.dstack",
"pickle.dump",
"numpy.minimum",
"os.makedirs",
"os.path.join",
"numpy.zeros",
"os.path.abspath",
"numpy.fft.fftshift"
] | [((5938, 5959), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (5948, 5959), False, 'import os\n'), ((6189, 6237), 'numpy.zeros', 'np.zeros', (['[max_examples, *example_arr.shape[1:]]'], {}), '([max_examples, *example_arr.shape[1:]])\n', (6197, 6237), True, 'import numpy as np\n'), ((7300, 7329), 'os.path.expanduser', 'os.path.expanduser', (['load_path'], {}), '(load_path)\n', (7318, 7329), False, 'import os\n'), ((7493, 7522), 'os.path.expanduser', 'os.path.expanduser', (['save_path'], {}), '(save_path)\n', (7511, 7522), False, 'import os\n'), ((7895, 7911), 'numpy.minimum', 'np.minimum', (['h', 'w'], {}), '(h, w)\n', (7905, 7911), True, 'import numpy as np\n'), ((7930, 7946), 'numpy.maximum', 'np.maximum', (['h', 'w'], {}), '(h, w)\n', (7940, 7946), True, 'import numpy as np\n'), ((8477, 8501), 'numpy.arange', 'np.arange', (['(-N / 2)', '(N / 2)'], {}), '(-N / 2, N / 2)\n', (8486, 8501), True, 'import numpy as np\n'), ((8512, 8529), 'numpy.meshgrid', 'np.meshgrid', (['f', 'f'], {}), '(f, f)\n', (8523, 8529), True, 'import numpy as np\n'), ((8731, 8755), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (8738, 8755), True, 'import numpy as np\n'), ((8762, 8778), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (8772, 8778), True, 'import numpy as np\n'), ((9448, 9494), 'numpy.zeros', 'np.zeros', (['(n_d1, n_h, n_d2_pad)'], {'dtype': 'float_X'}), '((n_d1, n_h, n_d2_pad), dtype=float_X)\n', (9456, 9494), True, 'import numpy as np\n'), ((1642, 1666), 'os.path.isdir', 'os.path.isdir', (['save_path'], {}), '(save_path)\n', (1655, 1666), False, 'import os\n'), ((1676, 1698), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (1687, 1698), False, 'import os\n'), ((1896, 1919), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (1906, 1919), False, 'import os\n'), ((6127, 6168), 'os.path.join', 'os.path.join', (['file_path', 'example_filename'], {}), '(file_path, example_filename)\n', (6139, 6168), False, 'import os\n'), ((6588, 6628), 'numpy.random.permutation', 'np.random.permutation', (['this_arr.shape[0]'], {}), '(this_arr.shape[0])\n', (6609, 6628), True, 'import numpy as np\n'), ((6901, 6922), 'numpy.arange', 'np.arange', (['n_examples'], {}), '(n_examples)\n', (6910, 6922), True, 'import numpy as np\n'), ((7136, 7158), 'numpy.std', 'np.std', (['concattrain[:]'], {}), '(concattrain[:])\n', (7142, 7158), True, 'import numpy as np\n'), ((7381, 7392), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (7389, 7392), True, 'import pickle as pkl\n'), ((7725, 7761), 'pickle.dump', 'pkl.dump', (['data', 'f'], {'protocol': 'protocol'}), '(data, f, protocol=protocol)\n', (7733, 7761), True, 'import pickle as pkl\n'), ((8446, 8469), 'numpy.fft.fft2', 'np.fft.fft2', (['im_to_filt'], {}), '(im_to_filt)\n', (8457, 8469), True, 'import numpy as np\n'), ((8577, 8618), 'numpy.exp', 'np.exp', (['(-0.5 * (rho / (0.7 * N / 2)) ** 2)'], {}), '(-0.5 * (rho / (0.7 * N / 2)) ** 2)\n', (8583, 8618), True, 'import numpy as np\n'), ((8839, 8850), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (8845, 8850), True, 'import numpy as np\n'), ((8865, 8876), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (8871, 8876), True, 'import numpy as np\n'), ((9251, 9266), 'numpy.shape', 'np.shape', (['X_arr'], {}), '(X_arr)\n', (9259, 9266), True, 'import numpy as np\n'), ((9281, 9296), 'numpy.shape', 'np.shape', (['X_arr'], {}), '(X_arr)\n', (9289, 9296), True, 'import numpy as np\n'), ((9397, 9412), 'numpy.shape', 'np.shape', (['X_arr'], {}), '(X_arr)\n', (9405, 9412), True, 'import numpy as np\n'), ((1734, 1750), 'os.walk', 'os.walk', (['vidpath'], {}), '(vidpath)\n', (1741, 1750), False, 'import os\n'), ((3283, 3300), 'numpy.dstack', 'np.dstack', (['imlist'], {}), '(imlist)\n', (3292, 3300), True, 'import numpy as np\n'), ((4349, 4373), 'numpy.rollaxis', 'np.rollaxis', (['tensarr', '(-1)'], {}), '(tensarr, -1)\n', (4360, 4373), True, 'import numpy as np\n'), ((6431, 6469), 'os.path.join', 'os.path.join', (['file_path', 'this_filename'], {}), '(file_path, this_filename)\n', (6443, 6469), False, 'import os\n'), ((6849, 6870), 'numpy.arange', 'np.arange', (['n_examples'], {}), '(n_examples)\n', (6858, 6870), True, 'import numpy as np\n'), ((7111, 7134), 'numpy.mean', 'np.mean', (['concattrain[:]'], {}), '(concattrain[:])\n', (7118, 7134), True, 'import numpy as np\n'), ((8019, 8044), 'numpy.round', 'np.round', (['(longest_dim / 2)'], {}), '(longest_dim / 2)\n', (8027, 8044), True, 'import numpy as np\n'), ((8045, 8071), 'numpy.round', 'np.round', (['(shortest_dim / 2)'], {}), '(shortest_dim / 2)\n', (8053, 8071), True, 'import numpy as np\n'), ((8661, 8682), 'numpy.fft.fftshift', 'np.fft.fftshift', (['imwf'], {}), '(imwf)\n', (8676, 8682), True, 'import numpy as np\n'), ((2310, 2322), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (2318, 2322), True, 'import numpy as np\n'), ((3438, 3487), 'numpy.reshape', 'np.reshape', (['imarr', '[d1 * d2, n_images]'], {'order': '"""f"""'}), "(imarr, [d1 * d2, n_images], order='f')\n", (3448, 3487), True, 'import numpy as np\n'), ((3698, 3760), 'numpy.reshape', 'np.reshape', (['tensarr', '[d1, d2, seq_length, n_stacks]'], {'order': '"""f"""'}), "(tensarr, [d1, d2, seq_length, n_stacks], order='f')\n", (3708, 3760), True, 'import numpy as np\n'), ((4221, 4270), 'numpy.reshape', 'np.reshape', (['imarr', '[d1, d2, n_stacks, seq_length]'], {}), '(imarr, [d1, d2, n_stacks, seq_length])\n', (4231, 4270), True, 'import numpy as np\n'), ((4297, 4325), 'numpy.rollaxis', 'np.rollaxis', (['tensarr', '(-1)', '(-2)'], {}), '(tensarr, -1, -2)\n', (4308, 4325), True, 'import numpy as np\n'), ((7651, 7677), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (7666, 7677), False, 'import os\n'), ((2164, 2199), 'os.path.join', 'os.path.join', (['folder_name', 'filename'], {}), '(folder_name, filename)\n', (2176, 2199), False, 'import os\n'), ((3919, 3950), 'numpy.floor', 'np.floor', (['(n_images / seq_length)'], {}), '(n_images / seq_length)\n', (3927, 3950), True, 'import numpy as np\n'), ((7565, 7591), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (7580, 7591), False, 'import os\n'), ((4889, 4915), 'os.path.split', 'os.path.split', (['folder_name'], {}), '(folder_name)\n', (4902, 4915), False, 'import os\n')]
#! /usr/bin/python2
# -*- coding: utf-8 -*-
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler
from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, \
ReplyKeyboardMarkup, KeyboardButton
import requests
import sys
import os
# Add django as an interface to the database
import django
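# NOTE: django.setup() assumes DJANGO_SETTINGS_MODULE is already set in the environment,
# otherwise it raises ImproperlyConfigured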
django.setup()
from django_cat_app.models import UserLog
# if run locally, we can read the TOKEN from a file, as fallback (if e.g. run on heroku, we use an
# environment variable)
try:
    from secret_chat_key import TELEGRAM_TOKEN
except ImportError:
    try:
        TELEGRAM_TOKEN = os.environ['TELEGRAM_TOKEN']
        print("Read TELEGRAM_TOKEN from env")
    except KeyError:
        print("'TELEGRAM_TOKEN' not in ENV")
def get_random_cat_url():
response = requests.get(url="https://api.thecatapi.com/v1/images/search")
cat_url = str(response.json()[0]['url'])
return cat_url
class DemoTelegramBot:
def __init__(self):
# activate webhooks (instead of polling)
self.with_webhooks = False
try:
self.with_webhooks = os.environ['WITH_HOOK']
except KeyError:
pass
self.updater = Updater(token=TELEGRAM_TOKEN)
if self.with_webhooks:
port = int(os.environ.get('PORT', '8443')) # is set by Heroku if run there
print("Running with webhook on port %i" % port)
self.updater.start_webhook(listen="0.0.0.0", port=port, url_path=TELEGRAM_TOKEN)
self.updater.bot.set_webhook("https://telegramcatbott.herokuapp.com/" + TELEGRAM_TOKEN)
self.dispatcher = self.updater.dispatcher
# create callbacks for some commands
self.dispatcher.add_handler(CommandHandler("help", self.on_help))
self.dispatcher.add_handler(CommandHandler("options", self.on_options))
self.dispatcher.add_handler(CommandHandler("location", self.on_location))
self.dispatcher.add_handler(CommandHandler("cat", self.on_cat))
# Callback for normal messages from user
# This function also contains the Database-counter(!)
self.dispatcher.add_handler(MessageHandler(Filters.text, self.text_cb))
# Callback for position
self.dispatcher.add_handler(MessageHandler(Filters.location, self.got_location, edited_updates=True))
# callback for custom keyboards
self.updater.dispatcher.add_handler(CallbackQueryHandler(self.mode_button_cb))
@staticmethod
def on_options(bot, update):
# encode question in callback_data ('w'): hack, could be something better
keyboard = [[InlineKeyboardButton("Bad", callback_data='w,1'),
InlineKeyboardButton("OK", callback_data='w,2'),
InlineKeyboardButton("Great", callback_data='w,3')]]
reply_markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text('How is the weather today?', reply_markup=reply_markup)
@staticmethod
def mode_button_cb(bot, update):
assert isinstance(update, Update)
assert isinstance(update.callback_query, CallbackQuery)
# user_id = update.callback_query.from_user.id
query = update.callback_query
ans = query.data.split(',')
cmd = str(ans[0])
value = int(ans[1])
if cmd == 'w':
text = "Weather score of %i" % value
else:
text = "Unhandled callback_data %s" % query.data
print(text)
# Replace keyboard with this message to clean up the window
bot.edit_message_text(text=text, chat_id=query.message.chat_id, message_id=query.message.message_id)
@staticmethod
def text_cb(bot, update):
assert isinstance(update, Update)
# used to identify users. Is not unique, but we don't want to store unique personal information by design
first_name = update.message.chat.first_name
ul, created = UserLog.objects.get_or_create(user_id=first_name)
assert isinstance(ul, UserLog)
ul.cat_count += 1
ul.save()
# print (update) -> https://www.cleancss.com/python-beautify/
print("Got text: %s, cat_count: %i" % (str(update.message.text), ul.cat_count))
msg = "Hello %s: %s (you can also use /help) (this is your cat nr %i)" % (first_name,
update.message.text.upper(),
ul.cat_count)
bot.send_message(chat_id=update.message.chat_id, text=msg)
bot.send_photo(chat_id=update.message.chat_id, photo=get_random_cat_url())
@staticmethod
def got_location(bot, update):
assert isinstance(update, Update)
if update.edited_message:
message = update.edited_message
else:
message = update.message
a, b = message.location.latitude, message.location.longitude
bot.send_message(chat_id=message.chat_id, text="You are at %.3f, %.3f" % (a, b))
@staticmethod
def on_location(bot, update):
location_keyboard = [[KeyboardButton(text="Send my location", request_location=True)]]
update.message.reply_text('Please share your location.',
reply_markup=ReplyKeyboardMarkup(location_keyboard, one_time_keyboard=True))
@staticmethod
def on_help(bot, update):
update.message.reply_text(u'Send any message to get an uppercase response. \n'
u'/location to send your location \n️'
u'/cat to get a cat image \n️'
u'/options to talk about weather️ ☺')
@staticmethod
def on_cat(bot, update):
bot.send_photo(chat_id=update.message.chat_id, photo=get_random_cat_url())
def run(self):
if not self.with_webhooks:
print("Start polling")
sys.stdout.flush()
self.updater.start_polling()
self.updater.idle()
if __name__ == "__main__":
dtb = DemoTelegramBot()
dtb.run()
| [
"django.setup",
"telegram.InlineKeyboardMarkup",
"telegram.InlineKeyboardButton",
"telegram.KeyboardButton",
"os.environ.get",
"django_cat_app.models.UserLog.objects.get_or_create",
"requests.get",
"telegram.ext.MessageHandler",
"telegram.ext.CallbackQueryHandler",
"sys.stdout.flush",
"telegram.ReplyKeyboardMarkup",
"telegram.ext.CommandHandler",
"telegram.ext.Updater"
] | [((362, 376), 'django.setup', 'django.setup', ([], {}), '()\n', (374, 376), False, 'import django\n'), ((854, 916), 'requests.get', 'requests.get', ([], {'url': '"""https://api.thecatapi.com/v1/images/search"""'}), "(url='https://api.thecatapi.com/v1/images/search')\n", (866, 916), False, 'import requests\n'), ((1251, 1280), 'telegram.ext.Updater', 'Updater', ([], {'token': 'TELEGRAM_TOKEN'}), '(token=TELEGRAM_TOKEN)\n', (1258, 1280), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((2896, 2926), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (2916, 2926), False, 'from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, ReplyKeyboardMarkup, KeyboardButton\n'), ((3995, 4044), 'django_cat_app.models.UserLog.objects.get_or_create', 'UserLog.objects.get_or_create', ([], {'user_id': 'first_name'}), '(user_id=first_name)\n', (4024, 4044), False, 'from django_cat_app.models import UserLog\n'), ((1787, 1823), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""help"""', 'self.on_help'], {}), "('help', self.on_help)\n", (1801, 1823), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((1861, 1903), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""options"""', 'self.on_options'], {}), "('options', self.on_options)\n", (1875, 1903), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((1941, 1985), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""location"""', 'self.on_location'], {}), "('location', self.on_location)\n", (1955, 1985), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((2023, 2057), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""cat"""', 'self.on_cat'], {}), "('cat', self.on_cat)\n", (2037, 2057), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((2208, 2250), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'self.text_cb'], {}), '(Filters.text, self.text_cb)\n', (2222, 2250), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((2321, 2393), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.location', 'self.got_location'], {'edited_updates': '(True)'}), '(Filters.location, self.got_location, edited_updates=True)\n', (2335, 2393), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((2480, 2521), 'telegram.ext.CallbackQueryHandler', 'CallbackQueryHandler', (['self.mode_button_cb'], {}), '(self.mode_button_cb)\n', (2500, 2521), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((6026, 6044), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6042, 6044), False, 'import sys\n'), ((1336, 1366), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '"""8443"""'], {}), "('PORT', '8443')\n", (1350, 1366), False, 'import os\n'), ((2678, 2726), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Bad"""'], {'callback_data': '"""w,1"""'}), "('Bad', callback_data='w,1')\n", (2698, 2726), False, 'from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, ReplyKeyboardMarkup, KeyboardButton\n'), ((2749, 2796), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""OK"""'], {'callback_data': '"""w,2"""'}), "('OK', callback_data='w,2')\n", (2769, 2796), False, 'from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, ReplyKeyboardMarkup, KeyboardButton\n'), ((2819, 2869), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Great"""'], {'callback_data': '"""w,3"""'}), "('Great', callback_data='w,3')\n", (2839, 2869), False, 'from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, ReplyKeyboardMarkup, KeyboardButton\n'), ((5206, 5268), 'telegram.KeyboardButton', 'KeyboardButton', ([], {'text': '"""Send my location"""', 'request_location': '(True)'}), "(text='Send my location', request_location=True)\n", (5220, 5268), False, 'from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, ReplyKeyboardMarkup, KeyboardButton\n'), ((5383, 5445), 'telegram.ReplyKeyboardMarkup', 'ReplyKeyboardMarkup', (['location_keyboard'], {'one_time_keyboard': '(True)'}), '(location_keyboard, one_time_keyboard=True)\n', (5402, 5445), False, 'from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, ReplyKeyboardMarkup, KeyboardButton\n')]
from clustviz.agglomerative import (
update_mat,
dist_mat_gen,
dist_mat,
compute_ward_ij,
sl_dist,
avg_dist,
cl_dist,
agg_clust,
point_plot_mod
)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def test_dist_mat_gen():
df_for_dist_mat_gen = pd.DataFrame([[0, 0, 1], [0, 2, 0]])
assert dist_mat_gen(df_for_dist_mat_gen).equals(
pd.DataFrame([[np.inf, 2], [2, np.inf]])
)
def test_update_mat_single():
df_for_update_mat = pd.DataFrame(
[[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]],
columns=["a", "b", "c"],
index=["a", "b", "c"],
)
temp_values = update_mat(df_for_update_mat, 1, 0, "single").values
assert (temp_values == [[np.inf, 2], [2, np.inf]]).all()
def test_update_mat_average():
df_for_update_mat = pd.DataFrame(
[[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]],
columns=["a", "b", "c"],
index=["a", "b", "c"],
)
temp_values = update_mat(df_for_update_mat, 1, 0, "average").values
print(temp_values)
assert (temp_values == [[np.inf, 3], [3, np.inf]]).all()
def test_update_mat_complete():
df_for_update_mat = pd.DataFrame(
[[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]],
columns=["a", "b", "c"],
index=["a", "b", "c"],
)
temp_values = update_mat(df_for_update_mat, 1, 0, "complete").values
assert (temp_values == [[np.inf, 4], [4, np.inf]]).all()
def test_dist_mat_single():
df_for_dist_mat = pd.DataFrame([[0, 0, 1], [0, 2, 0]])
assert dist_mat(df_for_dist_mat, "single").equals(
pd.DataFrame([[np.inf, 2], [np.inf, np.inf]])
)
def test_dist_mat_avg():
df_for_dist_mat = pd.DataFrame([[0, 0, 1], [0, 2, 0]])
assert dist_mat(df_for_dist_mat, "average").equals(
pd.DataFrame([[np.inf, 2], [np.inf, np.inf]])
)
def test_dist_mat_complete():
df_for_dist_mat = pd.DataFrame([[0, 0, 1], [0, 2, 0]])
assert dist_mat(df_for_dist_mat, "complete").equals(
pd.DataFrame([[np.inf, 2], [np.inf, np.inf]])
)
def test_compute_ward_ij():
X = [[1, 2], [3, 2], [0, 0], [1, 1]]
b = pd.DataFrame(X, index=["0", "1", "2", "3"], columns=["0x", "0y"])
assert compute_ward_ij(X, b) == (("0", "3"), 0.5, 0.5)
def test_sl_dist():
first_cluster = [np.array([3, 1]), np.array([1, 7]), np.array([2, 1])]
second_cluster = [np.array([1, 1]), np.array([3, 6]), np.array([1, 3])]
assert sl_dist(first_cluster, second_cluster) == 1
def test_avg_dist():
first_cluster = [np.array([1, 1]), np.array([2, 1])]
second_cluster = [np.array([0, 1]), np.array([4, 1])]
assert avg_dist(first_cluster, second_cluster) == 2
def test_cl_dist():
first_cluster = [np.array([1, 1]), np.array([2, 1])]
second_cluster = [np.array([0, 1]), np.array([4, 1])]
assert cl_dist(first_cluster, second_cluster) == 3
def test_agg_clust_ward():
X = np.array([[1, 2], [3, 2], [0, 0], [1, 1]])
agg_clust(X, linkage="ward", plotting=False)
def test_agg_clust_single():
X = np.array([[1, 2], [3, 2], [0, 0], [1, 1]])
agg_clust(X, linkage="single", plotting=False)
def test_plot_fn(monkeypatch):
X = np.array([[1, 2], [3, 2], [0, 0]])
a = pd.DataFrame(
[[0.0, 0.0, np.nan, np.nan, np.nan, np.nan], [1.0, 2.0, 3, 2, np.nan, np.nan]],
index=["2", "(0)-(1)"],
columns=["0x", "0y", "1x", "1y", "2x", "2y"],
)
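    # the '(0)-(1)' index presumably follows the library's naming convention for the cluster formed by merging points 0 and 1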
monkeypatch.setattr(plt, "show", lambda: None)
point_plot_mod(X, a, 2.57)
| [
"clustviz.agglomerative.point_plot_mod",
"clustviz.agglomerative.agg_clust",
"clustviz.agglomerative.compute_ward_ij",
"clustviz.agglomerative.avg_dist",
"clustviz.agglomerative.dist_mat_gen",
"clustviz.agglomerative.cl_dist",
"numpy.array",
"clustviz.agglomerative.sl_dist",
"pandas.DataFrame",
"clustviz.agglomerative.update_mat",
"clustviz.agglomerative.dist_mat"
] | [((307, 343), 'pandas.DataFrame', 'pd.DataFrame', (['[[0, 0, 1], [0, 2, 0]]'], {}), '([[0, 0, 1], [0, 2, 0]])\n', (319, 343), True, 'import pandas as pd\n'), ((509, 623), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]]'], {'columns': "['a', 'b', 'c']", 'index': "['a', 'b', 'c']"}), "([[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]], columns=['a',\n 'b', 'c'], index=['a', 'b', 'c'])\n", (521, 623), True, 'import pandas as pd\n'), ((842, 956), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]]'], {'columns': "['a', 'b', 'c']", 'index': "['a', 'b', 'c']"}), "([[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]], columns=['a',\n 'b', 'c'], index=['a', 'b', 'c'])\n", (854, 956), True, 'import pandas as pd\n'), ((1200, 1314), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]]'], {'columns': "['a', 'b', 'c']", 'index': "['a', 'b', 'c']"}), "([[np.inf, 1, 4], [1, np.inf, 2], [4, 2, np.inf]], columns=['a',\n 'b', 'c'], index=['a', 'b', 'c'])\n", (1212, 1314), True, 'import pandas as pd\n'), ((1530, 1566), 'pandas.DataFrame', 'pd.DataFrame', (['[[0, 0, 1], [0, 2, 0]]'], {}), '([[0, 0, 1], [0, 2, 0]])\n', (1542, 1566), True, 'import pandas as pd\n'), ((1732, 1768), 'pandas.DataFrame', 'pd.DataFrame', (['[[0, 0, 1], [0, 2, 0]]'], {}), '([[0, 0, 1], [0, 2, 0]])\n', (1744, 1768), True, 'import pandas as pd\n'), ((1940, 1976), 'pandas.DataFrame', 'pd.DataFrame', (['[[0, 0, 1], [0, 2, 0]]'], {}), '([[0, 0, 1], [0, 2, 0]])\n', (1952, 1976), True, 'import pandas as pd\n'), ((2175, 2240), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'index': "['0', '1', '2', '3']", 'columns': "['0x', '0y']"}), "(X, index=['0', '1', '2', '3'], columns=['0x', '0y'])\n", (2187, 2240), True, 'import pandas as pd\n'), ((2955, 2997), 'numpy.array', 'np.array', (['[[1, 2], [3, 2], [0, 0], [1, 1]]'], {}), '([[1, 2], [3, 2], [0, 0], [1, 1]])\n', (2963, 2997), True, 'import numpy as np\n'), ((3003, 3047), 'clustviz.agglomerative.agg_clust', 'agg_clust', (['X'], {'linkage': '"""ward"""', 'plotting': '(False)'}), "(X, linkage='ward', plotting=False)\n", (3012, 3047), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((3087, 3129), 'numpy.array', 'np.array', (['[[1, 2], [3, 2], [0, 0], [1, 1]]'], {}), '([[1, 2], [3, 2], [0, 0], [1, 1]])\n', (3095, 3129), True, 'import numpy as np\n'), ((3135, 3181), 'clustviz.agglomerative.agg_clust', 'agg_clust', (['X'], {'linkage': '"""single"""', 'plotting': '(False)'}), "(X, linkage='single', plotting=False)\n", (3144, 3181), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((3223, 3257), 'numpy.array', 'np.array', (['[[1, 2], [3, 2], [0, 0]]'], {}), '([[1, 2], [3, 2], [0, 0]])\n', (3231, 3257), True, 'import numpy as np\n'), ((3266, 3436), 'pandas.DataFrame', 'pd.DataFrame', (['[[0.0, 0.0, np.nan, np.nan, np.nan, np.nan], [1.0, 2.0, 3, 2, np.nan, np.nan]]'], {'index': "['2', '(0)-(1)']", 'columns': "['0x', '0y', '1x', '1y', '2x', '2y']"}), "([[0.0, 0.0, np.nan, np.nan, np.nan, np.nan], [1.0, 2.0, 3, 2,\n np.nan, np.nan]], index=['2', '(0)-(1)'], columns=['0x', '0y', '1x',\n '1y', '2x', '2y'])\n", (3278, 3436), True, 'import pandas as pd\n'), ((3516, 3542), 'clustviz.agglomerative.point_plot_mod', 'point_plot_mod', (['X', 'a', '(2.57)'], {}), '(X, a, 2.57)\n', (3530, 3542), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((406, 446), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.inf, 2], [2, np.inf]]'], {}), '([[np.inf, 2], [2, np.inf]])\n', (418, 446), True, 'import pandas as pd\n'), ((670, 715), 'clustviz.agglomerative.update_mat', 'update_mat', (['df_for_update_mat', '(1)', '(0)', '"""single"""'], {}), "(df_for_update_mat, 1, 0, 'single')\n", (680, 715), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((1003, 1049), 'clustviz.agglomerative.update_mat', 'update_mat', (['df_for_update_mat', '(1)', '(0)', '"""average"""'], {}), "(df_for_update_mat, 1, 0, 'average')\n", (1013, 1049), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((1361, 1408), 'clustviz.agglomerative.update_mat', 'update_mat', (['df_for_update_mat', '(1)', '(0)', '"""complete"""'], {}), "(df_for_update_mat, 1, 0, 'complete')\n", (1371, 1408), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((1631, 1676), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.inf, 2], [np.inf, np.inf]]'], {}), '([[np.inf, 2], [np.inf, np.inf]])\n', (1643, 1676), True, 'import pandas as pd\n'), ((1834, 1879), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.inf, 2], [np.inf, np.inf]]'], {}), '([[np.inf, 2], [np.inf, np.inf]])\n', (1846, 1879), True, 'import pandas as pd\n'), ((2043, 2088), 'pandas.DataFrame', 'pd.DataFrame', (['[[np.inf, 2], [np.inf, np.inf]]'], {}), '([[np.inf, 2], [np.inf, np.inf]])\n', (2055, 2088), True, 'import pandas as pd\n'), ((2253, 2274), 'clustviz.agglomerative.compute_ward_ij', 'compute_ward_ij', (['X', 'b'], {}), '(X, b)\n', (2268, 2274), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((2344, 2360), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (2352, 2360), True, 'import numpy as np\n'), ((2362, 2378), 'numpy.array', 'np.array', (['[1, 7]'], {}), '([1, 7])\n', (2370, 2378), True, 'import numpy as np\n'), ((2380, 2396), 'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (2388, 2396), True, 'import numpy as np\n'), ((2420, 2436), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2428, 2436), True, 'import numpy as np\n'), ((2438, 2454), 'numpy.array', 'np.array', (['[3, 6]'], {}), '([3, 6])\n', (2446, 2454), True, 'import numpy as np\n'), ((2456, 2472), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (2464, 2472), True, 'import numpy as np\n'), ((2486, 2524), 'clustviz.agglomerative.sl_dist', 'sl_dist', (['first_cluster', 'second_cluster'], {}), '(first_cluster, second_cluster)\n', (2493, 2524), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((2574, 2590), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2582, 2590), True, 'import numpy as np\n'), ((2592, 2608), 'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (2600, 2608), True, 'import numpy as np\n'), ((2632, 2648), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2640, 2648), True, 'import numpy as np\n'), ((2650, 2666), 'numpy.array', 'np.array', (['[4, 1]'], {}), '([4, 1])\n', (2658, 2666), True, 'import numpy as np\n'), ((2680, 2719), 'clustviz.agglomerative.avg_dist', 'avg_dist', (['first_cluster', 'second_cluster'], {}), '(first_cluster, second_cluster)\n', (2688, 2719), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((2768, 2784), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2776, 2784), True, 'import numpy as np\n'), ((2786, 2802), 'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (2794, 2802), True, 'import numpy as np\n'), ((2826, 2842), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2834, 2842), True, 'import numpy as np\n'), ((2844, 2860), 'numpy.array', 'np.array', (['[4, 1]'], {}), '([4, 1])\n', (2852, 2860), True, 'import numpy as np\n'), ((2874, 2912), 'clustviz.agglomerative.cl_dist', 'cl_dist', (['first_cluster', 'second_cluster'], {}), '(first_cluster, second_cluster)\n', (2881, 2912), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((356, 389), 'clustviz.agglomerative.dist_mat_gen', 'dist_mat_gen', (['df_for_dist_mat_gen'], {}), '(df_for_dist_mat_gen)\n', (368, 389), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((1579, 1614), 'clustviz.agglomerative.dist_mat', 'dist_mat', (['df_for_dist_mat', '"""single"""'], {}), "(df_for_dist_mat, 'single')\n", (1587, 1614), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((1781, 1817), 'clustviz.agglomerative.dist_mat', 'dist_mat', (['df_for_dist_mat', '"""average"""'], {}), "(df_for_dist_mat, 'average')\n", (1789, 1817), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n'), ((1989, 2026), 'clustviz.agglomerative.dist_mat', 'dist_mat', (['df_for_dist_mat', '"""complete"""'], {}), "(df_for_dist_mat, 'complete')\n", (1997, 2026), False, 'from clustviz.agglomerative import update_mat, dist_mat_gen, dist_mat, compute_ward_ij, sl_dist, avg_dist, cl_dist, agg_clust, point_plot_mod\n')]
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import ruamel.yaml
from ruamel.yaml.util import load_yaml_guess_indent
import os
import sys
class TemplateConfig:
yamlContent = None
ind = None
bsi = None
project_name="" #项目名称
project_number="" #项目标号
git_project_name="" #git工程
git_branch="" #提测分支
self_is_test="""是 / dev环境""" #研发是否自测
test_options="" #提测功能项
review_members=""#代码Review人员
project_pm=""#产品
project_developers=""#开发
project_pr_diff=""#提测内容pr
project_ui=""#ui人员
poject_comment=""#备注
online_time=""#预计上线时间
#部署发布顺序
#上线发布的分支
#上线时间
#测试报告
def readConfig(self,path):
yamlContent,ind,bsi = load_yaml_guess_indent(open(path.decode('utf-8')))
self.git_project_name = yamlContent['git_project_name']
self.project_name = yamlContent['project_name']
self.git_branch = yamlContent['git_branch']
self.test_options = yamlContent['test_options']
self.review_members = yamlContent['review_members']
self.project_pm = yamlContent['project_pm']
self.project_developers = yamlContent['project_developers']
self.poject_comment = yamlContent['poject_comment']
self.project_ui = yamlContent['project_ui']
self.project_pr_diff = yamlContent['project_pr_diff']
self.yamlContent = yamlContent
self.ind = ind
self.bsi = bsi
def readConfigFromTemplate(self):
path = os.path.dirname(os.path.realpath(__file__))
configs_path = os.path.join(path,'template.yaml')
self.readConfig(configs_path)
def save(self,path):
self.yamlContent['git_project_name'] = self.git_project_name
self.yamlContent['project_name'] = self.project_name
self.yamlContent['git_branch'] = self.git_branch
self.yamlContent['test_options'] = self.test_options
self.yamlContent['review_members'] = self.review_members
self.yamlContent['project_pm'] = self.project_pm
self.yamlContent['project_developers'] = self.project_developers
self.yamlContent['poject_comment'] = self.poject_comment
self.yamlContent['project_ui'] = self.project_ui
self.yamlContent['project_pr_diff'] = self.project_pr_diff
ruamel.yaml.round_trip_dump(self.yamlContent,open(path,'w'),indent=self.ind,block_seq_indent=self.bsi)
def log(self):
        print('project name:'+self.project_name)
        print('test branch:'+self.git_branch)
        print('test items:'+self.test_options)
        print('code reviewers:'+self.review_members)
        print('pm:'+self.project_pm)
        print('developers:'+self.project_developers)
        print('remarks:'+self.poject_comment)
        print('git repository:'+self.git_project_name)
        print('ui:'+self.project_ui)
        print('pr:'+self.project_pr_diff)
if __name__ == "__main__":
if sys.getdefaultencoding() != 'utf-8':
reload(sys)
sys.setdefaultencoding('utf-8')
config = TemplateConfig()
config.readConfigFromTemplate()
config.log()
| [
"os.path.realpath",
"sys.setdefaultencoding",
"os.path.join",
"sys.getdefaultencoding"
] | [((1629, 1664), 'os.path.join', 'os.path.join', (['path', '"""template.yaml"""'], {}), "(path, 'template.yaml')\n", (1641, 1664), False, 'import os\n'), ((2970, 2994), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (2992, 2994), False, 'import sys\n'), ((3035, 3066), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (3057, 3066), False, 'import sys\n'), ((1573, 1599), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1589, 1599), False, 'import os\n')] |
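# GLOG_minloglevel must be set before 'import caffe' below, otherwise caffe's C++ logging floods stderr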
import os; os.environ['GLOG_minloglevel'] = '2'
import caffe
import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
caffe.set_mode_cpu()
from time import time
cap = cv2.VideoCapture(0)
# Hand structure
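# o1_parent[i] is the index of joint i's parent, so each finger chain is rooted at the wrist (joint 0)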
o1_parent = np.concatenate([
[0], np.arange(0,4),
[0], np.arange(5,8),
[0], np.arange(9,12),
[0], np.arange(13,16),
[0], np.arange(17,20),
])
net = caffe.Net('RegNet_deploy.prototxt','RegNet_weights.caffemodel',caffe.TEST)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in matplotlib >= 3.6
fig.show()
fig.canvas.draw()
while True:
_, img = cap.read()
cv2.imshow('img', img)
if cv2.waitKey(1) == ord('q'): break
#resize and normalize
tight_crop_sized = cv2.resize(img, dsize=(128,128), interpolation=cv2.INTER_CUBIC)
tight_crop_sized = np.subtract(np.divide(tight_crop_sized,127.5), 1)
tight_crop_sized = np.reshape(np.moveaxis(tight_crop_sized, (0,1,2), (2,0,1)), (1,3,128,128))
# assert(tight_crop_sized.shape == net.blobs[net.inputs[0]].data.shape)
net.blobs[net.inputs[0]].data[...] = tight_crop_sized
pred = net.forward()
pred_3D = np.reshape(pred['joints3D_final_vec'], (21,3)).T
ax.clear()
for segment in range(pred_3D.shape[1]):
ax.plot(
[pred_3D[0,segment], pred_3D[0,o1_parent[segment]]],
[pred_3D[1,segment], pred_3D[1,o1_parent[segment]]],
[pred_3D[2,segment], pred_3D[2,o1_parent[segment]]],
)
fig.canvas.draw()
| [
"numpy.reshape",
"numpy.divide",
"cv2.imshow",
"numpy.moveaxis",
"matplotlib.pyplot.figure",
"cv2.VideoCapture",
"caffe.Net",
"caffe.set_mode_cpu",
"cv2.resize",
"cv2.waitKey",
"numpy.arange",
"matplotlib.pyplot.ion"
] | [((163, 183), 'caffe.set_mode_cpu', 'caffe.set_mode_cpu', ([], {}), '()\n', (181, 183), False, 'import caffe\n'), ((214, 233), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (230, 233), False, 'import cv2\n'), ((419, 495), 'caffe.Net', 'caffe.Net', (['"""RegNet_deploy.prototxt"""', '"""RegNet_weights.caffemodel"""', 'caffe.TEST'], {}), "('RegNet_deploy.prototxt', 'RegNet_weights.caffemodel', caffe.TEST)\n", (428, 495), False, 'import caffe\n'), ((495, 504), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (502, 504), True, 'import matplotlib.pyplot as plt\n'), ((511, 523), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (521, 523), True, 'import matplotlib.pyplot as plt\n'), ((554, 563), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (561, 563), True, 'import matplotlib.pyplot as plt\n'), ((635, 657), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (645, 657), False, 'import cv2\n'), ((749, 813), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(128, 128)', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC)\n', (759, 813), False, 'import cv2\n'), ((289, 304), 'numpy.arange', 'np.arange', (['(0)', '(4)'], {}), '(0, 4)\n', (298, 304), True, 'import numpy as np\n'), ((314, 329), 'numpy.arange', 'np.arange', (['(5)', '(8)'], {}), '(5, 8)\n', (323, 329), True, 'import numpy as np\n'), ((339, 355), 'numpy.arange', 'np.arange', (['(9)', '(12)'], {}), '(9, 12)\n', (348, 355), True, 'import numpy as np\n'), ((365, 382), 'numpy.arange', 'np.arange', (['(13)', '(16)'], {}), '(13, 16)\n', (374, 382), True, 'import numpy as np\n'), ((392, 409), 'numpy.arange', 'np.arange', (['(17)', '(20)'], {}), '(17, 20)\n', (401, 409), True, 'import numpy as np\n'), ((665, 679), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (676, 679), False, 'import cv2\n'), ((848, 882), 'numpy.divide', 'np.divide', (['tight_crop_sized', '(127.5)'], {}), '(tight_crop_sized, 127.5)\n', (857, 882), True, 'import numpy as np\n'), ((920, 971), 'numpy.moveaxis', 'np.moveaxis', (['tight_crop_sized', '(0, 1, 2)', '(2, 0, 1)'], {}), '(tight_crop_sized, (0, 1, 2), (2, 0, 1))\n', (931, 971), True, 'import numpy as np\n'), ((1158, 1205), 'numpy.reshape', 'np.reshape', (["pred['joints3D_final_vec']", '(21, 3)'], {}), "(pred['joints3D_final_vec'], (21, 3))\n", (1168, 1205), True, 'import numpy as np\n')] |
from Pipeline.Preprocessing.PreprocessingModule import PreprocessingModule as pp
from Pipeline.FeatureExtractor.FeatureExtraction import FeatureExtraction as fe
from Pipeline.DataGenerator.DataGenerator import DataGenerator as dg
from Pipeline.DataGenerator.DataGenerator import KindOfData as kd
import os
import pickle
# define some path
# for preprocessing
PATH_TO_META_FOLDER = os.path.join('auxiliary_files', 'data_to_preproc')
PATH_TO_VIDEO_FOLDER = os.path.join('auxiliary_files', 'video_files')
PATH_TO_WAV_FOLDER = os.path.join('auxiliary_files', 'wav_files')
# for feature extraction
PATH_TO_LIVE_DATA = os.path.join('auxiliary_files', 'wav_files')
LIVE_DATA_TARGET = 'pickle_samples.pkl'
# for feature extraction and data generator
PATH_TO_LIVE_DATA_WITH_EMBEDDINGS = os.path.join('auxiliary_files', 'products')
LIVE_WITH_EMBEDDINGS_TARGET = 'res.pkl'
# for dataset
PATH_TO_DATASET = os.path.join('auxiliary_files', 'dataset')
DATASET_NAME = 'live_set_genres.pkl'
if __name__ == "__main__":
SEQ_LEN = 96
dataset_dict = {}
for data_part in ['train', 'valid', 'test']:
# manage paths
path_to_meta = os.path.join(PATH_TO_META_FOLDER, data_part)
path_to_video = os.path.join(PATH_TO_VIDEO_FOLDER, data_part)
path_to_wav = os.path.join(PATH_TO_WAV_FOLDER, data_part)
path_to_live_data_dir = os.path.join(PATH_TO_LIVE_DATA, data_part)
path_to_live_data_with_embeddings_dir = os.path.join(PATH_TO_LIVE_DATA_WITH_EMBEDDINGS, data_part)
# ensure dirs exist
for cur_path in [path_to_meta, path_to_video, path_to_wav, path_to_live_data_dir,
path_to_live_data_with_embeddings_dir]:
if not os.path.isdir(cur_path):
os.makedirs(cur_path, exist_ok=True)
# create paths to the target pickles
path_to_live_data = os.path.join(path_to_live_data_dir, LIVE_DATA_TARGET)
path_to_live_data_with_embeddings = os.path.join(path_to_live_data_with_embeddings_dir,
LIVE_WITH_EMBEDDINGS_TARGET)
# preprocessing live samples and get embeddings
pp.preprocess_train(path_to_meta, path_to_video, path_to_wav, seq_len=SEQ_LEN)
fe.get_audioset_features(path_to_live_data, path_to_live_data_with_embeddings)
# generate liveset samples
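        # [1, 0, 0] is presumably the train/valid/test split ratio, i.e. every sample lands in 'train' (matching sets['train'] below)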
sets = dg.get_generated_sample(kd.LIVE, [1, 0, 0], path_to_live_data=path_to_live_data_with_embeddings,
need_shuffle=False)
x, y, genres = sets['train'] # get samples
dataset_dict[data_part] = (x, y, genres)
# save dataset
if not os.path.isdir(PATH_TO_DATASET):
os.makedirs(PATH_TO_DATASET, exist_ok=True)
dataset_filepath = os.path.join(PATH_TO_DATASET, DATASET_NAME)
with open(dataset_filepath, "wb") as f:
pickle.dump(dataset_dict, f)
| [
"pickle.dump",
"os.makedirs",
"Pipeline.Preprocessing.PreprocessingModule.PreprocessingModule.preprocess_train",
"Pipeline.DataGenerator.DataGenerator.DataGenerator.get_generated_sample",
"os.path.join",
"os.path.isdir",
"Pipeline.FeatureExtractor.FeatureExtraction.FeatureExtraction.get_audioset_features"
] | [((384, 434), 'os.path.join', 'os.path.join', (['"""auxiliary_files"""', '"""data_to_preproc"""'], {}), "('auxiliary_files', 'data_to_preproc')\n", (396, 434), False, 'import os\n'), ((458, 504), 'os.path.join', 'os.path.join', (['"""auxiliary_files"""', '"""video_files"""'], {}), "('auxiliary_files', 'video_files')\n", (470, 504), False, 'import os\n'), ((526, 570), 'os.path.join', 'os.path.join', (['"""auxiliary_files"""', '"""wav_files"""'], {}), "('auxiliary_files', 'wav_files')\n", (538, 570), False, 'import os\n'), ((616, 660), 'os.path.join', 'os.path.join', (['"""auxiliary_files"""', '"""wav_files"""'], {}), "('auxiliary_files', 'wav_files')\n", (628, 660), False, 'import os\n'), ((781, 824), 'os.path.join', 'os.path.join', (['"""auxiliary_files"""', '"""products"""'], {}), "('auxiliary_files', 'products')\n", (793, 824), False, 'import os\n'), ((897, 939), 'os.path.join', 'os.path.join', (['"""auxiliary_files"""', '"""dataset"""'], {}), "('auxiliary_files', 'dataset')\n", (909, 939), False, 'import os\n'), ((2773, 2816), 'os.path.join', 'os.path.join', (['PATH_TO_DATASET', 'DATASET_NAME'], {}), '(PATH_TO_DATASET, DATASET_NAME)\n', (2785, 2816), False, 'import os\n'), ((1141, 1185), 'os.path.join', 'os.path.join', (['PATH_TO_META_FOLDER', 'data_part'], {}), '(PATH_TO_META_FOLDER, data_part)\n', (1153, 1185), False, 'import os\n'), ((1210, 1255), 'os.path.join', 'os.path.join', (['PATH_TO_VIDEO_FOLDER', 'data_part'], {}), '(PATH_TO_VIDEO_FOLDER, data_part)\n', (1222, 1255), False, 'import os\n'), ((1278, 1321), 'os.path.join', 'os.path.join', (['PATH_TO_WAV_FOLDER', 'data_part'], {}), '(PATH_TO_WAV_FOLDER, data_part)\n', (1290, 1321), False, 'import os\n'), ((1354, 1396), 'os.path.join', 'os.path.join', (['PATH_TO_LIVE_DATA', 'data_part'], {}), '(PATH_TO_LIVE_DATA, data_part)\n', (1366, 1396), False, 'import os\n'), ((1445, 1503), 'os.path.join', 'os.path.join', (['PATH_TO_LIVE_DATA_WITH_EMBEDDINGS', 'data_part'], {}), '(PATH_TO_LIVE_DATA_WITH_EMBEDDINGS, data_part)\n', (1457, 1503), False, 'import os\n'), ((1859, 1912), 'os.path.join', 'os.path.join', (['path_to_live_data_dir', 'LIVE_DATA_TARGET'], {}), '(path_to_live_data_dir, LIVE_DATA_TARGET)\n', (1871, 1912), False, 'import os\n'), ((1957, 2042), 'os.path.join', 'os.path.join', (['path_to_live_data_with_embeddings_dir', 'LIVE_WITH_EMBEDDINGS_TARGET'], {}), '(path_to_live_data_with_embeddings_dir, LIVE_WITH_EMBEDDINGS_TARGET\n )\n', (1969, 2042), False, 'import os\n'), ((2160, 2238), 'Pipeline.Preprocessing.PreprocessingModule.PreprocessingModule.preprocess_train', 'pp.preprocess_train', (['path_to_meta', 'path_to_video', 'path_to_wav'], {'seq_len': 'SEQ_LEN'}), '(path_to_meta, path_to_video, path_to_wav, seq_len=SEQ_LEN)\n', (2179, 2238), True, 'from Pipeline.Preprocessing.PreprocessingModule import PreprocessingModule as pp\n'), ((2247, 2325), 'Pipeline.FeatureExtractor.FeatureExtraction.FeatureExtraction.get_audioset_features', 'fe.get_audioset_features', (['path_to_live_data', 'path_to_live_data_with_embeddings'], {}), '(path_to_live_data, path_to_live_data_with_embeddings)\n', (2271, 2325), True, 'from Pipeline.FeatureExtractor.FeatureExtraction import FeatureExtraction as fe\n'), ((2377, 2498), 'Pipeline.DataGenerator.DataGenerator.DataGenerator.get_generated_sample', 'dg.get_generated_sample', (['kd.LIVE', '[1, 0, 0]'], {'path_to_live_data': 'path_to_live_data_with_embeddings', 'need_shuffle': '(False)'}), '(kd.LIVE, [1, 0, 0], path_to_live_data=\n path_to_live_data_with_embeddings, need_shuffle=False)\n', (2400, 2498), True, 'from Pipeline.DataGenerator.DataGenerator import DataGenerator as dg\n'), ((2665, 2695), 'os.path.isdir', 'os.path.isdir', (['PATH_TO_DATASET'], {}), '(PATH_TO_DATASET)\n', (2678, 2695), False, 'import os\n'), ((2705, 2748), 'os.makedirs', 'os.makedirs', (['PATH_TO_DATASET'], {'exist_ok': '(True)'}), '(PATH_TO_DATASET, exist_ok=True)\n', (2716, 2748), False, 'import os\n'), ((2869, 2897), 'pickle.dump', 'pickle.dump', (['dataset_dict', 'f'], {}), '(dataset_dict, f)\n', (2880, 2897), False, 'import pickle\n'), ((1707, 1730), 'os.path.isdir', 'os.path.isdir', (['cur_path'], {}), '(cur_path)\n', (1720, 1730), False, 'import os\n'), ((1748, 1784), 'os.makedirs', 'os.makedirs', (['cur_path'], {'exist_ok': '(True)'}), '(cur_path, exist_ok=True)\n', (1759, 1784), False, 'import os\n')]
from django.db import models
from cms.models import CMSPlugin
class SomeModel(models.Model):
# If we don't define this field, a SystemCheckError will say: The field
# 'id' from parent model 'project.somemodel' clashes with the field
# 'id' from parent model 'cms.cmsplugin'.
primary_key = models.PositiveIntegerField(primary_key=True)
class SomeCMSPlugin(SomeModel, CMSPlugin):
pass
| [
"django.db.models.PositiveIntegerField"
] | [((308, 353), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (335, 353), False, 'from django.db import models\n')] |
"""
Chiasm Shell backend and version configuration.
:author: <NAME>
:license: MIT
"""
from __future__ import absolute_import
from pkg_resources import resource_string
BACKENDS = None
def get_backends():
"""
Returns a list of the available backends.
"""
# pylint: disable=W0603
global BACKENDS
if BACKENDS is None:
# deferred import to avoid circular dependency hell
from chiasm_shell.assembler import Assembler
from chiasm_shell.disassembler import Disassembler
BACKENDS = {
'asm' : Assembler(),
'disasm' : Disassembler()
}
return BACKENDS
def get_default_backend():
"""
Returns the backend instantiated by default by the ChiasmShell class.
"""
return 'asm'
__VERSION__ = resource_string('chiasm_shell', 'VERSION').strip().decode('utf-8')
| [
"chiasm_shell.assembler.Assembler",
"chiasm_shell.disassembler.Disassembler",
"pkg_resources.resource_string"
] | [((554, 565), 'chiasm_shell.assembler.Assembler', 'Assembler', ([], {}), '()\n', (563, 565), False, 'from chiasm_shell.assembler import Assembler\n'), ((590, 604), 'chiasm_shell.disassembler.Disassembler', 'Disassembler', ([], {}), '()\n', (602, 604), False, 'from chiasm_shell.disassembler import Disassembler\n'), ((785, 827), 'pkg_resources.resource_string', 'resource_string', (['"""chiasm_shell"""', '"""VERSION"""'], {}), "('chiasm_shell', 'VERSION')\n", (800, 827), False, 'from pkg_resources import resource_string\n')] |
import os
def csv_appender(*args, file='data/pc_stats.csv'):
    # 'a' appends when the file already exists, 'w' creates it fresh
    append_write = 'a' if os.path.exists(file) else 'w'
    with open(file, append_write) as f:
        f.write(','.join(args) + '\n')
if __name__ == '__main__':
csv_appender('dummy.csv', '1', 'asdasasd')
| [
"os.path.exists"
] | [((70, 90), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (84, 90), False, 'import os\n')] |
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import datetime
from asyncpg import utils
from asyncpg import _testbase as tb
class TestUtils(tb.ConnectedTestCase):
async def test_mogrify_simple(self):
cases = [
('timestamp',
datetime.datetime(2016, 10, 10),
"SELECT '2016-10-10 00:00:00'::timestamp"),
('int[]',
[[1, 2], [3, 4]],
"SELECT '{{1,2},{3,4}}'::int[]"),
]
for typename, data, expected in cases:
with self.subTest(value=data, type=typename):
mogrified = await utils._mogrify(
self.con, 'SELECT $1::{}'.format(typename), [data])
self.assertEqual(mogrified, expected)
async def test_mogrify_multiple(self):
mogrified = await utils._mogrify(
self.con, 'SELECT $1::int, $2::int[]',
[1, [2, 3, 4, 5]])
expected = "SELECT '1'::int, '{2,3,4,5}'::int[]"
self.assertEqual(mogrified, expected)
| [
"asyncpg.utils._mogrify",
"datetime.datetime"
] | [((997, 1069), 'asyncpg.utils._mogrify', 'utils._mogrify', (['self.con', '"""SELECT $1::int, $2::int[]"""', '[1, [2, 3, 4, 5]]'], {}), "(self.con, 'SELECT $1::int, $2::int[]', [1, [2, 3, 4, 5]])\n", (1011, 1069), False, 'from asyncpg import utils\n'), ((436, 467), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(10)', '(10)'], {}), '(2016, 10, 10)\n', (453, 467), False, 'import datetime\n')] |
from django.http import HttpResponse, HttpResponseRedirect
from django.core import serializers
from django.template import loader
from django.shortcuts import render, redirect
from django.urls import reverse
from django.forms import forms
from django.core.exceptions import ObjectDoesNotExist
import time
import secret
from .models import User, Item, Receipt, Group
from .forms import loginForm,createGroupForm, addReceiptForm, registrationForm
import cv2
import os
import io
import json
import re
# import file
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
from google.oauth2 import service_account
from twilio.rest import Client
def getJSONofCurrentUser(sessionData):
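    # NOTE: returns the model instance's __dict__, which also carries Django internals such as '_state'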
currentUserData = User.objects.get(phone=sessionData).__dict__
return currentUserData
def index(request):
return HttpResponse("Hello World!")
def login(request):
    if 'currentUser' in request.session:
return redirect('/shopperHelper/landing')
else:
if request.method == 'POST':
form = loginForm(request.POST or None)
print(form.errors)
if form.is_valid():
raw_Phone_Data = int(form.cleaned_data['login_form_data'])
registeredStatus = None
all_Users = list(User.objects.all())
model_Attribute = 'phone'
i = 0
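                # note: this linear scan could be replaced with User.objects.filter(phone=raw_Phone_Data).exists()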
while i < len(all_Users):
userPhoneNumber = int(User.objects.values(model_Attribute)[i][model_Attribute])
if userPhoneNumber == raw_Phone_Data:
registeredStatus = True
break
i += 1
print(registeredStatus)
if registeredStatus == True:
request.session['currentUser'] = str(raw_Phone_Data)
return HttpResponseRedirect('/shopperHelper/landing')
else:
print ('You should be redirected now')
return HttpResponseRedirect('/shopperHelper/register')
else:
return HttpResponseRedirect('/shopperHelper/login')
else:
form = loginForm()
args = {'form': form}
return render(request, 'shopperHelper/login.html', args)
def landing(request):
    if 'currentUser' in request.session:
userData = getJSONofCurrentUser(request.session['currentUser'])
args = {'userFirstName': userData['first_Name'], 'createGroupSuccessFlag' : "True"}
return render(request, 'shopperHelper/landing.html',args)
else:
return redirect('/shopperHelper/')
def createGroup(request):
    if 'currentUser' in request.session:
if request.method == 'POST':
form = createGroupForm(request.POST or None)
#print (form.errors) --for debugging purposes
if form.is_valid():
# groupName = form.cleaned_data['group_name']
# group_members = form.cleaned_data['group_members']
groupName = form.cleaned_data['name']
group_members = form.cleaned_data['members']
newGroup = Group(name=groupName, groupOwner=User.objects.get(phone=request.session['currentUser']))
newGroup.save()
newGroup.members.set(group_members)
newGroup.save()
return redirect('/shopperHelper/landing/?createGroupSuccessFlag=True')
else:
return HttpResponseRedirect('/shopperHelper/create_group')
else:
GroupForm = createGroupForm()
userData = getJSONofCurrentUser(request.session['currentUser'])
args = {'groupForm': GroupForm, 'userFirstName': userData['first_Name']}
return render(request, 'shopperHelper/create_group.html', args)
else:
return redirect('/shopperHelper/')
def addReceipt(request):
    if 'currentUser' in request.session:
if request.method == 'POST':
form = addReceiptForm(request.POST, request.FILES)
if form.is_valid():
receiptIDText = time.time()
newReceipt = Receipt(image=form.cleaned_data['image'],owner=User.objects.get(phone=request.session['currentUser']),groupAssigned=Group.objects.get(name=form.cleaned_data['group_Assigned']),receiptID=receiptIDText)
print ('--------------------------------------')
currentGroup = newReceipt.groupAssigned
request.session['group'] = str(currentGroup)
#members = Group.objects.filter(name=currentGroup)
members_qs = Group.objects.filter(name=currentGroup).values_list('members', flat = True).order_by('id')
membersList= []
for item in members_qs:
phoneNumber = User.objects.get(pk = item)
membersList.append(phoneNumber)
phoneNumberOfGroupMembersList = []
for number in membersList:
phoneNumberOfGroupMembersList.append(str(number))
# print (type(nameOfGroupMembers))
# print (nameOfGroupMembers[0])
# print (type(nameOfGroupMembers[0]))
# print (str(nameOfGroupMembers[0]))
print (phoneNumberOfGroupMembersList)
print (type(phoneNumberOfGroupMembersList[0]))
request.session['phoneNumberOfGroupMembersList'] = phoneNumberOfGroupMembersList
newReceipt.save()
request.session['receiptID'] = newReceipt.receiptID
imageLocation = Receipt.objects.filter(image=form.cleaned_data['image'])
image = cv2.imread("media/receipt_images/{}".format(form.cleaned_data['image']))
credentials = service_account.Credentials.from_service_account_file("..//slohacks-servicekey.json")
# Instantiates a client
client = vision.ImageAnnotatorClient(credentials=credentials)
file_name = "media/receipt_images/{}".format(form.cleaned_data['image'])
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response = client.text_detection(image=image)
document = response.full_text_annotation
texts = response.text_annotations
t = response.text_annotations[0].description
r = re.search('([0-9]|\s]*)[0-9|\s]*-[0-9|\s]*', t)
#Trying to implment regex for member numbers
#r = re.search('[0-9]*[A-Z]*[\s]*[M]*[e]*[m]*[b]*[e]*[r]*[\s]*[0-9|\s]*', t)
#r = re.search('[a-zA-Z0-9_|\s]*[M]*[e]*[m]*[b]*[e]*[r]*[\s]*[0-9|\s]*', t)
#r = re.search('[a-zA-Z0-9|\s]*[Member\s]*[\d{12}*', t)
#r = re.search('[0-9]\w+\s]*[Member\s]*\d{12}', t)
i = r.end(0)
t = t[i+1:]
r = re.search('\n[S]*[U]*[B]*[T]*[O]*[T]*[A]*[L]*\n', t)
i = r.start(0)
t = t[:i]
r = re.findall('\n[0-9]+\s.+', t) # might need to remove \n for other OS in deployment
no_and_names = []
for p in r:
no_and_names.append(p)
item_prices = []
r = re.findall('[0-9]+\.[0-9]+', t)
for p in r:
item_prices.append(p)
item_nos = []
item_names = []
for element in no_and_names:
i = element.find(' ')
item_nos.append(element[1:i])
item_names.append(element[i + 1:])
item_nos.extend(['0', '0', '0'])
item_names.extend(['SUBTOTAL', 'TAX', 'TOTAL'])
master_list = []
min_length = min([len(item_nos), len(item_names), len(item_prices)])
for i in range(0, min_length):
master_list.append((item_nos[i], item_names[i], item_prices[i]))
print (master_list)
item_list = master_list[:-2]
print (item_list)
itemNumList = []
itemPriceList = []
for item in item_list:
itemNumList.append(item[0])
itemPriceList.append((item[2]))
print (itemNumList)
print ('########################################')
itemFloatPriceList = []
# for item in itemPriceList:
for i in range(0, len(itemPriceList)):
item = float(itemPriceList[i])
itemFloatPriceList.append(item)
print (itemPriceList)
request.session['itemNumList'] = itemNumList
request.session['itemFloatPriceList'] = itemFloatPriceList
#request.session['master_list'] = master_list
request.session['list'] = item_list
for val in master_list:
itemT = Item(number = val[0], name = val[1], price = val[2])
itemT.save()
newReceipt.items.add(itemT)
newReceipt.save()
return redirect('/shopperHelper/select_items/?imageUploadSuccessFlag=True')
else:
return HttpResponse(':(')
else:
addReceiptFormData = addReceiptForm()
userData = getJSONofCurrentUser(request.session['currentUser'])
args = {'receiptForm': addReceiptFormData, 'userFirstName': userData['first_Name']}
return render(request, 'shopperHelper/addReceipt.html', args)
else:
return redirect('/shopperHelper')
def checkBoxPage(request):
if request.session.has_key('currentUser'):
return render(request, 'shopperHelper/checkBoxPage.html')
else:
return redirect('/shopperHelper/')
def register(request):
if request.method == 'POST':
form = registrationForm(request.POST)
if form.is_valid():
form.save()
args = {'logoutFlag' : "True"}
return redirect('/shopperHelper/login/?registerFlag=True', args)
else:
form = registrationForm()
args = {'form': form}
return render(request, 'shopperHelper/register.html', args)
def selectItems(request):
if request.session.has_key('currentUser'):
if request.method == 'POST':
itemData = json.loads(request.POST['itemData'])
print (itemData)
receiptList = []
for name, item in itemData.items():
print ('------------------------------------')
print (item)
print (item.items()) #{'assigned': True, 'userAssigned': '9253535156', 'item': '1'}
elements = (item['assigned'], item['userAssigned'], item['item'])
receiptList.append(elements)
for key, value in item.items():
print('{} - {}'.format(key,value))
# y=(key,value)
# x.append(y)
print (receiptList)
receiptList.sort()
print (receiptList)
for item in receiptList:
if item[0] == False:
receiptList.remove(item)
print (receiptList)
phoneNumberOfGroupMembersList = request.session['phoneNumberOfGroupMembersList']
itemNumList = request.session['itemNumList']
itemFloatPriceList = request.session['itemFloatPriceList']
print (itemFloatPriceList)
tempList = []
masterList = []
print('Start Loop')
print (len(itemNumList))
print (len(receiptList))
print (len(phoneNumberOfGroupMembersList))
#For 1 user
dummyList = []
for k in range(0, len(itemNumList)+1):
dummyList.append(0)
masterList.append(dummyList)
for h in range(0, len(phoneNumberOfGroupMembersList)):
tempList.append(phoneNumberOfGroupMembersList[h])
for i in range(0, len(itemNumList)):
j = 0
while 1:
if itemNumList[i] == receiptList[j][2] and phoneNumberOfGroupMembersList[h] == receiptList[j][1]:
if receiptList[j][0] == True:
tempList.append(1)
break
else:
tempList.append(0)
break
else:
j += 1
if j > len(itemNumList):
tempList.append(0)
break
masterList.append(tempList)
tempList = []
print ('Finish Loop')
print (masterList)
itemizedList = []
itemizedList.append(calc_totals(masterList, itemFloatPriceList))
print (itemizedList)
# Your Account SID from twilio.com/console
account_sid = secret.account_sid
# Your Auth Token from twilio.com/console
auth_token = secret.auth_token
for i in range(0, len(itemizedList[0])):
print (type(itemizedList[0]))
print (type(itemizedList[0][i]))
# print (item[0][i][1])
print (type(itemizedList[0][i][1]))
message = ("Hello! You owe: $" + str(itemizedList[0][i][1]))
client = Client(account_sid, auth_token)
message = client.api.account.messages.create(
to= ("+1" + str(itemizedList[0][i][0])),
from_="+15106942080",
body=message)
'''
print (receiptList) #[(True, '5108610831', '1'), (True, '9253535156', '1'), (True, '8189394534', '1')]
#[(True, '9253535156', '1'), (True, '5108610831', '1'), (True, '8189394534', '1')]
receiptList.sort()
print (receiptList)
itemNumList = request.session['itemNumList']
phoneNumberOfGroupMembersList = request.session['phoneNumberOfGroupMembersList']
print ('-------------------------------------------')
print (itemNumList) #['1', '44004', '287783', '30669', '18600']
print (phoneNumberOfGroupMembersList) #['5108610831', '9253535156', '8189394534']
masterArray = []
for member in phoneNumberOfGroupMembersList:
tempArray = []
for receipt in receiptList:
print (receipt[0])
if receipt[0] == True:
for itemNum in itemNumList:
print (receipt[1], receipt[2], itemNum)
if receipt[1] == member and receipt[2] == itemNum:
tempArray.append('1')
else:
tempArray.append('0')
print (tempArray)
masterArray = []
i = 0
for stdTuple in receiptList:
if stdTuple[0] == True:
while i < len(receiptList):
tempArray = []
if phoneNumberOfGroupMembersList[i] == stdTuple[1]:
tempTuple = (stdTuple[1], stdTuple[2])
tempArray.append(tempTuple)
masterArray.append(tempArray)
i += 1
else:
pass
print (masterArray)
masterArray.sort()
print (masterArray)
'''
# return redirect("/shopperHelper/summary/")
return HttpResponse("Hello")
else:
item_list = request.session['list'] #takes masterList data from addReceiptView
phoneNumberOfGroupMembersList = request.session['phoneNumberOfGroupMembersList']
#print (master_list)
print (item_list)
userData = getJSONofCurrentUser(request.session['currentUser'])
args = {'item_list': item_list, 'userFirstName': userData['first_Name'], 'imageUploadSuccessFlag' : "True", 'phoneNumberOfGroupMembersList' : phoneNumberOfGroupMembersList,}
return render(request, 'shopperHelper/selectItems.html', args)
else:
return redirect('/shopperHelper/')
# '''
def summary(request):
if request.session.has_key('currentUser'):
# if request.method == 'POST':
return render(request, 'shopperHelper/summary.html')
# else:
# return render(request, 'shopperHelper/summary.html')
else:
return redirect('/shopperHelper/')
# '''
# class Item:
# def __init__(self, price):
# self.price = price
#
# def __eq__(self, other):
# return ((type(other) == Item)
# and self.price == other.price
# )
#
# def __repr__(self):
# return ("Item({!r})".format(self.price))
def calc_totals(list1, items):
print("list1: " + str(list1))
print("item prices: " + str(items))
list2 = []
list3 = []
i = 0
j = 0
for j in range(1, len(list1[i])):
list2.append(0)
for i in range(1, len(list1)):
list2[j-1] += list1[i][j]
#print(list2[j-1])
#print(list2)
i = 0
for i in range(0, len(list2)):
list2[i] = items[i]/list2[i]
print("split price per item: " + str(list2))
i = 0
j = 0
for i in range(1, len(list1)):
list3.append([list1[i][0], 0])
for j in range(0, len(list2)):
list3[i-1][1] += list1[i][j+1]*list2[j]
list3[i-1][1] = round(list3[i-1][1], 2)
return list3
# list1 = [[0, "i1", "i2", "i3"],["dom", 1, 1, 0],["russ", 1, 1, 1],["alex", 1, 0, 0]]
# item1 = Item(2.99)
# item2 = Item(5.99)
# item3 = Item(9.99)
# items = [item1, item2, item3]
# print("total per person: " + str(calc_totals(list1, items)))
def logout(request):
try:
del request.session['currentUser']
except:
print("Fail")
pass
args = {'logoutFlag' : "True"}
return redirect('/shopperHelper/login/?logoutFlag=True', args)
| [
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"json.loads",
"google.oauth2.service_account.Credentials.from_service_account_file",
"twilio.rest.Client",
"django.http.HttpResponse",
"google.cloud.vision.ImageAnnotatorClient",
"io.open",
"google.cloud.vision.types.Image",
"django.shortcuts.redirect",
"re.findall",
"time.time",
"re.search"
] | [((905, 933), 'django.http.HttpResponse', 'HttpResponse', (['"""Hello World!"""'], {}), "('Hello World!')\n", (917, 933), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((18629, 18684), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/login/?logoutFlag=True"""', 'args'], {}), "('/shopperHelper/login/?logoutFlag=True', args)\n", (18637, 18684), False, 'from django.shortcuts import render, redirect\n'), ((1018, 1052), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/landing"""'], {}), "('/shopperHelper/landing')\n", (1026, 1052), False, 'from django.shortcuts import render, redirect\n'), ((2629, 2680), 'django.shortcuts.render', 'render', (['request', '"""shopperHelper/landing.html"""', 'args'], {}), "(request, 'shopperHelper/landing.html', args)\n", (2635, 2680), False, 'from django.shortcuts import render\n'), ((2705, 2732), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/"""'], {}), "('/shopperHelper/')\n", (2713, 2732), False, 'from django.shortcuts import render, redirect\n'), ((3968, 3995), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/"""'], {}), "('/shopperHelper/')\n", (3976, 3995), False, 'from django.shortcuts import render, redirect\n'), ((9959, 9985), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper"""'], {}), "('/shopperHelper')\n", (9967, 9985), False, 'from django.shortcuts import render, redirect\n'), ((10076, 10126), 'django.shortcuts.render', 'render', (['request', '"""shopperHelper/checkBoxPage.html"""'], {}), "(request, 'shopperHelper/checkBoxPage.html')\n", (10082, 10126), False, 'from django.shortcuts import render\n'), ((10152, 10179), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/"""'], {}), "('/shopperHelper/')\n", (10160, 10179), False, 'from django.shortcuts import render, redirect\n'), ((10544, 10596), 'django.shortcuts.render', 'render', (['request', '"""shopperHelper/register.html"""', 'args'], {}), "(request, 'shopperHelper/register.html', args)\n", (10550, 10596), False, 'from django.shortcuts import render\n'), ((16857, 16884), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/"""'], {}), "('/shopperHelper/')\n", (16865, 16884), False, 'from django.shortcuts import render, redirect\n'), ((17015, 17060), 'django.shortcuts.render', 'render', (['request', '"""shopperHelper/summary.html"""'], {}), "(request, 'shopperHelper/summary.html')\n", (17021, 17060), False, 'from django.shortcuts import render\n'), ((17170, 17197), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/"""'], {}), "('/shopperHelper/')\n", (17178, 17197), False, 'from django.shortcuts import render, redirect\n'), ((2329, 2378), 'django.shortcuts.render', 'render', (['request', '"""shopperHelper/login.html"""', 'args'], {}), "(request, 'shopperHelper/login.html', args)\n", (2335, 2378), False, 'from django.shortcuts import render\n'), ((3887, 3943), 'django.shortcuts.render', 'render', (['request', '"""shopperHelper/create_group.html"""', 'args'], {}), "(request, 'shopperHelper/create_group.html', args)\n", (3893, 3943), False, 'from django.shortcuts import render\n'), ((9879, 9933), 'django.shortcuts.render', 'render', (['request', '"""shopperHelper/addReceipt.html"""', 'args'], {}), "(request, 'shopperHelper/addReceipt.html', args)\n", (9885, 9933), False, 'from django.shortcuts import render\n'), ((10397, 10454), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/login/?registerFlag=True"""', 'args'], {}), 
"('/shopperHelper/login/?registerFlag=True', args)\n", (10405, 10454), False, 'from django.shortcuts import render, redirect\n'), ((10731, 10767), 'json.loads', 'json.loads', (["request.POST['itemData']"], {}), "(request.POST['itemData'])\n", (10741, 10767), False, 'import json\n'), ((16211, 16232), 'django.http.HttpResponse', 'HttpResponse', (['"""Hello"""'], {}), "('Hello')\n", (16223, 16232), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((16775, 16830), 'django.shortcuts.render', 'render', (['request', '"""shopperHelper/selectItems.html"""', 'args'], {}), "(request, 'shopperHelper/selectItems.html', args)\n", (16781, 16830), False, 'from django.shortcuts import render\n'), ((2186, 2230), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/shopperHelper/login"""'], {}), "('/shopperHelper/login')\n", (2206, 2230), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((3494, 3557), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/landing/?createGroupSuccessFlag=True"""'], {}), "('/shopperHelper/landing/?createGroupSuccessFlag=True')\n", (3502, 3557), False, 'from django.shortcuts import render, redirect\n'), ((3599, 3650), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/shopperHelper/create_group"""'], {}), "('/shopperHelper/create_group')\n", (3619, 3650), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((4233, 4244), 'time.time', 'time.time', ([], {}), '()\n', (4242, 4244), False, 'import time\n'), ((5942, 6032), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['"""..//slohacks-servicekey.json"""'], {}), "(\n '..//slohacks-servicekey.json')\n", (5995, 6032), False, 'from google.oauth2 import service_account\n'), ((6094, 6146), 'google.cloud.vision.ImageAnnotatorClient', 'vision.ImageAnnotatorClient', ([], {'credentials': 'credentials'}), '(credentials=credentials)\n', (6121, 6146), False, 'from google.cloud import vision\n'), ((6373, 6401), 'google.cloud.vision.types.Image', 'types.Image', ([], {'content': 'content'}), '(content=content)\n', (6384, 6401), False, 'from google.cloud.vision import types\n'), ((6655, 6705), 're.search', 're.search', (['"""([0-9]|\\\\s]*)[0-9|\\\\s]*-[0-9|\\\\s]*"""', 't'], {}), "('([0-9]|\\\\s]*)[0-9|\\\\s]*-[0-9|\\\\s]*', t)\n", (6664, 6705), False, 'import re\n'), ((7168, 7222), 're.search', 're.search', (['"""\n[S]*[U]*[B]*[T]*[O]*[T]*[A]*[L]*\n"""', 't'], {}), '("""\n[S]*[U]*[B]*[T]*[O]*[T]*[A]*[L]*\n""", t)\n', (7177, 7222), False, 'import re\n'), ((7300, 7330), 're.findall', 're.findall', (['"""\n[0-9]+\\\\s.+"""', 't'], {}), "('\\n[0-9]+\\\\s.+', t)\n", (7310, 7330), False, 'import re\n'), ((7543, 7575), 're.findall', 're.findall', (['"""[0-9]+\\\\.[0-9]+"""', 't'], {}), "('[0-9]+\\\\.[0-9]+', t)\n", (7553, 7575), False, 'import re\n'), ((9495, 9563), 'django.shortcuts.redirect', 'redirect', (['"""/shopperHelper/select_items/?imageUploadSuccessFlag=True"""'], {}), "('/shopperHelper/select_items/?imageUploadSuccessFlag=True')\n", (9503, 9563), False, 'from django.shortcuts import render, redirect\n'), ((9605, 9623), 'django.http.HttpResponse', 'HttpResponse', (['""":("""'], {}), "(':(')\n", (9617, 9623), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((13934, 13965), 'twilio.rest.Client', 'Client', (['account_sid', 'auth_token'], {}), '(account_sid, auth_token)\n', (13940, 13965), False, 'from twilio.rest import Client\n'), ((1942, 
1988), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/shopperHelper/landing"""'], {}), "('/shopperHelper/landing')\n", (1962, 1988), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((2097, 2144), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/shopperHelper/register"""'], {}), "('/shopperHelper/register')\n", (2117, 2144), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((6260, 6284), 'io.open', 'io.open', (['file_name', '"""rb"""'], {}), "(file_name, 'rb')\n", (6267, 6284), False, 'import io\n')] |
# -*- coding: UTF-8 -*-
import os
import json
import csv
from utils import mysql, log, Configuration, parse_conf_args, path, process_assert
def prepare_settle_future(context, conf):
result_code = 0
logger = log.get_logger(category="PrepareSettleFuture")
trade_system_id = conf.get("tradeSystemId")
settlement_id = conf.get("settlementId")
base_dir = conf.get("baseDataHome")
data_target_dir = os.path.join(base_dir, trade_system_id, settlement_id)
data_target_dir = path.convert(data_target_dir)
# 下场文件导入数据库
logger.info("[load csv to database with %s] begin" % json.dumps(conf, encoding="UTF-8", ensure_ascii=False))
mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
mysql_conn = mysql_pool.get_cnx()
mysql_conn.set_charset_collation('utf8')
try:
mysql_conn.start_transaction()
cursor = mysql_conn.cursor()
logger.info("[get current trading day]......")
sql = """SELECT t1.tradingday FROM siminfo.t_tradesystemtradingday t1 WHERE t1.tradesystemid = %s"""
cursor.execute(sql, (trade_system_id,))
row = cursor.fetchone()
current_trading_day = str(row[0])
logger.info("[get current trading day] current_trading_day = %s" % (current_trading_day))
sql = """SELECT
t1.tradingday
FROM
dbclear.t_settlement t1,
siminfo.t_tradesystemsettlementgroup t3
WHERE t1.tradingday = %s
AND t1.settlementgroupid = t3.settlementgroupid
AND t3.tradesystemid = %s
AND t1.settlementid = %s Limit 1 """
cursor.execute(sql, (current_trading_day, trade_system_id, settlement_id))
row = cursor.fetchone()
if row is not None:
logger.error("[load data to dbclear] Error: Data for %s-%s is existed." % (trade_system_id, settlement_id))
else:
logger.info("[generate settlement info]......")
sql = """INSERT INTO dbclear.t_settlement(tradingday, settlementgroupid, settlementid, settlementstatus)
SELECT %s, settlementgroupid, %s, '0'
FROM siminfo.t_tradesystemsettlementgroup
WHERE tradesystemid = %s"""
cursor.execute(sql, (current_trading_day, settlement_id, trade_system_id))
logger.info("[load ClientPosition.csv to dbclear]......")
sql = """DELETE FROM dbclear.t_ClientPosition WHERE tradingday = '%s' AND SettlementGroupID = 'TS-%s' AND SettlementID = '%s'""" % (
current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
csv_path = os.path.join(data_target_dir, "ClientPosition.csv")
csv_path = csv_path.replace("\\", "/")
sql = """LOAD DATA LOCAL INFILE '%s'
INTO TABLE dbclear.t_ClientPosition
CHARACTER SET utf8
fields terminated by ','
IGNORE 1 LINES
SET TradingDay = '%s', SettlementGroupID = 'TS-%s', SettlementID = '%s'""" % (
csv_path, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
sql = """UPDATE
dbclear.t_ClientPosition t1,
(SELECT
t1.clientid,
t1.settlementgroupid
FROM
siminfo.t_investorclient t1,
siminfo.t_tradesystemsettlementgroup t2
WHERE t2.tradesystemid = '%s'
AND t1.settlementgroupid = t2.settlementgroupid) t2
SET t1.settlementgroupid = t2.settlementgroupid
WHERE t1.tradingday = '%s' AND t1.clientid = t2.clientid AND t1.settlementgroupid = 'TS-%s' AND t1.settlementid = '%s'""" % (
trade_system_id, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
logger.info("[load PartPosition.csv to dbclear]......")
sql = """DELETE FROM dbclear.t_PartPosition WHERE tradingday = '%s' AND SettlementGroupID = 'TS-%s' AND SettlementID = '%s'""" % (
current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
csv_path = os.path.join(data_target_dir, "PartPosition.csv")
csv_path = csv_path.replace("\\", "/")
sql = """LOAD DATA LOCAL INFILE '%s'
INTO TABLE dbclear.t_PartPosition
CHARACTER SET utf8
fields terminated by ','
IGNORE 1 LINES
SET TradingDay = '%s', SettlementGroupID = 'TS-%s', SettlementID = '%s'""" % (
csv_path, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
sql = """UPDATE
dbclear.t_PartPosition t1,
(SELECT
t1.participantid,
t1.settlementgroupid
FROM
siminfo.t_participant t1,
siminfo.t_tradesystemsettlementgroup t2
WHERE t2.tradesystemid = '%s'
AND t1.settlementgroupid = t2.settlementgroupid) t2
SET t1.settlementgroupid = t2.settlementgroupid
WHERE t1.tradingday = '%s' AND t1.participantid = t2.participantid AND t1.settlementgroupid = 'TS-%s' AND t1.settlementid = '%s'""" % (
trade_system_id, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
logger.info("[load MarketData.csv to dbclear]......")
sql = """DELETE FROM dbclear.t_MarketData WHERE tradingday = '%s' AND SettlementGroupID = 'TS-%s' AND SettlementID = '%s'""" % (
current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
csv_path = os.path.join(data_target_dir, "MarketData.csv")
csv_path = csv_path.replace("\\", "/")
sql = """LOAD DATA LOCAL INFILE '%s'
INTO TABLE dbclear.t_MarketData
CHARACTER SET utf8
fields terminated by ','
IGNORE 1 LINES
(TradingDay,SettlementGroupID,SettlementID,LastPrice,PreSettlementPrice,PreClosePrice,PreOpenInterest,OpenPrice,HighestPrice,LowestPrice,Volume,Turnover,OpenInterest,ClosePrice,SettlementPrice,UpperLimitPrice,LowerLimitPrice,PreDelta,CurrDelta,UpdateTime,UpdateMillisec,InstrumentID)
SET TradingDay = '%s', SettlementGroupID = 'TS-%s', SettlementID = '%s'""" % (
csv_path, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
sql = """UPDATE
dbclear.t_MarketData t1,
(SELECT
t1.instrumentid,
t1.settlementgroupid
FROM
siminfo.t_instrument t1,
siminfo.t_tradesystemsettlementgroup t2
WHERE t2.tradesystemid = '%s'
AND t1.settlementgroupid = t2.settlementgroupid) t2
SET t1.settlementgroupid = t2.settlementgroupid
WHERE t1.tradingday = '%s' AND t1.instrumentid = t2.instrumentid AND t1.settlementgroupid = 'TS-%s' AND t1.settlementid = '%s'""" % (
trade_system_id, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
logger.info("[load Order.csv to dbclear]......")
sql = """DELETE FROM dbclear.t_Order WHERE tradingday = '%s' AND SettlementGroupID = 'TS-%s' AND SettlementID = '%s'""" % (
current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
csv_path = os.path.join(data_target_dir, "Order.csv")
csv_path = csv_path.replace("\\", "/")
sql = """LOAD DATA LOCAL INFILE '%s'
INTO TABLE dbclear.t_Order
CHARACTER SET utf8
fields terminated by ','
IGNORE 1 LINES
SET TradingDay = '%s', SettlementGroupID = 'TS-%s', SettlementID = '%s'""" % (
csv_path, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
sql = """UPDATE
dbclear.t_Order t1,
(SELECT
t1.clientid,
t1.settlementgroupid
FROM
siminfo.t_investorclient t1,
siminfo.t_tradesystemsettlementgroup t2
WHERE t2.tradesystemid = '%s'
AND t1.settlementgroupid = t2.settlementgroupid) t2
SET t1.settlementgroupid = t2.settlementgroupid
WHERE t1.tradingday = '%s' AND t1.clientid = t2.clientid AND t1.settlementgroupid = 'TS-%s' AND t1.settlementid = '%s'""" % (
trade_system_id, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
logger.info("[load Trade.csv to dbclear]......")
sql = """DELETE FROM dbclear.t_Trade WHERE tradingday = '%s' AND SettlementGroupID = 'TS-%s' AND SettlementID = '%s'""" % (
current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
csv_path = os.path.join(data_target_dir, "Trade.csv")
csv_path = csv_path.replace("\\", "/")
sql = """LOAD DATA LOCAL INFILE '%s'
INTO TABLE dbclear.t_Trade
CHARACTER SET utf8
fields terminated by ','
IGNORE 1 LINES
SET TradingDay = '%s', SettlementGroupID = 'TS-%s', SettlementID = '%s'""" % (
csv_path, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
sql = """UPDATE
dbclear.t_Trade t1,
(SELECT
t1.clientid,
t1.settlementgroupid
FROM
siminfo.t_investorclient t1,
siminfo.t_tradesystemsettlementgroup t2
WHERE t2.tradesystemid = '%s'
AND t1.settlementgroupid = t2.settlementgroupid) t2
SET t1.settlementgroupid = t2.settlementgroupid
WHERE t1.tradingday = '%s' AND t1.clientid = t2.clientid AND t1.settlementgroupid = 'TS-%s' AND t1.settlementid = '%s'""" % (
trade_system_id, current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
# 加载交易手续费率
logger.info("[load ClientTransFeeRatio to dbclear]......")
sql = """DELETE FROM dbclear.t_clienttransfeeratio WHERE tradingday = '%s' AND SettlementGroupID in (SELECT settlementgroupid FROM siminfo.t_tradesystemsettlementgroup WHERE tradesystemid = %s) AND SettlementID = '%s'""" % (
current_trading_day, trade_system_id, settlement_id)
cursor.execute(sql)
sql = """INSERT INTO dbclear.t_clienttransfeeratio(tradingday, settlementgroupid, settlementid, participantid, clientid, instrumentid, tradingrole, hedgeflag, openfeeratio, closeyesterdayfeeratio, closetodayfeeratio,valuemode,minopenfee,maxopenfee,minclosefee,maxclosefee)
SELECT %s AS tradingday,
t1.settlementgroupid,
%s AS settlementid,
IFNULL(t2.participantid, t3.participantid),
IFNULL(t2.clientid, t3.clientid),
t1.instrumentid,
IFNULL(t2.tradingrole, t3.tradingrole),
IFNULL(t2.hedgeflag, t3.hedgeflag),
IFNULL(t2.openfeeratio, t3.openfeeratio),
IFNULL(t2.closeyesterdayfeeratio, t3.closeyesterdayfeeratio),
IFNULL(t2.closetodayfeeratio, t3.closetodayfeeratio),
IFNULL(t2.valuemode, t3.valuemode),
IFNULL(t2.minopenfee, t3.minopenfee),
IFNULL(t2.maxopenfee, t3.maxopenfee),
IFNULL(t2.minclosefee, t3.minclosefee),
IFNULL(t2.maxclosefee, t3.maxclosefee)
FROM siminfo.t_instrument t1
LEFT JOIN siminfo.t_transfeeratedetail t2
ON(t1.settlementgroupid = t2.settlementgroupid AND t1.instrumentid = t2.instrumentid)
LEFT JOIN siminfo.t_transfeeratedetail t3
ON(t1.settlementgroupid = t3.settlementgroupid AND t3.instrumentid = '00000000')
WHERE t1.settlementgroupid IN (SELECT settlementgroupid FROM siminfo.t_tradesystemsettlementgroup WHERE tradesystemid = %s)"""
cursor.execute(sql, (current_trading_day, settlement_id, trade_system_id))
# 加载客户资金表数据
logger.info("[load ClientFund to dbclear]......")
sql = """DELETE FROM dbclear.t_clientfund WHERE tradingday = %s AND settlementgroupid IN (SELECT settlementgroupid FROM siminfo.t_tradesystemsettlementgroup WHERE tradesystemid = %s) AND settlementid = %s"""
cursor.execute(sql, (current_trading_day, trade_system_id, settlement_id))
sql = """INSERT INTO dbclear.t_clientfund (TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, Available, TransFee, DelivFee, PositionMargin, Profit, StockValue)
SELECT %s, t1.settlementgroupid, %s, t1.participantid, t1.clientid, t1.accountid, 0, 0, 0, 0, 0, 0
FROM siminfo.t_clientfund t1
WHERE t1.settlementgroupid IN (SELECT settlementgroupid FROM siminfo.t_tradesystemsettlementgroup WHERE tradesystemid = %s)"""
cursor.execute(sql, (current_trading_day, settlement_id, trade_system_id))
# 更新结算价
logger.info("[update future settlementprice to dbclear]......")
sql = """update dbclear.t_marketdata t, siminfo.t_tradesystemsettlementgroup t2
set t.SettlementPrice = %s where t.InstrumentID = %s
and t.TradingDay = %s and t.SettlementID = %s and t.SettlementGroupID = t2.SettlementGroupID
and t2.TradeSystemID = %s"""
params = []
marketdata = [row for row in csv.DictReader(open(os.path.join(data_target_dir, "future_depthmarketdata.csv")))]
for data in marketdata:
params.append((data['SettlementPrice'], data['InstrumentID'], current_trading_day, settlement_id, trade_system_id))
cursor.executemany(sql, params)
mysql_conn.commit()
except Exception as e:
logger.error(
"[load data to dbclear with %s] Error: %s" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False), e))
result_code = -1
finally:
mysql_conn.close()
logger.info("[load csv to database with %s] end" % json.dumps(conf, encoding="UTF-8", ensure_ascii=False))
return result_code
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["hosts:hosts", "mysql"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
process_assert(prepare_settle_future(context, conf))
if __name__ == "__main__":
main()
| [
"utils.Configuration.load",
"utils.log.get_logger",
"json.dumps",
"os.path.join",
"utils.path.convert",
"utils.parse_conf_args"
] | [((230, 276), 'utils.log.get_logger', 'log.get_logger', ([], {'category': '"""PrepareSettleFuture"""'}), "(category='PrepareSettleFuture')\n", (244, 276), False, 'from utils import mysql, log, Configuration, parse_conf_args, path, process_assert\n'), ((442, 496), 'os.path.join', 'os.path.join', (['base_dir', 'trade_system_id', 'settlement_id'], {}), '(base_dir, trade_system_id, settlement_id)\n', (454, 496), False, 'import os\n'), ((522, 551), 'utils.path.convert', 'path.convert', (['data_target_dir'], {}), '(data_target_dir)\n', (534, 551), False, 'from utils import mysql, log, Configuration, parse_conf_args, path, process_assert\n'), ((17076, 17140), 'utils.parse_conf_args', 'parse_conf_args', (['__file__'], {'config_names': "['hosts:hosts', 'mysql']"}), "(__file__, config_names=['hosts:hosts', 'mysql'])\n", (17091, 17140), False, 'from utils import mysql, log, Configuration, parse_conf_args, path, process_assert\n'), ((17164, 17259), 'utils.Configuration.load', 'Configuration.load', ([], {'base_dir': 'base_dir', 'config_names': 'config_names', 'config_files': 'config_files'}), '(base_dir=base_dir, config_names=config_names,\n config_files=config_files)\n', (17182, 17259), False, 'from utils import mysql, log, Configuration, parse_conf_args, path, process_assert\n'), ((629, 683), 'json.dumps', 'json.dumps', (['conf'], {'encoding': '"""UTF-8"""', 'ensure_ascii': '(False)'}), "(conf, encoding='UTF-8', ensure_ascii=False)\n", (639, 683), False, 'import json\n'), ((2864, 2915), 'os.path.join', 'os.path.join', (['data_target_dir', '"""ClientPosition.csv"""'], {}), "(data_target_dir, 'ClientPosition.csv')\n", (2876, 2915), False, 'import os\n'), ((4694, 4743), 'os.path.join', 'os.path.join', (['data_target_dir', '"""PartPosition.csv"""'], {}), "(data_target_dir, 'PartPosition.csv')\n", (4706, 4743), False, 'import os\n'), ((6566, 6613), 'os.path.join', 'os.path.join', (['data_target_dir', '"""MarketData.csv"""'], {}), "(data_target_dir, 'MarketData.csv')\n", (6578, 6613), False, 'import os\n'), ((8740, 8782), 'os.path.join', 'os.path.join', (['data_target_dir', '"""Order.csv"""'], {}), "(data_target_dir, 'Order.csv')\n", (8752, 8782), False, 'import os\n'), ((10569, 10611), 'os.path.join', 'os.path.join', (['data_target_dir', '"""Trade.csv"""'], {}), "(data_target_dir, 'Trade.csv')\n", (10581, 10611), False, 'import os\n'), ((16926, 16980), 'json.dumps', 'json.dumps', (['conf'], {'encoding': '"""UTF-8"""', 'ensure_ascii': '(False)'}), "(conf, encoding='UTF-8', ensure_ascii=False)\n", (16936, 16980), False, 'import json\n'), ((16742, 16796), 'json.dumps', 'json.dumps', (['conf'], {'encoding': '"""UTF-8"""', 'ensure_ascii': '(False)'}), "(conf, encoding='UTF-8', ensure_ascii=False)\n", (16752, 16796), False, 'import json\n'), ((16325, 16384), 'os.path.join', 'os.path.join', (['data_target_dir', '"""future_depthmarketdata.csv"""'], {}), "(data_target_dir, 'future_depthmarketdata.csv')\n", (16337, 16384), False, 'import os\n')] |
from PySide import QtCore, QtGui
from ui.ui_dlg_add_preset import Ui_DlgAddPreset
class DlgAddPreset(QtGui.QDialog, Ui_DlgAddPreset):
def __init__(self, parent=None):
super(DlgAddPreset, self).__init__(parent)
self.playlist = parent._mixer.default_layer()._playlist
self.setupUi(self)
# Populate preset list
classes = self.playlist.get_available_presets()
self.cb_preset_type.addItems(classes)
self.cb_preset_type.currentIndexChanged.connect(self.populate_preset_name)
self.edit_preset_name.textChanged.connect(self.validate_preset_name)
self.populate_preset_name()
def populate_preset_name(self):
self.edit_preset_name.setText(self.playlist.suggest_preset_name(self.cb_preset_type.currentText()))
def validate_preset_name(self):
if self.playlist.preset_name_exists(self.edit_preset_name.text()):
self.edit_preset_name.setStyleSheet("QLineEdit{background: #fdd;}")
return False
else:
self.edit_preset_name.setStyleSheet("QLineEdit{background: #fff;}")
return True
def accept(self):
if self.validate_preset_name():
QtGui.QDialog.accept(self)
| [
"PySide.QtGui.QDialog.accept"
] | [((1202, 1228), 'PySide.QtGui.QDialog.accept', 'QtGui.QDialog.accept', (['self'], {}), '(self)\n', (1222, 1228), False, 'from PySide import QtCore, QtGui\n')] |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import roslib
import rospy
import socket
import geometry_msgs.msg
import math
import tf
import struct
import numpy as np
from geometry_msgs.msg import PoseWithCovarianceStamped
global trans
global rot
global brtrans
global brrot
# -----------------------------------------------------------------------------
#
def initialposeCB(msg):
#robot odom-base (input)
global trans
global rot
#robot map-odom (output)
global brtrans
global brrot
#massage to translation, rotation
inittrans=(msg.pose.pose.position.x,msg.pose.pose.position.y,msg.pose.pose.position.z)
initposequot=(msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w)
initrot=tf.transformations.quaternion_matrix(initposequot)
map2foot= np.dot(tf.transformations.translation_matrix(inittrans),initrot)
odom2foot = np.dot(tf.transformations.translation_matrix(trans),tf.transformations.quaternion_matrix(rot))
foot2odom=np.linalg.inv(odom2foot)
map2odom=np.dot(map2foot,foot2odom)
br = tf.TransformBroadcaster()
#map2foot=np.dot(map2holo,holo2foot)
brtrans = (map2odom[0][3], map2odom[1][3], map2odom[2][3])
brrot = tf.transformations.quaternion_from_matrix(map2odom)
# -----------------------------------------------------------------------------
#
if __name__ == '__main__':
rospy.init_node('localizer')
listener = tf.TransformListener()
# from ros
sub = rospy.Subscriber('/initialpose', PoseWithCovarianceStamped, initialposeCB)
# from dynamic_adjuster.py
sub2 = rospy.Subscriber('/initialpose_h', PoseWithCovarianceStamped, initialposeCB)
br = tf.TransformBroadcaster()
brtrans=(0,0, 0)
brrot=(0,0,0,1)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rospy.loginfo("Getting transform for '/base_footprint'!")
try:
# obtain robot odometry to base_footprint (for pepper)
(trans, rot) = listener.lookupTransform('/odom', '/base_footprint', rospy.Time(0))
rospy.loginfo("Got transform for '/base_footprint'!")
except (tf.LookupException, tf.ConnectivityException,tf.ExtrapolationException):
rospy.logwarn("tf error. Unable to get transform for '/base_footprint'!")
continue
br.sendTransform(brtrans, brrot, rospy.Time.now(), "/odom", "/map")
rate.sleep()
rospy.loginfo("localizer.py exit...")
| [
"tf.TransformBroadcaster",
"tf.transformations.quaternion_from_matrix",
"rospy.is_shutdown",
"rospy.logwarn",
"rospy.init_node",
"rospy.Time.now",
"numpy.dot",
"tf.transformations.quaternion_matrix",
"numpy.linalg.inv",
"tf.TransformListener",
"rospy.Rate",
"rospy.Time",
"tf.transformations.translation_matrix",
"rospy.Subscriber",
"rospy.loginfo"
] | [((1028, 1078), 'tf.transformations.quaternion_matrix', 'tf.transformations.quaternion_matrix', (['initposequot'], {}), '(initposequot)\n', (1064, 1078), False, 'import tf\n'), ((1279, 1303), 'numpy.linalg.inv', 'np.linalg.inv', (['odom2foot'], {}), '(odom2foot)\n', (1292, 1303), True, 'import numpy as np\n'), ((1317, 1344), 'numpy.dot', 'np.dot', (['map2foot', 'foot2odom'], {}), '(map2foot, foot2odom)\n', (1323, 1344), True, 'import numpy as np\n'), ((1350, 1375), 'tf.TransformBroadcaster', 'tf.TransformBroadcaster', ([], {}), '()\n', (1373, 1375), False, 'import tf\n'), ((1484, 1535), 'tf.transformations.quaternion_from_matrix', 'tf.transformations.quaternion_from_matrix', (['map2odom'], {}), '(map2odom)\n', (1525, 1535), False, 'import tf\n'), ((1648, 1676), 'rospy.init_node', 'rospy.init_node', (['"""localizer"""'], {}), "('localizer')\n", (1663, 1676), False, 'import rospy\n'), ((1690, 1712), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (1710, 1712), False, 'import tf\n'), ((1733, 1807), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/initialpose"""', 'PoseWithCovarianceStamped', 'initialposeCB'], {}), "('/initialpose', PoseWithCovarianceStamped, initialposeCB)\n", (1749, 1807), False, 'import rospy\n'), ((1845, 1921), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/initialpose_h"""', 'PoseWithCovarianceStamped', 'initialposeCB'], {}), "('/initialpose_h', PoseWithCovarianceStamped, initialposeCB)\n", (1861, 1921), False, 'import rospy\n'), ((1929, 1954), 'tf.TransformBroadcaster', 'tf.TransformBroadcaster', ([], {}), '()\n', (1952, 1954), False, 'import tf\n'), ((2000, 2014), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (2010, 2014), False, 'import rospy\n'), ((2576, 2613), 'rospy.loginfo', 'rospy.loginfo', (['"""localizer.py exit..."""'], {}), "('localizer.py exit...')\n", (2589, 2613), False, 'import rospy\n'), ((1097, 1145), 'tf.transformations.translation_matrix', 'tf.transformations.translation_matrix', (['inittrans'], {}), '(inittrans)\n', (1134, 1145), False, 'import tf\n'), ((1175, 1219), 'tf.transformations.translation_matrix', 'tf.transformations.translation_matrix', (['trans'], {}), '(trans)\n', (1212, 1219), False, 'import tf\n'), ((1220, 1261), 'tf.transformations.quaternion_matrix', 'tf.transformations.quaternion_matrix', (['rot'], {}), '(rot)\n', (1256, 1261), False, 'import tf\n'), ((2026, 2045), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2043, 2045), False, 'import rospy\n'), ((2049, 2106), 'rospy.loginfo', 'rospy.loginfo', (['"""Getting transform for \'/base_footprint\'!"""'], {}), '("Getting transform for \'/base_footprint\'!")\n', (2062, 2106), False, 'import rospy\n'), ((2261, 2314), 'rospy.loginfo', 'rospy.loginfo', (['"""Got transform for \'/base_footprint\'!"""'], {}), '("Got transform for \'/base_footprint\'!")\n', (2274, 2314), False, 'import rospy\n'), ((2523, 2539), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2537, 2539), False, 'import rospy\n'), ((2243, 2256), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (2253, 2256), False, 'import rospy\n'), ((2401, 2474), 'rospy.logwarn', 'rospy.logwarn', (['"""tf error. Unable to get transform for \'/base_footprint\'!"""'], {}), '("tf error. Unable to get transform for \'/base_footprint\'!")\n', (2414, 2474), False, 'import rospy\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.