code (string, lengths 20–1.05M) | apis (sequence) | extract_api (string, lengths 75–5.24M)
---|---|---|
# Beta Version 0.7 by Nask
#
# This version is intended for test scenarios in which basic WhatsApp functions can be performed, such as:
#
# Receive messages from multiple chats
# Send messages to multiple chats
# Send images to multiple chats
# Receive images from multiple chats
# Send emojis to a contact or the search bar
# Turn images into stickers
# Send links
# Send files
# Identify who sent a message
# Identify when a message was sent
# This version is not production ready!
from datetime import datetime
class Message(object):
def __init__(self, message_sender: str, message_date: str, message_hour: str, message: str, message_id: str) -> None:
super().__init__()
self.message_sender: str = message_sender
self.message_date: datetime.date = datetime.strptime(
message_date, '%d/%m/%Y').date()
self.message_hour: datetime.time = datetime.strptime(
message_hour, '%H:%M').time()
self.message: str = message
self.message_id: str = message_id
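# Minimal usage sketch (the values below are illustrative only): the constructor
# parses the date and hour strings into datetime.date / datetime.time objects.
if __name__ == '__main__':
    msg = Message('Alice', '25/12/2021', '14:30', 'Hello!', 'msg-001')
    print(msg.message_sender, msg.message_date, msg.message_hour)
    # -> Alice 2021-12-25 14:30:00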
| [
"datetime.datetime.strptime"
] | [((804, 847), 'datetime.datetime.strptime', 'datetime.strptime', (['message_date', '"""%d/%m/%Y"""'], {}), "(message_date, '%d/%m/%Y')\n", (821, 847), False, 'from datetime import datetime\n'), ((911, 951), 'datetime.datetime.strptime', 'datetime.strptime', (['message_hour', '"""%H:%M"""'], {}), "(message_hour, '%H:%M')\n", (928, 951), False, 'from datetime import datetime\n')] |
import scrapy
from GubaDC.items import GubaItem
from urllib.parse import urljoin
import random
from GubaDC.settings import IP_POOL
baseUrl = "http://guba.sina.com.cn"
startUrl = baseUrl + "/?s=bar&name={}&type=0&page=0"
stockCode = "sh600519"
class SinaSpider(scrapy.Spider):
name = "sinaSpider"
startPageNo = 0
def start_requests(self):
yield scrapy.Request(url=startUrl.format(stockCode))
def parse(self, response):
code = response.xpath('//div[@class="blk_stock_info clearfix"]//span[@class="bsit_code"]/text()').extract_first()
code = code[1:-1]
name = response.xpath('//div[@class="blk_stock_info clearfix"]//span[@class="bsit_name"]//a/text()').extract_first()
for line in response.xpath('//div[@id="blk_list_02"]//tr[@class="tr2"]'):
Guba_info = GubaItem()
Guba_info['stockCode'] = code
Guba_info['stockName'] = name
Guba_info['Href'] = response.request.url
Guba_info['Source'] = 'SINA'
cols = line.xpath("./td")
Guba_info['readNum'] = cols[0].xpath("./span/text()").extract_first()
Guba_info['commentNum'] = cols[1].xpath("./span/text()").extract_first()
titleSelector = cols[2].xpath("./a")
Guba_info['Title'] = titleSelector.xpath("./text()").extract_first()
Guba_info['Writer'] = cols[3].xpath("./div//a/text()").extract_first()
if Guba_info['Writer'] is None:
Guba_info['Writer'] = cols[3].xpath("./div/text()").extract_first()
Guba_info['Time'] = cols[4].xpath('./text()').extract_first()
yield Guba_info
# print(Guba_info)
for link in response.xpath('//div[@class="blk_01_b"]//p[@class="page"]//a'):
request = scrapy.Request(urljoin(baseUrl, link.xpath("./@href").extract_first()))
# useThisIp = random.choice(IP_POOL)
# print('>>>>> Using proxy : %s' % useThisIp["ip"])
# request.meta["proxy"]= useThisIp["ip"]
yield request
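    # Usage note (standard Scrapy CLI; the output file name is illustrative only):
    #   scrapy crawl sinaSpider -o guba_posts.json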
| [
"GubaDC.items.GubaItem"
] | [((827, 837), 'GubaDC.items.GubaItem', 'GubaItem', ([], {}), '()\n', (835, 837), False, 'from GubaDC.items import GubaItem\n')] |
import logging
DETAILED = logging.Formatter(fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
SHORT = logging.Formatter(fmt="[%(name)-8s] %(levelname)7s: %(message)s")
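# Example wiring (not part of the original module): attach the SHORT formatter
# above to a stream handler so records are printed with that layout.
if __name__ == '__main__':
    handler = logging.StreamHandler()
    handler.setFormatter(SHORT)
    log = logging.getLogger('demo')
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    log.info('formatter attached')  # -> [demo    ]    INFO: formatter attached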
| [
"logging.Formatter"
] | [((27, 104), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (44, 104), False, 'import logging\n'), ((113, 178), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""[%(name)-8s] %(levelname)7s: %(message)s"""'}), "(fmt='[%(name)-8s] %(levelname)7s: %(message)s')\n", (130, 178), False, 'import logging\n')] |
# coding=utf-8
"""
person.urls
-----------
"""
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import Mock
from nose.tools import eq_
from waffle.models import Switch
from amo.authentication import AMOAuthentication
from django_browserid import auth
from person.models import Profile
class ProfileTest(TestCase):
def setUp(self):
self.user = User(
username='jdoe',
first_name='John',
last_name='Doe'
)
self.user.save()
self.profile = Profile()
self.profile.user = self.user
self.profile.nickname = 'doer'
self.profile.save()
def tearDown(self):
self.profile.delete()
self.user.delete()
def test_get_fullname(self):
self.assertEqual(self.user.get_profile().get_fullname(), '<NAME>')
def test_public_profile_url(self):
x_url = reverse('person_public_profile', args=['xxx'])
with_dash_url = reverse('person_public_profile', args=['abc-cde'])
eq_(with_dash_url, x_url.replace('xxx', 'abc-cde'))
with_underscore_url = reverse('person_public_profile', args=['abc_de'])
eq_(with_underscore_url, x_url.replace('xxx', 'abc_de'))
def test_public_utf_profile_url(self):
user = User.objects.create(username='12345')
profile = Profile.objects.create(user=user, nickname='ąbc')
response = self.client.get('/user/ąbc/')
eq_(response.status_code, 200)
def test_dashboard_utf(self):
user = User.objects.create(username='12345')
profile = Profile.objects.create(user=user, nickname='ąbc')
user.set_password('<PASSWORD>')
user.save()
self.client.login(username=user.username, password='<PASSWORD>')
response = self.client.get(reverse('person_dashboard'))
eq_(response.status_code, 200)
def test_user_with_question_mark(self):
user = User.objects.create(username='12345')
profile = Profile.objects.create(user=user, nickname='b?c')
# profile
response = self.client.get(profile.get_addons_url())
eq_(response.status_code, 200)
# dashboard
user.set_password('<PASSWORD>')
user.save()
self.client.login(username=user.username, password='<PASSWORD>')
response = self.client.get(reverse('person_dashboard'))
eq_(response.status_code, 200)
eq_(profile.get_profile_url(), '/user/%s/' % user.username)
def test_fake_profile(self):
resp = self.client.get(reverse('person_public_profile', args=['xxx']))
eq_(404, resp.status_code)
class BrowserIDLoginTest(TestCase):
TESTEMAIL = '<EMAIL>'
def setUp(self):
Switch.objects.create(
name='browserid-login',
active=True)
self.user = User.objects.create(
username='123', email=self.TESTEMAIL)
Profile.objects.create(user=self.user, nickname='jdoe')
# Mocking BrowserIDBackend
class BIDBackend():
def verify(self, assertion, site):
return {'email': assertion}
self.BIDBackend = auth.BrowserIDBackend
auth.BrowserIDBackend = BIDBackend
def tearDown(self):
auth.BrowserIDBackend = self.BIDBackend
def test_existing_user_login(self):
AMOAuthentication.auth_browserid_authenticate = Mock(
return_value={'id': '123'})
response = self.client.post(reverse('browserid_login'),
{'assertion': self.TESTEMAIL})
eq_(response.status_code, 200)
assert self.user.is_authenticated()
def test_user_changed_email_on_AMO(self):
auth.BrowserIDBackend.verify = Mock(return_value={'email': '<EMAIL>'})
AMOAuthentication.auth_browserid_authenticate = Mock(
return_value={'id': '123', 'email': '<EMAIL>'})
response = self.client.post(reverse('browserid_login'),
{'assertion': 'some-assertion'})
eq_(response.status_code, 200)
assert self.user.is_authenticated()
assert User.objects.filter(email='<EMAIL>')
self.assertRaises(User.DoesNotExist,
User.objects.get, email=self.TESTEMAIL)
| [
"django.contrib.auth.models.User",
"nose.tools.eq_",
"person.models.Profile.objects.create",
"mock.Mock",
"person.models.Profile",
"waffle.models.Switch.objects.create",
"django.core.urlresolvers.reverse",
"django.contrib.auth.models.User.objects.filter",
"django.contrib.auth.models.User.objects.create"
] | [((443, 500), 'django.contrib.auth.models.User', 'User', ([], {'username': '"""jdoe"""', 'first_name': '"""John"""', 'last_name': '"""Doe"""'}), "(username='jdoe', first_name='John', last_name='Doe')\n", (447, 500), False, 'from django.contrib.auth.models import User\n'), ((596, 605), 'person.models.Profile', 'Profile', ([], {}), '()\n', (603, 605), False, 'from person.models import Profile\n'), ((959, 1005), 'django.core.urlresolvers.reverse', 'reverse', (['"""person_public_profile"""'], {'args': "['xxx']"}), "('person_public_profile', args=['xxx'])\n", (966, 1005), False, 'from django.core.urlresolvers import reverse\n'), ((1030, 1080), 'django.core.urlresolvers.reverse', 'reverse', (['"""person_public_profile"""'], {'args': "['abc-cde']"}), "('person_public_profile', args=['abc-cde'])\n", (1037, 1080), False, 'from django.core.urlresolvers import reverse\n'), ((1171, 1220), 'django.core.urlresolvers.reverse', 'reverse', (['"""person_public_profile"""'], {'args': "['abc_de']"}), "('person_public_profile', args=['abc_de'])\n", (1178, 1220), False, 'from django.core.urlresolvers import reverse\n'), ((1345, 1382), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': '"""12345"""'}), "(username='12345')\n", (1364, 1382), False, 'from django.contrib.auth.models import User\n'), ((1401, 1450), 'person.models.Profile.objects.create', 'Profile.objects.create', ([], {'user': 'user', 'nickname': '"""ąbc"""'}), "(user=user, nickname='ąbc')\n", (1423, 1450), False, 'from person.models import Profile\n'), ((1508, 1538), 'nose.tools.eq_', 'eq_', (['response.status_code', '(200)'], {}), '(response.status_code, 200)\n', (1511, 1538), False, 'from nose.tools import eq_\n'), ((1589, 1626), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': '"""12345"""'}), "(username='12345')\n", (1608, 1626), False, 'from django.contrib.auth.models import User\n'), ((1645, 1694), 'person.models.Profile.objects.create', 'Profile.objects.create', ([], {'user': 'user', 'nickname': '"""ąbc"""'}), "(user=user, nickname='ąbc')\n", (1667, 1694), False, 'from person.models import Profile\n'), ((1900, 1930), 'nose.tools.eq_', 'eq_', (['response.status_code', '(200)'], {}), '(response.status_code, 200)\n', (1903, 1930), False, 'from nose.tools import eq_\n'), ((1991, 2028), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': '"""12345"""'}), "(username='12345')\n", (2010, 2028), False, 'from django.contrib.auth.models import User\n'), ((2047, 2096), 'person.models.Profile.objects.create', 'Profile.objects.create', ([], {'user': 'user', 'nickname': '"""b?c"""'}), "(user=user, nickname='b?c')\n", (2069, 2096), False, 'from person.models import Profile\n'), ((2184, 2214), 'nose.tools.eq_', 'eq_', (['response.status_code', '(200)'], {}), '(response.status_code, 200)\n', (2187, 2214), False, 'from nose.tools import eq_\n'), ((2440, 2470), 'nose.tools.eq_', 'eq_', (['response.status_code', '(200)'], {}), '(response.status_code, 200)\n', (2443, 2470), False, 'from nose.tools import eq_\n'), ((2660, 2686), 'nose.tools.eq_', 'eq_', (['(404)', 'resp.status_code'], {}), '(404, resp.status_code)\n', (2663, 2686), False, 'from nose.tools import eq_\n'), ((2782, 2840), 'waffle.models.Switch.objects.create', 'Switch.objects.create', ([], {'name': '"""browserid-login"""', 'active': '(True)'}), "(name='browserid-login', active=True)\n", (2803, 2840), False, 'from waffle.models import Switch\n'), ((2894, 2951), 
'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': '"""123"""', 'email': 'self.TESTEMAIL'}), "(username='123', email=self.TESTEMAIL)\n", (2913, 2951), False, 'from django.contrib.auth.models import User\n'), ((2977, 3032), 'person.models.Profile.objects.create', 'Profile.objects.create', ([], {'user': 'self.user', 'nickname': '"""jdoe"""'}), "(user=self.user, nickname='jdoe')\n", (2999, 3032), False, 'from person.models import Profile\n'), ((3449, 3481), 'mock.Mock', 'Mock', ([], {'return_value': "{'id': '123'}"}), "(return_value={'id': '123'})\n", (3453, 3481), False, 'from mock import Mock\n'), ((3626, 3656), 'nose.tools.eq_', 'eq_', (['response.status_code', '(200)'], {}), '(response.status_code, 200)\n', (3629, 3656), False, 'from nose.tools import eq_\n'), ((3787, 3826), 'mock.Mock', 'Mock', ([], {'return_value': "{'email': '<EMAIL>'}"}), "(return_value={'email': '<EMAIL>'})\n", (3791, 3826), False, 'from mock import Mock\n'), ((3883, 3935), 'mock.Mock', 'Mock', ([], {'return_value': "{'id': '123', 'email': '<EMAIL>'}"}), "(return_value={'id': '123', 'email': '<EMAIL>'})\n", (3887, 3935), False, 'from mock import Mock\n'), ((4082, 4112), 'nose.tools.eq_', 'eq_', (['response.status_code', '(200)'], {}), '(response.status_code, 200)\n', (4085, 4112), False, 'from nose.tools import eq_\n'), ((4172, 4208), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'email': '"""<EMAIL>"""'}), "(email='<EMAIL>')\n", (4191, 4208), False, 'from django.contrib.auth.models import User\n'), ((1863, 1890), 'django.core.urlresolvers.reverse', 'reverse', (['"""person_dashboard"""'], {}), "('person_dashboard')\n", (1870, 1890), False, 'from django.core.urlresolvers import reverse\n'), ((2403, 2430), 'django.core.urlresolvers.reverse', 'reverse', (['"""person_dashboard"""'], {}), "('person_dashboard')\n", (2410, 2430), False, 'from django.core.urlresolvers import reverse\n'), ((2604, 2650), 'django.core.urlresolvers.reverse', 'reverse', (['"""person_public_profile"""'], {'args': "['xxx']"}), "('person_public_profile', args=['xxx'])\n", (2611, 2650), False, 'from django.core.urlresolvers import reverse\n'), ((3535, 3561), 'django.core.urlresolvers.reverse', 'reverse', (['"""browserid_login"""'], {}), "('browserid_login')\n", (3542, 3561), False, 'from django.core.urlresolvers import reverse\n'), ((3989, 4015), 'django.core.urlresolvers.reverse', 'reverse', (['"""browserid_login"""'], {}), "('browserid_login')\n", (3996, 4015), False, 'from django.core.urlresolvers import reverse\n')] |
import asyncio
import logging
import aioodbc
from asab import PubSub
from ..abc.connection import Connection
#
L = logging.getLogger(__name__)
#
class ODBCConnection(Connection):
# Caution: Providing incorrect connection configuration terminates the program with 'Abort trap 6'
ConfigDefaults = {
'host': 'localhost',
'port': 3306,
'user': '',
'password': '',
'driver': '',
'db': '',
'connect_timeout': 1,
'reconnect_delay': 5.0,
'output_queue_max_size': 10,
'max_bulk_size': 2,
}
def __init__(self, app, id=None, config=None):
super().__init__(app, id=id, config=config)
self.ConnectionEvent = asyncio.Event(loop=app.Loop)
self.ConnectionEvent.clear()
self.PubSub = PubSub(app)
self.Loop = app.Loop
self._host = self.Config['host']
self._port = int(self.Config['port'])
self._user = self.Config['user']
self._password = self.Config['password']
self._connect_timeout = self.Config['connect_timeout']
self._dsn = "Driver={};Database={}".format(self.Config['driver'], self.Config['db'])
self._reconnect_delay = self.Config['reconnect_delay']
self._output_queue_max_size = self.Config['output_queue_max_size']
self._max_bulk_size = int(self.Config['max_bulk_size'])
self._conn_future = None
self._connection_request = False
self._pause = False
# Subscription
self._on_health_check('connection.open!')
app.PubSub.subscribe("Application.stop!", self._on_application_stop)
app.PubSub.subscribe("Application.tick!", self._on_health_check)
app.PubSub.subscribe("ODBCConnection.pause!", self._on_pause)
app.PubSub.subscribe("ODBCConnection.unpause!", self._on_unpause)
self._output_queue = asyncio.Queue(loop=app.Loop)
self._bulks = {} # We have a "bulk" per query
def _on_pause(self):
self._pause = True
def _on_unpause(self):
self._pause = False
def _flush(self):
for query in self._bulks.keys():
# Break if throttling was requested during the flush,
# so that put_nowait doesn't raise
if self._pause:
break
self._flush_bulk(query)
def _flush_bulk(self, query):
        # Enqueue and throttle if needed
self._output_queue.put_nowait((query, self._bulks[query]))
if self._output_queue.qsize() == self._output_queue_max_size:
self.PubSub.publish("ODBCConnection.pause!", self)
# Reset bulk
self._bulks[query] = []
def _on_application_stop(self, message_type, counter):
self._flush()
self._output_queue.put_nowait((None, None))
def _on_health_check(self, message_type):
if self._conn_future is not None:
# Connection future exists
if not self._conn_future.done():
# Connection future didn't result yet
# No sanitization needed
return
try:
self._conn_future.result()
except Exception:
# Connection future threw an error
L.exception("Unexpected connection future error")
# Connection future already resulted (with or without exception)
self._conn_future = None
assert(self._conn_future is None)
self._conn_future = asyncio.ensure_future(
self._connection(),
loop=self.Loop
)
async def _connection(self):
try:
async with aioodbc.create_pool(
host=self._host,
port=self._port,
user=self._user,
password=self._password,
dsn=self._dsn,
connect_timeout=self._connect_timeout,
loop=self.Loop) as pool:
self._conn_pool = pool
self.ConnectionEvent.set()
await self._loader()
except BaseException:
L.exception("Unexpected ODBC connection error")
raise
def acquire(self):
assert(self._conn_pool is not None)
return self._conn_pool.acquire()
def consume(self, query, data):
# Create a bulk for this query if doesn't yet exist
if query not in self._bulks:
self._bulks[query] = []
# Add data to the query's bulk
self._bulks[query].append(data)
# Flush on _max_bulk_size
if len(self._bulks[query]) >= self._max_bulk_size:
self._flush_bulk(query)
async def _loader(self):
while True:
query, data = await self._output_queue.get()
if query is None:
break
if self._output_queue.qsize() == self._output_queue_max_size - 1:
self.PubSub.publish("ODBCConnection.unpause!", self, asynchronously=True)
async with self.acquire() as conn:
async with conn.cursor() as cur:
await cur.executemany(query, data)
await conn.commit()
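# Standalone sketch (not the class API above) of the same bulk-batching idea:
# rows accumulate per query and are emitted as (query, rows) batches once
# max_bulk_size is reached, mirroring consume() and _flush_bulk().
if __name__ == '__main__':
    bulks = {}
    batches = []
    max_bulk_size = 2
    for query, row in [('INSERT INTO t VALUES (?)', (1,)),
                       ('INSERT INTO t VALUES (?)', (2,))]:
        bulks.setdefault(query, []).append(row)
        if len(bulks[query]) >= max_bulk_size:       # same trigger as consume()
            batches.append((query, bulks[query]))    # same payload as _flush_bulk()
            bulks[query] = []
    print(batches)  # -> [('INSERT INTO t VALUES (?)', [(1,), (2,)])]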
| [
"logging.getLogger",
"aioodbc.create_pool",
"asyncio.Queue",
"asyncio.Event",
"asab.PubSub"
] | [((119, 146), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (136, 146), False, 'import logging\n'), ((631, 659), 'asyncio.Event', 'asyncio.Event', ([], {'loop': 'app.Loop'}), '(loop=app.Loop)\n', (644, 659), False, 'import asyncio\n'), ((708, 719), 'asab.PubSub', 'PubSub', (['app'], {}), '(app)\n', (714, 719), False, 'from asab import PubSub\n'), ((1666, 1694), 'asyncio.Queue', 'asyncio.Queue', ([], {'loop': 'app.Loop'}), '(loop=app.Loop)\n', (1679, 1694), False, 'import asyncio\n'), ((3115, 3289), 'aioodbc.create_pool', 'aioodbc.create_pool', ([], {'host': 'self._host', 'port': 'self._port', 'user': 'self._user', 'password': 'self._password', 'dsn': 'self._dsn', 'connect_timeout': 'self._connect_timeout', 'loop': 'self.Loop'}), '(host=self._host, port=self._port, user=self._user,\n password=self._password, dsn=self._dsn, connect_timeout=self.\n _connect_timeout, loop=self.Loop)\n', (3134, 3289), False, 'import aioodbc\n')] |
"""
Sources:
https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
"""
from threading import Thread
import cv2
class Video_stream:
def __init__(self):
        # Initializes the video camera stream and reads the first frame from it
self.stream = cv2.VideoCapture(0)
(self.grabbed, self.frame) = self.stream.read()
# Initializes the variable used to indicate if the thread should be stopped
self.stopped = False
def start(self):
"""Starts the thread to read frames from the video stream."""
Thread(target=self.update, args=()).start()
return self
def update(self):
"""Reads next frame from the stream infinitely until the thread is stopped"""
while True:
if self.stopped:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self):
"""Returns the frame most recently read"""
return self.frame
def stop(self):
"""Indicate that the thread should be stopped"""
self.stopped = True
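# Usage sketch (assumes a webcam at device index 0 and a local display; press 'q' to quit):
if __name__ == '__main__':
    stream = Video_stream().start()
    try:
        while True:
            frame = stream.read()
            if frame is None:
                continue
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        stream.stop()
        cv2.destroyAllWindows()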
| [
"threading.Thread",
"cv2.VideoCapture"
] | [((298, 317), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (314, 317), False, 'import cv2\n'), ((587, 622), 'threading.Thread', 'Thread', ([], {'target': 'self.update', 'args': '()'}), '(target=self.update, args=())\n', (593, 622), False, 'from threading import Thread\n')] |
import logging
def get_query_for_mysql_load_full(table_name, columns, owner):
"""
JDBC query for full ingestion of one table
"""
logging.info(f"BUILDING FULL QUERY for {table_name}")
select_cols = ",".join(str(x) for x in columns)
return f"select {select_cols} from {table_name}"
def convert_schema_to_json(lists, labels):
"""
Input:
lists: non-empty list of n lists each of length x
labels: list of strings of length n
Output:
list of x dictionaries with n entries, each row corresponding
to a labelled row (merged from lists)
"""
dicts = []
for i in range(len(lists[0])):
        row = {}
        for j in range(len(labels)):
            row[labels[j]] = lists[j][i]
        dicts.append(row)
return dicts
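# Worked example of convert_schema_to_json (made-up values):
#   convert_schema_to_json([['id', 'name'], ['INT', 'VARCHAR']],
#                          ['COLUMN_NAME', 'DATA_TYPE'])
#   -> [{'COLUMN_NAME': 'id', 'DATA_TYPE': 'INT'},
#       {'COLUMN_NAME': 'name', 'DATA_TYPE': 'VARCHAR'}]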
def cast_columns(columns, dtypes, casts):
# perform the Oracle castings needed within the query itself.
castings = []
for c, d in zip(columns, dtypes):
if d in casts:
cast = casts[d].replace("COLUMN", c)
castings.append(cast)
else:
castings.append(c)
return castings
def get_schema_query(owner):
"""
    Query for getting schema information for all tables from INFORMATION_SCHEMA
"""
cols = ["TABLE_NAME", "COLUMN_NAME", "DATA_TYPE", "ORDINAL_POSITION"]
cols = ",".join(cols)
return f"""select {cols} from INFORMATION_SCHEMA.COLUMNS"""
def get_table_schema_query(schema_table, source_table_name):
"""
Query to get source schema for table from BQ (previously ingested).
"""
return f" select distinct * from (select COLUMN_NAME, DATA_TYPE from `{schema_table}` \
where TABLE_NAME = '{source_table_name}' order by ORDINAL_POSITION)"
def mysql_mappings():
return {
"TINYINT": "INT64",
"SMALLINT": "INT64",
"MEDIUMINT": "INT64",
"INT": "INT64",
"BIGINT": "INT64",
"DECIMAL": "NUMERIC",
"FLOAT": "FLOAT64",
"DOUBLE": "FLOAT64",
"BIT": "BOOL",
"CHAR": "STRING",
"VARCHAR": "STRING",
"TINYTEXT": "STRING",
"TEXT": "STRING",
"MEDIUMTEXT": "STRING",
"LONGTEXT": "STRING",
"BINARY": "BYTES",
"VARBINARY": "BYTES",
"DATE": "DATE",
"TIME": "TIME",
"DATETIME": "DATETIME",
"TIMESTAMP": "TIMESTAMP",
}
def mysql_to_bq(dtypes: list):
mappings = mysql_mappings()
# Dicts are not really intended for use in this way, but for a tiny one
# this is just easier
for i in range(len(dtypes)):
dtypes[i] = mappings[dtypes[i]]
return dtypes
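# Worked example (made-up dtypes): mysql_to_bq(['INT', 'VARCHAR', 'DATETIME'])
# returns ['INT64', 'STRING', 'DATETIME'] (the input list is mutated in place).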
| [
"logging.info"
] | [((148, 201), 'logging.info', 'logging.info', (['f"""BUILDING FULL QUERY for {table_name}"""'], {}), "(f'BUILDING FULL QUERY for {table_name}')\n", (160, 201), False, 'import logging\n')] |
import math
import pyglet
from pyglet import gl
import graphicutils as gu
from app import colors
from .camera import Camera
def draw_lines(vertices):
pyglet.graphics.draw(
len(vertices) // 2,
gl.GL_LINES,
('v2f', vertices),
)
def draw_circle(x, y, r, color, mode=gl.GL_LINE_LOOP, resolution=32):
gl.glColor4f(*color)
gu.draw_circle(int(x), int(y), int(r), resolution, mode)
def draw_grid():
camera = Camera.get_active()
size = int(20 * camera.zoom)
if size > 0:
gl.glColor3f(*colors.CONTEXT_GRID_COLOR)
gu.draw_grid(
camera.w,
camera.h,
int(camera.centerx),
int(camera.centery),
size,
0,
0
)
def draw_axes():
camera = Camera.get_active()
center_x = int(camera.centerx)
center_y = int(camera.centery)
gl.glColor3f(1, 0, 0)
gu.draw_arrow(20, 40, 40, 0)
draw_lines((0, center_y, camera.w, center_y))
gl.glColor3f(0, 1, 0)
gu.draw_arrow(20, 40, 0, 40)
draw_lines((center_x, 0, center_x, camera.h))
def draw_path(obj):
camera = Camera.get_active()
gl.glColor4f(1, 0.76, 0.12, 0.8)
gl.glBegin(gl.GL_LINES)
for x, y in obj.path[:100]:
pos_x = int(x * camera.zoom + camera.centerx)
pos_y = int(y * camera.zoom + camera.centery)
gl.glVertex2d(pos_x, pos_y)
gl.glEnd()
def draw_object(obj):
camera = Camera.get_active()
pos = (obj.position * camera.zoom)
x = int(pos[0] + camera.centerx)
y = int(pos[1] + camera.centery)
gl.glColor4f(0, 1, 0, 1)
for force in obj.forces:
if any(force):
w = int(force[0] * camera.zoom)
h = int(force[1] * camera.zoom)
gu.draw_arrow(x, y, w, h)
gl.glColor4f(0.2, 0.5, 1, 1)
if any(obj.velocity):
w = int(obj.velocity[0] * camera.zoom)
h = int(obj.velocity[1] * camera.zoom)
gu.draw_arrow(x, y, w, h)
gl.glColor4f(0.9, 0.29, 0.58, 1)
if any(obj.acceleration):
w = int(obj.acceleration[0] * camera.zoom)
h = int(obj.acceleration[1] * camera.zoom)
gu.draw_arrow(x, y, w, h)
draw_circle(
x,
y,
20 * camera.zoom,
colors.RIGIDBODY_COLOR,
mode=gl.GL_POLYGON
)
draw_circle(
x,
y,
20 * camera.zoom,
colors.RIGIDBODY_BORDER_COLOR,
)
def draw_ruler(x1, y1, x2, y2):
camera = Camera.get_active()
vx1 = int(x1 * camera.zoom + camera.centerx)
vy1 = int(y1 * camera.zoom + camera.centery)
vx2 = int(x2 * camera.zoom + camera.centerx)
vy2 = int(y2 * camera.zoom + camera.centery)
gl.glColor4f(0.27, 0.63, 0.78, 0.8)
gu.draw_dashed_line(vx2, vy2, vx1, vy1)
gu.draw_circle(vx1, vy1, 4, 8, gl.GL_LINE_LOOP)
gu.draw_circle(vx2, vy2, 4, 8, gl.GL_LINE_LOOP)
size = math.hypot(x2 - x1, y2 - y1)
label = pyglet.text.Label(
font_name='verdana',
font_size=12,
color=(255, 255, 255, 200))
label.text = f'{size:.2f}m'
label.x = (vx1 + vx2) // 2
label.y = (vy1 + vy2) // 2
label.draw()
def draw_select_area(x1, y1, x2, y2):
rect = (x1, y1, x2, y1, x2, y2, x1, y2)
gl.glColor4f(0.1, 0.2, 0.3, 0.2)
pyglet.graphics.draw(4, gl.GL_QUADS, ('v2f', rect))
gl.glColor4f(0.3, 0.5, 0.8, 0.5)
    pyglet.graphics.draw(4, gl.GL_LINE_LOOP, ('v2f', rect))
| [
"pyglet.gl.glVertex2d",
"graphicutils.draw_arrow",
"pyglet.gl.glEnd",
"pyglet.text.Label",
"pyglet.gl.glColor4f",
"graphicutils.draw_circle",
"pyglet.gl.glBegin",
"pyglet.graphics.draw",
"graphicutils.draw_dashed_line",
"math.hypot",
"pyglet.gl.glColor3f"
] | [((338, 358), 'pyglet.gl.glColor4f', 'gl.glColor4f', (['*color'], {}), '(*color)\n', (350, 358), False, 'from pyglet import gl\n'), ((888, 909), 'pyglet.gl.glColor3f', 'gl.glColor3f', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (900, 909), False, 'from pyglet import gl\n'), ((914, 942), 'graphicutils.draw_arrow', 'gu.draw_arrow', (['(20)', '(40)', '(40)', '(0)'], {}), '(20, 40, 40, 0)\n', (927, 942), True, 'import graphicutils as gu\n'), ((998, 1019), 'pyglet.gl.glColor3f', 'gl.glColor3f', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (1010, 1019), False, 'from pyglet import gl\n'), ((1024, 1052), 'graphicutils.draw_arrow', 'gu.draw_arrow', (['(20)', '(40)', '(0)', '(40)'], {}), '(20, 40, 0, 40)\n', (1037, 1052), True, 'import graphicutils as gu\n'), ((1163, 1195), 'pyglet.gl.glColor4f', 'gl.glColor4f', (['(1)', '(0.76)', '(0.12)', '(0.8)'], {}), '(1, 0.76, 0.12, 0.8)\n', (1175, 1195), False, 'from pyglet import gl\n'), ((1200, 1223), 'pyglet.gl.glBegin', 'gl.glBegin', (['gl.GL_LINES'], {}), '(gl.GL_LINES)\n', (1210, 1223), False, 'from pyglet import gl\n'), ((1404, 1414), 'pyglet.gl.glEnd', 'gl.glEnd', ([], {}), '()\n', (1412, 1414), False, 'from pyglet import gl\n'), ((1595, 1619), 'pyglet.gl.glColor4f', 'gl.glColor4f', (['(0)', '(1)', '(0)', '(1)'], {}), '(0, 1, 0, 1)\n', (1607, 1619), False, 'from pyglet import gl\n'), ((1803, 1831), 'pyglet.gl.glColor4f', 'gl.glColor4f', (['(0.2)', '(0.5)', '(1)', '(1)'], {}), '(0.2, 0.5, 1, 1)\n', (1815, 1831), False, 'from pyglet import gl\n'), ((1991, 2023), 'pyglet.gl.glColor4f', 'gl.glColor4f', (['(0.9)', '(0.29)', '(0.58)', '(1)'], {}), '(0.9, 0.29, 0.58, 1)\n', (2003, 2023), False, 'from pyglet import gl\n'), ((2709, 2744), 'pyglet.gl.glColor4f', 'gl.glColor4f', (['(0.27)', '(0.63)', '(0.78)', '(0.8)'], {}), '(0.27, 0.63, 0.78, 0.8)\n', (2721, 2744), False, 'from pyglet import gl\n'), ((2749, 2788), 'graphicutils.draw_dashed_line', 'gu.draw_dashed_line', (['vx2', 'vy2', 'vx1', 'vy1'], {}), '(vx2, vy2, vx1, vy1)\n', (2768, 2788), True, 'import graphicutils as gu\n'), ((2793, 2840), 'graphicutils.draw_circle', 'gu.draw_circle', (['vx1', 'vy1', '(4)', '(8)', 'gl.GL_LINE_LOOP'], {}), '(vx1, vy1, 4, 8, gl.GL_LINE_LOOP)\n', (2807, 2840), True, 'import graphicutils as gu\n'), ((2845, 2892), 'graphicutils.draw_circle', 'gu.draw_circle', (['vx2', 'vy2', '(4)', '(8)', 'gl.GL_LINE_LOOP'], {}), '(vx2, vy2, 4, 8, gl.GL_LINE_LOOP)\n', (2859, 2892), True, 'import graphicutils as gu\n'), ((2905, 2933), 'math.hypot', 'math.hypot', (['(x2 - x1)', '(y2 - y1)'], {}), '(x2 - x1, y2 - y1)\n', (2915, 2933), False, 'import math\n'), ((2946, 3031), 'pyglet.text.Label', 'pyglet.text.Label', ([], {'font_name': '"""verdana"""', 'font_size': '(12)', 'color': '(255, 255, 255, 200)'}), "(font_name='verdana', font_size=12, color=(255, 255, 255, 200)\n )\n", (2963, 3031), False, 'import pyglet\n'), ((3252, 3284), 'pyglet.gl.glColor4f', 'gl.glColor4f', (['(0.1)', '(0.2)', '(0.3)', '(0.2)'], {}), '(0.1, 0.2, 0.3, 0.2)\n', (3264, 3284), False, 'from pyglet import gl\n'), ((3289, 3340), 'pyglet.graphics.draw', 'pyglet.graphics.draw', (['(4)', 'gl.GL_QUADS', "('v2f', rect)"], {}), "(4, gl.GL_QUADS, ('v2f', rect))\n", (3309, 3340), False, 'import pyglet\n'), ((3345, 3377), 'pyglet.gl.glColor4f', 'gl.glColor4f', (['(0.3)', '(0.5)', '(0.8)', '(0.5)'], {}), '(0.3, 0.5, 0.8, 0.5)\n', (3357, 3377), False, 'from pyglet import gl\n'), ((3382, 3437), 'pyglet.graphics.draw', 'pyglet.graphics.draw', (['(4)', 'gl.GL_LINE_LOOP', "('v2f', rect)"], {}), "(4, gl.GL_LINE_LOOP, ('v2f', rect))\n", 
(3402, 3437), False, 'import pyglet\n'), ((530, 570), 'pyglet.gl.glColor3f', 'gl.glColor3f', (['*colors.CONTEXT_GRID_COLOR'], {}), '(*colors.CONTEXT_GRID_COLOR)\n', (542, 570), False, 'from pyglet import gl\n'), ((1372, 1399), 'pyglet.gl.glVertex2d', 'gl.glVertex2d', (['pos_x', 'pos_y'], {}), '(pos_x, pos_y)\n', (1385, 1399), False, 'from pyglet import gl\n'), ((1960, 1985), 'graphicutils.draw_arrow', 'gu.draw_arrow', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (1973, 1985), True, 'import graphicutils as gu\n'), ((2164, 2189), 'graphicutils.draw_arrow', 'gu.draw_arrow', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (2177, 2189), True, 'import graphicutils as gu\n'), ((1772, 1797), 'graphicutils.draw_arrow', 'gu.draw_arrow', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (1785, 1797), True, 'import graphicutils as gu\n')] |
import shutil
import os
import pandas as pd
from pyarrow import Table, compat, Array, Column
from pyarrow.compat import guid
from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, \
read_table, write_table
def _to_category_cols(df, categories):
category_cols = categories or list(df.dtypes.loc[df.dtypes == 'category'].index)
category_cols = {c: df.loc[:, c].cat.categories for c in category_cols}
return category_cols
def _upsert_dataframes(df, old_df):
df = df.loc[~df.index.duplicated(keep='first')] # remove dupes in subgroup just to be sure
sub_cols = df.columns
old_sub_cols = old_df.columns
if set(sub_cols) != set(old_sub_cols):
raise ValueError('The columns in old and new groups do not match')
dft = pd.DataFrame(index=df.index)
dft['__new__data__'] = 1.0
result = old_df.join(dft, how='outer')
upd_rows = pd.notnull(result.__new__data__)
result.loc[upd_rows, sub_cols] = df
df = result[sub_cols].copy()
if len(df.loc[df.index.duplicated()]):
raise ValueError('Unexpected duplicates found in resulting dataset')
return df
def upsert_to_dataset(table, root_path, partition_cols=None,
filesystem=None, preserve_index=True,
temp_folder=None, categories=None, **kwargs):
if filesystem is None:
fs = _get_fs_from_path(root_path)
else:
fs = _ensure_filesystem(filesystem)
_mkdir_if_not_exists(fs, root_path)
if temp_folder:
if not os.path.exists(temp_folder):
temp_folder = None
if partition_cols is not None and len(partition_cols) > 0:
# df is the data in the new table
df = table.to_pandas()
partition_keys = [df[col] for col in partition_cols]
data_df = df.drop(partition_cols, axis='columns')
data_cols = df.columns.drop(partition_cols)
if len(data_cols) == 0:
raise ValueError("No data left to save outside partition columns")
subschema = table.schema
# ARROW-2891: Ensure the output_schema is preserved when writing a
# partitioned dataset
for partition_col in partition_cols:
subschema = subschema.remove(
subschema.get_field_index(partition_col))
for keys, subgroup in data_df.groupby(partition_keys):
if not isinstance(keys, tuple):
keys = (keys,)
subdir = "/".join(
["{colname}={value}".format(colname=name, value=val)
for name, val in zip(partition_cols, keys)])
prefix = "/".join([root_path, subdir])
_mkdir_if_not_exists(fs, prefix)
existing_files = [f for f in os.listdir(prefix) if f.endswith('.parquet')]
if len(existing_files) > 1:
raise ValueError('Unsupported scenario, multiple files found in path %s' % prefix)
if len(existing_files) == 1:
outfile = existing_files[0]
full_path = "/".join([prefix, outfile])
old_table = read_table(full_path)
category_cols = _to_category_cols(subgroup, categories) # get categories before merging
old_subgroup = old_table.to_pandas()
# TODO: compare old schema with new
subgroup = _upsert_dataframes(subgroup, old_subgroup)
# subgroup = pd.concat([subgroup, old_subgroup[~old_subgroup.index.isin(subgroup.index.values)]])
for c, v in category_cols.items():
subgroup.loc[:, c] = subgroup.loc[:, c].astype('category', categories=v)
else:
outfile = compat.guid() + ".parquet"
full_path = "/".join([prefix, outfile])
subtable = Table.from_pandas(subgroup,
preserve_index=preserve_index,
schema=subschema)
write_file = os.path.join(temp_folder, outfile) if temp_folder else full_path
with fs.open(write_file, 'wb') as f:
write_table(subtable, f, **kwargs)
if temp_folder:
shutil.move(write_file, full_path)
else:
existing_files = [f for f in os.listdir(root_path) if f.endswith('.parquet')]
if len(existing_files) > 1:
raise ValueError('Unsupported scenario, multiple files found in path %s' % root_path)
if len(existing_files) == 1:
# append use case
outfile = existing_files[0]
full_path = "/".join([root_path, outfile])
old_table = read_table(full_path)
subgroup = table.to_pandas()
category_cols = _to_category_cols(subgroup, categories)
old_subgroup = old_table.to_pandas()
# TODO: compare old schema with new
subgroup = _upsert_dataframes(subgroup, old_subgroup)
# subgroup = pd.concat([old_subgroup[~old_subgroup.index.isin(subgroup.index)], subgroup])
for c, v in category_cols.items():
subgroup.loc[:, c] = subgroup.loc[:, c].astype('category', categories=v)
schema = table.schema
table = Table.from_pandas(
subgroup,
preserve_index=preserve_index,
schema=schema
)
else:
# write use case
outfile = compat.guid() + ".parquet"
full_path = "/".join([root_path, outfile])
write_file = os.path.join(temp_folder, outfile) if temp_folder else full_path
with fs.open(write_file, 'wb') as f:
write_table(table, f, **kwargs)
if temp_folder:
shutil.move(write_file, full_path)
def write_to_dataset(table, root_path, partition_cols=None,
filesystem=None, preserve_index=True, **kwargs):
"""
Wrapper around parquet.write_table for writing a Table to
Parquet format by partitions.
For each combination of partition columns and values,
a subdirectories are created in the following
manner:
root_dir/
group1=value1
group2=value1
<uuid>.parquet
group2=value2
<uuid>.parquet
group1=valueN
group2=value1
<uuid>.parquet
group2=valueN
<uuid>.parquet
Parameters
----------
table : pyarrow.Table
root_path : string,
The root directory of the dataset
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
partition_cols : list,
Column names by which to partition the dataset
Columns are partitioned in the order they are given
preserve_index : bool,
Parameter for instantiating Table; preserve pandas index or not.
**kwargs : dict, kwargs for write_table function.
"""
if filesystem is None:
fs = _get_fs_from_path(root_path)
else:
fs = _ensure_filesystem(filesystem)
_mkdir_if_not_exists(fs, root_path)
if partition_cols is not None and len(partition_cols) > 0:
        df = table.to_pandas()
        partition_keys = [df[col] for col in partition_cols]
        data_df = df.drop(partition_cols, axis='columns')
        data_cols = df.columns.drop(partition_cols)
        if len(data_cols) == 0:
            raise ValueError('No data left to save outside partition columns')
subschema = table.schema
# ARROW-2891: Ensure the output_schema is preserved when writing a
# partitioned dataset
for partition_col in partition_cols:
subschema = subschema.remove(
subschema.get_field_index(partition_col))
for keys, subgroup in data_df.groupby(partition_keys):
if not isinstance(keys, tuple):
keys = (keys,)
subdir = '/'.join(
['{colname}={value}'.format(colname=name, value=val)
for name, val in zip(partition_cols, keys)])
            subtable = Table.from_pandas(subgroup,
                                          preserve_index=preserve_index,
                                          schema=subschema,
                                          safe=False)
prefix = '/'.join([root_path, subdir])
_mkdir_if_not_exists(fs, prefix)
outfile = guid() + '.parquet'
full_path = '/'.join([prefix, outfile])
with fs.open(full_path, 'wb') as f:
write_table(subtable, f, **kwargs)
else:
outfile = guid() + '.parquet'
full_path = '/'.join([root_path, outfile])
with fs.open(full_path, 'wb') as f:
            write_table(table, f, **kwargs)
| [
"os.path.exists",
"pyarrow.parquet.read_table",
"os.listdir",
"shutil.move",
"pyarrow.Table.from_pandas",
"os.path.join",
"pyarrow.parquet._mkdir_if_not_exists",
"pyarrow.parquet._get_fs_from_path",
"pyarrow.compat.guid",
"pandas.DataFrame",
"pyarrow.parquet.write_table",
"pandas.notnull",
"pyarrow.parquet._ensure_filesystem"
] | [((794, 822), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (806, 822), True, 'import pandas as pd\n'), ((912, 944), 'pandas.notnull', 'pd.notnull', (['result.__new__data__'], {}), '(result.__new__data__)\n', (922, 944), True, 'import pandas as pd\n'), ((1471, 1506), 'pyarrow.parquet._mkdir_if_not_exists', '_mkdir_if_not_exists', (['fs', 'root_path'], {}), '(fs, root_path)\n', (1491, 1506), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((7055, 7090), 'pyarrow.parquet._mkdir_if_not_exists', '_mkdir_if_not_exists', (['fs', 'root_path'], {}), '(fs, root_path)\n', (7075, 7090), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((1383, 1411), 'pyarrow.parquet._get_fs_from_path', '_get_fs_from_path', (['root_path'], {}), '(root_path)\n', (1400, 1411), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((1435, 1465), 'pyarrow.parquet._ensure_filesystem', '_ensure_filesystem', (['filesystem'], {}), '(filesystem)\n', (1453, 1465), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((6967, 6995), 'pyarrow.parquet._get_fs_from_path', '_get_fs_from_path', (['root_path'], {}), '(root_path)\n', (6984, 6995), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((7019, 7049), 'pyarrow.parquet._ensure_filesystem', '_ensure_filesystem', (['filesystem'], {}), '(filesystem)\n', (7037, 7049), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((1542, 1569), 'os.path.exists', 'os.path.exists', (['temp_folder'], {}), '(temp_folder)\n', (1556, 1569), False, 'import os\n'), ((2668, 2700), 'pyarrow.parquet._mkdir_if_not_exists', '_mkdir_if_not_exists', (['fs', 'prefix'], {}), '(fs, prefix)\n', (2688, 2700), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((3806, 3882), 'pyarrow.Table.from_pandas', 'Table.from_pandas', (['subgroup'], {'preserve_index': 'preserve_index', 'schema': 'subschema'}), '(subgroup, preserve_index=preserve_index, schema=subschema)\n', (3823, 3882), False, 'from pyarrow import Table, compat, Array, Column\n'), ((4650, 4671), 'pyarrow.parquet.read_table', 'read_table', (['full_path'], {}), '(full_path)\n', (4660, 4671), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((5237, 5310), 'pyarrow.Table.from_pandas', 'Table.from_pandas', (['subgroup'], {'preserve_index': 'preserve_index', 'schema': 'schema'}), '(subgroup, preserve_index=preserve_index, schema=schema)\n', (5254, 5310), False, 'from pyarrow import Table, compat, Array, Column\n'), ((5542, 5576), 'os.path.join', 'os.path.join', (['temp_folder', 'outfile'], {}), '(temp_folder, outfile)\n', (5554, 5576), False, 'import os\n'), ((5664, 5695), 'pyarrow.parquet.write_table', 'write_table', (['table', 'f'], {}), '(table, f, **kwargs)\n', (5675, 5695), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((5732, 5766), 'shutil.move', 'shutil.move', (['write_file', 'full_path'], {}), '(write_file, full_path)\n', (5743, 5766), 
False, 'import shutil\n'), ((8428, 8460), 'pyarrow.parquet._mkdir_if_not_exists', '_mkdir_if_not_exists', (['fs', 'prefix'], {}), '(fs, prefix)\n', (8448, 8460), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((8682, 8688), 'pyarrow.compat.guid', 'guid', ([], {}), '()\n', (8686, 8688), False, 'from pyarrow.compat import guid\n'), ((8809, 8840), 'pyarrow.parquet.write_table', 'write_table', (['table', 'f'], {}), '(table, f, **kwargs)\n', (8820, 8840), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((3096, 3117), 'pyarrow.parquet.read_table', 'read_table', (['full_path'], {}), '(full_path)\n', (3106, 3117), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((3990, 4024), 'os.path.join', 'os.path.join', (['temp_folder', 'outfile'], {}), '(temp_folder, outfile)\n', (4002, 4024), False, 'import os\n'), ((4120, 4154), 'pyarrow.parquet.write_table', 'write_table', (['subtable', 'f'], {}), '(subtable, f, **kwargs)\n', (4131, 4154), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((4199, 4233), 'shutil.move', 'shutil.move', (['write_file', 'full_path'], {}), '(write_file, full_path)\n', (4210, 4233), False, 'import shutil\n'), ((4281, 4302), 'os.listdir', 'os.listdir', (['root_path'], {}), '(root_path)\n', (4291, 4302), False, 'import os\n'), ((5438, 5451), 'pyarrow.compat.guid', 'compat.guid', ([], {}), '()\n', (5449, 5451), False, 'from pyarrow import Table, compat, Array, Column\n'), ((8483, 8489), 'pyarrow.compat.guid', 'guid', ([], {}), '()\n', (8487, 8489), False, 'from pyarrow.compat import guid\n'), ((8619, 8653), 'pyarrow.parquet.write_table', 'write_table', (['subtable', 'f'], {}), '(subtable, f, **kwargs)\n', (8630, 8653), False, 'from pyarrow.parquet import _get_fs_from_path, _ensure_filesystem, _mkdir_if_not_exists, read_table, write_table\n'), ((2742, 2760), 'os.listdir', 'os.listdir', (['prefix'], {}), '(prefix)\n', (2752, 2760), False, 'import os\n'), ((3700, 3713), 'pyarrow.compat.guid', 'compat.guid', ([], {}), '()\n', (3711, 3713), False, 'from pyarrow import Table, compat, Array, Column\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Area under 1-D and 2-D curves, various methods
#
# **<NAME>, PhD**
#
# This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>.
#
# Original (Matlab) CompEcon file: **demqua03.m**
#
# Running this file requires the Python version of CompEcon. This can be installed with pip by running
#
# !pip install compecon --upgrade
#
# <i>Last updated: 2021-Oct-01</i>
# <hr>
# ## About
#
# Uni- and bivariate integration using Newton-Cotes, Gaussian, Monte Carlo, and quasi-Monte Carlo quadrature methods.
# ## Initial tasks
# In[1]:
import numpy as np
from compecon import qnwtrap, qnwsimp, qnwlege, demo
import matplotlib.pyplot as plt
import pandas as pd
# In[2]:
quadmethods = [qnwtrap, qnwsimp, qnwlege]
# ### Make support function
# In[3]:
a, b = -1, 1
nlist = [5, 11, 21, 31]
N = len(nlist)
def quad(func, qnw, n):
xi, wi = qnw(n,a,b)
return np.dot(func(xi),wi)
# ## Evaluating
# $\int_{-1}^1e^{-x}dx$
# In[4]:
def f(x):
return np.exp(-x)
f_quad = np.array([[quad(f, qnw, ni) for qnw in quadmethods] for ni in nlist])
f_true = np.exp(1) - 1/np.exp(1)
f_error = np.log10(np.abs(f_quad/f_true - 1))
# ## Evaluating
# $\int_{-1}^1\sqrt{|x|}dx$
# In[5]:
def g(x):
return np.sqrt(np.abs(x))
g_quad = np.array([[quad(g, qnw, ni) for qnw in quadmethods] for ni in nlist])
g_true = 4/3
g_error = np.log10(np.abs(g_quad/g_true - 1))
# ## Make table with results
# In[6]:
methods = ['Trapezoid rule', "Simpson's rule", 'Gauss-Legendre']
functions = [r'$\int_{-1}^1e^{-x}dx$', r'$\int_{-1}^1\sqrt{|x|}dx$']
results = pd.concat(
[pd.DataFrame(errors, columns=methods, index=nlist) for errors in (f_error, g_error)],
keys=functions)
results
# ## Plot the functions
# In[7]:
a, b, n = -1, 1, 301
x = np.linspace(a, b, n)
options = dict(xlim=[a,b], xticks=[-1,0,1], yticks=[0])
fig, axs = plt.subplots(1, 2, figsize=[10,4])
axs[0].plot(x, f(x), linewidth=3)
axs[0].set(title='$e^{-x}$', ylim=[0,f(a)], **options)
axs[1].plot(x, g(x), linewidth=3)
axs[1].set(title=r'$\sqrt{|x|}$', ylim=[0,g(a)], **options);
# ### Export figure and table
# In[8]:
#results.to_latex('demqua03.tex', escape=False, float_format='%.1f')
#demo.savefig([plt.gcf()], name='demqua03')
| [
"numpy.abs",
"numpy.exp",
"numpy.linspace",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] | [((1972, 1992), 'numpy.linspace', 'np.linspace', (['a', 'b', 'n'], {}), '(a, b, n)\n', (1983, 1992), True, 'import numpy as np\n'), ((2062, 2097), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[10, 4]'}), '(1, 2, figsize=[10, 4])\n', (2074, 2097), True, 'import matplotlib.pyplot as plt\n'), ((1183, 1193), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1189, 1193), True, 'import numpy as np\n'), ((1283, 1292), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (1289, 1292), True, 'import numpy as np\n'), ((1326, 1353), 'numpy.abs', 'np.abs', (['(f_quad / f_true - 1)'], {}), '(f_quad / f_true - 1)\n', (1332, 1353), True, 'import numpy as np\n'), ((1563, 1590), 'numpy.abs', 'np.abs', (['(g_quad / g_true - 1)'], {}), '(g_quad / g_true - 1)\n', (1569, 1590), True, 'import numpy as np\n'), ((1297, 1306), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (1303, 1306), True, 'import numpy as np\n'), ((1440, 1449), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1446, 1449), True, 'import numpy as np\n'), ((1794, 1844), 'pandas.DataFrame', 'pd.DataFrame', (['errors'], {'columns': 'methods', 'index': 'nlist'}), '(errors, columns=methods, index=nlist)\n', (1806, 1844), True, 'import pandas as pd\n')] |
# Assignment base class
# @author: <NAME>
# (c) <NAME> 2012
from AGrader.AgraderWorkflow import AgraderWorkflow
from AGrader.Assignment import Assignment
from os import listdir
from sys import path
class Workspace(AgraderWorkflow):
'''
Singleton controller class.
'''
def __init__(self, args):
#super(Workspace, self).__init__()
self.assignments = []
self.args = args
# set the UI
# do this first so that importing other functions can interact with the user
self.ui = None
try:
# handle the user setting a non-interactive session
if args.ui in ['none', 'off']:
args.interactive = False
except AttributeError:
pass
finally:
# hard-coded default
if self.ui is None:
from UI.AgraderCLUI import AgraderCLUI
self.ui = AgraderCLUI(args)
if args is not None and not args.interactive:
self.ui.setInteractive(False)
# set the Gradebook
#Gdata is the default gradebook spreadsheet
self.gradebook = None
if args is None or args.gradebook.lower() == 'gdata':
from AGrader.Gradebook.Gdata import GdataSpreadsheet
self.gradebook = GdataSpreadsheet(self.ui, args)
elif args is not None and args.gradebook.lower() == 'file':
from AGrader.Gradebook.FileGradebook import FileGradebook
self.gradebook = FileGradebook(self.ui, args)
elif args is not None and args.gradebook != 'none':
self.ui.notifyError('Unrecognized gradebook %s. Abort?' % args.gradebook)
__currentWorkspace = None
@staticmethod
def GetDefault(args=None):
'''
Create a default Workspace instance given the possibly specified arguments. Will cache this instance.
'''
if not Workspace.__currentWorkspace:
workspace = Workspace(args)
Workspace.__currentWorkspace = workspace
return Workspace.__currentWorkspace
@staticmethod
def GetWorkspace(id=None):
if not Workspace.__currentWorkspace:
Workspace.__currentWorkspace = Workspace.GetDefault()
return Workspace.__currentWorkspace
def addAssignment(self, assignment):
self.assignments.append(assignment)
@staticmethod
def __generateAndCallSubmissions(submission_generator, args):
def __f__():
for sub in submission_generator(args):
sub()
return __f__
def getAssignments(self, assignment_dir):
'''Import all of the assignments in the specified directory.
Currently only imports the assignment specified by the argument assignment_file'''
for modname in listdir(assignment_dir):
# only try importing file if it's a python file and it matches the assignment file we want
if modname.endswith(".py") and modname[:-3] == self.args.assignment_file:
if self.args.verbose:
self.ui.notify('Checking module %s' % modname)
# look also in the assignment_dir directory when importing
oldpath, path[:] = path[:], path + [assignment_dir]
try:
module = __import__(modname[:-3])
except ImportError as e:
self.ui.notifyError("Problem importing file %s" % ("%s: %s" % (modname, e) if self.args.verbose else modname))
continue
finally: # always restore the real path
path[:] = oldpath
# try loading assignments
assignments = []
for attr in dir(module):
theClass = getattr(module, attr)
if self.args.verbose:
self.ui.notify('Checking class %s' % attr)
try:
#this is a good assignment if it implements Assignment but isn't that base class itself
isassignment = issubclass(theClass, Assignment) and theClass is not Assignment
if isassignment:
assignments.append(theClass)
if self.args.verbose:
self.ui.notify('Found assignment %s' % theClass)
except TypeError as e:
if self.args.verbose:
self.ui.notify(e)
pass
generator = None
# try getting a submission generator if we found any assignments
if assignments:
try:
generator = Workspace.__generateAndCallSubmissions(module.SubmissionGenerator, self.args)
#TODO: scrub some things off the args? give some object?
self.addAssignment(generator)
if self.args.verbose:
self.ui.notify('Found SubmissionGenerator %s' % generator)
except AttributeError as e:
#no assignment generator
if self.args.verbose:
self.ui.notify(e)
self.ui.notifyError("No submission generator for assignment module %s" % module)
if self.args.verbose:
self.ui.notify('adding assignments themselves since no submission generator')
for a in assignments:
self.addAssignment(a)
if not self.assignments:
self.ui.notifyError("No assignments found.")
return self.assignments
def __call__(self):
'''Executes the workflow. Calls setup callbacks, runs each assignment (submitting grades after each one),
then calls cleanup callbacks. The callbacks take the workspace as an argument.'''
self.runCallbacks('setup', self)
for a in self.getAssignments(self.args.assignment_dir):
a()
self.runCallbacks('cleanup', self)
def getGrades(self, key):
'''
Returns the gradebook associated with the given key. An empty one if the gradebook isn't connected.
'''
if self.gradebook:
return self.gradebook.getGrades(key)
if self.args.gradebook != 'none':
self.ui.notifyError('No gradebook connected! Abort? ')
return {} #blank gradebook if they want to continue
| [
"AGrader.Gradebook.Gdata.GdataSpreadsheet",
"os.listdir",
"UI.AgraderCLUI.AgraderCLUI",
"AGrader.Gradebook.FileGradebook.FileGradebook"
] | [((2803, 2826), 'os.listdir', 'listdir', (['assignment_dir'], {}), '(assignment_dir)\n', (2810, 2826), False, 'from os import listdir\n'), ((1303, 1334), 'AGrader.Gradebook.Gdata.GdataSpreadsheet', 'GdataSpreadsheet', (['self.ui', 'args'], {}), '(self.ui, args)\n', (1319, 1334), False, 'from AGrader.Gradebook.Gdata import GdataSpreadsheet\n'), ((913, 930), 'UI.AgraderCLUI.AgraderCLUI', 'AgraderCLUI', (['args'], {}), '(args)\n', (924, 930), False, 'from UI.AgraderCLUI import AgraderCLUI\n'), ((1502, 1530), 'AGrader.Gradebook.FileGradebook.FileGradebook', 'FileGradebook', (['self.ui', 'args'], {}), '(self.ui, args)\n', (1515, 1530), False, 'from AGrader.Gradebook.FileGradebook import FileGradebook\n')] |
from math import ceil
from bitstring import BitArray, Bits, ConstBitStream
from construct import Adapter
class RL:
@staticmethod
def repetitions(seq):
"""Input: sequence of values, Output: sequence of (value, repeat count)."""
i = iter(seq)
prev = next(i)
count = 1
for v in i:
if v == prev:
count += 1
else:
yield prev, count
prev = v
count = 1
yield prev, count
@staticmethod
def compress(data, bit_length):
"""Input: data bytes, bit length, Output: RLE'd bytes"""
# TODO: This could be made more efficient by encoding the run-length
# as count-n, ie 0 means a run of n, where n = break_even. Then we
# can encode longer runs up to 255+n. But this needs changes in the
# blit engine.
break_even = ceil(8 / (bit_length + 1))
bits = BitArray()
for value, count in RL.repetitions(data):
while count > break_even:
chunk = min(count, 0x100)
bits.append('0b1')
bits.append(bytes([chunk - 1]))
count -= chunk
bits.append(BitArray(uint=value, length=bit_length))
for x in range(count):
bits.append('0b0')
bits.append(BitArray(uint=value, length=bit_length))
return bits.tobytes()
@staticmethod
def decompress(data, bit_length, output_length):
stream = ConstBitStream(bytes=data)
result = []
while len(result) < output_length:
t = stream.read(1)
if t:
count = stream.read(8).uint + 1
else:
count = 1
result.extend([stream.read(bit_length).uint] * count)
return bytes(result)
class PK:
@staticmethod
def compress(data, bit_length):
return BitArray().join(BitArray(uint=x, length=bit_length) for x in data).tobytes()
@staticmethod
def decompress(data, bit_length, num_pixels):
return bytes(i.uint for i in Bits(bytes=data).cut(bit_length))
packers = {cls.__name__: cls for cls in (PK, RL)}
class ImageCompressor(Adapter):
def bit_length(self, obj):
"""Compute the required bit length for image data.
Uses the count of items in the palette to determine how
densely we can pack the image data.
"""
if obj.get('type', None) == "RW":
return 8
else:
return max(1, (len(obj['data']['palette']) - 1).bit_length())
def num_pixels(self, obj):
return obj['data']['width'] * obj['data']['height']
def _decode(self, obj, context, path):
if obj['type'] != 'RW':
obj['data']['pixels'] = packers[obj['type']].decompress(
obj['data']['pixels'], self.bit_length(obj), self.num_pixels(obj)
)
return obj
def _encode(self, obj, context, path):
obj = obj.copy() # we are going to mutate this, so make a deep copy
obj['data'] = obj['data'].copy()
bl = self.bit_length(obj)
if obj.get('type', None) is None:
all_comp = [(k, v.compress(obj['data']['pixels'], bl)) for k, v in packers.items()]
best = min(all_comp, key=lambda x: len(x[1]))
# Put the best type back into the object.
obj['type'] = best[0]
obj['data']['pixels'] = best[1]
elif obj['type'] != 'RW':
obj['data']['pixels'] = packers[obj['type']].compress(obj['data']['pixels'], bl)
return obj
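# Round-trip sketch for the RL codec above (assumes 4-bit palette indices,
# i.e. pixel values 0-15; the data below is made up for illustration):
if __name__ == '__main__':
    pixels = bytes([5] * 20 + [3])                    # a long run plus one odd pixel
    packed = RL.compress(pixels, 4)                  # run-length encode at 4 bits/value
    restored = RL.decompress(packed, 4, len(pixels))
    assert restored == pixels                        # lossless round trip
    print(f'{len(pixels)} bytes -> {len(packed)} bytes')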
| [
"bitstring.Bits",
"math.ceil",
"bitstring.BitArray",
"bitstring.ConstBitStream"
] | [((903, 929), 'math.ceil', 'ceil', (['(8 / (bit_length + 1))'], {}), '(8 / (bit_length + 1))\n', (907, 929), False, 'from math import ceil\n'), ((945, 955), 'bitstring.BitArray', 'BitArray', ([], {}), '()\n', (953, 955), False, 'from bitstring import BitArray, Bits, ConstBitStream\n'), ((1528, 1554), 'bitstring.ConstBitStream', 'ConstBitStream', ([], {'bytes': 'data'}), '(bytes=data)\n', (1542, 1554), False, 'from bitstring import BitArray, Bits, ConstBitStream\n'), ((1229, 1268), 'bitstring.BitArray', 'BitArray', ([], {'uint': 'value', 'length': 'bit_length'}), '(uint=value, length=bit_length)\n', (1237, 1268), False, 'from bitstring import BitArray, Bits, ConstBitStream\n'), ((1368, 1407), 'bitstring.BitArray', 'BitArray', ([], {'uint': 'value', 'length': 'bit_length'}), '(uint=value, length=bit_length)\n', (1376, 1407), False, 'from bitstring import BitArray, Bits, ConstBitStream\n'), ((1935, 1945), 'bitstring.BitArray', 'BitArray', ([], {}), '()\n', (1943, 1945), False, 'from bitstring import BitArray, Bits, ConstBitStream\n'), ((1951, 1986), 'bitstring.BitArray', 'BitArray', ([], {'uint': 'x', 'length': 'bit_length'}), '(uint=x, length=bit_length)\n', (1959, 1986), False, 'from bitstring import BitArray, Bits, ConstBitStream\n'), ((2118, 2134), 'bitstring.Bits', 'Bits', ([], {'bytes': 'data'}), '(bytes=data)\n', (2122, 2134), False, 'from bitstring import BitArray, Bits, ConstBitStream\n')] |
#1. Extract scalp from preop (raw) file. Do HD-BET, normalization, filtering without MNI registration. Let the output file be 'preop-scalp (raw)'.
#2. Perform FLIRT from 'preop-scalp (raw)' to MNI scalp template (normalized and filtered) so as to remove non-skull regions (e.g. neck). Let the output file be 'MNI-registered preop-scalp (raw)'. Save the 4x4 affine transform matrix.
#3. Make the inverse matrix of the 4x4 affine transform matrix (mentioned in the previous step) using numpy.linalg.inv function.
#4. Perform FLIRT-applyxfm (what is applyxfm: using a known affine transform matrix) (use inverse matrix found above) to 'MNI-registered preop-scalp (raw)' with reference to 'preop-scalp (raw)' (why: to keep the same size). Let the output file be 'preop-scalp (return)'.
#5. Perform FLIRT from MNI scalp template (normalized and filtered) to 'preop-scalp (return)'. With non-skull regions removed, the transformation is now suitable for the SEGMENTED SCALP, too. Let the 4x4 affine transform matrix output (-omat) be 'MNI-return.mat'.
#6. Perform FLIRT-applyxfm (use 'MNI-return.mat' that is mentioned above) to SEGMENTED SCALP with reference to 'preop-scalp (raw)'. Let the output file be 'SEGMENTED SCALP-raw'.
#7. Overlap 'SEGMENTED SCALP-raw' and 'preop-scalp (raw)' in MRIcroGL to check if the flap sits on the scalp.
import os
import numpy as np
import nibabel as nib
def segmentation():
T1w_name = input('\nType in the name of unmodified (raw) T1w file.\n')
flap_name = input('\nType in the name of file containing the segmented, MNI-registered flap.\n')
MNI_scalp_name = input('\nType in the name of referenced MNI scalp file. We recommend to have the scalp extracted from the functions provided in the repository (will be organized).\n')
GPU_enabled = input('\nIs GPU enabled? Type Y/N.\n')
# Task 1: Brain segmentation and deletion
if (GPU_enabled == 'Y'):
os.system("hd-bet -i " + str(T1w_name))
    elif (GPU_enabled == 'N'):
        os.system("hd-bet -i " + str(T1w_name) + " -device cpu -mode fast -tta 0")
os.remove(T1w_name[:-7] + "_bet.nii.gz")
os.rename(T1w_name[:-7] + "_bet_mask.nii.gz", T1w_name[:-7] + "_BRAINMASK.nii.gz")
brain_mask = nib.load(T1w_name[:-7] + "_BRAINMASK.nii.gz")
t1w_t2w = nib.load(T1w_name)
brain_mask_A = np.array(brain_mask.dataobj)
t1w_t2w_A = np.array(t1w_t2w.dataobj)
# 1.1 : Checking dimensional congruency between brain mask and overlaid file.
if(brain_mask_A.shape == t1w_t2w_A.shape):
# 1.2 : Removing brain from overlaid file.
for x in range(0, brain_mask_A.shape[0]-1):
for y in range(0, brain_mask_A.shape[1]-1):
for z in range(0, brain_mask_A.shape[2]-1):
if(brain_mask_A[x][y][z] > 0):
t1w_t2w_A[x][y][z] = 0
else:
print("Comparison not possible due to difference in dimensions.")
# 1.3 : Isolating scalp with enclosed coordinate volume.
for x in range(0, t1w_t2w_A.shape[0]-1):
for y in range(0, t1w_t2w_A.shape[1]-1):
for z in range(0, t1w_t2w_A.shape[2]-1):
if(x < ((t1w_t2w_A.shape[0]-1)*0.03) or x > ((t1w_t2w_A.shape[0]-1)*0.96) or y < ((t1w_t2w_A.shape[1]-1)*0.01) or y > ((t1w_t2w_A.shape[1]-1)*0.99) or z < ((-(t1w_t2w_A.shape[2]-1)*y*0.000275)+85)):
t1w_t2w_A[x][y][z] = 0
# 1.4 : Finding value of threshold intensity for scalp segmentation.
def paraMAX():
M = 0
for x in range(int(0.05*(t1w_t2w_A.shape[0]-1)),int(0.95*(t1w_t2w_A.shape[0]-1))):
for y in range(int(0.05*(t1w_t2w_A.shape[1]-1)),int(0.95*(t1w_t2w_A.shape[1]-1))):
for z in range(int(0.05*(t1w_t2w_A.shape[2]-1)),int(0.95*(t1w_t2w_A.shape[2]-1))):
if(M < t1w_t2w_A[x][y][z]):
M = t1w_t2w_A[x][y][z]
return M
MAX = paraMAX()
MAX_thres = MAX*0.225
# 1.5 : Segmenting scalp using threshold intensity.
for x in range(0, t1w_t2w_A.shape[0]-1):
for y in range(0, t1w_t2w_A.shape[1]-1):
for z in range(0, t1w_t2w_A.shape[2]-1):
if(t1w_t2w_A[x][y][z] < MAX_thres):
t1w_t2w_A[x][y][z] = 0
# Task 1.6 : Removing non-scalp voxels by area inspection.
ns_thres = MAX*0.34
for x in range(1, t1w_t2w_A.shape[0]-1):
for y in range(1, t1w_t2w_A.shape[1]-1):
for z in range(1, t1w_t2w_A.shape[2]-1):
M = 0
for k in range(-1,2):
for m in range(-1,2):
for n in range(-1,2):
if t1w_t2w_A[x+k][y+m][z+n] >= M:
M = t1w_t2w_A[x+k][y+m][z+n]
if M < ns_thres:
t1w_t2w_A[x][y][z] = 0
# Task 1.7 : Extraction
scalp_array = nib.Nifti1Image(t1w_t2w_A, affine=np.eye(4))
nib.save(scalp_array, T1w_name[:-7] + "_SCALP.nii.gz")
os.remove(T1w_name[:-7] + "_BRAINMASK.nii.gz")
    # Task 2 : Getting inverse of (prescalp_raw --> MNI_scalp) affine transform matrix.
prescalp_raw_name = T1w_name[:-7] + "_SCALP.nii.gz"
MNI_registered_prescalp_raw_name = "MNI_registered_" + prescalp_raw_name
os.system("flirt -in " + str(prescalp_raw_name) + " -ref " + str(MNI_scalp_name) + " -out " + str(MNI_registered_prescalp_raw_name) + " -omat prescalp_raw_to_MNI.mat -bins 640 -searchcost mutualinfo")
inv = np.linalg.inv(np.loadtxt('prescalp_raw_to_MNI.mat'))
np.savetxt('prescalp_raw_to_MNI_inv.mat',inv)
prescalp_return_name = T1w_name[:-7] + "_return_SCALP.nii.gz"
os.system("flirt -in " + str(MNI_registered_prescalp_raw_name) + " -ref " + str(prescalp_raw_name) + " -out " + str(prescalp_return_name) + " -init prescalp_raw_to_MNI_inv.mat -applyxfm")
os.system("flirt -in " + str(MNI_scalp_name) + " -ref " + str(prescalp_return_name) + " -out delete.nii.gz -omat MNI-return.mat -bins 640 -searchcost mutualinfo")
os.remove('delete.nii.gz')
segmented_flap_raw_name = flap_name[:-7] + "_raw.nii.gz"
os.system("flirt -in " + str(flap_name) + " -ref " + str(prescalp_raw_name) + " -out " + str(segmented_flap_raw_name) + " -init MNI-return.mat -applyxfm")
# reference could be 'prescalp_return_name', but would not make visible difference.
print("Completed. The name of the output flap file is : " + str(segmented_flap_raw_name) + ". Check if this file and unmodified (raw) T1w file overlap appropriately in external softwares (e.g. MRIcroGL) or python libraries.")
segmentation()
| [
"numpy.eye",
"nibabel.save",
"nibabel.load",
"os.rename",
"numpy.array",
"numpy.savetxt",
"numpy.loadtxt",
"os.remove"
] | [((2103, 2143), 'os.remove', 'os.remove', (["(T1w_name[:-7] + '_bet.nii.gz')"], {}), "(T1w_name[:-7] + '_bet.nii.gz')\n", (2112, 2143), False, 'import os\n'), ((2148, 2234), 'os.rename', 'os.rename', (["(T1w_name[:-7] + '_bet_mask.nii.gz')", "(T1w_name[:-7] + '_BRAINMASK.nii.gz')"], {}), "(T1w_name[:-7] + '_bet_mask.nii.gz', T1w_name[:-7] +\n '_BRAINMASK.nii.gz')\n", (2157, 2234), False, 'import os\n'), ((2249, 2294), 'nibabel.load', 'nib.load', (["(T1w_name[:-7] + '_BRAINMASK.nii.gz')"], {}), "(T1w_name[:-7] + '_BRAINMASK.nii.gz')\n", (2257, 2294), True, 'import nibabel as nib\n'), ((2309, 2327), 'nibabel.load', 'nib.load', (['T1w_name'], {}), '(T1w_name)\n', (2317, 2327), True, 'import nibabel as nib\n'), ((2348, 2376), 'numpy.array', 'np.array', (['brain_mask.dataobj'], {}), '(brain_mask.dataobj)\n', (2356, 2376), True, 'import numpy as np\n'), ((2393, 2418), 'numpy.array', 'np.array', (['t1w_t2w.dataobj'], {}), '(t1w_t2w.dataobj)\n', (2401, 2418), True, 'import numpy as np\n'), ((5036, 5090), 'nibabel.save', 'nib.save', (['scalp_array', "(T1w_name[:-7] + '_SCALP.nii.gz')"], {}), "(scalp_array, T1w_name[:-7] + '_SCALP.nii.gz')\n", (5044, 5090), True, 'import nibabel as nib\n'), ((5095, 5141), 'os.remove', 'os.remove', (["(T1w_name[:-7] + '_BRAINMASK.nii.gz')"], {}), "(T1w_name[:-7] + '_BRAINMASK.nii.gz')\n", (5104, 5141), False, 'import os\n'), ((5672, 5718), 'numpy.savetxt', 'np.savetxt', (['"""prescalp_raw_to_MNI_inv.mat"""', 'inv'], {}), "('prescalp_raw_to_MNI_inv.mat', inv)\n", (5682, 5718), True, 'import numpy as np\n'), ((6163, 6189), 'os.remove', 'os.remove', (['"""delete.nii.gz"""'], {}), "('delete.nii.gz')\n", (6172, 6189), False, 'import os\n'), ((5624, 5661), 'numpy.loadtxt', 'np.loadtxt', (['"""prescalp_raw_to_MNI.mat"""'], {}), "('prescalp_raw_to_MNI.mat')\n", (5634, 5661), True, 'import numpy as np\n'), ((5021, 5030), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5027, 5030), True, 'import numpy as np\n')] |
import json
import logging
from pprint import pformat
import requests
class RE_API:
def __init__(self, re_url, token):
self.re_url = re_url
self.token = token
def _call_re(self, endpoint="/api/query_results/", params=None, data=None):
header = {"Authorization": self.token}
logging.info(f"Calling RE_API with query data: {pformat(data)}")
ret = requests.post(self.re_url+endpoint, data, params=params, headers=header)
return ret.json()
def get_related_sequences(self, rid, sf_sim=1, df_sim=1, exclude_self=0):
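        # Query the Relation Engine for gene sequences attached to reactions
        # similar to `rid` (view: list_genes_for_similar_reactions).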
if not rid.startswith('rxn_reaction/'):
rid = 'rxn_reaction/' + rid
body = json.dumps({'rid': rid, 'sf_sim': sf_sim, 'df_sim': df_sim,
'exclude_self': exclude_self})
ret = self._call_re(params={'view': "list_genes_for_similar_reactions"}, data=body)
if "error" in ret:
raise RuntimeError(f"{ret['error']}: {ret.get('arango_message', '')}")
logging.info(f"Found {ret['results'][0]['count']} related sequences")
return ret['results'][0]['sequences']
| [
"json.dumps",
"requests.post",
"logging.info",
"pprint.pformat"
] | [((397, 471), 'requests.post', 'requests.post', (['(self.re_url + endpoint)', 'data'], {'params': 'params', 'headers': 'header'}), '(self.re_url + endpoint, data, params=params, headers=header)\n', (410, 471), False, 'import requests\n'), ((678, 772), 'json.dumps', 'json.dumps', (["{'rid': rid, 'sf_sim': sf_sim, 'df_sim': df_sim, 'exclude_self': exclude_self}"], {}), "({'rid': rid, 'sf_sim': sf_sim, 'df_sim': df_sim, 'exclude_self':\n exclude_self})\n", (688, 772), False, 'import json\n'), ((1006, 1075), 'logging.info', 'logging.info', (['f"""Found {ret[\'results\'][0][\'count\']} related sequences"""'], {}), '(f"Found {ret[\'results\'][0][\'count\']} related sequences")\n', (1018, 1075), False, 'import logging\n'), ((366, 379), 'pprint.pformat', 'pformat', (['data'], {}), '(data)\n', (373, 379), False, 'from pprint import pformat\n')] |
from json import dumps
__all__ = ['print_formated']
DEFAULT_OPTIONS_JSON_DUMPS = {'indent': 4, 'sort_keys': False, 'default': str}
def print_formated(text, data, options=DEFAULT_OPTIONS_JSON_DUMPS):
print(f'{text}: {dumps(data, **options)}')
| [
"json.dumps"
] | [((226, 248), 'json.dumps', 'dumps', (['data'], {}), '(data, **options)\n', (231, 248), False, 'from json import dumps\n')] |
import collections

from union_find import UnionFind
# Minimize Malware Spread
# We will remove one node from the initial list. Return the node that if removed, would minimize M(initial).
class Solution(object):
def minMalwareSpread(self, graph, initial):
"""
:type graph: List[List[int]]
:type initial: List[int]
:rtype: int
"""
if not initial:
return -1
# return the smallest index if multiple results
initial.sort()
n = len(graph)
uf = UnionFind(n)
# union the whole graph
for i in range(n):
for j in range(i + 1, n):
if graph[i][j] == 1:
uf.union(i, j)
# if only one initially infected node, the damage reduced will be the group size
# => return the infected node in the largest group
# if 2+ initially infected node in a group, cannot reduce the damage
# => return the infected node with minimum index
counter = collections.Counter(uf.find(i) for i in initial) # group_parent => # of initially infected nodes
one_infected = [i for i in initial if counter[uf.find(i)] == 1]
if one_infected:
return max(one_infected, key=lambda i: uf.sizes[uf.find(i)])
else:
return min(initial)
# O(n ^ 2) time, O(n) space
# Minimize Malware Spread II
# In this case, we will remove one node from the initial list, completely removing it and any connections
# from this node to any other node. Return the node that if removed, would minimize M(initial).
class SolutionQ2(object):
def minMalwareSpread(self, graph, initial):
"""
:type graph: List[List[int]]
:type initial: List[int]
:rtype: int
"""
n = len(graph)
d = collections.defaultdict(list) # node => list of initiail nodes
for init in initial:
# find reachable nodes if other initial nodes are cut off
visited = set(initial)
queue = collections.deque([init])
while queue:
infected = queue.popleft()
for node in range(n):
if graph[infected][node] and node not in visited:
visited.add(node)
d[node].append(init)
queue.append(node)
# for node i, how many initial nodes can infected it
# without going through other initial nodes
connected = [0] * n
for node, src_nodes in d.items():
if len(src_nodes) == 1:
init = src_nodes[0]
connected[init] += 1
max_connected = max(connected)
if max_connected == 0:
return min(initial)
return connected.index(max_connected)
# thought process:
# if multiple nodes can infect a node N without going through other initial nodes
# then removing one initial node would not save node N
| [
"union_find.UnionFind"
] | [((517, 529), 'union_find.UnionFind', 'UnionFind', (['n'], {}), '(n)\n', (526, 529), False, 'from union_find import UnionFind\n')] |
import unittest
from dialogapi.entity import Application
from dialogapi.entity import Bot
from dialogapi.test.method import AssertEqual
from dialogapi.test.method import AssertIn
from dialogapi.test.method import AssertRegexEqual
from dialogapi.test.method import AssertRegexIn
from dialogapi.test.method import AssertNotEqual
from dialogapi.test.method import AssertNotIn
from dialogapi.test.method import AssertRegexNotEqual
from dialogapi.test.method import AssertRegexNotIn
from dialogapi.test.method import AssertionMethodFactory
from dialogapi.test.task import Test
from dialogapi.test.task import Task
from dialogapi.test.task import TaskConfig
from dialogapi.test.task import TaskManager
from dialogapi.test.config import Parser
class ApplicationRepositoryMock:
def register(self, bot):
return Application(
bot=bot,
app_id="test_app_id"
)
class DialogRepositoryMock:
def dialogue(self, request):
return {"systemText": {"expression": "ワールド"}}
class TaskManagerTest(unittest.TestCase):
def test_execute_tasks_ok(self):
        # Define the task
factory = AssertionMethodFactory()
test = Test(method=factory.build("equal"),
param="response.systemText.expression",
expected="ワールド")
task = Task(
name="TaskManagerTest-tasks_ok",
request={"voiceText": "ハロー"},
tests=[test]
)
        # Define the task manager
manager = TaskManager(
tasks=[task],
config=TaskConfig(keep_app_id=False),
)
        # Execute
result = manager.execute_tasks(
bot=Bot(id_="JP_testBot"),
application_repository=ApplicationRepositoryMock(),
dialogue_repository=DialogRepositoryMock()
)
self.assertTrue(result)
class TaskTest(unittest.TestCase):
def test_execute_tests_ok(self):
factory = AssertionMethodFactory()
test = Test(method=factory.build("equal"),
param="response.systemText.expression",
expected="お疲れ様です。")
        # The task's request content is not checked internally, so pass None
task = Task(name="テストタスク", request=None, tests=[test])
response = {
"systemText": {
"expression": "お疲れ様です。",
"utterance": "おつかれさまです",
},
"dialogStatus": {},
"serverSendTime": "2017-03-30 13:31:01",
}
res = task.execute_tests(response=response)
self.assertTrue(res)
def test_execute_tests_fail(self):
factory = AssertionMethodFactory()
test = Test(method=factory.build("equal"),
param="response.systemText.expression",
expected="お疲れ様です。")
test2 = Test(method=factory.build("equal"),
param="response.systemText.expression",
expected="お疲れ様ですよね。")
        # The task's request content is not checked internally, so pass None
task = Task(name="テストタスク", request=None, tests=[test, test2])
response = {
"systemText": {
"expression": "お疲れ様です。",
"utterance": "おつかれさまです",
},
"dialogStatus": {},
"serverSendTime": "2017-03-30 13:31:01",
}
res = task.execute_tests(response=response)
self.assertFalse(res)
class TestTest(unittest.TestCase):
def test_execute(self):
factory = AssertionMethodFactory()
test = Test(method=factory.build("equal"),
param="response.systemText.expression",
expected="お疲れ様です。")
response = {
"systemText": {
"expression": "お疲れ様です。",
"utterance": "おつかれさまです",
},
"dialogStatus": {},
"serverSendTime": "2017-03-30 13:31:01",
}
res = test.execute(response=response)
self.assertTrue(res.bool)
self.assertEqual(res.result, "お疲れ様です。")
self.assertEqual(res.expected, "お疲れ様です。")
class AssertionMethodFactoryTest(unittest.TestCase):
def test_build_equal(self):
factory = AssertionMethodFactory()
cls = factory.build("equal")
self.assertEqual(cls.__class__.__name__, "AssertEqual")
def test_build_in(self):
factory = AssertionMethodFactory()
cls = factory.build("in")
self.assertEqual(cls.__class__.__name__, "AssertIn")
def test_build_regex_equal(self):
factory = AssertionMethodFactory()
cls = factory.build("regex_equal")
self.assertEqual(cls.__class__.__name__, "AssertRegexEqual")
def test_build_regex_in(self):
factory = AssertionMethodFactory()
cls = factory.build("regex_in")
self.assertEqual(cls.__class__.__name__, "AssertRegexIn")
def test_build_not_equal(self):
factory = AssertionMethodFactory()
cls = factory.build("not_equal")
self.assertEqual(cls.__class__.__name__, "AssertNotEqual")
def test_build_not_in(self):
factory = AssertionMethodFactory()
cls = factory.build("not_in")
self.assertEqual(cls.__class__.__name__, "AssertNotIn")
def test_build_regex_not_equal(self):
factory = AssertionMethodFactory()
cls = factory.build("regex_not_equal")
self.assertEqual(cls.__class__.__name__, "AssertRegexNotEqual")
def test_build_regex_not_in(self):
factory = AssertionMethodFactory()
cls = factory.build("regex_not_in")
self.assertEqual(cls.__class__.__name__, "AssertRegexNotIn")
class AssertEqualTest(unittest.TestCase):
def test_execute_true(self):
method = AssertEqual()
first = "テスト"
second = "テスト"
res = method.execute(first=first, second=second)
self.assertTrue(res)
def test_execute_false(self):
method = AssertEqual()
first = "テスト"
second = "テスト1"
res = method.execute(first=first, second=second)
self.assertFalse(res)
class AssertInTests(unittest.TestCase):
def test_execute_in(self):
method = AssertIn()
first = "テスト"
second = ["テスト", "テスト2"]
res = method.execute(first=first, second=second)
self.assertTrue(res)
def test_execute_not_in(self):
method = AssertIn()
first = "テスト"
second = ["テスト1", "テスト2"]
res = method.execute(first=first, second=second)
self.assertFalse(res)
class AssertRegexEqualTest(unittest.TestCase):
def test_execute_true(self):
method = AssertRegexEqual()
first = "テ スト"
second = r"^\w "
res = method.execute(first=first, second=second)
self.assertTrue(res)
def test_execute_false(self):
method = AssertRegexEqual()
first = "テ スト"
second = r"か$"
res = method.execute(first=first, second=second)
self.assertFalse(res)
class AssertRegexInTest(unittest.TestCase):
def test_execute_true(self):
method = AssertRegexIn()
first = "テ スト"
second = ["感じ$", r"^\w "]
res = method.execute(first=first, second=second)
self.assertTrue(res)
def test_execute_false(self):
method = AssertRegexIn()
first = "テ スト"
second = ["感じ$", r"[はな]"]
res = method.execute(first=first, second=second)
self.assertFalse(res)
class AssertNotEqualTest(unittest.TestCase):
def test_execute_false(self):
method = AssertNotEqual()
first = "テスト"
second = "テスト"
res = method.execute(first=first, second=second)
self.assertFalse(res)
def test_execute_true(self):
method = AssertNotEqual()
first = "テスト"
second = "テスト1"
res = method.execute(first=first, second=second)
self.assertTrue(res)
class AssertNotInTests(unittest.TestCase):
def test_execute_notin_false(self):
method = AssertNotIn()
first = "テスト"
second = ["テスト", "テスト2"]
res = method.execute(first=first, second=second)
self.assertFalse(res)
def test_execute_notin_true(self):
method = AssertNotIn()
first = "テスト"
second = ["テスト1", "テスト2"]
res = method.execute(first=first, second=second)
self.assertTrue(res)
class AssertRegexNotEqualTest(unittest.TestCase):
def test_execute_false(self):
method = AssertRegexNotEqual()
first = "テ スト"
second = r"^\w "
res = method.execute(first=first, second=second)
self.assertFalse(res)
def test_execute_true(self):
method = AssertRegexNotEqual()
first = "テ スト"
second = r"か$"
res = method.execute(first=first, second=second)
self.assertTrue(res)
class AssertRegexNotInTest(unittest.TestCase):
def test_execute_false(self):
method = AssertRegexNotIn()
first = "テ スト"
second = ["感じ$", r"^\w "]
res = method.execute(first=first, second=second)
self.assertFalse(res)
def test_execute_true(self):
method = AssertRegexNotIn()
first = "テ スト"
second = ["感じ$", r"[はな]"]
res = method.execute(first=first, second=second)
self.assertTrue(res)
class TestParser(unittest.TestCase):
def test_parse_fd(self):
test_config = """
config:
keep_app_id: true
tasks:
- name: 「こんにちは」に対するテスト
request:
voiceText: こんにちは
location:
lat: 0
lon: 0
clientData:
option:
t: ols
tests:
- method: equal
param: response.systemText.utterance
expected: "こんにちは。"
- method: in
param: response.systemText.utterance
expected:
- "こんにちは。"
- "こんにちは。元気ですか?"
- method: equal
param: response.command
expected: "xheijfeiijf=="
- name: 「はろー」に対するテスト
request:
voiceText: はろー
tests:
- method: equal
param: response.systemText.utterance
expected: "オンラインで変更済み"
"""
manager = Parser().parse_fd(test_config)
self.assertEqual(manager.tasks[0].name, "「こんにちは」に対するテスト")
self.assertEqual(len(manager.tasks[0].tests), 3)
self.assertEqual(manager.tasks[1].name, "「はろー」に対するテスト")
self.assertEqual(len(manager.tasks[1].tests), 1)
| [
"dialogapi.entity.Application",
"dialogapi.test.task.TaskConfig",
"dialogapi.test.task.Task",
"dialogapi.test.method.AssertNotIn",
"dialogapi.entity.Bot",
"dialogapi.test.method.AssertIn",
"dialogapi.test.method.AssertRegexEqual",
"dialogapi.test.method.AssertNotEqual",
"dialogapi.test.method.AssertRegexNotEqual",
"dialogapi.test.config.Parser",
"dialogapi.test.method.AssertionMethodFactory",
"dialogapi.test.method.AssertRegexNotIn",
"dialogapi.test.method.AssertEqual",
"dialogapi.test.method.AssertRegexIn"
] | [((816, 858), 'dialogapi.entity.Application', 'Application', ([], {'bot': 'bot', 'app_id': '"""test_app_id"""'}), "(bot=bot, app_id='test_app_id')\n", (827, 858), False, 'from dialogapi.entity import Application\n'), ((1126, 1150), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (1148, 1150), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((1314, 1400), 'dialogapi.test.task.Task', 'Task', ([], {'name': '"""TaskManagerTest-tasks_ok"""', 'request': "{'voiceText': 'ハロー'}", 'tests': '[test]'}), "(name='TaskManagerTest-tasks_ok', request={'voiceText': 'ハロー'}, tests=[\n test])\n", (1318, 1400), False, 'from dialogapi.test.task import Task\n'), ((1927, 1951), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (1949, 1951), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((2173, 2220), 'dialogapi.test.task.Task', 'Task', ([], {'name': '"""テストタスク"""', 'request': 'None', 'tests': '[test]'}), "(name='テストタスク', request=None, tests=[test])\n", (2177, 2220), False, 'from dialogapi.test.task import Task\n'), ((2601, 2625), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (2623, 2625), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((3003, 3057), 'dialogapi.test.task.Task', 'Task', ([], {'name': '"""テストタスク"""', 'request': 'None', 'tests': '[test, test2]'}), "(name='テストタスク', request=None, tests=[test, test2])\n", (3007, 3057), False, 'from dialogapi.test.task import Task\n'), ((3464, 3488), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (3486, 3488), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((4165, 4189), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (4187, 4189), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((4339, 4363), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (4361, 4363), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((4516, 4540), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (4538, 4540), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((4707, 4731), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (4729, 4731), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((4893, 4917), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (4915, 4917), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((5078, 5102), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (5100, 5102), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((5266, 5290), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (5288, 5290), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((5468, 5492), 'dialogapi.test.method.AssertionMethodFactory', 'AssertionMethodFactory', ([], {}), '()\n', (5490, 5492), False, 'from dialogapi.test.method import AssertionMethodFactory\n'), ((5700, 5713), 'dialogapi.test.method.AssertEqual', 'AssertEqual', ([], {}), '()\n', (5711, 5713), False, 'from dialogapi.test.method import AssertEqual\n'), ((5897, 5910), 'dialogapi.test.method.AssertEqual', 'AssertEqual', ([], {}), '()\n', (5908, 
5910), False, 'from dialogapi.test.method import AssertEqual\n'), ((6134, 6144), 'dialogapi.test.method.AssertIn', 'AssertIn', ([], {}), '()\n', (6142, 6144), False, 'from dialogapi.test.method import AssertIn\n'), ((6339, 6349), 'dialogapi.test.method.AssertIn', 'AssertIn', ([], {}), '()\n', (6347, 6349), False, 'from dialogapi.test.method import AssertIn\n'), ((6592, 6610), 'dialogapi.test.method.AssertRegexEqual', 'AssertRegexEqual', ([], {}), '()\n', (6608, 6610), False, 'from dialogapi.test.method import AssertRegexEqual\n'), ((6797, 6815), 'dialogapi.test.method.AssertRegexEqual', 'AssertRegexEqual', ([], {}), '()\n', (6813, 6815), False, 'from dialogapi.test.method import AssertRegexEqual\n'), ((7045, 7060), 'dialogapi.test.method.AssertRegexIn', 'AssertRegexIn', ([], {}), '()\n', (7058, 7060), False, 'from dialogapi.test.method import AssertRegexIn\n'), ((7256, 7271), 'dialogapi.test.method.AssertRegexIn', 'AssertRegexIn', ([], {}), '()\n', (7269, 7271), False, 'from dialogapi.test.method import AssertRegexIn\n'), ((7514, 7530), 'dialogapi.test.method.AssertNotEqual', 'AssertNotEqual', ([], {}), '()\n', (7528, 7530), False, 'from dialogapi.test.method import AssertNotEqual\n'), ((7714, 7730), 'dialogapi.test.method.AssertNotEqual', 'AssertNotEqual', ([], {}), '()\n', (7728, 7730), False, 'from dialogapi.test.method import AssertNotEqual\n'), ((7965, 7978), 'dialogapi.test.method.AssertNotIn', 'AssertNotIn', ([], {}), '()\n', (7976, 7978), False, 'from dialogapi.test.method import AssertNotIn\n'), ((8178, 8191), 'dialogapi.test.method.AssertNotIn', 'AssertNotIn', ([], {}), '()\n', (8189, 8191), False, 'from dialogapi.test.method import AssertNotIn\n'), ((8437, 8458), 'dialogapi.test.method.AssertRegexNotEqual', 'AssertRegexNotEqual', ([], {}), '()\n', (8456, 8458), False, 'from dialogapi.test.method import AssertRegexNotEqual\n'), ((8645, 8666), 'dialogapi.test.method.AssertRegexNotEqual', 'AssertRegexNotEqual', ([], {}), '()\n', (8664, 8666), False, 'from dialogapi.test.method import AssertRegexNotEqual\n'), ((8899, 8917), 'dialogapi.test.method.AssertRegexNotIn', 'AssertRegexNotIn', ([], {}), '()\n', (8915, 8917), False, 'from dialogapi.test.method import AssertRegexNotIn\n'), ((9113, 9131), 'dialogapi.test.method.AssertRegexNotIn', 'AssertRegexNotIn', ([], {}), '()\n', (9129, 9131), False, 'from dialogapi.test.method import AssertRegexNotIn\n'), ((1540, 1569), 'dialogapi.test.task.TaskConfig', 'TaskConfig', ([], {'keep_app_id': '(False)'}), '(keep_app_id=False)\n', (1550, 1569), False, 'from dialogapi.test.task import TaskConfig\n'), ((1651, 1672), 'dialogapi.entity.Bot', 'Bot', ([], {'id_': '"""JP_testBot"""'}), "(id_='JP_testBot')\n", (1654, 1672), False, 'from dialogapi.entity import Bot\n'), ((10078, 10086), 'dialogapi.test.config.Parser', 'Parser', ([], {}), '()\n', (10084, 10086), False, 'from dialogapi.test.config import Parser\n')] |
import os, tempfile
import requests
from io import BytesIO
from zipfile import ZipFile
def test_doc_upload_via_api_zip_file_size(docker_hello_world):
"""Test if the container is reachable.
The docker_hello_world fixture is used, so the container will start on test
startup. Do a request to the docker service and read the body.
"""
file_path = os.path.dirname(os.path.abspath(__file__)) + '/files/Should_open_calc.docm'
output_path = tempfile.mkstemp(suffix='.zip', prefix='output')
files = {'file': open(file_path, 'rb')}
data = {'extension': 'docm'}
res = requests.post(docker_hello_world + '/document', files=files, data=data)
f = open(output_path[1], 'wb+')
f.write(res.content)
assert os.path.getsize(output_path[1]) > 0
def test_doc_upload_via_api_zip_content_contains_pdf(docker_hello_world):
"""Test if the container is reachable.
The docker_hello_world fixture is used, so the container will start on test
startup. Do a request to the docker service and read the body.
"""
name = 'Should_open_calc'
file_path = os.path.dirname(os.path.abspath(__file__)) + '/files/{name}.docm'.format(name=name)
files = {'file': open(file_path, 'rb')}
data = {'extension': 'docm'}
res = requests.post(docker_hello_world + '/document', files=files, data=data)
file_found = False
with BytesIO(res.content) as zip_file:
with ZipFile(zip_file) as zip_file:
for zip_info in zip_file.infolist():
if zip_info.filename in name.replace('-','_').replace(' ','_') + '.pdf':
file_found = True
    assert file_found
def test_doc_upload_via_api_zip_content_contains_macro_json_file(docker_hello_world):
"""Test if the container is reachable.
The docker_hello_world fixture is used, so the container will start on test
startup. Do a request to the docker service and read the body.
"""
name = 'Should_open_calc'
file_path = os.path.dirname(os.path.abspath(__file__)) + '/files/{name}.docm'.format(name=name)
files = {'file': open(file_path, 'rb')}
data = {'extension': 'docm'}
res = requests.post(docker_hello_world + '/document', files=files, data=data)
file_found = False
with BytesIO(res.content) as zip_file:
with ZipFile(zip_file) as zip_file:
for zip_info in zip_file.infolist():
if zip_info.filename in 'uploads - macro.json':
file_found = True
    assert file_found
| [
"os.path.getsize",
"requests.post",
"zipfile.ZipFile",
"io.BytesIO",
"os.path.abspath",
"tempfile.mkstemp"
] | [((459, 507), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".zip"""', 'prefix': '"""output"""'}), "(suffix='.zip', prefix='output')\n", (475, 507), False, 'import os, tempfile\n'), ((596, 667), 'requests.post', 'requests.post', (["(docker_hello_world + '/document')"], {'files': 'files', 'data': 'data'}), "(docker_hello_world + '/document', files=files, data=data)\n", (609, 667), False, 'import requests\n'), ((1267, 1338), 'requests.post', 'requests.post', (["(docker_hello_world + '/document')"], {'files': 'files', 'data': 'data'}), "(docker_hello_world + '/document', files=files, data=data)\n", (1280, 1338), False, 'import requests\n'), ((2199, 2270), 'requests.post', 'requests.post', (["(docker_hello_world + '/document')"], {'files': 'files', 'data': 'data'}), "(docker_hello_world + '/document', files=files, data=data)\n", (2212, 2270), False, 'import requests\n'), ((740, 771), 'os.path.getsize', 'os.path.getsize', (['output_path[1]'], {}), '(output_path[1])\n', (755, 771), False, 'import os, tempfile\n'), ((1371, 1391), 'io.BytesIO', 'BytesIO', (['res.content'], {}), '(res.content)\n', (1378, 1391), False, 'from io import BytesIO\n'), ((2303, 2323), 'io.BytesIO', 'BytesIO', (['res.content'], {}), '(res.content)\n', (2310, 2323), False, 'from io import BytesIO\n'), ((381, 406), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (396, 406), False, 'import os, tempfile\n'), ((1111, 1136), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1126, 1136), False, 'import os, tempfile\n'), ((1418, 1435), 'zipfile.ZipFile', 'ZipFile', (['zip_file'], {}), '(zip_file)\n', (1425, 1435), False, 'from zipfile import ZipFile\n'), ((2043, 2068), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2058, 2068), False, 'import os, tempfile\n'), ((2350, 2367), 'zipfile.ZipFile', 'ZipFile', (['zip_file'], {}), '(zip_file)\n', (2357, 2367), False, 'from zipfile import ZipFile\n')] |
import factory
from django.utils import timezone
from factory.django import DjangoModelFactory
from psycopg2._range import DateTimeTZRange
class CampFactory(DjangoModelFactory):
class Meta:
model = "camps.Camp"
read_only = False
title = factory.Faker("word")
tagline = factory.Faker("sentence")
slug = factory.Faker("slug")
shortslug = factory.Faker("slug")
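    # Build-up spans the 3 days before now, the camp itself runs from now
    # for 8 days, and teardown runs from just after the camp ends to day 11.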
buildup = factory.LazyFunction(
lambda: DateTimeTZRange(
lower=timezone.now() - timezone.timedelta(days=3),
upper=timezone.now() - timezone.timedelta(hours=1),
)
)
camp = factory.LazyFunction(
lambda: DateTimeTZRange(
lower=timezone.now(), upper=timezone.now() + timezone.timedelta(days=8)
)
)
teardown = factory.LazyFunction(
lambda: DateTimeTZRange(
lower=timezone.now() + timezone.timedelta(days=8, hours=1),
upper=timezone.now() + timezone.timedelta(days=11),
)
)
colour = factory.Faker("hex_color")
| [
"django.utils.timezone.now",
"factory.Faker",
"django.utils.timezone.timedelta"
] | [((261, 282), 'factory.Faker', 'factory.Faker', (['"""word"""'], {}), "('word')\n", (274, 282), False, 'import factory\n'), ((297, 322), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {}), "('sentence')\n", (310, 322), False, 'import factory\n'), ((334, 355), 'factory.Faker', 'factory.Faker', (['"""slug"""'], {}), "('slug')\n", (347, 355), False, 'import factory\n'), ((372, 393), 'factory.Faker', 'factory.Faker', (['"""slug"""'], {}), "('slug')\n", (385, 393), False, 'import factory\n'), ((1011, 1037), 'factory.Faker', 'factory.Faker', (['"""hex_color"""'], {}), "('hex_color')\n", (1024, 1037), False, 'import factory\n'), ((692, 706), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (704, 706), False, 'from django.utils import timezone\n'), ((482, 496), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (494, 496), False, 'from django.utils import timezone\n'), ((499, 525), 'django.utils.timezone.timedelta', 'timezone.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (517, 525), False, 'from django.utils import timezone\n'), ((545, 559), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (557, 559), False, 'from django.utils import timezone\n'), ((562, 589), 'django.utils.timezone.timedelta', 'timezone.timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (580, 589), False, 'from django.utils import timezone\n'), ((714, 728), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (726, 728), False, 'from django.utils import timezone\n'), ((731, 757), 'django.utils.timezone.timedelta', 'timezone.timedelta', ([], {'days': '(8)'}), '(days=8)\n', (749, 757), False, 'from django.utils import timezone\n'), ((863, 877), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (875, 877), False, 'from django.utils import timezone\n'), ((880, 915), 'django.utils.timezone.timedelta', 'timezone.timedelta', ([], {'days': '(8)', 'hours': '(1)'}), '(days=8, hours=1)\n', (898, 915), False, 'from django.utils import timezone\n'), ((935, 949), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (947, 949), False, 'from django.utils import timezone\n'), ((952, 979), 'django.utils.timezone.timedelta', 'timezone.timedelta', ([], {'days': '(11)'}), '(days=11)\n', (970, 979), False, 'from django.utils import timezone\n')] |
from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404
from .models import Item, Votefor, Comment
from .forms import ItemForm, ItemEditForm, VoteforForm, CommentForm
from django.contrib import messages, auth
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
def get_issues_list(request):
""" Render the ticket list """
    item = Item.objects.all()
    paginator = Paginator(item, 10)
    page = request.GET.get('page')
try:
items = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
items = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
items = paginator.page(paginator.num_pages)
return render(request, "issues_list.html", {'items': items})
def create_an_item(request):
""" Present a blank form to be filled in """
if request.method == "POST":
#create new form
form = ItemForm(request.POST, request.FILES)
#django checks if form is valid and saves if so
if form.is_valid():
if request.user.is_authenticated():
form = form.save(commit=False)
form.user=request.user
form.save()
return redirect(get_issues_list)
#if not a post request return an empty form
else:
form = ItemForm()
return render(request, "itemform.html", {'form': form})
#get item where primary key = id
def edit_an_item(request, id):
""" Return an existing form for edit """
item = get_object_or_404(Item, pk=id)
if request.method == "POST":
form = ItemEditForm(request.POST, instance=item)
if form.is_valid():
form.save()
return redirect(get_issues_list)
else:
#item is the instance that we want to construct the object from
form = ItemEditForm(instance=item)
return render(request, "editform.html", {'form': form})
def get_issue_detail(request,id):
""" Return the ticket issue detail view"""
item = get_object_or_404(Item, pk=id)
user = request.user
upvotes = Votefor.objects.filter(item=item, user=user).count()
comments = Comment.objects.all()
return render(request, "issue_detail.html", {'item' : item, 'upvotes' : upvotes, 'comments' : comments})
def cast_an_upvote(request, id):
""" Free vote up a bug"""
item = get_object_or_404(Item, id=id)
user = request.user
if Votefor.objects.filter(item=item, user_id=request.user.id).exists():
messages.info(request, "Sorry you have already voted this one!")
return redirect(get_issues_list)
else:
item.upvotes += 1
item.save()
Votefor.objects.get_or_create(user=user, item=item)
messages.success(request, "Your Bug has been upvoted!")
return redirect(get_issues_list)
def add_comment_to_issue(request, id):
"""Add a comment to a ticket item"""
item= get_object_or_404(Item, pk=id)
form = CommentForm(request.POST, request.FILES)
if request.method == "POST":
if form.is_valid():
form = form.save(commit=False)
form.author = request.user
form.item = item
form.save()
messages.success(request, "Your Comment has been added!")
return redirect(get_issue_detail, id)
else:
form = CommentForm()
return render(request, "issue_commentform.html", {'form': form})
# code below based on moderating your comments in django girls tutorial create comment model
def comment_approve(request, id):
comment = get_object_or_404(Comment, pk=id)
comment.approve()
return redirect('issue_detail', id=comment.item.id)
def comment_remove(request, id):
comment = get_object_or_404(Comment, pk=id)
comment.delete()
return redirect('issue_detail', id=comment.item.id)
| [
"django.shortcuts.render",
"django.shortcuts.get_object_or_404",
"django.contrib.messages.info",
"django.shortcuts.redirect",
"django.contrib.messages.success",
"django.core.paginator.Paginator"
] | [((534, 553), 'django.core.paginator.Paginator', 'Paginator', (['item', '(10)'], {}), '(item, 10)\n', (543, 553), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((917, 970), 'django.shortcuts.render', 'render', (['request', '"""issues_list.html"""', "{'items': items}"], {}), "(request, 'issues_list.html', {'items': items})\n", (923, 970), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((1551, 1599), 'django.shortcuts.render', 'render', (['request', '"""itemform.html"""', "{'form': form}"], {}), "(request, 'itemform.html', {'form': form})\n", (1557, 1599), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((1725, 1755), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Item'], {'pk': 'id'}), '(Item, pk=id)\n', (1742, 1755), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((2079, 2127), 'django.shortcuts.render', 'render', (['request', '"""editform.html"""', "{'form': form}"], {}), "(request, 'editform.html', {'form': form})\n", (2085, 2127), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((2225, 2255), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Item'], {'pk': 'id'}), '(Item, pk=id)\n', (2242, 2255), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((2395, 2493), 'django.shortcuts.render', 'render', (['request', '"""issue_detail.html"""', "{'item': item, 'upvotes': upvotes, 'comments': comments}"], {}), "(request, 'issue_detail.html', {'item': item, 'upvotes': upvotes,\n 'comments': comments})\n", (2401, 2493), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((2572, 2602), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Item'], {'id': 'id'}), '(Item, id=id)\n', (2589, 2602), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((3149, 3179), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Item'], {'pk': 'id'}), '(Item, pk=id)\n', (3166, 3179), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((3602, 3659), 'django.shortcuts.render', 'render', (['request', '"""issue_commentform.html"""', "{'form': form}"], {}), "(request, 'issue_commentform.html', {'form': form})\n", (3608, 3659), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((3811, 3844), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Comment'], {'pk': 'id'}), '(Comment, pk=id)\n', (3828, 3844), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((3878, 3922), 'django.shortcuts.redirect', 'redirect', (['"""issue_detail"""'], {'id': 'comment.item.id'}), "('issue_detail', id=comment.item.id)\n", (3886, 3922), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((3977, 4010), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Comment'], {'pk': 'id'}), '(Comment, pk=id)\n', (3994, 4010), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((4043, 4087), 'django.shortcuts.redirect', 'redirect', (['"""issue_detail"""'], {'id': 'comment.item.id'}), "('issue_detail', id=comment.item.id)\n", (4051, 4087), False, 
'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((2711, 2775), 'django.contrib.messages.info', 'messages.info', (['request', '"""Sorry you have already voted this one!"""'], {}), "(request, 'Sorry you have already voted this one!')\n", (2724, 2775), False, 'from django.contrib import messages, auth\n'), ((2791, 2816), 'django.shortcuts.redirect', 'redirect', (['get_issues_list'], {}), '(get_issues_list)\n', (2799, 2816), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((2943, 2998), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your Bug has been upvoted!"""'], {}), "(request, 'Your Bug has been upvoted!')\n", (2959, 2998), False, 'from django.contrib import messages, auth\n'), ((3014, 3039), 'django.shortcuts.redirect', 'redirect', (['get_issues_list'], {}), '(get_issues_list)\n', (3022, 3039), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((1917, 1942), 'django.shortcuts.redirect', 'redirect', (['get_issues_list'], {}), '(get_issues_list)\n', (1925, 1942), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((3442, 3499), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your Comment has been added!"""'], {}), "(request, 'Your Comment has been added!')\n", (3458, 3499), False, 'from django.contrib import messages, auth\n'), ((3519, 3549), 'django.shortcuts.redirect', 'redirect', (['get_issue_detail', 'id'], {}), '(get_issue_detail, id)\n', (3527, 3549), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n'), ((1430, 1455), 'django.shortcuts.redirect', 'redirect', (['get_issues_list'], {}), '(get_issues_list)\n', (1438, 1455), False, 'from django.shortcuts import render, HttpResponse, redirect, reverse, get_object_or_404\n')] |
# on Windows: open the shell as admin then: `pip install matplotlib numpy nbformat`
# on Unix: `sudo pip install matplotlib numpy nbformat`
# You might need to reload Atom after installation of dependencies if they are not found
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(0, 20, 500)
plt.plot(t, np.sin(t))
| [
"numpy.sin",
"numpy.linspace"
] | [((286, 309), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(500)'], {}), '(0, 20, 500)\n', (297, 309), True, 'import numpy as np\n'), ((322, 331), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (328, 331), True, 'import numpy as np\n')] |
from django.db import models
class CommunityBank(models.Model):
balance = models.DecimalField(max_digits=30, decimal_places=4)
class OwnerBank(models.Model):
    balance = models.DecimalField(max_digits=30, decimal_places=4)
| [
"django.db.models.DecimalField"
] | [((82, 134), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(30)', 'decimal_places': '(4)'}), '(max_digits=30, decimal_places=4)\n', (101, 134), False, 'from django.db import models\n'), ((182, 234), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(30)', 'decimal_places': '(4)'}), '(max_digits=30, decimal_places=4)\n', (201, 234), False, 'from django.db import models\n')] |
import datetime
import json
import netaddr
import sys
from time import time
import uuid
from pprint import pprint
from oslo_config import cfg
import oslo_messaging as messaging
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import rpc as q_rpc
from neutron import context
from neutron.db import agents_db
from neutron.plugins.common import constants
from neutron_lbaas.services.loadbalancer import constants as lb_const
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2
from neutron_lbaas.services.loadbalancer.drivers.abstract_driver \
import LoadBalancerAbstractDriver # @UnresolvedImport @Reimport
from neutron_lbaas.extensions \
import lbaas_agentscheduler # @UnresolvedImport @Reimport
from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db
from oslo_log import log as logging
from oslo_utils import importutils
import f5_openstack_agent.lbaasv2.drivers.bigip.constants_v2
def make_msg(method, **kwargs):
return {'method': method,
'args': kwargs}
if __name__ == '__main__':
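    # Usage: python <this script> <loadbalancer_id>
    # Sends update_loadbalancer_status over RPC on the per-environment agent
    # topic, forcing the load balancer to ACTIVE / ONLINE.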
args = sys.argv
lb_id = args[1]
environment_prefix = 'Test'
topic = '%s_%s' % (constants_v2.TOPIC_PROCESS_ON_HOST_V2, environment_prefix)
default_version = '1.0'
q_rpc.init(cfg.CONF)
transport = messaging.get_transport(cfg.CONF)
target = messaging.Target(topic=topic)
client = messaging.RPCClient(transport, target)
ctxt=context.get_admin_context().to_dict()
client.call(ctxt, 'update_loadbalancer_status',
loadbalancer_id=lb_id,
status=constants.ACTIVE,
operating_status=lb_const.ONLINE
)
| [
"neutron.common.rpc.init",
"oslo_messaging.get_transport",
"neutron.context.get_admin_context",
"oslo_messaging.RPCClient",
"oslo_messaging.Target"
] | [((1289, 1309), 'neutron.common.rpc.init', 'q_rpc.init', (['cfg.CONF'], {}), '(cfg.CONF)\n', (1299, 1309), True, 'from neutron.common import rpc as q_rpc\n'), ((1327, 1360), 'oslo_messaging.get_transport', 'messaging.get_transport', (['cfg.CONF'], {}), '(cfg.CONF)\n', (1350, 1360), True, 'import oslo_messaging as messaging\n'), ((1374, 1403), 'oslo_messaging.Target', 'messaging.Target', ([], {'topic': 'topic'}), '(topic=topic)\n', (1390, 1403), True, 'import oslo_messaging as messaging\n'), ((1417, 1455), 'oslo_messaging.RPCClient', 'messaging.RPCClient', (['transport', 'target'], {}), '(transport, target)\n', (1436, 1455), True, 'import oslo_messaging as messaging\n'), ((1466, 1493), 'neutron.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (1491, 1493), False, 'from neutron import context\n')] |
import json
import os
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.model.custom_action import CustomAction
from tests.test_cases.openproject_test_case import OpenProjectTestCase
class CustomActionServiceTestCase(OpenProjectTestCase):
def setUp(self):
super().setUp()
DATA = os.path.join(self.TEST_CASES, '../data/custom_action.json')
self.caSer = self.op.get_custom_action_service()
with open(DATA) as f:
self.custom_action = CustomAction(json.load(f))
def test_executed(self):
# TODO: We need to create custom actions to test them
pass
def test_find(self):
# TODO: We need to create custom actions to test them
pass
def test_not_found_executed(self):
with self.assertRaises(BusinessError):
self.assertIsNotNone(self.caSer.execute(self.custom_action))
def test_not_found(self):
with self.assertRaises(BusinessError):
self.assertIsNotNone(self.caSer.find(self.custom_action))
| [
"json.load",
"os.path.join"
] | [((346, 405), 'os.path.join', 'os.path.join', (['self.TEST_CASES', '"""../data/custom_action.json"""'], {}), "(self.TEST_CASES, '../data/custom_action.json')\n", (358, 405), False, 'import os\n'), ((539, 551), 'json.load', 'json.load', (['f'], {}), '(f)\n', (548, 551), False, 'import json\n')] |
import sys
import pickle
_, entities_tsv_in, entity_name_dict_in, bert_input_tsv_out = sys.argv
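# Build a BERT input TSV with one "<name>\t0" line per input row; when
# is_relation is False, "key::id" entities are resolved through the pickled
# id -> name dictionary, falling back to 'NoText'.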
with open(entities_tsv_in, 'r') as f:
entities_tsv = f.read().strip().split('\n')
with open(entity_name_dict_in, 'rb') as f:
entity_name_dict = pickle.load(f)
#is_relation = False
is_relation = True
bert_input_tsv = ''
for i, line in enumerate(entities_tsv):
text = line.split('\t')[1]
if is_relation:
name = text
else:
key_, id_ = text.split('::')
if id_ in entity_name_dict:
name = entity_name_dict[id_]
if name is None:
name = 'NoText'
else:
name = 'NoText'
bert_input_tsv += name + '\t0\n'
with open(bert_input_tsv_out, 'w') as f:
f.write(bert_input_tsv)
| [
"pickle.load"
] | [((251, 265), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (262, 265), False, 'import pickle\n')] |
"""
Utility functions for evaluation
"""
from collections import Counter, defaultdict
class Metric(object):
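    """Accumulates gold and predicted triples grouped by relation and
    computes relation-wise precision/recall/F1 scores."""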
def __init__(self):
self.cnt_samples = 0
self.cnt_samples_wo_true_triples = 0
self.cnt_samples_wo_pred_triples = 0
self.sum_prec = 0
self.sum_recall = 0
self.sum_f1 = 0
self.rel_counter = Counter()
self.true_triples_by_rel = defaultdict(set)
self.pred_triples_by_rel = defaultdict(set)
def compute_relationwise_metric(self):
"""
compute relation-wise precision, recall and F1 scores
:param true_triples_by_rel:
:param pred_triples_by_rel:
:return:
"""
rel2metric = dict()
for rel in self.true_triples_by_rel:
true_triples = self.true_triples_by_rel[rel]
if rel in self.pred_triples_by_rel:
pred_triples = self.pred_triples_by_rel[rel]
else:
pred_triples = set()
true_pred_triples = pred_triples.intersection(true_triples)
if len(pred_triples) == 0:
prec = 0
else:
prec = len(true_pred_triples) / len(pred_triples)
if len(true_triples) == 0:
recall = 0
else:
recall = len(true_pred_triples) / len(true_triples)
if prec + recall > 0:
f1 = 2 * prec * recall / (prec + recall)
else:
f1 = 0
# use percentage points
rel2metric[rel] = {
"prec": prec * 100,
"recall": recall * 100,
"f1": f1 * 100
}
return rel2metric
| [
"collections.Counter",
"collections.defaultdict"
] | [((358, 367), 'collections.Counter', 'Counter', ([], {}), '()\n', (365, 367), False, 'from collections import Counter, defaultdict\n'), ((403, 419), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (414, 419), False, 'from collections import Counter, defaultdict\n'), ((455, 471), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (466, 471), False, 'from collections import Counter, defaultdict\n')] |
import os
import discord
from discord.ext import commands
from PIL import Image, ImageDraw, ImageFont
import textwrap
class Memes:
def __init__(self, bot):
self.bot = bot
@commands.command()
async def dipshit(self, ctx, msg: discord.Member=None):
"""Generate a meme
usage : %prefix%dipshit <mention user>
"""
if msg:
msg = msg.name
else:
msg = ctx.author.name
image = Image.open("data/google.jpg").convert("RGBA")
txt = Image.new('RGBA', image.size, (255, 255, 255, 0))
font = ImageFont.truetype('data/fonts/arial.ttf', 18)
d = ImageDraw.Draw(txt)
d.text((138, 58), msg, font=font, fill=(0, 0, 0, 255))
out = Image.alpha_composite(image, txt).save("dipshit.png")
file = discord.File("dipshit.png", filename="dipshit.png")
await ctx.trigger_typing()
await ctx.send(file=file)
os.remove('dipshit.png')
@commands.command()
async def headache(self, ctx, msg: discord.Member=None):
"""Generate a meme
usage : %prefix%headache <mention user>
"""
x = 396
if msg:
msg = msg.name
else:
msg = ctx.author.name
if len(msg) > 8:
x = x - 20
image = Image.open("data/headache.png").convert("RGBA")
txt = Image.new('RGBA', image.size, (255, 255, 255, 0))
font = ImageFont.truetype('data/fonts/impact.ttf', 54)
d = ImageDraw.Draw(txt)
d.text((361, 504), msg, font=font, fill=(0, 0, 0, 255))
out = Image.alpha_composite(image, txt).save("headache.png")
file = discord.File("headache.png", filename="headache.png")
await ctx.trigger_typing()
await ctx.send(file=file)
os.remove('headache.png')
@commands.command()
async def firstwords(self, ctx, *, msg):
"""Generate a meme
usage : %prefix%firstwords message
"""
image = Image.open("data/firstwords.png").convert("RGBA")
txt = Image.new('RGBA', image.size, (255, 255, 255, 0))
font = ImageFont.truetype('data/fonts/comic.ttf', 70)
d = ImageDraw.Draw(txt)
d.text((104, 27), f"{msg[0]}..{msg[0]}...",
font=font, fill=(0, 0, 0, 255))
out = Image.alpha_composite(image, txt)
nfont = ImageFont.truetype('data/fonts/comic.ttf', 50)
para = textwrap.wrap(msg, width=20)
current_h = 591
pad = 3
for line in para:
w, h = d.textsize(line, font=nfont)
d.text((52, current_h), line, font=nfont, fill=(0, 0, 0, 255))
current_h += h + pad
img = Image.alpha_composite(out, txt).save("firstwords.png")
file = discord.File("firstwords.png", filename="firstwords.png")
await ctx.trigger_typing()
await ctx.send(file=file)
os.remove('firstwords.png')
def setup(bot):
bot.add_cog(Memes(bot))
| [
"PIL.Image.open",
"PIL.Image.new",
"PIL.ImageFont.truetype",
"PIL.ImageDraw.Draw",
"textwrap.wrap",
"PIL.Image.alpha_composite",
"discord.ext.commands.command",
"discord.File",
"os.remove"
] | [((192, 210), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (208, 210), False, 'from discord.ext import commands\n'), ((980, 998), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (996, 998), False, 'from discord.ext import commands\n'), ((1843, 1861), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1859, 1861), False, 'from discord.ext import commands\n'), ((525, 574), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'image.size', '(255, 255, 255, 0)'], {}), "('RGBA', image.size, (255, 255, 255, 0))\n", (534, 574), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((591, 637), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""data/fonts/arial.ttf"""', '(18)'], {}), "('data/fonts/arial.ttf', 18)\n", (609, 637), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((651, 670), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['txt'], {}), '(txt)\n', (665, 670), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((820, 871), 'discord.File', 'discord.File', (['"""dipshit.png"""'], {'filename': '"""dipshit.png"""'}), "('dipshit.png', filename='dipshit.png')\n", (832, 871), False, 'import discord\n'), ((949, 973), 'os.remove', 'os.remove', (['"""dipshit.png"""'], {}), "('dipshit.png')\n", (958, 973), False, 'import os\n'), ((1382, 1431), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'image.size', '(255, 255, 255, 0)'], {}), "('RGBA', image.size, (255, 255, 255, 0))\n", (1391, 1431), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1448, 1495), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""data/fonts/impact.ttf"""', '(54)'], {}), "('data/fonts/impact.ttf', 54)\n", (1466, 1495), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1509, 1528), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['txt'], {}), '(txt)\n', (1523, 1528), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1680, 1733), 'discord.File', 'discord.File', (['"""headache.png"""'], {'filename': '"""headache.png"""'}), "('headache.png', filename='headache.png')\n", (1692, 1733), False, 'import discord\n'), ((1811, 1836), 'os.remove', 'os.remove', (['"""headache.png"""'], {}), "('headache.png')\n", (1820, 1836), False, 'import os\n'), ((2069, 2118), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'image.size', '(255, 255, 255, 0)'], {}), "('RGBA', image.size, (255, 255, 255, 0))\n", (2078, 2118), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2135, 2181), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""data/fonts/comic.ttf"""', '(70)'], {}), "('data/fonts/comic.ttf', 70)\n", (2153, 2181), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2195, 2214), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['txt'], {}), '(txt)\n', (2209, 2214), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2330, 2363), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['image', 'txt'], {}), '(image, txt)\n', (2351, 2363), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2381, 2427), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""data/fonts/comic.ttf"""', '(50)'], {}), "('data/fonts/comic.ttf', 50)\n", (2399, 2427), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2444, 2472), 'textwrap.wrap', 'textwrap.wrap', (['msg'], {'width': '(20)'}), '(msg, width=20)\n', (2457, 2472), False, 'import textwrap\n'), ((2781, 2838), 'discord.File', 'discord.File', (['"""firstwords.png"""'], {'filename': '"""firstwords.png"""'}), "('firstwords.png', filename='firstwords.png')\n", (2793, 2838), False, 'import 
discord\n'), ((2916, 2943), 'os.remove', 'os.remove', (['"""firstwords.png"""'], {}), "('firstwords.png')\n", (2925, 2943), False, 'import os\n'), ((465, 494), 'PIL.Image.open', 'Image.open', (['"""data/google.jpg"""'], {}), "('data/google.jpg')\n", (475, 494), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((750, 783), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['image', 'txt'], {}), '(image, txt)\n', (771, 783), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1320, 1351), 'PIL.Image.open', 'Image.open', (['"""data/headache.png"""'], {}), "('data/headache.png')\n", (1330, 1351), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1609, 1642), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['image', 'txt'], {}), '(image, txt)\n', (1630, 1642), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2005, 2038), 'PIL.Image.open', 'Image.open', (['"""data/firstwords.png"""'], {}), "('data/firstwords.png')\n", (2015, 2038), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2710, 2741), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['out', 'txt'], {}), '(out, txt)\n', (2731, 2741), False, 'from PIL import Image, ImageDraw, ImageFont\n')] |
from django.test import TestCase
from django.core.exceptions import FieldDoesNotExist
from django.contrib.gis.geos import Point
from uk_geo_utils.geocoders import (
AddressBaseGeocoder,
get_address_model,
get_onsud_model,
AddressBaseNotImportedException,
OnsudNotImportedException,
CodesNotFoundException,
MultipleCodesException,
NorthernIrelandException,
StrictMatchException,
)
from uk_geo_utils.helpers import AddressSorter
from uk_geo_utils.models import Address
class FuzzyInt(int):
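    # Compares equal to any integer in the inclusive range [lowest, highest];
    # used with assertNumQueries() so the tests accept a small range of query counts
    # instead of one exact value.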
def __new__(cls, lowest, highest):
obj = super(FuzzyInt, cls).__new__(cls, highest)
obj.lowest = lowest
obj.highest = highest
return obj
def __eq__(self, other):
return other >= self.lowest and other <= self.highest
def __repr__(self):
return "[%d..%d]" % (self.lowest, self.highest)
class AddressBaseGeocoderTest(TestCase):
fixtures = [
# records in Address, no corresponding records in ONSUD
"addressbase_geocoder/AA11AA.json",
# 3 records in Address, 2 corresponding records in ONSUD
# all in county A01000001 and local auth B01000001
"addressbase_geocoder/BB11BB.json",
# records in Address, corresponding records in ONSUD
# all in county A01000001 but split across
# local auths B01000001 and B01000002
"addressbase_geocoder/CC11CC.json",
]
def test_empty_addressbase_table(self):
"""
The AddressBase table has no records in it
"""
get_address_model().objects.all().delete()
with self.assertNumQueries(FuzzyInt(0, 4)):
with self.assertRaises(AddressBaseNotImportedException):
addressbase = AddressBaseGeocoder("AA11AA")
def test_empty_onsud_table(self):
"""
The ONSUD table has no records in it
"""
get_onsud_model().objects.all().delete()
with self.assertNumQueries(FuzzyInt(0, 4)):
with self.assertRaises(OnsudNotImportedException):
addressbase = AddressBaseGeocoder("AA11AA")
def test_northern_ireland(self):
with self.assertNumQueries(FuzzyInt(0, 4)):
with self.assertRaises(NorthernIrelandException):
addressbase = AddressBaseGeocoder("BT11AA")
def test_no_records(self):
"""
We can't find any records for the given postcode in the AddressBase table
"""
with self.assertNumQueries(FuzzyInt(0, 4)):
with self.assertRaises(get_address_model().DoesNotExist):
addressbase = AddressBaseGeocoder("ZZ1 1ZZ")
def test_no_codes(self):
"""
We find records for the given postcode in the AddressBase table
but there are no corresponding records in the ONSUD for the UPRNs we found
"""
with self.assertNumQueries(FuzzyInt(0, 5)):
addressbase = AddressBaseGeocoder("AA11AA")
with self.assertRaises(CodesNotFoundException):
result = addressbase.get_code("lad")
self.assertIsInstance(addressbase.centroid, Point)
def test_valid(self):
"""
We find records for the given postcode in the AddressBase table
There are some corresponding records in the ONSUD for the UPRNs we found
Valid result should be returned
Note that in this case, the ONSUD table does not contain corresponding
records for *all* of the UPRNs we found, but we accept the result anyway
"""
with self.assertNumQueries(FuzzyInt(0, 5)):
addressbase = AddressBaseGeocoder(
"bb 1 1B B"
) # intentionally spurious whitespace and case
self.assertEqual("B01000001", addressbase.get_code("lad"))
self.assertIsInstance(addressbase.centroid, Point)
def test_strict_mode(self):
"""
We find records for the given postcode in the AddressBase table
There are some corresponding records in the ONSUD for the UPRNs we found
Note that in this case, the ONSUD table does not contain corresponding
records for *all* of the UPRNs we found, and we are passing strict=True
so we raise a StrictMatchException
"""
with self.assertNumQueries(FuzzyInt(0, 4)):
addressbase = AddressBaseGeocoder("BB11BB")
with self.assertRaises(StrictMatchException):
addressbase.get_code("lad", strict=True)
def test_multiple_codes(self):
"""
We find records for the given postcode in the AddressBase table
There are corresponding records in the ONSUD for the UPRNs we found
The UPRNs described by this postcode map to more than one 'lad'
but they all map to the same 'cty'
"""
with self.assertNumQueries(FuzzyInt(0, 5)):
addressbase = AddressBaseGeocoder("CC1 1CC")
with self.assertRaises(MultipleCodesException):
result = addressbase.get_code("lad")
self.assertEqual("A01000001", addressbase.get_code("cty"))
self.assertIsInstance(addressbase.centroid, Point)
def test_invalid_code_type(self):
with self.assertNumQueries(FuzzyInt(0, 4)):
addressbase = AddressBaseGeocoder("CC1 1CC")
with self.assertRaises(FieldDoesNotExist):
result = addressbase.get_code("foo") # not a real code type
def test_get_code_by_uprn_valid(self):
"""
valid get_code() by UPRN queries
"""
with self.assertNumQueries(FuzzyInt(0, 4)):
addressbase = AddressBaseGeocoder("CC1 1CC")
self.assertEqual("B01000001", addressbase.get_code("lad", "00000008"))
self.assertIsInstance(addressbase.get_point("00000008"), Point)
self.assertEqual("B01000002", addressbase.get_code("lad", "00000009"))
self.assertIsInstance(addressbase.get_point("00000009"), Point)
def test_get_code_by_uprn_invalid_uprn(self):
"""
'foo' is not a valid UPRN in our DB
"""
with self.assertNumQueries(FuzzyInt(0, 4)):
addressbase = AddressBaseGeocoder("CC1 1CC")
with self.assertRaises(get_address_model().DoesNotExist):
result = addressbase.get_code("lad", "foo")
with self.assertRaises(get_address_model().DoesNotExist):
result = addressbase.get_point("foo")
def test_get_code_by_uprn_invalid_uprn_for_postcode(self):
"""
'00000001' is a valid UPRN in our DB,
but for a different postcode
than the one we constructed with
"""
with self.assertNumQueries(FuzzyInt(0, 4)):
addressbase = AddressBaseGeocoder("CC1 1CC")
with self.assertRaises(get_address_model().DoesNotExist):
result = addressbase.get_code("lad", "00000001")
with self.assertRaises(get_address_model().DoesNotExist):
result = addressbase.get_point("00000001")
def test_get_code_by_uprn_no_onsud(self):
"""
'00000006' is a valid UPRN in AddressBase but not in ONSUD
"""
with self.assertNumQueries(FuzzyInt(0, 4)):
addressbase = AddressBaseGeocoder("BB1 1BB")
with self.assertRaises(get_onsud_model().DoesNotExist):
result = addressbase.get_code("lad", "00000006")
self.assertIsInstance(addressbase.get_point("00000006"), Point)
def test_addresses_property(self):
with self.assertNumQueries(FuzzyInt(0, 4)):
addressbase = AddressBaseGeocoder("AA1 1AA")
addressbase._addresses = addressbase._addresses.order_by("-address")
self.assertNotEqual(addressbase._addresses, addressbase.addresses)
sorter = AddressSorter(addressbase._addresses)
self.assertEqual(addressbase.addresses, sorter.natural_sort())
def test_centroid_ignores_type_l(self):
addressbase = AddressBaseGeocoder("BB11BB")
before_centroid = addressbase.centroid
# adding a type L UPRN shouldn't change the postcode centroid
Address.objects.create(
postcode="BB1 1BB",
address="foobar",
location=Point(94.5, 65.7, srid=4326),
addressbase_postal="L",
)
after_centroid = addressbase.centroid
self.assertEqual(before_centroid, after_centroid)
| [
"uk_geo_utils.geocoders.AddressBaseGeocoder",
"uk_geo_utils.geocoders.get_address_model",
"django.contrib.gis.geos.Point",
"uk_geo_utils.geocoders.get_onsud_model",
"uk_geo_utils.helpers.AddressSorter"
] | [((8025, 8054), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""BB11BB"""'], {}), "('BB11BB')\n", (8044, 8054), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((2921, 2950), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""AA11AA"""'], {}), "('AA11AA')\n", (2940, 2950), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((3613, 3647), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""bb 1 1B B"""'], {}), "('bb 1 1B B')\n", (3632, 3647), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((4349, 4378), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""BB11BB"""'], {}), "('BB11BB')\n", (4368, 4378), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((4895, 4925), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""CC1 1CC"""'], {}), "('CC1 1CC')\n", (4914, 4925), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((5293, 5323), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""CC1 1CC"""'], {}), "('CC1 1CC')\n", (5312, 5323), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((5643, 5673), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""CC1 1CC"""'], {}), "('CC1 1CC')\n", (5662, 5673), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((6189, 6219), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""CC1 1CC"""'], {}), "('CC1 1CC')\n", (6208, 6219), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((6764, 6794), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""CC1 1CC"""'], {}), "('CC1 1CC')\n", (6783, 6794), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, 
NorthernIrelandException, StrictMatchException\n'), ((7275, 7305), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""BB1 1BB"""'], {}), "('BB1 1BB')\n", (7294, 7305), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((7633, 7663), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""AA1 1AA"""'], {}), "('AA1 1AA')\n", (7652, 7663), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((7845, 7882), 'uk_geo_utils.helpers.AddressSorter', 'AddressSorter', (['addressbase._addresses'], {}), '(addressbase._addresses)\n', (7858, 7882), False, 'from uk_geo_utils.helpers import AddressSorter\n'), ((1739, 1768), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""AA11AA"""'], {}), "('AA11AA')\n", (1758, 1768), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((2071, 2100), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""AA11AA"""'], {}), "('AA11AA')\n", (2090, 2100), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((2283, 2312), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""BT11AA"""'], {}), "('BT11AA')\n", (2302, 2312), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((2603, 2633), 'uk_geo_utils.geocoders.AddressBaseGeocoder', 'AddressBaseGeocoder', (['"""ZZ1 1ZZ"""'], {}), "('ZZ1 1ZZ')\n", (2622, 2633), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((8287, 8315), 'django.contrib.gis.geos.Point', 'Point', (['(94.5)', '(65.7)'], {'srid': '(4326)'}), '(94.5, 65.7, srid=4326)\n', (8292, 8315), False, 'from django.contrib.gis.geos import Point\n'), ((2538, 2557), 'uk_geo_utils.geocoders.get_address_model', 'get_address_model', ([], {}), '()\n', (2555, 2557), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((6255, 6274), 'uk_geo_utils.geocoders.get_address_model', 'get_address_model', ([], {}), '()\n', (6272, 6274), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, 
NorthernIrelandException, StrictMatchException\n'), ((6385, 6404), 'uk_geo_utils.geocoders.get_address_model', 'get_address_model', ([], {}), '()\n', (6402, 6404), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((6830, 6849), 'uk_geo_utils.geocoders.get_address_model', 'get_address_model', ([], {}), '()\n', (6847, 6849), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((6965, 6984), 'uk_geo_utils.geocoders.get_address_model', 'get_address_model', ([], {}), '()\n', (6982, 6984), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((7341, 7358), 'uk_geo_utils.geocoders.get_onsud_model', 'get_onsud_model', ([], {}), '()\n', (7356, 7358), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((1545, 1564), 'uk_geo_utils.geocoders.get_address_model', 'get_address_model', ([], {}), '()\n', (1562, 1564), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n'), ((1885, 1902), 'uk_geo_utils.geocoders.get_onsud_model', 'get_onsud_model', ([], {}), '()\n', (1900, 1902), False, 'from uk_geo_utils.geocoders import AddressBaseGeocoder, get_address_model, get_onsud_model, AddressBaseNotImportedException, OnsudNotImportedException, CodesNotFoundException, MultipleCodesException, NorthernIrelandException, StrictMatchException\n')] |
from configparser import ConfigParser
from differentiator.differentiator import Differentiator
from differentiator.exception.differentiator_df_exceptions import *
from functools import reduce
from logging import Logger
from pathlib import Path
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import col, when
from pyspark.sql.types import LongType, StringType, StructType
from sequences_handler.sequences_handler import SequencesHandler
from time import time
from typing import Union
class DataFrameStruct:
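    # Lightweight container bundling a DataFrame with its schema, column names and
    # row count, so the diff phase can be handed everything it needs in one object.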
def __init__(self,
dataframe: DataFrame,
schema: StructType,
column_names: list,
num_rows: int) -> None:
self.dataframe = dataframe
self.schema = schema
self.column_names = column_names
self.num_rows = num_rows
class DataFrameDifferentiator(Differentiator):
def __init__(self) -> None:
super().__init__()
self.max_DF = None
self.partitioning = None
@staticmethod
def __read_max_DF(differentiator_config_file: Path,
differentiator_config_parser: ConfigParser) -> Union[int, str]:
exception_message = "{0}: 'max_DF' must be a integer value in range [1, N-1]!" \
.format(differentiator_config_file)
try:
max_DF = str(differentiator_config_parser.get("DataFrames Settings",
"max_DF"))
if max_DF != "N-1":
max_DF = int(max_DF)
except ValueError:
raise InvalidMaxDFError(exception_message)
return max_DF
@staticmethod
def __validate_max_DF(max_DF: Union[int, str]) -> None:
exception_message = "Multiple Sequences DataFrames must have at least 1 sequence."
if max_DF == "N-1":
pass
else:
if max_DF < 1:
raise InvalidMaxDFError(exception_message)
def __set_max_DF(self,
N: int,
max_DF: Union[int, str]) -> None:
if max_DF == "N-1":
self.max_DF = N - 1
else:
self.max_DF = max_DF
@staticmethod
def __log_max_DF(spark_app_name: str,
max_DF: int,
logger: Logger) -> None:
maximum_sequences_per_dataframe_message = "({0}) Maximum Sequences Per DataFrame (max_DF): {1}" \
.format(spark_app_name,
str(max_DF))
print(maximum_sequences_per_dataframe_message)
logger.info(maximum_sequences_per_dataframe_message)
def __get_max_DF(self) -> int:
return self.max_DF
@staticmethod
def __read_partitioning(differentiator_config_file: Path,
differentiator_config_parser: ConfigParser) -> str:
exception_message = "{0}: 'partitioning' must be a string value!" \
.format(differentiator_config_file)
try:
partitioning = \
str(differentiator_config_parser.get("DataFrames Settings",
"partitioning"))
except ValueError:
raise InvalidPartitioningError(exception_message)
return partitioning
@staticmethod
def __validate_partitioning(partitioning: str) -> None:
supported_partitioning = ["auto", "custom"]
exception_message = "Supported partitioning: {0}" \
.format(" | ".join(supported_partitioning))
if partitioning not in supported_partitioning:
raise InvalidPartitioningError(exception_message)
def __set_partitioning(self,
partitioning: str) -> None:
self.partitioning = partitioning
@staticmethod
def __log_partitioning(spark_app_name: str,
partitioning: str,
logger: Logger) -> None:
partitioning_message = "({0}) Partitioning: {1}" \
.format(spark_app_name,
partitioning.capitalize())
print(partitioning_message)
logger.info(partitioning_message)
def __get_partitioning(self) -> str:
return self.partitioning
@staticmethod
def __generate_dataframe_schema_struct_list(dataframe_sequences_data_list: list) -> list:
dataframe_schema_struct_list = []
dataframe_index_label = "Index"
dataframe_schema_struct_list.append([dataframe_index_label,
LongType(),
False])
for index_dataframe_sequences_data_list in range(len(dataframe_sequences_data_list)):
dataframe_sequence_identification = \
dataframe_sequences_data_list[index_dataframe_sequences_data_list][0]
if dataframe_sequence_identification != "Seq":
dataframe_char_label = "Seq_" + dataframe_sequence_identification
else:
dataframe_char_label = "Seq_" + str(index_dataframe_sequences_data_list + 1)
dataframe_schema_struct_list.append([dataframe_char_label,
StringType(),
True])
return dataframe_schema_struct_list
@staticmethod
def __create_dataframe_schema(dataframe_schema_struct_list: list) -> StructType:
dataframe_schema = StructType()
for index_dataframe_schema_struct_list in range(len(dataframe_schema_struct_list)):
dataframe_schema.add(field=dataframe_schema_struct_list[index_dataframe_schema_struct_list][0],
data_type=dataframe_schema_struct_list[index_dataframe_schema_struct_list][1],
nullable=dataframe_schema_struct_list[index_dataframe_schema_struct_list][2])
return dataframe_schema
@staticmethod
def __get_dataframe_schema_column_names(dataframe_schema: StructType) -> list:
return dataframe_schema.names
@staticmethod
def __create_dataframe(spark_session: SparkSession,
dataframe_data: list,
dataframe_schema: StructType) -> DataFrame:
return spark_session.createDataFrame(data=dataframe_data,
schema=dataframe_schema,
verifySchema=True)
@staticmethod
def __repartition_dataframe(dataframe: DataFrame,
new_number_of_partitions: int) -> DataFrame:
current_dataframe_num_partitions = dataframe.rdd.getNumPartitions()
if current_dataframe_num_partitions > new_number_of_partitions:
# Execute Coalesce (Spark Less-Wide-Shuffle Transformation) Function
dataframe = dataframe.coalesce(new_number_of_partitions)
if current_dataframe_num_partitions < new_number_of_partitions:
# Execute Repartition (Spark Wider-Shuffle Transformation) Function
dataframe = dataframe.repartition(new_number_of_partitions)
return dataframe
def __apply_customized_partitioning_after_dataframe_creation(self,
partitioning: str,
spark_app_cores_max_count: int,
spark_recommended_tasks_per_cpu: int,
dataframe: DataFrame) -> DataFrame:
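        # With "custom" partitioning, repartition the freshly created DataFrame to
        # (available cores x recommended tasks per CPU) partitions; "auto" keeps Spark's default.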
if partitioning == "custom":
dataframe_optimized_number_of_partitions = spark_app_cores_max_count * spark_recommended_tasks_per_cpu
dataframe = self.__repartition_dataframe(dataframe,
dataframe_optimized_number_of_partitions)
return dataframe
@staticmethod
def __assemble_join_conditions(first_dataframe: DataFrame,
first_dataframe_column_names: list,
second_dataframe: DataFrame,
second_dataframe_column_names: list):
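        # Build the join predicate: rows must share the same Index and differ in at least
        # one nucleotide ("Seq_") column (a logical OR over all cross-DataFrame column inequalities).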
index_condition = first_dataframe["Index"] == second_dataframe["Index"]
non_index_conditions_list = []
for index_second_dataframe_column_names in range(len(second_dataframe_column_names)):
second_dataframe_column_name_quoted = \
"`" + second_dataframe_column_names[index_second_dataframe_column_names] + "`"
second_dataframe_column_name_found = second_dataframe_column_name_quoted.find("Seq_") != -1
for index_first_dataframe_column_names in range(len(first_dataframe_column_names)):
first_dataframe_column_name_quoted = \
"`" + first_dataframe_column_names[index_first_dataframe_column_names] + "`"
first_dataframe_column_name_found = first_dataframe_column_name_quoted.find("Seq_") != -1
if first_dataframe_column_name_found and second_dataframe_column_name_found:
non_index_condition = \
first_dataframe[first_dataframe_column_name_quoted] != \
second_dataframe[second_dataframe_column_name_quoted]
non_index_conditions_list.append(non_index_condition)
return index_condition & reduce(lambda x, y: x | y, non_index_conditions_list)
@staticmethod
def __execute_dataframes_diff(first_dataframe: DataFrame,
second_dataframe: DataFrame,
join_conditions) -> DataFrame:
# Execute Full Outer Join (Spark Wider-Shuffle Transformation),
# Filter (Spark Narrow Transformation) and
# Drop (Spark Narrow Transformation) Functions
df_r = first_dataframe \
.join(second_dataframe, join_conditions, "full_outer") \
.filter(first_dataframe["Index"].isNotNull() & second_dataframe["Index"].isNotNull()) \
.drop(second_dataframe["Index"])
return df_r
@staticmethod
def __substitute_equal_nucleotide_letters_on_df_r(diff_phase: str,
df_r: DataFrame,
first_dataframe_column_names: list) -> DataFrame:
if diff_phase == "opt":
# Update Non-Diff Line Values to "=" Character (For Better Viewing)
first_dataframe_nucleotide_letter_column_quoted = "`" + first_dataframe_column_names[1] + "`"
first_dataframe_nucleotide_letter_column_new_value = "="
df_r_second_dataframe_columns_only_list = \
[column for column in df_r.columns if column not in first_dataframe_column_names]
for second_dataframe_column in df_r_second_dataframe_columns_only_list:
second_dataframe_column_quoted = "`" + second_dataframe_column + "`"
is_non_diff_column_comparison = col(second_dataframe_column_quoted) == \
df_r[first_dataframe_nucleotide_letter_column_quoted]
column_expression = when(is_non_diff_column_comparison,
first_dataframe_nucleotide_letter_column_new_value) \
.otherwise(col(second_dataframe_column_quoted))
df_r = df_r.withColumn(second_dataframe_column,
column_expression)
return df_r
@staticmethod
def __estimate_highest_df_r_size_in_bytes(first_dataframe_schema: StructType,
first_dataframe_num_rows: int,
second_dataframe_schema: StructType,
second_dataframe_num_rows: int) -> int:
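        # Rough upper bound for the joined result: count LongType (8 B) and StringType
        # (4 B + 1 B per character, here single-character strings) fields in both schemas,
        # discount the dropped duplicate Index, and multiply the per-row size by the
        # smaller of the two row counts.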
long_type_count = 0
long_type_default_size = 8 # LongType(): 8 Bytes Each
string_type_count = 0
string_type_default_size = 4 # StringType(): 4 Bytes Each + (1 Byte * String Length)
first_dataframe_schema_list = [[field.dataType, field.name] for field in first_dataframe_schema.fields]
for schema_field_list in first_dataframe_schema_list:
if schema_field_list[0] == LongType():
long_type_count = long_type_count + 1
elif schema_field_list[0] == StringType():
string_type_count = string_type_count + 1
second_dataframe_schema_list = [[field.dataType, field.name] for field in second_dataframe_schema.fields]
for schema_field_list in second_dataframe_schema_list:
if schema_field_list[0] == LongType():
long_type_count = long_type_count + 1
elif schema_field_list[0] == StringType():
string_type_count = string_type_count + 1
long_type_count = long_type_count - 1 # Discounting Index of Second DataFrame (Dropped After Join)
minimum_dataframe_num_rows = min(first_dataframe_num_rows, second_dataframe_num_rows)
long_type_size_one_row = long_type_count * long_type_default_size
string_type_size_one_row = string_type_count * (string_type_default_size + 1)
return minimum_dataframe_num_rows * (long_type_size_one_row + string_type_size_one_row)
@staticmethod
def __get_optimal_num_of_partitions_after_dataframe_shuffling(spark_max_recommended_partition_size: int,
spark_app_cores_max_count: int,
spark_recommended_tasks_per_cpu: int,
dataframe_size_in_bytes: int) -> int:
# Set Initial Divider Variable Value
divider = spark_app_cores_max_count * spark_recommended_tasks_per_cpu
# Search Optimized Number of Partitions
while True:
if (dataframe_size_in_bytes / divider) <= spark_max_recommended_partition_size:
return divider
divider = divider + 1
def __apply_customized_partitioning_after_dataframes_diff(self,
partitioning: str,
first_dataframe_schema: StructType,
first_dataframe_num_rows: int,
second_dataframe_schema: StructType,
second_dataframe_num_rows: int,
spark_max_recommended_partition_size: int,
spark_app_cores_max_count: int,
spark_recommended_tasks_per_cpu: int,
df_r: DataFrame) -> DataFrame:
if partitioning == "custom":
estimated_df_r_size_in_bytes = self.__estimate_highest_df_r_size_in_bytes(first_dataframe_schema,
first_dataframe_num_rows,
second_dataframe_schema,
second_dataframe_num_rows)
optimized_num_of_dataframe_partitions = \
self.__get_optimal_num_of_partitions_after_dataframe_shuffling(spark_max_recommended_partition_size,
spark_app_cores_max_count,
spark_recommended_tasks_per_cpu,
estimated_df_r_size_in_bytes)
df_r = self.__repartition_dataframe(df_r,
optimized_num_of_dataframe_partitions)
return df_r
def __execute_diff_phase(self,
diff_phase: str,
partitioning: str,
first_dataframe_struct: DataFrameStruct,
second_dataframe_struct: DataFrameStruct,
spark_max_recommended_partition_size: int,
spark_app_cores_max_count: int,
spark_recommended_tasks_per_cpu: int) -> DataFrame:
# Get Struct Values of First DataFrame
first_dataframe = first_dataframe_struct.dataframe
first_dataframe_schema = first_dataframe_struct.schema
first_dataframe_column_names = first_dataframe_struct.column_names
first_dataframe_num_rows = first_dataframe_struct.num_rows
# Get Struct Values of Second DataFrame
second_dataframe = second_dataframe_struct.dataframe
second_dataframe_schema = second_dataframe_struct.schema
second_dataframe_column_names = second_dataframe_struct.column_names
second_dataframe_num_rows = second_dataframe_struct.num_rows
# Assemble Join Conditions
join_conditions = self.__assemble_join_conditions(first_dataframe,
first_dataframe_column_names,
second_dataframe,
second_dataframe_column_names)
# Execute DataFrames Diff (Resulting DataFrame: df_r)
df_r = self.__execute_dataframes_diff(first_dataframe,
second_dataframe,
join_conditions)
# Substitute Equal Nucleotide Letters on df_r (If diff_phase = opt)
df_r = self.__substitute_equal_nucleotide_letters_on_df_r(diff_phase,
df_r,
first_dataframe_column_names)
# Apply Customized Partitioning on df_r After Diff (If Enabled)
df_r = self.__apply_customized_partitioning_after_dataframes_diff(partitioning,
first_dataframe_schema,
first_dataframe_num_rows,
second_dataframe_schema,
second_dataframe_num_rows,
spark_max_recommended_partition_size,
spark_app_cores_max_count,
spark_recommended_tasks_per_cpu,
df_r)
return df_r
@staticmethod
def __show_dataframe(dataframe: DataFrame,
number_of_rows_to_show: int,
truncate_boolean: bool) -> None:
# Execute Sort (Spark Wider-Shuffle Transformation) and
# Show (Spark Action) Functions
dataframe \
.sort(dataframe["Index"].asc_nulls_last()) \
.show(n=number_of_rows_to_show,
truncate=truncate_boolean)
@staticmethod
def __write_dataframe_as_distributed_partial_multiple_csv_files(dataframe: DataFrame,
destination_file_path: Path,
header_boolean: bool,
write_mode: str) -> None:
# Execute Sort (Spark Wider-Shuffle Transformation) and
# Write.CSV (Spark Action) Functions
dataframe \
.sort(dataframe["Index"].asc_nulls_last()) \
.write \
.csv(path=str(destination_file_path),
header=header_boolean,
mode=write_mode)
@staticmethod
def __write_dataframe_as_merged_single_csv_file(dataframe: DataFrame,
destination_file_path: Path,
header_boolean: bool,
write_mode: str) -> None:
# Execute Coalesce (Spark Less-Wide-Shuffle Transformation),
# Sort (Spark Wider-Shuffle Transformation) and
# Write.CSV (Spark Action) Functions
dataframe \
.coalesce(1) \
.sort(dataframe["Index"].asc_nulls_last()) \
.write \
.csv(path=str(destination_file_path),
header=header_boolean,
mode=write_mode)
def __execute_collection_phase(self,
dataframe: DataFrame,
collection_phase: str,
destination_file_path: Path) -> None:
if collection_phase == "None":
# Do Not Collect Resulting DataFrame (df_r)
pass
elif collection_phase == "SC": # SP = Show/Collect
# Show Resulting DataFrame (df_r) as Table Format on Command-Line Interface
self.__show_dataframe(dataframe,
dataframe.count(),
False)
elif collection_phase == "DW": # DW = Distributed Write
# Write to Disk Resulting DataFrame (df_r) as Multiple Partial CSV Files
# (Each Spark Executor Writes Its Partition's Data Locally)
self.__write_dataframe_as_distributed_partial_multiple_csv_files(dataframe,
destination_file_path,
True,
"append")
elif collection_phase == "MW": # MW = Merged Write
# Write to Disk Resulting DataFrame (df_r) as Single CSV File
# (Each Spark Executor Sends Its Partition's Data to One Executor Which Will Merge and Write Them)
self.__write_dataframe_as_merged_single_csv_file(dataframe,
destination_file_path,
True,
"append")
def diff_sequences(self) -> None:
# Initialize Metrics Variables
sequences_comparisons_count = 0
spark_dataframe_partitions_count = 0
diff_phases_time_in_seconds = 0
collection_phases_time_in_seconds = 0
average_sequences_comparison_time_in_seconds = 0
sequences_comparisons_time_in_seconds = 0
# Get Spark Context
spark_context = self.get_spark_context()
# Get Spark App Name
spark_app_name = self.get_spark_app_name(spark_context)
# Get Spark App Id
spark_app_id = self.get_spark_app_id(spark_context)
# Get Output Directory
output_directory = self.get_output_directory()
# Get Diff Phase
diff_phase = self.get_diff_phase()
# Get Collection Phase
collection_phase = self.get_collection_phase()
# Get Data Structure
data_structure = self.get_data_structure()
# Get Number of Sequences to Compare (N)
N = self.get_N()
# Get Differentiator Config File
differentiator_config_file = self.get_differentiator_config_file()
# Init ConfigParser Object
config_parser = ConfigParser()
# Case Preservation of Each Option Name
config_parser.optionxform = str
# Load config_parser
config_parser.read(differentiator_config_file,
encoding="utf-8")
if diff_phase == "1":
max_DF = 1
# Set Maximum Sequences Per DataFrame (max_DF)
self.__set_max_DF(N,
max_DF)
elif diff_phase == "opt":
# Read Maximum Sequences Per DataFrame (max_DF)
max_DF = self.__read_max_DF(differentiator_config_file,
config_parser)
# Validate Maximum Sequences Per DataFrame (max_DF)
self.__validate_max_DF(max_DF)
# Set Maximum Sequences Per DataFrame (max_DF)
self.__set_max_DF(N,
max_DF)
# Get Maximum Sequences Per DataFrame (max_DF)
max_DF = self.__get_max_DF()
# Get Logger
logger = self.get_logger()
# Log Maximum Sequences Per DataFrame (max_DF)
self.__log_max_DF(spark_app_name,
max_DF,
logger)
# Read Partitioning
partitioning = self.__read_partitioning(differentiator_config_file,
config_parser)
# Validate Partitioning
self.__validate_partitioning(partitioning)
# Set Partitioning
self.__set_partitioning(partitioning)
# Log Partitioning
self.__log_partitioning(spark_app_name,
partitioning,
logger)
# Get Estimated Amount of Diffs (d_a)
estimated_d_a = self.estimate_amount_of_diffs(diff_phase,
N,
max_DF)
# Log Estimated Amount of Diffs (d_a)
self.log_estimated_amount_of_diffs(spark_app_name,
estimated_d_a,
logger)
# Get Sequences List Text File
sequences_list_text_file = self.get_sequences_list_text_file()
# Init SequencesHandler Object
sh = SequencesHandler(sequences_list_text_file)
# Generate Sequences Indices List
sequences_indices_list = sh.generate_sequences_indices_list(N,
max_DF)
# Get Actual Amount of Diffs
actual_d_a = self.get_actual_amount_of_diffs(sequences_indices_list)
# Log Actual Amount of Diffs
self.log_actual_amount_of_diffs(spark_app_name,
actual_d_a,
logger)
# Calculate Amount of Diffs (d_a) Estimation Absolute Error
d_a_estimation_absolute_error = self.calculate_amount_of_diffs_estimation_absolute_error(estimated_d_a,
actual_d_a)
# Calculate Amount of Diffs (d_a) Estimation Percent Error
d_a_estimation_percent_error = self.calculate_amount_of_diffs_estimation_percent_error(estimated_d_a,
actual_d_a)
# Log Amount of Diffs (d_a) Estimation Absolute and Percent Errors
self.log_d_a_estimation_errors(spark_app_name,
d_a_estimation_absolute_error,
d_a_estimation_percent_error,
logger)
# Get Spark Session
spark_session = self.get_spark_session()
# Iterate Through Sequences Indices List
for index_sequences_indices_list in range(actual_d_a):
# Sequences Comparison Start Time
sequences_comparison_start_time = time()
# Get First DataFrame Sequences Indices List
first_dataframe_sequences_indices_list = sequences_indices_list[index_sequences_indices_list][0]
# Get Second DataFrame Sequences Indices List
second_dataframe_sequences_indices_list = sequences_indices_list[index_sequences_indices_list][1]
# Get First DataFrame Sequences Data List
first_dataframe_sequences_data_list = sh.generate_sequences_list(sequences_list_text_file,
first_dataframe_sequences_indices_list)
# Get Second DataFrame Sequences Data List
second_dataframe_sequences_data_list = sh.generate_sequences_list(sequences_list_text_file,
second_dataframe_sequences_indices_list)
# Get the Biggest Sequence Length Among DataFrames
biggest_sequence_length_among_dataframes = \
self.get_biggest_sequence_length_among_data_structures(first_dataframe_sequences_data_list,
second_dataframe_sequences_data_list)
# Set Length of First DataFrame
first_dataframe_length = biggest_sequence_length_among_dataframes
# Set Length of Second DataFrame
second_dataframe_length = biggest_sequence_length_among_dataframes
# Generate Schema Struct List of First DataFrame
first_dataframe_schema_struct_list = \
self.__generate_dataframe_schema_struct_list(first_dataframe_sequences_data_list)
# Create Schema of First DataFrame
first_dataframe_schema = self.__create_dataframe_schema(first_dataframe_schema_struct_list)
# Get Schema Column Names of First DataFrame
first_dataframe_schema_column_names = self.__get_dataframe_schema_column_names(first_dataframe_schema)
# Get Data of First DataFrame
first_dataframe_data = self.get_data_structure_data(first_dataframe_length,
first_dataframe_sequences_data_list)
# Create First DataFrame
first_dataframe = self.__create_dataframe(spark_session,
first_dataframe_data,
first_dataframe_schema)
# Get Spark App Cores Max Count
spark_app_cores_max_count = self.get_spark_app_cores_max_count(spark_context)
# Get Spark Recommended Tasks per CPU
spark_recommended_tasks_per_cpu = self.get_spark_recommended_tasks_per_cpu()
# Apply Customized Partitioning on First DataFrame After Creation (If Enabled)
first_dataframe = \
self.__apply_customized_partitioning_after_dataframe_creation(partitioning,
spark_app_cores_max_count,
spark_recommended_tasks_per_cpu,
first_dataframe)
# Get Number of Partitions of First DataFrame
first_dataframe_partitions_number = first_dataframe.rdd.getNumPartitions()
# Increase Spark DataFrame Partitions Count
spark_dataframe_partitions_count = spark_dataframe_partitions_count + first_dataframe_partitions_number
# Create Struct of First DataFrame
first_dataframe_struct = DataFrameStruct(first_dataframe,
first_dataframe_schema,
first_dataframe_schema_column_names,
first_dataframe_length)
# Generate Schema Struct List of Second DataFrame
second_dataframe_schema_struct_list = \
self.__generate_dataframe_schema_struct_list(second_dataframe_sequences_data_list)
# Create Schema of Second DataFrame
second_dataframe_schema = self.__create_dataframe_schema(second_dataframe_schema_struct_list)
# Get Schema Column Names of Second DataFrame
second_dataframe_schema_column_names = self.__get_dataframe_schema_column_names(second_dataframe_schema)
# Get Data of Second DataFrame
second_dataframe_data = self.get_data_structure_data(second_dataframe_length,
second_dataframe_sequences_data_list)
# Create Second DataFrame
second_dataframe = self.__create_dataframe(spark_session,
second_dataframe_data,
second_dataframe_schema)
# Apply Customized Partitioning on Second DataFrame After Creation (If Enabled)
second_dataframe = \
self.__apply_customized_partitioning_after_dataframe_creation(partitioning,
spark_app_cores_max_count,
spark_recommended_tasks_per_cpu,
second_dataframe)
# Get Number of Partitions of Second DataFrame
second_dataframe_partitions_number = second_dataframe.rdd.getNumPartitions()
# Increase Spark DataFrame Partitions Count
spark_dataframe_partitions_count = spark_dataframe_partitions_count + second_dataframe_partitions_number
# Create Struct of Second DataFrame
second_dataframe_struct = DataFrameStruct(second_dataframe,
second_dataframe_schema,
second_dataframe_schema_column_names,
second_dataframe_length)
# Get Spark Maximum Recommended Partition Size in Bytes
spark_max_recommended_partition_size = 134217728 # 128 MB
# Diff Phase Start Time
diff_phase_start_time = time()
# Execute Diff Phase
df_r = self.__execute_diff_phase(diff_phase,
partitioning,
first_dataframe_struct,
second_dataframe_struct,
spark_max_recommended_partition_size,
spark_app_cores_max_count,
spark_recommended_tasks_per_cpu)
# Time to Execute Diff Phase in Seconds
time_to_execute_diff_phase_in_seconds = time() - diff_phase_start_time
# Increase Diff Phases Time
diff_phases_time_in_seconds = diff_phases_time_in_seconds + time_to_execute_diff_phase_in_seconds
# Increase Sequences Comparisons Count
sequences_comparisons_count = sequences_comparisons_count + 1
# Get Partition Number of Resulting DataFrame (df_r)
df_r_partitions_number = df_r.rdd.getNumPartitions()
# Increase Spark DataFrame Partitions Count
spark_dataframe_partitions_count = spark_dataframe_partitions_count + df_r_partitions_number
# Get First Sequence Index of First DataFrame
first_dataframe_first_sequence_index = first_dataframe_sequences_indices_list[0]
# Get First Sequence Index of Second DataFrame
second_dataframe_first_sequence_index = second_dataframe_sequences_indices_list[0]
# Get Last Sequence Index of Second DataFrame
second_dataframe_last_sequence_index = second_dataframe_sequences_indices_list[-1]
# Get Destination File Path for Collection Phase
collection_phase_destination_file_path = \
self.get_collection_phase_destination_file_path(output_directory,
spark_app_name,
spark_app_id,
first_dataframe_first_sequence_index,
second_dataframe_first_sequence_index,
second_dataframe_last_sequence_index)
# Collection Phase Start Time
collection_phase_start_time = time()
# Execute Collection Phase
self.__execute_collection_phase(df_r,
collection_phase,
collection_phase_destination_file_path)
# Time to Execute Collection Phase in Seconds
time_to_execute_collection_phase_in_seconds = time() - collection_phase_start_time
# Increase Collection Phases Time
collection_phases_time_in_seconds = \
collection_phases_time_in_seconds + time_to_execute_collection_phase_in_seconds
# Time to Compare Sequences in Seconds
time_to_compare_sequences_in_seconds = time() - sequences_comparison_start_time
# Increase Sequences Comparisons Time
sequences_comparisons_time_in_seconds = \
sequences_comparisons_time_in_seconds + time_to_compare_sequences_in_seconds
# Log Time to Compare Sequences
self.log_time_to_compare_sequences(spark_app_name,
first_dataframe_first_sequence_index,
second_dataframe_first_sequence_index,
second_dataframe_last_sequence_index,
data_structure,
time_to_compare_sequences_in_seconds,
logger)
# Get Number of Sequences Comparisons Left
number_of_sequences_comparisons_left = \
self.get_number_of_sequences_comparisons_left(actual_d_a,
sequences_comparisons_count)
# Get Average Sequences Comparison Time
average_sequences_comparison_time_in_seconds = \
self.get_average_sequences_comparison_time(sequences_comparisons_time_in_seconds,
sequences_comparisons_count)
# Estimate Time Left
estimated_time_left_in_seconds = self.estimate_time_left(number_of_sequences_comparisons_left,
average_sequences_comparison_time_in_seconds)
# Print Real Time Metrics
self.print_real_time_metrics(spark_app_name,
sequences_comparisons_count,
number_of_sequences_comparisons_left,
average_sequences_comparison_time_in_seconds,
estimated_time_left_in_seconds)
# Log Average Sequences Comparison Time
self.log_average_sequences_comparison_time(spark_app_name,
data_structure,
average_sequences_comparison_time_in_seconds,
logger)
# Log Sequences Comparisons Count
self.log_sequences_comparisons_count(spark_app_name,
sequences_comparisons_count,
logger)
# Log Diff Phases Time
self.log_diff_phases_time(spark_app_name,
diff_phase,
diff_phases_time_in_seconds,
logger)
# Log Collection Phases Time
self.log_collection_phases_time(spark_app_name,
collection_phase,
collection_phases_time_in_seconds,
logger)
# Log Spark DataFrame Partitions Count
self.log_spark_data_structure_partitions_count(spark_app_name,
data_structure,
spark_dataframe_partitions_count,
logger)
# Delete SequencesHandler Object
del sh
| [
"configparser.ConfigParser",
"pyspark.sql.types.StructType",
"functools.reduce",
"pyspark.sql.functions.col",
"pyspark.sql.types.LongType",
"sequences_handler.sequences_handler.SequencesHandler",
"pyspark.sql.types.StringType",
"time.time",
"pyspark.sql.functions.when"
] | [((5448, 5460), 'pyspark.sql.types.StructType', 'StructType', ([], {}), '()\n', (5458, 5460), False, 'from pyspark.sql.types import LongType, StringType, StructType\n'), ((24155, 24169), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (24167, 24169), False, 'from configparser import ConfigParser\n'), ((26428, 26470), 'sequences_handler.sequences_handler.SequencesHandler', 'SequencesHandler', (['sequences_list_text_file'], {}), '(sequences_list_text_file)\n', (26444, 26470), False, 'from sequences_handler.sequences_handler import SequencesHandler\n'), ((9452, 9505), 'functools.reduce', 'reduce', (['(lambda x, y: x | y)', 'non_index_conditions_list'], {}), '(lambda x, y: x | y, non_index_conditions_list)\n', (9458, 9505), False, 'from functools import reduce\n'), ((28137, 28143), 'time.time', 'time', ([], {}), '()\n', (28141, 28143), False, 'from time import time\n'), ((34589, 34595), 'time.time', 'time', ([], {}), '()\n', (34593, 34595), False, 'from time import time\n'), ((37023, 37029), 'time.time', 'time', ([], {}), '()\n', (37027, 37029), False, 'from time import time\n'), ((4536, 4546), 'pyspark.sql.types.LongType', 'LongType', ([], {}), '()\n', (4544, 4546), False, 'from pyspark.sql.types import LongType, StringType, StructType\n'), ((12366, 12376), 'pyspark.sql.types.LongType', 'LongType', ([], {}), '()\n', (12374, 12376), False, 'from pyspark.sql.types import LongType, StringType, StructType\n'), ((12761, 12771), 'pyspark.sql.types.LongType', 'LongType', ([], {}), '()\n', (12769, 12771), False, 'from pyspark.sql.types import LongType, StringType, StructType\n'), ((35221, 35227), 'time.time', 'time', ([], {}), '()\n', (35225, 35227), False, 'from time import time\n'), ((37381, 37387), 'time.time', 'time', ([], {}), '()\n', (37385, 37387), False, 'from time import time\n'), ((37712, 37718), 'time.time', 'time', ([], {}), '()\n', (37716, 37718), False, 'from time import time\n'), ((5203, 5215), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (5213, 5215), False, 'from pyspark.sql.types import LongType, StringType, StructType\n'), ((11099, 11134), 'pyspark.sql.functions.col', 'col', (['second_dataframe_column_quoted'], {}), '(second_dataframe_column_quoted)\n', (11102, 11134), False, 'from pyspark.sql.functions import col, when\n'), ((11412, 11447), 'pyspark.sql.functions.col', 'col', (['second_dataframe_column_quoted'], {}), '(second_dataframe_column_quoted)\n', (11415, 11447), False, 'from pyspark.sql.functions import col, when\n'), ((12473, 12485), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (12483, 12485), False, 'from pyspark.sql.types import LongType, StringType, StructType\n'), ((12868, 12880), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (12878, 12880), False, 'from pyspark.sql.types import LongType, StringType, StructType\n'), ((11250, 11341), 'pyspark.sql.functions.when', 'when', (['is_non_diff_column_comparison', 'first_dataframe_nucleotide_letter_column_new_value'], {}), '(is_non_diff_column_comparison,\n first_dataframe_nucleotide_letter_column_new_value)\n', (11254, 11341), False, 'from pyspark.sql.functions import col, when\n')] |
#
# Copyright 2016-2018 Games Creators Club
#
# MIT License
#
import math
import time
import telemetry
import traceback
import numpy
import cv2
import PIL
import PIL.Image
from PIL import ImageDraw
import pyroslib
import pyroslib.logging
from pyroslib.logging import log, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG, LOG_LEVEL_ALWAYS
from rover import RoverState, normaiseAngle, angleDiference
from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID
MINIMUM_SPEED = 60
MIN_ANGLE = 0.5
MAX_ANGLE = 45
HEADING_MIN_DISTANCE = 150
WALL_SPEED = 210
CORNER_SPEED = 170
CORNER_CROSS_SPEED = 240
MAX_CORNER_DISTANCE = 700
pyroslib.logging.LOG_LEVEL = LOG_LEVEL_INFO
remotDebug = True
size = (80, 64)
class CameraData:
def __init__(self):
self.found = {'red': None, 'blue': None, 'yellow': None, 'green': None}
def reset(self):
self.found['red'] = None
self.found['blue'] = None
self.found['yellow'] = None
self.found['green'] = None
def hasAll(self):
return self.found['red'] is not None and self.found['blue'] is not None and self.found['yellow'] is not None and self.found['green'] is not None
def getFound(self):
return self.found
def foundAsString(self):
return " ".join([("" if v is None else str(v)) + ":" + k for k, v in self.found.items()])
def setData(self, colour, data):
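        # Record the detection for this colour (ignored once all four colours are known);
        # if another colour currently claims the same data value, clear it so each reading
        # is attributed to only one colour.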
if not self.hasAll():
self.found[colour] = data
for c in self.found:
if c != colour and self.found[c] == data:
self.found[c] = None
def missingColours(self):
return ", ".join([p for p in self.found if self.found[p] is None])
class WaitCameraData(Action):
def __init__(self, agent, next_action):
super(WaitCameraData, self).__init__(agent)
self.foundColours = agent.foundColours
self.next_action = next_action
self.started_scanning_time = None
def start(self):
self.started_scanning_time = time.time()
self.foundColours.reset()
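        # Request small RGB frames from all cameras and fetch one raw capture from each;
        # colour detections are expected to arrive asynchronously (via the agent's camera
        # handlers) and fill self.foundColours until hasAll() is satisfied.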
pyroslib.publish("camera/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/wheels/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera1/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera2/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/raw/fetch", "")
pyroslib.publish("camera/wheels/raw/fetch", "")
pyroslib.publish("camera/camera1/raw/fetch", "")
pyroslib.publish("camera/camera2/raw/fetch", "")
self.agent.log_info("Started a wait for all camera data to arrive...")
def next(self):
if self.foundColours.hasAll():
self.agent.log_info("Scanning lasted " + ("{:7.3f}".format(time.time() - self.started_scanning_time)) + "!")
self.agent.log_info("Received all colours " + ("stopping" if self.next_action is None else "starting action " + str(self.next_action.getActionName())))
return self.next_action
return self
def execute(self):
self.agent.log_info("Waiting for sensor data to arrive...")
def getActionName(self):
return "Scan"
class NebulaAction(Action):
def __init__(self, agent, speed, next_action):
super(NebulaAction, self).__init__(agent)
self.speed = speed
self.next_action = next_action
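        # Three controllers: direction_pid steers to hold a required side/corner distance,
        # heading_pid keeps the gyro heading near zero, distance_pid drives the approach speed.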
self.direction_pid = PID(0.75, 0.2, 0.01, 1, 0)
self.heading_pid = PID(0.3, 0, 0.01, 1, 0, diff_method=angleDiference)
self.distance_pid = PID(0.75, 0.2, 0.01, 1, 0)
self.distance_error = 0
self.rover_speed = 0
self.required_corner_distance = 210
self.required_side_distance = 150
self.required_keeping_side_distance = 180
self.last_speed = 0
self.last_speed_time = 0
def obtainRoverSpeed(self):
self.rover_speed = self.rover.wheel_odos.averageSpeed() / 10
        self.rover_speed = 25  # NOTE: overrides the odometry-based estimate above with a fixed value
def keepHeading(self):
state = self.rover.getRoverState()
# Keeping heading
heading = state.heading.heading
heading_output = -self.heading_pid.process(0, heading)
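        # Turn the PID heading correction into the distance/turn-radius argument given to
        # rover.command(): negligible corrections effectively mean "straight" (32000),
        # otherwise the value shrinks as speed divided by the correction in radians,
        # floored in magnitude at HEADING_MIN_DISTANCE.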
if -MIN_ANGLE < heading_output < MIN_ANGLE:
distance = 32000
else:
heading_fix_rad = heading_output * math.pi / 180
distance = self.rover_speed / heading_fix_rad
if 0 <= distance < HEADING_MIN_DISTANCE:
distance = HEADING_MIN_DISTANCE
elif -HEADING_MIN_DISTANCE < distance < 0:
distance = -HEADING_MIN_DISTANCE
return distance, heading_output
def keepDirection(self, requested_angle, setpoint_distance, current_distance):
state = self.rover.getRoverState()
# Keeping direction
angle_output = self.direction_pid.process(setpoint_distance, current_distance)
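        # Map the lateral distance correction onto a steering offset: asin(correction / speed),
        # capped at +/- pi/4 (the later MAX_ANGLE check compares radians against degrees, so
        # the pi/4 caps are the effective bound), then converted to degrees and added to
        # the requested base angle.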
angle = 0
if abs(angle_output) < 1:
angle = 0
elif angle_output > 0 and angle_output > self.rover_speed:
angle = math.pi / 4
elif angle_output < 0 and angle_output < -self.rover_speed:
angle = -math.pi / 4
else:
try:
angle = math.asin(angle_output / self.rover_speed)
except BaseException as ex:
self.agent.log_always("Domain error")
if angle > MAX_ANGLE:
angle = MAX_ANGLE
elif angle < -MAX_ANGLE:
angle = -MAX_ANGLE
angle = int(requested_angle + angle * 180 / math.pi)
return angle, angle_output
def calculateSpeed(self, speed_time):
# Defining forward speed
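        # Speed follows the (negated) distance PID error, clamped in magnitude between
        # MINIMUM_SPEED and self.speed, and is cached per sensor timestamp so repeated
        # calls within the same radar frame return the same value.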
if self.last_speed_time == speed_time:
return self.last_speed
if self.distance_error <= 0:
speed = -self.distance_error
if speed > self.speed:
speed = self.speed
elif speed < MINIMUM_SPEED:
speed = MINIMUM_SPEED
else:
speed = -self.distance_error
if speed > -MINIMUM_SPEED:
speed = -MINIMUM_SPEED
elif speed < -self.speed:
speed = -self.speed
self.last_speed = speed
self.last_speed_time = speed_time
return speed
def start(self):
super(NebulaAction, self).start()
# self.distance_pid = PID(0.75, 0.15, 0.1, 1, 0)
# self.direction_pid = PID(0.20, 0, 0.005, 1, 0)
# self.heading_pid = PID(0.25, 0.0, 0.01, 1, 0, diff_method=angleDiference)
def end(self):
super(NebulaAction, self).end()
class GoToCornerKeepingHeadingAction(NebulaAction):
def __init__(self, agent, speed, angle, next_action=None):
super(GoToCornerKeepingHeadingAction, self).__init__(agent, speed, next_action)
self.angle = angle
self.prev_angle = angle - 45
self.next_angle = angle + 45
if self.prev_angle < 0:
self.prev_angle += 360
if self.next_angle >= 360:
self.next_angle -= 360
def hasRadar(self, state):
return state.radar.radar[self.prev_angle] > 1 and state.radar.radar[self.next_angle] > 1 and state.radar.radar[self.angle] > 1
def start(self):
super(GoToCornerKeepingHeadingAction, self).start()
pyroslib.publish("sensor/distance/focus", str(self.prev_angle) + " " + str(self.next_angle) + " " + str(self.angle))
self.distance_pid = PID(0.75, 0.15, 0.1, 1, 0)
self.direction_pid = PID(0.20, 0, 0.02, 0.4, 0)
self.heading_pid = PID(0.25, 0.0, 0.01, 0.5, 0, diff_method=angleDiference)
self.agent.log_info("Starting Corner with prev_angle={: 3d} angle={: 3d} next_angle={: 3d}".format(self.prev_angle, self.angle, self.next_angle))
def next(self):
state = self.rover.getRoverState()
if not self.hasRadar(state):
self.agent.log_info(
"waiting for radar prev_angle[{0}]={1} angle[{2}]={3} next_angle[{4}]={5}".format(
self.prev_angle, int(state.radar.radar[self.prev_angle]) if state.radar.radar[self.prev_angle] is not None else "-",
self.angle, int(state.radar.radar[self.angle]) if state.radar.radar[self.angle] is not None else "-",
self.next_angle, int(state.radar.radar[self.next_angle]) if state.radar.radar[self.next_angle] is not None else "-"))
return self
self.obtainRoverSpeed()
corner_distance = state.radar.radar[self.angle]
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
self.distance_error = self.distance_pid.process(self.required_corner_distance, corner_distance)
average_side = int((left_side + right_side) / 2)
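        # Ratio of the wider to the narrower side reading (informational only; it is not
        # used in the checks below).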
if left_side > right_side:
ratio = left_side / right_side
else:
ratio = right_side / left_side
if corner_distance < self.required_corner_distance:
self.agent.log_info(
"reached corner distance rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} heading={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side),
int(state.heading.heading)))
return self.next_action
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
if average_side < self.required_side_distance:
self.agent.log_info(
"reached side distance rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} heading={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side),
int(state.heading.heading)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
if self.hasRadar(state):
corner_distance = state.radar.radar[self.angle]
distance, heading_output = self.keepHeading()
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
angle, angle_output = self.keepDirection(self.angle, right_side, left_side)
speed = self.calculateSpeed(state.radar.time)
if corner_distance > MAX_CORNER_DISTANCE:
angle = self.angle
speed = CORNER_CROSS_SPEED
corner_distance = state.radar.radar[self.angle]
self.agent.log_info("rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} angle_fix={: 7.2f} heading={: 3d} heading_fix={: 7.2f} speed={: 3d} angle={: 3d} distance={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side), angle_output,
int(state.heading.heading), heading_output,
int(speed), int(angle), int(distance)))
# distance = 32000
self.rover.command(pyroslib.publish, speed, angle, distance)
def getActionName(self):
return "Corner[{:3d}]".format(self.angle)
class FollowWallKeepingHeadingAction(NebulaAction):
def __init__(self, agent, speed, wall_angle, direction_angle, next_action=None):
super(FollowWallKeepingHeadingAction, self).__init__(agent, speed, next_action)
self.wall_angle = wall_angle
self.direction_angle = direction_angle
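    # Projects a raw radar reading onto the wall normal (cosine of the heading offset)
    # to get the true perpendicular distance to the wall.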
@staticmethod
def calculateRealDistance(side_distance, side_angle):
if side_distance < 1:
return 0
if side_angle > 180:
side_angle = 360 - side_angle
side_angle = side_angle * math.pi / 180
return math.sin(math.pi / 2 - side_angle) * side_distance
def hasRadar(self, state):
return state.radar.radar[self.wall_angle] > 1 and state.radar.radar[self.direction_angle] > 1
def start(self):
super(FollowWallKeepingHeadingAction, self).start()
pyroslib.publish("sensor/distance/focus", str(self.wall_angle) + " " + str(self.direction_angle))
self.distance_pid = PID(0.85, 0.1, 0.2, 0.8, 0)
self.direction_pid = PID(0.20, 0, 0.01, 0.6, 0)
self.heading_pid = PID(0.25, 0.02, 0.0, 1, 0, diff_method=angleDiference)
def next(self):
state = self.rover.getRoverState()
if not self.hasRadar(state):
self.agent.log_info(
"waiting for radar wall_angle[{0}]={1} direction_angle[{2}]={3}".format(
self.wall_angle, int(state.radar.radar[self.wall_angle]) if state.radar.radar[self.wall_angle] is not None else "-",
self.direction_angle, int(state.radar.radar[self.direction_angle]) if state.radar.radar[self.direction_angle] is not None else "-"))
return self
self.obtainRoverSpeed()
wall_distance = state.radar.radar[self.wall_angle]
front_distance = state.radar.radar[self.direction_angle]
self.distance_error = self.distance_pid.process(self.required_side_distance, front_distance)
if front_distance < self.required_side_distance:
self.agent.log_info("reached distance rover_speed={: 4d} front_dist={: 5d} dist_error={: 9.2f} wall_dist={: 5d} heading={: 3d}".format(
int(self.rover_speed),
int(front_distance), self.distance_error,
int(wall_distance),
int(state.heading.heading)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
if self.hasRadar(state):
distance, heading_output = self.keepHeading()
wall_distance = self.calculateRealDistance(state.radar.radar[self.wall_angle], state.heading.heading)
if angleDiference(self.wall_angle, self.direction_angle) > 0:
angle, angle_output = self.keepDirection(self.direction_angle, wall_distance, self.required_keeping_side_distance)
else:
angle, angle_output = self.keepDirection(self.direction_angle, self.required_keeping_side_distance, wall_distance)
speed = self.calculateSpeed(state.radar.time)
front_distance = state.radar.radar[self.direction_angle]
self.agent.log_info("rover_speed={: 4d} front_dist={: 5d} dist_error={: 9.2f} wall_dist={: 5d} angle_fix={: 7.2f} heading={: 3d} heading_fix={: 7.2f} speed={: 3d} angle={: 3d} distance={: 3d}".format(
int(self.rover_speed),
int(front_distance), self.distance_error,
int(wall_distance), angle_output,
int(state.heading.heading), heading_output,
int(speed), int(angle), int(distance)))
self.rover.command(pyroslib.publish, speed, angle, distance)
def getActionName(self):
return "Wall[{0} on {1}]".format(self.direction_angle, self.wall_angle)
class CalculateRouteAction(Action):
def __init__(self, agent, speed, foundColours, next_action):
super(CalculateRouteAction, self).__init__(agent)
self.speed = speed
self.foundColours = foundColours
self.next_action = next_action
self.colour_order = ['red', 'blue', 'yellow', 'green']
log(LOG_LEVEL_INFO, "Colour order " + str(self.colour_order))
self.wait = 0
self.prepared_action = None
def calcualteAction(self, from_angle, to_colour):
to_angle = self.foundColours.found[to_colour]
colour_index = self.colour_order.index(to_colour)
if colour_index < 3:
following_action = self.calcualteAction(to_angle, self.colour_order[colour_index + 1])
else:
following_action = self.next_action
# follow_wall_speed = self.speed
# go_to_corner_speed = self.speed
follow_wall_speed = WALL_SPEED
go_to_corner_speed = CORNER_SPEED
if normaiseAngle(from_angle + 90) == to_angle:
wall_angle = normaiseAngle(from_angle + 45)
direction_angle = normaiseAngle(wall_angle + 90)
# return FollowWallKeepingHeadingAction(self.agent, self.speed, wall_angle, direction_angle, following_action)
return FollowWallKeepingHeadingAction(self.agent, follow_wall_speed, wall_angle, direction_angle, following_action)
elif normaiseAngle(from_angle - 90) == to_angle:
wall_angle = normaiseAngle(from_angle - 45)
direction_angle = normaiseAngle(wall_angle - 90)
# return FollowWallKeepingHeadingAction(self.agent, self.speed, wall_angle, direction_angle, following_action)
return FollowWallKeepingHeadingAction(self.agent, follow_wall_speed, wall_angle, direction_angle, following_action)
else:
# return GoToCornerKeepingHeadingAction(self, self.speed, to_angle, following_action)
return GoToCornerKeepingHeadingAction(self.agent, go_to_corner_speed, to_angle, following_action)
def next(self):
if self.wait == 0:
self.agent.log_info("Calculating route (1) -> Corner " + str(self.foundColours.found['red']))
initial_angle = self.foundColours.found['red']
following_action = self.calcualteAction(initial_angle, 'blue')
i = 1
a = following_action
while a != self.next_action:
i += 1
if isinstance(a, GoToCornerKeepingHeadingAction):
self.agent.log_info("Calculating route (" + str(i) + ") -> Corner " + str(a.angle))
a = a.next_action
else:
self.agent.log_info("Calculating route (" + str(i) + ") -> Follow wall " + str(a.wall_angle) + " to " + str(a.direction_angle))
a = a.next_action
self.prepared_action = GoToCornerKeepingHeadingAction(self.agent, self.speed, initial_angle, following_action)
self.wait = 2
self.rover.command(pyroslib.publish, 0, initial_angle, 32000)
self.agent.log_info("Wheels orientation {0} wait:{1:2d}".format(str(self.rover.current_state.wheel_orientations.orientations), self.wait))
else:
self.agent.log_info("Wheels orientation {0} wait:{1:2d}".format(str(self.rover.current_state.wheel_orientations.orientations), self.wait))
self.wait -= 1
if self.wait == 0:
return self.prepared_action
return self
def getActionName(self):
return "Calculate"
class StraightWheelsAction(Action):
def __init__(self, agent, next_action):
super(StraightWheelsAction, self).__init__(agent)
self.next_action = next_action
def next(self):
self.rover.command(pyroslib.publish, 0, 0, 3200)
return self.next_action
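# Top-level agent: subscribes to the four camera feeds, runs colour detection on each
# corner image and starts the action chain requested by the 'nebula', 'warmup', 'scan',
# 'combo' or 'walls' commands.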
class NebulaAgent(AgentClass):
def __init__(self):
super(NebulaAgent, self).__init__("nebula")
self.foundColours = CameraData()
def connected(self):
super(NebulaAgent, self).connected()
pyroslib.subscribeBinary("camera/raw", self.handleCameraMain)
pyroslib.subscribeBinary("camera/wheels/raw", self.handleCameraWheels)
pyroslib.subscribeBinary("camera/camera1/raw", self.handleCamera1)
pyroslib.subscribeBinary("camera/camera2/raw", self.handleCamera2)
pyroslib.publish("camera/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/wheels/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera1/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera2/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
def start(self, data):
if not self.running:
if data[0] == 'nebula':
super(NebulaAgent, self).start(data)
# speed = int(data[1])
speed = 160
speed = 200
calculate_route_action = CalculateRouteAction(self, speed, self.foundColours, self.stop_action)
wait_camera_data_action = WaitCameraData(self, calculate_route_action)
wait_sensor_data_action = WaitSensorData(self, wait_camera_data_action)
# self.nextAction(wait_sensor_data_action)
self.nextAction(wait_camera_data_action)
elif data[0] == 'warmup':
# super(NebulaAgent, self).start(data)
self.nextAction(StraightWheelsAction(self, WaitSensorData(self, WarmupAction(self))))
elif data[0] == 'scan':
super(NebulaAgent, self).start(data)
self.nextAction(WaitCameraData(self, self.stop_action))
elif data[0] == 'combo':
super(NebulaAgent, self).start(data)
combo = data[1]
# go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, self.stop_action)
# follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 0, go_to_corner2_action)
# go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 135, follow_right_wall_action)
# follow_left_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 270, 0, go_to_corner1_action)
# wait_sensor_data_action = WaitSensorData(self, follow_left_wall_action)
if combo == '1':
                    # Combo 1: corner 225 -> corner 45 -> right wall towards 180 -> corner 315
go_to_corner3_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 315, self.stop_action)
follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 180, go_to_corner3_action)
go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 45, follow_right_wall_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, go_to_corner2_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
elif combo == '2':
                    # Combo 2: corner 225 -> left wall towards 0 -> corner 135 -> right wall towards 0
follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 0, self.stop_action)
go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 135, follow_right_wall_action)
follow_left_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 270, 0, go_to_corner2_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, follow_left_wall_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
elif combo == '3':
                    # Combo 3: corner 225 -> left wall towards 0 -> top wall towards 90 -> right wall towards 180
follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 180, self.stop_action)
follow_top_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 0, 90, follow_right_wall_action)
follow_left_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 270, 0, follow_top_wall_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, follow_left_wall_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
else:
wait_sensor_data_action = WaitSensorData(self, self.stop_action)
self.nextAction(wait_sensor_data_action)
elif data[0] == 'walls':
super(NebulaAgent, self).start(data)
follow_bottom_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 180, 270, self.stop_action)
follow_right_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 90, 180, follow_bottom_wall_action)
follow_top_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 0, 90, follow_right_wall_action)
follow_left_wall_action = FollowWallKeepingHeadingAction(self, WALL_SPEED, 270, 0, follow_top_wall_action)
wait_sensor_data_action = WaitSensorData(self, follow_left_wall_action)
self.nextAction(wait_sensor_data_action)
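    # Common camera callback: 'source' is the corner angle (45/135/225/315) the camera is
    # looking at; keeps fetching frames until all four colours have been found.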
def handleCameraData(self, topic, message, source):
# now = time.time()
# delta = now - lastProcessed
# lastProcessed = now
pilImage = self._toPILImage(message)
openCVImage = numpy.array(pilImage)
result, value = self.processImageCV(openCVImage)
self.log_info("For " + str(source) + " got " + ("None" if result is None else str(result)) + " for value " + str(value))
if result is not None:
self.foundColours.setData(result, source)
if not self.foundColours.hasAll():
self.log_info("Found " + self.foundColours.foundAsString() + " but not finished yet as " + self.foundColours.missingColours() + " " + ("are" if len(self.foundColours.missingColours()) > 1 else "is") + " still missing.")
if self.running:
pyroslib.publish(topic + "/fetch", "")
pyroslib.publish("nebula/imagedetails", "working: " + self.foundColours.foundAsString())
else:
self.log_info("So far " + self.foundColours.foundAsString() + " and finishing...")
stopped = True
pyroslib.publish("nebula/imagedetails", "found: " + self.foundColours.foundAsString())
def handleCameraMain(self, topic, message, groups):
self.handleCameraData(topic, message, 225)
def handleCameraWheels(self, topic, message, groups):
self.handleCameraData(topic, message, 45)
def handleCamera1(self, topic, message, groups):
self.handleCameraData(topic, message, 315)
def handleCamera2(self, topic, message, groups):
self.handleCameraData(topic, message, 135)
@staticmethod
def _toPILImage(imageBytes):
pilImage = PIL.Image.frombytes("RGB", size, imageBytes)
return pilImage
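    # Detects the dominant hue inside a fixed sample rectangle of the frame and maps it
    # to red/yellow/green/blue (or None if unrecognised).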
def processImageCV(self, image):
def findColourNameHSV(hChannel, contour):
mask = numpy.zeros(hChannel.shape[:2], dtype="uint8")
cv2.drawContours(mask, [contour], -1, 255, -1)
mask = cv2.erode(mask, None, iterations=2)
maskAnd = hChannel.copy()
cv2.bitwise_and(hChannel, mask, maskAnd)
pyroslib.publish("nebula/processed", PIL.Image.fromarray(cv2.cvtColor(maskAnd, cv2.COLOR_GRAY2RGB)).tobytes("raw"))
self.log_debug("Published mask ")
hist = cv2.calcHist([hChannel], [0], mask, [255], [0, 255], False)
value = numpy.argmax(hist)
if value < 19 or value > 145:
return "red", value
elif 19 <= value <= 34:
return "yellow", value
elif 40 <= value <= 76:
return "green", value
elif 90 <= value <= 138:
return "blue", value
else:
return None, value
blurred = cv2.GaussianBlur(image, (5, 5), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_RGB2HSV)
hueChannel, satChannel, valChannel = cv2.split(hsv)
countours = [numpy.array([[25, 20], [55, 20], [55, 44], [25, 44]], dtype=numpy.int32)]
c = countours[0]
result, value = findColourNameHSV(hueChannel, c)
if result is not None:
def sendResult(colour):
# pil = PIL.Image.fromarray(cv2.cvtColor(hueChannel, cv2.COLOR_GRAY2RGB))
pil = PIL.Image.fromarray(image)
draw = ImageDraw.Draw(pil)
draw.rectangle(((25, 20), (55, 44)), outline=colour)
pyroslib.publish("nebula/processed", pil.tobytes("raw"))
if result == "red":
sendResult("#f00")
self.log_debug("Published hue red image")
elif result == "yellow":
sendResult("#ff0")
self.log_debug("Published hue yellow image")
elif result == "green":
sendResult("#0f0")
self.log_debug("Published hue green image")
elif result == "blue":
sendResult("#00f")
self.log_debug("Published hue blue image")
else:
cv2.drawContours(hueChannel, countours, -1, (255, 255, 255), 2)
pyroslib.publish("nebula/processed", PIL.Image.fromarray(cv2.cvtColor(hueChannel, cv2.COLOR_GRAY2RGB)).tobytes("raw"))
self.log_debug("Published unrecognised hue image")
return result, value
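# Entry point: wire up MQTT-backed rover state logging, connect the agent to pyroslib
# and run the main loop.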
if __name__ == "__main__":
try:
print("Starting Nebula agent...")
print(" creating logger...")
state_logger = RoverState.defineLogger(telemetry.MQTTLocalPipeTelemetryLogger('rover-state'))
nebula = NebulaAgent()
pyroslib.init("nebula-agent", unique=True, onConnected=nebula.connected)
print(" initialising logger...")
state_logger.init()
nebula.register_logger()
print("Started Nebula agent.")
pyroslib.forever(0.1, nebula.mainLoop)
except Exception as ex:
print("ERROR: " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
| [
"challenge_utils.WaitSensorData",
"traceback.format_tb",
"numpy.array",
"PIL.ImageDraw.Draw",
"pyroslib.forever",
"pyroslib.subscribeBinary",
"challenge_utils.PID",
"cv2.calcHist",
"cv2.erode",
"rover.angleDiference",
"cv2.drawContours",
"pyroslib.init",
"numpy.argmax",
"cv2.cvtColor",
"cv2.split",
"PIL.Image.frombytes",
"rover.normaiseAngle",
"cv2.GaussianBlur",
"time.time",
"pyroslib.publish",
"PIL.Image.fromarray",
"math.asin",
"cv2.bitwise_and",
"numpy.zeros",
"telemetry.MQTTLocalPipeTelemetryLogger",
"challenge_utils.WarmupAction",
"math.sin"
] | [((2016, 2027), 'time.time', 'time.time', ([], {}), '()\n', (2025, 2027), False, 'import time\n'), ((2483, 2523), 'pyroslib.publish', 'pyroslib.publish', (['"""camera/raw/fetch"""', '""""""'], {}), "('camera/raw/fetch', '')\n", (2499, 2523), False, 'import pyroslib\n'), ((2532, 2579), 'pyroslib.publish', 'pyroslib.publish', (['"""camera/wheels/raw/fetch"""', '""""""'], {}), "('camera/wheels/raw/fetch', '')\n", (2548, 2579), False, 'import pyroslib\n'), ((2588, 2636), 'pyroslib.publish', 'pyroslib.publish', (['"""camera/camera1/raw/fetch"""', '""""""'], {}), "('camera/camera1/raw/fetch', '')\n", (2604, 2636), False, 'import pyroslib\n'), ((2645, 2693), 'pyroslib.publish', 'pyroslib.publish', (['"""camera/camera2/raw/fetch"""', '""""""'], {}), "('camera/camera2/raw/fetch', '')\n", (2661, 2693), False, 'import pyroslib\n'), ((3545, 3571), 'challenge_utils.PID', 'PID', (['(0.75)', '(0.2)', '(0.01)', '(1)', '(0)'], {}), '(0.75, 0.2, 0.01, 1, 0)\n', (3548, 3571), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((3599, 3650), 'challenge_utils.PID', 'PID', (['(0.3)', '(0)', '(0.01)', '(1)', '(0)'], {'diff_method': 'angleDiference'}), '(0.3, 0, 0.01, 1, 0, diff_method=angleDiference)\n', (3602, 3650), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((3679, 3705), 'challenge_utils.PID', 'PID', (['(0.75)', '(0.2)', '(0.01)', '(1)', '(0)'], {}), '(0.75, 0.2, 0.01, 1, 0)\n', (3682, 3705), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((7547, 7573), 'challenge_utils.PID', 'PID', (['(0.75)', '(0.15)', '(0.1)', '(1)', '(0)'], {}), '(0.75, 0.15, 0.1, 1, 0)\n', (7550, 7573), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((7603, 7628), 'challenge_utils.PID', 'PID', (['(0.2)', '(0)', '(0.02)', '(0.4)', '(0)'], {}), '(0.2, 0, 0.02, 0.4, 0)\n', (7606, 7628), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((7657, 7713), 'challenge_utils.PID', 'PID', (['(0.25)', '(0.0)', '(0.01)', '(0.5)', '(0)'], {'diff_method': 'angleDiference'}), '(0.25, 0.0, 0.01, 0.5, 0, diff_method=angleDiference)\n', (7660, 7713), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((12574, 12601), 'challenge_utils.PID', 'PID', (['(0.85)', '(0.1)', '(0.2)', '(0.8)', '(0)'], {}), '(0.85, 0.1, 0.2, 0.8, 0)\n', (12577, 12601), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((12631, 12656), 'challenge_utils.PID', 'PID', (['(0.2)', '(0)', '(0.01)', '(0.6)', '(0)'], {}), '(0.2, 0, 0.01, 0.6, 0)\n', (12634, 12656), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((12685, 12739), 'challenge_utils.PID', 'PID', (['(0.25)', '(0.02)', '(0.0)', '(1)', '(0)'], {'diff_method': 'angleDiference'}), '(0.25, 0.02, 0.0, 1, 0, diff_method=angleDiference)\n', (12688, 12739), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((19608, 19669), 'pyroslib.subscribeBinary', 'pyroslib.subscribeBinary', (['"""camera/raw"""', 'self.handleCameraMain'], {}), "('camera/raw', self.handleCameraMain)\n", (19632, 19669), False, 'import pyroslib\n'), ((19678, 19748), 'pyroslib.subscribeBinary', 'pyroslib.subscribeBinary', (['"""camera/wheels/raw"""', 'self.handleCameraWheels'], {}), "('camera/wheels/raw', self.handleCameraWheels)\n", (19702, 19748), False, 
'import pyroslib\n'), ((19757, 19823), 'pyroslib.subscribeBinary', 'pyroslib.subscribeBinary', (['"""camera/camera1/raw"""', 'self.handleCamera1'], {}), "('camera/camera1/raw', self.handleCamera1)\n", (19781, 19823), False, 'import pyroslib\n'), ((19832, 19898), 'pyroslib.subscribeBinary', 'pyroslib.subscribeBinary', (['"""camera/camera2/raw"""', 'self.handleCamera2'], {}), "('camera/camera2/raw', self.handleCamera2)\n", (19856, 19898), False, 'import pyroslib\n'), ((25077, 25098), 'numpy.array', 'numpy.array', (['pilImage'], {}), '(pilImage)\n', (25088, 25098), False, 'import numpy\n'), ((26567, 26611), 'PIL.Image.frombytes', 'PIL.Image.frombytes', (['"""RGB"""', 'size', 'imageBytes'], {}), "('RGB', size, imageBytes)\n", (26586, 26611), False, 'import PIL\n'), ((27666, 27700), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(0)'], {}), '(image, (5, 5), 0)\n', (27682, 27700), False, 'import cv2\n'), ((27716, 27756), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred', 'cv2.COLOR_RGB2HSV'], {}), '(blurred, cv2.COLOR_RGB2HSV)\n', (27728, 27756), False, 'import cv2\n'), ((27802, 27816), 'cv2.split', 'cv2.split', (['hsv'], {}), '(hsv)\n', (27811, 27816), False, 'import cv2\n'), ((29485, 29557), 'pyroslib.init', 'pyroslib.init', (['"""nebula-agent"""'], {'unique': '(True)', 'onConnected': 'nebula.connected'}), "('nebula-agent', unique=True, onConnected=nebula.connected)\n", (29498, 29557), False, 'import pyroslib\n'), ((29711, 29749), 'pyroslib.forever', 'pyroslib.forever', (['(0.1)', 'nebula.mainLoop'], {}), '(0.1, nebula.mainLoop)\n', (29727, 29749), False, 'import pyroslib\n'), ((12173, 12207), 'math.sin', 'math.sin', (['(math.pi / 2 - side_angle)'], {}), '(math.pi / 2 - side_angle)\n', (12181, 12207), False, 'import math\n'), ((16487, 16517), 'rover.normaiseAngle', 'normaiseAngle', (['(from_angle + 90)'], {}), '(from_angle + 90)\n', (16500, 16517), False, 'from rover import RoverState, normaiseAngle, angleDiference\n'), ((16556, 16586), 'rover.normaiseAngle', 'normaiseAngle', (['(from_angle + 45)'], {}), '(from_angle + 45)\n', (16569, 16586), False, 'from rover import RoverState, normaiseAngle, angleDiference\n'), ((16617, 16647), 'rover.normaiseAngle', 'normaiseAngle', (['(wall_angle + 90)'], {}), '(wall_angle + 90)\n', (16630, 16647), False, 'from rover import RoverState, normaiseAngle, angleDiference\n'), ((26744, 26790), 'numpy.zeros', 'numpy.zeros', (['hChannel.shape[:2]'], {'dtype': '"""uint8"""'}), "(hChannel.shape[:2], dtype='uint8')\n", (26755, 26790), False, 'import numpy\n'), ((26803, 26849), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[contour]', '(-1)', '(255)', '(-1)'], {}), '(mask, [contour], -1, 255, -1)\n', (26819, 26849), False, 'import cv2\n'), ((26869, 26904), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (26878, 26904), False, 'import cv2\n'), ((26956, 26996), 'cv2.bitwise_and', 'cv2.bitwise_and', (['hChannel', 'mask', 'maskAnd'], {}), '(hChannel, mask, maskAnd)\n', (26971, 26996), False, 'import cv2\n'), ((27192, 27251), 'cv2.calcHist', 'cv2.calcHist', (['[hChannel]', '[0]', 'mask', '[255]', '[0, 255]', '(False)'], {}), '([hChannel], [0], mask, [255], [0, 255], False)\n', (27204, 27251), False, 'import cv2\n'), ((27273, 27291), 'numpy.argmax', 'numpy.argmax', (['hist'], {}), '(hist)\n', (27285, 27291), False, 'import numpy\n'), ((27839, 27911), 'numpy.array', 'numpy.array', (['[[25, 20], [55, 20], [55, 44], [25, 44]]'], {'dtype': 'numpy.int32'}), '([[25, 20], [55, 20], [55, 44], [25, 44]], 
dtype=numpy.int32)\n', (27850, 27911), False, 'import numpy\n'), ((28934, 28997), 'cv2.drawContours', 'cv2.drawContours', (['hueChannel', 'countours', '(-1)', '(255, 255, 255)', '(2)'], {}), '(hueChannel, countours, -1, (255, 255, 255), 2)\n', (28950, 28997), False, 'import cv2\n'), ((29389, 29442), 'telemetry.MQTTLocalPipeTelemetryLogger', 'telemetry.MQTTLocalPipeTelemetryLogger', (['"""rover-state"""'], {}), "('rover-state')\n", (29427, 29442), False, 'import telemetry\n'), ((14284, 14337), 'rover.angleDiference', 'angleDiference', (['self.wall_angle', 'self.direction_angle'], {}), '(self.wall_angle, self.direction_angle)\n', (14298, 14337), False, 'from rover import RoverState, normaiseAngle, angleDiference\n'), ((16912, 16942), 'rover.normaiseAngle', 'normaiseAngle', (['(from_angle - 90)'], {}), '(from_angle - 90)\n', (16925, 16942), False, 'from rover import RoverState, normaiseAngle, angleDiference\n'), ((16981, 17011), 'rover.normaiseAngle', 'normaiseAngle', (['(from_angle - 45)'], {}), '(from_angle - 45)\n', (16994, 17011), False, 'from rover import RoverState, normaiseAngle, angleDiference\n'), ((17042, 17072), 'rover.normaiseAngle', 'normaiseAngle', (['(wall_angle - 90)'], {}), '(wall_angle - 90)\n', (17055, 17072), False, 'from rover import RoverState, normaiseAngle, angleDiference\n'), ((20795, 20840), 'challenge_utils.WaitSensorData', 'WaitSensorData', (['self', 'wait_camera_data_action'], {}), '(self, wait_camera_data_action)\n', (20809, 20840), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((25694, 25732), 'pyroslib.publish', 'pyroslib.publish', (["(topic + '/fetch')", '""""""'], {}), "(topic + '/fetch', '')\n", (25710, 25732), False, 'import pyroslib\n'), ((28176, 28202), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['image'], {}), '(image)\n', (28195, 28202), False, 'import PIL\n'), ((28227, 28246), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['pil'], {}), '(pil)\n', (28241, 28246), False, 'from PIL import ImageDraw\n'), ((5331, 5373), 'math.asin', 'math.asin', (['(angle_output / self.rover_speed)'], {}), '(angle_output / self.rover_speed)\n', (5340, 5373), False, 'import math\n'), ((29830, 29867), 'traceback.format_tb', 'traceback.format_tb', (['ex.__traceback__'], {}), '(ex.__traceback__)\n', (29849, 29867), False, 'import traceback\n'), ((27067, 27108), 'cv2.cvtColor', 'cv2.cvtColor', (['maskAnd', 'cv2.COLOR_GRAY2RGB'], {}), '(maskAnd, cv2.COLOR_GRAY2RGB)\n', (27079, 27108), False, 'import cv2\n'), ((29068, 29112), 'cv2.cvtColor', 'cv2.cvtColor', (['hueChannel', 'cv2.COLOR_GRAY2RGB'], {}), '(hueChannel, cv2.COLOR_GRAY2RGB)\n', (29080, 29112), False, 'import cv2\n'), ((2904, 2915), 'time.time', 'time.time', ([], {}), '()\n', (2913, 2915), False, 'import time\n'), ((21131, 21149), 'challenge_utils.WarmupAction', 'WarmupAction', (['self'], {}), '(self)\n', (21143, 21149), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((22612, 22654), 'challenge_utils.WaitSensorData', 'WaitSensorData', (['self', 'go_to_corner1_action'], {}), '(self, go_to_corner1_action)\n', (22626, 22654), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((24753, 24798), 'challenge_utils.WaitSensorData', 'WaitSensorData', (['self', 'follow_left_wall_action'], {}), '(self, follow_left_wall_action)\n', (24767, 24798), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((23261, 23303), 'challenge_utils.WaitSensorData', 
'WaitSensorData', (['self', 'go_to_corner1_action'], {}), '(self, go_to_corner1_action)\n', (23275, 23303), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((23916, 23958), 'challenge_utils.WaitSensorData', 'WaitSensorData', (['self', 'go_to_corner1_action'], {}), '(self, go_to_corner1_action)\n', (23930, 23958), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n'), ((24027, 24065), 'challenge_utils.WaitSensorData', 'WaitSensorData', (['self', 'self.stop_action'], {}), '(self, self.stop_action)\n', (24041, 24065), False, 'from challenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID\n')] |
# Generated by Django 3.0.3 on 2020-03-22 22:03
import django.db.models.deletion
from django.db import migrations, models
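# Alters StockOut.customer to an optional (blank/null) FK to users.Customer with the
# 'supplierstockout' reverse relation.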
class Migration(migrations.Migration):
dependencies = [
('users', '0010_remove_supplier_is_publish'),
('stocks', '0021_auto_20200323_0503'),
]
operations = [
migrations.AlterField(
model_name='stockout',
name='customer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='supplierstockout', to='users.Customer'),
),
]
| [
"django.db.models.ForeignKey"
] | [((425, 569), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""supplierstockout"""', 'to': '"""users.Customer"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='supplierstockout', to='users.Customer')\n", (442, 569), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
#
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Test PhotoStore GET and PUT.
"""
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import base64
import hashlib
import json
import time
from functools import partial
from viewfinder.backend.base import util
from viewfinder.backend.base.message import Message
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.friend import Friend
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.user import User
from viewfinder.backend.www import json_schema
from viewfinder.backend.www.test import service_base_test
class PhotoStoreTestCase(service_base_test.ServiceBaseTestCase):
def setUp(self):
super(PhotoStoreTestCase, self).setUp()
self._CreateSimpleTestAssets()
def testUploadAndGetPut(self):
"""Upload a photo, PUT the photo image data, then access it in
various ways.
"""
episode_id = self._episode_id
photo_id = self._photo_ids[0]
orig_image_data = 'original image data' # Same as used in self._UploadEpisode
self._PutPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.o', orig_image_data)
self._PutPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.f', 'full image data')
self._PutPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.t', 'thumbnail image data')
# Test legit downloads.
self._GetPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.o')
self._GetPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.f')
self._GetPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.t')
# Try get and put with no cookie.
self._PutPhotoAndVerify(None, 401, episode_id, photo_id, '.o', orig_image_data)
self._GetPhotoAndVerify(None, 401, episode_id, photo_id, '.o')
# Try get and put of missing photo.
self._PutPhotoAndVerify(self._cookie, 404, episode_id, 'p-unk', '.m', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 404, episode_id, 'p-unk', '.m')
# Try get and put without permission.
self._PutPhotoAndVerify(self._cookie2, 404, episode_id, photo_id, '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie2, 404, episode_id, photo_id, '.o')
# Omit the Content-MD5 header.
response = self._PutPhoto(self._cookie, episode_id, photo_id, '.o', orig_image_data)
assert response.code == 400, response
# Try to use a non well-formed Content-MD5 header.
response = self._PutPhoto(self._cookie, episode_id, photo_id, '.o', orig_image_data,
content_md5='not well-formed')
assert response.code == 400, response
# Try to use a Content-MD5 header that does not match the data.
response = self._PutPhoto(self._cookie, episode_id, photo_id, '.o', orig_image_data,
content_md5=util.ComputeMD5Base64('mismatched md5'))
assert response.code == 400, response
# Try put with user that is not episode owner.
new_vp_id, new_ep_ids = self._tester.ShareNew(self._cookie,
[(episode_id, [photo_id])],
[self._user2.user_id])
self._PutPhotoAndVerify(self._cookie2, 403, new_ep_ids[0], photo_id, '.o', orig_image_data)
# Try get of photo using removed follower.
self._tester.RemoveFollowers(self._cookie2, new_vp_id, [self._user2.user_id])
self._GetPhotoAndVerify(self._cookie2, 404, new_ep_ids[0], photo_id, '.o')
# Try get and put of unshared photo.
self._tester.Unshare(self._cookie, new_vp_id, [(new_ep_ids[0], [photo_id])])
self._PutPhotoAndVerify(self._cookie, 403, new_ep_ids[0], photo_id, '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 403, new_ep_ids[0], photo_id, '.o')
# Try get and put of photo that has been shared again in order to override unshare.
self._tester.ShareExisting(self._cookie, new_vp_id, [(self._episode_id, self._photo_ids)])
self._PutPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o')
# Try get and put of hidden photo.
self._tester.HidePhotos(self._cookie, [(self._episode_id, self._photo_ids)])
self._PutPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o')
# Try get and put of removed photo.
self._tester.RemovePhotos(self._cookie, [(self._episode_id, self._photo_ids)])
self._PutPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o')
def testErrorResponse(self):
"""Test that error response is always in JSON format."""
response = self._PutPhoto(self._cookie, 'unk', 'unk', '.o', '')
self.assertEqual(json.loads(response.body), {"error": {"message": "Missing Content-MD5 header."}})
response = self._GetPhoto(self._cookie, 'unk', 'unk', '.o')
self.assertEqual(json.loads(response.body),
{u'error': {u'message': u'Photo was not found or you do not have permission to view it.'}})
def testReUpload(self):
"""Upload a new photo and attempt to re-upload using If-None-Match
header to simulate a phone reinstall where the client uses the
/photos/<photo_id> interface to get a redirect to a PUT URL. In
the case of the photo existing, the Etag should match and result
in a 304 response, saving the client the upload bandwidth.
"""
full_image_data = 'full image data'
for photo_id in self._photo_ids:
response = self._PutPhoto(self._cookie, self._episode_id, photo_id, '.f', full_image_data,
content_md5=util.ComputeMD5Base64(full_image_data),
etag=util.ComputeMD5Hex(full_image_data))
self.assertEqual(response.code, 200)
for photo_id in self._photo_ids:
response = self._PutPhoto(self._cookie, self._episode_id, photo_id, '.f', full_image_data,
content_md5=util.ComputeMD5Base64(full_image_data),
etag='"%s"' % util.ComputeMD5Hex(full_image_data))
self.assertEqual(response.code, 304)
response = self._PutPhoto(self._cookie, self._episode_id, photo_id, '.f', full_image_data,
content_md5=util.ComputeMD5Base64(full_image_data),
etag='*')
self.assertEqual(response.code, 304)
def testUploadMismatch(self):
"""Upload photo image data with a different MD5 than was originally
provided to upload_episode. Because the photo image data does not
yet exist, the metadata should be overwritten with the new values.
Then try to upload a different MD5 again, expecting an error this
time.
"""
for attr_name, suffix, image_data in [('tn_md5', '.t', 'new thumbnail image data'),
('med_md5', '.m', 'new medium image data'),
('full_md5', '.f', 'new full image data'),
('orig_md5', '.o', 'new original image data')]:
# Expect success on first upload.
response = self._PutPhoto(self._cookie, self._episode_id, self._photo_ids[0], suffix,
image_data, content_md5=util.ComputeMD5Base64(image_data),
etag=util.ComputeMD5Hex(image_data))
self.assertEqual(response.code, 200)
# Validate that the photo's MD5 was updated.
ph_dict = {'photo_id': self._photo_ids[0],
attr_name: util.ComputeMD5Hex(image_data)}
self._validator.ValidateUpdateDBObject(Photo, **ph_dict)
# Expect failure on second upload with different MD5.
new_image_data = 'really ' + image_data
response = self._PutPhoto(self._cookie, self._episode_id, self._photo_ids[0], suffix,
new_image_data, content_md5=util.ComputeMD5Base64(new_image_data),
etag=util.ComputeMD5Hex(new_image_data))
self.assertEqual(response.code, 400)
def testProspectiveCookie(self):
"""Gets photos using a prospective user cookie."""
orig_image_data = 'original image data' # Same as used in self._UploadEpisode
self._PutPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o', orig_image_data)
prospective_user, vp_id, ep_id = self._CreateProspectiveUser()
prospective_cookie = self._tester.GetSecureUserCookie(user_id=prospective_user.user_id,
device_id=prospective_user.webapp_dev_id,
user_name=None,
viewpoint_id=vp_id)
self._GetPhotoAndVerify(prospective_cookie, 200, ep_id, self._photo_ids[0], '.o')
# Share again to the prospective user to create a second viewpoint.
vp_id2, ep_ids2 = self._tester.ShareNew(self._cookie,
[(self._episode_id, self._photo_ids)],
['Email:<EMAIL>'])
# Now try to get the photo using the prospective cookie that is keyed to the first viewpoint.
response = self._GetPhoto(prospective_cookie, ep_ids2[0], self._photo_ids[0], '.o')
self.assertEqual(response.code, 403)
def _GetPhotoAndVerify(self, user_cookie, exp_code, episode_id, photo_id, suffix):
"""Call _GetPhoto and verify return code equals "exp_code"."""
response = self._GetPhoto(user_cookie, episode_id, photo_id, suffix)
self.assertEqual(response.code, exp_code)
if response.code == 200:
self.assertEqual(response.headers['Cache-Control'], 'private,max-age=31536000')
return response
def _PutPhotoAndVerify(self, user_cookie, exp_code, episode_id, photo_id, suffix, image_data):
"""Call _PutPhoto and verify return code equals "exp_code"."""
response = self._PutPhoto(user_cookie, episode_id, photo_id, suffix, image_data,
content_md5=util.ComputeMD5Base64(image_data))
self.assertEqual(response.code, exp_code)
return response
def _GetPhoto(self, user_cookie, episode_id, photo_id, suffix):
"""Sends a GET request to the photo store URL for the specified
photo and user cookie.
"""
return self._tester.GetPhotoImage(user_cookie, episode_id, photo_id, suffix)
def _PutPhoto(self, user_cookie, episode_id, photo_id, suffix, image_data,
etag=None, content_md5=None):
"""Sends a PUT request to the photo store URL for the specified
    photo and user cookie. The PUT request body is set to "image_data".
"""
return self._tester.PutPhotoImage(user_cookie, episode_id, photo_id, suffix, image_data,
etag=etag, content_md5=content_md5)
| [
"json.loads",
"viewfinder.backend.base.util.ComputeMD5Base64",
"viewfinder.backend.base.util.ComputeMD5Hex"
] | [((5026, 5051), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (5036, 5051), False, 'import json\n'), ((5194, 5219), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (5204, 5219), False, 'import json\n'), ((2867, 2906), 'viewfinder.backend.base.util.ComputeMD5Base64', 'util.ComputeMD5Base64', (['"""mismatched md5"""'], {}), "('mismatched md5')\n", (2888, 2906), False, 'from viewfinder.backend.base import util\n'), ((7842, 7872), 'viewfinder.backend.base.util.ComputeMD5Hex', 'util.ComputeMD5Hex', (['image_data'], {}), '(image_data)\n', (7860, 7872), False, 'from viewfinder.backend.base import util\n'), ((10334, 10367), 'viewfinder.backend.base.util.ComputeMD5Base64', 'util.ComputeMD5Base64', (['image_data'], {}), '(image_data)\n', (10355, 10367), False, 'from viewfinder.backend.base import util\n'), ((5926, 5964), 'viewfinder.backend.base.util.ComputeMD5Base64', 'util.ComputeMD5Base64', (['full_image_data'], {}), '(full_image_data)\n', (5947, 5964), False, 'from viewfinder.backend.base import util\n'), ((6003, 6038), 'viewfinder.backend.base.util.ComputeMD5Hex', 'util.ComputeMD5Hex', (['full_image_data'], {}), '(full_image_data)\n', (6021, 6038), False, 'from viewfinder.backend.base import util\n'), ((6262, 6300), 'viewfinder.backend.base.util.ComputeMD5Base64', 'util.ComputeMD5Base64', (['full_image_data'], {}), '(full_image_data)\n', (6283, 6300), False, 'from viewfinder.backend.base import util\n'), ((6570, 6608), 'viewfinder.backend.base.util.ComputeMD5Base64', 'util.ComputeMD5Base64', (['full_image_data'], {}), '(full_image_data)\n', (6591, 6608), False, 'from viewfinder.backend.base import util\n'), ((7566, 7599), 'viewfinder.backend.base.util.ComputeMD5Base64', 'util.ComputeMD5Base64', (['image_data'], {}), '(image_data)\n', (7587, 7599), False, 'from viewfinder.backend.base import util\n'), ((7638, 7668), 'viewfinder.backend.base.util.ComputeMD5Hex', 'util.ComputeMD5Hex', (['image_data'], {}), '(image_data)\n', (7656, 7668), False, 'from viewfinder.backend.base import util\n'), ((8196, 8233), 'viewfinder.backend.base.util.ComputeMD5Base64', 'util.ComputeMD5Base64', (['new_image_data'], {}), '(new_image_data)\n', (8217, 8233), False, 'from viewfinder.backend.base import util\n'), ((8272, 8306), 'viewfinder.backend.base.util.ComputeMD5Hex', 'util.ComputeMD5Hex', (['new_image_data'], {}), '(new_image_data)\n', (8290, 8306), False, 'from viewfinder.backend.base import util\n'), ((6348, 6383), 'viewfinder.backend.base.util.ComputeMD5Hex', 'util.ComputeMD5Hex', (['full_image_data'], {}), '(full_image_data)\n', (6366, 6383), False, 'from viewfinder.backend.base import util\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCustomerFeedback(unittest.TestCase):
def test_customer_feedback(self):
test_create_feedback = create_feedback()
test_get_feedback = get_feedback()
self.assertEquals(test_create_feedback.name, test_get_feedback.name)
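# Creates a Customer Feedback document dated today, reusing an existing entry for the
# same date if one is already present.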
def create_feedback():
feedback = frappe.get_doc({
"doctype": "Customer Feedback",
"date": ""+ frappe.utils.nowdate() +""
})
feedback_exist = frappe.get_list("Customer Feedback", filters={"date": ""+ feedback.date +""})
if len(feedback_exist) == 0:
feedback.insert()
return feedback
else:
return feedback_exist[0]
def get_feedback():
feedback = frappe.get_list("Customer Feedback")
return feedback[0] | [
"frappe.get_list",
"frappe.utils.nowdate"
] | [((545, 624), 'frappe.get_list', 'frappe.get_list', (['"""Customer Feedback"""'], {'filters': "{'date': '' + feedback.date + ''}"}), "('Customer Feedback', filters={'date': '' + feedback.date + ''})\n", (560, 624), False, 'import frappe\n'), ((758, 794), 'frappe.get_list', 'frappe.get_list', (['"""Customer Feedback"""'], {}), "('Customer Feedback')\n", (773, 794), False, 'import frappe\n'), ((496, 518), 'frappe.utils.nowdate', 'frappe.utils.nowdate', ([], {}), '()\n', (516, 518), False, 'import frappe\n')] |
# Generated by Django 2.0.2 on 2018-06-04 18:15
from django.db import migrations, models
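# Adds optional contract_end_date and expiration_8a_date timestamps to PoolMembership.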
class Migration(migrations.Migration):
dependencies = [
('vendors', '0033_auto_20180319_1648'),
]
operations = [
migrations.AddField(
model_name='poolmembership',
name='contract_end_date',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='poolmembership',
name='expiration_8a_date',
field=models.DateTimeField(null=True),
),
]
| [
"django.db.models.DateTimeField"
] | [((353, 384), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (373, 384), False, 'from django.db import migrations, models\n'), ((524, 555), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (544, 555), False, 'from django.db import migrations, models\n')] |
from flask import Blueprint
from flask_restful import Api
import config
from resources import BatchNMTPerformanceResource
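# Registers the batch NMT performance resource on its own blueprint under
# MODULE_NAME + "/v1/performance".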
PERFORMANCE_BLUEPRINT = Blueprint("performance", __name__)
Api(PERFORMANCE_BLUEPRINT).add_resource(
BatchNMTPerformanceResource, config.MODULE_NAME + "/v1/performance"
) | [
"flask.Blueprint",
"flask_restful.Api"
] | [((148, 182), 'flask.Blueprint', 'Blueprint', (['"""performance"""', '__name__'], {}), "('performance', __name__)\n", (157, 182), False, 'from flask import Blueprint\n'), ((184, 210), 'flask_restful.Api', 'Api', (['PERFORMANCE_BLUEPRINT'], {}), '(PERFORMANCE_BLUEPRINT)\n', (187, 210), False, 'from flask_restful import Api\n')] |
from application import db
from datetime import datetime
from sqlalchemy import ForeignKey
class Orders(db.Model):
orderid = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(30), nullable=False)
last_name = db.Column(db.String(30), nullable=False)
number = db.Column(db.String(30), nullable=False)
address = db.Column(db.String(100), nullable=False)
pizzaid = db.Column(db.Integer, db.ForeignKey('stock.pizza_num')) # should this be stock
order_quantity = db.Column(db.Integer, nullable=False) ####### integer
#price = db.Column(db.Integer, nullable=True) ######## times by price per pizza
orderstatus = db.Column(db.String(5), nullable=True, server_default='No')
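# Pizza catalogue; Orders.pizzaid references Stock.pizza_num and the 'pizza' relationship
# exposes the orders placed for each pizza (backref 'nameofpizza').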
class Stock(db.Model):
pizza_num = db.Column(db.Integer, primary_key=True)
pizza_name = db.Column(db.String(40), nullable=False) ###########)
#priceperpizza = db.Column(db.Integer, nullable=False)
pizza = db.relationship('Orders', backref='nameofpizza', lazy=True)
| [
"application.db.String",
"application.db.relationship",
"application.db.Column",
"application.db.ForeignKey"
] | [((127, 166), 'application.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (136, 166), False, 'from application import db\n'), ((488, 525), 'application.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)'}), '(db.Integer, nullable=False)\n', (497, 525), False, 'from application import db\n'), ((740, 779), 'application.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (749, 779), False, 'from application import db\n'), ((913, 972), 'application.db.relationship', 'db.relationship', (['"""Orders"""'], {'backref': '"""nameofpizza"""', 'lazy': '(True)'}), "('Orders', backref='nameofpizza', lazy=True)\n", (928, 972), False, 'from application import db\n'), ((191, 204), 'application.db.String', 'db.String', (['(30)'], {}), '(30)\n', (200, 204), False, 'from application import db\n'), ((245, 258), 'application.db.String', 'db.String', (['(30)'], {}), '(30)\n', (254, 258), False, 'from application import db\n'), ((296, 309), 'application.db.String', 'db.String', (['(30)'], {}), '(30)\n', (305, 309), False, 'from application import db\n'), ((348, 362), 'application.db.String', 'db.String', (['(100)'], {}), '(100)\n', (357, 362), False, 'from application import db\n'), ((413, 445), 'application.db.ForeignKey', 'db.ForeignKey', (['"""stock.pizza_num"""'], {}), "('stock.pizza_num')\n", (426, 445), False, 'from application import db\n'), ((648, 660), 'application.db.String', 'db.String', (['(5)'], {}), '(5)\n', (657, 660), False, 'from application import db\n'), ((804, 817), 'application.db.String', 'db.String', (['(40)'], {}), '(40)\n', (813, 817), False, 'from application import db\n')] |
# Generated by Django 2.1.9 on 2019-08-27 08:24
import datetime
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import resources.models.utils
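# Initial payments schema: Order (one-to-one with a reservation), OrderLine, OrderLogEntry
# and Product (versioned via archived_at, priced per period or fixed).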
class Migration(migrations.Migration):
initial = True
dependencies = [
('resources', '0080_payments_related_changes'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.CharField(choices=[('waiting', 'waiting'), ('confirmed', 'confirmed'), ('rejected', 'rejected'), ('expired', 'expired'), ('cancelled', 'cancelled')], default='waiting', max_length=32, verbose_name='state')),
('order_number', models.CharField(default=resources.models.utils.generate_id, max_length=64, unique=True, verbose_name='order number')),
('reservation', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, related_name='order', to='resources.Reservation', verbose_name='reservation')),
],
options={
'verbose_name': 'order',
'verbose_name_plural': 'orders',
'ordering': ('id',),
},
),
migrations.CreateModel(
name='OrderLine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1, verbose_name='quantity')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_lines', to='payments.Order', verbose_name='order')),
],
options={
'verbose_name': 'order line',
'verbose_name_plural': 'order lines',
'ordering': ('id',),
},
),
migrations.CreateModel(
name='OrderLogEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('state_change', models.CharField(blank=True, choices=[('waiting', 'waiting'), ('confirmed', 'confirmed'), ('rejected', 'rejected'), ('expired', 'expired'), ('cancelled', 'cancelled')], max_length=32, verbose_name='state change')),
('message', models.TextField(blank=True)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='log_entries', to='payments.Order', verbose_name='order log entry')),
],
options={
'verbose_name': 'order log entry',
'verbose_name_plural': 'order log entries',
'ordering': ('id',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('product_id', models.CharField(db_index=True, editable=False, max_length=100, verbose_name='internal product ID')),
('archived_at', models.DateTimeField(db_index=True, default=datetime.datetime(9999, 12, 31, 0, 0, tzinfo=utc), editable=False, verbose_name='archived_at')),
('type', models.CharField(choices=[('rent', 'rent'), ('extra', 'extra')], default='rent', max_length=32, verbose_name='type')),
('sku', models.CharField(max_length=255, verbose_name='SKU')),
('name', models.CharField(blank=True, max_length=100, verbose_name='name')),
('name_fi', models.CharField(blank=True, max_length=100, null=True, verbose_name='name')),
('name_en', models.CharField(blank=True, max_length=100, null=True, verbose_name='name')),
('name_sv', models.CharField(blank=True, max_length=100, null=True, verbose_name='name')),
('description', models.TextField(blank=True, verbose_name='description')),
('description_fi', models.TextField(blank=True, null=True, verbose_name='description')),
('description_en', models.TextField(blank=True, null=True, verbose_name='description')),
('description_sv', models.TextField(blank=True, null=True, verbose_name='description')),
('price', models.DecimalField(decimal_places=2, max_digits=10, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], verbose_name='price including VAT')),
('tax_percentage', models.DecimalField(choices=[(Decimal('0.00'), '0.00'), (Decimal('10.00'), '10.00'), (Decimal('14.00'), '14.00'), (Decimal('24.00'), '24.00')], decimal_places=2, default=Decimal('24.00'), max_digits=5, verbose_name='tax percentage')),
('price_type', models.CharField(choices=[('per_period', 'per period'), ('fixed', 'fixed')], default='per_period', max_length=32, verbose_name='price type')),
('price_period', models.DurationField(blank=True, default=datetime.timedelta(0, 3600), null=True, verbose_name='price period')),
('max_quantity', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)], verbose_name='max quantity')),
('resources', models.ManyToManyField(blank=True, related_name='products', to='resources.Resource', verbose_name='resources')),
],
options={
'verbose_name': 'product',
'verbose_name_plural': 'products',
'ordering': ('product_id',),
},
),
migrations.AddField(
model_name='orderline',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='order_lines', to='payments.Product', verbose_name='product'),
),
migrations.AlterUniqueTogether(
name='product',
unique_together={('archived_at', 'product_id')},
),
]
| [
"datetime.datetime",
"django.db.models.OneToOneField",
"django.db.migrations.AlterUniqueTogether",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.DateTimeField",
"datetime.timedelta",
"django.db.models.CharField",
"decimal.Decimal"
] | [((6208, 6308), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""product"""', 'unique_together': "{('archived_at', 'product_id')}"}), "(name='product', unique_together={(\n 'archived_at', 'product_id')})\n", (6238, 6308), False, 'from django.db import migrations, models\n'), ((6050, 6192), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""order_lines"""', 'to': '"""payments.Product"""', 'verbose_name': '"""product"""'}), "(on_delete=django.db.models.deletion.PROTECT, related_name\n ='order_lines', to='payments.Product', verbose_name='product')\n", (6067, 6192), False, 'from django.db import migrations, models\n'), ((532, 625), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (548, 625), False, 'from django.db import migrations, models\n'), ((650, 871), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('waiting', 'waiting'), ('confirmed', 'confirmed'), ('rejected',\n 'rejected'), ('expired', 'expired'), ('cancelled', 'cancelled')]", 'default': '"""waiting"""', 'max_length': '(32)', 'verbose_name': '"""state"""'}), "(choices=[('waiting', 'waiting'), ('confirmed', 'confirmed'\n ), ('rejected', 'rejected'), ('expired', 'expired'), ('cancelled',\n 'cancelled')], default='waiting', max_length=32, verbose_name='state')\n", (666, 871), False, 'from django.db import migrations, models\n'), ((898, 1019), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'resources.models.utils.generate_id', 'max_length': '(64)', 'unique': '(True)', 'verbose_name': '"""order number"""'}), "(default=resources.models.utils.generate_id, max_length=64,\n unique=True, verbose_name='order number')\n", (914, 1019), False, 'from django.db import migrations, models\n'), ((1050, 1202), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""order"""', 'to': '"""resources.Reservation"""', 'verbose_name': '"""reservation"""'}), "(on_delete=django.db.models.deletion.PROTECT,\n related_name='order', to='resources.Reservation', verbose_name=\n 'reservation')\n", (1070, 1202), False, 'from django.db import migrations, models\n'), ((1492, 1585), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1508, 1585), False, 'from django.db import migrations, models\n'), ((1613, 1676), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)', 'verbose_name': '"""quantity"""'}), "(default=1, verbose_name='quantity')\n", (1640, 1676), False, 'from django.db import migrations, models\n'), ((1705, 1843), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""order_lines"""', 'to': '"""payments.Order"""', 'verbose_name': '"""order"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='order_lines', to='payments.Order', verbose_name='order')\n", (1722, 1843), False, 'from django.db import migrations, models\n'), ((2151, 2244), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 
'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2167, 2244), False, 'from django.db import migrations, models\n'), ((2273, 2312), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2293, 2312), False, 'from django.db import migrations, models\n'), ((2348, 2569), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('waiting', 'waiting'), ('confirmed', 'confirmed'), ('rejected',\n 'rejected'), ('expired', 'expired'), ('cancelled', 'cancelled')]", 'max_length': '(32)', 'verbose_name': '"""state change"""'}), "(blank=True, choices=[('waiting', 'waiting'), ('confirmed',\n 'confirmed'), ('rejected', 'rejected'), ('expired', 'expired'), (\n 'cancelled', 'cancelled')], max_length=32, verbose_name='state change')\n", (2364, 2569), False, 'from django.db import migrations, models\n'), ((2591, 2619), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (2607, 2619), False, 'from django.db import migrations, models\n'), ((2648, 2796), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""log_entries"""', 'to': '"""payments.Order"""', 'verbose_name': '"""order log entry"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='log_entries', to='payments.Order', verbose_name='order log entry')\n", (2665, 2796), False, 'from django.db import migrations, models\n'), ((3109, 3202), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3125, 3202), False, 'from django.db import migrations, models\n'), ((3232, 3298), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created at"""'}), "(auto_now_add=True, verbose_name='created at')\n", (3252, 3298), False, 'from django.db import migrations, models\n'), ((3332, 3435), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'editable': '(False)', 'max_length': '(100)', 'verbose_name': '"""internal product ID"""'}), "(db_index=True, editable=False, max_length=100,\n verbose_name='internal product ID')\n", (3348, 3435), False, 'from django.db import migrations, models\n'), ((3632, 3753), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('rent', 'rent'), ('extra', 'extra')]", 'default': '"""rent"""', 'max_length': '(32)', 'verbose_name': '"""type"""'}), "(choices=[('rent', 'rent'), ('extra', 'extra')], default=\n 'rent', max_length=32, verbose_name='type')\n", (3648, 3753), False, 'from django.db import migrations, models\n'), ((3775, 3827), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""SKU"""'}), "(max_length=255, verbose_name='SKU')\n", (3791, 3827), False, 'from django.db import migrations, models\n'), ((3855, 3920), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'verbose_name': '"""name"""'}), "(blank=True, max_length=100, verbose_name='name')\n", (3871, 3920), False, 'from django.db import migrations, models\n'), ((3951, 4027), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': 
'(True)', 'verbose_name': '"""name"""'}), "(blank=True, max_length=100, null=True, verbose_name='name')\n", (3967, 4027), False, 'from django.db import migrations, models\n'), ((4058, 4134), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '"""name"""'}), "(blank=True, max_length=100, null=True, verbose_name='name')\n", (4074, 4134), False, 'from django.db import migrations, models\n'), ((4165, 4241), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '"""name"""'}), "(blank=True, max_length=100, null=True, verbose_name='name')\n", (4181, 4241), False, 'from django.db import migrations, models\n'), ((4276, 4332), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""description"""'}), "(blank=True, verbose_name='description')\n", (4292, 4332), False, 'from django.db import migrations, models\n'), ((4370, 4437), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""description"""'}), "(blank=True, null=True, verbose_name='description')\n", (4386, 4437), False, 'from django.db import migrations, models\n'), ((4475, 4542), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""description"""'}), "(blank=True, null=True, verbose_name='description')\n", (4491, 4542), False, 'from django.db import migrations, models\n'), ((4580, 4647), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""description"""'}), "(blank=True, null=True, verbose_name='description')\n", (4596, 4647), False, 'from django.db import migrations, models\n'), ((5140, 5284), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('per_period', 'per period'), ('fixed', 'fixed')]", 'default': '"""per_period"""', 'max_length': '(32)', 'verbose_name': '"""price type"""'}), "(choices=[('per_period', 'per period'), ('fixed', 'fixed')],\n default='per_period', max_length=32, verbose_name='price type')\n", (5156, 5284), False, 'from django.db import migrations, models\n'), ((5624, 5739), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""products"""', 'to': '"""resources.Resource"""', 'verbose_name': '"""resources"""'}), "(blank=True, related_name='products', to=\n 'resources.Resource', verbose_name='resources')\n", (5646, 5739), False, 'from django.db import migrations, models\n'), ((3510, 3559), 'datetime.datetime', 'datetime.datetime', (['(9999)', '(12)', '(31)', '(0)', '(0)'], {'tzinfo': 'utc'}), '(9999, 12, 31, 0, 0, tzinfo=utc)\n', (3527, 3559), False, 'import datetime\n'), ((5044, 5060), 'decimal.Decimal', 'Decimal', (['"""24.00"""'], {}), "('24.00')\n", (5051, 5060), False, 'from decimal import Decimal\n'), ((5357, 5384), 'datetime.timedelta', 'datetime.timedelta', (['(0)', '(3600)'], {}), '(0, 3600)\n', (5375, 5384), False, 'import datetime\n'), ((4782, 4797), 'decimal.Decimal', 'Decimal', (['"""0.01"""'], {}), "('0.01')\n", (4789, 4797), False, 'from decimal import Decimal\n'), ((4904, 4919), 'decimal.Decimal', 'Decimal', (['"""0.00"""'], {}), "('0.00')\n", (4911, 4919), False, 'from decimal import Decimal\n'), ((4931, 4947), 'decimal.Decimal', 'Decimal', (['"""10.00"""'], {}), "('10.00')\n", (4938, 4947), False, 'from decimal import Decimal\n'), ((4960, 4976), 'decimal.Decimal', 
'Decimal', (['"""14.00"""'], {}), "('14.00')\n", (4967, 4976), False, 'from decimal import Decimal\n'), ((4989, 5005), 'decimal.Decimal', 'Decimal', (['"""24.00"""'], {}), "('24.00')\n", (4996, 5005), False, 'from decimal import Decimal\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2018/9/18 19:17
# @Author: <NAME>
# @File : test.py
import equations_solver
n = 100  # the size (order) of the test matrix
# test different methods
def test_methods(A, b, methods_list):
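    # Solves A x = b with every method named in methods_list and prints the
    # resulting solution vectors side by side for comparison.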
methods_num = len(methods_list)
# get solution
solution_x = []
for i in range(methods_num):
tmp_x = equations_solver.solve(A, b, verbose=1, method=methods_list[i])
solution_x.append(tmp_x)
# make table head
for i in range(methods_num):
print('%12s' % methods_list[i], end=' ')
print()
print('-' * (13 * methods_num))
# show solution
for k in range(n):
for i in range(methods_num):
print('%12.2f' % solution_x[i][k, 0], end=' ')
print()
def main():
A, b = equations_solver.generate_homework_data()
# 1. test single method.
# x = equations_solver.gauss(A, b)
# or
# x = equations_solver.solve(A, b, method='gauss')
# 2. compare different methods.
# methods_list_all = ['gauss', 'lu', 'chase', 'square_root',
# 'jacobi', 'gauss_seidel', 'sor',
# 'cg', 'qr']
methods_list = [
'jacobi',
'gauss_seidel',
'sor',
'cg',
'qr'
]
test_methods(A, b, methods_list)
if __name__ == '__main__':
main()
# Here is other data to test the top four methods
# A = np.random.randint(0, 10, (n, n)) + np.eye(n)
# x = np.arange(1, n + 1).reshape((n, 1))
# b = A.dot(x)
# x = EquationsSolver.solve(A, b, method='Gauss')
# x = EquationsSolver.solve(A, b, method='LU')
# A, x, b = model.generate_data()
# LU data 1
# A = np.array([[2, 1, 5], [4, 1, 12], [-2, -4, 5]], dtype='f8')
# x = np.array([[1], [-1], [2]], dtype='f8')
# b = np.array([[11], [27], [12]], dtype='f8')
# chase data 1
# A = np.array([[2, 1, 0, 0], [1/2, 2, 1/2, 0], [0, 1/2, 2, 1/2], [0, 0, 1, 2]])
# x = np.array([[-13/45], [7/90], [-1/45], [1/90]])
# b = np.array([[-1/2], [0], [0], [0]])
# chase data 2
# A = np.array([[3, 1, 0, 0], [1, 4, 1, 0], [0, 2, 5, 2], [0, 0, 2, 6]])
# x = np.array([[1], [3], [-2], [1]])
# b = np.array([[6], [11], [-2], [2]])
# square root data 1
# A = np.array([[4, 2, -2], [2, 2, -3], [-2, -3, 14]])
# x = np.array([[2], [2], [1]])
# b = np.array([[10], [5], [4]])
| [
"equations_solver.generate_homework_data",
"equations_solver.solve"
] | [((789, 830), 'equations_solver.generate_homework_data', 'equations_solver.generate_homework_data', ([], {}), '()\n', (828, 830), False, 'import equations_solver\n'), ((358, 421), 'equations_solver.solve', 'equations_solver.solve', (['A', 'b'], {'verbose': '(1)', 'method': 'methods_list[i]'}), '(A, b, verbose=1, method=methods_list[i])\n', (380, 421), False, 'import equations_solver\n')] |
import re
comment_re = re.compile(r"""^(?P<instr>[^#]*)(?P<comment>#.*)?$""")
no_operand_re = re.compile(r'''^(?P<op>(NOP|RET|RTI))$''', re.IGNORECASE)
one_operand_re = re.compile(r'''^(?P<op>(NOT|INC|DEC|OUT|IN|PUSH|POP|JZ|JMP|CALL))\s+(?P<rd>R[0-7])$''',
re.IGNORECASE)
two_operand_re = re.compile(r'''^(?P<op>(SWAP))\s+(?P<rs>R[0-7])+(\s*,\s*)+(?P<rd>R[0-7])$''', re.IGNORECASE)
three_operand_re = re.compile(
r'''^(?P<op>(ADD|OR|SUB|AND))\s+(?P<rs1>R[0-7])+(\s*,\s*)+(?P<rs2>R[0-7])+(\s*,\s*)+(?P<rd>R[0-7])$''',
re.IGNORECASE)
three_operand_immediate_re = re.compile(
r'''^(?P<op>(IADD))\s+(?P<rs1>R[0-7])+(\s*,\s*)+(?P<rd>R[0-7])+(\s*,\s*)+(?P<imm>[0-9a-fA-F]{1,4})$''',
re.IGNORECASE)
two_operand_immediate_re = re.compile(
r'''^(?P<op>(SHL|SHR|LDM))\s+(?P<r>R[0-7])+(\s*,\s*)+(?P<imm>[0-9a-fA-F]{1,4})$''', re.IGNORECASE)
two_operand_effective_re = re.compile(
r'''^(?P<op>(LDD|STD))\s+(?P<r>R[0-7])+(\s*,\s*)+(?P<eff>[0-9a-fA-F]{1,5})$''', re.IGNORECASE)
org_re = re.compile(r'''^(?P<op>(.ORG)\s+(?P<val>[0-9a-fA-F]{1,8}))$''', re.IGNORECASE)
address_value_re = re.compile(r'''^(\s*)([0-9a-fA-F]{1,4})(\s*)$''', re.IGNORECASE)
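# Illustrative usage (not part of the original assembler source): parsing one
# three-operand instruction and reading its named groups.
#   m = three_operand_re.match("ADD R1, R2, R3")
#   if m:
#       op, rs1, rs2, rd = m.group("op"), m.group("rs1"), m.group("rs2"), m.group("rd")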
| [
"re.compile"
] | [((24, 73), 're.compile', 're.compile', (['"""^(?P<instr>[^#]*)(?P<comment>#.*)?$"""'], {}), "('^(?P<instr>[^#]*)(?P<comment>#.*)?$')\n", (34, 73), False, 'import re\n'), ((95, 147), 're.compile', 're.compile', (['"""^(?P<op>(NOP|RET|RTI))$"""', 're.IGNORECASE'], {}), "('^(?P<op>(NOP|RET|RTI))$', re.IGNORECASE)\n", (105, 147), False, 'import re\n'), ((170, 277), 're.compile', 're.compile', (['"""^(?P<op>(NOT|INC|DEC|OUT|IN|PUSH|POP|JZ|JMP|CALL))\\\\s+(?P<rd>R[0-7])$"""', 're.IGNORECASE'], {}), "(\n '^(?P<op>(NOT|INC|DEC|OUT|IN|PUSH|POP|JZ|JMP|CALL))\\\\s+(?P<rd>R[0-7])$',\n re.IGNORECASE)\n", (180, 277), False, 'import re\n'), ((318, 412), 're.compile', 're.compile', (['"""^(?P<op>(SWAP))\\\\s+(?P<rs>R[0-7])+(\\\\s*,\\\\s*)+(?P<rd>R[0-7])$"""', 're.IGNORECASE'], {}), "('^(?P<op>(SWAP))\\\\s+(?P<rs>R[0-7])+(\\\\s*,\\\\s*)+(?P<rd>R[0-7])$',\n re.IGNORECASE)\n", (328, 412), False, 'import re\n'), ((430, 569), 're.compile', 're.compile', (['"""^(?P<op>(ADD|OR|SUB|AND))\\\\s+(?P<rs1>R[0-7])+(\\\\s*,\\\\s*)+(?P<rs2>R[0-7])+(\\\\s*,\\\\s*)+(?P<rd>R[0-7])$"""', 're.IGNORECASE'], {}), "(\n '^(?P<op>(ADD|OR|SUB|AND))\\\\s+(?P<rs1>R[0-7])+(\\\\s*,\\\\s*)+(?P<rs2>R[0-7])+(\\\\s*,\\\\s*)+(?P<rd>R[0-7])$'\n , re.IGNORECASE)\n", (440, 569), False, 'import re\n'), ((598, 737), 're.compile', 're.compile', (['"""^(?P<op>(IADD))\\\\s+(?P<rs1>R[0-7])+(\\\\s*,\\\\s*)+(?P<rd>R[0-7])+(\\\\s*,\\\\s*)+(?P<imm>[0-9a-fA-F]{1,4})$"""', 're.IGNORECASE'], {}), "(\n '^(?P<op>(IADD))\\\\s+(?P<rs1>R[0-7])+(\\\\s*,\\\\s*)+(?P<rd>R[0-7])+(\\\\s*,\\\\s*)+(?P<imm>[0-9a-fA-F]{1,4})$'\n , re.IGNORECASE)\n", (608, 737), False, 'import re\n'), ((764, 881), 're.compile', 're.compile', (['"""^(?P<op>(SHL|SHR|LDM))\\\\s+(?P<r>R[0-7])+(\\\\s*,\\\\s*)+(?P<imm>[0-9a-fA-F]{1,4})$"""', 're.IGNORECASE'], {}), "(\n '^(?P<op>(SHL|SHR|LDM))\\\\s+(?P<r>R[0-7])+(\\\\s*,\\\\s*)+(?P<imm>[0-9a-fA-F]{1,4})$'\n , re.IGNORECASE)\n", (774, 881), False, 'import re\n'), ((906, 1019), 're.compile', 're.compile', (['"""^(?P<op>(LDD|STD))\\\\s+(?P<r>R[0-7])+(\\\\s*,\\\\s*)+(?P<eff>[0-9a-fA-F]{1,5})$"""', 're.IGNORECASE'], {}), "(\n '^(?P<op>(LDD|STD))\\\\s+(?P<r>R[0-7])+(\\\\s*,\\\\s*)+(?P<eff>[0-9a-fA-F]{1,5})$'\n , re.IGNORECASE)\n", (916, 1019), False, 'import re\n'), ((1026, 1100), 're.compile', 're.compile', (['"""^(?P<op>(.ORG)\\\\s+(?P<val>[0-9a-fA-F]{1,8}))$"""', 're.IGNORECASE'], {}), "('^(?P<op>(.ORG)\\\\s+(?P<val>[0-9a-fA-F]{1,8}))$', re.IGNORECASE)\n", (1036, 1100), False, 'import re\n'), ((1124, 1185), 're.compile', 're.compile', (['"""^(\\\\s*)([0-9a-fA-F]{1,4})(\\\\s*)$"""', 're.IGNORECASE'], {}), "('^(\\\\s*)([0-9a-fA-F]{1,4})(\\\\s*)$', re.IGNORECASE)\n", (1134, 1185), False, 'import re\n')] |
# pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import mock
import os
import shlex
import six
import subprocess
import unittest
from django.conf import settings
from django.test import override_settings
from resolwe.flow.executors.docker import FlowExecutor
from resolwe.flow.executors import BaseFlowExecutor
try:
import builtins # py3
except ImportError:
import __builtin__ as builtins # py2
def check_docker():
"""Check if Docker is installed and working.
:return: tuple (indicator of the availability of Docker, reason for
unavailability)
:rtype: (bool, str)
"""
command = settings.FLOW_EXECUTOR.get('COMMAND', 'docker')
info_command = '{} info'.format(command)
available, reason = True, ""
# TODO: use subprocess.DEVNULL after dropping support for Python 2
with open(os.devnull, 'wb') as DEVNULL:
try:
subprocess.check_call(shlex.split(info_command), stdout=DEVNULL, stderr=subprocess.STDOUT)
except OSError:
available, reason = False, "Docker command '{}' not found".format(command)
except subprocess.CalledProcessError:
available, reason = (False, "Docker command '{}' returned non-zero "
"exit status".format(info_command))
return available, reason
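# The test class below feeds check_docker() into unittest.skipUnless, so the
# Docker-dependent test is skipped (with the reason above) when the daemon is unavailable.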
class DockerExecutorTestCase(unittest.TestCase):
@unittest.skipUnless(*check_docker())
@mock.patch('os.mkdir')
@mock.patch('os.chmod')
@mock.patch('os.chdir')
@mock.patch('resolwe.flow.executors.Data.objects.filter')
@mock.patch('resolwe.flow.executors.Data.objects.get')
def test_run_in_docker(self, data_get_mock, data_filter_mock, chdir_mock, chmod_mock, mkdir_mock):
executor_settings = settings.FLOW_EXECUTOR
executor_settings['CONTAINER_IMAGE'] = 'centos'
with override_settings(FLOW_EXECUTOR=executor_settings):
executor = FlowExecutor()
script = 'if grep -Fq "docker" /proc/1/cgroup; then echo "Running inside Docker"; ' \
'else echo "Running locally"; fi'
count = {'running': 0}
def assert_output(line):
if line.strip() == 'Running inside Docker':
count['running'] += 1
write_mock = mock.MagicMock(side_effect=assert_output)
stdout_mock = mock.MagicMock(write=write_mock)
open_mock = mock.MagicMock(side_effect=[stdout_mock, mock.MagicMock()])
with mock.patch.object(builtins, 'open', open_mock):
executor.run('no_data_id', script, verbosity=0)
self.assertEqual(count['running'], 1)
class GetToolsTestCase(unittest.TestCase):
@mock.patch('resolwe.flow.executors.apps')
@mock.patch('resolwe.flow.executors.os')
@mock.patch('resolwe.flow.executors.settings')
def test_get_tools(self, settings_mock, os_mock, apps_mock):
apps_mock.get_app_configs.return_value = [
mock.MagicMock(path='/resolwe/test_app1'),
mock.MagicMock(path='/resolwe/test_app2'),
]
os_mock.path.join = os.path.join
os_mock.path.isdir.side_effect = [False, True]
settings_mock.RESOLWE_CUSTOM_TOOLS_PATHS = ['/custom_tools']
base_executor = BaseFlowExecutor()
tools_list = base_executor.get_tools()
self.assertEqual(len(tools_list), 2)
self.assertIn('/resolwe/test_app2/tools', tools_list)
self.assertIn('/custom_tools', tools_list)
@mock.patch('resolwe.flow.executors.apps')
@mock.patch('resolwe.flow.executors.settings')
def test_not_list(self, settings_mock, apps_mock):
apps_mock.get_app_configs.return_value = []
settings_mock.RESOLWE_CUSTOM_TOOLS_PATHS = '/custom_tools'
base_executor = BaseFlowExecutor()
with six.assertRaisesRegex(self, KeyError, 'setting must be a list'):
base_executor.get_tools()
| [
"resolwe.flow.executors.docker.FlowExecutor",
"mock.patch",
"mock.MagicMock",
"shlex.split",
"mock.patch.object",
"django.test.override_settings",
"six.assertRaisesRegex",
"django.conf.settings.FLOW_EXECUTOR.get",
"resolwe.flow.executors.BaseFlowExecutor"
] | [((699, 746), 'django.conf.settings.FLOW_EXECUTOR.get', 'settings.FLOW_EXECUTOR.get', (['"""COMMAND"""', '"""docker"""'], {}), "('COMMAND', 'docker')\n", (725, 746), False, 'from django.conf import settings\n'), ((1498, 1520), 'mock.patch', 'mock.patch', (['"""os.mkdir"""'], {}), "('os.mkdir')\n", (1508, 1520), False, 'import mock\n'), ((1526, 1548), 'mock.patch', 'mock.patch', (['"""os.chmod"""'], {}), "('os.chmod')\n", (1536, 1548), False, 'import mock\n'), ((1554, 1576), 'mock.patch', 'mock.patch', (['"""os.chdir"""'], {}), "('os.chdir')\n", (1564, 1576), False, 'import mock\n'), ((1582, 1638), 'mock.patch', 'mock.patch', (['"""resolwe.flow.executors.Data.objects.filter"""'], {}), "('resolwe.flow.executors.Data.objects.filter')\n", (1592, 1638), False, 'import mock\n'), ((1644, 1697), 'mock.patch', 'mock.patch', (['"""resolwe.flow.executors.Data.objects.get"""'], {}), "('resolwe.flow.executors.Data.objects.get')\n", (1654, 1697), False, 'import mock\n'), ((2782, 2823), 'mock.patch', 'mock.patch', (['"""resolwe.flow.executors.apps"""'], {}), "('resolwe.flow.executors.apps')\n", (2792, 2823), False, 'import mock\n'), ((2829, 2868), 'mock.patch', 'mock.patch', (['"""resolwe.flow.executors.os"""'], {}), "('resolwe.flow.executors.os')\n", (2839, 2868), False, 'import mock\n'), ((2874, 2919), 'mock.patch', 'mock.patch', (['"""resolwe.flow.executors.settings"""'], {}), "('resolwe.flow.executors.settings')\n", (2884, 2919), False, 'import mock\n'), ((3577, 3618), 'mock.patch', 'mock.patch', (['"""resolwe.flow.executors.apps"""'], {}), "('resolwe.flow.executors.apps')\n", (3587, 3618), False, 'import mock\n'), ((3624, 3669), 'mock.patch', 'mock.patch', (['"""resolwe.flow.executors.settings"""'], {}), "('resolwe.flow.executors.settings')\n", (3634, 3669), False, 'import mock\n'), ((3346, 3364), 'resolwe.flow.executors.BaseFlowExecutor', 'BaseFlowExecutor', ([], {}), '()\n', (3362, 3364), False, 'from resolwe.flow.executors import BaseFlowExecutor\n'), ((3869, 3887), 'resolwe.flow.executors.BaseFlowExecutor', 'BaseFlowExecutor', ([], {}), '()\n', (3885, 3887), False, 'from resolwe.flow.executors import BaseFlowExecutor\n'), ((1922, 1972), 'django.test.override_settings', 'override_settings', ([], {'FLOW_EXECUTOR': 'executor_settings'}), '(FLOW_EXECUTOR=executor_settings)\n', (1939, 1972), False, 'from django.test import override_settings\n'), ((1997, 2011), 'resolwe.flow.executors.docker.FlowExecutor', 'FlowExecutor', ([], {}), '()\n', (2009, 2011), False, 'from resolwe.flow.executors.docker import FlowExecutor\n'), ((2367, 2408), 'mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'assert_output'}), '(side_effect=assert_output)\n', (2381, 2408), False, 'import mock\n'), ((2435, 2467), 'mock.MagicMock', 'mock.MagicMock', ([], {'write': 'write_mock'}), '(write=write_mock)\n', (2449, 2467), False, 'import mock\n'), ((3048, 3089), 'mock.MagicMock', 'mock.MagicMock', ([], {'path': '"""/resolwe/test_app1"""'}), "(path='/resolwe/test_app1')\n", (3062, 3089), False, 'import mock\n'), ((3103, 3144), 'mock.MagicMock', 'mock.MagicMock', ([], {'path': '"""/resolwe/test_app2"""'}), "(path='/resolwe/test_app2')\n", (3117, 3144), False, 'import mock\n'), ((3901, 3964), 'six.assertRaisesRegex', 'six.assertRaisesRegex', (['self', 'KeyError', '"""setting must be a list"""'], {}), "(self, KeyError, 'setting must be a list')\n", (3922, 3964), False, 'import six\n'), ((987, 1012), 'shlex.split', 'shlex.split', (['info_command'], {}), '(info_command)\n', (998, 1012), False, 'import shlex\n'), ((2569, 2615), 
'mock.patch.object', 'mock.patch.object', (['builtins', '"""open"""', 'open_mock'], {}), "(builtins, 'open', open_mock)\n", (2586, 2615), False, 'import mock\n'), ((2533, 2549), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2547, 2549), False, 'import mock\n')] |
from game.server.world.chunk import Chunk
from game.server.tiles.tiles import *
class World:
def __init__(self, name, seed, generator, sizeX, sizeZ):
self.name = name
self.seed = seed
self.generator = generator
        self.generator.setWorld(self)
self.sizeX = sizeX
self.sizeZ = sizeZ
self.chunks = [None] * (sizeX * sizeZ)
self.time = 0
self.dayDuration = 12000
#Fill chunk array
for x in range(sizeX):
for z in range(sizeZ):
self.chunks[x * sizeX + z] = Chunk(x, z)
for chunk in self.chunks:
self.generator.genChunk(chunk)
# Returns the chunk at x,z in chunk pos [0 -> sizeX / sizeZ]
# Params:
# - x: X coordinate of the chunk
# - z: Z coordinate of the chunk
def getChunkAtChunkPos(self, x, z):
if(x < 0 or z < 0 or x >= self.sizeX or z >= self.sizeZ):
return None
return self.chunks[x * self.sizeX + z]
# Returns the chunk at x,z in tile pos [0 -> (sizeX / sizeZ) * chunkSize]
# Params:
# - x: X coordinate of the tile
# - z: Z coordinate of the tile
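    # Note: x >> 4 is floor division by 16, which assumes Chunk.CHUNK_SIZE == 16.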
def getChunkAt(self, x, z):
return self.getChunkAtChunkPos(x >> 4, z >> 4)
# Place a tile at x, y, z in the world.
# Params:
# - tile: Tile instance defined in class Tiles
# - x: X pos of the tile
# - y: Y pos of the tile
# - z: Z pos of the tile
def setTileAt(self, tile, x, y, z):
chunk = self.getChunkAt(x, z)
if(chunk != None):
chunk.setTileAt(tile, x % Chunk.CHUNK_SIZE, y, z % Chunk.CHUNK_SIZE)
# Returns the tile (Tile instance) at x,y,z in the world.
# Params:
# - x: X pos of the tile
# - y: Y pos of the tile
# - z: Z pos of the tile
def getTileAt(self, x, y, z):
chunk = self.getChunkAt(x, z)
if(chunk != None):
return chunk.getTileAt(x % Chunk.CHUNK_SIZE, y, z % Chunk.CHUNK_SIZE)
return None
# Returns the biome at x,z in the world.
# Params:
# - x: X pos of the tile
# - z: Z pos of the tile
def getBiomeAt(self, x, z):
chunk = self.getChunkAt(x, z)
if(chunk != None):
            return chunk.biome
return None
# Returns the surface (initial height without modification) at x,z in the world.
# Params:
# - x: X pos of the tile
# - z: Z pos of the tile
def getSurfaceAt(self, x, z):
chunk = self.getChunkAt(x, z)
if(chunk != None):
            return chunk.heightMap[x % Chunk.CHUNK_SIZE][z % Chunk.CHUNK_SIZE]
return None
| [
"game.server.world.chunk.Chunk"
] | [((484, 495), 'game.server.world.chunk.Chunk', 'Chunk', (['x', 'z'], {}), '(x, z)\n', (489, 495), False, 'from game.server.world.chunk import Chunk\n')] |
import numpy as np
import tensorflow as tf
from lenet import create_neural_net, create_cnn_only, replace_cnn_dense_layer, freeze_part_of_inception
from tensorflow import compat
import time
from sklearn.utils import shuffle, class_weight
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
import os
import shutil
import datetime
import config
import utils
import random
import gc
current_data_dir = config.new_data_dir_name
def setup_tf():
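    # Clear any previous Keras session and enable GPU memory growth so TensorFlow
    # does not reserve all GPU memory up front.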
tf.keras.backend.clear_session()
config_var = compat.v1.ConfigProto()
config_var.gpu_options.allow_growth = True
compat.v1.Session(config=config_var)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def train_model(load_saved, freeze=False, load_saved_cnn=False):
setup_tf()
if load_saved:
model = load_model(config.model_name)
else:
model = create_neural_net(load_pretrained_cnn=load_saved_cnn, model_name=config.cnn_only_name)
# freeze convolutional model to fine tune lstm (the cnn is treated as one layer
# make sure you freeze the correct one)
# goes the other way around too if the model was saved frozen and you want to unfreeze
model.layers[1].trainable = not freeze
optimizer = Adam(learning_rate=config.lr)
# recompile to make the changes
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
model.summary()
custom_training_loop(model, config.allowed_ram_mb, 10000, save_every_epoch=True,
normalize_input_values=False, incorporate_fps=True)
def train_cnn_only(load_saved, swap_output_layer=False, freeze_part=True):
setup_tf()
if load_saved:
model = load_model(config.cnn_only_name)
if swap_output_layer:
model = replace_cnn_dense_layer(model)
else:
model = create_cnn_only()
if freeze_part:
model = freeze_part_of_inception(model, "mixed9")
model.summary()
cnn_only_training(model, False)
def cnn_only_training(model, normalize_inputs=True):
test_data_size = 10000
class_weights = get_class_weights(test_data_size=test_data_size)
filenames = utils.get_sorted_filenames(current_data_dir)
random.shuffle(filenames)
filename_dict_list = divide_dataset_cnn_only(filenames, test_data_size, normalize_inputs,
config.known_normalize_growth, allowed_ram=config.allowed_ram_mb)
for epoch in range(config.epochs):
for filename_dict in filename_dict_list:
K.clear_session()
images = []
labels = []
for filename in filename_dict["filenames"]:
data_x, data_y = utils.load_file(filename)
if normalize_inputs:
data_x = utils.normalize_input_values(data_x, "float32")
images += [data_x]
labels += [data_y]
del data_x, data_y
if "start_index" in filename_dict:
idx_start, idx_stop = filename_dict["start_index"], filename_dict["stop_index"]
list_idx = 0 if filename_dict["is_test"] else -1
images[list_idx] = images[list_idx][idx_start:idx_stop]
labels[list_idx] = labels[list_idx][idx_start:idx_stop]
images = np.concatenate(images, axis=0)
labels = np.concatenate(labels, axis=0)
images, labels = convert_labels_to_time_pressed(images, labels)
if not filename_dict["is_test"]:
model.fit(images, labels,
epochs=epoch+1, initial_epoch=epoch, batch_size=config.CNN_ONLY_BATCH_SIZE,
class_weight=class_weights, validation_split=0.05, shuffle=True)
else:
model.evaluate(images, labels, batch_size=config.CNN_ONLY_BATCH_SIZE)
del images, labels
gc.collect()
model.save(config.cnn_only_name)
def divide_dataset_cnn_only(filenames, test_data_size, normalize_inputs, normalize_factor=0, allowed_ram=config.allowed_ram_mb):
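    # Groups the data files into chunks that fit inside `allowed_ram` MB. Each returned dict
    # holds the filenames of one chunk and an `is_test` flag; the file straddling the
    # train/test boundary additionally gets `start_index`/`stop_index` into its data.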
res_list = []
filename_dict = {"filenames": [], "is_test": False}
left_mem = allowed_ram
for i in range(len(filenames) - 1, -1, -1):
full_filename = current_data_dir + filenames[i]
file_size_mb = calc_filesize(full_filename, normalize_inputs, normalize_factor)
labels = utils.load_file_only_labels(full_filename)
if test_data_size <= 0:
filename_dict["is_test"] = False
elif len(labels) > test_data_size:
filename_dict["stop_index"] = len(labels)
filename_dict["start_index"] = len(labels) - test_data_size
filename_dict["is_test"] = True
filename_dict["filenames"].insert(0, full_filename)
res_list.insert(0, filename_dict)
filename_dict = {"filenames": [full_filename], "stop_index": len(labels) - test_data_size,
"start_index": 0, "is_test": False}
left_mem = allowed_ram
test_data_size = 0
continue
else:
test_data_size -= len(labels)
filename_dict["is_test"] = True
if file_size_mb > left_mem:
left_mem = allowed_ram
res_list.insert(0, filename_dict)
filename_dict = {"filenames": [], "is_test": False}
filename_dict["filenames"].insert(0, full_filename)
left_mem -= file_size_mb
if filename_dict["filenames"]:
res_list.insert(0, filename_dict)
return res_list
def custom_training_loop(model, allowed_ram_mb, test_data_size, save_every_epoch, normalize_input_values=True, incorporate_fps = True):
"""log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)"""
class_weights = get_class_weights(test_data_size=test_data_size)
filenames = utils.get_sorted_filenames(current_data_dir)
if config.random_file_order_train:
random.shuffle(filenames)
filenames = divide_dataset(filenames, allowed_ram_mb, test_data_size, normalize_input_values,
incorporate_fps=incorporate_fps, known_normalize_growth=config.known_normalize_growth)
for epoch in range(config.epochs):
for i in range(len(filenames)):
K.clear_session()
if os.path.isfile(filenames[i]["filename"]):
for j in range(filenames[i]["chunks"]):
K.clear_session()
images, labels = utils.load_file(filenames[i]["filename"])
start_idx, stop_idx = filenames[i]["indices"][j], filenames[i]["indices"][j+1]
# stop_idx is next start index, therefore not stop_idx-1 because is it the first NOT included index
images, labels = images[start_idx:stop_idx], labels[start_idx:stop_idx]
if normalize_input_values:
images = utils.normalize_input_values(images, "float32")
images, labels = convert_labels_to_time_pressed(images, labels)
images, labels = sequence_data(images, labels, shuffle_bool=False, incorporate_fps=incorporate_fps)
print(f"Epoch: {epoch}; Chunk {j+1} out of {filenames[i]['chunks']};"
f" {len(filenames) - i} out of {len(filenames)} files to go!")
# test data is always last, meaning if next doesn't exists it's the test data
if not filenames[i]["test_data"]:
model.fit(images, labels, epochs=epoch+1, batch_size=config.BATCH_SIZE,
class_weight=class_weights, initial_epoch=epoch, validation_split=0.1,
shuffle=True)
else:
model.evaluate(images, labels, batch_size=config.BATCH_SIZE)
del images, labels
gc.collect()
else:
print(f"File {filenames[i]['filename']} existed at the beginning, not anymore!")
continue
if save_every_epoch:
model.save(config.model_name + "_epoch_" + str(epoch))
model.save(config.model_name + "_fully_trained")
def divide_dataset(filenames, allowed_ram_mb, test_data_size=0, normalize_input_values=True, incorporate_fps=True, known_normalize_growth=0):
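    # Builds one dict per data file with its filename, a `test_data` flag, the number of
    # `chunks` needed to respect `allowed_ram_mb`, and the chunk boundary `indices`.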
file_size_limit = allowed_ram_mb // config.sequence_len
res_filenames = []
for i in range(len(filenames) - 1, -1, -1):
full_filename = current_data_dir + filenames[i]
divider = {"filename": full_filename}
labels = utils.load_file_only_labels(full_filename)
# only load images if necessary
file_size_mb = calc_filesize(full_filename, normalize_input_values, known_normalize_growth)
if test_data_size <= 0:
divider["test_data"] = False
elif test_data_size < len(labels):
full_seq_len = config.sequence_len * utils.get_fps_ratio() if incorporate_fps else config.sequence_len
if test_data_size > full_seq_len:
new_divider = div_test_data_helper(full_filename, test_data_size,
file_size_limit, normalize_input_values, is_test=True)
res_filenames.insert(0, new_divider)
if len(labels) - test_data_size > full_seq_len:
new_divider = div_test_data_helper(full_filename, test_data_size,
file_size_limit, normalize_input_values, is_test=False)
res_filenames.insert(0, new_divider)
test_data_size = 0
continue
else:
test_data_size -= len(labels)
divider["test_data"] = True
calc_chunks_and_indices(file_size_mb, file_size_limit, len(labels), divider)
res_filenames.insert(0, divider)
return res_filenames
def div_test_data_helper(filename, test_data_size, file_size_limit, normalize_input_values, is_test=True):
m_byte = (1024 ** 2)
new_divider = {"filename": filename, "test_data": is_test}
images, labels = utils.load_file(filename)
original_len = len(labels)
if is_test:
images, labels = images[-test_data_size:], labels[-test_data_size:]
else:
images, labels = images[:-test_data_size], labels[:-test_data_size]
if normalize_input_values:
images = utils.normalize_input_values(images, "float32")
file_size_mb = images.nbytes // m_byte + labels.nbytes // m_byte
offset = original_len - len(labels) if is_test else 0
calc_chunks_and_indices(file_size_mb, file_size_limit, len(labels), new_divider, offset)
return new_divider
def calc_chunks_and_indices(file_size_mb, file_size_limit, data_len, divider, offset=0):
divider["chunks"] = file_size_mb // file_size_limit + 1
step = data_len / divider["chunks"]
divider["indices"] = [int(round(chunk * step)) + offset for chunk in range(divider["chunks"] + 1)]
def calc_filesize(full_filename, normalize, normalize_factor=0):
m_byte = 1024 ** 2
file_size_mb = os.stat(full_filename).st_size // m_byte
if normalize:
if normalize_factor == 0:
images = utils.load_file_only_images(full_filename)
images = utils.normalize_input_values(images, "float32")
labels = utils.load_file_only_labels(full_filename)
file_size_mb = images.nbytes // m_byte + labels.nbytes // m_byte
else:
file_size_mb = int(file_size_mb * normalize_factor)
return file_size_mb
def sequence_data(data_x, data_y, shuffle_bool=True, incorporate_fps=True):
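    # Turns the frame stream into overlapping sliding windows of config.sequence_len frames
    # (spaced fps_ratio frames apart when incorporate_fps is True); each window is paired with
    # the label of its final frame, so the output is full_seq_len - step samples shorter.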
if len(data_x) != len(data_y):
ValueError(f"Data_x and Data_y length differ: Data_x:{len(data_x)}, Data_y:{len(data_y)}")
images = []
fps_ratio = utils.get_fps_ratio()
if fps_ratio == 0 and incorporate_fps:
raise ValueError('Fps ratio is 0, cannot divide by 0')
full_seq_len = config.sequence_len * fps_ratio if incorporate_fps else config.sequence_len
step = fps_ratio if incorporate_fps else 1
if len(data_y) < full_seq_len:
raise ValueError(f"Not enough data, minimum length should be {full_seq_len}, but is {len(data_y)}")
for i in range(len(data_x) - full_seq_len + step):
# i + full_seq_len is last NOT included index, therefore + step in for loop above
images += [data_x[i:i+full_seq_len:step]]
images = np.stack(images, axis=0)
labels = data_y[full_seq_len-step:]
# use keras fit shuffle, this creates a copy -> both arrays in ram for short time
# also don't use if you use validation_split in fit (seen data as validation over multiple epochs)
if shuffle_bool:
images, labels = shuffle(images, labels) # shuffle both the same way
return images, labels
# not needed anymore
def subdivide_data(load_from, new_dir_name, chunks, keep_directory, test_data_size=None):
# keep directory assumes the files are correct, will perform training as if they just got created
if keep_directory and os.path.isdir(new_dir_name):
print("Directory exists and was not changed, as specified")
return
data = np.load(load_from, allow_pickle=True)
# data = data[:len(data) // 10] # for testing
name_for_file_path = new_dir_name + "/" + config.temp_data_chunk_name
print("Data length:", len(data))
print("Chunk length:", len(data) // chunks)
if os.path.isdir(new_dir_name):
remove_subdivided_data(new_dir_name)
try:
os.makedirs(new_dir_name)
except OSError:
print(f"Creation of the directory {new_dir_name} failed, probably because it already exists")
return
step = len(data) // chunks
for i in range(chunks):
# guarantee, that test data remains constant if specified
if not test_data_size:
np.save(name_for_file_path + str(i), data[step*i:step*(i+1)])
else:
# if test_data_size is bigger than step size, there will be less files than requested chunks
# if test_data_size is smaller than step size, there will be one more file than requested chunks
# when iterating over data later just check if next file exists, if no it means that it is the test data
if (chunks - i - 1) * step >= test_data_size and i < chunks - 1:
np.save(name_for_file_path + str(i), data[step*i:step*(i+1)])
else:
np.save(name_for_file_path + str(i), data[step*i:-test_data_size])
np.save(name_for_file_path + str(i + 1), data[-test_data_size:])
break
del data
def remove_subdivided_data(dir_to_remove_name):
if os.path.isdir(dir_to_remove_name):
shutil.rmtree(dir_to_remove_name)
print(f"Directory {dir_to_remove_name} successfully removed")
else:
print(f"Directory {dir_to_remove_name} not found")
def get_inverse_proportions(data):
print(len(data))
x = np.sum(data, axis=0) # sum each label for each timestep separately
x = x / np.sum(x, axis=-1).reshape(-1, 1)
x = np.ones(x.shape) / x
x = x / np.sum(x, axis=-1).reshape(-1, 1)
print(x)
print(x.shape)
return x
def get_class_weights(test_data_size=0):
labels = utils.load_labels_only(current_data_dir)
# remove last x rows
labels = np.concatenate(labels, axis=0)
    _, labels = convert_labels_to_time_pressed(range(100), labels)
if test_data_size:
labels = labels[:-test_data_size, :]
labels = np.argmax(labels, axis=-1)
classes = np.asarray(range(config.output_classes))
inverse_proportions = class_weight.compute_class_weight('balanced', classes=classes, y=labels)
inverse_proportions = dict(enumerate(inverse_proportions))
print("Proportions:", inverse_proportions)
del labels
return inverse_proportions
def test_sequence_data_no_mismatch():
x = np.random.rand(1000, config.height, config.width, config.color_channels)
y = np.random.rand(1000, config.output_classes)
print("Images shape:", x.shape, "Labels shape:", y.shape)
xx, yy = sequence_data(x, y, shuffle_bool=False, incorporate_fps=True)
print("Images match?:", xx[-100][-1][0][0] == x[-100][0][0], "Labels match?:", yy[-100] == y[-100])
del xx, yy
xx, yy = sequence_data(x, y, shuffle_bool=False, incorporate_fps=False)
print("Images match?:", xx[-100][-1][0][0] == x[-100][0][0], "Labels match?:", yy[-100] == y[-100])
def test_divide_dataset():
filenames = utils.get_sorted_filenames(current_data_dir)
if config.random_file_order_train:
random.shuffle(filenames)
filenames = divide_dataset(filenames, config.allowed_ram_mb, 10000, normalize_input_values=True,
incorporate_fps=False, known_normalize_growth=config.known_normalize_growth)
for entry in filenames:
print(entry)
def test_divide_cnn_only():
filenames = utils.get_sorted_filenames(current_data_dir)
if config.random_file_order_train:
random.shuffle(filenames)
my_dict = divide_dataset_cnn_only(filenames, 30000, True, normalize_factor=config.known_normalize_growth,
allowed_ram=config.allowed_ram_mb)
for i in my_dict:
print(i)
for i in my_dict:
filesize = 0
for j in i["filenames"]:
filesize += os.stat(j).st_size
print((filesize * 4) // (1024 ** 2))
def convert_labels_to_time_pressed(images, labels):
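    # Looks fps_ratio frames ahead of every frame: if the active key (one of the keys in
    # index_list) stays pressed for at most config.counts_as_tap of that window, the frame is
    # re-labelled as a "tap" class (shifted by len(config.outputs)); otherwise the original
    # "hold" class is kept.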
fps_ratio = utils.get_fps_ratio()
new_labels = []
index_list = [1, 3, 4, 5]
dict_len = len(config.outputs)
for i in range(labels.shape[0] - fps_ratio):
new_label = np.zeros(config.output_classes)
index = np.argmax(labels[i])
temp = labels[i:i+fps_ratio]
temp = np.sum(temp, axis=0)
temp = temp[index] / fps_ratio
if temp <= config.counts_as_tap and index in index_list:
if index == 1: index = 0
else: index -= 2
new_label[index + dict_len] = 1
else:
new_label[index] = 1
new_labels.append(new_label)
new_labels = np.stack(new_labels, axis=0)
images = images[:-fps_ratio]
return images, new_labels
if __name__ == "__main__":
# train_model(True, freeze=True, load_saved_cnn=False)
train_cnn_only(False, swap_output_layer=False, freeze_part=True)
| [
"numpy.random.rand",
"lenet.freeze_part_of_inception",
"utils.load_labels_only",
"utils.get_sorted_filenames",
"utils.load_file",
"tensorflow.keras.models.load_model",
"tensorflow.compat.v1.Session",
"numpy.stack",
"os.path.isdir",
"numpy.concatenate",
"lenet.replace_cnn_dense_layer",
"utils.normalize_input_values",
"utils.load_file_only_labels",
"lenet.create_neural_net",
"lenet.create_cnn_only",
"random.shuffle",
"numpy.ones",
"numpy.argmax",
"utils.get_fps_ratio",
"os.path.isfile",
"gc.collect",
"tensorflow.compat.v1.ConfigProto",
"os.makedirs",
"tensorflow.config.experimental.set_memory_growth",
"sklearn.utils.class_weight.compute_class_weight",
"sklearn.utils.shuffle",
"tensorflow.keras.optimizers.Adam",
"numpy.sum",
"numpy.zeros",
"shutil.rmtree",
"utils.load_file_only_images",
"os.stat",
"tensorflow.keras.backend.clear_session",
"numpy.load",
"tensorflow.config.experimental.list_physical_devices"
] | [((529, 561), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (559, 561), True, 'import tensorflow as tf\n'), ((579, 602), 'tensorflow.compat.v1.ConfigProto', 'compat.v1.ConfigProto', ([], {}), '()\n', (600, 602), False, 'from tensorflow import compat\n'), ((654, 690), 'tensorflow.compat.v1.Session', 'compat.v1.Session', ([], {'config': 'config_var'}), '(config=config_var)\n', (671, 690), False, 'from tensorflow import compat\n'), ((702, 753), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (746, 753), True, 'import tensorflow as tf\n'), ((1373, 1402), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'config.lr'}), '(learning_rate=config.lr)\n', (1377, 1402), False, 'from tensorflow.keras.optimizers import Adam\n'), ((2319, 2363), 'utils.get_sorted_filenames', 'utils.get_sorted_filenames', (['current_data_dir'], {}), '(current_data_dir)\n', (2345, 2363), False, 'import utils\n'), ((2368, 2393), 'random.shuffle', 'random.shuffle', (['filenames'], {}), '(filenames)\n', (2382, 2393), False, 'import random\n'), ((6125, 6169), 'utils.get_sorted_filenames', 'utils.get_sorted_filenames', (['current_data_dir'], {}), '(current_data_dir)\n', (6151, 6169), False, 'import utils\n'), ((10409, 10434), 'utils.load_file', 'utils.load_file', (['filename'], {}), '(filename)\n', (10424, 10434), False, 'import utils\n'), ((12099, 12120), 'utils.get_fps_ratio', 'utils.get_fps_ratio', ([], {}), '()\n', (12118, 12120), False, 'import utils\n'), ((12722, 12746), 'numpy.stack', 'np.stack', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (12730, 12746), True, 'import numpy as np\n'), ((13467, 13504), 'numpy.load', 'np.load', (['load_from'], {'allow_pickle': '(True)'}), '(load_from, allow_pickle=True)\n', (13474, 13504), True, 'import numpy as np\n'), ((13725, 13752), 'os.path.isdir', 'os.path.isdir', (['new_dir_name'], {}), '(new_dir_name)\n', (13738, 13752), False, 'import os\n'), ((14983, 15016), 'os.path.isdir', 'os.path.isdir', (['dir_to_remove_name'], {}), '(dir_to_remove_name)\n', (14996, 15016), False, 'import os\n'), ((15265, 15285), 'numpy.sum', 'np.sum', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (15271, 15285), True, 'import numpy as np\n'), ((15558, 15598), 'utils.load_labels_only', 'utils.load_labels_only', (['current_data_dir'], {}), '(current_data_dir)\n', (15580, 15598), False, 'import utils\n'), ((15637, 15667), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (15651, 15667), True, 'import numpy as np\n'), ((15822, 15848), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(-1)'}), '(labels, axis=-1)\n', (15831, 15848), True, 'import numpy as np\n'), ((15930, 16002), 'sklearn.utils.class_weight.compute_class_weight', 'class_weight.compute_class_weight', (['"""balanced"""'], {'classes': 'classes', 'y': 'labels'}), "('balanced', classes=classes, y=labels)\n", (15963, 16002), False, 'from sklearn.utils import shuffle, class_weight\n'), ((16207, 16279), 'numpy.random.rand', 'np.random.rand', (['(1000)', 'config.height', 'config.width', 'config.color_channels'], {}), '(1000, config.height, config.width, config.color_channels)\n', (16221, 16279), True, 'import numpy as np\n'), ((16288, 16331), 'numpy.random.rand', 'np.random.rand', (['(1000)', 'config.output_classes'], {}), '(1000, config.output_classes)\n', (16302, 16331), True, 'import numpy as np\n'), ((16813, 16857), 
'utils.get_sorted_filenames', 'utils.get_sorted_filenames', (['current_data_dir'], {}), '(current_data_dir)\n', (16839, 16857), False, 'import utils\n'), ((17235, 17279), 'utils.get_sorted_filenames', 'utils.get_sorted_filenames', (['current_data_dir'], {}), '(current_data_dir)\n', (17261, 17279), False, 'import utils\n'), ((17809, 17830), 'utils.get_fps_ratio', 'utils.get_fps_ratio', ([], {}), '()\n', (17828, 17830), False, 'import utils\n'), ((18442, 18470), 'numpy.stack', 'np.stack', (['new_labels'], {'axis': '(0)'}), '(new_labels, axis=0)\n', (18450, 18470), True, 'import numpy as np\n'), ((783, 834), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (823, 834), True, 'import tensorflow as tf\n'), ((952, 981), 'tensorflow.keras.models.load_model', 'load_model', (['config.model_name'], {}), '(config.model_name)\n', (962, 981), False, 'from tensorflow.keras.models import load_model\n'), ((1008, 1099), 'lenet.create_neural_net', 'create_neural_net', ([], {'load_pretrained_cnn': 'load_saved_cnn', 'model_name': 'config.cnn_only_name'}), '(load_pretrained_cnn=load_saved_cnn, model_name=config.\n cnn_only_name)\n', (1025, 1099), False, 'from lenet import create_neural_net, create_cnn_only, replace_cnn_dense_layer, freeze_part_of_inception\n'), ((1860, 1892), 'tensorflow.keras.models.load_model', 'load_model', (['config.cnn_only_name'], {}), '(config.cnn_only_name)\n', (1870, 1892), False, 'from tensorflow.keras.models import load_model\n'), ((2000, 2017), 'lenet.create_cnn_only', 'create_cnn_only', ([], {}), '()\n', (2015, 2017), False, 'from lenet import create_neural_net, create_cnn_only, replace_cnn_dense_layer, freeze_part_of_inception\n'), ((2054, 2095), 'lenet.freeze_part_of_inception', 'freeze_part_of_inception', (['model', '"""mixed9"""'], {}), "(model, 'mixed9')\n", (2078, 2095), False, 'from lenet import create_neural_net, create_cnn_only, replace_cnn_dense_layer, freeze_part_of_inception\n'), ((4553, 4595), 'utils.load_file_only_labels', 'utils.load_file_only_labels', (['full_filename'], {}), '(full_filename)\n', (4580, 4595), False, 'import utils\n'), ((6217, 6242), 'random.shuffle', 'random.shuffle', (['filenames'], {}), '(filenames)\n', (6231, 6242), False, 'import random\n'), ((8888, 8930), 'utils.load_file_only_labels', 'utils.load_file_only_labels', (['full_filename'], {}), '(full_filename)\n', (8915, 8930), False, 'import utils\n'), ((10692, 10739), 'utils.normalize_input_values', 'utils.normalize_input_values', (['images', '"""float32"""'], {}), "(images, 'float32')\n", (10720, 10739), False, 'import utils\n'), ((13022, 13045), 'sklearn.utils.shuffle', 'shuffle', (['images', 'labels'], {}), '(images, labels)\n', (13029, 13045), False, 'from sklearn.utils import shuffle, class_weight\n'), ((13344, 13371), 'os.path.isdir', 'os.path.isdir', (['new_dir_name'], {}), '(new_dir_name)\n', (13357, 13371), False, 'import os\n'), ((13816, 13841), 'os.makedirs', 'os.makedirs', (['new_dir_name'], {}), '(new_dir_name)\n', (13827, 13841), False, 'import os\n'), ((15026, 15059), 'shutil.rmtree', 'shutil.rmtree', (['dir_to_remove_name'], {}), '(dir_to_remove_name)\n', (15039, 15059), False, 'import shutil\n'), ((15390, 15406), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (15397, 15406), True, 'import numpy as np\n'), ((16905, 16930), 'random.shuffle', 'random.shuffle', (['filenames'], {}), '(filenames)\n', (16919, 16930), False, 'import random\n'), ((17327, 17352), 'random.shuffle', 
'random.shuffle', (['filenames'], {}), '(filenames)\n', (17341, 17352), False, 'import random\n'), ((17985, 18016), 'numpy.zeros', 'np.zeros', (['config.output_classes'], {}), '(config.output_classes)\n', (17993, 18016), True, 'import numpy as np\n'), ((18033, 18053), 'numpy.argmax', 'np.argmax', (['labels[i]'], {}), '(labels[i])\n', (18042, 18053), True, 'import numpy as np\n'), ((18106, 18126), 'numpy.sum', 'np.sum', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (18112, 18126), True, 'import numpy as np\n'), ((1943, 1973), 'lenet.replace_cnn_dense_layer', 'replace_cnn_dense_layer', (['model'], {}), '(model)\n', (1966, 1973), False, 'from lenet import create_neural_net, create_cnn_only, replace_cnn_dense_layer, freeze_part_of_inception\n'), ((2703, 2720), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2718, 2720), True, 'from tensorflow.keras import backend as K\n'), ((3476, 3506), 'numpy.concatenate', 'np.concatenate', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (3490, 3506), True, 'import numpy as np\n'), ((3528, 3558), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (3542, 3558), True, 'import numpy as np\n'), ((4062, 4074), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4072, 4074), False, 'import gc\n'), ((6550, 6567), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (6565, 6567), True, 'from tensorflow.keras import backend as K\n'), ((6583, 6623), 'os.path.isfile', 'os.path.isfile', (["filenames[i]['filename']"], {}), "(filenames[i]['filename'])\n", (6597, 6623), False, 'import os\n'), ((11386, 11408), 'os.stat', 'os.stat', (['full_filename'], {}), '(full_filename)\n', (11393, 11408), False, 'import os\n'), ((11500, 11542), 'utils.load_file_only_images', 'utils.load_file_only_images', (['full_filename'], {}), '(full_filename)\n', (11527, 11542), False, 'import utils\n'), ((11564, 11611), 'utils.normalize_input_values', 'utils.normalize_input_values', (['images', '"""float32"""'], {}), "(images, 'float32')\n", (11592, 11611), False, 'import utils\n'), ((11633, 11675), 'utils.load_file_only_labels', 'utils.load_file_only_labels', (['full_filename'], {}), '(full_filename)\n', (11660, 11675), False, 'import utils\n'), ((2858, 2883), 'utils.load_file', 'utils.load_file', (['filename'], {}), '(filename)\n', (2873, 2883), False, 'import utils\n'), ((15348, 15366), 'numpy.sum', 'np.sum', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (15354, 15366), True, 'import numpy as np\n'), ((15423, 15441), 'numpy.sum', 'np.sum', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (15429, 15441), True, 'import numpy as np\n'), ((17675, 17685), 'os.stat', 'os.stat', (['j'], {}), '(j)\n', (17682, 17685), False, 'import os\n'), ((2950, 2997), 'utils.normalize_input_values', 'utils.normalize_input_values', (['data_x', '"""float32"""'], {}), "(data_x, 'float32')\n", (2978, 2997), False, 'import utils\n'), ((6701, 6718), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (6716, 6718), True, 'from tensorflow.keras import backend as K\n'), ((6756, 6797), 'utils.load_file', 'utils.load_file', (["filenames[i]['filename']"], {}), "(filenames[i]['filename'])\n", (6771, 6797), False, 'import utils\n'), ((8192, 8204), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8202, 8204), False, 'import gc\n'), ((7189, 7236), 'utils.normalize_input_values', 'utils.normalize_input_values', (['images', '"""float32"""'], {}), "(images, 'float32')\n", (7217, 7236), False, 'import utils\n'), 
((9236, 9257), 'utils.get_fps_ratio', 'utils.get_fps_ratio', ([], {}), '()\n', (9255, 9257), False, 'import utils\n')] |
from django.contrib import admin
from .models import Account, Expense
from .models import WCType, PercentDeduction
admin.site.register(Account)
admin.site.register(Expense)
admin.site.register(WCType)
admin.site.register(PercentDeduction)
| [
"django.contrib.admin.site.register"
] | [((117, 145), 'django.contrib.admin.site.register', 'admin.site.register', (['Account'], {}), '(Account)\n', (136, 145), False, 'from django.contrib import admin\n'), ((146, 174), 'django.contrib.admin.site.register', 'admin.site.register', (['Expense'], {}), '(Expense)\n', (165, 174), False, 'from django.contrib import admin\n'), ((175, 202), 'django.contrib.admin.site.register', 'admin.site.register', (['WCType'], {}), '(WCType)\n', (194, 202), False, 'from django.contrib import admin\n'), ((203, 240), 'django.contrib.admin.site.register', 'admin.site.register', (['PercentDeduction'], {}), '(PercentDeduction)\n', (222, 240), False, 'from django.contrib import admin\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-11-28 08:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('hmlvaraus', '0010_hmlreservation_berth'),
]
operations = [
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Time of creation')),
('modified_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Time of modification')),
('purchase_code', models.CharField(max_length=40, verbose_name='Purchase code')),
('reserver_name', models.CharField(blank=True, max_length=100, verbose_name='Reserver name')),
('reserver_email_address', models.EmailField(blank=True, max_length=254, verbose_name='Reserver email address')),
('reserver_phone_number', models.CharField(blank=True, max_length=30, verbose_name='Reserver phone number')),
('reserver_address_street', models.CharField(blank=True, max_length=100, verbose_name='Reserver address street')),
('reserver_address_zip', models.CharField(blank=True, max_length=30, verbose_name='Reserver address zip')),
('reserver_address_city', models.CharField(blank=True, max_length=100, verbose_name='Reserver address city')),
('vat_percent', models.IntegerField(choices=[0, 10, 14, 24], default=24)),
('price_vat', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),
('product_name', models.CharField(blank=True, max_length=100, verbose_name='Product name')),
('purchase_process_started', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Purchase process started')),
('purchase_process_success', models.DateTimeField(blank=True, null=True, verbose_name='Purchase process success')),
('purchase_process_failure', models.DateTimeField(blank=True, null=True, verbose_name='Purchase process failure')),
('purchase_process_notified', models.DateTimeField(blank=True, null=True, verbose_name='Purchase process notified')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='purchase_created', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
('hml_reservation', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='hmlvaraus.HMLReservation', verbose_name='HMLReservation')),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='purchase_modified', to=settings.AUTH_USER_MODEL, verbose_name='Modified by')),
],
options={
'abstract': False,
},
),
]
| [
"django.db.models.EmailField",
"django.db.models.OneToOneField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((321, 378), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (352, 378), False, 'from django.db import migrations, models\n'), ((563, 656), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (579, 656), False, 'from django.db import migrations, models\n'), ((686, 779), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""Time of creation"""'}), "(default=django.utils.timezone.now, verbose_name=\n 'Time of creation')\n", (706, 779), False, 'from django.db import migrations, models\n'), ((809, 906), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""Time of modification"""'}), "(default=django.utils.timezone.now, verbose_name=\n 'Time of modification')\n", (829, 906), False, 'from django.db import migrations, models\n'), ((938, 999), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'verbose_name': '"""Purchase code"""'}), "(max_length=40, verbose_name='Purchase code')\n", (954, 999), False, 'from django.db import migrations, models\n'), ((1036, 1110), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'verbose_name': '"""Reserver name"""'}), "(blank=True, max_length=100, verbose_name='Reserver name')\n", (1052, 1110), False, 'from django.db import migrations, models\n'), ((1156, 1245), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'verbose_name': '"""Reserver email address"""'}), "(blank=True, max_length=254, verbose_name=\n 'Reserver email address')\n", (1173, 1245), False, 'from django.db import migrations, models\n'), ((1285, 1371), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'verbose_name': '"""Reserver phone number"""'}), "(blank=True, max_length=30, verbose_name=\n 'Reserver phone number')\n", (1301, 1371), False, 'from django.db import migrations, models\n'), ((1413, 1502), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'verbose_name': '"""Reserver address street"""'}), "(blank=True, max_length=100, verbose_name=\n 'Reserver address street')\n", (1429, 1502), False, 'from django.db import migrations, models\n'), ((1541, 1626), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'verbose_name': '"""Reserver address zip"""'}), "(blank=True, max_length=30, verbose_name='Reserver address zip'\n )\n", (1557, 1626), False, 'from django.db import migrations, models\n'), ((1666, 1753), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'verbose_name': '"""Reserver address city"""'}), "(blank=True, max_length=100, verbose_name=\n 'Reserver address city')\n", (1682, 1753), False, 'from django.db import migrations, models\n'), ((1783, 1839), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': '[0, 10, 14, 24]', 'default': '(24)'}), '(choices=[0, 10, 14, 24], default=24)\n', (1802, 1839), False, 'from django.db import migrations, models\n'), ((1872, 1936), 'django.db.models.DecimalField', 'models.DecimalField', ([], 
{'decimal_places': '(2)', 'default': '(0.0)', 'max_digits': '(6)'}), '(decimal_places=2, default=0.0, max_digits=6)\n', (1891, 1936), False, 'from django.db import migrations, models\n'), ((1972, 2045), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'verbose_name': '"""Product name"""'}), "(blank=True, max_length=100, verbose_name='Product name')\n", (1988, 2045), False, 'from django.db import migrations, models\n'), ((2093, 2194), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""Purchase process started"""'}), "(default=django.utils.timezone.now, verbose_name=\n 'Purchase process started')\n", (2113, 2194), False, 'from django.db import migrations, models\n'), ((2237, 2326), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Purchase process success"""'}), "(blank=True, null=True, verbose_name=\n 'Purchase process success')\n", (2257, 2326), False, 'from django.db import migrations, models\n'), ((2369, 2458), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Purchase process failure"""'}), "(blank=True, null=True, verbose_name=\n 'Purchase process failure')\n", (2389, 2458), False, 'from django.db import migrations, models\n'), ((2502, 2592), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Purchase process notified"""'}), "(blank=True, null=True, verbose_name=\n 'Purchase process notified')\n", (2522, 2592), False, 'from django.db import migrations, models\n'), ((2621, 2805), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""purchase_created"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Created by"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='purchase_created', to=settings.\n AUTH_USER_MODEL, verbose_name='Created by')\n", (2638, 2805), False, 'from django.db import migrations, models\n'), ((2834, 2966), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""hmlvaraus.HMLReservation"""', 'verbose_name': '"""HMLReservation"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'hmlvaraus.HMLReservation', verbose_name='HMLReservation')\n", (2854, 2966), False, 'from django.db import migrations, models\n'), ((2996, 3182), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""purchase_modified"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Modified by"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='purchase_modified', to=settings.\n AUTH_USER_MODEL, verbose_name='Modified by')\n", (3013, 3182), False, 'from django.db import migrations, models\n')] |
##############################
# #
# loxygenK/musical_typer #
# メインプログラム #
# (c)2020 loxygenK #
# All rights reversed. #
# #
##############################
import sys
import os
# Python管理ライブラリ
import math
import pygame
from pygame.locals import *
# Pygame初期化
pygame.mixer.pre_init(44100, 16, 2, 1024)
pygame.mixer.init()
pygame.init()
# 自作ライブラリ
from lib import DrawMethodTemplates
from lib.GameSystem import *
from lib.ColorTheme import *
# FPS管理用インスタンスを生成
fps_clock = pygame.time.Clock()
def gs_specify_score():
# ----- [ ゲーム用の情報準備 ] -----
if len(sys.argv) < 2:
raise RuntimeError("Song is not specified!")
elif not os.path.isfile(sys.argv[1]):
raise FileNotFoundError("Specified path is not file, or not exists!")
else:
print("Game will start at soon. Stay tuned!")
# 譜面を読み込む
score_data = Score()
score_data.read_score(sys.argv[1])
return score_data
def gs_main_routine(score_data: Score):
# ゲームに必要なインスタンスを生成
ui = Screen()
game_info = GameInfo(score_data)
keyboard_drawer = DrawingUtil.KeyboardDrawer(ui.screen, 193, ui.full_font, 40, 5, 2)
# ループ管理用変数
game_finished_reason = ""
mainloop_continues = True
# フレームカウンター
# 点滅処理などに使う
frame_count = 0
# スクリーンのサイズを取得
w, h = ui.screen_size
# 次の歌詞を表示するモードに居るか
is_tmp_next_lyrics_printing = False
is_cont_next_lyrics_printing = False
# ランク決定用定数
rank_standard = [200, 150, 125, 100, 99.50, 99, 98, 97, 94, 90, 80, 60, 40, 20, 10, 0]
rank_string = ["Wow", "Unexpected", "Very God", "God", "Pro", "Genius", "Geki-tsuyo", "tsuyotusyo", "AAA", "AA",
"A", "B", "C", "D", "E", "F"]
# ----- [ ゲーム準備 ] -----
# 再生
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.play(1)
game_info.pos = 0
# メインループ
# (何らかの理由で強制的にメインループを抜ける必要が出てきた or 曲が終わった)
while mainloop_continues and pygame.mixer.music.get_pos() >= 0:
# -----------------
# 事前計算
# -----------------
# フレームカウンタを更新
frame_count = (frame_count + 1) % 60
# 曲上の現在位置を把握
game_info.pos = pygame.mixer.music.get_pos() / 1000
# 現在の歌詞・ゾーン・セクションを取得
lyx_idx = game_info.update_current_lyrincs()
game_info.update_current_zone()
sct_idx = game_info.get_current_section()
# ---------------------------
# イベント処理/ジャッジ
# ---------------------------
# イベント処理ループ
for event in pygame.event.get():
if event.type == QUIT:
mainloop_continues = False
break
if event.type == KEYUP:
if event.key == K_SPACE:
is_tmp_next_lyrics_printing = False
if event.type == KEYDOWN:
# ESCキーの場合
if event.key == K_ESCAPE:
mainloop_continues = False
break
if event.key == K_SPACE:
is_tmp_next_lyrics_printing = True
if event.key == K_LSHIFT or event.key == K_RSHIFT:
is_cont_next_lyrics_printing = not is_cont_next_lyrics_printing
# 大前提として、無効なキーが押されていないか
if Romautil.is_readable_key_pressed(event.key):
# これ以上打つキーがない(打ち終わったか、そもそも歌詞がない)
if game_info.completed:
SoundEffectConstants.unneccesary.play()
continue
# 正しいキーが押されたか
if game_info.is_expected_key(chr(event.key)):
# 輪唱で初めての打鍵か
if game_info.full[:1] and game_info.sent_count == 0:
game_info.override_key_prev_pos()
# 成功処理をする
got_point = game_info.count_success()
# 成功エフェクト
ui.add_fg_effector(30, "AC/WA", DrawMethodTemplates.slide_fadeout_text,
["Pass", more_blackish(GREEN_THIN_COLOR, 50), ui.alphabet_font, 10, -150,
-383])
if not (is_cont_next_lyrics_printing or is_tmp_next_lyrics_printing):
# キーボード上に点数を描画する
x, y = keyboard_drawer.get_place(chr(event.key))
x += keyboard_drawer.key_size / 2
x -= ui.full_font.render("+{}".format(got_point), True, TEXT_COLOR).get_width() / 2
ui.add_fg_effector(30, chr(event.key), DrawMethodTemplates.absolute_fadeout,
["+{}".format(got_point), BLUE_THICK_COLOR, ui.full_font, 15, x, y])
# AC/WAのエフェクト
if game_info.is_ac:
ui.add_fg_effector(120, "AC/WA", DrawMethodTemplates.slide_fadeout_text,
["AC", GREEN_THICK_COLOR, ui.alphabet_font, 20, -170, -383])
ui.add_bg_effector(15, "AC/WA", DrawMethodTemplates.blink_rect,
[more_whitish(GREEN_THIN_COLOR, 50), (0, 60, w, 130)])
SoundEffectConstants.ac.play()
elif game_info.is_wa:
ui.add_fg_effector(120, "AC/WA", DrawMethodTemplates.slide_fadeout_text,
["WA", more_whitish(BLUE_THICK_COLOR, 100), ui.alphabet_font, 20, -170,
-383])
ui.add_bg_effector(15, "AC/WA", DrawMethodTemplates.blink_rect,
[more_whitish(BLUE_THICK_COLOR, 100), (0, 60, w, 130)])
SoundEffectConstants.wa.play()
else:
if game_info.is_in_zone and game_info.score.zone[game_info.zone_index]:
SoundEffectConstants.special_success.play()
else:
if game_info.get_key_per_second() > 4:
SoundEffectConstants.fast.play()
else:
SoundEffectConstants.success.play()
else:
if not game_info.has_to_prevent_miss:
# 失敗をカウントする
game_info.count_failure()
# 効果音を流す
SoundEffectConstants.failed.play()
# エフェクト
ui.add_bg_effector(15, "AC/WA", DrawMethodTemplates.blink_rect,
[(255, 200, 200), (0, 60, w, 130)])
ui.add_fg_effector(30, "AC/WA", DrawMethodTemplates.slide_fadeout_text,
["MISS", more_whitish(RED_COLOR, 50), ui.alphabet_font,
10, -150, -383])
else:
SoundEffectConstants.unneccesary.play()
# ------------
# 計算
# ------------
# ===== 歌詞情報が変化したときの処理 =====
# 歌詞が変わった?
if lyx_idx:
# TLEの計算をする
game_info.apply_TLE()
# 最終的なタイプ情報を記録する
game_info.sentence_log.append([game_info.sent_count, game_info.sent_miss, game_info.completed])
# TLEした?
if len(game_info.target_roma) > 0 and not game_info.has_to_prevent_miss:
ui.add_fg_effector(30, "TLE", DrawMethodTemplates.slide_fadeout_text,
["TLE", more_blackish(RED_COLOR, 50), ui.alphabet_font, -10,
-150, -383])
ui.add_bg_effector(15, "TLE", DrawMethodTemplates.blink_rect,
[more_whitish(RED_COLOR, 50), (0, 60, w, 130)])
SoundEffectConstants.tle.play()
else:
# 歌詞が変わるまでの待機時間を考慮して、前回のキータイプ時間を早める
game_info.override_key_prev_pos()
# 歌詞をアップデートする
game_info.update_current_lyrics()
# 曲が終わった?
if game_info.song_finished:
# 歌詞情報を消去する
game_info.update_current_lyrics("", "")
ui.add_bg_effector(60, "S.F.", DrawMethodTemplates.slide_fadeout_text,
["Song Finished!", (255, 127, 0), ui.system_font, 25, 0, 0])
# セクションが変わった?
if sct_idx:
# セクションを全完した?
if game_info.section_miss == 0 and game_info.section_count != 0:
# エフェクトとボーナスポイント
ui.add_bg_effector(60, "Section AC", DrawMethodTemplates.slide_fadeout_text,
["Section AC!", (255, 127, 0), ui.system_font, 25, 0, 0])
game_info.point += game_info.SECTION_PERFECT_POINT
# セクションごとのタイプ情報を記録
game_info.section_log.append([game_info.section_count, game_info.section_miss])
# セクションのデータを削除
game_info.reset_section_score()
# ---------------
# 画面描画
# ----------------
# [ 背面レイヤー ]
# 画面を消去する
ui.screen.fill(BACKGROUND_COLOR)
# 曲のタイトルなどの情報
DrawingUtil.write_limit(ui.screen, (w - 2, 0), w / 2, ui.alphabet_font, score_data.properties["title"])
DrawingUtil.write_limit(ui.screen, (w - 5, 33), w / 2, ui.system_font,
score_data.properties["song_author"] + "/" + score_data.properties["singer"],
more_whitish(TEXT_COLOR, 100))
# 残り時間ゲージ
pygame.draw.rect(ui.screen, more_blackish(BACKGROUND_COLOR, 25), (0, 60, w, 130))
pygame.draw.rect(ui.screen, more_blackish(BACKGROUND_COLOR, 50),
(0, 60, math.floor(game_info.get_time_remain_ratio() * w), 130))
# レイヤーが変わるのでここで背景エフェクトを更新する
ui.update_effector(1)
# ----- [ 前面レイヤー ] -----
# 歌詞
if game_info.full[:1] != "/" or game_info.sent_count > 0:
DrawingUtil.print_progress(ui.screen, (w / 2, 80), MARGIN + 20, ui.nihongo_font,
game_info.typed_kana, game_info.target_kana)
DrawingUtil.print_progress(ui.screen, (w / 2, 130), MARGIN + 5, ui.full_font,
game_info.typed_roma, game_info.target_roma)
printout_lyrics = game_info.full if game_info.full[:1] != "/" else game_info.full[1:]
ui.print_str(MARGIN - 12, 60, ui.full_font, printout_lyrics, more_whitish(TEXT_COLOR, 30))
# コンボ
combo_text = ui.full_font.render(str(game_info.combo), True, more_whitish(TEXT_COLOR, 50))
ui.screen.blit(combo_text, (MARGIN - 12, 157))
ui.print_str(combo_text.get_width() + 5, 165, ui.system_font, "chain", more_whitish(TEXT_COLOR, 75))
# 正確率ゲージ
pygame.draw.rect(ui.screen, GREEN_THICK_COLOR if not game_info.is_ac else RED_COLOR,
(0, 60, w * game_info.get_sentence_accuracy(), 3))
DrawingUtil.write_limit(ui.screen, (w * game_info.get_rate(limit=True), 168), 0, ui.system_font,
rank_string[game_info.calculate_rank()], more_whitish(TEXT_COLOR, 100))
# 達成率ゲージ
if game_info.calculate_rank() > 0:
acheive_rate = rank_standard[game_info.calculate_rank() - 1] / 100
pygame.draw.rect(ui.screen, RED_COLOR, (0, 187, w * acheive_rate, 3))
pygame.draw.rect(ui.screen, GREEN_THICK_COLOR if game_info.get_rate() < 0.8 else BLUE_THICK_COLOR,
(0, 187, w * game_info.get_rate(), 3))
# キーボード
if is_tmp_next_lyrics_printing or is_cont_next_lyrics_printing:
for i in range(3):
lyrics_index = (i + game_info.lyrincs_index + 1)
if lyrics_index >= len(game_info.score.score):
break
ui.print_str(5, 193 + 60 * i, ui.system_font, "[{}]".format(lyrics_index), TEXT_COLOR)
if game_info.score.score[lyrics_index][1][:1] != "/":
ui.print_str(5, 210 + 60 * i, ui.full_font, game_info.score.score[lyrics_index][1], TEXT_COLOR)
ui.print_str(5, 230 + 60 * i, ui.system_font,
Romautil.hira2roma(game_info.score.score[lyrics_index][2]),
more_whitish(TEXT_COLOR, 50))
else:
if game_info.has_to_prevent_miss:
keyboard_drawer.draw("", background_color=(192, 192, 192))
else:
keyboard_drawer.draw(game_info.target_roma[:1],
background_color=(192, 192, 192) if game_info.completed else None)
# 点数表示
if game_info.point < 0:
if frame_count % 20 < 10:
score_color = RED_COLOR
else:
score_color = BLUE_THICK_COLOR
ui.print_str(5, 20, ui.alphabet_font, "{:08d}".format(game_info.point), score_color)
else:
ui.print_str(5, 20, ui.alphabet_font, "{:08d}".format(game_info.point), BLUE_THICK_COLOR)
# --- リアルタイム情報 ---
pygame.draw.line(ui.screen, more_whitish(TEXT_COLOR, 100), (0, 375), (w, 375), 2)
# タイピング速度
ui.print_str(MARGIN, 382, ui.system_font, "タイピング速度", more_whitish(TEXT_COLOR, 100))
if game_info.get_key_per_second() > 4:
color = more_blackish(RED_COLOR, 30 if frame_count % 10 < 5 else 0)
pygame.draw.rect(ui.screen, color, (MARGIN, 400, w - MARGIN * 2, 20))
else:
pygame.draw.rect(ui.screen, GREEN_THIN_COLOR, (MARGIN, 400, w - MARGIN * 2, 20))
pygame.draw.rect(ui.screen, more_blackish(GREEN_THIN_COLOR, 50),
(MARGIN, 400, game_info.get_key_per_second() / 4 * (w - MARGIN * 2), 20))
DrawingUtil.write_center_x(ui.screen, w / 2, 398, ui.system_font,
"{:4.2f} Char/sec".format(game_info.get_key_per_second()), TEXT_COLOR)
# 正確率の数値情報
ui.print_str(MARGIN, 430, ui.system_font, "正確率", more_whitish(TEXT_COLOR, 100))
pygame.draw.rect(ui.screen, more_blackish(RED_COLOR, 50),
(MARGIN + 5, 510, game_info.get_full_accuracy() * 250, 3))
ui.print_str(MARGIN + 5, 430, ui.big_font, "{:05.1f}%".format(game_info.get_full_accuracy() * 100),
tuple(x * game_info.get_full_accuracy() for x in RED_COLOR))
# ランク
ui.print_str(MARGIN + 320, 430, ui.system_font, "達成率", more_whitish(TEXT_COLOR, 100))
ui.print_str(MARGIN + 330, 430, ui.big_font, "{:05.1f}%".format(game_info.get_rate() * 100), BLUE_THICK_COLOR)
# レイヤーが変わるのでここで前面エフェクトを更新する
ui.update_effector(0)
# FPSカウンタ
ui.print_str(3, -3, ui.system_font, "{:5.2f} fps".format(fps_clock.get_fps()), TEXT_COLOR)
# ループ終わり
fps_clock.tick(60)
pygame.display.update()
# メインループ終了
print("*****************")
print("* LOOP FINISHED *")
print("*****************")
print(mainloop_continues)
print(game_finished_reason)
pygame.mixer.music.stop()
return game_info
def gs_result(game_info):
ui = Screen()
w, h = ui.screen_size
mainloop_continues = True
retry = False
rank_standard = [200, 150, 125, 100, 99.50, 99, 98, 97, 94, 90, 80, 60, 40, 20, 10, 0]
rank_string = ["Wow", "Unexpected", "Very God", "God", "Pro", "Genius", "Geki-tsuyo", "tsuyotusyo", "AAA", "AA",
"A", "B", "C", "D", "E", "F"]
while mainloop_continues:
for event in pygame.event.get():
if event.type == QUIT:
mainloop_continues = False
break
if event.type == KEYDOWN:
# ESCキーの場合
if event.key == K_ESCAPE:
mainloop_continues = False
break
elif event.key == K_r:
retry = True
mainloop_continues = False
break
ui.screen.fill(BACKGROUND_COLOR)
# 曲のタイトルなどの情報
ui.print_str(MARGIN, 0, ui.nihongo_font, score.properties["title"], TEXT_COLOR)
ui.print_str(MARGIN, 50, ui.full_font,
score.properties["song_author"] + "/" + score.properties["singer"],
more_whitish(TEXT_COLOR, 25))
pygame.draw.line(ui.screen, more_whitish(TEXT_COLOR, 100), (0, 90), (w, 90), 2)
ui.print_str(MARGIN, 85, ui.big_font,
rank_string[game_info.calculate_rank()],
more_blackish(RED_COLOR, 150 * (game_info.calculate_rank() + 1) / len(rank_standard)))
ui.print_str(MARGIN, 150, ui.nihongo_font, "{:06.2f}%".format(game_info.get_rate() * 100),
tuple(x * game_info.get_full_accuracy() for x in RED_COLOR))
if game_info.get_key_per_second() > 4:
pygame.draw.rect(ui.screen, more_blackish(RED_COLOR, 30), (MARGIN, 210, w - MARGIN * 2, 20))
else:
pygame.draw.rect(ui.screen, GREEN_THIN_COLOR, (MARGIN, 210, w - MARGIN * 2, 20))
pygame.draw.rect(ui.screen, more_blackish(GREEN_THIN_COLOR, 50),
(MARGIN, 210, game_info.get_key_per_second() / 4 * (w - MARGIN * 2), 20))
DrawingUtil.write_center_x(ui.screen, w / 2, 208, ui.system_font,
"{:4.2f} Char/sec".format(game_info.get_key_per_second()), TEXT_COLOR)
if game_info.calculate_rank() > 0:
acheive_rate = rank_standard[game_info.calculate_rank() - 1] - game_info.get_rate() * 100
ui.print_str(MARGIN + 200, 160, ui.system_font,
"{} まで ".format(rank_string[game_info.calculate_rank() - 1]), BLUE_THICK_COLOR)
ui.print_str(MARGIN + 200, 168, ui.alphabet_font, "{:06.2f}% ".format(acheive_rate), BLUE_THICK_COLOR)
ui.print_str(MARGIN, 240, ui.system_font, "正確率", more_whitish(TEXT_COLOR, 50))
ui.print_str(MARGIN + 10, 247, ui.nihongo_font, "{:06.2f}%".format(game_info.get_full_accuracy() * 100),
tuple(x * game_info.get_full_accuracy() for x in RED_COLOR))
DrawingUtil.write_limit(ui.screen, (w - 15, 150), w / 2, ui.nihongo_font, "{:08}".format(game_info.point))
pygame.draw.line(ui.screen, more_whitish(TEXT_COLOR, 100), (0, 320), (w, 320), 2)
# TODO: 21世紀史上もっともひどいデザインをどうにかする
ui.print_str(MARGIN - 10, 320, ui.alphabet_font, "[R]/リトライ", TEXT_COLOR)
ui.print_str(MARGIN + 300, 320, ui.alphabet_font, "[Esc]/終了", TEXT_COLOR)
fps_clock.tick(60)
pygame.display.update()
return retry
def gs_special_error_log(score_data, path):
ui = Screen()
w, h = ui.screen_size
error_index = 0
mainloop_continues = True
while mainloop_continues:
for event in pygame.event.get():
if event.type == QUIT:
mainloop_continues = False
break
if event.type == KEYDOWN:
# ESCキーの場合
if event.key == K_ESCAPE:
mainloop_continues = False
break
if event.key == K_UP:
error_index = max(0, error_index - 1)
if event.key == K_DOWN:
error_index = min(error_index + 1, len(score_data.log) - 1)
ui.screen.fill(BACKGROUND_COLOR)
ui.print_str(MARGIN - 10, 0, ui.alphabet_font, "読み込みに失敗しました!", RED_COLOR)
ui.print_str(MARGIN - 10, 35, ui.system_font, "譜面がおかしなことになっているようです:", RED_COLOR)
ui.print_str(MARGIN - 10, 60, ui.system_font, path, TEXT_COLOR)
pygame.draw.line(ui.screen, more_whitish(TEXT_COLOR, 100), (0, 83), (w, 83), 2)
ui.print_str(MARGIN, 90, ui.system_font, score_data.log[0][1], TEXT_COLOR)
ui.print_str(MARGIN, 105, ui.alphabet_font, score_data.log[0][2], TEXT_COLOR)
pygame.draw.line(ui.screen, more_whitish(TEXT_COLOR, 100), (0, 160), (w, 160), 2)
fps_clock.tick(60)
pygame.display.update()
if __name__ == '__main__':
try:
score = gs_specify_score()
loop_continues = True
while loop_continues:
game_result = gs_main_routine(score)
loop_continues = gs_result(game_result)
finally:
pygame.quit()
| [
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.mixer.pre_init",
"pygame.display.update",
"pygame.mixer.music.set_volume",
"os.path.isfile",
"pygame.draw.rect",
"pygame.time.Clock",
"pygame.mixer.music.play",
"pygame.mixer.music.stop",
"pygame.mixer.init",
"pygame.mixer.music.get_pos"
] | [((347, 388), 'pygame.mixer.pre_init', 'pygame.mixer.pre_init', (['(44100)', '(16)', '(2)', '(1024)'], {}), '(44100, 16, 2, 1024)\n', (368, 388), False, 'import pygame\n'), ((389, 408), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (406, 408), False, 'import pygame\n'), ((409, 422), 'pygame.init', 'pygame.init', ([], {}), '()\n', (420, 422), False, 'import pygame\n'), ((559, 578), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (576, 578), False, 'import pygame\n'), ((1808, 1842), 'pygame.mixer.music.set_volume', 'pygame.mixer.music.set_volume', (['(0.5)'], {}), '(0.5)\n', (1837, 1842), False, 'import pygame\n'), ((1847, 1873), 'pygame.mixer.music.play', 'pygame.mixer.music.play', (['(1)'], {}), '(1)\n', (1870, 1873), False, 'import pygame\n'), ((15497, 15522), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (15520, 15522), False, 'import pygame\n'), ((2566, 2584), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2582, 2584), False, 'import pygame\n'), ((15297, 15320), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (15318, 15320), False, 'import pygame\n'), ((15974, 15992), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (15990, 15992), False, 'import pygame\n'), ((19025, 19048), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (19046, 19048), False, 'import pygame\n'), ((19260, 19278), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (19276, 19278), False, 'import pygame\n'), ((20448, 20471), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (20469, 20471), False, 'import pygame\n'), ((20729, 20742), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (20740, 20742), False, 'import pygame\n'), ((730, 757), 'os.path.isfile', 'os.path.isfile', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (744, 757), False, 'import os\n'), ((1991, 2019), 'pygame.mixer.music.get_pos', 'pygame.mixer.music.get_pos', ([], {}), '()\n', (2017, 2019), False, 'import pygame\n'), ((2214, 2242), 'pygame.mixer.music.get_pos', 'pygame.mixer.music.get_pos', ([], {}), '()\n', (2240, 2242), False, 'import pygame\n'), ((11723, 11792), 'pygame.draw.rect', 'pygame.draw.rect', (['ui.screen', 'RED_COLOR', '(0, 187, w * acheive_rate, 3)'], {}), '(ui.screen, RED_COLOR, (0, 187, w * acheive_rate, 3))\n', (11739, 11792), False, 'import pygame\n'), ((13844, 13913), 'pygame.draw.rect', 'pygame.draw.rect', (['ui.screen', 'color', '(MARGIN, 400, w - MARGIN * 2, 20)'], {}), '(ui.screen, color, (MARGIN, 400, w - MARGIN * 2, 20))\n', (13860, 13913), False, 'import pygame\n'), ((13940, 14025), 'pygame.draw.rect', 'pygame.draw.rect', (['ui.screen', 'GREEN_THIN_COLOR', '(MARGIN, 400, w - MARGIN * 2, 20)'], {}), '(ui.screen, GREEN_THIN_COLOR, (MARGIN, 400, w - MARGIN * 2, 20)\n )\n', (13956, 14025), False, 'import pygame\n'), ((17426, 17511), 'pygame.draw.rect', 'pygame.draw.rect', (['ui.screen', 'GREEN_THIN_COLOR', '(MARGIN, 210, w - MARGIN * 2, 20)'], {}), '(ui.screen, GREEN_THIN_COLOR, (MARGIN, 210, w - MARGIN * 2, 20)\n )\n', (17442, 17511), False, 'import pygame\n')] |
#!/usr/bin/env python
"""Generic definition of a robot.
Currently subclasses from the iRobotCreate class to allow for
physical control of an iRobot Create base (if the Robot class is
configured to control hardware) but could be subclassed to use other
physical hardware in the future.
A robot has a planner that allows it to select goals and a map to
keep track of other robots, feasible regions to which it can move,
an occupancy grid representation of the world, and role-specific
information (such as a probability layer for the cop robot to keep
track of where robber robots may be).
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
import math
import random
import numpy as np
from shapely.geometry import Point, Polygon
from cops_and_robots.robo_tools.pose import Pose
from cops_and_robots.robo_tools.iRobot_create import iRobotCreate
from cops_and_robots.robo_tools.planner import (MissionPlanner,
GoalPlanner,
PathPlanner,
Controller)
from cops_and_robots.map_tools.map import Map
from cops_and_robots.map_tools.map_elements import MapObject
class Robot(iRobotCreate):
"""Class definition for the generic robot object.
.. image:: img/classes_Robot.png
Parameters
----------
name : str
The robot's name.
pose : array_like, optional
The robot's initial [x, y, theta] in [m,m,degrees] (defaults to
[0, 0.5, 0]).
map_name : str, optional
The name of the map (defaults to 'fleming').
role : {'robber','cop'}, optional
The robot's role in the cops and robbers game.
status : two-element list of strings, optional
The robot's initial mission status and movement status. Cops and
robbers share possible movement statuses, but their mission statuses
differ entirely. Defaults to ['on the run', 'without a goal'].
planner_type: {'simple', 'particle', 'MAP'}, optional
The robot's type of planner.
consider_others : bool, optional
Whether this robot generates other robot models (e.g. the primary cop
will imagine other robots moving around.) Defaults to false.
**kwargs
Arguments passed to the ``MapObject`` attribute.
"""
def __init__(self,
name,
pose=None,
pose_source='python',
color_str='darkorange',
map_cfg={},
create_mission_planner=True,
goal_planner_cfg={},
path_planner_cfg={},
**kwargs):
# Object attributes
self.name = name
self.pose_source = pose_source
# Setup map
self.map = Map(**map_cfg)
# If pose is not given, randomly place in feasible layer.
feasible_robot_generated = False
if pose is None:
while not feasible_robot_generated:
x = random.uniform(self.map.bounds[0], self.map.bounds[2])
y = random.uniform(self.map.bounds[1], self.map.bounds[3])
if self.map.feasible_layer.pose_region.contains(Point([x, y])):
feasible_robot_generated = True
theta = random.uniform(0, 359)
pose = [x, y, theta]
self.pose2D = Pose(self, pose, pose_source)
self.pose_history = np.array(([0, 0, 0], self.pose2D.pose))
if pose_source == 'python':
self.publish_to_ROS = False
else:
self.publish_to_ROS = True
# Setup planners
if create_mission_planner:
self.mission_planner = MissionPlanner(self)
self.goal_planner = GoalPlanner(self,
**goal_planner_cfg)
# If pose_source is python, this robot is just in simulation
if not self.publish_to_ROS:
self.path_planner = PathPlanner(self, **path_planner_cfg)
self.controller = Controller(self)
# Define MapObject
# <>TODO: fix this horrible hack
if self.name == 'Deckard':
pose = [0, 0, -np.pi/4]
r = iRobotCreate.DIAMETER / 2
n_sides = 4
pose = [0, 0, -np.pi/4]
x = [r * np.cos(2 * np.pi * n / n_sides + pose[2]) + pose[0]
for n in range(n_sides)]
y = [r * np.sin(2 * np.pi * n / n_sides + pose[2]) + pose[1]
for n in range(n_sides)]
shape_pts = Polygon(zip(x, y)).exterior.coords
else:
shape_pts = Point([0, 0]).buffer(iRobotCreate.DIAMETER / 2)\
.exterior.coords
self.map_obj = MapObject(self.name, shape_pts[:], has_relations=False,
blocks_camera=False, color_str=color_str)
self.update_shape()
def update_shape(self):
"""Update the robot's map_obj.
"""
self.map_obj.move_absolute(self.pose2D.pose)
def update(self, i=0):
"""Update all primary functionality of the robot.
This includes planning and movement for both cops and robbers,
as well as sensing and map animations for cops.
Parameters
----------
i : int, optional
The current animation frame. Default is 0 for non-animated robots.
Returns
-------
tuple or None
`None` if the robot does not generate an animation packet, or a
tuple of all animation parameters otherwise.
"""
# <>TODO: @Matt Figure out how to move this back to pose class.
if self.pose_source == 'tf':
self.pose2D.tf_update()
        if self.mission_planner.mission_status != 'stopped':
# Update statuses and planners
self.mission_planner.update()
self.goal_planner.update()
if self.publish_to_ROS is False:
self.path_planner.update()
self.controller.update()
# Add to the pose history, update the map
self.pose_history = np.vstack((self.pose_history,
self.pose2D.pose[:]))
self.update_shape()
###############################################################################
# Custom Robot classes
###############################################################################
class ImaginaryRobot(object):
"""An imaginary robber for the cop
Represents what the cop thinks the robber is doing.
    Includes robber's real pose for pseudo detection.
"""
def __init__(self, name, pose=None):
self.name = name
self.pose2D = pose
class Distractor(Robot):
"""The Distractor subclass of the generic robot type.
Distractors act as distractions during search. They can be given
move goals, but do not interact with other robots
Parameters
----------
name : str
The distractor's name.
pose : list of float, optional
        The robot's initial [x, y, theta] (defaults to [0, 0.5, 90]).
planner_type: {'simple', 'particle', 'MAP'}
The robot's own type of planner.
Attributes
----------
planner
"""
mission_planner_defaults = {}
goal_planner_defaults = {'type_': 'stationary'}
path_planner_defaults = {'type_': 'direct'}
def __init__(self,
name,
pose=[0, 0, 90],
pose_source='python',
map_cfg={},
mission_planner_cfg={},
goal_planner_cfg={},
path_planner_cfg={},
**kwargs):
# Use class defaults for kwargs not included
mp_cfg = Distractor.mission_planner_defaults.copy()
mp_cfg.update(mission_planner_cfg)
gp_cfg = Distractor.goal_planner_defaults.copy()
gp_cfg.update(goal_planner_cfg)
pp_cfg = Distractor.path_planner_defaults.copy()
pp_cfg.update(path_planner_cfg)
# Superclass and compositional attributes
super(Distractor, self).__init__(name,
pose=pose,
pose_source=pose_source,
goal_planner_cfg=gp_cfg,
path_planner_cfg=pp_cfg,
map_cfg=map_cfg,
color_str='darkgreen')
| [
"cops_and_robots.robo_tools.planner.MissionPlanner",
"cops_and_robots.robo_tools.pose.Pose",
"random.uniform",
"cops_and_robots.map_tools.map.Map",
"numpy.sin",
"shapely.geometry.Point",
"numpy.array",
"cops_and_robots.robo_tools.planner.GoalPlanner",
"numpy.vstack",
"numpy.cos",
"cops_and_robots.map_tools.map_elements.MapObject",
"cops_and_robots.robo_tools.planner.PathPlanner",
"cops_and_robots.robo_tools.planner.Controller"
] | [((2978, 2992), 'cops_and_robots.map_tools.map.Map', 'Map', ([], {}), '(**map_cfg)\n', (2981, 2992), False, 'from cops_and_robots.map_tools.map import Map\n'), ((3555, 3584), 'cops_and_robots.robo_tools.pose.Pose', 'Pose', (['self', 'pose', 'pose_source'], {}), '(self, pose, pose_source)\n', (3559, 3584), False, 'from cops_and_robots.robo_tools.pose import Pose\n'), ((3613, 3652), 'numpy.array', 'np.array', (['([0, 0, 0], self.pose2D.pose)'], {}), '(([0, 0, 0], self.pose2D.pose))\n', (3621, 3652), True, 'import numpy as np\n'), ((3927, 3964), 'cops_and_robots.robo_tools.planner.GoalPlanner', 'GoalPlanner', (['self'], {}), '(self, **goal_planner_cfg)\n', (3938, 3964), False, 'from cops_and_robots.robo_tools.planner import MissionPlanner, GoalPlanner, PathPlanner, Controller\n'), ((4901, 5002), 'cops_and_robots.map_tools.map_elements.MapObject', 'MapObject', (['self.name', 'shape_pts[:]'], {'has_relations': '(False)', 'blocks_camera': '(False)', 'color_str': 'color_str'}), '(self.name, shape_pts[:], has_relations=False, blocks_camera=False,\n color_str=color_str)\n', (4910, 5002), False, 'from cops_and_robots.map_tools.map_elements import MapObject\n'), ((3476, 3498), 'random.uniform', 'random.uniform', (['(0)', '(359)'], {}), '(0, 359)\n', (3490, 3498), False, 'import random\n'), ((3878, 3898), 'cops_and_robots.robo_tools.planner.MissionPlanner', 'MissionPlanner', (['self'], {}), '(self)\n', (3892, 3898), False, 'from cops_and_robots.robo_tools.planner import MissionPlanner, GoalPlanner, PathPlanner, Controller\n'), ((4142, 4179), 'cops_and_robots.robo_tools.planner.PathPlanner', 'PathPlanner', (['self'], {}), '(self, **path_planner_cfg)\n', (4153, 4179), False, 'from cops_and_robots.robo_tools.planner import MissionPlanner, GoalPlanner, PathPlanner, Controller\n'), ((4210, 4226), 'cops_and_robots.robo_tools.planner.Controller', 'Controller', (['self'], {}), '(self)\n', (4220, 4226), False, 'from cops_and_robots.robo_tools.planner import MissionPlanner, GoalPlanner, PathPlanner, Controller\n'), ((6302, 6353), 'numpy.vstack', 'np.vstack', (['(self.pose_history, self.pose2D.pose[:])'], {}), '((self.pose_history, self.pose2D.pose[:]))\n', (6311, 6353), True, 'import numpy as np\n'), ((3194, 3248), 'random.uniform', 'random.uniform', (['self.map.bounds[0]', 'self.map.bounds[2]'], {}), '(self.map.bounds[0], self.map.bounds[2])\n', (3208, 3248), False, 'import random\n'), ((3269, 3323), 'random.uniform', 'random.uniform', (['self.map.bounds[1]', 'self.map.bounds[3]'], {}), '(self.map.bounds[1], self.map.bounds[3])\n', (3283, 3323), False, 'import random\n'), ((3388, 3401), 'shapely.geometry.Point', 'Point', (['[x, y]'], {}), '([x, y])\n', (3393, 3401), False, 'from shapely.geometry import Point, Polygon\n'), ((4490, 4531), 'numpy.cos', 'np.cos', (['(2 * np.pi * n / n_sides + pose[2])'], {}), '(2 * np.pi * n / n_sides + pose[2])\n', (4496, 4531), True, 'import numpy as np\n'), ((4605, 4646), 'numpy.sin', 'np.sin', (['(2 * np.pi * n / n_sides + pose[2])'], {}), '(2 * np.pi * n / n_sides + pose[2])\n', (4611, 4646), True, 'import numpy as np\n'), ((4796, 4809), 'shapely.geometry.Point', 'Point', (['[0, 0]'], {}), '([0, 0])\n', (4801, 4809), False, 'from shapely.geometry import Point, Polygon\n')] |
# Copyright 2019 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import collections
import json
import pprint
import sys
import pprint as pp
import traceback
import numpy as np
import jax.numpy as jnp
from jax import jit, make_jaxpr, xla_computation
from jax import random
from jax import lax
scary_map = map
def map(f, *args):
return list(scary_map(f, *args))
class JaxFunction(object):
def __init__(self, binders, decls, results):
for b in binders: assert isinstance(b, Var)
for b, op in decls:
assert isinstance(b, Var)
assert isinstance(op, Operation)
for r in results: assert isinstance(r, Atom)
self.binders = binders
self.decls = decls
self.results = results
def ser(self):
assert False
@staticmethod
def des(obj):
binders_ser, (decls_ser, results_ser) = obj
binders = map(Var.des, binders_ser)
results = map(Atom.des, results_ser)
decls = [(Var.des(b), Operation.des(op)) for (b, op) in decls_ser]
return JaxFunction(binders, decls, results)
class Name(object):
def __init__(self, namespace, root, i):
assert isinstance(i, int)
assert isinstance(namespace, str)
assert isinstance(root, str)
self._name = (namespace, root, i)
@staticmethod
def des(obj):
namespace, root, i = obj
return Name(namespace, root, i)
def ser(self):
return {"tag":"Name", "contents": list(self._name)}
def __repr__(self): return str(self)
def __str__(self):
(_, root, i) = self._name
if i == 0:
return root
else:
return root + str(i)
def __eq__(self, other):
assert isinstance(other, Name)
return self._name == other._name
def __hash__(self):
return hash(self._name)
class IdxVar(object):
def __init__(self, name, size):
assert isinstance(name, Name)
assert isinstance(size, int)
self.name = name
self.size = size
def __repr__(self): return str(self)
def __str__(self):
return str(self.name) + ":" + str(self.size)
def __eq__(self, other):
assert isinstance(other, IdxVar)
return self.name == other.name
def __hash__(self):
return hash(self.name)
@staticmethod
def des(obj):
name, idxSize = obj
assert name["tag"] == "Name"
return IdxVar(Name.des(name["contents"]), idxSize)
class Var(object):
def __init__(self, name, ty):
assert isinstance(ty, Ty)
assert isinstance(name, Name)
self.name = name
self.ty = ty
def __repr__(self): return str(self)
def __str__(self):
return str(self.name) + ":" + str(self.ty)
def __eq__(self, other):
assert isinstance(other, Var)
return self.name == other.name
def __hash__(self):
return hash(self.name)
def ser(self):
return [self.name.ser(), self.ty.ser()]
@staticmethod
def des(obj):
name, (shape, basetype) = obj
assert name["tag"] == "Name"
return Var(Name.des(name["contents"]), Ty(shape, basetype))
class Atom(object):
def __init__(self, case, data):
self.case = case
if case == "Var":
assert isinstance(data, Var)
self.var = data
elif case == "Lit":
assert isinstance(data, arrayish_types), type(data)
self.val = data
else:
assert False
def __repr__(self): return str(self)
def __str__(self):
if self.case == "Var":
return str(self.var)
elif self.case == "Lit":
return str(self.val)
else:
assert False
@property
def ty(self):
if self.case == "Var":
return self.var.ty
elif self.case == "Lit":
x = self.val
return array_ty(x)
else:
assert False
@staticmethod
def des(obj):
if obj["tag"] == "JVar":
val = obj["contents"]
return Atom("Var", Var.des(val))
elif obj["tag"] == "JLit":
shape, vec = obj["contents"]
val = np.array(vec["contents"], dtype=vec_dtype(vec)).reshape(shape)
return Atom("Lit", val)
class IndexedAtom(object):
def __init__(self, atom, idxs):
assert isinstance(atom, Atom)
for i in idxs: assert isinstance(i, IdxVar)
self.atom = atom
self.idxs = idxs
@property
def ty(self):
atom_ty = self.atom.ty
return Ty(atom_ty.shape[:len(self.idxs)], atom_ty.basetype)
@staticmethod
def des(obj):
atom, idxs = obj
return IndexedAtom(Atom.des(atom), map(IdxVar.des, idxs))
def __repr__(self): return str(self)
def __str__(self):
return str(self.atom) + "".join("." + str(i) for i in self.idxs)
class Ty(object):
def __init__(self, shape, basetype):
for n in shape: assert isinstance(n, int)
assert basetype in ["IntType", "BoolType", "RealType"]
self.basetype = basetype
self.shape = tuple(shape)
def ser(self):
return [self.shape, self.basetype]
def __eq__(self, other):
assert isinstance(other, Ty)
return self.basetype == other.basetype and self.shape == other.shape
@staticmethod
def des(obj):
assert False
def __repr__(self): return str(self)
def __str__(self):
return self.basetype + str(self.shape)
MapIdx = "MapIdx"
SumIdx = "SumIdx"
class Operation(object):
def __init__(self, binders, op_name, size_args, args):
for (i, flavor) in binders:
assert isinstance(i, IdxVar)
assert flavor in (MapIdx, SumIdx)
assert isinstance(op_name, str)
for size in size_args: assert isinstance(size, int)
for arg in args: assert isinstance(arg, IndexedAtom)
self.binders = binders
self.op_name = op_name
self.size_args = size_args
self.args = args
@property
def all_idxs(self):
return [i for i, _ in self.binders]
def ser(self):
assert False
@staticmethod
def des(obj):
binders_ser, op_and_args_ser = obj
binders = [(IdxVar.des(i), fl) for i, fl in binders_ser]
op_name, size_args, args = des_op_and_args(op_and_args_ser)
return Operation(binders, op_name, size_args, args)
def __repr__(self): return str(self)
def __str__(self):
return "for {} . {} {}".format(
self.binders, self.op_name, tuple(self.args))
def array_ty(x):
return Ty(x.shape, dtype_basetype(x.dtype))
def ser_array(arr):
assert isinstance(arr, arrayish_types)
return ser_flat_vec(arr.ravel())
def ser_flat_vec(vec):
if vec.dtype in [np.int32, np.int64]:
return {"tag":"IntVec", "contents": map(int, vec)}
  elif vec.dtype in [np.float32, np.float64]:
return {"tag":"DoubleVec", "contents": map(float, vec)}
else:
assert False
def des_op_and_args(obj):
tag = obj["tag"]
if tag == "JScalarBinOp":
binop_name, x_ser, y_ser = obj["contents"]
x = IndexedAtom.des(x_ser)
y = IndexedAtom.des(y_ser)
return binop_name["tag"], [], [x, y]
if tag == "JScalarUnOp":
unop_name, x_ser = obj["contents"]
x = IndexedAtom.des(x_ser)
return unop_name, [], [x]
elif tag == "JIota":
size = obj["contents"]
assert isinstance(size, int)
return "Iota", [size], []
elif tag == "JId":
x_ser = obj["contents"]
x = IndexedAtom.des(x_ser)
return "Id", [], [x]
elif tag == "JGet":
x_ser, y_ser = obj["contents"]
x = IndexedAtom.des(x_ser)
y = IndexedAtom.des(y_ser)
return "Get", [], [x, y]
elif tag == "JThreeFry2x32":
x_ser, y_ser = obj["contents"]
x = IndexedAtom.des(x_ser)
y = IndexedAtom.des(y_ser)
return "ThreeFry2x32", [], [x, y]
else:
raise Exception("Not implemented: " + str(tag))
global_env = {}
def eval_op(op):
if op.op_name in ("FMul", "IMul"):
ans = eval_einsum(op)
return Atom("Lit", ans)
else:
broadcast_ans = eval_for(op)
sum_axes = tuple(i for (i, (_, fl)) in enumerate(op.binders) if fl == SumIdx)
if sum_axes == ():
return Atom("Lit", broadcast_ans)
else:
summed_ans = np.sum(broadcast_ans, axis=sum_axes)
return Atom("Lit", summed_ans)
def eval_einsum(op):
assert op.op_name in ("FMul", "IMul")
x, y = op.args
x_axes = [str(i.name) for i in x.idxs]
y_axes = [str(i.name) for i in y.idxs]
out_axes = [str(i.name) for i, f in op.binders if f != SumIdx]
return jnp.einsum(x.atom.val, x_axes, y.atom.val, y_axes, out_axes)
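# Worked example (editor sketch, mirroring the call above): with binders
# [(i, MapIdx), (j, SumIdx)], x indexed as x.i.j and y indexed as y.j, the axis-name
# lists become x_axes=['i','j'], y_axes=['j'], out_axes=['i'], so the einsum computes
# out[i] = sum_j x[i, j] * y[j], i.e. a contraction over the summed axis j.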
def eval_for(op):
if op.op_name in ("IAdd", "IMul", "FAdd", "FMul", "FDiv"):
x, y = op.args
x_bc = broadcast_dims(op.all_idxs, x.idxs, x.atom.val)
y_bc = broadcast_dims(op.all_idxs, y.idxs, y.atom.val)
if op.op_name in ("IAdd", "FAdd"):
return jnp.add(x_bc, y_bc)
elif op.op_name in ("IMul", "FMul"):
return jnp.multiply(x_bc, y_bc)
if op.op_name in ("FDiv",):
return jnp.divide(x_bc, y_bc)
else:
raise Exception("Not implemented: " + str(op.op_name))
elif op.op_name == "Iota":
n, = op.size_args
val = jnp.arange(n)
val_bc = broadcast_dims(op.all_idxs, [], val)
return val_bc
elif op.op_name == "Id":
x, = op.args
x_bc = broadcast_dims(op.all_idxs, x.idxs, x.atom.val)
return x_bc
elif op.op_name == "Get":
x, idx = op.args
out_shape = [i.size for i in op.all_idxs]
x_idxs_used = get_stack_idxs_used(op.all_idxs, x.idxs)
leading_idx_arrays = []
for i, idx_used in enumerate(x_idxs_used):
if idx_used:
leading_idx_arrays.append(nth_iota(out_shape, i))
else:
pass
payload_idx_array = broadcast_dims(op.all_idxs, idx.idxs, idx.atom.val)
out = x.atom.val[tuple(leading_idx_arrays) + (payload_idx_array,)]
return out
elif op.op_name == "IntToReal":
x, = op.args
real_val = jnp.array(x.atom.val, dtype="float32")
x_bc = broadcast_dims(op.all_idxs, x.idxs, real_val)
return x_bc
elif op.op_name in ("FNeg", "INeg"):
x, = op.args
x_bc = broadcast_dims(op.all_idxs, x.idxs, jnp.negative(x.atom.val))
return x_bc
elif op.op_name == "ThreeFry2x32":
convert_64_to_32s = lambda x: np.array([x]).view(np.uint32)
convert_32s_to_64 = lambda x: np.int64(np.array(x).view(np.int64).item())
x, y = op.args
key, count = convert_64_to_32s(x.atom.val), convert_64_to_32s(y.atom.val)
result = convert_32s_to_64(random.threefry_2x32(key, count))
x_bc = broadcast_dims(op.all_idxs, x.idxs, result)
return x_bc
else:
raise Exception("Unrecognized op: {}".format(op.op_name))
def broadcast_dims(for_idxs, idxs, x):
shape = [i.size for i in for_idxs]
idxs_used = get_stack_idxs_used(for_idxs, idxs)
bcast_dims = [i for i, b in enumerate(idxs_used) if b]
return lax.broadcast_in_dim(x, shape, bcast_dims)
def broadcast_with(x, final_shape, idxs_used):
rem_shape = list(x.shape[sum(idxs_used):])
reshape_shape = [size if use else 1 for (size, use) in zip(final_shape, idxs_used)]
x_singletons = jnp.reshape(x, reshape_shape + rem_shape)
return jnp.broadcast_to(x_singletons, final_shape + rem_shape)
def nth_iota(shape, i):
size = shape[i]
iota = jnp.arange(size)
idxs_used = [Discard for _ in shape]
idxs_used[i] = Use
return broadcast_with(iota, shape, idxs_used)
Use = True
Discard = False
def get_stack_idxs_used(for_idxs, idxs):
stack_vars = []
cur_idxs = list(idxs)
for i in for_idxs:
if cur_idxs and i == cur_idxs[0]:
stack_vars.append(Use)
cur_idxs = cur_idxs[1:]
else:
stack_vars.append(Discard)
return stack_vars
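# Worked example (editor sketch): with for_idxs = [i:2, j:3] and idxs = [j],
# get_stack_idxs_used returns [Discard, Use], so broadcast_dims maps a length-3 value
# onto axis 1 of the output, i.e. lax.broadcast_in_dim(x, [2, 3], [1]) -> shape (2, 3).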
arrayish_types = (jnp.ndarray, np.ndarray, np.int64, np.float64, np.float32)
def subst_op(env, op):
args = [IndexedAtom(subst_atom(env, x.atom), x.idxs) for x in op.args]
return Operation(op.binders, op.op_name, op.size_args, args)
def subst_atom(env, x):
assert isinstance(x, Atom)
if x.case == "Var":
return env[x.var]
elif x.case == "Lit":
return x
else:
assert False
def dtype_basetype(x):
if x in [np.int32, np.int64]:
return "IntType"
elif x in [np.float32, np.float64]:
return "RealType"
else:
assert False, x
def vec_dtype(vec):
if vec["tag"] == "IntVec":
return np.int64
elif vec["tag"] == "DoubleVec":
return np.float64
else:
assert False
def atom_as_var(x):
assert isinstance(x, Atom)
i = len(global_env)
name = Name("ArrayName", "arr", i)
v = Var(name, x.ty)
assert v not in global_env
global_env[v] = x
return v
def eval_function_application(top_arg):
def run():
f = JaxFunction.des(top_arg[0])
args = [Atom("Var", Var.des(x)) for x in top_arg[1]]
env = global_env.copy()
args_subst = [subst_atom(env, arg) for arg in args]
for v, arg in zip(f.binders, args_subst):
env[v] = arg
for (v, op) in f.decls:
ans = eval_op(subst_op(env, op))
if not (v.ty == ans.ty):
print(op)
raise Exception("Unexpected type. Expected {}, got {}".format(v.ty, ans.ty))
env[v] = ans
return [subst_atom(env, r).val for r in f.results]
outs = run()
irdump = str(make_jaxpr(run)())
return [atom_as_var(Atom("Lit", out)).ser() for out in outs], irdump
def check_type(ty, val):
assert isinstance(ty, Ty)
def retrieve_arrays(arrs):
vs = map(Var.des, arrs)
return [ser_array(global_env[v].val) for v in vs]
def just_print_it(obj):
print(obj)
return ()
def run_server(functions):
readChan, writeChan = sys.argv[1:]
with open(writeChan, "w") as w:
for line in open(readChan):
(f_idx, arg) = json.loads(line)
try:
f = functions[f_idx]
ans = {"Right" : f(arg)}
except Exception as e:
traceback.print_exc()
ans = {"Left": traceback.format_exc()}
w.write(json.dumps(ans) + "\n")
w.flush()
if __name__ == "__main__":
run_server([eval_function_application,
retrieve_arrays,
just_print_it])
| [
"traceback.format_exc",
"jax.numpy.reshape",
"json.loads",
"jax.make_jaxpr",
"jax.numpy.divide",
"jax.numpy.arange",
"jax.numpy.einsum",
"jax.numpy.broadcast_to",
"json.dumps",
"jax.numpy.array",
"jax.numpy.negative",
"numpy.sum",
"jax.random.threefry_2x32",
"numpy.array",
"jax.numpy.add",
"traceback.print_exc",
"jax.lax.broadcast_in_dim",
"jax.numpy.multiply"
] | [((8127, 8187), 'jax.numpy.einsum', 'jnp.einsum', (['x.atom.val', 'x_axes', 'y.atom.val', 'y_axes', 'out_axes'], {}), '(x.atom.val, x_axes, y.atom.val, y_axes, out_axes)\n', (8137, 8187), True, 'import jax.numpy as jnp\n'), ((10448, 10490), 'jax.lax.broadcast_in_dim', 'lax.broadcast_in_dim', (['x', 'shape', 'bcast_dims'], {}), '(x, shape, bcast_dims)\n', (10468, 10490), False, 'from jax import lax\n'), ((10687, 10728), 'jax.numpy.reshape', 'jnp.reshape', (['x', '(reshape_shape + rem_shape)'], {}), '(x, reshape_shape + rem_shape)\n', (10698, 10728), True, 'import jax.numpy as jnp\n'), ((10738, 10793), 'jax.numpy.broadcast_to', 'jnp.broadcast_to', (['x_singletons', '(final_shape + rem_shape)'], {}), '(x_singletons, final_shape + rem_shape)\n', (10754, 10793), True, 'import jax.numpy as jnp\n'), ((10846, 10862), 'jax.numpy.arange', 'jnp.arange', (['size'], {}), '(size)\n', (10856, 10862), True, 'import jax.numpy as jnp\n'), ((7818, 7854), 'numpy.sum', 'np.sum', (['broadcast_ans'], {'axis': 'sum_axes'}), '(broadcast_ans, axis=sum_axes)\n', (7824, 7854), True, 'import numpy as np\n'), ((8457, 8476), 'jax.numpy.add', 'jnp.add', (['x_bc', 'y_bc'], {}), '(x_bc, y_bc)\n', (8464, 8476), True, 'import jax.numpy as jnp\n'), ((8601, 8623), 'jax.numpy.divide', 'jnp.divide', (['x_bc', 'y_bc'], {}), '(x_bc, y_bc)\n', (8611, 8623), True, 'import jax.numpy as jnp\n'), ((8756, 8769), 'jax.numpy.arange', 'jnp.arange', (['n'], {}), '(n)\n', (8766, 8769), True, 'import jax.numpy as jnp\n'), ((12769, 12784), 'jax.make_jaxpr', 'make_jaxpr', (['run'], {}), '(run)\n', (12779, 12784), False, 'from jax import jit, make_jaxpr, xla_computation\n'), ((13221, 13237), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (13231, 13237), False, 'import json\n'), ((8531, 8555), 'jax.numpy.multiply', 'jnp.multiply', (['x_bc', 'y_bc'], {}), '(x_bc, y_bc)\n', (8543, 8555), True, 'import jax.numpy as jnp\n'), ((13348, 13369), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (13367, 13369), False, 'import traceback\n'), ((13431, 13446), 'json.dumps', 'json.dumps', (['ans'], {}), '(ans)\n', (13441, 13446), False, 'import json\n'), ((9516, 9554), 'jax.numpy.array', 'jnp.array', (['x.atom.val'], {'dtype': '"""float32"""'}), "(x.atom.val, dtype='float32')\n", (9525, 9554), True, 'import jax.numpy as jnp\n'), ((13393, 13415), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13413, 13415), False, 'import traceback\n'), ((9731, 9755), 'jax.numpy.negative', 'jnp.negative', (['x.atom.val'], {}), '(x.atom.val)\n', (9743, 9755), True, 'import jax.numpy as jnp\n'), ((10080, 10112), 'jax.random.threefry_2x32', 'random.threefry_2x32', (['key', 'count'], {}), '(key, count)\n', (10100, 10112), False, 'from jax import random\n'), ((9844, 9857), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (9852, 9857), True, 'import numpy as np\n'), ((9917, 9928), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9925, 9928), True, 'import numpy as np\n')] |
from pymongo import MongoClient
def get_all_publications():
print("View all publications: ")
client = MongoClient('mongodb://localhost:27017',
username='root',
password='<PASSWORD>')
db = client['publicationsDB']
cursor = db.publications.find()
for publication in cursor:
print(publication)
get_all_publications() | [
"pymongo.MongoClient"
] | [((111, 196), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://localhost:27017"""'], {'username': '"""root"""', 'password': '"""<PASSWORD>"""'}), "('mongodb://localhost:27017', username='root', password='<PASSWORD>'\n )\n", (122, 196), False, 'from pymongo import MongoClient\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-03-01 06:42
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth.hashers import make_password
def create_default_superuser(apps, schema_editor):
"""
Creates a default super user
"""
User = apps.get_model('auth', 'user')
default_super_user = User(
username="admin",
is_superuser=True,
password=make_password("<PASSWORD>"),
is_staff=True
)
default_super_user.save()
class Migration(migrations.Migration):
dependencies = [
('squealy', '0001_initial'),
]
operations = [
migrations.RunPython(create_default_superuser),
]
| [
"django.db.migrations.RunPython",
"django.contrib.auth.hashers.make_password"
] | [((667, 713), 'django.db.migrations.RunPython', 'migrations.RunPython', (['create_default_superuser'], {}), '(create_default_superuser)\n', (687, 713), False, 'from django.db import migrations\n'), ((446, 473), 'django.contrib.auth.hashers.make_password', 'make_password', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (459, 473), False, 'from django.contrib.auth.hashers import make_password\n')] |
import random
import numpy as np
import os
import sys
import torch
from torch.autograd import Variable
import sys
sys.path.append(".")
import utils # if running from root folder, else append '..'
class DataLoader(object):
"""Stores dataset_params, vocabulary ad tags with their mapping to indices"""
def __init__(self, data_dir, params):
"""Loads dataset_params, vocabulary and tags. Ensure you have already run build_vocab.py on data_dir"""
json_path = os.path.join(data_dir, 'dataset_params.json')
assert os.path.isfile(
json_path), "No json file found at {}, run build_vocab.py".format(json_path)
self.dataset_params = utils.Params(json_path)
# loading vocab
vocab_path = os.path.join(data_dir, 'words.txt')
self.vocab = {}
with open(vocab_path) as f:
# map words to their indices
for i, l in enumerate(f.read().splitlines()):
self.vocab[l] = i
# setting the indices for UNKnown words and PADding symbols
self.unk_ind = self.vocab[self.dataset_params.unk_word]
self.pad_ind = self.vocab[self.dataset_params.pad_word]
# loading tags
tags_path = os.path.join(data_dir, 'tags.txt')
self.tag_map = {}
with open(tags_path) as f:
for i, t in enumerate(f.read().splitlines()):
# map tags to their indices
self.tag_map[t] = i
# adding dataset parameters to param
params.update(json_path)
def load_sentences_labels(self, sentences_file, labels_file, d):
"""
Loads sentences and labels from their corresponding files. Maps tokens and tags to their indices and stores
them in the provided dict d.
Args:
sentences_file: (string) file with sentences with tokens space-separated
            labels_file: (string) file with NER tags for the sentences in sentences_file
d: (dict) a dictionary in which the loaded data is stored
"""
sentences = []
labels = []
with open(sentences_file) as f:
for sentence in f.read().splitlines():
s = [self.vocab[token] if token in self.vocab
else self.unk_ind
for token in sentence.split(' ')]
sentences.append(s)
with open(labels_file) as f:
for sentence in f.read().splitlines():
l = [self.tag_map[label] for label in sentence.split(' ')]
labels.append(l)
# ensure there is a tag for each token
assert len(labels) == len(sentences)
for i in range(len(labels)):
assert len(labels[i]) == len(sentences[i])
# storing sentences and labels in a dict
d['data'] = sentences
d['labels'] = labels
d['size'] = len(sentences)
def load_data(self, types, data_dir):
"""Loads data for each type in types from data_dir"""
data = {}
for split in ['train', 'val', 'test']:
if split in types:
sentences_file = os.path.join(data_dir, split, 'sentences.txt')
labels_file = os.path.join(data_dir, split, 'labels.txt')
data[split] = {}
self.load_sentences_labels(
sentences_file, labels_file, data[split])
return data
def data_iterator(self, data, params, shuffle=False):
"""
Returns a generator that yields batches of data with labels. Batch size is params.batch_size. Expires after one
pass over the data.
Args:
data: (dict) contains data which has keys 'data', 'labels' and 'size'
params: (Params) hyperparameters of the training process.
shuffle: (bool) whether the data should be shuffled
Yields:
batch_data: (Variable) dimension batch_size x seq_len with the sentence data
batch_labels: (Variable) dimension batch_size x seq_len with the corresponding labels
"""
# make a list that decides the order in which we go over the data- this avoids explicit shuffling of data
order = list(range(data['size']))
if shuffle:
random.seed(230)
random.shuffle(order)
# one pass over data
        # note the parentheses: without them, 1 // batch_size evaluates first
        for i in range((data['size'] + 1) // params.batch_size):
# fetch sentences and tags
batch_sentences = [data['data'][idx]
for idx in order[i * params.batch_size:(i + 1) * params.batch_size]]
batch_tags = [data['labels'][idx] for idx in order[i *
params.batch_size:(i + 1) * params.batch_size]]
# compute length of longest sentence in the batch
batch_max_len = max([len(s) for s in batch_sentences])
# prepare a numpy array with the data, initialising the data with pad_ind and all labels with -1
# initialising labels to -1 differentiates tokens with tags from PADding tokens
batch_data = self.pad_ind * \
np.ones((len(batch_sentences), batch_max_len))
batch_labels = -1 * np.ones((len(batch_sentences), batch_max_len))
# copy the data to the numpy array
for j in range(len(batch_sentences)):
cur_len = len(batch_sentences[j])
batch_data[j][:cur_len] = batch_sentences[j]
batch_labels[j][:cur_len] = batch_tags[j]
batch_data, batch_labels = torch.LongTensor(
batch_data), torch.LongTensor(batch_labels)
# shift tensors to GPU if available
if params.cuda:
batch_data, batch_labels = batch_data.cuda(), batch_labels.cuda()
# convert them to Variables to record operations in the computational graph
batch_data, batch_labels = Variable(
batch_data), Variable(batch_labels)
yield batch_data, batch_labels
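# Usage sketch (editor addition; the paths are hypothetical, assuming `data/` was
# produced by build_vocab.py and that params.json defines batch_size and cuda):
#   params = utils.Params('experiments/base_model/params.json')
#   loader = DataLoader('data', params)
#   data = loader.load_data(['train'], 'data')
#   for batch_data, batch_labels in loader.data_iterator(data['train'], params, shuffle=True):
#       # batch_data: LongTensor (batch_size, max_len) padded with pad_ind;
#       # batch_labels padded with -1 so PAD positions can be masked in the loss
#       break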
| [
"random.shuffle",
"torch.LongTensor",
"os.path.join",
"random.seed",
"os.path.isfile",
"utils.Params",
"torch.autograd.Variable",
"sys.path.append"
] | [((114, 134), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (129, 134), False, 'import sys\n'), ((483, 528), 'os.path.join', 'os.path.join', (['data_dir', '"""dataset_params.json"""'], {}), "(data_dir, 'dataset_params.json')\n", (495, 528), False, 'import os\n'), ((544, 569), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (558, 569), False, 'import os\n'), ((679, 702), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (691, 702), False, 'import utils\n'), ((749, 784), 'os.path.join', 'os.path.join', (['data_dir', '"""words.txt"""'], {}), "(data_dir, 'words.txt')\n", (761, 784), False, 'import os\n'), ((1219, 1253), 'os.path.join', 'os.path.join', (['data_dir', '"""tags.txt"""'], {}), "(data_dir, 'tags.txt')\n", (1231, 1253), False, 'import os\n'), ((4269, 4285), 'random.seed', 'random.seed', (['(230)'], {}), '(230)\n', (4280, 4285), False, 'import random\n'), ((4298, 4319), 'random.shuffle', 'random.shuffle', (['order'], {}), '(order)\n', (4312, 4319), False, 'import random\n'), ((3135, 3181), 'os.path.join', 'os.path.join', (['data_dir', 'split', '"""sentences.txt"""'], {}), "(data_dir, split, 'sentences.txt')\n", (3147, 3181), False, 'import os\n'), ((3212, 3255), 'os.path.join', 'os.path.join', (['data_dir', 'split', '"""labels.txt"""'], {}), "(data_dir, split, 'labels.txt')\n", (3224, 3255), False, 'import os\n'), ((5602, 5630), 'torch.LongTensor', 'torch.LongTensor', (['batch_data'], {}), '(batch_data)\n', (5618, 5630), False, 'import torch\n'), ((5649, 5679), 'torch.LongTensor', 'torch.LongTensor', (['batch_labels'], {}), '(batch_labels)\n', (5665, 5679), False, 'import torch\n'), ((5967, 5987), 'torch.autograd.Variable', 'Variable', (['batch_data'], {}), '(batch_data)\n', (5975, 5987), False, 'from torch.autograd import Variable\n'), ((6006, 6028), 'torch.autograd.Variable', 'Variable', (['batch_labels'], {}), '(batch_labels)\n', (6014, 6028), False, 'from torch.autograd import Variable\n')] |
from .data_transfer import *
import functools
MSG_MAX_LENGTH = 1024
CONTENT_LEN = 250
HEADINFO = [
["flags" , 1 , bytes2int , functools.partial(int2bytes , length = 1)] ,
["src_ip" , 4 , bytes2ip , ip2bytes ] ,
["src_port" , 4 , bytes2int , int2bytes] ,
]
FLAG_MASK = [
["SPE" , 1 << 0] ,
["STP" , 1 << 1] ,
["ENT" , 1 << 2] ,
["ADV" , 1 << 3] ,
["ADD" , 1 << 4] ,
["QUI" , 1 << 5] ,
] | [
"functools.partial"
] | [((131, 169), 'functools.partial', 'functools.partial', (['int2bytes'], {'length': '(1)'}), '(int2bytes, length=1)\n', (148, 169), False, 'import functools\n')] |
from setuptools import find_packages, setup
setup(
name='django-minimal-blog',
version='1.0.0',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Django>=1.9',
'Markdown>=2.6',
'docutils>=0.12',
'jsonfield>=1.0',
'Pillow>=3.3'
],
zip_safe=False
)
| [
"setuptools.find_packages"
] | [((167, 182), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (180, 182), False, 'from setuptools import find_packages, setup\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openstack', '0014_instance_volumes'),
]
operations = [
migrations.AddField(
model_name='instance',
name='runtime_state',
field=models.CharField(max_length=150, verbose_name='runtime state', blank=True),
),
]
| [
"django.db.models.CharField"
] | [((359, 433), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'verbose_name': '"""runtime state"""', 'blank': '(True)'}), "(max_length=150, verbose_name='runtime state', blank=True)\n", (375, 433), False, 'from django.db import migrations, models\n')] |
# https://codechalleng.es/bites/65/
import itertools
import os
import urllib.request
import random
import string
# PREWORK
DICTIONARY = os.path.join('/tmp', 'dictionary.txt')
urllib.request.urlretrieve('http://bit.ly/2iQ3dlZ', DICTIONARY)
with open(DICTIONARY) as f:
dictionary = set([word.strip().lower() for word in f.read().split()])
def get_possible_dict_words(draw):
"""Get all possible words from a draw (list of letters) which are
valid dictionary words. Use _get_permutations_draw and provided
dictionary"""
permutations = [''.join(word).lower()
for word in _get_permutations_draw(draw)]
return set(permutations) & set(dictionary)
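# Worked example (editor sketch): for draw = ['a', 't'] the generator below yields the
# permutations ('a',), ('t',), ('a', 't'), ('t', 'a'); after joining and lowercasing
# they become {'a', 't', 'at', 'ta'}, and intersecting with the dictionary keeps only
# real words, e.g. {'a', 'at'} if both happen to appear in dictionary.txt.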
def _get_permutations_draw(draw):
"""Helper to get all permutations of a draw (list of letters), hint:
use itertools.permutations (order of letters matters)"""
for i in range(1, 14):
yield from list(itertools.permutations(draw, i))
def random_letter():
alpha = list(string.ascii_lowercase)
random_letter = random.choice(alpha)
return random_letter
random_list = []
for _ in range(7):
random_list.append(random_letter())
print(random_list)
# print(get_possible_dict_words(random_list))
# print(list(_get_permutations_draw(random_list)))
my_words = 'i o l t p r l'.split()
def my_word_combos(my_words=my_words):
word_options = list(get_possible_dict_words(my_words))
sorted_list = sorted(word_options, key=len, reverse=True)
return sorted_list
# print(my_word_combos())
board_words = ['wank', 'armor', 'car', 'travel', 'foil', 'iron']
combined_words = my_words + board_words
print(my_word_combos(combined_words))
# print(get_possible_dict_words(combined_words))
| [
"itertools.permutations",
"random.choice",
"os.path.join"
] | [((138, 176), 'os.path.join', 'os.path.join', (['"""/tmp"""', '"""dictionary.txt"""'], {}), "('/tmp', 'dictionary.txt')\n", (150, 176), False, 'import os\n'), ((1035, 1055), 'random.choice', 'random.choice', (['alpha'], {}), '(alpha)\n', (1048, 1055), False, 'import random\n'), ((918, 949), 'itertools.permutations', 'itertools.permutations', (['draw', 'i'], {}), '(draw, i)\n', (940, 949), False, 'import itertools\n')] |
# from genut.models.seq2seq_vae import
import os
import logging

import torch

from archive.genut import load_prev_state
from archive.genut import RNNLM
from archive.genut.util.argparser import ArgParser
from archive.genut import Tester
from archive.genut import LMTrainer
# NOTE: load_dataset, load_pretrain_word_embedding, TEST_FLAG and TRAIN_FLAG are
# assumed to be provided by the surrounding archive.genut package; they are not imported here.
if __name__ == "__main__":
ap = ArgParser()
opt = ap.parser.parse_args()
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
# Register for logger
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
if opt.dbg is not True:
fileHandler = logging.FileHandler("logger.log")
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
rootLogger.addHandler(consoleHandler)
logging.info('Go!')
if opt.use_cuda and not torch.cuda.is_available():
logging.error('GPU NOT avail.')
elif not torch.cuda.is_available():
logging.warning('GPU NOT avail.')
opt, data_patch = load_dataset(opt)
logging.info(opt)
pretrain_embedding = load_pretrain_word_embedding(opt)
# model = Seq2seq(opt, pretrain_embedding)
model = RNNLM(opt, pretrain_embedding)
if opt.use_cuda:
model = model.cuda()
if opt.load_dir is not None and opt.load_file is not None:
# model.enc = load_prev_state(opt.load_dir + '/' + opt.load_file + '_enc', model.enc)
model.dec = load_prev_state(opt.load_dir + '/' + opt.load_file + '_dec', model.dec)
model.emb = load_prev_state(opt.load_dir + '/' + opt.load_file + '_emb', model.emb)
print("Model Initialized.")
if opt.mode == TEST_FLAG:
model.eval()
lm_test = Tester(opt, model,
data=data_patch,
write_file='_'.join([opt.load_file, 'result']))
ppl = lm_test.test_iters()
logging.info("Evaluation PPL: %f" % ppl)
elif opt.mode == TRAIN_FLAG:
model.train()
s2s_train = LMTrainer(opt, model, data_patch)
try:
s2s_train.train_iters()
except KeyboardInterrupt:
logging.info("Training Interrupted.")
else:
raise RuntimeError
| [
"archive.genut.LMTrainer",
"archive.genut.RNNLM",
"archive.genut.util.argparser.ArgParser",
"archive.genut.load_prev_state"
] | [((271, 282), 'archive.genut.util.argparser.ArgParser', 'ArgParser', ([], {}), '()\n', (280, 282), False, 'from archive.genut.util.argparser import ArgParser\n'), ((1067, 1097), 'archive.genut.RNNLM', 'RNNLM', (['opt', 'pretrain_embedding'], {}), '(opt, pretrain_embedding)\n', (1072, 1097), False, 'from archive.genut import RNNLM\n'), ((1327, 1398), 'archive.genut.load_prev_state', 'load_prev_state', (["(opt.load_dir + '/' + opt.load_file + '_dec')", 'model.dec'], {}), "(opt.load_dir + '/' + opt.load_file + '_dec', model.dec)\n", (1342, 1398), False, 'from archive.genut import load_prev_state\n'), ((1419, 1490), 'archive.genut.load_prev_state', 'load_prev_state', (["(opt.load_dir + '/' + opt.load_file + '_emb')", 'model.emb'], {}), "(opt.load_dir + '/' + opt.load_file + '_emb', model.emb)\n", (1434, 1490), False, 'from archive.genut import load_prev_state\n'), ((1888, 1921), 'archive.genut.LMTrainer', 'LMTrainer', (['opt', 'model', 'data_patch'], {}), '(opt, model, data_patch)\n', (1897, 1921), False, 'from archive.genut import LMTrainer\n')] |
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
# get_image_files is assumed to come from fastai (e.g. fastai.vision.all)
from fastai.vision.all import get_image_files


class SupervisedImage(Dataset):
def __init__(self, x, y, augs):
self.xs = x
self.ys = y
self.augs = augs
def __getitem__(self, idx):
img = self.get_image(self.xs[idx])
img = self.augs(image=img)['image']
return img, torch.tensor(self.enc[self.ys[idx]])
@classmethod
def from_imagenet(cls, root, augs):
fs = get_image_files(root)
xs = fs
ys = [i.parent.name for i in fs]
cls.enc = {j:i for i,j in enumerate(set(ys))}
return cls(fs, ys, augs)
def __len__(self):
return len(self.xs)
def __repr__(self):
n = '\n'
s = f'# ({len(self.xs)}) {n} {self.enc}'
return s
def get_image(self, x):
img = cv2.imread(str(x))
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def show_batch(self):
import matplotlib.pyplot as plt
fig,axes = plt.subplots(2, 6, figsize=(10, 10))
ids = [self.xs[i] for i in np.random.randint(0, len(self.xs), 12)]
for i, ax in zip(ids, axes.ravel()):
ax.imshow(self.augs(image=self.get_image(i))['image'].permute(1,2,0))
plt.tight_layout()
class SegmentationData(Dataset):
def __init__(self, xs, ys, channels, augs):
self.xs = xs
self.ys = ys
self.augs = augs
self.channels = channels
def __len__(self):
return len(self.xs)
def __getitem__(self, idx):
img = self.get_image(self.xs[idx])
mask = self.get_mask(self.ys[idx])
sample = self.augs(image=img, mask=mask)
        # build a one-hot style mask: one binary channel per class label (classes 1-4)
        _mask, mask = torch.zeros(self.channels, 256, 256), sample['mask']
        for i in range(1, 5):
            _mask[i - 1, ...] = torch.where(mask == i, torch.ones(256, 256), torch.zeros(256, 256))
return sample['image'], _mask
| [
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout"
] | [((907, 943), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(6)'], {'figsize': '(10, 10)'}), '(2, 6, figsize=(10, 10))\n', (919, 943), True, 'import matplotlib.pyplot as plt\n'), ((1154, 1172), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1170, 1172), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
from mig3_client import ReportConverter
def test_simple_report(simple_report):
"""Should be simplest possible report."""
assert len(simple_report["included"]) == 1
def test_basic_convert(simple_report):
"""Should convert tests from simplest possible report."""
converter = ReportConverter(simple_report)
assert len(converter.convert()) == 1
def test_convert_module_name(simple_report):
"""Should extract module name from report item."""
result = ReportConverter(simple_report).convert()
converted_test = result[0]
assert converted_test.get("module") == "tests/test_examples.py", converted_test
def test_convert_test_name(simple_report):
"""Should extract test name from report item."""
result = ReportConverter(simple_report).convert()
converted_test = result[0]
assert converted_test.get("test") == "test_success", converted_test
def test_convert_result(simple_report):
"""Should extract result from report item."""
result = ReportConverter(simple_report).convert()
converted_test = result[0]
assert converted_test.get("result") == "passed", converted_test
| [
"mig3_client.ReportConverter"
] | [((317, 347), 'mig3_client.ReportConverter', 'ReportConverter', (['simple_report'], {}), '(simple_report)\n', (332, 347), False, 'from mig3_client import ReportConverter\n'), ((504, 534), 'mig3_client.ReportConverter', 'ReportConverter', (['simple_report'], {}), '(simple_report)\n', (519, 534), False, 'from mig3_client import ReportConverter\n'), ((771, 801), 'mig3_client.ReportConverter', 'ReportConverter', (['simple_report'], {}), '(simple_report)\n', (786, 801), False, 'from mig3_client import ReportConverter\n'), ((1020, 1050), 'mig3_client.ReportConverter', 'ReportConverter', (['simple_report'], {}), '(simple_report)\n', (1035, 1050), False, 'from mig3_client import ReportConverter\n')] |
from time import perf_counter
from collections import Counter
import numpy as np
import rpxdock.homog as hm
from rpxdock.geom import BCC6
from rpxdock.xbin import Xbin
from rpxdock.xbin.smear import smear
from rpxdock.phmap import PHMap_u8f8
from rpxdock.util import plot
xident_f4 = np.eye(4).astype("f4")
def test_smear_one():
for r in range(1, 6):
w = 2 * r + 1
cart_resl = 1.0
xb = Xbin(cart_resl, 9e9)
gr = xb.grid6
pm = PHMap_u8f8()
cen = xident_f4
kcen = xb.key_of(xident_f4)
bcen = xb.bincen_of(kcen)
assert np.allclose(cen, bcen, atol=1e-4)
phm = PHMap_u8f8()
phm[xb.key_of(bcen)] = 1.0
smeared = smear(xb, phm, radius=r, extrahalf=0, oddlast3=0, sphere=0)
assert isinstance(smeared, PHMap_u8f8)
assert len(smeared) == w**3 + (w - 1)**3
k, v = smeared.items_array()
x = xb.bincen_of(k)
cart_dis = np.linalg.norm(bcen[0, :3, 3] - x[:, :3, 3], axis=1)
assert np.min(cart_dis) == 0
# print(sorted(Counter(x[:, 0, 3]).values()))
counts = [(w - 1)**2] * (w - 1) + [w**2] * w
assert sorted(Counter(x[:, 0, 3]).values()) == counts
assert sorted(Counter(x[:, 1, 3]).values()) == counts
assert sorted(Counter(x[:, 2, 3]).values()) == counts
ori_dist = hm.angle_of_3x3(x[:, :3, :3])
assert np.allclose(np.unique(ori_dist), [0.0, 1.24466863])
def test_smear_one_oddori():
for r in range(1, 6):
w = 2 * r + 1
cart_resl = 1.0
xb = Xbin(cart_resl, 9e9)
gr = xb.grid6
pm = PHMap_u8f8()
cen = xident_f4
kcen = xb.key_of(xident_f4)
bcen = xb.bincen_of(kcen)
assert np.allclose(cen, bcen, atol=1e-4)
phm = PHMap_u8f8()
phm[xb.key_of(bcen)] = 1.0
smeared = smear(xb, phm, radius=r, extrahalf=0, oddlast3=1, sphere=0)
assert isinstance(smeared, PHMap_u8f8)
assert len(smeared) == w**3 + 8 * (w - 1)**3
k, v = smeared.items_array()
x = xb.bincen_of(k)
cart_dis = np.linalg.norm(bcen[0, :3, 3] - x[:, :3, 3], axis=1)
d = 0.57787751
uvals = np.arange(-2 * r, 2 * r + 0.001) * d
assert np.allclose(np.unique(x[:, 0, 3]), uvals, atol=1e-4)
assert np.allclose(np.unique(x[:, 1, 3]), uvals, atol=1e-4)
assert np.allclose(np.unique(x[:, 2, 3]), uvals, atol=1e-4)
counts = [w**2] * w + [8 * (w - 1)**2] * (w - 1)
assert sorted(Counter(x[:, 0, 3]).values()) == counts
assert sorted(Counter(x[:, 1, 3]).values()) == counts
assert sorted(Counter(x[:, 2, 3]).values()) == counts
ori_dist = hm.angle_of_3x3(x[:, :3, :3])
assert np.allclose(np.unique(ori_dist), [0.0, 1.24466863])
def test_smear_one_oddori_sphere():
counts = [
[5, 5, 9, 32, 32],
[9, 9, 21, 21, 21, 96, 96, 128, 128],
[9, 9, 25, 25, 37, 37, 37, 128, 128, 256, 256, 256, 256],
[13, 13, 37, 37, 49, 49, 61, 61, 69, 192, 192, 352, 352, 416, 416, 480, 480],
[21, 21, 45, 45, 69, 69, 89, 89, 97, 97, 97, 256, 256] +
[416, 416, 608, 608, 704, 704, 704, 704],
]
for r in range(1, 6):
w = 2 * r + 1
cart_resl = 1.0
xb = Xbin(cart_resl, 9e9)
gr = xb.grid6
pm = PHMap_u8f8()
cen = xident_f4
kcen = xb.key_of(xident_f4)
bcen = xb.bincen_of(kcen)
assert np.allclose(cen, bcen, atol=1e-4)
phm = PHMap_u8f8()
phm[xb.key_of(bcen)] = 1.0
smeared = smear(xb, phm, radius=r, extrahalf=0, oddlast3=1, sphere=1)
smeared2 = smear(xb, phm, radius=r, extrahalf=0, oddlast3=1, sphere=1)
print("smear sph/cube", len(smeared) / len(smeared2))
assert isinstance(smeared, PHMap_u8f8)
assert len(smeared) == [83, 529, 1459, 3269, 6115][r - 1]
k, v = smeared.items_array()
x = xb.bincen_of(k)
cart_dis = np.linalg.norm(bcen[0, :3, 3] - x[:, :3, 3], axis=1)
d = 0.57787751
uvals = np.arange(-2 * r, 2 * r + 0.001) * d
assert np.allclose(np.unique(x[:, 0, 3]), uvals, atol=1e-4)
assert np.allclose(np.unique(x[:, 1, 3]), uvals, atol=1e-4)
assert np.allclose(np.unique(x[:, 2, 3]), uvals, atol=1e-4)
assert sorted(Counter(x[:, 0, 3]).values()) == counts[r - 1]
assert sorted(Counter(x[:, 1, 3]).values()) == counts[r - 1]
assert sorted(Counter(x[:, 2, 3]).values()) == counts[r - 1]
ori_dist = hm.angle_of_3x3(x[:, :3, :3])
assert np.allclose(np.unique(ori_dist), [0.0, 1.24466863])
def test_smear_one_exhalf_oddori_sphere():
counts = [
[5, 5, 9, 32, 32],
[9, 9, 21, 21, 21, 96, 96, 128, 128],
[9, 9, 25, 25, 37, 37, 37, 128, 128, 256, 256, 256, 256],
[13, 13, 37, 37, 49, 49, 61, 61, 69, 192, 192, 352, 352, 416, 416, 480, 480],
[21, 21, 45, 45, 69, 69, 89, 89, 97, 97, 97, 256, 256] +
[416, 416, 608, 608, 704, 704, 704, 704],
]
for r in range(1, 6):
w = 2 * r + 1
cart_resl = 1.0
xb = Xbin(cart_resl, 9e9)
gr = xb.grid6
pm = PHMap_u8f8()
cen = xident_f4
kcen = xb.key_of(xident_f4)
bcen = xb.bincen_of(kcen)
assert np.allclose(cen, bcen, atol=1e-4)
phm = PHMap_u8f8()
phm[xb.key_of(bcen)] = 1.0
smeared = smear(xb, phm, radius=r, extrahalf=1, oddlast3=1, sphere=1)
smeared2 = smear(xb, phm, radius=r, extrahalf=1, oddlast3=1, sphere=0)
print("smear exhalf sph/cube", len(smeared) / len(smeared2))
continue
assert isinstance(smeared, PHMap_u8f8)
assert len(smeared) == [83, 529, 1459, 3269, 6115][r - 1]
k, v = smeared.items_array()
x = xb.bincen_of(k)
cart_dis = np.linalg.norm(bcen[0, :3, 3] - x[:, :3, 3], axis=1)
d = 0.57787751
uvals = np.arange(-2 * r, 2 * r + 0.001) * d
assert np.allclose(np.unique(x[:, 0, 3]), uvals, atol=1e-4)
assert np.allclose(np.unique(x[:, 1, 3]), uvals, atol=1e-4)
assert np.allclose(np.unique(x[:, 2, 3]), uvals, atol=1e-4)
assert sorted(Counter(x[:, 0, 3]).values()) == counts[r - 1]
assert sorted(Counter(x[:, 1, 3]).values()) == counts[r - 1]
assert sorted(Counter(x[:, 2, 3]).values()) == counts[r - 1]
ori_dist = hm.angle_of_3x3(x[:, :3, :3])
assert np.allclose(np.unique(ori_dist), [0.0, 1.24466863])
def test_smear_two():
for samp in range(10):
for r in range(1, 5):
w = 2 * r + 1
cart_resl = 1.0
ori_resl = 10
xb = Xbin(cart_resl, ori_resl)
gr = xb.grid6
phm, phm0, phm1 = PHMap_u8f8(), PHMap_u8f8(), PHMap_u8f8()
p = np.stack([np.eye(4), np.eye(4)]).astype("f4")
p[:, :3, 3] = np.random.randn(2, 3) * (r / 2)
p[1, :3, :3] = hm.rot(np.random.randn(3), ori_resl / 2, degrees=True)
k = xb.key_of(p)
phm[k] = np.array([1, 1], dtype="f8")
smeared = smear(xb, phm, radius=r, extrahalf=1, oddlast3=1, sphere=1)
allk, allv = smeared.items_array()
smeared0 = [smear(xb, phm0, radius=r, extrahalf=1, oddlast3=1, sphere=1)]
phm0[k[0]] = 1.0
smeared0 = smear(xb, phm0, radius=r, extrahalf=1, oddlast3=1, sphere=1)
allv0 = smeared0[allk]
phm1[k[1]] = 1.0
smeared1 = smear(xb, phm1, radius=r, extrahalf=1, oddlast3=1, sphere=1)
allv1 = smeared1[allk]
assert np.all(allv0 <= allv)
assert np.all(allv1 <= allv)
assert np.all(allv == np.maximum(allv0, allv1))
d = np.linalg.norm(p[0, :3, 3] - p[1, :3, 3])
s, s0, s1 = set(allk), set(smeared0.keys()), set(smeared1.keys())
assert s0.issubset(s)
assert s1.issubset(s)
# print(len(s0.intersection(s1)) / len(s))
def test_smear_multiple():
cart_resl = 1.0
ori_resl = 10
xb = Xbin(cart_resl, ori_resl)
gr = xb.grid6
for rad in range(1, 6):
maxpts = [0, 20, 10, 6, 4, 2][rad]
for npts in range(2, maxpts):
w = 2 * rad + 1
phm = PHMap_u8f8()
phm0 = [PHMap_u8f8() for i in range(npts)]
p = hm.hrot(np.random.randn(npts, 3), 1.5 * ori_resl, degrees=1, dtype="f4")
p[:, :3, 3] = np.random.randn(npts, 3) * 1.5 * rad
k = xb.key_of(p)
phm[k] = np.ones(npts)
smeared = smear(xb, phm, radius=rad, extrahalf=1, oddlast3=1, sphere=1)
allk, allv = smeared.items_array()
sallk = set(allk)
allv0 = np.empty((npts, len(allk)))
sets = list()
for i in range(npts):
phm0[i][k[i]] = 1.0
smr = smear(xb, phm0[i], radius=rad, extrahalf=1, oddlast3=1, sphere=1)
allv0[i] = smr[allk]
assert np.all(allv0[i] <= allv)
s0 = set(smr.keys())
assert s0.issubset(sallk)
sets.append(s0)
# nisect = np.mean([len(a.intersection(b)) for a in sets for b in sets])
# print(rad, npts, nisect / len(sets[0]))
# assert np.all(allv == np.max(allv0, axis=0))
def check_scores(s0, s1):
not0 = np.sum(np.logical_or(s1 > 0, s0 > 0))
frac_s1_gt_s0 = np.sum(s1 > s0) / not0
frac_s1_ge_s0 = np.sum(s1 >= s0) / not0
print(
"score",
"Ns0",
np.sum(s0 > 0),
"Ns1",
np.sum(s1 > 0),
"frac1>=0",
frac_s1_ge_s0,
"frac1>0",
frac_s1_gt_s0,
)
return frac_s1_ge_s0, frac_s1_gt_s0, not0
def test_smear_one_bounding():
N1 = 5_000
N2 = 50_000
cart_sd = 2
xorig = hm.rand_xform(N1, cart_sd=cart_sd).astype("f4")
sorig = np.exp(np.random.rand(N1))
cart_resl = 1.0
ori_resl = 20
xb0 = Xbin(cart_resl, ori_resl)
xb2 = Xbin(cart_resl * 2, ori_resl * 1.5)
pm0 = PHMap_u8f8()
pm0[xb0.key_of(xorig)] = sorig
t = perf_counter()
pm1 = smear(xb0, pm0, radius=1)
t = perf_counter() - t
print(
f"fexpand {len(pm1) / len(pm0):7.2f}",
f"cell rate {int(len(pm1) / t):,}",
f" expand_rate {int(len(pm0) / t):,}",
)
x = hm.rand_xform(N2, cart_sd=cart_sd).astype("f4")
s0 = pm0[xb0.key_of(x)]
s1 = pm1[xb0.key_of(x)]
ge, gt, not0 = check_scores(s0, s1)
assert 0 == np.sum(np.logical_and(s0 > 0, s1 == 0))
assert np.sum((s0 > 0) * (s1 == 0)) == 0
assert ge > 0.99
assert gt > 0.98
pm20 = PHMap_u8f8()
pm20[xb2.key_of(xorig)] = sorig
t = perf_counter()
pm2 = smear(xb2, pm20, radius=1)
t = perf_counter() - t
print(
f"fexpand {len(pm2) / len(pm20):7.2f} cell rate {int(len(pm2) / t):,} expand_rate {int(len(pm20) / t):,}"
)
s2 = pm2[xb2.key_of(x)]
ge, gt, not0 = check_scores(s0, s2)
assert ge > 0.99
assert gt > 0.99
assert np.sum(np.logical_and(s0 > 0, s2 == 0)) / not0 < 0.001
def smear_bench():
N = 1_000_000
cart_sd = 5
xorig = hm.rand_xform(N, cart_sd=cart_sd)
sorig = np.exp(np.random.rand(N))
cart_resl = 1.0
ori_resl = 20
xb0 = Xbin(cart_resl, ori_resl)
pm0 = PHMap_u8f8()
pm0[xb0.key_of(xorig)] = sorig
for rad in range(1, 2):
t = perf_counter()
pm1 = smear(xb0, pm0, radius=rad)
t = perf_counter() - t
print(
f"rad {rad} relsize: {len(pm1) / len(pm0):7.2f} ",
f"cell rate {int(len(pm1) / t):,}",
f"expand_rate {int(len (pm0) / t):,}",
)
def test_smear_one_kernel():
spherefudge = {
(1, 0): 0.3734 + 0.0001,
(1, 1): 0.0361 + 0.0001,
(2, 0): 0.0474 + 0.0001,
(2, 1): 0.2781 + 0.0001,
(3, 0): 0.0148 + 0.0001,
(3, 1): 0.1347 + 0.0001,
(4, 0): 0.0510 + 0.0001,
(4, 1): 0.1583 + 0.0001,
}
cone4Dfudge = {
(1, 0): 0.0091 + 0.0001,
(1, 1): 0.1417 + 0.0001,
(2, 0): 0.1163 + 0.0001,
(2, 1): 0.1221 + 0.0001,
(3, 0): 0.1208 + 0.0001,
(3, 1): 0.1304 + 0.0001,
(4, 0): 0.1213 + 0.0001,
(4, 1): 0.1240 + 0.0001,
}
parab4fudge = {
(1, 0): 0.0041 + 0.0001,
(1, 1): 0.1688 + 0.0001,
(2, 0): 0.1347 + 0.0001,
(2, 1): 0.1436 + 0.0001,
(3, 0): 0.1402 + 0.0001,
(3, 1): 0.1532 + 0.0001,
(4, 0): 0.1413 + 0.0001,
(4, 1): 0.1448 + 0.0001,
}
N = 8
# plot.subplots(4, N // 2, rowmajor=True)
for rad in range(N):
exhalf = (rad) % 2
rad = (rad) // 2 + 1
# print("rad", rad, "exhalf", exhalf)
w = 2 * rad + 1
cart_resl = 1.0
xb = Xbin(cart_resl, 9e9)
gr = xb.grid6
pm = PHMap_u8f8()
cen = xident_f4
kcen = xb.key_of(xident_f4)
bcen = xb.bincen_of(kcen)
assert np.allclose(cen, bcen, atol=1e-4)
phm = PHMap_u8f8()
phm[xb.key_of(bcen)] = 1.0
grid_r2 = xb.grid6.neighbor_sphere_radius_square_cut(rad, exhalf)
d2 = np.arange(grid_r2 + 1)
# kern = np.exp(-d2 / grid_r2 * 2)
kern0 = 1 - (d2 / grid_r2)**(1 / 2) # 1/R
kern1 = 1 - (d2 / grid_r2)**(2 / 2) # 1/R**2
kern2 = 1 - (d2 / grid_r2)**(3 / 2) # 1/R**3 uniform in R
kern3 = np.ones(len(d2))
# plot.scatter(np.sqrt(d2), kern1, show=0)
# smeared = smear(xb, phm, rad, exhalf, oddlast3=1, sphere=1, kernel=kern0)
# k, v = smeared.items_array()
vals = []
for ikrn in [0, 1, 2, 3]:
kern = vars()["kern%i" % ikrn]
smeared = smear(xb, phm, rad, exhalf, oddlast3=1, sphere=1, kernel=kern)
k, v = smeared.items_array()
vals.append(v)
# plot.hist(v, title="kern%i" % ikrn, show=0)
# print(rad, "kern%i" % ikrn, np.sum(v))
assert np.all(vals[0] <= vals[1])
assert np.all(vals[1] <= vals[2])
assert np.all(vals[2] <= vals[3])
vol0 = np.sum(vals[0])
vol1 = np.sum(vals[1])
vol2 = np.sum(vals[2])
vol3 = np.sum(vals[3])
spherevol = 4 / 3 * np.pi * grid_r2**(3 / 2)
parab4vol = 1 / 6 * np.pi**2 * grid_r2**(3 / 2) # ?? close enough...
cone4Dvol = spherevol / 4
assert np.abs(1 - vol0 / cone4Dvol) < cone4Dfudge[rad, exhalf]
assert np.abs(1 - vol1 / parab4vol) < parab4fudge[rad, exhalf]
assert np.abs(1 - vol3 / spherevol) < spherefudge[rad, exhalf]
# print(np.sum(vals[3]) / spherevol)
# plot.show()
if __name__ == "__main__":
# test_smear_one()
# test_smear_one_oddori()
# test_smear_one_oddori_sphere()
# test_smear_one_exhalf_oddori_sphere()
# test_smear_one_bounding()
test_smear_two()
# test_smear_multiple()
# test_smear_one_kernel()
| [
"rpxdock.xbin.smear.smear",
"numpy.random.rand",
"numpy.array",
"numpy.linalg.norm",
"rpxdock.homog.angle_of_3x3",
"numpy.arange",
"rpxdock.xbin.Xbin",
"time.perf_counter",
"numpy.min",
"numpy.maximum",
"numpy.abs",
"numpy.eye",
"numpy.allclose",
"numpy.ones",
"rpxdock.homog.rand_xform",
"numpy.random.randn",
"rpxdock.phmap.PHMap_u8f8",
"numpy.unique",
"numpy.logical_and",
"numpy.logical_or",
"collections.Counter",
"numpy.sum",
"numpy.all"
] | [((7711, 7736), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', 'ori_resl'], {}), '(cart_resl, ori_resl)\n', (7715, 7736), False, 'from rpxdock.xbin import Xbin\n'), ((9500, 9525), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', 'ori_resl'], {}), '(cart_resl, ori_resl)\n', (9504, 9525), False, 'from rpxdock.xbin import Xbin\n'), ((9535, 9570), 'rpxdock.xbin.Xbin', 'Xbin', (['(cart_resl * 2)', '(ori_resl * 1.5)'], {}), '(cart_resl * 2, ori_resl * 1.5)\n', (9539, 9570), False, 'from rpxdock.xbin import Xbin\n'), ((9581, 9593), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (9591, 9593), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((9636, 9650), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (9648, 9650), False, 'from time import perf_counter\n'), ((9660, 9685), 'rpxdock.xbin.smear.smear', 'smear', (['xb0', 'pm0'], {'radius': '(1)'}), '(xb0, pm0, radius=1)\n', (9665, 9685), False, 'from rpxdock.xbin.smear import smear\n'), ((10158, 10170), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (10168, 10170), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((10213, 10227), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10225, 10227), False, 'from time import perf_counter\n'), ((10237, 10263), 'rpxdock.xbin.smear.smear', 'smear', (['xb2', 'pm20'], {'radius': '(1)'}), '(xb2, pm20, radius=1)\n', (10242, 10263), False, 'from rpxdock.xbin.smear import smear\n'), ((10651, 10684), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N'], {'cart_sd': 'cart_sd'}), '(N, cart_sd=cart_sd)\n', (10664, 10684), True, 'import rpxdock.homog as hm\n'), ((10767, 10792), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', 'ori_resl'], {}), '(cart_resl, ori_resl)\n', (10771, 10792), False, 'from rpxdock.xbin import Xbin\n'), ((10803, 10815), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (10813, 10815), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((285, 294), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (291, 294), True, 'import numpy as np\n'), ((409, 438), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', '(9000000000.0)'], {}), '(cart_resl, 9000000000.0)\n', (413, 438), False, 'from rpxdock.xbin import Xbin\n'), ((461, 473), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (471, 473), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((575, 610), 'numpy.allclose', 'np.allclose', (['cen', 'bcen'], {'atol': '(0.0001)'}), '(cen, bcen, atol=0.0001)\n', (586, 610), True, 'import numpy as np\n'), ((621, 633), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (631, 633), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((683, 742), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm'], {'radius': 'r', 'extrahalf': '(0)', 'oddlast3': '(0)', 'sphere': '(0)'}), '(xb, phm, radius=r, extrahalf=0, oddlast3=0, sphere=0)\n', (688, 742), False, 'from rpxdock.xbin.smear import smear\n'), ((913, 965), 'numpy.linalg.norm', 'np.linalg.norm', (['(bcen[0, :3, 3] - x[:, :3, 3])'], {'axis': '(1)'}), '(bcen[0, :3, 3] - x[:, :3, 3], axis=1)\n', (927, 965), True, 'import numpy as np\n'), ((1301, 1330), 'rpxdock.homog.angle_of_3x3', 'hm.angle_of_3x3', (['x[:, :3, :3]'], {}), '(x[:, :3, :3])\n', (1316, 1330), True, 'import rpxdock.homog as hm\n'), ((1504, 1533), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', '(9000000000.0)'], {}), '(cart_resl, 9000000000.0)\n', (1508, 1533), False, 'from rpxdock.xbin import Xbin\n'), ((1556, 1568), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (1566, 1568), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((1670, 1705), 
'numpy.allclose', 'np.allclose', (['cen', 'bcen'], {'atol': '(0.0001)'}), '(cen, bcen, atol=0.0001)\n', (1681, 1705), True, 'import numpy as np\n'), ((1716, 1728), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (1726, 1728), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((1778, 1837), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm'], {'radius': 'r', 'extrahalf': '(0)', 'oddlast3': '(1)', 'sphere': '(0)'}), '(xb, phm, radius=r, extrahalf=0, oddlast3=1, sphere=0)\n', (1783, 1837), False, 'from rpxdock.xbin.smear import smear\n'), ((2012, 2064), 'numpy.linalg.norm', 'np.linalg.norm', (['(bcen[0, :3, 3] - x[:, :3, 3])'], {'axis': '(1)'}), '(bcen[0, :3, 3] - x[:, :3, 3], axis=1)\n', (2026, 2064), True, 'import numpy as np\n'), ((2587, 2616), 'rpxdock.homog.angle_of_3x3', 'hm.angle_of_3x3', (['x[:, :3, :3]'], {}), '(x[:, :3, :3])\n', (2602, 2616), True, 'import rpxdock.homog as hm\n'), ((3145, 3174), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', '(9000000000.0)'], {}), '(cart_resl, 9000000000.0)\n', (3149, 3174), False, 'from rpxdock.xbin import Xbin\n'), ((3197, 3209), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (3207, 3209), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((3311, 3346), 'numpy.allclose', 'np.allclose', (['cen', 'bcen'], {'atol': '(0.0001)'}), '(cen, bcen, atol=0.0001)\n', (3322, 3346), True, 'import numpy as np\n'), ((3357, 3369), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (3367, 3369), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((3419, 3478), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm'], {'radius': 'r', 'extrahalf': '(0)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm, radius=r, extrahalf=0, oddlast3=1, sphere=1)\n', (3424, 3478), False, 'from rpxdock.xbin.smear import smear\n'), ((3496, 3555), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm'], {'radius': 'r', 'extrahalf': '(0)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm, radius=r, extrahalf=0, oddlast3=1, sphere=1)\n', (3501, 3555), False, 'from rpxdock.xbin.smear import smear\n'), ((3804, 3856), 'numpy.linalg.norm', 'np.linalg.norm', (['(bcen[0, :3, 3] - x[:, :3, 3])'], {'axis': '(1)'}), '(bcen[0, :3, 3] - x[:, :3, 3], axis=1)\n', (3818, 3856), True, 'import numpy as np\n'), ((4345, 4374), 'rpxdock.homog.angle_of_3x3', 'hm.angle_of_3x3', (['x[:, :3, :3]'], {}), '(x[:, :3, :3])\n', (4360, 4374), True, 'import rpxdock.homog as hm\n'), ((4910, 4939), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', '(9000000000.0)'], {}), '(cart_resl, 9000000000.0)\n', (4914, 4939), False, 'from rpxdock.xbin import Xbin\n'), ((4962, 4974), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (4972, 4974), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((5076, 5111), 'numpy.allclose', 'np.allclose', (['cen', 'bcen'], {'atol': '(0.0001)'}), '(cen, bcen, atol=0.0001)\n', (5087, 5111), True, 'import numpy as np\n'), ((5122, 5134), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (5132, 5134), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((5184, 5243), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm'], {'radius': 'r', 'extrahalf': '(1)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm, radius=r, extrahalf=1, oddlast3=1, sphere=1)\n', (5189, 5243), False, 'from rpxdock.xbin.smear import smear\n'), ((5261, 5320), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm'], {'radius': 'r', 'extrahalf': '(1)', 'oddlast3': '(1)', 'sphere': '(0)'}), '(xb, phm, radius=r, extrahalf=1, oddlast3=1, sphere=0)\n', (5266, 5320), False, 'from rpxdock.xbin.smear import 
smear\n'), ((5591, 5643), 'numpy.linalg.norm', 'np.linalg.norm', (['(bcen[0, :3, 3] - x[:, :3, 3])'], {'axis': '(1)'}), '(bcen[0, :3, 3] - x[:, :3, 3], axis=1)\n', (5605, 5643), True, 'import numpy as np\n'), ((6132, 6161), 'rpxdock.homog.angle_of_3x3', 'hm.angle_of_3x3', (['x[:, :3, :3]'], {}), '(x[:, :3, :3])\n', (6147, 6161), True, 'import rpxdock.homog as hm\n'), ((8944, 8973), 'numpy.logical_or', 'np.logical_or', (['(s1 > 0)', '(s0 > 0)'], {}), '(s1 > 0, s0 > 0)\n', (8957, 8973), True, 'import numpy as np\n'), ((8994, 9009), 'numpy.sum', 'np.sum', (['(s1 > s0)'], {}), '(s1 > s0)\n', (9000, 9009), True, 'import numpy as np\n'), ((9036, 9052), 'numpy.sum', 'np.sum', (['(s1 >= s0)'], {}), '(s1 >= s0)\n', (9042, 9052), True, 'import numpy as np\n'), ((9104, 9118), 'numpy.sum', 'np.sum', (['(s0 > 0)'], {}), '(s0 > 0)\n', (9110, 9118), True, 'import numpy as np\n'), ((9139, 9153), 'numpy.sum', 'np.sum', (['(s1 > 0)'], {}), '(s1 > 0)\n', (9145, 9153), True, 'import numpy as np\n'), ((9435, 9453), 'numpy.random.rand', 'np.random.rand', (['N1'], {}), '(N1)\n', (9449, 9453), True, 'import numpy as np\n'), ((9693, 9707), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (9705, 9707), False, 'from time import perf_counter\n'), ((10073, 10101), 'numpy.sum', 'np.sum', (['((s0 > 0) * (s1 == 0))'], {}), '((s0 > 0) * (s1 == 0))\n', (10079, 10101), True, 'import numpy as np\n'), ((10271, 10285), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10283, 10285), False, 'from time import perf_counter\n'), ((10703, 10720), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (10717, 10720), True, 'import numpy as np\n'), ((10888, 10902), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10900, 10902), False, 'from time import perf_counter\n'), ((10915, 10942), 'rpxdock.xbin.smear.smear', 'smear', (['xb0', 'pm0'], {'radius': 'rad'}), '(xb0, pm0, radius=rad)\n', (10920, 10942), False, 'from rpxdock.xbin.smear import smear\n'), ((12224, 12253), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', '(9000000000.0)'], {}), '(cart_resl, 9000000000.0)\n', (12228, 12253), False, 'from rpxdock.xbin import Xbin\n'), ((12276, 12288), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (12286, 12288), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((12390, 12425), 'numpy.allclose', 'np.allclose', (['cen', 'bcen'], {'atol': '(0.0001)'}), '(cen, bcen, atol=0.0001)\n', (12401, 12425), True, 'import numpy as np\n'), ((12436, 12448), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (12446, 12448), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((12565, 12587), 'numpy.arange', 'np.arange', (['(grid_r2 + 1)'], {}), '(grid_r2 + 1)\n', (12574, 12587), True, 'import numpy as np\n'), ((13348, 13374), 'numpy.all', 'np.all', (['(vals[0] <= vals[1])'], {}), '(vals[0] <= vals[1])\n', (13354, 13374), True, 'import numpy as np\n'), ((13388, 13414), 'numpy.all', 'np.all', (['(vals[1] <= vals[2])'], {}), '(vals[1] <= vals[2])\n', (13394, 13414), True, 'import numpy as np\n'), ((13428, 13454), 'numpy.all', 'np.all', (['(vals[2] <= vals[3])'], {}), '(vals[2] <= vals[3])\n', (13434, 13454), True, 'import numpy as np\n'), ((13469, 13484), 'numpy.sum', 'np.sum', (['vals[0]'], {}), '(vals[0])\n', (13475, 13484), True, 'import numpy as np\n'), ((13498, 13513), 'numpy.sum', 'np.sum', (['vals[1]'], {}), '(vals[1])\n', (13504, 13513), True, 'import numpy as np\n'), ((13527, 13542), 'numpy.sum', 'np.sum', (['vals[2]'], {}), '(vals[2])\n', (13533, 13542), True, 'import numpy as np\n'), ((13556, 13571), 
'numpy.sum', 'np.sum', (['vals[3]'], {}), '(vals[3])\n', (13562, 13571), True, 'import numpy as np\n'), ((979, 995), 'numpy.min', 'np.min', (['cart_dis'], {}), '(cart_dis)\n', (985, 995), True, 'import numpy as np\n'), ((1356, 1375), 'numpy.unique', 'np.unique', (['ori_dist'], {}), '(ori_dist)\n', (1365, 1375), True, 'import numpy as np\n'), ((2100, 2132), 'numpy.arange', 'np.arange', (['(-2 * r)', '(2 * r + 0.001)'], {}), '(-2 * r, 2 * r + 0.001)\n', (2109, 2132), True, 'import numpy as np\n'), ((2162, 2183), 'numpy.unique', 'np.unique', (['x[:, 0, 3]'], {}), '(x[:, 0, 3])\n', (2171, 2183), True, 'import numpy as np\n'), ((2228, 2249), 'numpy.unique', 'np.unique', (['x[:, 1, 3]'], {}), '(x[:, 1, 3])\n', (2237, 2249), True, 'import numpy as np\n'), ((2294, 2315), 'numpy.unique', 'np.unique', (['x[:, 2, 3]'], {}), '(x[:, 2, 3])\n', (2303, 2315), True, 'import numpy as np\n'), ((2642, 2661), 'numpy.unique', 'np.unique', (['ori_dist'], {}), '(ori_dist)\n', (2651, 2661), True, 'import numpy as np\n'), ((3892, 3924), 'numpy.arange', 'np.arange', (['(-2 * r)', '(2 * r + 0.001)'], {}), '(-2 * r, 2 * r + 0.001)\n', (3901, 3924), True, 'import numpy as np\n'), ((3954, 3975), 'numpy.unique', 'np.unique', (['x[:, 0, 3]'], {}), '(x[:, 0, 3])\n', (3963, 3975), True, 'import numpy as np\n'), ((4020, 4041), 'numpy.unique', 'np.unique', (['x[:, 1, 3]'], {}), '(x[:, 1, 3])\n', (4029, 4041), True, 'import numpy as np\n'), ((4086, 4107), 'numpy.unique', 'np.unique', (['x[:, 2, 3]'], {}), '(x[:, 2, 3])\n', (4095, 4107), True, 'import numpy as np\n'), ((4400, 4419), 'numpy.unique', 'np.unique', (['ori_dist'], {}), '(ori_dist)\n', (4409, 4419), True, 'import numpy as np\n'), ((5679, 5711), 'numpy.arange', 'np.arange', (['(-2 * r)', '(2 * r + 0.001)'], {}), '(-2 * r, 2 * r + 0.001)\n', (5688, 5711), True, 'import numpy as np\n'), ((5741, 5762), 'numpy.unique', 'np.unique', (['x[:, 0, 3]'], {}), '(x[:, 0, 3])\n', (5750, 5762), True, 'import numpy as np\n'), ((5807, 5828), 'numpy.unique', 'np.unique', (['x[:, 1, 3]'], {}), '(x[:, 1, 3])\n', (5816, 5828), True, 'import numpy as np\n'), ((5873, 5894), 'numpy.unique', 'np.unique', (['x[:, 2, 3]'], {}), '(x[:, 2, 3])\n', (5882, 5894), True, 'import numpy as np\n'), ((6187, 6206), 'numpy.unique', 'np.unique', (['ori_dist'], {}), '(ori_dist)\n', (6196, 6206), True, 'import numpy as np\n'), ((6389, 6414), 'rpxdock.xbin.Xbin', 'Xbin', (['cart_resl', 'ori_resl'], {}), '(cart_resl, ori_resl)\n', (6393, 6414), False, 'from rpxdock.xbin import Xbin\n'), ((6745, 6773), 'numpy.array', 'np.array', (['[1, 1]'], {'dtype': '"""f8"""'}), "([1, 1], dtype='f8')\n", (6753, 6773), True, 'import numpy as np\n'), ((6793, 6852), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm'], {'radius': 'r', 'extrahalf': '(1)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm, radius=r, extrahalf=1, oddlast3=1, sphere=1)\n', (6798, 6852), False, 'from rpxdock.xbin.smear import smear\n'), ((7027, 7087), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm0'], {'radius': 'r', 'extrahalf': '(1)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm0, radius=r, extrahalf=1, oddlast3=1, sphere=1)\n', (7032, 7087), False, 'from rpxdock.xbin.smear import smear\n'), ((7167, 7227), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm1'], {'radius': 'r', 'extrahalf': '(1)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm1, radius=r, extrahalf=1, oddlast3=1, sphere=1)\n', (7172, 7227), False, 'from rpxdock.xbin.smear import smear\n'), ((7277, 7298), 'numpy.all', 'np.all', (['(allv0 <= allv)'], {}), '(allv0 <= allv)\n', 
(7283, 7298), True, 'import numpy as np\n'), ((7315, 7336), 'numpy.all', 'np.all', (['(allv1 <= allv)'], {}), '(allv1 <= allv)\n', (7321, 7336), True, 'import numpy as np\n'), ((7408, 7449), 'numpy.linalg.norm', 'np.linalg.norm', (['(p[0, :3, 3] - p[1, :3, 3])'], {}), '(p[0, :3, 3] - p[1, :3, 3])\n', (7422, 7449), True, 'import numpy as np\n'), ((7899, 7911), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (7909, 7911), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((8154, 8167), 'numpy.ones', 'np.ones', (['npts'], {}), '(npts)\n', (8161, 8167), True, 'import numpy as np\n'), ((8187, 8248), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm'], {'radius': 'rad', 'extrahalf': '(1)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm, radius=rad, extrahalf=1, oddlast3=1, sphere=1)\n', (8192, 8248), False, 'from rpxdock.xbin.smear import smear\n'), ((9369, 9403), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N1'], {'cart_sd': 'cart_sd'}), '(N1, cart_sd=cart_sd)\n', (9382, 9403), True, 'import rpxdock.homog as hm\n'), ((9867, 9901), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': 'cart_sd'}), '(N2, cart_sd=cart_sd)\n', (9880, 9901), True, 'import rpxdock.homog as hm\n'), ((10030, 10061), 'numpy.logical_and', 'np.logical_and', (['(s0 > 0)', '(s1 == 0)'], {}), '(s0 > 0, s1 == 0)\n', (10044, 10061), True, 'import numpy as np\n'), ((10953, 10967), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10965, 10967), False, 'from time import perf_counter\n'), ((13105, 13167), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm', 'rad', 'exhalf'], {'oddlast3': '(1)', 'sphere': '(1)', 'kernel': 'kern'}), '(xb, phm, rad, exhalf, oddlast3=1, sphere=1, kernel=kern)\n', (13110, 13167), False, 'from rpxdock.xbin.smear import smear\n'), ((13745, 13773), 'numpy.abs', 'np.abs', (['(1 - vol0 / cone4Dvol)'], {}), '(1 - vol0 / cone4Dvol)\n', (13751, 13773), True, 'import numpy as np\n'), ((13814, 13842), 'numpy.abs', 'np.abs', (['(1 - vol1 / parab4vol)'], {}), '(1 - vol1 / parab4vol)\n', (13820, 13842), True, 'import numpy as np\n'), ((13883, 13911), 'numpy.abs', 'np.abs', (['(1 - vol3 / spherevol)'], {}), '(1 - vol3 / spherevol)\n', (13889, 13911), True, 'import numpy as np\n'), ((6465, 6477), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (6475, 6477), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((6479, 6491), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (6489, 6491), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((6493, 6505), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (6503, 6505), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((6589, 6610), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (6604, 6610), True, 'import numpy as np\n'), ((6652, 6670), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (6667, 6670), True, 'import numpy as np\n'), ((6919, 6979), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm0'], {'radius': 'r', 'extrahalf': '(1)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm0, radius=r, extrahalf=1, oddlast3=1, sphere=1)\n', (6924, 6979), False, 'from rpxdock.xbin.smear import smear\n'), ((7929, 7941), 'rpxdock.phmap.PHMap_u8f8', 'PHMap_u8f8', ([], {}), '()\n', (7939, 7941), False, 'from rpxdock.phmap import PHMap_u8f8\n'), ((7985, 8009), 'numpy.random.randn', 'np.random.randn', (['npts', '(3)'], {}), '(npts, 3)\n', (8000, 8009), True, 'import numpy as np\n'), ((8469, 8534), 'rpxdock.xbin.smear.smear', 'smear', (['xb', 'phm0[i]'], {'radius': 'rad', 'extrahalf': 
'(1)', 'oddlast3': '(1)', 'sphere': '(1)'}), '(xb, phm0[i], radius=rad, extrahalf=1, oddlast3=1, sphere=1)\n', (8474, 8534), False, 'from rpxdock.xbin.smear import smear\n'), ((8587, 8611), 'numpy.all', 'np.all', (['(allv0[i] <= allv)'], {}), '(allv0[i] <= allv)\n', (8593, 8611), True, 'import numpy as np\n'), ((10540, 10571), 'numpy.logical_and', 'np.logical_and', (['(s0 > 0)', '(s2 == 0)'], {}), '(s0 > 0, s2 == 0)\n', (10554, 10571), True, 'import numpy as np\n'), ((7368, 7392), 'numpy.maximum', 'np.maximum', (['allv0', 'allv1'], {}), '(allv0, allv1)\n', (7378, 7392), True, 'import numpy as np\n'), ((8073, 8097), 'numpy.random.randn', 'np.random.randn', (['npts', '(3)'], {}), '(npts, 3)\n', (8088, 8097), True, 'import numpy as np\n'), ((1124, 1143), 'collections.Counter', 'Counter', (['x[:, 0, 3]'], {}), '(x[:, 0, 3])\n', (1131, 1143), False, 'from collections import Counter\n'), ((1184, 1203), 'collections.Counter', 'Counter', (['x[:, 1, 3]'], {}), '(x[:, 1, 3])\n', (1191, 1203), False, 'from collections import Counter\n'), ((1244, 1263), 'collections.Counter', 'Counter', (['x[:, 2, 3]'], {}), '(x[:, 2, 3])\n', (1251, 1263), False, 'from collections import Counter\n'), ((2410, 2429), 'collections.Counter', 'Counter', (['x[:, 0, 3]'], {}), '(x[:, 0, 3])\n', (2417, 2429), False, 'from collections import Counter\n'), ((2470, 2489), 'collections.Counter', 'Counter', (['x[:, 1, 3]'], {}), '(x[:, 1, 3])\n', (2477, 2489), False, 'from collections import Counter\n'), ((2530, 2549), 'collections.Counter', 'Counter', (['x[:, 2, 3]'], {}), '(x[:, 2, 3])\n', (2537, 2549), False, 'from collections import Counter\n'), ((4147, 4166), 'collections.Counter', 'Counter', (['x[:, 0, 3]'], {}), '(x[:, 0, 3])\n', (4154, 4166), False, 'from collections import Counter\n'), ((4214, 4233), 'collections.Counter', 'Counter', (['x[:, 1, 3]'], {}), '(x[:, 1, 3])\n', (4221, 4233), False, 'from collections import Counter\n'), ((4281, 4300), 'collections.Counter', 'Counter', (['x[:, 2, 3]'], {}), '(x[:, 2, 3])\n', (4288, 4300), False, 'from collections import Counter\n'), ((5934, 5953), 'collections.Counter', 'Counter', (['x[:, 0, 3]'], {}), '(x[:, 0, 3])\n', (5941, 5953), False, 'from collections import Counter\n'), ((6001, 6020), 'collections.Counter', 'Counter', (['x[:, 1, 3]'], {}), '(x[:, 1, 3])\n', (6008, 6020), False, 'from collections import Counter\n'), ((6068, 6087), 'collections.Counter', 'Counter', (['x[:, 2, 3]'], {}), '(x[:, 2, 3])\n', (6075, 6087), False, 'from collections import Counter\n'), ((6530, 6539), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6536, 6539), True, 'import numpy as np\n'), ((6541, 6550), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6547, 6550), True, 'import numpy as np\n')] |
################################################################################
#
# Moon.py - A Python module that ...
#
# Copyright (C) 2011 <NAME>
#
################################################################################
"""
todo: something to do
"""
__author__ = '<NAME> <<EMAIL>>'
import datetime
import math
from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt
from . import convert, geometry
from .sun import Sun
APOLAT = 32.7797556
APOLONG = 105.8198305
class Moon(object):
"""
"""
ra = None
dec = None
datetimeObj = None
def __init__(self, datetimeObj=datetime.datetime.now()):
self.datetimeObj = datetimeObj
self._set_position()
def _set_position(self):
""" ~ a few arcminutes accuracy
"""
l0 = 318.351648 # mean longitude
P0 = 36.340410 # mean longitude of perigee
N0 = 318.510107 # mean longitude of node
ii = 5.145396 # inclination
ee = 0.054900 # eccentricity
aa = 384401 # km, semi-major axis or moon's orbit
theta0 = 0.5181 # degrees, semiangular size at distance a
pi0 = 0.9507 # parallax at distance a
sun = Sun(self.datetimeObj)
jdJan0 = convert.datetime2jd(
datetime.datetime(self.datetimeObj.year, 1, 1, hour=0, minute=0, second=0))
jd = convert.datetime2jd(self.datetimeObj)
d = jd - jdJan0
D = (self.datetimeObj.year - 1990) * 365.0 + (self.datetimeObj.year - 1992) / 4 + d + 2
l = (13.1763966 * D + l0) % 360.0
C = l - sun.longitude
moonMeanAnomaly = (l - 0.1114041 * D - P0) % 360.0
N = (N0 - 0.0529539 * D) % 360.0
Ev = 1.2739 * math.sin(math.radians(2 * C - moonMeanAnomaly))
Ae = 0.1858 * math.sin(math.radians(sun.meanAnomaly))
A3 = 0.37 * math.sin(math.radians(sun.meanAnomaly))
corrected_moonMeanAnomaly = moonMeanAnomaly + Ev - Ae - A3
Ec = 6.2886 * math.sin(math.radians(corrected_moonMeanAnomaly))
A4 = 0.214 * math.sin(math.radians(2.0 * corrected_moonMeanAnomaly))
lprime = l + Ev + Ec - Ae + A4
V = 0.6583 * math.sin(math.radians(2.0 * (lprime - sun.longitude)))
lprimeprime = lprime + V
Nprime = N - 0.16 * math.sin(math.radians(sun.meanAnomaly))
y = math.sin(math.radians(lprimeprime - Nprime)) * math.cos(math.radians(ii))
x = math.cos(math.radians(lprimeprime - Nprime))
arcTan = math.degrees(math.atan(y / x))
if y > 0 and x > 0:
arcTan = arcTan % 90.0
elif y > 0 and x < 0:
arcTan = (arcTan % 90.0) + 90.0
elif y < 0 and x < 0:
arcTan = (arcTan % 90.0) + 180.0
elif y < 0 and x > 0:
arcTan = (arcTan % 90.0) + 270.0
moonLongitude = arcTan + Nprime
moonBeta = math.degrees(
math.asin(math.sin(math.radians(lprimeprime - Nprime)) * math.sin(math.radians(ii))))
ra, dec = convert.eclipticLatLon2RADec(moonLongitude, moonBeta)
self.ra = ra
self.dec = dec
def illumination(self, datetimeObj=datetime.datetime.now()):
"""
"""
fraction = 0.0
return fraction
def rise(self, datetimeObj=datetime.datetime.now()):
"""
"""
return datetimeObj
def set(self, datetimeObj=datetime.datetime.now()):
"""
"""
return datetimeObj
def lunskybright(alpha, rho, altmoon, alt):
""" From Skycalc: Evaluates predicted LUNAR part of sky brightness, in
V magnitudes per square arcsecond, following <NAME>
and <NAME> (1991) PASP 103, 1033.
alpha = separation of sun and moon as seen from earth, in Degrees
rho = separation of moon and object, in Degrees
altmoon = altitude of moon above horizon, in Degrees
alt = altitude of object above horizon, in Degrees
The original C code has the following extra parameters, taken here to be constants:
kzen = zenith extinction coefficient
moondist = distance to moon, in earth radii
all are in decimal degrees. """
if altmoon < 0.0:
return 0.0
kzen = 0.19 # Zenith extinction
moondist = 60.27 # Earth radii
rho_rad = radians(rho)
alpha = 180. - alpha
Zmoon = pi / 2. - radians(altmoon)
Z = pi / 2. - radians(alt)
moondist = moondist / (60.27) # divide by mean distance
istar = -0.4 * (3.84 + 0.026 * fabs(alpha) + 4.0e-9 * alpha**4.) # eqn 20
istar = (10.**istar) / moondist**2
if fabs(alpha) < 7.: # crude accounting for opposition effect
istar = istar * (1.35 - 0.05 * fabs(istar))
# 35 per cent brighter at full, effect tapering linearly to
# zero at 7 degrees away from full. mentioned peripherally in
# Krisciunas and Scheafer, p. 1035.
fofrho = 229087. * (1.06 + cos(rho_rad)**2.)
if fabs(rho) > 10.:
fofrho = fofrho + 10.**(6.15 - rho / 40.) # eqn 21
elif (fabs(rho) > 0.25):
fofrho = fofrho + 6.2e7 / rho**2 # eqn 19
else:
fofrho = fofrho + 9.9e8 # for 1/4 degree -- radius of moon!
Xzm = sqrt(1.0 - 0.96 * sin(Zmoon)**2)
if (Xzm != 0.):
Xzm = 1. / Xzm
else:
Xzm = 10000.
Xo = sqrt(1.0 - 0.96 * sin(Z)**2)
if (Xo != 0.):
Xo = 1. / Xo
else:
Xo = 10000.
Bmoon = fofrho * istar * (10.**(-0.4 * kzen * Xzm)) * (1. - 10.**
(-0.4 * kzen * Xo)) # nanoLamberts
if (Bmoon > 0.001):
return 22.50 - 1.08574 * log(Bmoon / 34.08) # V mag per sq arcs-eqn 1
else:
return 99.
def mjdRADec2skyBright(mjd, ra, dec):
dtObj = convert.mjd2datetime(mjd)
moon = Moon(dtObj)
moonRA, moonDec = moon.ra, moon.dec
sun = Sun(dtObj)
sunRA, sunDec = sun.ra, sun.dec
# alpha
moonSunAngle = geometry.subtends(sunRA, sunDec, moonRA, moonDec, units='DEGREES')
# rho
moonObjectAngle = geometry.subtends(moonRA, moonDec, ra, dec, units='DEGREES')
moonAlt, moonAz = convert.raDec2AltAz(moonRA, moonDec, APOLAT, APOLONG, dtObj)
objAlt, objAz = convert.raDec2AltAz(ra, dec, APOLAT, APOLONG, dtObj)
if moonAlt > 0 and objAlt > 0:
bright = lunskybright(moonSunAngle.degrees, moonObjectAngle.degrees, moonAlt, objAlt)
else:
bright = 0
return bright
def main():
moon = Moon(datetime.datetime.now())
print(moon.ra, moon.dec)
if __name__ == '__main__':
main()
| [
"datetime.datetime",
"math.radians",
"math.cos",
"datetime.datetime.now",
"math.log",
"math.fabs",
"math.sin",
"math.atan"
] | [((4294, 4306), 'math.radians', 'radians', (['rho'], {}), '(rho)\n', (4301, 4306), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((626, 649), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (647, 649), False, 'import datetime\n'), ((3143, 3166), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3164, 3166), False, 'import datetime\n'), ((3272, 3295), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3293, 3295), False, 'import datetime\n'), ((3380, 3403), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3401, 3403), False, 'import datetime\n'), ((4354, 4370), 'math.radians', 'radians', (['altmoon'], {}), '(altmoon)\n', (4361, 4370), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((4389, 4401), 'math.radians', 'radians', (['alt'], {}), '(alt)\n', (4396, 4401), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((4590, 4601), 'math.fabs', 'fabs', (['alpha'], {}), '(alpha)\n', (4594, 4601), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((4934, 4943), 'math.fabs', 'fabs', (['rho'], {}), '(rho)\n', (4938, 4943), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((6456, 6479), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6477, 6479), False, 'import datetime\n'), ((1282, 1356), 'datetime.datetime', 'datetime.datetime', (['self.datetimeObj.year', '(1)', '(1)'], {'hour': '(0)', 'minute': '(0)', 'second': '(0)'}), '(self.datetimeObj.year, 1, 1, hour=0, minute=0, second=0)\n', (1299, 1356), False, 'import datetime\n'), ((2440, 2474), 'math.radians', 'math.radians', (['(lprimeprime - Nprime)'], {}), '(lprimeprime - Nprime)\n', (2452, 2474), False, 'import math\n'), ((2507, 2523), 'math.atan', 'math.atan', (['(y / x)'], {}), '(y / x)\n', (2516, 2523), False, 'import math\n'), ((5021, 5030), 'math.fabs', 'fabs', (['rho'], {}), '(rho)\n', (5025, 5030), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((1736, 1773), 'math.radians', 'math.radians', (['(2 * C - moonMeanAnomaly)'], {}), '(2 * C - moonMeanAnomaly)\n', (1748, 1773), False, 'import math\n'), ((1806, 1835), 'math.radians', 'math.radians', (['sun.meanAnomaly'], {}), '(sun.meanAnomaly)\n', (1818, 1835), False, 'import math\n'), ((1866, 1895), 'math.radians', 'math.radians', (['sun.meanAnomaly'], {}), '(sun.meanAnomaly)\n', (1878, 1895), False, 'import math\n'), ((1997, 2036), 'math.radians', 'math.radians', (['corrected_moonMeanAnomaly'], {}), '(corrected_moonMeanAnomaly)\n', (2009, 2036), False, 'import math\n'), ((2068, 2113), 'math.radians', 'math.radians', (['(2.0 * corrected_moonMeanAnomaly)'], {}), '(2.0 * corrected_moonMeanAnomaly)\n', (2080, 2113), False, 'import math\n'), ((2185, 2229), 'math.radians', 'math.radians', (['(2.0 * (lprime - sun.longitude))'], {}), '(2.0 * (lprime - sun.longitude))\n', (2197, 2229), False, 'import math\n'), ((2354, 2388), 'math.radians', 'math.radians', (['(lprimeprime - Nprime)'], {}), '(lprimeprime - Nprime)\n', (2366, 2388), False, 'import math\n'), ((2401, 2417), 'math.radians', 'math.radians', (['ii'], {}), '(ii)\n', (2413, 2417), False, 'import math\n'), ((4908, 4920), 'math.cos', 'cos', (['rho_rad'], {}), '(rho_rad)\n', (4911, 4920), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((5623, 5641), 'math.log', 
'log', (['(Bmoon / 34.08)'], {}), '(Bmoon / 34.08)\n', (5626, 5641), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((2302, 2331), 'math.radians', 'math.radians', (['sun.meanAnomaly'], {}), '(sun.meanAnomaly)\n', (2314, 2331), False, 'import math\n'), ((4499, 4510), 'math.fabs', 'fabs', (['alpha'], {}), '(alpha)\n', (4503, 4510), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((4689, 4700), 'math.fabs', 'fabs', (['istar'], {}), '(istar)\n', (4693, 4700), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((5199, 5209), 'math.sin', 'sin', (['Zmoon'], {}), '(Zmoon)\n', (5202, 5209), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((5317, 5323), 'math.sin', 'sin', (['Z'], {}), '(Z)\n', (5320, 5323), False, 'from math import acos, asin, cos, degrees, fabs, log, pi, radians, sin, sqrt\n'), ((2918, 2952), 'math.radians', 'math.radians', (['(lprimeprime - Nprime)'], {}), '(lprimeprime - Nprime)\n', (2930, 2952), False, 'import math\n'), ((2965, 2981), 'math.radians', 'math.radians', (['ii'], {}), '(ii)\n', (2977, 2981), False, 'import math\n')] |
# -*- coding: utf-8 -*-
import os
import platform
from argparse import ArgumentParser
from typing import Iterable, ClassVar
import distro
from dotty_dict import Dotty
from .actions import FeaturesAction, ConfigAction, ReloadConfigAction, EjectAction, SelfUpdateAction, \
CheckForUpdateAction, VersionAction, CheckRequiredVersion
from .schema import CoreFeatureSchema
from ..feature import Feature, FeatureConfigurationAutoConfigureError, FeatureConfigurationReadOnlyError
from ..schema import FeatureSchema
from ...action import Action
from ...action.runner import ExpectedError, FailFastError
from ...command import LifecycleCommand, Command
from ...config import config
from ...config.config import ConfigPaths
from ...context import context
from ...phase import Phase, DefaultPhase
class ConfigureSecondPassException(Exception):
"""
Exception that should be raised when an additional configuration file is to be loaded.
"""
class NoProjectConfigurationError(FailFastError, ExpectedError):
"""
Error that should be raised when a project configuration file is required for the command.
"""
def __init__(self):
super().__init__("No project configuration file found. "
"Please create a ddb.yml file in your project directory. "
"It can be empty.")
def log_error(self):
context.log.error(str(self))
class CoreFeature(Feature):
"""
Default commands and configuration support.
"""
@property
def name(self) -> str:
return "core"
@property
def schema(self) -> ClassVar[FeatureSchema]:
return CoreFeatureSchema
@property
def actions(self) -> Iterable[Action]:
return (
FeaturesAction(),
ConfigAction(),
ReloadConfigAction(),
EjectAction(),
SelfUpdateAction(),
CheckForUpdateAction(),
VersionAction(),
CheckRequiredVersion()
)
@property
def phases(self) -> Iterable[Phase]:
def configure_parser(parser: ArgumentParser):
parser.add_argument("--eject", action="store_true",
help="Eject the project using the current configuration")
parser.add_argument("--autofix", action="store_true",
help="Autofix supported deprecated warnings by modifying template sources.")
def config_parser(parser: ArgumentParser):
parser.add_argument("property", nargs='?',
help="Property to read")
parser.add_argument("--variables", action="store_true",
help="Output as a flat list of variables available in template engines")
parser.add_argument("--value", action="store_true",
help="Output value of given property")
parser.add_argument("--full", action="store_true",
help="Output full configuration")
parser.add_argument("--files", action="store_true",
help="Group by loaded configuration file")
def info_parser(parser: ArgumentParser):
parser.add_argument("--type", action="append",
help="Filter for a type of information between: bin, env, port and vhost")
def selfupdate_parser(parser: ArgumentParser):
parser.add_argument("--force", action="store_true", help="Force update")
return (
DefaultPhase("init", "Initialize project", run_once=True),
DefaultPhase("configure", "Configure the environment", configure_parser),
DefaultPhase("download", "Download files from remote sources"),
DefaultPhase("features", "Display enabled features"),
DefaultPhase("config", "Display effective configuration", config_parser),
DefaultPhase("info", "Display useful information", info_parser),
DefaultPhase("selfupdate", "Update ddb binary with latest version", parser=selfupdate_parser)
)
@property
def commands(self) -> Iterable[Command]:
def requires_project_config():
if not config.project_configuration_file:
error = NoProjectConfigurationError()
error.log_error()
raise error
return (
LifecycleCommand("init", "Initialize the environment",
"init", avoid_stdout=True),
LifecycleCommand("configure", "Configure the environment",
"configure",
parent="init",
before_execute=requires_project_config),
LifecycleCommand("download", "Download files from remote sources",
"download"),
LifecycleCommand("features", "List enabled features",
"features"),
LifecycleCommand("config", "Display effective configuration",
"config"),
LifecycleCommand("info", "Display useful information",
"info"),
LifecycleCommand("self-update", "Update ddb to latest version",
"selfupdate"),
)
def configure(self, bootstrap=False):
super().configure(bootstrap)
if bootstrap:
return
self._load_environment_configuration()
self._apply_eject_configuration()
@staticmethod
def _load_environment_configuration():
"""
Loading enviromnent configuration file, if exists.
"""
current = config.data.get('core.env.current')
if config.filenames:
filenames = list(config.filenames)
original_filenames = list(config.filenames)
current_env_filename = filenames[0] + '.' + current
if current_env_filename not in filenames:
filenames.insert(len(filenames) - 1, current_env_filename)
extra = config.data.get('core.configuration.extra')
if extra:
for extra_item in extra:
if extra_item not in filenames:
filenames.insert(len(filenames) - 1, extra_item)
if filenames != original_filenames:
config.filenames = tuple(filenames)
raise ConfigureSecondPassException()
@staticmethod
def _apply_eject_configuration():
"""
Override some configuration that doesn't make sense without ddb
:return:
"""
if config.eject:
config.data['core.path.ddb_home'] = '.ddb-home'
config.data['core.path.home'] = '.docker-devbox-home'
config.data['core.path.project_home'] = '.'
def _configure_defaults(self, feature_config: Dotty):
if not feature_config.get('project.name'):
project_name = os.path.basename(config.paths.project_home)
feature_config['project.name'] = project_name
if not feature_config.get('domain.sub'):
feature_config['domain.sub'] = feature_config['project.name'].replace("_", "-").replace(" ", "-")
if feature_config.get('domain.value'):
raise FeatureConfigurationReadOnlyError(self, 'domain.value')
feature_config['domain.value'] = '.'.join((feature_config['domain.sub'], feature_config['domain.ext']))
if not feature_config.get('env.current') and feature_config.get('env.available'):
feature_config['env.current'] = feature_config['env.available'][-1]
if not feature_config.get('env.current') or \
feature_config.get('env.current') not in feature_config['env.available']:
raise FeatureConfigurationAutoConfigureError(self, 'env.current')
if not feature_config.get('path.project_home') and config.paths.project_home:
feature_config['path.project_home'] = config.paths.project_home
if not feature_config.get('path.home') and config.paths.home:
feature_config['path.home'] = config.paths.home
if not feature_config.get('path.ddb_home') and config.paths.ddb_home:
feature_config['path.ddb_home'] = config.paths.ddb_home
self._configure_release_asset_name_defaults(feature_config)
config.path = ConfigPaths(ddb_home=feature_config.get('path.ddb_home'),
home=feature_config.get('path.home'),
project_home=feature_config.get('path.project_home'))
def _configure_release_asset_name_defaults(self, feature_config: Dotty):
if not feature_config.get('release_asset_name'):
feature_config['release_asset_name'] = self._get_default_binary_remote_name()
@staticmethod
def _get_default_binary_remote_name():
"""
Get default binary remote name, based on current platform.
"""
if platform.system() == 'Windows':
return 'ddb-windows.exe'
if platform.system() == 'Darwin':
return 'ddb-macos'
if platform.system() == 'Linux':
if distro.id() == 'alpine':
return 'ddb-alpine'
return 'ddb-linux'
return None
| [
"distro.id",
"platform.system",
"os.path.basename"
] | [((7010, 7053), 'os.path.basename', 'os.path.basename', (['config.paths.project_home'], {}), '(config.paths.project_home)\n', (7026, 7053), False, 'import os\n'), ((9040, 9057), 'platform.system', 'platform.system', ([], {}), '()\n', (9055, 9057), False, 'import platform\n'), ((9120, 9137), 'platform.system', 'platform.system', ([], {}), '()\n', (9135, 9137), False, 'import platform\n'), ((9193, 9210), 'platform.system', 'platform.system', ([], {}), '()\n', (9208, 9210), False, 'import platform\n'), ((9238, 9249), 'distro.id', 'distro.id', ([], {}), '()\n', (9247, 9249), False, 'import distro\n')] |
from shapely.geometry import LineString, MultiLineString, MultiPolygon
from collections import Counter
import pandas as pd
import geopandas as gpd
import networkx as nx
from gisele.Steiner_tree_code import *
from gisele.functions import *
def create_roads_new(gisele_folder, case_study, Clusters,crs, accepted_road_types,resolution_MV,resolution_LV):
weighted_grid_of_points = pd.read_csv(gisele_folder+'/Case studies/'+case_study+'/Intermediate/Geospatial_Data/weighted_grid_of_points.csv')
starting_ID = weighted_grid_of_points['ID'].max()+1
#if not os.path.exists(gisele_folder+'/Case studies/'+case_study+'/Input/Roads_points'):
ROADS_unfiltered = gpd.read_file(gisele_folder+'/Case studies/'+case_study+'/Intermediate/Geospatial_Data/Roads.shp')
ROADS_unfiltered = ROADS_unfiltered.to_crs(crs)
ROADS = ROADS_unfiltered[ROADS_unfiltered['highway'].isin(accepted_road_types)]
#ROADS = ROADS_unfiltered
ROADS = MultiLine_to_Line(ROADS)
all_points = gpd.GeoDataFrame()
gdf_ROADS, ROADS_segments = create_roads2(ROADS, all_points, crs)
gdf_ROADS.crs = crs
ROADS_segments.crs = crs
#else:
#ROADS_segments=gpd.read_file(gisele_folder + '/Case studies/' + case_study + '/Input/Roads_lines/Roads_lines.shp')
#gdf_ROADS=gpd.read_file(gisele_folder + '/Case studies/' + case_study + '/Input/Roads_points/Roads_points.shp')
### PROBLEM WITH gdf_ROADS -> THERE ARE MULTI_POINTS
print(len(gdf_ROADS))
MP = MultiPolygon([p for p in Clusters['geometry']])
nodes = ROADS_segments.ID1.to_list() + ROADS_segments.ID2.to_list()
nodes = [int(i) for i in nodes]
occurence = Counter(nodes)
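    # Classify road nodes by how many segments touch them: a node appearing in a
    # single segment is a terminal, one shared by more than two segments is an
    # intersection.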
intersection_IDs = []
terminal_IDs = []
for i in occurence:
if occurence[i] == 1:
terminal_IDs.append(i)
elif occurence[i] > 2:
intersection_IDs.append(i)
new_nodes = terminal_IDs + intersection_IDs
Substations = new_nodes
Nodes = gdf_ROADS.copy()
Nodes.loc[Nodes['ID'].isin(new_nodes), 'Substation'] = 1
Nodes['inside_clusters'] = [1 if MP.contains(row['geometry']) else 0 for i,row in Nodes.iterrows()]
Lines = ROADS_segments.copy()
Lines.ID1 = Lines.ID1.astype(int)
Lines.ID2 = Lines.ID2.astype(int)
Lines_marked = Lines.copy()
conn_param = 0
New_Lines = gpd.GeoDataFrame()
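    # Walk the road network from each terminal/intersection node, merging
    # consecutive segments into a single line; a merged line is closed whenever
    # another marked node is reached or its accumulated length exceeds the
    # LV (inside clusters) / MV (outside clusters) resolution threshold.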
while not Lines.empty:
nodes = Lines.ID1.to_list() + Lines.ID2.to_list()
# print(nodes)
nodes = [int(i) for i in nodes]
Substations = list(set(Substations) & set(nodes))
current_node = int(Substations[0])
#print(Substations)
#print(current_node)
no_lines = False
tot_length = 0
tot_cost = 0
id1 = current_node
while not no_lines:
next_index = Lines.index[Lines['ID1'] == current_node].to_list()
# check if there actually is a next node
if next_index:
next_index = next_index[0] # i only care about the first line if there are many
next_node = Lines.loc[next_index, 'ID2']
tot_length = tot_length + Lines.loc[next_index, 'length']
tot_cost = tot_cost + Lines.loc[next_index, 'length']
Lines.drop(index=next_index, inplace=True)
else:
next_index = Lines.index[Lines['ID2'] == current_node].to_list()
if next_index:
next_index = next_index[0] # i only care about the first line if there are many
next_node = Lines.loc[next_index, 'ID1']
tot_length = tot_length + Lines.loc[next_index, 'length']
tot_cost = tot_cost + Lines.loc[next_index, 'length']
Lines.drop(index=next_index, inplace=True)
else:
no_lines = True # there are no lines starting from this node
if not no_lines:
is_substation = Nodes.loc[Nodes.ID == int(next_node), 'Substation'] == 1
is_inside = int(Nodes.loc[Nodes.ID==int(next_node),'inside_clusters'])
if is_inside == 1:
max_tot_length = resolution_LV/1000
else:
max_tot_length = resolution_MV/1000
Lines_marked.loc[next_index, 'Conn_param'] = conn_param
if is_substation.values[0]:
cond = False
Point1 = Nodes.loc[Nodes['ID'] == int(id1), 'geometry'].values[0]
Point2 = Nodes.loc[Nodes['ID'] == int(next_node), 'geometry'].values[0]
geom = LineString([Point1, Point2])
Data = {'ID1': id1, 'ID2': next_node, 'Cost': tot_cost, 'length': tot_length, 'geometry': geom,
'Conn_param': conn_param}
New_Lines = New_Lines.append(Data, ignore_index=True)
current_node = next_node
tot_length = 0
tot_cost = 0
id1 = current_node
conn_param = conn_param + 1
elif tot_length > max_tot_length:
Point1 = Nodes.loc[Nodes['ID'] == int(id1), 'geometry'].values[0]
Point2 = Nodes.loc[Nodes['ID'] == int(next_node), 'geometry'].values[0]
geom = LineString([Point1, Point2])
Data = {'ID1': id1, 'ID2': next_node, 'Cost': tot_cost, 'length': tot_length, 'geometry': geom,
'Conn_param': conn_param}
New_Lines = New_Lines.append(Data, ignore_index=True)
current_node = next_node
tot_length = 0
tot_cost = 0
id1 = current_node
conn_param = conn_param + 1
else:
current_node = next_node
New_Lines.crs = Lines.crs
new_lines = []
for i, row in New_Lines.iterrows():
actual_Lines = Lines_marked.loc[Lines_marked['Conn_param'] == row['Conn_param'], 'geometry']
new_line = MultiLineString([actual_Lines.values[i] for i in range(len(actual_Lines))])
new_lines.append(new_line)
New_Lines.geometry = new_lines
# New_Lines.to_file(r'New_Lines')
new_nodes = New_Lines.ID1.to_list() + New_Lines.ID2.to_list()
New_Nodes = gdf_ROADS[gdf_ROADS['ID'].isin(new_nodes)]
New_Nodes.reset_index(inplace=True)
for i, row in New_Nodes.iterrows():
id = int(i)
New_Nodes.loc[i, 'ID'] = id
New_Lines.loc[New_Lines['ID1'] == row['ID'], 'ID1'] = id
New_Lines.loc[New_Lines['ID2'] == row['ID'], 'ID2'] = id
New_Nodes.ID+=starting_ID
New_Lines.ID1+=starting_ID
New_Lines.ID2+=starting_ID
drop = New_Lines.loc[New_Lines['ID1'] == New_Lines['ID2'], :]
if not len(drop)==0:
New_Lines.drop(index=drop.index, inplace=True)
print(len(New_Nodes))
New_Lines.to_file(gisele_folder + '/Case studies/' + case_study + '/Intermediate/Geospatial_Data/Roads_lines')
New_Nodes.to_file(gisele_folder + '/Case studies/' + case_study + '/Intermediate/Geospatial_Data/Roads_points')
return New_Nodes, New_Lines
def Merge_Roads_GridOfPoints(gisele_folder,case_study):
road_points = gpd.read_file(gisele_folder+'/Case studies/'+case_study+'/Intermediate/Geospatial_Data/Roads_points/Roads_points.shp')
weighted_grid_points = pd.read_csv(gisele_folder+'/Case studies/'+case_study+'/Intermediate/Geospatial_Data/weighted_grid_of_points.csv')
weighted_grid_points['Type'] = 'Standard'
road_points['Type'] = 'Road'
road_points.drop(columns=['geometry'],inplace=True)
weighted_grid_points_with_roads = weighted_grid_points.append(road_points)
weighted_grid_points_with_roads[['X','Y','ID','Elevation','Type','Population','Weight','Elevation']].\
to_csv(gisele_folder+'/Case studies/'+case_study+'/Intermediate/Geospatial_Data/weighted_grid_of_points_with_roads.csv')
def improve_connection(MV_grid,all_points,pts,grid_1,grid_2,line_bc):
graph = nx.Graph()
for i, row in MV_grid.iterrows():
graph.add_edge(int(row.ID1), int(row.ID2),length = row.Length, weight=row.Cost)
for i in pts:
if not graph.has_edge(i[0], i[1]):
p1 = all_points.loc[all_points.ID==i[0],'geometry'].values[0]
p2 = all_points.loc[all_points.ID == i[1], 'geometry'].values[0]
av_weight = (all_points.loc[all_points.ID==i[0],'Weight'].values[0]+all_points.loc[all_points.ID==i[1],'Weight']
.values[0])/2
graph.add_edge(int(i[0]), int(i[1]), length=p1.distance(p2)/1000, weight = p1.distance(p2)/1000*av_weight*line_bc)
T_metric1 = metric_closure(graph, weight='weight')
T_metric = metric_closure(graph, weight='length')
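    # T_metric1 holds cost-weighted shortest paths and T_metric length-weighted
    # ones; the loop below picks the closest pair of nodes (one in each grid) as
    # the terminals of the new connection.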
min_dist = 100000
terminal_points = []
for ind1, row1 in grid_1.iterrows():
for ind2, row2 in grid_2.iterrows():
id1 = int(row1['ID'])
id2 = int(row2['ID'])
dist = T_metric[id1][id2]['distance']
if dist < min_dist:
min_dist = dist
terminal_points = [id1, id2]
points = T_metric[terminal_points[0]][terminal_points[1]]['path']
steps = len(points)
new_path = []
for i in range(0, steps - 1):
new_path.append(points[i + 1])
pts = list(zip(points, new_path))
Line = array_to_LineString(all_points, pts)
length = min_dist*1000
cost = T_metric1[terminal_points[0]][terminal_points[1]]['distance']
return Line,length,cost,terminal_points
def array_to_LineString(all_points,pts):
pts_new=[]
i = 0
for pt in pts:
if i==0:
pts_new.append(all_points.loc[all_points['ID']==pt[0],'geometry'].values[0])
pts_new.append(all_points.loc[all_points['ID'] == pt[1], 'geometry'].values[0])
else:
pts_new.append(all_points.loc[all_points['ID']==pt[1],'geometry'].values[0])
print(i)
i+=1
Line = LineString(pts_new)
return Line
def calculate_mg(gisele_folder,case_study,crs,mg_types):
case_folder = gisele_folder + '/Case studies/' + case_study
data_folder = case_folder + '/Intermediate/Optimization/all_data'
Nodes = pd.read_csv(data_folder + '/All_Nodes.csv')
n_clusters = int(Nodes['Cluster'].max())
clusters_list = [*range(1,n_clusters+1)]
cluster_powers = [Nodes.loc[Nodes['Cluster'] == i, 'MV_Power'].sum() for i in range(1,n_clusters+1)]
cluster_population = [Nodes.loc[Nodes['Cluster'] == i, 'Population'].sum() for i in range(1,n_clusters+1)]
clusters_list=pd.DataFrame({'Cluster':clusters_list,'Population': cluster_population,'Load [kW]': cluster_powers})
clusters_list.to_csv(case_folder+'/Output/clusters_list.csv')
input_profile = pd.read_csv(gisele_folder+'/general_input/Load Profile.csv').round(4)
config = pd.read_csv(case_folder+'/Input/Configuration.csv',index_col='Parameter')
wt=config.loc['wt','Value']
grid_lifetime = int(config.loc['grid_lifetime','Value'])
Nodes_gdf = gpd.GeoDataFrame(Nodes, geometry=gpd.points_from_xy(Nodes.X, Nodes.Y),
crs=crs)
yearly_profile, years, total_energy = load(clusters_list,
grid_lifetime,
input_profile, gisele_folder, case_study)
mg = sizing(yearly_profile, clusters_list, Nodes_gdf, wt,mg_types,gisele_folder,case_study)
mg.to_csv(case_folder+'/Output/Microgrid.csv')
def calculate_mg_multiobjective(gisele_folder, case_study, crs):
pass | [
"collections.Counter",
"networkx.Graph",
"shapely.geometry.MultiPolygon"
] | [((1422, 1469), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (["[p for p in Clusters['geometry']]"], {}), "([p for p in Clusters['geometry']])\n", (1434, 1469), False, 'from shapely.geometry import MultiLineString, MultiPolygon\n'), ((1594, 1608), 'collections.Counter', 'Counter', (['nodes'], {}), '(nodes)\n', (1601, 1608), False, 'from collections import Counter\n'), ((8026, 8036), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (8034, 8036), True, 'import networkx as nx\n')] |
import unittest
from mock import MagicMock, patch, call, ANY
from boto.dynamodb2.layer1 import DynamoDBConnection
from botocore.exceptions import ClientError
from flotilla.client.region_meta import RegionMetadata
ENVIRONMENT = 'test'
REGION = 'us-east-1'
REGION_OTHER = 'us-west-2'
SCHEDULER = 't2.nano'
CHANNEL = 'stable'
VERSION = 'current'
CONTAINER = 'pebbletech/flotilla'
class TestRegionMetadata(unittest.TestCase):
def setUp(self):
self.region_meta = RegionMetadata(ENVIRONMENT)
@patch('boto3.client')
def test_region_params(self, mock_connect):
message = 'Value (us-east-1-zzz) for parameter availabilityZone is ' \
'invalid. Subnets can currently only be created in the ' \
'following availability zones: us-east-1c, us-east-1a, ' \
'us-east-1d, us-east-1e.'
self.mock_subnet_error(mock_connect, message)
region_item = self.region_meta._region_params(REGION)
self.assertEqual(region_item['az1'], 'us-east-1a')
self.assertEqual(region_item['az2'], 'us-east-1c')
self.assertEqual(region_item['az3'], 'us-east-1d')
self.assertEqual(region_item['az4'], 'us-east-1e')
@patch('boto3.client')
def test_region_params_wrap(self, mock_connect):
message = 'Value (us-east-1-zzz) for parameter availabilityZone is ' \
'invalid. Subnets can currently only be created in the ' \
'following availability zones: us-east-1c, us-east-1a. '
self.mock_subnet_error(mock_connect, message)
region_item = self.region_meta._region_params(REGION)
self.assertEqual(region_item['az1'], 'us-east-1a')
self.assertEqual(region_item['az2'], 'us-east-1c')
self.assertNotIn('az3', region_item)
@patch('boto3.client')
def test__region_params_exception(self, mock_connect):
vpc = MagicMock()
mock_connect.return_value = vpc
vpc.describe_vpcs.side_effect = ClientError({'Error': {}}, '')
self.assertRaises(ClientError, self.region_meta._region_params, REGION)
@patch('boto3.resource')
def test_store_regions(self, mock_connect):
dynamo = MagicMock()
mock_connect.return_value = dynamo
self.region_meta._region_params = MagicMock(return_value={})
self.region_meta.store_regions((REGION, REGION_OTHER),
False, SCHEDULER,
CHANNEL, VERSION,
CONTAINER)
self.assertEquals(mock_connect.call_count, 2)
@patch('boto3.resource')
def test_store_regions_per_region(self, mock_connect):
dynamo = MagicMock()
mock_connect.return_value = dynamo
self.region_meta._region_params = MagicMock(return_value={})
self.region_meta.store_regions((REGION, REGION_OTHER),
True, SCHEDULER,
CHANNEL, VERSION, CONTAINER)
self.assertEquals(mock_connect.call_count, 2)
def mock_subnet_error(self, mock_connect, message):
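        # Make describe_vpcs return a fake VPC and have create_subnet raise a
        # ClientError whose message lists the valid availability zones.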
vpc = MagicMock()
mock_connect.return_value = vpc
mock_vpc = {'VpcId': 'vpc-123456'}
vpc.describe_vpcs.return_value = {'Vpcs': [mock_vpc]}
client_error = ClientError({'Error': {'Message': message}}, '')
vpc.create_subnet.side_effect = client_error
| [
"flotilla.client.region_meta.RegionMetadata",
"mock.patch",
"mock.MagicMock",
"botocore.exceptions.ClientError"
] | [((508, 529), 'mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (513, 529), False, 'from mock import MagicMock, patch, call, ANY\n'), ((1214, 1235), 'mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (1219, 1235), False, 'from mock import MagicMock, patch, call, ANY\n'), ((1806, 1827), 'mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (1811, 1827), False, 'from mock import MagicMock, patch, call, ANY\n'), ((2111, 2134), 'mock.patch', 'patch', (['"""boto3.resource"""'], {}), "('boto3.resource')\n", (2116, 2134), False, 'from mock import MagicMock, patch, call, ANY\n'), ((2612, 2635), 'mock.patch', 'patch', (['"""boto3.resource"""'], {}), "('boto3.resource')\n", (2617, 2635), False, 'from mock import MagicMock, patch, call, ANY\n'), ((474, 501), 'flotilla.client.region_meta.RegionMetadata', 'RegionMetadata', (['ENVIRONMENT'], {}), '(ENVIRONMENT)\n', (488, 501), False, 'from flotilla.client.region_meta import RegionMetadata\n'), ((1901, 1912), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1910, 1912), False, 'from mock import MagicMock, patch, call, ANY\n'), ((1993, 2023), 'botocore.exceptions.ClientError', 'ClientError', (["{'Error': {}}", '""""""'], {}), "({'Error': {}}, '')\n", (2004, 2023), False, 'from botocore.exceptions import ClientError\n'), ((2200, 2211), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2209, 2211), False, 'from mock import MagicMock, patch, call, ANY\n'), ((2297, 2323), 'mock.MagicMock', 'MagicMock', ([], {'return_value': '{}'}), '(return_value={})\n', (2306, 2323), False, 'from mock import MagicMock, patch, call, ANY\n'), ((2712, 2723), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2721, 2723), False, 'from mock import MagicMock, patch, call, ANY\n'), ((2809, 2835), 'mock.MagicMock', 'MagicMock', ([], {'return_value': '{}'}), '(return_value={})\n', (2818, 2835), False, 'from mock import MagicMock, patch, call, ANY\n'), ((3149, 3160), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3158, 3160), False, 'from mock import MagicMock, patch, call, ANY\n'), ((3330, 3378), 'botocore.exceptions.ClientError', 'ClientError', (["{'Error': {'Message': message}}", '""""""'], {}), "({'Error': {'Message': message}}, '')\n", (3341, 3378), False, 'from botocore.exceptions import ClientError\n')] |
import os
import time
import json
import numpy as np
import pandas as pd
import torch
from hydroDL import kPath
from hydroDL.app import waterQuality
from hydroDL.model import rnn, crit
caseName = 'refBasins'
ratioTrain = 0.8
rho = 365
batchSize = 100
nEpoch = 100
hiddenSize = 64
modelFolder = os.path.join(kPath.dirWQ, 'modelA', caseName)
if not os.path.exists(modelFolder):
os.mkdir(modelFolder)
# predict - point-by-point
modelFile = os.path.join(modelFolder, 'modelSeq_Ep' + str(nEpoch) + '.pt')
model = torch.load(modelFile)
nt = dictData['rho']
nd, ny = y.shape
batchSize = 1000
iS = np.arange(0, nd, batchSize)
iE = np.append(iS[1:], nd)
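# Normalize the inputs with the stored statistics (statDict), run the model one
# batch at a time, then de-normalize the outputs back to physical units.
# xNorm, c, y, info and statDict are expected to be defined by earlier
# (not shown) preprocessing steps.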
yOutLst = list()
xNorm = (xNorm - np.tile(statDict['xMean'], [nt, nd, 1])) / np.tile(
statDict['xStd'], [nt, nd, 1])
cNorm = (c - np.tile(statDict['cMean'], [nd, 1])) / np.tile(
statDict['cStd'], [nd, 1])
for k in range(len(iS)):
print('batch: '+str(k))
xT = torch.from_numpy(np.concatenate(
[xNorm[:, iS[k]:iE[k], :], np.tile(cNorm[iS[k]:iE[k], :], [nt, 1, 1])], axis=-1)).float()
if torch.cuda.is_available():
xT = xT.cuda()
model = model.cuda()
yT = model(xT)[-1, :, :]
yOutLst.append(yT.detach().cpu().numpy())
yOut = np.concatenate(yOutLst, axis=0)
yOut = yOut * np.tile(statDict['yStd'], [nd, 1]) +\
np.tile(statDict['yMean'], [nd, 1])
# save output
dfOut = info
dfOut['train'] = np.nan
dfOut['train'][indTrain] = 1
dfOut['train'][indTest] = 0
varC = dictData['varC']
targetFile = os.path.join(modelFolder, 'target.csv')
if not os.path.exists(targetFile):
targetDf = pd.merge(dfOut, pd.DataFrame(data=y, columns=varC),
left_index=True, right_index=True)
targetDf.to_csv(targetFile)
outFile = os.path.join(modelFolder, 'output_Ep' + str(nEpoch) + '.csv')
outDf = pd.merge(dfOut, pd.DataFrame(data=yOut, columns=varC),
left_index=True, right_index=True)
outDf.to_csv(outFile) | [
"os.path.exists",
"numpy.tile",
"torch.load",
"os.path.join",
"numpy.append",
"torch.cuda.is_available",
"os.mkdir",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.arange"
] | [((296, 341), 'os.path.join', 'os.path.join', (['kPath.dirWQ', '"""modelA"""', 'caseName'], {}), "(kPath.dirWQ, 'modelA', caseName)\n", (308, 341), False, 'import os\n'), ((515, 536), 'torch.load', 'torch.load', (['modelFile'], {}), '(modelFile)\n', (525, 536), False, 'import torch\n'), ((597, 624), 'numpy.arange', 'np.arange', (['(0)', 'nd', 'batchSize'], {}), '(0, nd, batchSize)\n', (606, 624), True, 'import numpy as np\n'), ((630, 651), 'numpy.append', 'np.append', (['iS[1:]', 'nd'], {}), '(iS[1:], nd)\n', (639, 651), True, 'import numpy as np\n'), ((1227, 1258), 'numpy.concatenate', 'np.concatenate', (['yOutLst'], {'axis': '(0)'}), '(yOutLst, axis=0)\n', (1241, 1258), True, 'import numpy as np\n'), ((1497, 1536), 'os.path.join', 'os.path.join', (['modelFolder', '"""target.csv"""'], {}), "(modelFolder, 'target.csv')\n", (1509, 1536), False, 'import os\n'), ((349, 376), 'os.path.exists', 'os.path.exists', (['modelFolder'], {}), '(modelFolder)\n', (363, 376), False, 'import os\n'), ((382, 403), 'os.mkdir', 'os.mkdir', (['modelFolder'], {}), '(modelFolder)\n', (390, 403), False, 'import os\n'), ((729, 767), 'numpy.tile', 'np.tile', (["statDict['xStd']", '[nt, nd, 1]'], {}), "(statDict['xStd'], [nt, nd, 1])\n", (736, 767), True, 'import numpy as np\n'), ((825, 859), 'numpy.tile', 'np.tile', (["statDict['cStd']", '[nd, 1]'], {}), "(statDict['cStd'], [nd, 1])\n", (832, 859), True, 'import numpy as np\n'), ((1066, 1091), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1089, 1091), False, 'import torch\n'), ((1315, 1350), 'numpy.tile', 'np.tile', (["statDict['yMean']", '[nd, 1]'], {}), "(statDict['yMean'], [nd, 1])\n", (1322, 1350), True, 'import numpy as np\n'), ((1544, 1570), 'os.path.exists', 'os.path.exists', (['targetFile'], {}), '(targetFile)\n', (1558, 1570), False, 'import os\n'), ((1826, 1863), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'yOut', 'columns': 'varC'}), '(data=yOut, columns=varC)\n', (1838, 1863), True, 'import pandas as pd\n'), ((686, 725), 'numpy.tile', 'np.tile', (["statDict['xMean']", '[nt, nd, 1]'], {}), "(statDict['xMean'], [nt, nd, 1])\n", (693, 725), True, 'import numpy as np\n'), ((786, 821), 'numpy.tile', 'np.tile', (["statDict['cMean']", '[nd, 1]'], {}), "(statDict['cMean'], [nd, 1])\n", (793, 821), True, 'import numpy as np\n'), ((1273, 1307), 'numpy.tile', 'np.tile', (["statDict['yStd']", '[nd, 1]'], {}), "(statDict['yStd'], [nd, 1])\n", (1280, 1307), True, 'import numpy as np\n'), ((1603, 1637), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'y', 'columns': 'varC'}), '(data=y, columns=varC)\n', (1615, 1637), True, 'import pandas as pd\n'), ((996, 1038), 'numpy.tile', 'np.tile', (['cNorm[iS[k]:iE[k], :]', '[nt, 1, 1]'], {}), '(cNorm[iS[k]:iE[k], :], [nt, 1, 1])\n', (1003, 1038), True, 'import numpy as np\n')] |
import datetime
import time
import traceback
from functools import wraps
from logging import Logger
from flask import Flask, request, g
from flask import jsonify
from app.domain.usecase.user import UserService
from app.pkgs import errors
from app.pkgs.errors import Error, HttpStatusCode
class Middleware(object):
def __init__(self, a: UserService, logger: Logger):
self.user_service = a
self.permissions_list = set()
self.logger = logger
def error_handler(self, func):
"""Contain handler for json and error exception. Accept only one value (not tuple) and should be a dict/list
Arguments:
func {[type]} -- [description]
Returns:
[type] -- [description]
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
res = func(*args, **kwargs)
except Error as e:
return jsonify(e.to_json()), e.code()
except Exception as e:
self.logger.error(e, exc_info=True)
return jsonify(data=None, error=f'Unknown error: {str(e)}'), HttpStatusCode.Internal_Server_Error
if res is not None:
return jsonify(data=res)
return jsonify(data=[])
return wrapper
@staticmethod
def get_bearer_token():
if 'Authorization' in request.headers:
# Flask/Werkzeug do not recognize any authentication types
# other than Basic or Digest or bearer, so here we parse the header by
# hand
try:
auth_type, token = request.headers['Authorization'].split(None, 1)
except ValueError:
raise errors.authorization_header_empty
else:
raise errors.authorization_header_empty
# if the auth type does not match, we act as if there is no auth
# this is better than failing directly, as it allows the callback
# to handle special cases, like supporting multiple auth types
if auth_type != 'Bearer':
raise errors.authorization_type_wrong
return token
def verify_auth_token(self, f):
@wraps(f)
def decorated(*args, **kwargs):
# Flask normally handles OPTIONS requests on its own, but in the
# case it is configured to forward those to the application, we
# need to ignore authentication headers and let the request through
# to avoid unwanted interactions with CORS.
if request.method != 'OPTIONS': # pragma: no cover
token = self.get_bearer_token()
payload = self.user_service.validate_auth_token(token)
# print(payload)
accept, note = self.user_service.validate_access_policy(payload['sub'], payload['role_ids'], payload['iat'])
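                # Reject the token if the user or their roles have changed since
                # the token was issued (iat), forcing the client to log in again.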
if not accept:
raise Error(f'Token rejected because of changing in user and role: {note}', HttpStatusCode.Unauthorized)
g.user = payload['user']
g.roles = payload['role_ids']
g.permissions = payload['permissions']
g.auth_token = token
return f(*args, **kwargs)
return decorated
def require_permissions(self, *permissions):
"""
        Require at least one of the following permissions to access the wrapped view:
        :param permissions: permission names, any one of which grants access
        :return: a decorator enforcing the permission check
"""
self.permissions_list.update(permissions)
def check_permission(fn):
@wraps(fn)
def permit(*args, **kwargs):
for p in permissions:
if p in g.permissions:
return fn(*args, **kwargs)
raise Error("permission denied", HttpStatusCode.Forbidden)
return permit
return check_permission
def set_logger(logger: Logger, app: Flask):
# app.logger.handlers = logger.handlers
# app.logger.setLevel(logger.level)
@app.before_request
def start_timer():
g.start = time.time()
@app.after_request
def log_request(response):
now = time.time()
duration = '%2.4f ms' % ((now - g.start) * 1000)
ip = request.headers.get('X-Forwarded-For', request.remote_addr)
host = request.host.split(':', 1)[0]
args = dict(request.args)
timestamp = datetime.datetime.utcnow()
log_params = [
('method', request.method, 'blue'),
('path', request.path, 'blue'),
('status', response.status_code, 'yellow'),
('duration', duration, 'green'),
('utc_time', timestamp, 'magenta'),
('ip', ip, 'red'),
('host', host, 'red'),
('params', args, 'blue')
]
request_id = request.headers.get('X-Request-ID')
if request_id:
log_params.append(('request_id', request_id, 'yellow'))
parts = []
for name, value, _color in log_params:
part = f'"{name}":"{value}"'
parts.append(part)
line = '{' + ",".join(parts) + '}'
logger.info(line)
return response
| [
"datetime.datetime.utcnow",
"app.pkgs.errors.Error",
"functools.wraps",
"time.time",
"flask.request.headers.get",
"flask.request.host.split",
"flask.jsonify"
] | [((763, 774), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (768, 774), False, 'from functools import wraps\n'), ((2183, 2191), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (2188, 2191), False, 'from functools import wraps\n'), ((4054, 4065), 'time.time', 'time.time', ([], {}), '()\n', (4063, 4065), False, 'import time\n'), ((4136, 4147), 'time.time', 'time.time', ([], {}), '()\n', (4145, 4147), False, 'import time\n'), ((4219, 4278), 'flask.request.headers.get', 'request.headers.get', (['"""X-Forwarded-For"""', 'request.remote_addr'], {}), "('X-Forwarded-For', request.remote_addr)\n", (4238, 4278), False, 'from flask import Flask, request, g\n'), ((4378, 4404), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4402, 4404), False, 'import datetime\n'), ((4805, 4840), 'flask.request.headers.get', 'request.headers.get', (['"""X-Request-ID"""'], {}), "('X-Request-ID')\n", (4824, 4840), False, 'from flask import Flask, request, g\n'), ((1252, 1268), 'flask.jsonify', 'jsonify', ([], {'data': '[]'}), '(data=[])\n', (1259, 1268), False, 'from flask import jsonify\n'), ((3541, 3550), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (3546, 3550), False, 'from functools import wraps\n'), ((4294, 4320), 'flask.request.host.split', 'request.host.split', (['""":"""', '(1)'], {}), "(':', 1)\n", (4312, 4320), False, 'from flask import Flask, request, g\n'), ((1215, 1232), 'flask.jsonify', 'jsonify', ([], {'data': 'res'}), '(data=res)\n', (1222, 1232), False, 'from flask import jsonify\n'), ((3746, 3798), 'app.pkgs.errors.Error', 'Error', (['"""permission denied"""', 'HttpStatusCode.Forbidden'], {}), "('permission denied', HttpStatusCode.Forbidden)\n", (3751, 3798), False, 'from app.pkgs.errors import Error, HttpStatusCode\n'), ((2920, 3022), 'app.pkgs.errors.Error', 'Error', (['f"""Token rejected because of changing in user and role: {note}"""', 'HttpStatusCode.Unauthorized'], {}), "(f'Token rejected because of changing in user and role: {note}',\n HttpStatusCode.Unauthorized)\n", (2925, 3022), False, 'from app.pkgs.errors import Error, HttpStatusCode\n')] |
import sys
import math
import numpy as np
import warnings
import torch
import os
import argparse
import random
import pandas as pd
import logging
from sklearn.model_selection import GroupKFold
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from cld_ivado.utils.context import get_context
from cld_ivado.utils.reshape_features import flatten_scattering
from cld_ivado.utils.reshape_features import reshape_raw_images
from cld_ivado.utils.reshape_features import get_scattering_features
from cld_ivado.utils.compute_metrics import get_metrics
from cld_ivado.utils.compute_metrics import get_average_per_patient
from cld_ivado.utils.compute_metrics import log_test_experiments
sys.path.append('../src')
warnings.filterwarnings("ignore")
logging.basicConfig(level=logging.INFO)
def train_and_evaluate_model(parameters, X_train, X_test, y_train, y_test, fold_c):
"""
    :param parameters: parameters used to train the model
:param X_train: training data points
:param X_test: testing data points
:param y_train: training labels
    :param y_test: testing labels
:param fold_c: fold number
"""
svc = svm.SVC(probability = True, class_weight='balanced')
clf = GridSearchCV(svc, parameters['param_grid'], verbose=parameters['verbose'], n_jobs=-1)
clf.fit(X_train, y_train)
probs = np.array(clf.predict_proba(X_test))[:,1]
acc, auc, specificity, sensitivity = get_metrics(y_test, probs)
(acc_avg, auc_avg, specificity_avg, sensitivity_avg), label_per_patient, average_prob = get_average_per_patient(y_test, probs)
if math.isnan(auc):
logging.info(f'FOLD {fold_c} : acc: {acc} , specificity: {specificity}, sensitivity: {sensitivity}')
logging.info(f'FOLD {fold_c} : Average per patient: acc : {acc_avg} , specificity: {specificity_avg}, sensitivity: {sensitivity_avg}')
else:
logging.info(f'FOLD {fold_c} : acc: {acc} , auc: {auc}, \
specificity: {specificity}, sensitivity: {sensitivity}')
logging.info(f'FOLD {fold_c} : Average per patient: acc : {acc_avg} , auc: {auc_avg},\
specificity: {specificity_avg}, sensitivity: {sensitivity_avg}')
test_metric = {'acc': acc, 'auc': auc, 'sensitivity': sensitivity, 'specificity': specificity}
test_metric_avg = {'acc': acc_avg, 'auc': auc_avg, 'sensitivity': sensitivity_avg, 'specificity': specificity_avg}
return test_metric, test_metric_avg, probs, label_per_patient, average_prob
def train_predict(catalog, params):
M = params['preprocess']['dimension']['M']
N = params['preprocess']['dimension']['N']
df = pd.read_pickle(os.path.join(catalog['data_root'], catalog['02_interim_pd']))
seed = params['cross_val']['seed']
#permutation = np.random.RandomState(seed=1424).permutation(df.index)
#df = df.iloc[permutation ].reset_index(drop=True)
if params['model']['is_raw_data']:
if params['pca']['global'] is False:
            raise NotImplementedError("Local PCA is not implemented for raw images")
data = reshape_raw_images(df, params['preprocess']['dimension']['M'], params['preprocess']['dimension']['N'] )
# using raw images and not scattering
params['scattering']['J'] = None
params['scattering']['max_order'] = None
params['scattering']['scat_order'] = None
else:
J = params['scattering']['J']
data = get_scattering_features(catalog, params['scattering']['J'], params['scattering']['scat_order'] )
#data = data.iloc[permutation].reset_index(drop=True)
df = df.drop(columns=['img'])
test_n_splits = params['cross_val']['test_n_splits']
group_kfold_test = GroupKFold(n_splits=test_n_splits)
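    # GroupKFold grouped on the patient id column keeps every image of a patient
    # in a single fold, so a patient never appears in both train and test.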
fold_c = 1
df_pid = df['id']
df_y = df['class']
df_fat = df['fat']
# save metrics and probability
test_metrics = {}
test_metrics_avg = {}
labels_all, probs_all, fat_percentage = [], [], [] # for mlflow
patient_ids, avg_prob, label_per_patients_all = [], [], []# for mlflow,
logging.info('Cross-validation Started')
for train_index, test_index in group_kfold_test.split(df, df_y, df_pid):
random.seed(seed)
random.shuffle(train_index)
X_train, X_test = data.iloc[train_index][10:params['model']['train_samples']+10], data.iloc[test_index]
y_train, y_test, y_fat = df_y.iloc[train_index][10:params['model']['train_samples']+10], df_y.iloc[test_index], df_fat[test_index]
if params['pca']['global'] is False:
X_train, size_train = flatten_scattering(X_train, J, M, N)
X_test, size_test = flatten_scattering(X_test, J, M , N)
fat_percentage.extend(y_fat)
patient_ids.extend(df_pid[test_index])
# pca is used for dimensionality reduction
logging.info(f'FOLD {fold_c}: Apply PCA on train data points')
pca = PCA(n_components = params['pca']['n_components'], random_state = seed)
X_train = pca.fit_transform(X_train[:params['model']['train_samples']])
X_train = X_train
X_test = pca.transform(X_test)
if params['pca']['global'] is False:
X_train = torch.from_numpy(pca.fit_transform(X_train)).reshape(size_train, -1)
X_test = torch.from_numpy(pca.transform(X_test)).reshape(size_test, -1)
#standardize
if params['pca']['standardize']:
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
logging.info(f'FOLD {fold_c}: model train started')
        # training and evaluation
test_metric, test_metric_avg, probs, label_per_patient, average_prob = train_and_evaluate_model(params['model'],
X_train, X_test,
y_train, y_test,
fold_c = fold_c)
labels_all.extend(y_test)
probs_all.extend(probs)
avg_prob.extend(average_prob)
label_per_patients_all.extend(label_per_patient)
logging.info(f'FOLD {fold_c}: model train done')
test_metrics[fold_c] = test_metric
test_metrics_avg[fold_c] = test_metric_avg
fold_c += 1
# log all the metrics in mlflow
all_predictions = {'labels': labels_all, 'probabilities': probs_all,
'Fat_percentage': fat_percentage, 'Patient ID': patient_ids }
    df_all_predictions = pd.DataFrame(data=all_predictions)
pred_values = {'df_all_predictions': df_all_predictions,
'average_prob': avg_prob,
'label_per_patient': label_per_patients_all}
print(f"pca num: {params['pca']['n_components']}")
log_test_experiments(test_metrics, test_metrics_avg, params = params, pred_values = pred_values)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--param_file', type=str, default='parameters_svm.yml',
help="YML Parameter File Name")
args = parser.parse_args()
catalog, params = get_context(args.param_file)
train_predict(catalog, params)
# train_predict(catalog, params)
# train_predict(catalog, params)
# train_predict(catalog, params)
# train_predict(catalog, params)
# for n_split in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,]:
# #params['model']['train_samples'] = n_split
# params['pca']['n_components'] = n_split
# train_predict(catalog, params)
| [
"sklearn.model_selection.GridSearchCV",
"cld_ivado.utils.compute_metrics.get_metrics",
"cld_ivado.utils.context.get_context",
"cld_ivado.utils.compute_metrics.get_average_per_patient",
"cld_ivado.utils.reshape_features.flatten_scattering",
"logging.info",
"sys.path.append",
"argparse.ArgumentParser",
"sklearn.decomposition.PCA",
"sklearn.model_selection.GroupKFold",
"cld_ivado.utils.reshape_features.get_scattering_features",
"pandas.DataFrame",
"random.shuffle",
"warnings.filterwarnings",
"cld_ivado.utils.reshape_features.reshape_raw_images",
"logging.basicConfig",
"sklearn.svm.SVC",
"cld_ivado.utils.compute_metrics.log_test_experiments",
"os.path.join",
"random.seed",
"sklearn.preprocessing.StandardScaler",
"math.isnan"
] | [((789, 814), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (804, 814), False, 'import sys\n'), ((815, 848), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (838, 848), False, 'import warnings\n'), ((849, 888), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (868, 888), False, 'import logging\n'), ((1232, 1282), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(True)', 'class_weight': '"""balanced"""'}), "(probability=True, class_weight='balanced')\n", (1239, 1282), False, 'from sklearn import svm\n'), ((1295, 1384), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svc', "parameters['param_grid']"], {'verbose': "parameters['verbose']", 'n_jobs': '(-1)'}), "(svc, parameters['param_grid'], verbose=parameters['verbose'],\n n_jobs=-1)\n", (1307, 1384), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1506, 1532), 'cld_ivado.utils.compute_metrics.get_metrics', 'get_metrics', (['y_test', 'probs'], {}), '(y_test, probs)\n', (1517, 1532), False, 'from cld_ivado.utils.compute_metrics import get_metrics\n'), ((1625, 1663), 'cld_ivado.utils.compute_metrics.get_average_per_patient', 'get_average_per_patient', (['y_test', 'probs'], {}), '(y_test, probs)\n', (1648, 1663), False, 'from cld_ivado.utils.compute_metrics import get_average_per_patient\n'), ((1677, 1692), 'math.isnan', 'math.isnan', (['auc'], {}), '(auc)\n', (1687, 1692), False, 'import math\n'), ((3773, 3807), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': 'test_n_splits'}), '(n_splits=test_n_splits)\n', (3783, 3807), False, 'from sklearn.model_selection import GroupKFold\n'), ((4134, 4174), 'logging.info', 'logging.info', (['"""Cross-validation Started"""'], {}), "('Cross-validation Started')\n", (4146, 4174), False, 'import logging\n'), ((6706, 6740), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'all_predictions'}), '(data=all_predictions)\n', (6718, 6740), True, 'import pandas as pd\n'), ((6977, 7073), 'cld_ivado.utils.compute_metrics.log_test_experiments', 'log_test_experiments', (['test_metrics', 'test_metrics_avg'], {'params': 'params', 'pred_values': 'pred_values'}), '(test_metrics, test_metrics_avg, params=params,\n pred_values=pred_values)\n', (6997, 7073), False, 'from cld_ivado.utils.compute_metrics import log_test_experiments\n'), ((7131, 7156), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7154, 7156), False, 'import argparse\n'), ((7346, 7374), 'cld_ivado.utils.context.get_context', 'get_context', (['args.param_file'], {}), '(args.param_file)\n', (7357, 7374), False, 'from cld_ivado.utils.context import get_context\n'), ((1702, 1813), 'logging.info', 'logging.info', (['f"""FOLD {fold_c} : acc: {acc} , specificity: {specificity}, sensitivity: {sensitivity}"""'], {}), "(\n f'FOLD {fold_c} : acc: {acc} , specificity: {specificity}, sensitivity: {sensitivity}'\n )\n", (1714, 1813), False, 'import logging\n'), ((1812, 1957), 'logging.info', 'logging.info', (['f"""FOLD {fold_c} : Average per patient: acc : {acc_avg} , specificity: {specificity_avg}, sensitivity: {sensitivity_avg}"""'], {}), "(\n f'FOLD {fold_c} : Average per patient: acc : {acc_avg} , specificity: {specificity_avg}, sensitivity: {sensitivity_avg}'\n )\n", (1824, 1957), False, 'import logging\n'), ((1967, 2110), 'logging.info', 'logging.info', (['f"""FOLD {fold_c} : acc: {acc} , auc: {auc}, specificity: {specificity}, sensitivity: {sensitivity}"""'], 
{}), "(\n f'FOLD {fold_c} : acc: {acc} , auc: {auc}, specificity: {specificity}, sensitivity: {sensitivity}'\n )\n", (1979, 2110), False, 'import logging\n'), ((2111, 2291), 'logging.info', 'logging.info', (['f"""FOLD {fold_c} : Average per patient: acc : {acc_avg} , auc: {auc_avg}, specificity: {specificity_avg}, sensitivity: {sensitivity_avg}"""'], {}), "(\n f'FOLD {fold_c} : Average per patient: acc : {acc_avg} , auc: {auc_avg}, specificity: {specificity_avg}, sensitivity: {sensitivity_avg}'\n )\n", (2123, 2291), False, 'import logging\n'), ((2739, 2799), 'os.path.join', 'os.path.join', (["catalog['data_root']", "catalog['02_interim_pd']"], {}), "(catalog['data_root'], catalog['02_interim_pd'])\n", (2751, 2799), False, 'import os\n'), ((3146, 3253), 'cld_ivado.utils.reshape_features.reshape_raw_images', 'reshape_raw_images', (['df', "params['preprocess']['dimension']['M']", "params['preprocess']['dimension']['N']"], {}), "(df, params['preprocess']['dimension']['M'], params[\n 'preprocess']['dimension']['N'])\n", (3164, 3253), False, 'from cld_ivado.utils.reshape_features import reshape_raw_images\n'), ((3500, 3600), 'cld_ivado.utils.reshape_features.get_scattering_features', 'get_scattering_features', (['catalog', "params['scattering']['J']", "params['scattering']['scat_order']"], {}), "(catalog, params['scattering']['J'], params[\n 'scattering']['scat_order'])\n", (3523, 3600), False, 'from cld_ivado.utils.reshape_features import get_scattering_features\n'), ((4260, 4277), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4271, 4277), False, 'import random\n'), ((4286, 4313), 'random.shuffle', 'random.shuffle', (['train_index'], {}), '(train_index)\n', (4300, 4313), False, 'import random\n'), ((4904, 4966), 'logging.info', 'logging.info', (['f"""FOLD {fold_c}: Apply PCA on train data points"""'], {}), "(f'FOLD {fold_c}: Apply PCA on train data points')\n", (4916, 4966), False, 'import logging\n'), ((4981, 5047), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': "params['pca']['n_components']", 'random_state': 'seed'}), "(n_components=params['pca']['n_components'], random_state=seed)\n", (4984, 5047), False, 'from sklearn.decomposition import PCA\n'), ((5644, 5695), 'logging.info', 'logging.info', (['f"""FOLD {fold_c}: model train started"""'], {}), "(f'FOLD {fold_c}: model train started')\n", (5656, 5695), False, 'import logging\n'), ((6290, 6338), 'logging.info', 'logging.info', (['f"""FOLD {fold_c}: model train done"""'], {}), "(f'FOLD {fold_c}: model train done')\n", (6302, 6338), False, 'import logging\n'), ((4645, 4681), 'cld_ivado.utils.reshape_features.flatten_scattering', 'flatten_scattering', (['X_train', 'J', 'M', 'N'], {}), '(X_train, J, M, N)\n', (4663, 4681), False, 'from cld_ivado.utils.reshape_features import flatten_scattering\n'), ((4714, 4749), 'cld_ivado.utils.reshape_features.flatten_scattering', 'flatten_scattering', (['X_test', 'J', 'M', 'N'], {}), '(X_test, J, M, N)\n', (4732, 4749), False, 'from cld_ivado.utils.reshape_features import flatten_scattering\n'), ((5520, 5536), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5534, 5536), False, 'from sklearn.preprocessing import StandardScaler\n')] |
# Generated by Django 2.2 on 2019-11-20 00:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='blogpost',
options={'ordering': ['-publish_date', '-updated', '-timestamp']},
),
migrations.AddField(
model_name='blogpost',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='image/'),
),
migrations.AddField(
model_name='blogpost',
name='publish_date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='blogpost',
name='slug',
field=models.SlugField(default=django.utils.timezone.now, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name='blogpost',
name='timestamp',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='blogpost',
name='updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='blogpost',
name='user',
field=models.ForeignKey(default=1, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='blogpost',
name='title',
field=models.CharField(max_length=120),
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.migrations.AlterModelOptions",
"django.db.models.SlugField",
"django.db.models.ImageField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((254, 311), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (285, 311), False, 'from django.db import migrations, models\n'), ((381, 498), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""blogpost"""', 'options': "{'ordering': ['-publish_date', '-updated', '-timestamp']}"}), "(name='blogpost', options={'ordering': [\n '-publish_date', '-updated', '-timestamp']})\n", (409, 498), False, 'from django.db import migrations, models\n'), ((638, 698), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""image/"""'}), "(blank=True, null=True, upload_to='image/')\n", (655, 698), False, 'from django.db import migrations, models\n'), ((826, 869), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (846, 869), False, 'from django.db import migrations, models\n'), ((989, 1053), 'django.db.models.SlugField', 'models.SlugField', ([], {'default': 'django.utils.timezone.now', 'unique': '(True)'}), '(default=django.utils.timezone.now, unique=True)\n', (1005, 1053), False, 'from django.db import migrations, models\n'), ((1214, 1288), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'default': 'django.utils.timezone.now'}), '(auto_now_add=True, default=django.utils.timezone.now)\n', (1234, 1288), False, 'from django.db import migrations, models\n'), ((1447, 1482), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1467, 1482), False, 'from django.db import migrations, models\n'), ((1602, 1721), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(1)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': 'settings.AUTH_USER_MODEL'}), '(default=1, null=True, on_delete=django.db.models.deletion\n .SET_NULL, to=settings.AUTH_USER_MODEL)\n', (1619, 1721), False, 'from django.db import migrations, models\n'), ((1839, 1871), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)'}), '(max_length=120)\n', (1855, 1871), False, 'from django.db import migrations, models\n')] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.generic.base import View
from django.urls import reverse
from user import forms
from . import models
from django.utils.dateparse import parse_date
from django.http.response import Http404, HttpResponse
from django.core import serializers
from .utils import updateInvitation, list_status
import user
@login_required(login_url='/user/login/')
def home(request):
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
return HttpResponseRedirect(reverse('status'))
@login_required(login_url='/user/login/')
def edit_profile(request):
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
    user_id = request.user.id # Get the id of the logged-in user
    user = User.objects.get(id=user_id) # look up the User object by that id
    data = request.POST # data is a dictionary mapping each input name to its submitted value
    if(request.method == 'POST'): # Only runs when the request method is POST
        user_profile = models.Profile.objects.get(user=user) # Get the profile that belongs to this user
        user_profile.birthday = parse_date(data['birthday'])
        user_profile.bio = data['bio']
        user_profile.save() # Save the updated profile object
        return HttpResponseRedirect('/user') # Redirect back to the profile page
return render(request, 'user/edit-profile.html', {
'profile': user.profile
})
class LikeStatusView(LoginRequiredMixin, View):
def post(self, request):
user_id = request.user.id
user = User.objects.get(id=user_id)
status_id = request.POST.get('status_id')
status = models.UserStatus.objects.get(id=status_id)
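        # Toggle the like: remove this user from the likers if they already liked
        # the status, otherwise add them, then report the updated total count.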
liker = status.liker
liker_filtered = liker.filter(user=user)
if liker_filtered.count() > 0:
isDislike=True
liker.remove(user.profile)
else:
isDislike=False
liker.add(user.profile)
totalCount = status.liker.all().count()
json_object = JsonResponse({"isDislike":isDislike, "totalCount": totalCount})
return json_object
class OtherStatusView(LoginRequiredMixin, View):
def get(self, request, name):
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
try:
user_id = request.user.id
user_profile = User.objects.get(id=user_id).profile
if name == user_profile.name:
return HttpResponseRedirect(reverse('status'))
status_owner = models.Profile.objects.get(name=name)
data = list_status(status_owner, user_profile)
except:
            raise Http404()
return render(request, 'user/profile/status.html', {
'data': data,
'form': False,
'owner': status_owner.name,
'dataProfile': status_owner
})
# create invitation
class CreateInvitationView(LoginRequiredMixin, View):
def post(self, request, name):
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
user_id = request.user.id
inviter = User.objects.get(id=user_id).profile
if name == inviter.name:
return HttpResponseRedirect(reverse('status'))
invitee = models.Profile.objects.get(name=name)
data = request.POST
print(data)
message = data.get('message')
if message == '':
message = 'Hi! Nice to meet you'
if len(message) > 200:
message = message[0:200]
count1 = models.Invitation.objects.filter(inviter=inviter, invitee=invitee).count()
count2 = models.Invitation.objects.filter(inviter=invitee, invitee=inviter).count()
if count1 == 0 and count2 == 0:
new_invitation = models.Invitation(inviter=inviter, invitee=invitee, message=message)
new_invitation.save()
return HttpResponseRedirect(reverse('friends'))
# see friends, accept invitation, decline
# invitation, retract invitation
class FriendsView(LoginRequiredMixin, View):
def get(self, request):
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
user_id = request.user.id
user_profile = User.objects.get(id=user_id).profile
user_name= user_profile.name
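        # Build three lists for the template: invitations this user sent that are
        # still pending, invitations received that await an answer (inbox), and
        # accepted invitations in either direction (friends), each annotated with
        # the friend's latest status.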
pending_invitation = []
pending_invitation_querySet = models.Invitation.objects.select_related('invitee').filter(inviter=user_profile, isAccepted=False)
for invitation in pending_invitation_querySet:
pending_invitation.append({'message':invitation.message, 'name':invitation.invitee.name})
inbox_invitation=[]
inbox_invitation_querySet = models.Invitation.objects.select_related('inviter').filter(invitee=user_profile, isAccepted=False)
for invitation in inbox_invitation_querySet:
inbox_invitation.append({'message':invitation.message, 'name':invitation.inviter.name})
friends = []
friends_querySet = models.Invitation.objects.select_related('inviter', 'invitee').filter(invitee=user_profile, isAccepted=True) | models.Invitation.objects.filter(inviter=user_profile, isAccepted=True)
for invitation in friends_querySet:
val = {}
inviter_name = invitation.inviter.name
if inviter_name != user_name:
val['name'] = inviter_name
status = invitation.inviter.posted_status.all().order_by('-time')
val['latest'] = status[0].status if status.count() > 0 else False
else:
val['name'] = invitation.invitee.name
status = invitation.invitee.posted_status.all().order_by('-time')
val['latest'] = status[0].status if status.count() > 0 else False
friends.append(val)
print(friends)
return render(request, 'user/profile/friends.html', {
'pending_invitation': pending_invitation,
'friends': friends,
'inbox_invitation':inbox_invitation,
'name': user_profile,
})
def post(self, request):
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
user_id = request.user.id
user_profile = User.objects.get(id=user_id).profile
method = request.POST.get('_method')
if method is None:
return
target_name = request.POST.get('name')
target_profile = models.Profile.objects.get(name=target_name)
# To delete invitation sent
print("---------")
if method == 'delete':
updateInvitation(user_profile, target_profile, True)
return HttpResponseRedirect(reverse('friends'))
if method == 'accept':
# to accept invitation
updateInvitation(target_profile, user_profile, False)
return HttpResponseRedirect(reverse('friends'))
# To decline invitation or delete friend
if method == 'decline':
updateInvitation(target_profile, user_profile, True)
return HttpResponseRedirect(reverse('friends'))
# to delete friend
updateInvitation(user_profile, target_profile, True)
updateInvitation(target_profile, user_profile, True)
return HttpResponseRedirect(reverse('friends'))
class MyStatusView(LoginRequiredMixin, View):
def post(self,request):
print("skreeeeee")
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
user_id = request.user.id
user = User.objects.get(id=user_id).profile
data = request.POST
flag = True
if data['status'] is None or data['status'] == '':
flag = False
if user is not None and flag:
status = models.UserStatus(user=user, status=data['status'])
status.save()
return HttpResponseRedirect(reverse('status'))
data = list_status(user)
return render(request, 'user/profile/status.html', {
'has_error': True,
'data': data,
'dataProfile': user,
'form': True
})
def get(self, request):
print("test")
request.session["testing"] = "hai hai"
print("test")
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
user_id = request.user.id
user = User.objects.get(id=user_id).profile
data = list_status(user, user)
# print(data)
return render(request, 'user/profile/status.html', {
'data': data,
'form': True,
'dataProfile': user,
'has_error': False
})
class SearchFriendView(LoginRequiredMixin, View):
def get(self, request):
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
return render(request, 'user/search/search-friend.html', {
'name':'',
'user': False,
})
def post(self, request):
if request.user.is_superuser:
return HttpResponseRedirect('/admin/')
user_id = request.user.id
user = User.objects.get(id=user_id).profile
searched = request.POST.get('name')
if searched is None:
searched=""
users = models.Profile.objects.filter(name__icontains=searched).order_by("name")
users_list = []
for user in users:
bio = user.bio if user.bio else "-"
users_list.append({'name': user.name,'bio': bio})
return JsonResponse({'data': users_list})
| [
"django.shortcuts.render",
"django.http.response.HttpResponseRedirect",
"django.utils.dateparse.parse_date",
"django.contrib.auth.decorators.login_required",
"django.urls.reverse",
"django.http.response.Http404",
"django.contrib.auth.models.User.objects.get",
"django.http.response.JsonResponse"
] | [((676, 716), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/user/login/"""'}), "(login_url='/user/login/')\n", (690, 716), False, 'from django.contrib.auth.decorators import login_required\n'), ((874, 914), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/user/login/"""'}), "(login_url='/user/login/')\n", (888, 914), False, 'from django.contrib.auth.decorators import login_required\n'), ((1095, 1123), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (1111, 1123), False, 'from django.contrib.auth.models import User\n'), ((1691, 1759), 'django.shortcuts.render', 'render', (['request', '"""user/edit-profile.html"""', "{'profile': user.profile}"], {}), "(request, 'user/edit-profile.html', {'profile': user.profile})\n", (1697, 1759), False, 'from django.shortcuts import redirect, render\n'), ((785, 816), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (805, 816), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((849, 866), 'django.urls.reverse', 'reverse', (['"""status"""'], {}), "('status')\n", (856, 866), False, 'from django.urls import reverse\n'), ((991, 1022), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (1011, 1022), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((1489, 1517), 'django.utils.dateparse.parse_date', 'parse_date', (["data['birthday']"], {}), "(data['birthday'])\n", (1499, 1517), False, 'from django.utils.dateparse import parse_date\n'), ((1635, 1664), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/user"""'], {}), "('/user')\n", (1655, 1664), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((1901, 1929), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (1917, 1929), False, 'from django.contrib.auth.models import User\n'), ((2373, 2437), 'django.http.response.JsonResponse', 'JsonResponse', (["{'isDislike': isDislike, 'totalCount': totalCount}"], {}), "({'isDislike': isDislike, 'totalCount': totalCount})\n", (2385, 2437), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((3044, 3179), 'django.shortcuts.render', 'render', (['request', '"""user/profile/status.html"""', "{'data': data, 'form': False, 'owner': status_owner.name, 'dataProfile':\n status_owner}"], {}), "(request, 'user/profile/status.html', {'data': data, 'form': False,\n 'owner': status_owner.name, 'dataProfile': status_owner})\n", (3050, 3179), False, 'from django.shortcuts import redirect, render\n'), ((6260, 6436), 'django.shortcuts.render', 'render', (['request', '"""user/profile/friends.html"""', "{'pending_invitation': pending_invitation, 'friends': friends,\n 'inbox_invitation': inbox_invitation, 'name': user_profile}"], {}), "(request, 'user/profile/friends.html', {'pending_invitation':\n pending_invitation, 'friends': friends, 'inbox_invitation':\n inbox_invitation, 'name': user_profile})\n", (6266, 6436), False, 'from django.shortcuts import redirect, render\n'), ((8401, 8518), 'django.shortcuts.render', 'render', (['request', '"""user/profile/status.html"""', "{'has_error': True, 'data': data, 'dataProfile': user, 'form': 
True}"], {}), "(request, 'user/profile/status.html', {'has_error': True, 'data':\n data, 'dataProfile': user, 'form': True})\n", (8407, 8518), False, 'from django.shortcuts import redirect, render\n'), ((8954, 9072), 'django.shortcuts.render', 'render', (['request', '"""user/profile/status.html"""', "{'data': data, 'form': True, 'dataProfile': user, 'has_error': False}"], {}), "(request, 'user/profile/status.html', {'data': data, 'form': True,\n 'dataProfile': user, 'has_error': False})\n", (8960, 9072), False, 'from django.shortcuts import redirect, render\n'), ((9311, 9389), 'django.shortcuts.render', 'render', (['request', '"""user/search/search-friend.html"""', "{'name': '', 'user': False}"], {}), "(request, 'user/search/search-friend.html', {'name': '', 'user': False})\n", (9317, 9389), False, 'from django.shortcuts import redirect, render\n'), ((9999, 10033), 'django.http.response.JsonResponse', 'JsonResponse', (["{'data': users_list}"], {}), "({'data': users_list})\n", (10011, 10033), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((2608, 2639), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (2628, 2639), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((3401, 3432), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (3421, 3432), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((3485, 3513), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (3501, 3513), False, 'from django.contrib.auth.models import User\n'), ((4287, 4305), 'django.urls.reverse', 'reverse', (['"""friends"""'], {}), "('friends')\n", (4294, 4305), False, 'from django.urls import reverse\n'), ((4514, 4545), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (4534, 4545), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((4612, 4640), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (4628, 4640), False, 'from django.contrib.auth.models import User\n'), ((6574, 6605), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (6594, 6605), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((6663, 6691), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (6679, 6691), False, 'from django.contrib.auth.models import User\n'), ((7720, 7738), 'django.urls.reverse', 'reverse', (['"""friends"""'], {}), "('friends')\n", (7727, 7738), False, 'from django.urls import reverse\n'), ((7899, 7930), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (7919, 7930), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((7980, 8008), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (7996, 8008), False, 'from django.contrib.auth.models import User\n'), ((8760, 8791), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (8780, 8791), 
False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((8841, 8869), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (8857, 8869), False, 'from django.contrib.auth.models import User\n'), ((9264, 9295), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (9284, 9295), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((9518, 9549), 'django.http.response.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/admin/"""'], {}), "('/admin/')\n", (9538, 9549), False, 'from django.http.response import HttpResponseNotFound, HttpResponseRedirect, JsonResponse\n'), ((9599, 9627), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (9615, 9627), False, 'from django.contrib.auth.models import User\n'), ((2718, 2746), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (2734, 2746), False, 'from django.contrib.auth.models import User\n'), ((3019, 3028), 'django.http.response.Http404', 'Http404', ([], {}), '()\n', (3026, 3028), False, 'from django.http.response import Http404, HttpResponse\n'), ((3595, 3612), 'django.urls.reverse', 'reverse', (['"""status"""'], {}), "('status')\n", (3602, 3612), False, 'from django.urls import reverse\n'), ((7117, 7135), 'django.urls.reverse', 'reverse', (['"""friends"""'], {}), "('friends')\n", (7124, 7135), False, 'from django.urls import reverse\n'), ((7309, 7327), 'django.urls.reverse', 'reverse', (['"""friends"""'], {}), "('friends')\n", (7316, 7327), False, 'from django.urls import reverse\n'), ((7515, 7533), 'django.urls.reverse', 'reverse', (['"""friends"""'], {}), "('friends')\n", (7522, 7533), False, 'from django.urls import reverse\n'), ((8326, 8343), 'django.urls.reverse', 'reverse', (['"""status"""'], {}), "('status')\n", (8333, 8343), False, 'from django.urls import reverse\n'), ((2841, 2858), 'django.urls.reverse', 'reverse', (['"""status"""'], {}), "('status')\n", (2848, 2858), False, 'from django.urls import reverse\n')] |
from enum import Enum
from .Common import WeaponType
class StyleBonusType(Enum):
STR = 1
END = 2
DEX = 3
AGI = 4
INT = 5
WIL = 6
LOV = 7
CHA = 8
All_Attributes = 9
Ability1 = 31
Ability2 = 32
Ability3 = 33
Mastery_Level_EXP = 37
class Style:
def __init__(self, style_json_object):
self.id = style_json_object['id']
self.character_name = style_json_object['name']
self.style_name = style_json_object['another_name']
self.level_50_str_mod = style_json_object['bonus_rate_max_str'] - 100
self.level_50_end_mod = style_json_object['bonus_rate_max_end'] - 100
self.level_50_dex_mod = style_json_object['bonus_rate_max_dex'] - 100
self.level_50_agi_mod = style_json_object['bonus_rate_max_agi'] - 100
self.level_50_int_mod = style_json_object['bonus_rate_max_int'] - 100
self.level_50_wil_mod = style_json_object['bonus_rate_max_wil'] - 100
self.level_50_lov_mod = style_json_object['bonus_rate_max_lov'] - 100
self.level_50_cha_mod = style_json_object['bonus_rate_max_cha'] - 100
self.str_bonus = 0
self.end_bonus = 0
self.dex_bonus = 0
self.agi_bonus = 0
self.int_bonus = 0
self.wil_bonus = 0
self.lov_bonus = 0
self.cha_bonus = 0
self.base_str_bonus = 0
self.base_end_bonus = 0
self.base_dex_bonus = 0
self.base_agi_bonus = 0
self.base_int_bonus = 0
self.base_wil_bonus = 0
self.base_lov_bonus = 0
self.base_cha_bonus = 0
self.weapon_type = WeaponType(style_json_object['weapon_type'])
rank = style_json_object['rarity']
self.rank = 'A' if rank == 3 else 'S' if rank == 4 else 'SS'
self.skills = []
self.abilities = []
self._skill_ids = style_json_object['skill_ids']
def update_skills(self, skills_list):
for skill in skills_list:
if skill.id in self._skill_ids:
self.skills.append(skill)
def handle_level_ups(self, level_up_json_list, abilities_list):
for level_up in level_up_json_list:
if level_up['style_id'] != self.id:
continue
bonus_type = StyleBonusType(level_up['style_bonus_type'])
bonus_value = level_up['style_bonus_value']
if bonus_type == StyleBonusType.STR:
self.str_bonus += bonus_value
if bonus_type == StyleBonusType.END:
self.end_bonus += bonus_value
if bonus_type == StyleBonusType.DEX:
self.dex_bonus += bonus_value
if bonus_type == StyleBonusType.AGI:
self.agi_bonus += bonus_value
if bonus_type == StyleBonusType.INT:
self.int_bonus += bonus_value
if bonus_type == StyleBonusType.WIL:
                self.wil_bonus += bonus_value
if bonus_type == StyleBonusType.LOV:
self.lov_bonus += bonus_value
if bonus_type == StyleBonusType.CHA:
self.cha_bonus += bonus_value
if bonus_type == StyleBonusType.All_Attributes:
self.str_bonus += bonus_value
self.end_bonus += bonus_value
self.dex_bonus += bonus_value
self.agi_bonus += bonus_value
self.int_bonus += bonus_value
self.wil_bonus += bonus_value
self.lov_bonus += bonus_value
self.cha_bonus += bonus_value
if bonus_type in (StyleBonusType.Ability1, StyleBonusType.Ability2, StyleBonusType.Ability3):
for ability in abilities_list:
if ability.id == bonus_value:
self.abilities.append(ability)
def get_base_stat_bonus(self, stat_caps_list):
stat_cap_object = [sc for sc in stat_caps_list if sc['styleId'] == self.id]
if stat_cap_object:
stat_cap_object = stat_cap_object[0]
if stat_cap_object['modifier_char_str'] is None or stat_cap_object['modifier_str'] is None:
print(self.style_name)
else:
self.base_str_bonus = stat_cap_object['modifier_char_str'] + stat_cap_object['modifier_str']
self.base_end_bonus = stat_cap_object['modifier_char_end'] + stat_cap_object['modifier_end']
self.base_dex_bonus = stat_cap_object['modifier_char_dex'] + stat_cap_object['modifier_dex']
self.base_agi_bonus = stat_cap_object['modifier_char_agi'] + stat_cap_object['modifier_agi']
self.base_int_bonus = stat_cap_object['modifier_char_int'] + stat_cap_object['modifier_int']
self.base_wil_bonus = stat_cap_object['modifier_char_wil'] + stat_cap_object['modifier_wil']
self.base_lov_bonus = stat_cap_object['modifier_char_lov'] + stat_cap_object['modifier_lov']
self.base_cha_bonus = stat_cap_object['modifier_char_cha'] + stat_cap_object['modifier_cha']
def pretty_print(self):
print('{0} {1} - {2}'.format(self.rank, self.character_name, self.style_name))
print(' STR: {0}% +{1}'.format(self.level_50_str_mod, self.str_bonus))
print(' END: {0}% +{1}'.format(self.level_50_end_mod, self.end_bonus))
print(' DEX: {0}% +{1}'.format(self.level_50_dex_mod, self.dex_bonus))
print(' AGI: {0}% +{1}'.format(self.level_50_agi_mod, self.agi_bonus))
print(' INT: {0}% +{1}'.format(self.level_50_int_mod, self.int_bonus))
print(' WIL: {0}% +{1}'.format(self.level_50_wil_mod, self.wil_bonus))
print(' LOV: {0}% +{1}'.format(self.level_50_lov_mod, self.lov_bonus))
print(' CHA: {0}% +{1}'.format(self.level_50_cha_mod, self.cha_bonus))
print(' Skill1: {0}'.format(self.skills[0]))
print(' Skill2: {0}'.format(self.skills[1]))
print(' Skill3: {0}'.format(self.skills[2]))
print(' Ability1: {0}'.format(self.abilities[0]))
print(' Ability2: {0}'.format(self.abilities[1]))
print(' Ability3: {0}'.format(self.abilities[2]))
def __str__(self):
return self.style_name
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
else:
skills_match = all([s in other.skills for s in self.skills])
abilities_match = all([a in other.abilities for a in self.abilities])
are_equal = self.style_name == other.style_name and self.rank == other.rank \
and self.character_name == other.character_name and self.weapon_type == other.weapon_type \
and self.str_bonus == other.str_bonus and self.end_bonus == other.end_bonus \
and self.dex_bonus == other.dex_bonus and self.agi_bonus == other.agi_bonus \
and self.int_bonus == other.int_bonus and self.wil_bonus == other.wil_bonus \
and self.lov_bonus == other.lov_bonus and self.cha_bonus == other.cha_bonus \
and self.level_50_str_mod == other.level_50_str_mod \
and self.level_50_end_mod == other.level_50_end_mod \
and self.level_50_dex_mod == other.level_50_dex_mod \
and self.level_50_agi_mod == other.level_50_agi_mod \
and self.level_50_int_mod == other.level_50_int_mod \
and self.level_50_wil_mod == other.level_50_wil_mod \
and self.level_50_lov_mod == other.level_50_lov_mod \
and self.level_50_cha_mod == other.level_50_cha_mod \
and skills_match \
and abilities_match
return are_equal
def get_styles(style_filename):
from json import load
with open(style_filename, 'r') as style_file:
styles = load(style_file)
style_list = []
for style_json in styles:
style = Style(style_json)
style_list.append(style)
return style_list
def merge_styles_with_skill_data(styles_list, skills_list):
dud_styles = []
for style in styles_list:
style.update_skills(skills_list)
if len(style.skills) != 3:
dud_styles.append(style)
for dud_style in dud_styles:
styles_list.remove(dud_style)
def merge_styles_with_level_up_data(styles_list, level_up_file, abilities_list):
from json import load
with open(level_up_file, 'r') as level_up:
level_up_data = load(level_up)
dud_styles = []
for style in styles_list:
style.handle_level_ups(level_up_data, abilities_list)
if len(style.abilities) != 3:
dud_styles.append(style)
for dud_style in dud_styles:
styles_list.remove(dud_style)
def merge_styles_with_base_stat_mods(styles_list, stat_mod_filename):
from json import load
with open(stat_mod_filename, 'r') as stat_file:
stat_mod_data = load(stat_file)
for style in styles_list:
style.get_base_stat_bonus(stat_mod_data)
| [
"json.load"
] | [((7961, 7977), 'json.load', 'load', (['style_file'], {}), '(style_file)\n', (7965, 7977), False, 'from json import load\n'), ((8593, 8607), 'json.load', 'load', (['level_up'], {}), '(level_up)\n', (8597, 8607), False, 'from json import load\n'), ((9040, 9055), 'json.load', 'load', (['stat_file'], {}), '(stat_file)\n', (9044, 9055), False, 'from json import load\n')] |
from gym.envs.registration import register
register(
id='airsim-event-v0',
entry_point='airgym.envs:EvAirSimDrone',
)
| [
"gym.envs.registration.register"
] | [((44, 115), 'gym.envs.registration.register', 'register', ([], {'id': '"""airsim-event-v0"""', 'entry_point': '"""airgym.envs:EvAirSimDrone"""'}), "(id='airsim-event-v0', entry_point='airgym.envs:EvAirSimDrone')\n", (52, 115), False, 'from gym.envs.registration import register\n')] |
import os
import functools
import utils as u
import logger
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
import torch
import math
import os.path
from torch.nn import BCEWithLogitsLoss
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import NotFittedError
class Trainer:
def __init__(
self,
args,
splitter,
final_splitter,
comp_loss,
dataset,
disc_encoder=None,
cont_encoder=None,
classifier=None,
combined_splitter=None,
downstream_splitter=None,
train_encoder=True,
):
self.final_epoch_id = 9999
self.args = args
self.dataset = dataset
self.splitter = splitter
self.combined_splitter = combined_splitter
self.downstream_splitter = downstream_splitter
self.final_splitter = final_splitter
self.tasker = splitter.tasker
if combined_splitter != None:
self.disc_tasker = combined_splitter.disc_tasker
else:
self.disc_tasker = None
self.disc_encoder = disc_encoder
self.cont_encoder = cont_encoder
self.classifier = classifier
self.downstream_classifiers = {
"logistic": LogisticRegression(),
"decision": DecisionTreeClassifier(),
"xgb": XGBClassifier(),
}
self.comp_loss = comp_loss
self.interpolation_loss = torch.nn.BCELoss() # For continuous DGNN
self.num_nodes = self.tasker.data.num_nodes
self.num_classes = self.tasker.num_classes
self.train_encoder = (
train_encoder # Indicate which kind of training this class is for
)
self.init_optimizers(args)
self.downstream = False # Used to keep track of whether we're downstream or not
self.frozen = False # Used to keep track of whether the encoder is frozen
self.set_save_predictions = (
False # Keep track of whether to save predictions or not
)
self.has_time_query = (
hasattr(self.args, "has_time_query") and self.args.has_time_query == True
)
self.use_tgn_memory = (
args.model == "tgn" and self.args.gcn_parameters["use_memory"] == True
)
self.tgn_train_memory_backup = None
self.tgn_val_memory_backup = None
self.embedding_cache = {"TRAIN": {}, "VALID": {}, "TEST": {}}
        # Runs using a logfile are usually more serious and we'd like to keep those checkpoints.
# Runs not using a logfile store the checkpoints in the working dir where they may
# later be overwritten
if args.use_logfile:
if args.temporal_granularity == "continuous":
self.checkpoint_filename_prefix = (
"checkpoints/{}-{}-learning_rate{}".format(
args.data, args.model, args.learning_rate
)
)
else:
self.checkpoint_filename_prefix = "checkpoints/{}-{}-{}-".format(
args.data, args.model, args.grid
)
prediction_folder = "predictions/{}-{}/".format(args.data, args.model)
self.prediction_filename_prefix = "{}{}-".format(
prediction_folder, u.get_gridcell(args)
)
os.makedirs(prediction_folder, exist_ok=True)
else:
self.checkpoint_filename_prefix = "checkpoints/"
# self.checkpoint_filename_prefix = 'wikipedia-tgat-'
self.prediction_filename_prefix = "predictions/"
# if self.tasker.is_static:
# adj_matrix = u.sparse_prepare_tensor(self.tasker.adj_matrix, torch_size = [self.num_nodes], ignore_batch_dim = False)
# self.hist_adj = [adj_matrix]
# self.hist_ndFeats = [self.tasker.nodes_feats.float()]
def report_trainable_parameters(self):
# The "requires_grad if test" seem to make no difference, but it's kept in just in case and it doesn't hurt.
count_trainable_parameters = lambda model: sum(
[p.numel() for p in model.parameters() if p.requires_grad]
)
num_disc_encoder_params = 0
num_cont_encoder_params = 0
num_classifier_params = 0
if self.disc_encoder != None:
num_disc_encoder_params = count_trainable_parameters(self.disc_encoder)
if self.cont_encoder != None:
num_cont_encoder_params = count_trainable_parameters(self.cont_encoder)
if self.classifier != None:
num_classifier_params = count_trainable_parameters(self.classifier)
num_encoder_params = num_disc_encoder_params + num_cont_encoder_params
num_total_params = num_encoder_params + num_classifier_params
if self.disc_tasker != None:
disc_node_feats = self.disc_tasker.feats_per_node
else:
disc_node_feats = self.tasker.feats_per_node
cont_node_feats = self.args.gcn_parameters["layer_2_feats"]
if self.args.temporal_granularity in ["static", "discrete"]:
node_feats = disc_node_feats
else:
node_feats = self.args.gcn_parameters["layer_2_feats"]
num_parameter_str = "Number of parameters (i) Encoder(s): {}, (ii) Classifier: {}, Total: {}, features per node {}".format(
num_encoder_params, num_classifier_params, num_total_params, node_feats
)
print(num_parameter_str)
self.logger.logger.info(num_parameter_str)
def init_optimizers(self, args):
if self.disc_encoder != None and self.args.model != "random":
params = self.disc_encoder.parameters()
self.opt_encoder = torch.optim.Adam(params, lr=args.learning_rate)
# , weight_decay=args.weight_decay)
self.opt_encoder.zero_grad()
if self.cont_encoder != None:
params = self.cont_encoder.parameters()
self.opt_cont_encoder = torch.optim.Adam(params, lr=args.learning_rate)
# , weight_decay=args.weight_decay)
self.opt_cont_encoder.zero_grad()
if self.classifier != None:
params = self.classifier.parameters()
if self.train_encoder:
self.opt_decoder = torch.optim.Adam(
params, lr=args.learning_rate
) # , weight_decay=args.weight_decay)
else:
# If we train only the decoder we want a specific learning rate for it and regularization on it.
self.opt_decoder = torch.optim.Adam(
params,
lr=args.decoder_learning_rate,
weight_decay=args.decoder_weight_decay,
)
self.opt_decoder.zero_grad()
    # Checks all checkpoints; if a checkpoint is missing for one of the encoders, False is returned.
def checkpoint_exists(self):
def encoder_exists(encoder_type):
prefix = self.checkpoint_filename_prefix
return os.path.isfile("{}{}_encoder.pth".format(prefix, encoder_type))
exists = True
if self.disc_encoder != None:
exists = encoder_exists("disc")
if self.cont_encoder != None:
exists = exists and encoder_exists("cont")
return exists
def save_checkpoint(self):
if self.disc_encoder != None:
self.save_encoder("disc")
if self.cont_encoder != None:
self.save_encoder("cont")
prefix = self.checkpoint_filename_prefix
torch.save(self.classifier, prefix + "cls.pth")
self.logger.logger.info("=> saved checkpoint")
def save_encoder(self, encoder_type):
assert encoder_type in ["disc", "cont"]
if encoder_type == "disc":
encoder = self.disc_encoder
else:
encoder = self.cont_encoder
prefix = self.checkpoint_filename_prefix
torch.save(encoder, "{}{}_encoder.pth".format(prefix, encoder_type))
if self.use_tgn_memory:
torch.save(self.tgn_train_memory_backup, prefix + "tgn_memory_train.pth")
torch.save(self.tgn_val_memory_backup, prefix + "tgn_memory_val.pth")
def load_checkpoint(
self, load_disc_encoder=True, load_cont_encoder=True, load_decoder=True
):
if self.disc_encoder != None and load_disc_encoder:
self.disc_encoder = self.load_encoder("disc")
if self.cont_encoder != None and load_cont_encoder:
self.cont_encoder = self.load_encoder("cont")
if load_decoder:
# Remember to initialize optimizers if the classifier is to be further optimized
prefix = self.checkpoint_filename_prefix
self.classifier = torch.load(
prefix + "cls.pth",
map_location=torch.device(self.args.device),
)
def load_encoder(self, encoder_type):
assert encoder_type in ["disc", "cont"]
if encoder_type == "disc":
old_encoder = self.disc_encoder
else:
old_encoder = self.cont_encoder
old_encoder_name = type(old_encoder).__name__
prefix = self.checkpoint_filename_prefix
encoder = torch.load(
"{}{}_encoder.pth".format(prefix, encoder_type),
map_location=torch.device(self.args.device),
)
if hasattr(encoder, "device"):
encoder.device = self.args.device
if hasattr(encoder, "memory"): # i.e it is tgn
encoder.memory.device = self.args.device
encoder.message_aggregator.device = self.args.device
encoder.memory_updater.device = self.args.device
encoder.embedding_module.device = self.args.device
if self.use_tgn_memory:
self.tgn_train_memory_backup = torch.load(
prefix + "tgn_memory_train.pth",
map_location=torch.device(self.args.device),
)
self.tgn_val_memory_backup = torch.load(
prefix + "tgn_memory_val.pth",
map_location=torch.device(self.args.device),
)
self.logger.logger.info("<= loaded checkpoint {}".format(prefix))
new_encoder_name = type(encoder).__name__
error_msg = (
"Loaded encoder is not correct class. Was {}, should have been {}".format(
new_encoder_name, old_encoder_name
)
)
assert old_encoder_name == new_encoder_name, error_msg
return encoder
def train(self):
self.downstream = False
self.logger = logger.Logger(
self.args,
self.num_classes,
self.num_nodes,
train_encoder=self.train_encoder,
)
self.report_trainable_parameters()
def run_epochs(run_epoch, splitter, logger):
self.tr_step = 0
best_eval_valid = 0
eval_valid = 0
epochs_without_impr = 0
for e in range(1, self.args.num_epochs + 1):
eval_train, nodes_embs = run_epoch(splitter.train, e, "TRAIN")
epochs_without_impr += 1
do_eval = (
e >= self.args.eval_after_epochs
and e % self.args.eval_epoch_interval == 0
)
if len(splitter.val) > 0 and do_eval:
eval_valid, _ = run_epoch(splitter.val, e, "VALID")
if eval_valid > best_eval_valid:
best_eval_valid = eval_valid
epochs_without_impr = 0
print(
"### w"
+ str(self.args.rank)
+ ") ep "
+ str(e)
+ " - Best valid measure:"
+ str(eval_valid)
)
else:
if epochs_without_impr > self.args.early_stop_patience:
print(
"### w"
+ str(self.args.rank)
+ ") ep "
+ str(e)
+ " - Early stop."
)
break
if len(splitter.test) > 0 and eval_valid == best_eval_valid and do_eval:
self.save_checkpoint()
eval_test, _ = run_epoch(splitter.test, e, "TEST")
# if self.args.save_node_embeddings:
# self.save_node_embs_csv(nodes_embs, self.splitter.train_idx, log_file+'_train_nodeembs.csv.gz')
# self.save_node_embs_csv(nodes_embs, self.splitter.val_idx, log_file+'_valid_nodeembs.csv.gz')
# self.save_node_embs_csv(nodes_embs, self.splitter.test_idx, log_file+'_test_nodeembs.csv.gz')
if self.train_encoder:
run_epochs(self.run_epoch, self.splitter, self.logger)
else:
assert (
self.args.temporal_granularity == "continuous"
), "Frozen encoder training only supported for continuous models"
self.logger.logger.info("##### Decoder training")
self.frozen = True
self.load_checkpoint(load_disc_encoder=False, load_decoder=False)
run_epochs(
self.frozen_encoder_epoch, self.combined_splitter, self.logger
)
self.frozen = False # Let it go! Let it gooo!
if (
hasattr(self.args, "final_epoch")
and self.args.final_epoch
and self.train_encoder
):
# Final epoch on all edges to report accurate metrics
self.load_checkpoint(load_decoder=True)
if self.args.temporal_granularity != "continuous":
run_epoch = self.run_epoch
else:
run_epoch = self.frozen_encoder_epoch
run_epoch(self.final_splitter.val, self.final_epoch_id, "VALID")
self.set_save_predictions = self.args.save_predictions
run_epoch(self.final_splitter.test, self.final_epoch_id, "TEST")
self.set_save_predictions = False
self.logger.close()
if self.args.run_downstream:
# Downstream learning
print("Started downstream learning")
self.logger.logger.info("##### Downstream learning")
self.downstream = True # Is it pretty? No. Does it work? Yes
self.downstream_loggers = {
key: logger.Logger(
self.args, self.num_classes, self.num_nodes, classifier_name=key
)
for key in self.downstream_classifiers.keys()
}
self.load_checkpoint(load_decoder=True)
e = 0 # epoch
_ = self.run_downstream_epoch(self.downstream_splitter.train, e, "TRAIN")
_ = self.run_downstream_epoch(self.downstream_splitter.val, e, "VALID")
_ = self.run_downstream_epoch(self.downstream_splitter.test, e, "TEST")
for logr in self.downstream_loggers.values():
logr.close()
self.downstream = False
# Load model and only run on test
def eval(self):
self.logger = logger.Logger(
self.args,
self.num_classes,
self.num_nodes,
train_encoder=self.train_encoder,
)
# Load in everything
self.load_checkpoint()
def run_test_epoch(run_epoch, splitter, logger):
self.set_save_predictions = self.args.save_predictions
eval_test, _ = run_epoch(splitter.test, 1, "TEST")
self.set_save_predictions = False
if self.args.temporal_granularity in ["static", "discrete"]:
run_test_epoch(self.run_epoch, self.splitter, self.logger)
else:
assert (
self.args.temporal_granularity == "continuous"
), "Frozen encoder training only supported for continuous models"
self.logger.logger.info("##### Decoder eval")
self.frozen = True
run_test_epoch(
self.frozen_encoder_epoch, self.combined_splitter, self.logger
)
self.frozen = False # Let it go! Let it gooo!
self.logger.close()
def _epoch_decorator(run_epoch_func):
@functools.wraps(run_epoch_func)
def wrapper(*args, **kwards):
self = args[0]
split = args[1]
epoch = args[2]
set_name = args[3]
if self.use_tgn_memory:
self.prepare_tgn_memory(set_name)
log_interval = 999
if set_name == "TEST":
log_interval = 1
# Epoch start logger(s)
if not self.downstream:
self.logger.log_epoch_start(
epoch, len(split), set_name, minibatch_log_interval=log_interval
)
else:
# Using variable name logr instead of logger to avoid overwriting import logger
for logr in self.downstream_loggers.values():
logr.log_epoch_start(
epoch, len(split), set_name, minibatch_log_interval=log_interval
)
# Run epoch
nodes_embs = run_epoch_func(*args, **kwards)
if self.use_tgn_memory:
self.backup_tgn_memory(set_name)
# Epoch done logger(s)
if not self.downstream:
eval_measure = self.logger.log_epoch_done()
else:
for logr in self.downstream_loggers.values():
logr.log_epoch_done()
eval_measure = None # Doesn't matter here
return eval_measure, nodes_embs
return wrapper
@_epoch_decorator
def run_downstream_epoch(self, split, epoch, set_name):
raise NotImplementedError
self.encoder = self.encoder.eval()
torch.set_grad_enabled(False)
for s in split:
encode_sample, test_sample = s
# Encoder
es = self.prepare_sample(encode_sample, self.args.temporal_granularity)
nodes_embs = self.encode(es, set_name)
downstream_loss = torch.tensor(0.0) # No loss since we only encode
# Downstream
s = self.prepare_sample(test_sample, "static", only_label_sp=True)
predictions_dict, probs_dict = self.predict_downstream(
nodes_embs, s.label_sp["idx"], s.label_sp["vals"], set_name
)
for classifier_name in predictions_dict:
log = self.downstream_loggers[classifier_name]
if set_name in ["TEST", "VALID"] and self.args.task == "link_pred":
log.log_minibatch(
downstream_loss.detach(),
predictions_dict[classifier_name],
probs_dict[classifier_name],
s.label_sp["vals"],
adj=s.label_sp["idx"],
prev_adj=s.prev_adj["idx"],
)
else:
log.log_minibatch(
downstream_loss.detach(),
predictions_dict[classifier_name],
probs_dict[classifier_name],
s.label_sp["vals"],
)
self.encoder = self.encoder.train()
torch.set_grad_enabled(True)
return nodes_embs
@_epoch_decorator
def frozen_encoder_epoch(self, split, epoch, set_name):
if set_name == "TRAIN":
torch.set_grad_enabled(True)
else:
torch.set_grad_enabled(False)
# Freeze encoder
self.cont_encoder = self.cont_encoder.eval()
for param in self.cont_encoder.parameters():
param.requires_grad = False
# Cache encoder embeddings for each snapshot
to_cache = len(self.embedding_cache[set_name]) == 0
if to_cache:
self.logger.logger.info("Cache empty, encoding to fill cache")
else:
self.logger.logger.info("Using cache")
# Epoch
i = 0
for s in split:
encode_sample, test_sample = s
i = i + 1
if to_cache:
# Update cache
es = self.prepare_sample(encode_sample, self.args.temporal_granularity)
nodes_embs = self.encode(es, set_name)
self.embedding_cache[set_name][i] = nodes_embs
else:
# Use cache
nodes_embs = self.embedding_cache[set_name][i]
# assert nodes_embs.isnan().any() == False, 'A node embedding is nan'
            # Node embeddings are occasionally nan, so set nan values to zero.
nodes_embs[nodes_embs.isnan()] = 0
s = self.prepare_sample(test_sample, "static", only_label_sp=True)
# Decoder
predictions = self.predict(nodes_embs, s.label_sp)
loss = self.comp_loss(predictions, s.label_sp["vals"])
# print("loss:", loss)
# assert math.isnan(loss.item()) == False, 'Loss is nan'
probs = torch.softmax(predictions, dim=1)[:, 1]
if self.set_save_predictions:
if (
hasattr(self.args, "custom_labeler")
and self.args.custom_labeler == True
):
settype = self.tasker.custom_labeler.settype
prefix = "{}_{}".format(self.prediction_filename_prefix, settype)
u.save_predictions(
probs,
s.label_sp["idx"],
s.label_sp["vals"],
i,
prefix,
self.dataset,
)
else:
u.save_predictions(
probs,
s.label_sp["idx"],
s.label_sp["vals"],
i,
self.prediction_filename_prefix,
self.dataset,
)
if set_name in ["TEST", "VALID"] and self.args.task == "link_pred":
self.logger.log_minibatch(
loss.detach(),
predictions.detach().cpu(),
probs.detach().cpu(),
s.label_sp["vals"],
adj=s.label_sp["idx"],
prev_adj=s.prev_adj["idx"],
)
else:
self.logger.log_minibatch(
loss.detach(),
predictions.detach().cpu(),
probs.detach().cpu(),
s.label_sp["vals"],
)
if set_name == "TRAIN":
self.optim_step_decoder(loss)
self.cont_encoder = self.cont_encoder.train()
torch.set_grad_enabled(True)
return nodes_embs
@_epoch_decorator
def run_epoch(self, split, epoch, set_name):
snapshot_free = (
self.args.temporal_granularity == "continuous" or self.args.model == "seal"
)
if set_name == "TRAIN":
if self.disc_encoder != None:
self.disc_encoder.train()
if self.cont_encoder != None:
self.cont_encoder.train()
# If the cls is using dropout also call cls.train here. However we currently don't.
torch.set_grad_enabled(True)
else:
if self.disc_encoder != None:
self.disc_encoder.eval()
if self.cont_encoder != None:
self.cont_encoder.eval()
torch.set_grad_enabled(False)
i = 0
for s in split:
# split is a data_split class and this calls the __get_item__ function
# s = sample
# for key in s.keys():
# print(key, u.naturalsize(u.get_memory_size(s[key])))
# Reshapes and sends the tensors to device
s = self.prepare_sample(s, self.args.temporal_granularity)
i = i + 1
# print("ss", i, 'set name', set_name)
if not snapshot_free: # Snapshots, i.e. the Static, Discrete
nodes_embs = self.encode(s, set_name) # Encoder
predictions = self.predict(nodes_embs, s.label_sp) # Decoder
loss = self.comp_loss(predictions, s.label_sp["vals"])
probs = torch.softmax(predictions, dim=1)[:, 1]
if self.set_save_predictions:
settype = self.tasker.custom_labeler.settype
prefix = "{}_{}".format(self.prediction_filename_prefix, settype)
u.save_predictions(
probs,
s.label_sp["idx"],
s.label_sp["vals"],
i,
prefix,
self.dataset,
)
if set_name in ["TEST", "VALID"] and self.args.task == "link_pred":
self.logger.log_minibatch(
loss.detach(),
predictions.detach().cpu(),
probs.detach().cpu(),
s.label_sp["vals"],
adj=s.label_sp["idx"],
prev_adj=s.prev_adj["idx"],
)
else:
self.logger.log_minibatch(
loss.detach(),
predictions.detach().cpu(),
probs.detach().cpu(),
s.label_sp["vals"],
)
if set_name == "TRAIN" and not self.downstream:
self.optim_step(loss) # Only for DGNN training
else: # Edge based training - including continuous
if self.args.model == "seal":
nodes_embs = self.predict_seal(s, set_name)
else:
nodes_embs = self.predict_continuous(
s.hist_adj,
s.hist_time,
s.hist_ndFeats,
s.hist_node_mask,
set_name,
)
# Logging done internally in continuous training
if self.disc_encoder != None:
self.disc_encoder.train()
if self.cont_encoder != None:
self.cont_encoder.train()
torch.set_grad_enabled(True)
return nodes_embs
def encode(self, sample, set_name, temporal_granularity=None):
if temporal_granularity == None:
temporal_granularity = self.args.temporal_granularity
if temporal_granularity != "continuous":
nodes_embs = self.disc_encoder(
sample.hist_adj,
sample.hist_ndFeats,
sample.hist_vals,
sample.hist_node_mask,
)
else: # If snapshot based and continuous, used for downstream learning.
nodes_embs = self.predict_continuous(
sample.hist_adj,
sample.hist_time,
sample.hist_ndFeats,
sample.hist_node_mask,
set_name,
)
return nodes_embs
def predict(self, nodes_embs, label_sp):
node_indices = label_sp["idx"]
if self.has_time_query:
time = label_sp["time"]
edge_type = label_sp["type"]
time_feats = label_sp["time_feats"]
batch_size = self.args.decoder_batch_size
gather_predictions = []
for i in range(1 + (node_indices.size(1) // batch_size)):
b_start = i * batch_size
b_end = (i + 1) * batch_size
links = node_indices[:, b_start:b_end]
x = self.gather_node_embs(nodes_embs, links)
if self.has_time_query:
t = time[b_start:b_end].unsqueeze(1)
e_type = edge_type[b_start:b_end, :]
t_feats = time_feats[b_start:b_end, :]
predictions = self.classifier(x, t, e_type, t_feats)
else:
predictions = self.classifier(x)
gather_predictions.append(predictions)
gather_predictions = torch.cat(gather_predictions, dim=0)
return gather_predictions
def gather_node_embs(self, nodes_embs, links):
cls_input = []
for node_set in links:
cls_input.append(nodes_embs[node_set])
return torch.cat(cls_input, dim=1)
def predict_continuous(self, hist_adj, hist_time, hist_ndFeats, mask, set_name):
batch_size = self.args.continuous_batch_size
assert len(hist_adj) == len(hist_time) == len(hist_ndFeats) == 1
# Some Torch to numpy and GPU to CPU ping pong caused by TGAT taking numpy as input
# Luckily this happens only 3 times per epoch.
adj = hist_adj[0]
times = hist_time[0].cpu().numpy()
assert len(adj) == 2
src_idx_l = adj[0].cpu().numpy()
target_idx_l = adj[1].cpu().numpy()
num_nodes = len(np.unique(src_idx_l))
num_edges = len(np.atleast_1d(src_idx_l))
if num_edges <= 1: # The rest of the function assumes multiple edges.
# Ignore this one edge and simply return previous embeddings. This may happen on sparse datasets with small snapshots (the beginning of UC is an example)
nodes_embs = self.cont_encoder.node_embed.detach()
assert (
len(nodes_embs) == self.num_nodes
), "Node embeddings need to include all nodes"
return nodes_embs
num_batches = num_edges // batch_size
# The below line is potentially great for datasets which have no edge features. But if the initialization is dictated by edge features (as in TGAT), then we'll have to do something different here.
# self.cont_encoder.update_node_features(hist_ndFeats[0].to_dense().cpu().numpy())
# Combining source and target since TGAT is based on node embeddings and since our networks are treated as undirected.
# The ngh_finder retrieves a sample of edges per node which are used for the embedding.
# nodes = torch.stack([src_idx_l,target_idx_l], dim=1).flatten()
# times = torch.stack([times, times], dim=1).flatten()
for i in range(1 + num_batches):
# print("Batch {}/{}".format(i+1, num_batches+1))
src_batch, target_batch, times_batch, edge_idxs = self.get_continuous_batch(
src_idx_l, target_idx_l, times, i, batch_size
)
# print("continuous batch times", times_batch.min(), times_batch.max())
size = len(src_batch)
if size <= 1:
continue # TGAT breaks down if the batch only contains one edge.
# nembs_batch = self.cont_encoder(src_batch, #contrast used instead
# times_batch,
# hist_ndFeats[0],
# mask)
target_l_fake = np.random.randint(0, num_nodes, size)
with torch.no_grad():
pos_label = torch.ones(size, dtype=torch.float, device=self.args.device)
neg_label = torch.zeros(
size, dtype=torch.float, device=self.args.device
)
self.opt_cont_encoder.zero_grad()
pos_prob, neg_prob = self.cont_encoder.contrast(
src_batch, target_batch, target_l_fake, times_batch, edge_idxs
)
if not self.downstream and not self.frozen:
# If we're downstream or frozen we just want to encode
inter_loss = self.interpolation_loss(pos_prob, pos_label)
inter_loss += self.interpolation_loss(neg_prob, neg_label)
if set_name == "TRAIN":
inter_loss.backward()
self.opt_cont_encoder.step()
if self.use_tgn_memory:
self.cont_encoder.memory.detach_memory()
with torch.no_grad():
self.cont_encoder = self.cont_encoder.eval()
probs = torch.tensor(
np.concatenate(
[
(pos_prob).cpu().detach().numpy(),
(neg_prob).cpu().detach().numpy(),
]
)
)
predictions = torch.stack((probs, 1 - probs), dim=1)
true_label = torch.tensor(
np.concatenate([np.ones(size), np.zeros(size)])
)
self.logger.log_minibatch(
inter_loss.detach(),
predictions.detach().cpu(),
probs.detach().cpu(),
true_label,
calc_lp_metrics=False,
)
# Detach breaks the link between the models so the continuous model and classifier are trained separately.
nodes_embs = self.cont_encoder.node_embed.detach()
assert (
len(nodes_embs) == self.num_nodes
), "Node embeddings need to include all nodes"
return nodes_embs
def predict_downstream(self, nodes_embs, node_index, true_classes, set_name):
# If training we can only fit once, so no batching.
# Since we train using negative samples the data should be small enough to be handled in one go.
nodes_embs = nodes_embs.cpu()
node_index = node_index.cpu()
true_classes = true_classes.cpu()
if set_name == "TRAIN":
predictions_dict, probs_dict = self.predict_downstream_batch(
nodes_embs, node_index, true_classes, set_name
)
return predictions_dict, probs_dict
# Batching for validation and test
def get_batch(i, batch_size, nodes_embs, node_index, true_classes, set_name):
batch_start = i * batch_size
batch_end = (i + 1) * batch_size
node_index_batch = node_index[:, batch_start:batch_end]
true_classes_batch = true_classes[batch_start:batch_end]
predictions_dict, probs_dict = self.predict_downstream_batch(
nodes_embs, node_index_batch, true_classes_batch, set_name
)
return predictions_dict, probs_dict
batch_size = self.args.decoder_batch_size
# TODO Parallel seems to make it slower, consider investigating in the future, maybe different batch size
# parallel = Parallel(n_jobs=-1)
# gather_predictions = parallel(delayed(get_batch)(
# i, batch_size, nodes_embs, node_index, true_classes, set_name)
# for i in range(1 +(node_index.size(1)//batch_size))
# )
gather_predictions = [
get_batch(i, batch_size, nodes_embs, node_index, true_classes, set_name)
for i in range(1 + (node_index.size(1) // batch_size))
]
# Prepare predictions for concatenation
predictions_dict_list = {}
probs_dict_list = {}
for predictions_dict, probs_dict in gather_predictions:
for classifier_name in predictions_dict:
if classifier_name in predictions_dict_list.keys():
predictions_dict_list[classifier_name].append(
predictions_dict[classifier_name]
)
probs_dict_list[classifier_name].append(probs_dict[classifier_name])
else:
predictions_dict_list[classifier_name] = [
predictions_dict[classifier_name]
]
probs_dict_list[classifier_name] = [probs_dict[classifier_name]]
# Concatenate batched predictions
predictions_dict = {}
probs_dict = {}
for classifier_name in predictions_dict_list:
predictions = torch.cat(predictions_dict_list[classifier_name], dim=0)
probs = torch.cat(probs_dict_list[classifier_name], dim=0)
predictions_dict[classifier_name] = predictions
probs_dict[classifier_name] = probs
return predictions_dict, probs_dict
def predict_downstream_batch(self, nodes_embs, node_index, true_classes, set_name):
embedding_size = nodes_embs[1].size()[0]
n1, n2 = torch.split(
self.gather_node_embs(nodes_embs, node_index), embedding_size, dim=1
)
X = np.array(torch.cat([n1, n2, np.multiply(n1, n2)], dim=1))
y = np.array(true_classes)
# A rare problem where some embeddings on some snapshots cause X to include nan values
if not np.isfinite(X).all():
for logr in self.downstream_loggers.values():
logr.logger.warning(
"nan/inf/-inf observed in downstream X. Setting values to default valid numbers"
)
X = np.nan_to_num(X)
predictions_dict = {}
probs_dict = {}
for key, classifier in self.downstream_classifiers.items():
if set_name == "TRAIN":
classifier.fit(X, y)
predictions = classifier.predict_proba(X)
probs = predictions[:, 1]
self.downstream_classifiers[key] = classifier
predictions_dict[key] = torch.tensor(predictions)
probs_dict[key] = torch.tensor(probs)
return predictions_dict, probs_dict
def get_continuous_batch(self, src_l, target_l, times_l, i, batch_size):
start_idx = i * batch_size
end_idx = start_idx + batch_size
src_batch = src_l[start_idx:end_idx]
target_batch = target_l[start_idx:end_idx]
times_batch = times_l[start_idx:end_idx]
edge_idxs = np.arange(start_idx, start_idx + len(src_batch))
# print("batch", i, "src", src_l.shape, "sidx", start_idx, "eidx", end_idx, "src batch", src_batch.shape)
return src_batch, target_batch, times_batch, edge_idxs
def optim_step(self, loss):
self.tr_step += 1
loss.backward()
if self.tr_step % self.args.steps_accum_gradients == 0:
if self.disc_encoder != None and self.args.model != "random":
self.opt_encoder.step()
self.opt_encoder.zero_grad()
self.opt_decoder.step()
self.opt_decoder.zero_grad()
def optim_step_decoder(self, loss):
self.tr_step += 1
loss.backward()
if self.tr_step % self.args.steps_accum_gradients == 0:
self.opt_decoder.step()
self.opt_decoder.zero_grad()
def prepare_sample(
self, sample, temporal_granularity="static", only_label_sp=False
):
sample = u.Namespace(sample)
sample.hist_vals, sample.hist_time = [], []
if self.args.model == "seal":
# For SEAL we want to pack the data edge by edge into a dataloader (yes a second one)
seal_dataset = SEALDataset(
self.args,
self.args.gcn_parameters["hops"],
self.tasker.prepare_node_feats(sample.hist_ndFeats[0]).to_dense(),
sample.label_exist["idx"].squeeze(),
sample.label_non_exist["idx"].squeeze(),
sample.hist_adj[0]["idx"].squeeze(),
)
seal_loader = pygeomDataLoader(seal_dataset, batch_size=32)
return seal_loader # Returns a dataloader instead of a sample if it is SEAL
else:
# For the static and continuous case there will be only one iteration
for i, adj in enumerate(sample.hist_adj):
# Prepares an edge index (edge list) as expected by PyTorch Geometric
# Squeeze removes dimensions of size 1
vals = adj["vals"].squeeze().t()
sample.hist_vals.append(vals.to(self.args.device))
if temporal_granularity == "continuous":
hist_time = adj["time"].squeeze().t()
sample.hist_time.append(hist_time.to(self.args.device))
if hasattr(self.args, "pygeom") and self.args.pygeom == False:
# Only used for the original implementation of EGCN
adj_idx = u.sparse_prepare_tensor(adj, torch_size=[self.num_nodes])
else:
adj_idx = adj["idx"].squeeze().t()
sample.hist_adj[i] = adj_idx.to(self.args.device)
if not only_label_sp:
# Created some problems for reddit_tgn, we don't use this there anyways.
if self.disc_tasker != None:
nodes = self.disc_tasker.prepare_node_feats(
sample.hist_ndFeats[i]
)
else:
nodes = self.tasker.prepare_node_feats(sample.hist_ndFeats[i])
sample.hist_ndFeats[i] = nodes.to(self.args.device)
hist_node_mask = sample.hist_node_mask[i]
# transposed to have same dimensions as scorer
sample.hist_node_mask[i] = hist_node_mask.to(self.args.device).t()
label_sp = self.ignore_batch_dim(sample.label_sp)
if self.has_time_query:
label_sp["time"] = label_sp["time"].squeeze().to(self.args.device)
label_sp["type"] = label_sp["type"].squeeze().to(self.args.device)
label_sp["time_feats"] = (
label_sp["time_feats"].squeeze().to(self.args.device)
)
if self.args.task in ["link_pred", "edge_cls"]:
label_sp["idx"] = label_sp["idx"].to(self.args.device).t()
else:
label_sp["idx"] = label_sp["idx"].to(self.args.device)
label_sp["vals"] = label_sp["vals"].type(torch.long).to(self.args.device)
sample.label_sp = label_sp
return sample
def ignore_batch_dim(self, adj):
if self.args.task in ["link_pred", "edge_cls"]:
adj["idx"] = adj["idx"][0]
adj["vals"] = adj["vals"][0]
return adj
def save_node_embs_csv(self, nodes_embs, indexes, file_name):
csv_node_embs = []
for node_id in indexes:
orig_ID = torch.DoubleTensor([self.tasker.data.contID_to_origID[node_id]])
csv_node_embs.append(
torch.cat((orig_ID, nodes_embs[node_id].double())).detach().numpy()
)
pd.DataFrame(np.array(csv_node_embs)).to_csv(
file_name, header=None, index=None, compression="gzip"
)
print("Node embs saved in", file_name)
def prepare_tgn_memory(self, set_name):
if set_name == "TRAIN":
self.logger.logger.info("init memory")
self.cont_encoder.memory.__init_memory__()
elif set_name == "VALID":
self.logger.logger.info("restore training memory")
assert self.tgn_train_memory_backup is not None
self.cont_encoder.memory.restore_memory(self.tgn_train_memory_backup)
elif set_name == "TEST":
self.logger.logger.info("restore validation memory")
assert self.tgn_val_memory_backup is not None
self.cont_encoder.memory.restore_memory(self.tgn_val_memory_backup)
def backup_tgn_memory(self, set_name):
if set_name == "TRAIN":
print("save train memory")
self.tgn_train_memory_backup = self.cont_encoder.memory.backup_memory()
assert self.tgn_train_memory_backup is not None
elif set_name == "VALID":
print("save validation memory")
self.tgn_val_memory_backup = self.cont_encoder.memory.backup_memory()
assert self.tgn_val_memory_backup is not None
| [
"utils.sparse_prepare_tensor",
"torch.softmax",
"numpy.array",
"numpy.isfinite",
"torch.DoubleTensor",
"utils.get_gridcell",
"utils.save_predictions",
"numpy.multiply",
"utils.Namespace",
"sklearn.tree.DecisionTreeClassifier",
"functools.wraps",
"numpy.ones",
"logger.Logger",
"torch.save",
"torch.no_grad",
"xgboost.XGBClassifier",
"torch.cat",
"numpy.atleast_1d",
"torch.optim.Adam",
"torch.device",
"numpy.unique",
"os.makedirs",
"torch.stack",
"sklearn.linear_model.LogisticRegression",
"torch.tensor",
"torch.nn.BCELoss",
"numpy.random.randint",
"numpy.zeros",
"torch.set_grad_enabled",
"torch.zeros",
"numpy.nan_to_num",
"torch.ones"
] | [((1678, 1696), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (1694, 1696), False, 'import torch\n'), ((7817, 7864), 'torch.save', 'torch.save', (['self.classifier', "(prefix + 'cls.pth')"], {}), "(self.classifier, prefix + 'cls.pth')\n", (7827, 7864), False, 'import torch\n'), ((10893, 10990), 'logger.Logger', 'logger.Logger', (['self.args', 'self.num_classes', 'self.num_nodes'], {'train_encoder': 'self.train_encoder'}), '(self.args, self.num_classes, self.num_nodes, train_encoder=\n self.train_encoder)\n', (10906, 10990), False, 'import logger\n'), ((15662, 15759), 'logger.Logger', 'logger.Logger', (['self.args', 'self.num_classes', 'self.num_nodes'], {'train_encoder': 'self.train_encoder'}), '(self.args, self.num_classes, self.num_nodes, train_encoder=\n self.train_encoder)\n', (15675, 15759), False, 'import logger\n'), ((16776, 16807), 'functools.wraps', 'functools.wraps', (['run_epoch_func'], {}), '(run_epoch_func)\n', (16791, 16807), False, 'import functools\n'), ((18415, 18444), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (18437, 18444), False, 'import torch\n'), ((19922, 19950), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (19944, 19950), False, 'import torch\n'), ((23455, 23483), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (23477, 23483), False, 'import torch\n'), ((27075, 27103), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (27097, 27103), False, 'import torch\n'), ((28888, 28924), 'torch.cat', 'torch.cat', (['gather_predictions'], {'dim': '(0)'}), '(gather_predictions, dim=0)\n', (28897, 28924), False, 'import torch\n'), ((29133, 29160), 'torch.cat', 'torch.cat', (['cls_input'], {'dim': '(1)'}), '(cls_input, dim=1)\n', (29142, 29160), False, 'import torch\n'), ((37325, 37347), 'numpy.array', 'np.array', (['true_classes'], {}), '(true_classes)\n', (37333, 37347), True, 'import numpy as np\n'), ((39516, 39535), 'utils.Namespace', 'u.Namespace', (['sample'], {}), '(sample)\n', (39527, 39535), True, 'import utils as u\n'), ((1491, 1511), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1509, 1511), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1537, 1561), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1559, 1561), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1582, 1597), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '()\n', (1595, 1597), False, 'from xgboost import XGBClassifier\n'), ((3609, 3654), 'os.makedirs', 'os.makedirs', (['prediction_folder'], {'exist_ok': '(True)'}), '(prediction_folder, exist_ok=True)\n', (3620, 3654), False, 'import os\n'), ((5981, 6028), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'args.learning_rate'}), '(params, lr=args.learning_rate)\n', (5997, 6028), False, 'import torch\n'), ((6245, 6292), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'args.learning_rate'}), '(params, lr=args.learning_rate)\n', (6261, 6292), False, 'import torch\n'), ((8311, 8384), 'torch.save', 'torch.save', (['self.tgn_train_memory_backup', "(prefix + 'tgn_memory_train.pth')"], {}), "(self.tgn_train_memory_backup, prefix + 'tgn_memory_train.pth')\n", (8321, 8384), False, 'import torch\n'), ((8397, 8466), 'torch.save', 'torch.save', (['self.tgn_val_memory_backup', "(prefix + 'tgn_memory_val.pth')"], {}), "(self.tgn_val_memory_backup, prefix + 'tgn_memory_val.pth')\n", 
(8407, 8466), False, 'import torch\n'), ((18701, 18718), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (18713, 18718), False, 'import torch\n'), ((20106, 20134), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (20128, 20134), False, 'import torch\n'), ((20161, 20190), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (20183, 20190), False, 'import torch\n'), ((24016, 24044), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (24038, 24044), False, 'import torch\n'), ((24237, 24266), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (24259, 24266), False, 'import torch\n'), ((29729, 29749), 'numpy.unique', 'np.unique', (['src_idx_l'], {}), '(src_idx_l)\n', (29738, 29749), True, 'import numpy as np\n'), ((29775, 29799), 'numpy.atleast_1d', 'np.atleast_1d', (['src_idx_l'], {}), '(src_idx_l)\n', (29788, 29799), True, 'import numpy as np\n'), ((31703, 31740), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_nodes', 'size'], {}), '(0, num_nodes, size)\n', (31720, 31740), True, 'import numpy as np\n'), ((36703, 36759), 'torch.cat', 'torch.cat', (['predictions_dict_list[classifier_name]'], {'dim': '(0)'}), '(predictions_dict_list[classifier_name], dim=0)\n', (36712, 36759), False, 'import torch\n'), ((36780, 36830), 'torch.cat', 'torch.cat', (['probs_dict_list[classifier_name]'], {'dim': '(0)'}), '(probs_dict_list[classifier_name], dim=0)\n', (36789, 36830), False, 'import torch\n'), ((37711, 37727), 'numpy.nan_to_num', 'np.nan_to_num', (['X'], {}), '(X)\n', (37724, 37727), True, 'import numpy as np\n'), ((38111, 38136), 'torch.tensor', 'torch.tensor', (['predictions'], {}), '(predictions)\n', (38123, 38136), False, 'import torch\n'), ((38167, 38186), 'torch.tensor', 'torch.tensor', (['probs'], {}), '(probs)\n', (38179, 38186), False, 'import torch\n'), ((43031, 43095), 'torch.DoubleTensor', 'torch.DoubleTensor', (['[self.tasker.data.contID_to_origID[node_id]]'], {}), '([self.tasker.data.contID_to_origID[node_id]])\n', (43049, 43095), False, 'import torch\n'), ((3562, 3582), 'utils.get_gridcell', 'u.get_gridcell', (['args'], {}), '(args)\n', (3576, 3582), True, 'import utils as u\n'), ((6544, 6591), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'args.learning_rate'}), '(params, lr=args.learning_rate)\n', (6560, 6591), False, 'import torch\n'), ((6833, 6933), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'args.decoder_learning_rate', 'weight_decay': 'args.decoder_weight_decay'}), '(params, lr=args.decoder_learning_rate, weight_decay=args.\n decoder_weight_decay)\n', (6849, 6933), False, 'import torch\n'), ((9590, 9620), 'torch.device', 'torch.device', (['self.args.device'], {}), '(self.args.device)\n', (9602, 9620), False, 'import torch\n'), ((14933, 15012), 'logger.Logger', 'logger.Logger', (['self.args', 'self.num_classes', 'self.num_nodes'], {'classifier_name': 'key'}), '(self.args, self.num_classes, self.num_nodes, classifier_name=key)\n', (14946, 15012), False, 'import logger\n'), ((21679, 21712), 'torch.softmax', 'torch.softmax', (['predictions'], {'dim': '(1)'}), '(predictions, dim=1)\n', (21692, 21712), False, 'import torch\n'), ((31759, 31774), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (31772, 31774), False, 'import torch\n'), ((31804, 31864), 'torch.ones', 'torch.ones', (['size'], {'dtype': 'torch.float', 'device': 'self.args.device'}), '(size, dtype=torch.float, device=self.args.device)\n', 
(31814, 31864), False, 'import torch\n'), ((31893, 31954), 'torch.zeros', 'torch.zeros', (['size'], {'dtype': 'torch.float', 'device': 'self.args.device'}), '(size, dtype=torch.float, device=self.args.device)\n', (31904, 31954), False, 'import torch\n'), ((9095, 9125), 'torch.device', 'torch.device', (['self.args.device'], {}), '(self.args.device)\n', (9107, 9125), False, 'import torch\n'), ((10200, 10230), 'torch.device', 'torch.device', (['self.args.device'], {}), '(self.args.device)\n', (10212, 10230), False, 'import torch\n'), ((10375, 10405), 'torch.device', 'torch.device', (['self.args.device'], {}), '(self.args.device)\n', (10387, 10405), False, 'import torch\n'), ((22087, 22180), 'utils.save_predictions', 'u.save_predictions', (['probs', "s.label_sp['idx']", "s.label_sp['vals']", 'i', 'prefix', 'self.dataset'], {}), "(probs, s.label_sp['idx'], s.label_sp['vals'], i, prefix,\n self.dataset)\n", (22105, 22180), True, 'import utils as u\n'), ((22386, 22505), 'utils.save_predictions', 'u.save_predictions', (['probs', "s.label_sp['idx']", "s.label_sp['vals']", 'i', 'self.prediction_filename_prefix', 'self.dataset'], {}), "(probs, s.label_sp['idx'], s.label_sp['vals'], i, self.\n prediction_filename_prefix, self.dataset)\n", (22404, 22505), True, 'import utils as u\n'), ((25031, 25064), 'torch.softmax', 'torch.softmax', (['predictions'], {'dim': '(1)'}), '(predictions, dim=1)\n', (25044, 25064), False, 'import torch\n'), ((25289, 25382), 'utils.save_predictions', 'u.save_predictions', (['probs', "s.label_sp['idx']", "s.label_sp['vals']", 'i', 'prefix', 'self.dataset'], {}), "(probs, s.label_sp['idx'], s.label_sp['vals'], i, prefix,\n self.dataset)\n", (25307, 25382), True, 'import utils as u\n'), ((32734, 32749), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (32747, 32749), False, 'import torch\n'), ((33174, 33212), 'torch.stack', 'torch.stack', (['(probs, 1 - probs)'], {'dim': '(1)'}), '((probs, 1 - probs), dim=1)\n', (33185, 33212), False, 'import torch\n'), ((37283, 37302), 'numpy.multiply', 'np.multiply', (['n1', 'n2'], {}), '(n1, n2)\n', (37294, 37302), True, 'import numpy as np\n'), ((37459, 37473), 'numpy.isfinite', 'np.isfinite', (['X'], {}), '(X)\n', (37470, 37473), True, 'import numpy as np\n'), ((41042, 41099), 'utils.sparse_prepare_tensor', 'u.sparse_prepare_tensor', (['adj'], {'torch_size': '[self.num_nodes]'}), '(adj, torch_size=[self.num_nodes])\n', (41065, 41099), True, 'import utils as u\n'), ((43251, 43274), 'numpy.array', 'np.array', (['csv_node_embs'], {}), '(csv_node_embs)\n', (43259, 43274), True, 'import numpy as np\n'), ((33300, 33313), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (33307, 33313), True, 'import numpy as np\n'), ((33315, 33329), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (33323, 33329), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""
A basic demonstration of Bayesian statistics by flipping lizards.
"""
import sys
import os
import math
import argparse
import unittest
from scipy.stats import beta, binom
class BinomialModel(object):
"""
A class for demonstrating Bayesian statistics with a binomial sampling
distribution and a conjugate beta prior.
    Inspired by the lizard-flipping example in Chapter 2 of Luke Harmon's book
on phylogenetic comparative methods:
https://lukejharmon.github.io/pcm/chapter2_stats/
>>> m = BinomialModel(100, 10, 0.1)
>>> m.n
100
>>> m.k
10
>>> m.p
0.1
>>> abs(m.get_likelihood() - 0.1318653) < 0.00001
True
>>> abs(m.get_posterior_density() - 13.3184) < 0.0001
True
>>> abs(m.get_marginal_likelihood() - 0.00990099) < 0.000001
True
"""
def __init__(self,
number_of_flips = 100,
number_of_heads = 63,
probability_of_heads = 0.5,
prior_beta_a = 1.0,
prior_beta_b = 1.0):
self.n = number_of_flips
self.k = number_of_heads
self.beta_a = prior_beta_a
self.beta_b = prior_beta_b
self.p = probability_of_heads
def get_prior_distribution(self):
return beta(self.beta_a, self.beta_b)
def get_posterior_distribution(self):
return beta(
self.beta_a + self.k,
self.beta_b + (self.n - self.k))
def get_prior_density(self, p = None):
if p is None:
p = self.p
prior_dist = self.get_prior_distribution()
return prior_dist.pdf(p)
def get_log_prior_density(self, p = None):
if p is None:
p = self.p
prior_dist = self.get_prior_distribution()
return prior_dist.logpdf(p)
def get_posterior_density(self, p = None):
if p is None:
p = self.p
post_dist = self.get_posterior_distribution()
return post_dist.pdf(p)
def get_log_posterior_density(self, p = None):
if p is None:
p = self.p
post_dist = self.get_posterior_distribution()
return post_dist.logpdf(p)
def get_likelihood(self, p = None):
if p is None:
p = self.p
return binom.pmf(k = self.k, n = self.n, p = p)
def get_log_likelihood(self, p = None):
if p is None:
p = self.p
return binom.logpmf(k = self.k, n = self.n, p = p)
def get_log_marginal_likelihood(self):
"""
To get this, we just have to rearrange Bayes rule as follows:
p(rate_of_heads | data) = p(data | rate_of_heads) p(rate_of_heads)
----------------------------------------
p(data)
p(data) p(rate_of_heads | data) = p(data | rate_of_heads) p(rate_of_heads)
p(data) = p(data | rate_of_heads) p(rate_of_heads)
----------------------------------------
p(rate_of_heads | data)
Or in words, the marginal probability of the data equals the likelihood
times the prior density divided by the posterior density.
On a log scale, this is the log likelihood plus the log prior density
        minus the log posterior density.
"""
return (self.get_log_likelihood() + self.get_log_prior_density() -
self.get_log_posterior_density())
def get_marginal_likelihood(self):
return math.exp(self.get_log_marginal_likelihood())
def arg_is_positive_int(i):
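    # The bare `raise` below (with no active exception) simply jumps to the
    # generic except branch, which converts any bad value into an
    # argparse.ArgumentTypeError with a readable message.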
try:
if int(i) < 1:
raise
except:
msg = '{0!r} is not a positive integer'.format(i)
raise argparse.ArgumentTypeError(msg)
return int(i)
def arg_is_positive_float(i):
try:
if float(i) <= 0.0:
raise
except:
msg = '{0!r} is not a positive real number'.format(i)
raise argparse.ArgumentTypeError(msg)
return float(i)
def arg_is_nonnegative_float(i):
try:
if float(i) < 0.0:
raise
except:
msg = '{0!r} is not a non-negative real number'.format(i)
raise argparse.ArgumentTypeError(msg)
return float(i)
def main_cli(argv = sys.argv):
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--number-of-flips',
action = 'store',
type = arg_is_positive_int,
default = 100,
help = 'Number of lizard flips.')
parser.add_argument('-k', '--number-of-heads',
action = 'store',
type = arg_is_positive_int,
default = 63,
help = 'Number of lizards that land heads up.')
parser.add_argument('-p', '--probability-of-heads',
action = 'store',
type = arg_is_nonnegative_float,
default = 0.5,
help = ('Probability of any lizard landing heads up under the '
'\'null\' model.'))
parser.add_argument('-a', '--beta-prior-alpha',
action = 'store',
type = arg_is_positive_float,
default = 1.0,
help = ('Value of the alpha parameter of the beta prior on the '
'probability of heads.'))
parser.add_argument('-b', '--beta-prior-beta',
action = 'store',
type = arg_is_positive_float,
default = 1.0,
help = ('Value of the beta parameter of the beta prior on the '
'probability of heads.'))
if argv == sys.argv:
args = parser.parse_args()
else:
args = parser.parse_args(argv)
m = BinomialModel(
number_of_flips = args.number_of_flips,
number_of_heads = args.number_of_heads,
probability_of_heads = args.probability_of_heads,
prior_beta_a = args.beta_prior_alpha,
prior_beta_b = args.beta_prior_beta)
p = args.probability_of_heads
msg = """
Let's use Bayes rule to calculate the posterior probability of 2 models:
1. A "null" model where the probability of heads is fixed to 0.5
2. An alternative model where the probability of heads is free to vary between
0 and 1 according to a beta prior
First, we need the marginal probability of the data (the marginal likelihood)
under both models. For the null model there are no free parameters to marginalize
over, so the marginal likelihood is just the likelihood.
p(data | null model) = {p_data_given_null_model}
For the alternative model, we can easily get the densities from the prior and
posterior distributions (they are both beta distributions), and the likelihood
is a binomial, just like for the null model. With these three numbers, we can
solve for the marginal probability of the data (the denominator of the model's
posterior density).
p(data | alt model) = {p_data_given_alt_model}
Now, we can get the overall (marginal) probability of the data under either of these two models:
p(data) = [ p(data | null model) p(null model) ] + [ p(data | alt model) p(alt model) ]
Let's assume a priori that both models are equally probable (i.e., p(null model)
= p(alt model) = 0.5). This simplifies the above equation to:
p(data) = 0.5 p(data | null model) + 0.5 p(data | alt model)
= 0.5 [ p(data | null model) + p(data | alt model) ]
Now, we can calculate the posterior probability of both models:
p(null model | data) = p(data | null model) p(null model)
----------------------------------
p(data)
= p(data | null model) 0.5
--------------------------------------------------
0.5 [ p(data | null model) + p(data | alt model) ]
= p(data | null model)
--------------------------------------------------
p(data | null model) + p(data | alt model)
= {p_null_given_data}
p(alt model | data) = p(data | alt model)
------------------------------------------
p(data | null model) + p(data | alt model)
= {p_alt_given_data}
""".format(
p_data_given_null_model = m.get_likelihood(p),
p_data_given_alt_model = m.get_marginal_likelihood(),
p_null_given_data = m.get_likelihood(p) / (m.get_likelihood(p) + m.get_marginal_likelihood()),
p_alt_given_data = m.get_marginal_likelihood() / (m.get_likelihood(p) + m.get_marginal_likelihood()))
print(msg)
if __name__ == "__main__":
if "--run-tests" in sys.argv:
sys.stderr.write("""
*********************************************************************
Running test suite using the following Python executable and version:
{0}
{1}
*********************************************************************
\n""".format(sys.executable, sys.version))
import doctest
# doctest.testmod(verbose = True)
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite())
tests = unittest.defaultTestLoader.loadTestsFromName(
os.path.splitext(os.path.basename(__file__))[0])
suite.addTests(tests)
runner = unittest.TextTestRunner(verbosity = 2)
runner.run(suite)
sys.exit(0)
main_cli()
| [
"unittest.TestSuite",
"doctest.DocTestSuite",
"argparse.ArgumentParser",
"argparse.ArgumentTypeError",
"scipy.stats.binom.pmf",
"scipy.stats.beta",
"os.path.basename",
"sys.exit",
"unittest.TextTestRunner",
"scipy.stats.binom.logpmf"
] | [((4274, 4299), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4297, 4299), False, 'import argparse\n'), ((1273, 1303), 'scipy.stats.beta', 'beta', (['self.beta_a', 'self.beta_b'], {}), '(self.beta_a, self.beta_b)\n', (1277, 1303), False, 'from scipy.stats import beta, binom\n'), ((1362, 1421), 'scipy.stats.beta', 'beta', (['(self.beta_a + self.k)', '(self.beta_b + (self.n - self.k))'], {}), '(self.beta_a + self.k, self.beta_b + (self.n - self.k))\n', (1366, 1421), False, 'from scipy.stats import beta, binom\n'), ((2278, 2312), 'scipy.stats.binom.pmf', 'binom.pmf', ([], {'k': 'self.k', 'n': 'self.n', 'p': 'p'}), '(k=self.k, n=self.n, p=p)\n', (2287, 2312), False, 'from scipy.stats import beta, binom\n'), ((2424, 2461), 'scipy.stats.binom.logpmf', 'binom.logpmf', ([], {'k': 'self.k', 'n': 'self.n', 'p': 'p'}), '(k=self.k, n=self.n, p=p)\n', (2436, 2461), False, 'from scipy.stats import beta, binom\n'), ((9016, 9036), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (9034, 9036), False, 'import unittest\n'), ((9258, 9294), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (9281, 9294), False, 'import unittest\n'), ((9332, 9343), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9340, 9343), False, 'import sys\n'), ((3720, 3751), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['msg'], {}), '(msg)\n', (3746, 3751), False, 'import argparse\n'), ((3944, 3975), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['msg'], {}), '(msg)\n', (3970, 3975), False, 'import argparse\n'), ((4176, 4207), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['msg'], {}), '(msg)\n', (4202, 4207), False, 'import argparse\n'), ((9059, 9081), 'doctest.DocTestSuite', 'doctest.DocTestSuite', ([], {}), '()\n', (9079, 9081), False, 'import doctest\n'), ((9178, 9204), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (9194, 9204), False, 'import os\n')] |
from sqlalchemy import (create_engine, MetaData, Column,
Table, Integer, String, ForeignKey, select)
engine = create_engine('sqlite:///base.db',
echo=False)
metadata = MetaData(bind=engine)
artists = Table('artistas', metadata,
Column('id', Integer, primary_key=True, autoincrement=True),
Column('nome', String(40), index=True,
nullable=False, unique=True))
discs = Table('discos', metadata,
Column('id', Integer, primary_key=True, autoincrement=True),
Column('artista_id', ForeignKey('artistas.id'), nullable=False),
Column('album', String(40), nullable=False),
Column('ano', Integer, nullable=False))
metadata.create_all()
def search_albums(artist):
artist_id = [x for x in select([artists.c.id]).
where(artists.c.nome == artist.lower()).execute()]
if artist_id:
query = select([discs.c.id, discs.c.album,
discs.c.ano, discs.c.artista_id]).where(
discs.c.artista_id == artist_id[0][0]).execute()
return {_id: {'album': album,
'ano': ano,
'id_artista': artista}
for _id, album, ano, artista in query}
return {}
def id_artist(artist):
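    # Look up the artist's id; if the artist is not in the table yet,
    # insert it and retry the lookup.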
searched = select([artists]).where(artists.c.nome == artist)
result = [_id for _id, artist in searched.execute()]
if result:
return result[0]
else:
insert_artist(artist)
return id_artist(artist)
def search_all_artists():
return {_id: artist for _id, artist in select([artists]).execute()}
def insert_artist(artist):
conn = engine.connect()
artista_ins = artists.insert()
new_artist = artista_ins.values(nome=artist)
try:
conn.execute(new_artist)
status = True
except Exception as e:
print(e)
status = False
finally:
conn.close()
return status
def insert_album(disc, year, artist):
conn = engine.connect()
disc_ins = discs.insert()
new_disc = disc_ins.values(artista_id=id_artist(artist),
album=disc,
ano=year)
try:
conn.execute(new_disc)
status = True
except Exception as e:
print(e)
status = False
finally:
conn.close()
return status
| [
"sqlalchemy.create_engine",
"sqlalchemy.ForeignKey",
"sqlalchemy.MetaData",
"sqlalchemy.String",
"sqlalchemy.select",
"sqlalchemy.Column"
] | [((136, 182), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///base.db"""'], {'echo': '(False)'}), "('sqlite:///base.db', echo=False)\n", (149, 182), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((218, 239), 'sqlalchemy.MetaData', 'MetaData', ([], {'bind': 'engine'}), '(bind=engine)\n', (226, 239), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((295, 354), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), "('id', Integer, primary_key=True, autoincrement=True)\n", (301, 354), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((513, 572), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), "('id', Integer, primary_key=True, autoincrement=True)\n", (519, 572), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((726, 764), 'sqlalchemy.Column', 'Column', (['"""ano"""', 'Integer'], {'nullable': '(False)'}), "('ano', Integer, nullable=False)\n", (732, 764), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((387, 397), 'sqlalchemy.String', 'String', (['(40)'], {}), '(40)\n', (393, 397), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((609, 634), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""artistas.id"""'], {}), "('artistas.id')\n", (619, 634), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((683, 693), 'sqlalchemy.String', 'String', (['(40)'], {}), '(40)\n', (689, 693), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((1374, 1391), 'sqlalchemy.select', 'select', (['[artists]'], {}), '([artists])\n', (1380, 1391), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((1665, 1682), 'sqlalchemy.select', 'select', (['[artists]'], {}), '([artists])\n', (1671, 1682), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((973, 1041), 'sqlalchemy.select', 'select', (['[discs.c.id, discs.c.album, discs.c.ano, discs.c.artista_id]'], {}), '([discs.c.id, discs.c.album, discs.c.ano, discs.c.artista_id])\n', (979, 1041), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n'), ((846, 868), 'sqlalchemy.select', 'select', (['[artists.c.id]'], {}), '([artists.c.id])\n', (852, 868), False, 'from sqlalchemy import create_engine, MetaData, Column, Table, Integer, String, ForeignKey, select\n')] |
from dctopo import FatTreeTopo #, VL2Topo, TreeTopo
import sys
import time
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from mininet.node import RemoteController
if __name__ == '__main__':
runtime = sys.argv[1]
load = sys.argv[2]
setLogLevel('info')
fattree = FatTreeTopo(k=6, speed=0.02)
net = Mininet(topo=fattree, link=TCLink, controller=RemoteController)
net.start()
h = net.hosts
hnum = len(h)
time.sleep(5)
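    # Start run_time_load_id.py on every host (presumably a background load
    # generator), then ping between hosts to capture latency traces while it runs.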
for i in range(0, hnum):
h[i].cmd("python ~/mininet-repnet/run_time_load_id.py " + runtime + " " + load + " " + str(i) + " &")
h[3].cmd("ping -c10000 -i0.1 10.5.1.2 > ping2.trace & ")
h[4].cmd("ping -c10000 -i0.1 10.5.1.3 > ping3.trace & ")
h[5].cmd("ping -c10000 -i0.1 10.5.1.4 > ping4.trace ")
time.sleep(3)
net.stop()
| [
"mininet.log.setLogLevel",
"time.sleep",
"dctopo.FatTreeTopo",
"mininet.net.Mininet"
] | [((413, 432), 'mininet.log.setLogLevel', 'setLogLevel', (['"""info"""'], {}), "('info')\n", (424, 432), False, 'from mininet.log import setLogLevel\n'), ((447, 475), 'dctopo.FatTreeTopo', 'FatTreeTopo', ([], {'k': '(6)', 'speed': '(0.02)'}), '(k=6, speed=0.02)\n', (458, 475), False, 'from dctopo import FatTreeTopo\n'), ((486, 549), 'mininet.net.Mininet', 'Mininet', ([], {'topo': 'fattree', 'link': 'TCLink', 'controller': 'RemoteController'}), '(topo=fattree, link=TCLink, controller=RemoteController)\n', (493, 549), False, 'from mininet.net import Mininet\n'), ((606, 619), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (616, 619), False, 'import time\n'), ((947, 960), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (957, 960), False, 'import time\n')] |
from model.contact import Contact
from random import randrange
import random
def test_delete_some_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.fill_new_entry(
Contact(_first_name="Zbigniew", _middle_name="Janusz", _last_name="Brzeczyszczykiewicz",
_nickname="nick123",
_tittle="tittle123", _company="Company XYZ", _address="adress 3/5", _telephone_home="658123123",
_telephone_mobile="654987987", _telephone_work="54121212", _fax="+52 45878787",
_email="<EMAIL>",
_email_2="<EMAIL>", _email_3="<EMAIL>", _homepage="www.bartek123.com",
_bday="10", _bmonth="July",
_byear="1989", _aday="19", _amonth="December", _ayear="2010",
_secondary_address="secondary address",
_secondary_home="secondaryHome", _secondary_notes="some additional notes"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
new_contacts = db.get_contact_list()
#assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(contact)
#assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
| [
"random.choice",
"model.contact.Contact"
] | [((1033, 1060), 'random.choice', 'random.choice', (['old_contacts'], {}), '(old_contacts)\n', (1046, 1060), False, 'import random\n'), ((216, 831), 'model.contact.Contact', 'Contact', ([], {'_first_name': '"""Zbigniew"""', '_middle_name': '"""Janusz"""', '_last_name': '"""Brzeczyszczykiewicz"""', '_nickname': '"""nick123"""', '_tittle': '"""tittle123"""', '_company': '"""Company XYZ"""', '_address': '"""adress 3/5"""', '_telephone_home': '"""658123123"""', '_telephone_mobile': '"""654987987"""', '_telephone_work': '"""54121212"""', '_fax': '"""+52 45878787"""', '_email': '"""<EMAIL>"""', '_email_2': '"""<EMAIL>"""', '_email_3': '"""<EMAIL>"""', '_homepage': '"""www.bartek123.com"""', '_bday': '"""10"""', '_bmonth': '"""July"""', '_byear': '"""1989"""', '_aday': '"""19"""', '_amonth': '"""December"""', '_ayear': '"""2010"""', '_secondary_address': '"""secondary address"""', '_secondary_home': '"""secondaryHome"""', '_secondary_notes': '"""some additional notes"""'}), "(_first_name='Zbigniew', _middle_name='Janusz', _last_name=\n 'Brzeczyszczykiewicz', _nickname='nick123', _tittle='tittle123',\n _company='Company XYZ', _address='adress 3/5', _telephone_home=\n '658123123', _telephone_mobile='654987987', _telephone_work='54121212',\n _fax='+52 45878787', _email='<EMAIL>', _email_2='<EMAIL>', _email_3=\n '<EMAIL>', _homepage='www.bartek123.com', _bday='10', _bmonth='July',\n _byear='1989', _aday='19', _amonth='December', _ayear='2010',\n _secondary_address='secondary address', _secondary_home='secondaryHome',\n _secondary_notes='some additional notes')\n", (223, 831), False, 'from model.contact import Contact\n')] |
import os
import datetime
import argparse
import sys
import logging
import contextlib
import psycopg2
from dotenv import load_dotenv
from logging.config import dictConfig
from api_shopify import Shopify_API
from api_amazon import Amazon_API
LOGGING_CONFIG = {
"version": 1,
"formatters": {
"simple": {
"format": "%(levelname)s\t%(name)s\t%(asctime)s\t%(module)s@%(lineno)s\t%(message)s"
},
},
"handlers": {
"cli_handler": {
"level": "INFO",
"class": "concurrent_log_handler.ConcurrentRotatingFileHandler",
"formatter": "simple",
"filename": "automationtools/logs/cli.log",
"maxBytes": 1000000,
"backupCount": 10,
"encoding": "utf8",
}
},
"loggers": {
"cli_logger": {"level": "INFO", "handlers": ["cli_handler"]},
},
}
dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("cli_logger")
AMAZON_MARKETPLACE_IDS = "ATVPDKIKX0DER"
load_dotenv()
DB_STRING = os.getenv("DB_STRING")
def create_parser():
parser = argparse.ArgumentParser(description="Argument parser for Moondance.")
parser.add_argument(
"--sync-all",
action="store_true",
default=False,
)
parser.add_argument(
"--sync-shopify-products",
action="store_true",
default=False,
)
parser.add_argument(
"--sync-shopify-order-events",
action="store_true",
default=False,
)
parser.add_argument(
"--sync-shopify-customers",
action="store_true",
default=False,
)
parser.add_argument(
"--sync-shopify-sales",
action="store_true",
default=False,
)
parser.add_argument(
"--sync-amazon-sales",
action="store_true",
default=False,
)
parser.add_argument(
"--sync-amazon-financial-events",
action="store_true",
default=False,
)
parser.add_argument(
"--sync-amazon-sales-lines",
action="store_true",
default=False,
)
parser.add_argument(
"--time-interval",
type=str,
        help='Lookback window for orders, given as "<number> <unit>", e.g. "12 hours" or "3 days"',
)
parser.add_argument(
"--rebuild-sales-orders",
action="store_true",
default=False,
)
return parser
def cli():
parser = create_parser()
args = parser.parse_args()
interval = set_interval(args.time_interval)
if args.sync_all:
sync_shopify(
command="products",
request_parameters={
"updated_at_min": interval["start_datetime"],
"limit": 100,
},
)
sync_shopify(
command="sales_orders",
request_parameters={
"updated_at_min": interval["start_datetime"],
"status": "any",
"limit": 100,
},
)
sync_shopify(
command="sync_shopify_order_events",
request_parameters={
"limit": 100,
},
)
sync_shopify(
command="customers",
request_parameters={
"updated_at_min": interval["start_datetime"],
"limit": 100,
},
)
sync_amazon(
command="sales_orders",
request_parameters={
"MarketplaceIds": AMAZON_MARKETPLACE_IDS,
"LastUpdatedBefore": interval["end_datetime"],
"LastUpdatedAfter": interval["start_datetime"],
},
)
sync_amazon(
command="sales_order_lines",
request_parameters={
"MarketplaceIds": AMAZON_MARKETPLACE_IDS,
"LastUpdatedBefore": interval["end_datetime"],
"LastUpdatedAfter": interval["start_datetime"],
},
)
sync_amazon(
command="financial_events",
request_parameters={
"MarketplaceIds": AMAZON_MARKETPLACE_IDS,
"PostedBefore": interval["end_datetime"],
"PostedAfter": interval["start_datetime"],
},
)
rebuild_sales_orders()
sys.exit()
if args.sync_shopify_products:
sync_shopify(
command="products",
request_parameters={
"updated_at_min": interval["start_datetime"],
},
)
if args.sync_shopify_sales:
sync_shopify(
command="sales_orders",
request_parameters={
"status": "any",
"updated_at_min": interval["start_datetime"],
"limit": 100,
},
)
if args.sync_shopify_order_events:
sync_shopify(
command="sync_shopify_order_events",
request_parameters={
"limit": 100,
},
)
if args.sync_shopify_customers:
sync_shopify(
command="customers",
request_parameters={
"updated_at_min": interval["start_datetime"],
"limit": 100,
},
)
if args.sync_amazon_sales:
sync_amazon(
command="sales_orders",
request_parameters={
"MarketplaceIds": AMAZON_MARKETPLACE_IDS,
"LastUpdatedBefore": interval["end_datetime"],
"LastUpdatedAfter": interval["start_datetime"],
},
)
if args.sync_amazon_sales_lines:
sync_amazon(
command="sales_order_lines",
request_parameters={
"MarketplaceIds": AMAZON_MARKETPLACE_IDS,
"LastUpdatedBefore": interval["end_datetime"],
"LastUpdatedAfter": interval["start_datetime"],
},
)
if args.sync_amazon_financial_events:
sync_amazon(
command="financial_events",
request_parameters={
"MarketplaceIds": AMAZON_MARKETPLACE_IDS,
"PostedBefore": interval["end_datetime"],
"PostedAfter": interval["start_datetime"],
},
)
if args.rebuild_sales_orders:
rebuild_sales_orders()
def set_interval(time_interval):
try:
log = "set time interval: starting"
logger.info(log)
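        # The window ends 3 minutes before "now" (presumably to avoid racing
        # very recent data) and defaults to a 3 day lookback when no
        # --time-interval is supplied.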
now = datetime.datetime.utcnow()
end_datetime = (now - datetime.timedelta(**{"minutes": 3})).isoformat()
if not time_interval:
start_datetime = (now - datetime.timedelta(**{"days": 3})).isoformat()
else:
time_interval = time_interval.split(" ")
interval = {time_interval[1].strip(): int(time_interval[0].strip())}
start_datetime = (now - datetime.timedelta(**interval)).isoformat()
return {
"start_datetime": start_datetime,
"end_datetime": end_datetime,
}
except Exception:
log = "set time interval: failed"
logger.error(log, exc_info=1)
sys.exit()
finally:
log = f"set time interval: completed using range of {start_datetime} to {end_datetime}"
logger.info(log)
def sync_shopify(command, request_parameters):
try:
logger.info(f"sync shopify {command}: starting program")
shopify = Shopify_API(logger=logger)
shopify.process_data(command=command, request_parameters=request_parameters)
logger.info(f"sync shopify {command}: completed program")
except Exception:
logger.error(f"sync shopify {command}: failed program", exc_info=1)
def sync_amazon(command, request_parameters):
try:
logger.info(f"sync amazon {command}: starting program")
amazon = Amazon_API(logger=logger)
amazon.process_data(command=command, request_parameters=request_parameters)
logger.info(f"sync amazon {command}: completed program")
except Exception:
logger.error(f"sync amazon {command}: failed program", exc_info=1)
def rebuild_sales_orders():
try:
logger.info("rebuilding sales orders: starting program")
script_path = "automationtools/templates/scripts/"
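        # Execute every SQL script found in the scripts directory, committing
        # after each file and logging (but not aborting on) individual failures.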
with contextlib.closing(psycopg2.connect(DB_STRING)) as conn:
with contextlib.closing(conn.cursor()) as cursor:
for file_name in os.listdir(script_path):
try:
logger.info(
f"rebuilding sales orders script {file_name}: starting execution"
)
with open(f"{script_path}/{file_name}", "r") as f:
sql = f.read()
cursor.execute(sql)
conn.commit()
logger.info(
f"rebuilding sales orders script {file_name}: completed execution"
)
except Exception:
logger.error(
f"rebuilding sales orders script {file_name}: failed execution",
exc_info=1,
)
logger.info("rebuilding sales orders: completed program")
except Exception:
logger.error("rebuilding sales orders: failed program", exc_info=1)
if __name__ == "__main__":
cli()
| [
"logging.getLogger",
"psycopg2.connect",
"os.listdir",
"argparse.ArgumentParser",
"os.getenv",
"datetime.datetime.utcnow",
"logging.config.dictConfig",
"api_amazon.Amazon_API",
"sys.exit",
"datetime.timedelta",
"api_shopify.Shopify_API"
] | [((884, 910), 'logging.config.dictConfig', 'dictConfig', (['LOGGING_CONFIG'], {}), '(LOGGING_CONFIG)\n', (894, 910), False, 'from logging.config import dictConfig\n'), ((920, 951), 'logging.getLogger', 'logging.getLogger', (['"""cli_logger"""'], {}), "('cli_logger')\n", (937, 951), False, 'import logging\n'), ((1006, 1028), 'os.getenv', 'os.getenv', (['"""DB_STRING"""'], {}), "('DB_STRING')\n", (1015, 1028), False, 'import os\n'), ((1065, 1134), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Argument parser for Moondance."""'}), "(description='Argument parser for Moondance.')\n", (1088, 1134), False, 'import argparse\n'), ((4274, 4284), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4282, 4284), False, 'import sys\n'), ((6417, 6443), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (6441, 6443), False, 'import datetime\n'), ((7379, 7405), 'api_shopify.Shopify_API', 'Shopify_API', ([], {'logger': 'logger'}), '(logger=logger)\n', (7390, 7405), False, 'from api_shopify import Shopify_API\n'), ((7793, 7818), 'api_amazon.Amazon_API', 'Amazon_API', ([], {'logger': 'logger'}), '(logger=logger)\n', (7803, 7818), False, 'from api_amazon import Amazon_API\n'), ((7092, 7102), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7100, 7102), False, 'import sys\n'), ((8261, 8288), 'psycopg2.connect', 'psycopg2.connect', (['DB_STRING'], {}), '(DB_STRING)\n', (8277, 8288), False, 'import psycopg2\n'), ((8394, 8417), 'os.listdir', 'os.listdir', (['script_path'], {}), '(script_path)\n', (8404, 8417), False, 'import os\n'), ((6474, 6510), 'datetime.timedelta', 'datetime.timedelta', ([], {}), "(**{'minutes': 3})\n", (6492, 6510), False, 'import datetime\n'), ((6591, 6624), 'datetime.timedelta', 'datetime.timedelta', ([], {}), "(**{'days': 3})\n", (6609, 6624), False, 'import datetime\n'), ((6822, 6852), 'datetime.timedelta', 'datetime.timedelta', ([], {}), '(**interval)\n', (6840, 6852), False, 'import datetime\n')] |
from rest_framework import serializers
from .models import *
from django.contrib.auth.models import User
from rest_framework.exceptions import ValidationError
class RegisterSerializer(serializers.ModelSerializer):
password = serializers.CharField(min_length=8,max_length=16)
confirm_password = serializers.CharField(min_length=8,max_length=16)
username = serializers.CharField(min_length=4)
class Meta:
model = User
fields = ['username','email','first_name','last_name','password','confirm_password']
def create(self, validated_data):
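        # Pop both raw passwords so they are never passed to User.objects.create();
        # the real password is hashed via set_password() below.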
password = validated_data.pop('password') #1
confirm_password = validated_data.pop('confirm_password') #1
if password != confirm_password:
raise ValidationError({"data":"Passwords don't match!"})
user = User.objects.create(**validated_data)
user.set_password(password)
user.save()
UserProfile.objects.create(user=user,email=user.email,
full_name=user.first_name.capitalize() + " " + user.last_name.capitalize())
return user
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ['id','user','full_name','phone','email',]
class MovieSerializer(serializers.ModelSerializer):
name = serializers.CharField(max_length=50)
description = serializers.CharField(max_length=500)
average_rate = serializers.FloatField(min_value=1,max_value=10)
class Meta:
model = Movie
fields = ['name','description','average_rate']
| [
"rest_framework.serializers.FloatField",
"django.contrib.auth.models.User.objects.create",
"rest_framework.serializers.CharField",
"rest_framework.exceptions.ValidationError"
] | [((231, 281), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'min_length': '(8)', 'max_length': '(16)'}), '(min_length=8, max_length=16)\n', (252, 281), False, 'from rest_framework import serializers\n'), ((304, 354), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'min_length': '(8)', 'max_length': '(16)'}), '(min_length=8, max_length=16)\n', (325, 354), False, 'from rest_framework import serializers\n'), ((369, 404), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'min_length': '(4)'}), '(min_length=4)\n', (390, 404), False, 'from rest_framework import serializers\n'), ((1340, 1376), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1361, 1376), False, 'from rest_framework import serializers\n'), ((1395, 1432), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1416, 1432), False, 'from rest_framework import serializers\n'), ((1452, 1501), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (1474, 1501), False, 'from rest_framework import serializers\n'), ((822, 859), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {}), '(**validated_data)\n', (841, 859), False, 'from django.contrib.auth.models import User\n'), ((756, 807), 'rest_framework.exceptions.ValidationError', 'ValidationError', (['{\'data\': "Passwords don\'t match!"}'], {}), '({\'data\': "Passwords don\'t match!"})\n', (771, 807), False, 'from rest_framework.exceptions import ValidationError\n')] |
import matplotlib.pyplot as plt
import numpy as np
import cv2
class linefollow:
def __init__(self, path):
self.orgimg = cv2.imread(path)
self.size()
self.set_()
def size(self):
self.h , self.w = self.orgimg.shape[:2]
def gary(self):
return cv2.cvtColor(self.orgimg, cv2.COLOR_BGR2GRAY)
def otsu(self):
re, th = cv2.threshold(self.gary(), 0, 255, cv2.THRESH_OTSU)
return th
def median(self):
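        # cv2.medianBlur requires an odd kernel size, so even values derived
        # from the image height are bumped up by one.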
msksize = round(self.h / 10)
if msksize % 2 == 0 : msksize += 1
return cv2.medianBlur(self.otsu(), msksize)
def flip_(self):
return cv2.flip(self.median(), -1)
def set_(self):
self.msk_w = round(self.w/6)
self.msk_h = round(self.h/6)
self.flip = self.flip_()
def windows(self):
pass
class planA(linefollow):
def windows(self):
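        # Slide a window across three horizontal bands of the image and keep
        # the x position whose window holds the most black pixels (assumed to
        # be the line being followed).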
show = cv2.flip(self.orgimg, -1)
for y in range(3):
loc = 0
max = 0
for x in range(0, self.w - self.msk_w, round(self.msk_w / 2)):
win = self.flip[(y*self.msk_h):(y*self.msk_h)+self.msk_h, x:x+self.msk_w]
win = list(np.concatenate(win))
if(max < win.count(0)):
max = win.count(0)
loc = x
self.deaw(show, loc, y)
#cv2.rectangle(show, (loc, (y*self.msk_h)), (loc+self.msk_w, (y*self.msk_h)+self.msk_h), (255, 0, 0), 5)
return cv2.flip(show, -1)
def deaw(self, show, loc, y):
center_x = round(loc + (self.msk_w/2))
center_y = round((y*self.msk_h) + (self.msk_h/2))
center_xx = round(self.w/2)
cv2.rectangle(show, (loc, (y*self.msk_h)), (loc+self.msk_w, (y*self.msk_h)+self.msk_h), (255, 0, 0), 5)
cv2.circle(show, (center_x ,center_y), 10, (0, 255, 0), -1)
cv2.circle(show, (center_xx, center_y), 10, (0, 0, 255), -1)
cv2.line(show, (center_x, center_y), (center_xx, center_y), (125, 125, 125), 2)
class planB(linefollow):
def windows(self):
show = cv2.flip(self.orgimg, -1)
loc = 0
max = 0
x = self.w - self.msk_w
for y in range(0, self.h - self.msk_h, round(self.msk_h / 2)):
win = self.flip[y:y+self.msk_h, x:x+self.msk_w]
win = list(np.concatenate(win))
if(max < win.count(0)):
max = win.count(0)
loc = y
cv2.rectangle(show, (x, loc), (x + self.msk_w , loc + self.msk_h), (255, 0, 0), 5)
return cv2.flip(show, -1)
| [
"cv2.rectangle",
"cv2.flip",
"cv2.line",
"cv2.circle",
"cv2.cvtColor",
"numpy.concatenate",
"cv2.imread"
] | [((134, 150), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (144, 150), False, 'import cv2\n'), ((298, 343), 'cv2.cvtColor', 'cv2.cvtColor', (['self.orgimg', 'cv2.COLOR_BGR2GRAY'], {}), '(self.orgimg, cv2.COLOR_BGR2GRAY)\n', (310, 343), False, 'import cv2\n'), ((907, 932), 'cv2.flip', 'cv2.flip', (['self.orgimg', '(-1)'], {}), '(self.orgimg, -1)\n', (915, 932), False, 'import cv2\n'), ((1488, 1506), 'cv2.flip', 'cv2.flip', (['show', '(-1)'], {}), '(show, -1)\n', (1496, 1506), False, 'import cv2\n'), ((1692, 1804), 'cv2.rectangle', 'cv2.rectangle', (['show', '(loc, y * self.msk_h)', '(loc + self.msk_w, y * self.msk_h + self.msk_h)', '(255, 0, 0)', '(5)'], {}), '(show, (loc, y * self.msk_h), (loc + self.msk_w, y * self.\n msk_h + self.msk_h), (255, 0, 0), 5)\n', (1705, 1804), False, 'import cv2\n'), ((1804, 1863), 'cv2.circle', 'cv2.circle', (['show', '(center_x, center_y)', '(10)', '(0, 255, 0)', '(-1)'], {}), '(show, (center_x, center_y), 10, (0, 255, 0), -1)\n', (1814, 1863), False, 'import cv2\n'), ((1872, 1932), 'cv2.circle', 'cv2.circle', (['show', '(center_xx, center_y)', '(10)', '(0, 0, 255)', '(-1)'], {}), '(show, (center_xx, center_y), 10, (0, 0, 255), -1)\n', (1882, 1932), False, 'import cv2\n'), ((1941, 2020), 'cv2.line', 'cv2.line', (['show', '(center_x, center_y)', '(center_xx, center_y)', '(125, 125, 125)', '(2)'], {}), '(show, (center_x, center_y), (center_xx, center_y), (125, 125, 125), 2)\n', (1949, 2020), False, 'import cv2\n'), ((2086, 2111), 'cv2.flip', 'cv2.flip', (['self.orgimg', '(-1)'], {}), '(self.orgimg, -1)\n', (2094, 2111), False, 'import cv2\n'), ((2454, 2540), 'cv2.rectangle', 'cv2.rectangle', (['show', '(x, loc)', '(x + self.msk_w, loc + self.msk_h)', '(255, 0, 0)', '(5)'], {}), '(show, (x, loc), (x + self.msk_w, loc + self.msk_h), (255, 0, \n 0), 5)\n', (2467, 2540), False, 'import cv2\n'), ((2552, 2570), 'cv2.flip', 'cv2.flip', (['show', '(-1)'], {}), '(show, -1)\n', (2560, 2570), False, 'import cv2\n'), ((2330, 2349), 'numpy.concatenate', 'np.concatenate', (['win'], {}), '(win)\n', (2344, 2349), True, 'import numpy as np\n'), ((1192, 1211), 'numpy.concatenate', 'np.concatenate', (['win'], {}), '(win)\n', (1206, 1211), True, 'import numpy as np\n')] |
import json
import boto3
from crhelper import CfnResource
import os
helper = CfnResource()
s3 = boto3.client('s3')
s3_resource = boto3.resource('s3')
sourceBucket = os.environ['s3sourceBucket']
sourcePrefix = os.environ['s3sourcePrefix']
destinationbucket = os.environ['s3destinationBucket']
def lambda_handler(event, context):
helper(event, context)
@helper.create
@helper.update
def copy_website(event, _):
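    # Copy every object under the source prefix into the destination bucket,
    # stripping the prefix from each destination key.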
bucket = s3_resource.Bucket(sourceBucket)
for object in bucket.objects.filter(Prefix=sourcePrefix):
file = object.key
try:
copy_source = {'Bucket': sourceBucket, 'Key': file}
s3_resource.meta.client.copy(
copy_source, destinationbucket, file.replace(sourcePrefix, ""))
except:
print("An exception occurred copying: " + file)
@helper.delete
def delete_website(_, __):
for object in s3_resource.Bucket(destinationbucket).objects.all():
s3.delete_object(Bucket=destinationbucket, Key=object.key)
bucket = s3_resource.Bucket(destinationbucket)
bucket.object_versions.delete()
| [
"boto3.resource",
"crhelper.CfnResource",
"boto3.client"
] | [((78, 91), 'crhelper.CfnResource', 'CfnResource', ([], {}), '()\n', (89, 91), False, 'from crhelper import CfnResource\n'), ((98, 116), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (110, 116), False, 'import boto3\n'), ((131, 151), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (145, 151), False, 'import boto3\n')] |
from eTravelerComponents import Traveler
traveler = Traveler('TS8_sim', 'ScienceRaft', 'Test Stand 8 EO simulation')
#
# Data acquisition jobs
#
fe55_raft_acq = traveler.stepFactory('fe55_raft_acq',
description='Fe55 acquisition')
dark_raft_acq = traveler.stepFactory('dark_raft_acq',
description='Darks acquisition')
flat_pair_raft_acq = traveler.stepFactory('flat_pair_raft_acq',
description='Flat pairs acquisition')
ppump_raft_acq = traveler.stepFactory('ppump_raft_acq',
description='Pocket Pumping acquisition')
sflat_raft_acq = traveler.stepFactory('sflat_raft_acq',
description='Superflats acquisition')
qe_raft_acq = traveler.stepFactory('qe_raft_acq', description='QE acquisition')
spot_raft_acq = traveler.stepFactory('spot_raft_acq',
description='Spot acquisition')
#
# Analysis jobs
#
fe55_analysis = traveler.stepFactory('fe55_raft_analysis',
description='Fe55 analysis')
fe55_analysis.add_pre_reqs(fe55_raft_acq)
read_noise = traveler.stepFactory('read_noise_raft',
description='Read noise analysis')
read_noise.add_pre_reqs(fe55_raft_acq, fe55_analysis)
bright_defects = traveler.stepFactory('bright_defects_raft',
description='Bright defects analysis')
bright_defects.add_pre_reqs(dark_raft_acq, fe55_analysis)
dark_defects = traveler.stepFactory('dark_defects_raft',
description='Dark defects analysis')
dark_defects.add_pre_reqs(sflat_raft_acq, fe55_analysis, bright_defects)
traps = traveler.stepFactory('traps_raft', description='Charge traps analysis')
traps.add_pre_reqs(ppump_raft_acq, fe55_analysis, bright_defects, dark_defects)
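# The steps bundled as mask_generators below are prerequisites for all of the
# remaining analysis steps.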
mask_generators = fe55_analysis, bright_defects, dark_defects, traps
dark_current = traveler.stepFactory('dark_current_raft',
description='Dark current analysis')
dark_current.add_pre_reqs(dark_raft_acq)
dark_current.add_pre_reqs(*mask_generators)
cte = traveler.stepFactory('cte_raft', description='Charge transfer efficiency')
cte.add_pre_reqs(sflat_raft_acq)
cte.add_pre_reqs(*mask_generators)
prnu = \
traveler.stepFactory('prnu_raft',
description='Photo-response non-uniformity analysis')
prnu.add_pre_reqs(qe_raft_acq)
prnu.add_pre_reqs(*mask_generators)
flat_pairs_analysis = \
traveler.stepFactory('flat_pairs_raft_analysis',
description='Full well and linearity analysis')
flat_pairs_analysis.add_pre_reqs(flat_pair_raft_acq)
flat_pairs_analysis.add_pre_reqs(*mask_generators)
ptc = traveler.stepFactory('ptc_raft', description='Photon transfer curve')
ptc.add_pre_reqs(flat_pair_raft_acq)
ptc.add_pre_reqs(*mask_generators)
qe_analysis = traveler.stepFactory('qe_raft_analysis', description='QE analysis')
qe_analysis.add_pre_reqs(qe_raft_acq)
qe_analysis.add_pre_reqs(*mask_generators)
crosstalk = traveler.stepFactory('crosstalk_raft',
description='Crosstalk analysis')
crosstalk.add_pre_reqs(spot_raft_acq)
crosstalk.add_pre_reqs(*mask_generators)
test_report = traveler.stepFactory('test_report_raft',
description='Test report generation')
test_report.add_pre_reqs(fe55_analysis, read_noise, bright_defects,
dark_defects, traps, dark_current, cte, prnu,
flat_pairs_analysis, ptc, qe_analysis, crosstalk)
#
# Write travelers
#
traveler.write_fake_eT_traveler('TS8_sim_traveler.py')
traveler.write_yml('TS8_sim_traveler.yml')
| [
"eTravelerComponents.Traveler"
] | [((53, 117), 'eTravelerComponents.Traveler', 'Traveler', (['"""TS8_sim"""', '"""ScienceRaft"""', '"""Test Stand 8 EO simulation"""'], {}), "('TS8_sim', 'ScienceRaft', 'Test Stand 8 EO simulation')\n", (61, 117), False, 'from eTravelerComponents import Traveler\n')] |
# Copyright 2019 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from datetime import datetime
from dateutil import tz
from mediatimestamp.immutable import (
Timestamp,
TimeOffset,
TsValueError,
mediatimestamp,
SupportsMediaTimestamp,
SupportsMediaTimeOffset,
mediatimeoffset)
class TestTimestamp(unittest.TestCase):
def test_mediatimestamp(self):
to = TimeOffset()
self.assertNotIsInstance(to, SupportsMediaTimestamp)
ts = Timestamp()
self.assertIsInstance(ts, SupportsMediaTimestamp)
self.assertIsInstance(ts, SupportsMediaTimeOffset)
self.assertEqual(ts, mediatimestamp(ts))
self.assertEqual(ts, mediatimeoffset(ts))
class _convertable (object):
def __mediatimestamp__(self) -> Timestamp:
return Timestamp()
c = _convertable()
self.assertIsInstance(c, SupportsMediaTimestamp)
self.assertIsInstance(c, SupportsMediaTimeOffset)
self.assertEqual(ts, mediatimestamp(c))
self.assertEqual(ts, mediatimeoffset(c))
def test_MAX_NANOSEC(self):
self.assertEqual(Timestamp.MAX_NANOSEC, 1000000000)
def test_get_time_pythonic(self):
"""This tests that the fallback pure python implementation of get_time works as expected."""
test_ts = [
(1512489451.0, Timestamp(1512489451 + 37, 0)),
(1512489451.1, Timestamp(1512489451 + 37, 100000000))
]
for t in test_ts:
with mock.patch("time.time") as time:
time.return_value = t[0]
gottime = Timestamp.get_time()
self.assertEqual(gottime, t[1], msg="Times not equal, expected: %r, got %r" % (t[1], gottime))
def test_iaddsub(self):
"""This tests integer addition and subtraction on timestamps."""
ts = Timestamp(10, 0)
ts += TimeOffset(1, 2)
self.assertEqual(ts, Timestamp(11, 2))
ts -= TimeOffset(1, 2)
self.assertEqual(ts, Timestamp(10, 0))
ts -= TimeOffset(100, 5)
self.assertEqual(ts, Timestamp(90, 5, -1))
ts = Timestamp(281474976710655, 999999999)
ts += TimeOffset(0, 1)
self.assertEqual(ts, Timestamp(281474976710655, 999999999))
toff = TimeOffset(10, 0)
toff -= TimeOffset(100, 0)
self.assertEqual(toff, TimeOffset(90, 0, -1))
toff = TimeOffset(10, 0)
toff -= TimeOffset(0, 1)
self.assertEqual(toff, TimeOffset(9, 999999999))
toff = TimeOffset(10, 500000000)
toff += TimeOffset(0, 500000000)
self.assertEqual(toff, TimeOffset(11, 0))
toff = TimeOffset(10, 500000000, -1)
toff -= TimeOffset(0, 500000000)
self.assertEqual(toff, TimeOffset(11, 0, -1))
toff = TimeOffset(10, 0, -1)
toff += TimeOffset(0, 500000000)
self.assertEqual(toff, TimeOffset(9, 500000000, -1))
def test_addsub(self):
"""This tests addition and subtraction on timestamps."""
tests_ts = [
(Timestamp(10, 0), '+', TimeOffset(1, 2), Timestamp(11, 2)),
(Timestamp(11, 2), '-', TimeOffset(1, 2), Timestamp(10, 0)),
(TimeOffset(11, 2), '-', TimeOffset(1, 2), TimeOffset(10, 0)),
(Timestamp(10, 0), '-', TimeOffset(11, 2), Timestamp(1, 2, -1)),
(TimeOffset(10, 0), '-', TimeOffset(11, 2), TimeOffset(1, 2, -1)),
(TimeOffset(10, 0), '-', Timestamp(11, 2), TimeOffset(1, 2, -1)),
(Timestamp(10, 0), '-', Timestamp(11, 2), TimeOffset(1, 2, -1)),
(Timestamp(11, 2), '-', Timestamp(10, 0), TimeOffset(1, 2, 1)),
]
for t in tests_ts:
if t[1] == '+':
r = t[0] + t[2]
else:
r = t[0] - t[2]
self.assertEqual(r, t[3],
msg="{!r} {} {!r} = {!r}, expected {!r}".format(t[0], t[1], t[2], r, t[3]))
self.assertEqual(type(r), type(t[3]),
msg=("type({!r} {} {!r}) == {!r}, expected {!r}"
.format(t[0], t[1], t[2], type(r), type(t[3]))))
def test_multdiv(self):
"""This tests multiplication and division on timestamps."""
tests_ts = [
(TimeOffset(10, 10), '*', 0, TimeOffset(0, 0)),
(TimeOffset(10, 10), '*', 10, TimeOffset(100, 100)),
(10, '*', TimeOffset(10, 10), TimeOffset(100, 100)),
(TimeOffset(10, 10), '*', (-10), TimeOffset(100, 100, -1)),
(TimeOffset(10, 10, -1), '*', 10, TimeOffset(100, 100, -1)),
(TimeOffset(100, 100), '//', 10, TimeOffset(10, 10)),
(TimeOffset(100, 100), '//', -10, TimeOffset(10, 10, -1)),
(TimeOffset(100, 100, -1), '//', 10, TimeOffset(10, 10, -1)),
(TimeOffset(281474976710654, 0), '//', 281474976710655, TimeOffset(0, 999999999)),
(Timestamp(100, 100), '//', 10, TimeOffset(10, 10)),
(TimeOffset(100, 100), '/', 10, TimeOffset(10, 10)),
(TimeOffset(100, 100), '/', -10, TimeOffset(10, 10, -1)),
(TimeOffset(100, 100, -1), '/', 10, TimeOffset(10, 10, -1)),
(TimeOffset(281474976710654, 0), '/', 281474976710655, TimeOffset(0, 999999999)),
(Timestamp(100, 100), '/', 10, TimeOffset(10, 10)),
(Timestamp(10, 10), '*', 10, TimeOffset(100, 100)),
(10, '*', Timestamp(10, 10), TimeOffset(100, 100)),
]
for t in tests_ts:
if t[1] == '*':
r = t[0] * t[2]
elif t[1] == '//':
r = t[0] // t[2]
else:
r = t[0] / t[2]
self.assertEqual(r, t[3],
msg="{!r} {} {!r} == {!r}, expected {!r}".format(t[0], t[1], t[2], r, t[3]))
self.assertEqual(type(r), type(t[3]),
msg=("type({!r} {} {!r}) == {!r}, expected {!r}"
.format(t[0], t[1], t[2], type(r), type(t[3]))))
def test_compare(self):
"""This tests comparison of timestamps."""
self.assertEqual(Timestamp(1, 2), Timestamp(1, 2))
self.assertNotEqual(Timestamp(1, 2), Timestamp(1, 3))
self.assertLess(Timestamp(1, 0), Timestamp(1, 2))
self.assertLessEqual(Timestamp(1, 2), Timestamp(1, 2))
self.assertGreater(Timestamp(2, 0), Timestamp(1, 0))
self.assertGreaterEqual(Timestamp(2, 0), Timestamp(2, 0))
self.assertNotEqual(Timestamp(2, 0), Timestamp(3, 0))
self.assertEqual(Timestamp(2, 0), 2)
self.assertGreater(Timestamp(2, 0), 1)
self.assertLess(Timestamp(2, 0), 3)
self.assertLess(TimeOffset(2, 0), 3)
self.assertGreaterEqual(TimeOffset(1, 0, 1), TimeOffset(1, 0, -1))
def test_invalid_str(self):
"""This tests that invalid strings fed into from_str raise exceptions."""
tests_ts = [
"a",
"2015-02-17T12:53:48.5",
"2015-02T12:53:48.5",
"2015-02-17T12:53.5",
"12:53:48.5"
]
for t in tests_ts:
try:
Timestamp.from_str(t)
self.assertTrue(False)
except Exception:
pass
def test_invalid_int(self):
"""This tests that invalid int values fed into timestamp constructor get normalised."""
tests_ts = [
(Timestamp(-1, 0), Timestamp(1, 0, -1)),
(Timestamp(281474976710656, 0), Timestamp(281474976710655, 999999999)),
(Timestamp(0, 1000000000), Timestamp(1, 0)),
(Timestamp(0, -1), Timestamp(0, 1, -1)),
(Timestamp(5, -1000000007), Timestamp(3, 999999993))
]
for t in tests_ts:
self.assertEqual(t[0], t[1])
def test_convert_str(self):
"""This tests that various string formats can be converted to timestamps."""
tests_ts = [
("1:2", Timestamp(1, 2)),
("1.2", Timestamp(1, 200000000)),
("1", Timestamp(1, 0)),
("2015-02-17T12:53:48.5Z", Timestamp(1424177663, 500000000)),
("2015-02-17T12:53:48.000102003Z", Timestamp(1424177663, 102003))
]
for t in tests_ts:
ts = Timestamp.from_str(t[0])
self.assertTrue(isinstance(ts, Timestamp))
self.assertEqual(ts, t[1])
def test_convert_sec_nsec(self):
"""This tests that the conversion to and from TAI second:nanosecond pairs works as expected."""
tests_ts = [
("0:0", TimeOffset(0, 0), "0:0"),
("0:1", TimeOffset(0, 1), "0:1"),
("-0:1", TimeOffset(0, 1, -1), "-0:1"),
("5", TimeOffset(5, 0), "5:0"),
("5:1", TimeOffset(5, 1), "5:1"),
("-5:1", TimeOffset(5, 1, -1), "-5:1"),
("5:999999999", TimeOffset(5, 999999999), "5:999999999")
]
for t in tests_ts:
ts = TimeOffset.from_sec_nsec(t[0])
self.assertEqual(
ts,
t[1],
msg="Called with {} {} {}".format(t[0], t[1], t[2]))
ts_str = ts.to_sec_nsec()
self.assertEqual(
ts_str,
t[2],
msg="Called with {} {} {}".format(t[0], t[1], t[2]))
self.assertEqual(ts_str, str(ts))
def test_ts_convert_tai_sec_nsec(self):
"""This tests that the conversion to and from TAI second:nanosecond pairs works as expected."""
tests_ts = [
("0:0", Timestamp(0, 0), "0:0"),
("0:1", Timestamp(0, 1), "0:1"),
("-0:1", Timestamp(0, 1, -1), "-0:1"),
("5", Timestamp(5, 0), "5:0"),
("5:1", Timestamp(5, 1), "5:1"),
("-5:1", Timestamp(5, 1, -1), "-5:1"),
("5:999999999", Timestamp(5, 999999999), "5:999999999")
]
for t in tests_ts:
ts = Timestamp.from_sec_nsec(t[0])
self.assertIsInstance(ts, Timestamp,
msg=("Timestamp.from_sec_nsec({!r}) == {!r} not an instance of Timestamp"
.format(t[0], ts)))
self.assertEqual(
ts,
t[1],
msg="Timestamp.from_sec_nsec({!r}) == {!r}, expected {!r}".format(t[0], ts, t[1]))
ts_str = ts.to_sec_nsec()
self.assertEqual(
ts_str,
t[2],
msg="{!r}.to_sec_nsec() == {!r}, expected {!r}".format(ts, ts_str, t[2]))
self.assertEqual(ts_str, str(ts))
def test_convert_sec_frac(self):
"""This tests that the conversion to and from TAI seconds with fractional parts works as expected."""
tests_ts = [
("0.0", TimeOffset(0, 0), "0.0"),
("0.1", TimeOffset(0, 1000000000 // 10), "0.1"),
("-0.1", TimeOffset(0, 1000000000 // 10, -1), "-0.1"),
("5", TimeOffset(5, 0), "5.0"),
("5.1", TimeOffset(5, 1000000000 // 10), "5.1"),
("-5.1", TimeOffset(5, 1000000000 // 10, -1), "-5.1"),
("5.10000000", TimeOffset(5, 1000000000 // 10), "5.1"),
("5.123456789", TimeOffset(5, 123456789), "5.123456789"),
("5.000000001", TimeOffset(5, 1), "5.000000001"),
("5.0000000001", TimeOffset(5, 0), "5.0")
]
for t in tests_ts:
ts = TimeOffset.from_sec_frac(t[0])
self.assertEqual(
ts,
t[1],
msg="Called with {} {} {}".format(t[0], t[1], t[2]))
ts_str = ts.to_sec_frac()
self.assertEqual(
ts_str,
t[2],
msg="Called with {} {} {}".format(t[0], t[1], t[2]))
def test_ts_convert_tai_sec_frac(self):
"""This tests that the conversion to and from TAI seconds with fractional parts works as expected."""
tests_ts = [
("0.0", Timestamp(0, 0), "0.0"),
("0.1", Timestamp(0, 1000000000 // 10), "0.1"),
("-0.1", Timestamp(0, 100000000, -1), "-0.1"),
("5", Timestamp(5, 0), "5.0"),
("5.1", Timestamp(5, 1000000000 // 10), "5.1"),
("-5.1", Timestamp(5, 100000000, -1), "-5.1"),
("5.10000000", Timestamp(5, 1000000000 // 10), "5.1"),
("5.123456789", Timestamp(5, 123456789), "5.123456789"),
("5.000000001", Timestamp(5, 1), "5.000000001"),
("5.0000000001", Timestamp(5, 0), "5.0")
]
for t in tests_ts:
ts = Timestamp.from_sec_frac(t[0])
self.assertIsInstance(ts, Timestamp,
msg=("Timestamp.from_sec_frac({!r}) == {!r} not instance of Timestamp"
.format(t[0], ts)))
self.assertEqual(
ts,
t[1],
msg="Timestamp.from_sec_frac({!r}) == {!r}, expected {!r}".format(t[0], ts, t[1]))
ts_str = ts.to_sec_frac()
self.assertEqual(
ts_str,
t[2],
msg="{!r}.ts_to_sec_frac() == {!r}, expected {!r}".format(ts, ts_str, t[2]))
def test_convert_iso_utc(self):
"""This tests that conversion to and from ISO date format UTC time works as expected."""
tests = [
(Timestamp(1424177663, 102003), "2015-02-17T12:53:48.000102003Z"),
# the leap second is 23:59:60
# 30 June 1972 23:59:59 (2287785599, first time): TAI= UTC + 10 seconds
(Timestamp(78796809, 0), "1972-06-30T23:59:59.000000000Z"),
# 30 June 1972 23:59:60 (2287785599,second time): TAI= UTC + 11 seconds
(Timestamp(78796810, 0), "1972-06-30T23:59:60.000000000Z"),
# 1 July 1972 00:00:00 (2287785600) TAI= UTC + 11 seconds
(Timestamp(78796811, 0), "1972-07-01T00:00:00.000000000Z"),
(Timestamp(1341100833, 0), "2012-06-30T23:59:59.000000000Z"),
(Timestamp(1341100834, 0), "2012-06-30T23:59:60.000000000Z"),
(Timestamp(1341100835, 0), "2012-07-01T00:00:00.000000000Z"),
(Timestamp(1341100835, 1), "2012-07-01T00:00:00.000000001Z"),
(Timestamp(1341100835, 100000000), "2012-07-01T00:00:00.100000000Z"),
(Timestamp(1341100835, 999999999), "2012-07-01T00:00:00.999999999Z"),
(Timestamp(283996818, 0), "1979-01-01T00:00:00.000000000Z") # 1979
]
for t in tests:
utc = t[0].to_iso8601_utc()
self.assertEqual(utc, t[1])
ts = Timestamp.from_iso8601_utc(t[1])
self.assertEqual(ts, t[0])
bad_params = [
("2012-07-01Y00:00:00.000000001Z",),
("2012-07~01T00:00:00.000000001Z",),
("2012-07-01T00:00:00.0000.0001Z",),
]
for p in bad_params:
with self.assertRaises(TsValueError):
Timestamp.from_iso8601_utc(*p)
def test_smpte_timelabel(self):
"""This tests that conversion to and from SMPTE time labels works correctly."""
tests = [
("2015-01-23T12:34:56F00 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-01-23T12:34:56F01 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-01-23T12:34:56F02 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-01-23T12:34:56F28 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-01-23T12:34:56F29 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-07-01T00:59:59F00 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T00:59:59F01 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T00:59:59F29 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T00:59:60F00 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T00:59:60F29 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T01:00:00F00 30000/1001 UTC+01:00 TAI-36", 30000, 1001, 60*60),
("2015-06-30T18:59:59F29 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-06-30T18:59:60F00 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-06-30T18:59:60F29 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-06-30T19:00:00F00 30000/1001 UTC-05:00 TAI-36", 30000, 1001, -5*60*60)
]
for t in tests:
ts = Timestamp.from_smpte_timelabel(t[0])
self.assertEqual(t[0], ts.to_smpte_timelabel(t[1], t[2], t[3]))
bad_params = [
("potato",),
("the quick brown fox jumps over the lazy dog",),
("",),
('\u3069\u3082\u3042\u308a\u304c\u3068\u3046\u3001\u30df\u30b9\u30bf\u30fc\u30fb\u30ed\u30dc\u30c8\u30fc',),
("About half nine on tuesday",),
("0315-13~35T25:63:60F56 50000/1002 UTC-25:35 TAY-2",),
]
for p in bad_params:
with self.assertRaises(TsValueError):
Timestamp.from_smpte_timelabel(*p)
bad_params = [
(0, 1),
(1, 0),
]
for p in bad_params:
with self.assertRaises(TsValueError):
Timestamp(0, 0).to_smpte_timelabel(*p)
with mock.patch("time.timezone", 0):
with mock.patch("time.localtime") as localtime:
localtime.return_value.tm_isdst = 1
ts = Timestamp.from_smpte_timelabel("2015-07-01T00:59:59F00 30000/1001 UTC+01:00 TAI-35")
self.assertEqual("2015-07-01T00:59:59F00 30000/1001 UTC+01:00 TAI-35",
ts.to_smpte_timelabel(30000, 1001))
def test_from_datetime(self):
"""Conversion from python's datetime object."""
tests = [
(datetime(1970, 1, 1, 0, 0, 0, 0, tz.gettz('UTC')), Timestamp(0, 0)),
(datetime(1983, 3, 29, 15, 45, 0, 0, tz.gettz('UTC')), Timestamp(417800721, 0)),
(datetime(2017, 12, 5, 16, 33, 12, 196, tz.gettz('UTC')), Timestamp(1512491629, 196000)),
]
for t in tests:
self.assertEqual(Timestamp.from_datetime(t[0]), t[1])
def test_to_datetime(self):
"""Conversion to python's datetime object."""
tests = [
(datetime(1970, 1, 1, 0, 0, 0, 0, tz.gettz('UTC')), Timestamp(0, 0)),
(datetime(1983, 3, 29, 15, 45, 0, 0, tz.gettz('UTC')), Timestamp(417800721, 0)),
(datetime(2017, 12, 5, 16, 33, 12, 196, tz.gettz('UTC')), Timestamp(1512491629, 196000)),
(datetime(2017, 12, 5, 16, 33, 13, 0, tz.gettz('UTC')), Timestamp(1512491629, 999999999)),
]
for t in tests:
self.assertEqual(t[0], t[1].to_datetime())
def test_from_str(self):
"""Conversion from string formats."""
tests = [
("2015-01-23T12:34:56F00 30000/1001 UTC-05:00 TAI-35", Timestamp(1422034531, 17100000)),
("2015-01-23T12:34:56.0Z", Timestamp(1422016531, 0)),
("now", Timestamp(0, 0)),
]
for t in tests:
with mock.patch("time.time", return_value=0.0):
self.assertEqual(Timestamp.from_str(t[0]), t[1])
def test_get_leap_seconds(self):
"""get_leap_seconds should return the correct number of leap seconds at any point in history."""
tests = [
(Timestamp(63072008, 999999999), 0),
(Timestamp(63072009, 0), 10),
(Timestamp(78796809, 999999999), 10),
(Timestamp(78796810, 0), 11),
(Timestamp(94694410, 999999999), 11),
(Timestamp(94694411, 0), 12),
(Timestamp(417800721, 0), 21),
(Timestamp(773020827, 999999999), 28),
(Timestamp(773020828, 0), 29),
(Timestamp(1512491629, 0), 37),
]
for t in tests:
self.assertEqual(t[0].get_leap_seconds(), t[1])
| [
"mediatimestamp.immutable.Timestamp.from_iso8601_utc",
"mediatimestamp.immutable.Timestamp.from_sec_nsec",
"mediatimestamp.immutable.Timestamp.get_time",
"dateutil.tz.gettz",
"mediatimestamp.immutable.Timestamp.from_str",
"mediatimestamp.immutable.TimeOffset",
"mediatimestamp.immutable.Timestamp.from_datetime",
"mediatimestamp.immutable.mediatimestamp",
"mediatimestamp.immutable.Timestamp.from_sec_frac",
"mediatimestamp.immutable.mediatimeoffset",
"mediatimestamp.immutable.Timestamp.from_smpte_timelabel",
"mediatimestamp.immutable.TimeOffset.from_sec_nsec",
"mediatimestamp.immutable.TimeOffset.from_sec_frac",
"unittest.mock.patch",
"mediatimestamp.immutable.Timestamp"
] | [((971, 983), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', ([], {}), '()\n', (981, 983), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((1059, 1070), 'mediatimestamp.immutable.Timestamp', 'Timestamp', ([], {}), '()\n', (1068, 1070), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2440, 2456), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(0)'], {}), '(10, 0)\n', (2449, 2456), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2471, 2487), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)'], {}), '(1, 2)\n', (2481, 2487), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2549, 2565), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)'], {}), '(1, 2)\n', (2559, 2565), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2627, 2645), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(5)'], {}), '(100, 5)\n', (2637, 2645), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2711, 2748), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(281474976710655)', '(999999999)'], {}), '(281474976710655, 999999999)\n', (2720, 2748), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2763, 2779), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(1)'], {}), '(0, 1)\n', (2773, 2779), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2864, 2881), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(0)'], {}), '(10, 0)\n', (2874, 2881), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2898, 2916), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(0)'], {}), '(100, 0)\n', (2908, 2916), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2987, 3004), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(0)'], {}), '(10, 0)\n', (2997, 3004), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3021, 3037), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(1)'], {}), '(0, 1)\n', (3031, 3037), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3111, 3136), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(500000000)'], {}), '(10, 500000000)\n', 
(3121, 3136), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3153, 3177), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(500000000)'], {}), '(0, 500000000)\n', (3163, 3177), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3244, 3273), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(500000000)', '(-1)'], {}), '(10, 500000000, -1)\n', (3254, 3273), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3290, 3314), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(500000000)'], {}), '(0, 500000000)\n', (3300, 3314), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3385, 3406), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(0)', '(-1)'], {}), '(10, 0, -1)\n', (3395, 3406), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3423, 3447), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(500000000)'], {}), '(0, 500000000)\n', (3433, 3447), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((1218, 1236), 'mediatimestamp.immutable.mediatimestamp', 'mediatimestamp', (['ts'], {}), '(ts)\n', (1232, 1236), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((1267, 1286), 'mediatimestamp.immutable.mediatimeoffset', 'mediatimeoffset', (['ts'], {}), '(ts)\n', (1282, 1286), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((1589, 1606), 'mediatimestamp.immutable.mediatimestamp', 'mediatimestamp', (['c'], {}), '(c)\n', (1603, 1606), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((1637, 1655), 'mediatimestamp.immutable.mediatimeoffset', 'mediatimeoffset', (['c'], {}), '(c)\n', (1652, 1655), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2517, 2533), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(11)', '(2)'], {}), '(11, 2)\n', (2526, 2533), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2595, 2611), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(0)'], {}), '(10, 0)\n', (2604, 2611), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2675, 2695), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(90)', '(5)', '(-1)'], {}), '(90, 5, -1)\n', (2684, 2695), False, 'from 
mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2809, 2846), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(281474976710655)', '(999999999)'], {}), '(281474976710655, 999999999)\n', (2818, 2846), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2948, 2969), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(90)', '(0)', '(-1)'], {}), '(90, 0, -1)\n', (2958, 2969), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3069, 3093), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(9)', '(999999999)'], {}), '(9, 999999999)\n', (3079, 3093), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3209, 3226), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(11)', '(0)'], {}), '(11, 0)\n', (3219, 3226), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3346, 3367), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(11)', '(0)', '(-1)'], {}), '(11, 0, -1)\n', (3356, 3367), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3479, 3507), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(9)', '(500000000)', '(-1)'], {}), '(9, 500000000, -1)\n', (3489, 3507), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6727, 6742), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(2)'], {}), '(1, 2)\n', (6736, 6742), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6744, 6759), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(2)'], {}), '(1, 2)\n', (6753, 6759), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6789, 6804), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(2)'], {}), '(1, 2)\n', (6798, 6804), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6806, 6821), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(3)'], {}), '(1, 3)\n', (6815, 6821), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6847, 6862), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(0)'], {}), '(1, 0)\n', (6856, 6862), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6864, 6879), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(2)'], {}), '(1, 2)\n', (6873, 6879), False, 'from mediatimestamp.immutable import Timestamp, 
TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6910, 6925), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(2)'], {}), '(1, 2)\n', (6919, 6925), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6927, 6942), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(2)'], {}), '(1, 2)\n', (6936, 6942), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6971, 6986), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(2)', '(0)'], {}), '(2, 0)\n', (6980, 6986), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6988, 7003), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(0)'], {}), '(1, 0)\n', (6997, 7003), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7037, 7052), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(2)', '(0)'], {}), '(2, 0)\n', (7046, 7052), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7054, 7069), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(2)', '(0)'], {}), '(2, 0)\n', (7063, 7069), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7099, 7114), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(2)', '(0)'], {}), '(2, 0)\n', (7108, 7114), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7116, 7131), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(3)', '(0)'], {}), '(3, 0)\n', (7125, 7131), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7158, 7173), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(2)', '(0)'], {}), '(2, 0)\n', (7167, 7173), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7205, 7220), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(2)', '(0)'], {}), '(2, 0)\n', (7214, 7220), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7249, 7264), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(2)', '(0)'], {}), '(2, 0)\n', (7258, 7264), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7293, 7309), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(2)', '(0)'], {}), '(2, 0)\n', (7303, 7309), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7346, 7365), 'mediatimestamp.immutable.TimeOffset', 
'TimeOffset', (['(1)', '(0)', '(1)'], {}), '(1, 0, 1)\n', (7356, 7365), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7367, 7387), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(0)', '(-1)'], {}), '(1, 0, -1)\n', (7377, 7387), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8865, 8889), 'mediatimestamp.immutable.Timestamp.from_str', 'Timestamp.from_str', (['t[0]'], {}), '(t[0])\n', (8883, 8889), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((9558, 9588), 'mediatimestamp.immutable.TimeOffset.from_sec_nsec', 'TimeOffset.from_sec_nsec', (['t[0]'], {}), '(t[0])\n', (9582, 9588), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((10533, 10562), 'mediatimestamp.immutable.Timestamp.from_sec_nsec', 'Timestamp.from_sec_nsec', (['t[0]'], {}), '(t[0])\n', (10556, 10562), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12025, 12055), 'mediatimestamp.immutable.TimeOffset.from_sec_frac', 'TimeOffset.from_sec_frac', (['t[0]'], {}), '(t[0])\n', (12049, 12055), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((13188, 13217), 'mediatimestamp.immutable.Timestamp.from_sec_frac', 'Timestamp.from_sec_frac', (['t[0]'], {}), '(t[0])\n', (13211, 13217), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((15228, 15260), 'mediatimestamp.immutable.Timestamp.from_iso8601_utc', 'Timestamp.from_iso8601_utc', (['t[1]'], {}), '(t[1])\n', (15254, 15260), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((17155, 17191), 'mediatimestamp.immutable.Timestamp.from_smpte_timelabel', 'Timestamp.from_smpte_timelabel', (['t[0]'], {}), '(t[0])\n', (17185, 17191), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((18002, 18032), 'unittest.mock.patch', 'mock.patch', (['"""time.timezone"""', '(0)'], {}), "('time.timezone', 0)\n", (18012, 18032), False, 'from unittest import mock\n'), ((1404, 1415), 'mediatimestamp.immutable.Timestamp', 'Timestamp', ([], {}), '()\n', (1413, 1415), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((1937, 1966), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1512489451 + 37)', '(0)'], {}), '(1512489451 + 37, 0)\n', (1946, 1966), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((1996, 2033), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1512489451 + 37)', '(100000000)'], 
{}), '(1512489451 + 37, 100000000)\n', (2005, 2033), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((2093, 2116), 'unittest.mock.patch', 'mock.patch', (['"""time.time"""'], {}), "('time.time')\n", (2103, 2116), False, 'from unittest import mock\n'), ((2193, 2213), 'mediatimestamp.immutable.Timestamp.get_time', 'Timestamp.get_time', ([], {}), '()\n', (2211, 2213), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3637, 3653), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(0)'], {}), '(10, 0)\n', (3646, 3653), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3660, 3676), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)'], {}), '(1, 2)\n', (3670, 3676), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3678, 3694), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(11)', '(2)'], {}), '(11, 2)\n', (3687, 3694), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3710, 3726), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(11)', '(2)'], {}), '(11, 2)\n', (3719, 3726), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3733, 3749), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)'], {}), '(1, 2)\n', (3743, 3749), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3751, 3767), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(0)'], {}), '(10, 0)\n', (3760, 3767), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3783, 3800), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(11)', '(2)'], {}), '(11, 2)\n', (3793, 3800), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3807, 3823), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)'], {}), '(1, 2)\n', (3817, 3823), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3825, 3842), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(0)'], {}), '(10, 0)\n', (3835, 3842), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3858, 3874), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(0)'], {}), '(10, 0)\n', (3867, 3874), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3881, 3898), 'mediatimestamp.immutable.TimeOffset', 
'TimeOffset', (['(11)', '(2)'], {}), '(11, 2)\n', (3891, 3898), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3900, 3919), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(2)', '(-1)'], {}), '(1, 2, -1)\n', (3909, 3919), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3935, 3952), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(0)'], {}), '(10, 0)\n', (3945, 3952), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3959, 3976), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(11)', '(2)'], {}), '(11, 2)\n', (3969, 3976), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((3978, 3998), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)', '(-1)'], {}), '(1, 2, -1)\n', (3988, 3998), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4014, 4031), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(0)'], {}), '(10, 0)\n', (4024, 4031), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4038, 4054), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(11)', '(2)'], {}), '(11, 2)\n', (4047, 4054), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4056, 4076), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)', '(-1)'], {}), '(1, 2, -1)\n', (4066, 4076), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4092, 4108), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(0)'], {}), '(10, 0)\n', (4101, 4108), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4115, 4131), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(11)', '(2)'], {}), '(11, 2)\n', (4124, 4131), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4133, 4153), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)', '(-1)'], {}), '(1, 2, -1)\n', (4143, 4153), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4169, 4185), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(11)', '(2)'], {}), '(11, 2)\n', (4178, 4185), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4192, 4208), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(0)'], {}), '(10, 0)\n', (4201, 4208), False, 'from 
mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4210, 4229), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4220, 4229), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4867, 4885), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)'], {}), '(10, 10)\n', (4877, 4885), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4895, 4911), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(0)'], {}), '(0, 0)\n', (4905, 4911), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4927, 4945), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)'], {}), '(10, 10)\n', (4937, 4945), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((4956, 4976), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)'], {}), '(100, 100)\n', (4966, 4976), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5001, 5019), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)'], {}), '(10, 10)\n', (5011, 5019), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5021, 5041), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)'], {}), '(100, 100)\n', (5031, 5041), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5057, 5075), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)'], {}), '(10, 10)\n', (5067, 5075), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5089, 5113), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)', '(-1)'], {}), '(100, 100, -1)\n', (5099, 5113), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5129, 5151), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)', '(-1)'], {}), '(10, 10, -1)\n', (5139, 5151), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5162, 5186), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)', '(-1)'], {}), '(100, 100, -1)\n', (5172, 5186), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5202, 5222), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)'], {}), '(100, 100)\n', (5212, 5222), False, 'from mediatimestamp.immutable import 
Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5234, 5252), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)'], {}), '(10, 10)\n', (5244, 5252), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5268, 5288), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)'], {}), '(100, 100)\n', (5278, 5288), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5301, 5323), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)', '(-1)'], {}), '(10, 10, -1)\n', (5311, 5323), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5339, 5363), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)', '(-1)'], {}), '(100, 100, -1)\n', (5349, 5363), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5375, 5397), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)', '(-1)'], {}), '(10, 10, -1)\n', (5385, 5397), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5413, 5443), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(281474976710654)', '(0)'], {}), '(281474976710654, 0)\n', (5423, 5443), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5468, 5492), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(999999999)'], {}), '(0, 999999999)\n', (5478, 5492), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5508, 5527), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(100)', '(100)'], {}), '(100, 100)\n', (5517, 5527), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5539, 5557), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)'], {}), '(10, 10)\n', (5549, 5557), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5573, 5593), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)'], {}), '(100, 100)\n', (5583, 5593), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5604, 5622), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)'], {}), '(10, 10)\n', (5614, 5622), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5638, 5658), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)'], {}), '(100, 100)\n', (5648, 5658), False, 'from mediatimestamp.immutable import 
Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5670, 5692), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)', '(-1)'], {}), '(10, 10, -1)\n', (5680, 5692), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5708, 5732), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)', '(-1)'], {}), '(100, 100, -1)\n', (5718, 5732), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5743, 5765), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)', '(-1)'], {}), '(10, 10, -1)\n', (5753, 5765), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5781, 5811), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(281474976710654)', '(0)'], {}), '(281474976710654, 0)\n', (5791, 5811), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5835, 5859), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(999999999)'], {}), '(0, 999999999)\n', (5845, 5859), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5875, 5894), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(100)', '(100)'], {}), '(100, 100)\n', (5884, 5894), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5905, 5923), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(10)', '(10)'], {}), '(10, 10)\n', (5915, 5923), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5939, 5956), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(10)'], {}), '(10, 10)\n', (5948, 5956), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((5967, 5987), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)'], {}), '(100, 100)\n', (5977, 5987), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6012, 6029), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(10)', '(10)'], {}), '(10, 10)\n', (6021, 6029), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((6031, 6051), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(100)', '(100)'], {}), '(100, 100)\n', (6041, 6051), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((7744, 7765), 'mediatimestamp.immutable.Timestamp.from_str', 'Timestamp.from_str', (['t'], {}), '(t)\n', (7762, 7765), False, 'from mediatimestamp.immutable import Timestamp, 
TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8020, 8036), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(-1)', '(0)'], {}), '(-1, 0)\n', (8029, 8036), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8038, 8057), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(0)', '(-1)'], {}), '(1, 0, -1)\n', (8047, 8057), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8073, 8102), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(281474976710656)', '(0)'], {}), '(281474976710656, 0)\n', (8082, 8102), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8104, 8141), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(281474976710655)', '(999999999)'], {}), '(281474976710655, 999999999)\n', (8113, 8141), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8157, 8181), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(1000000000)'], {}), '(0, 1000000000)\n', (8166, 8181), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8183, 8198), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(0)'], {}), '(1, 0)\n', (8192, 8198), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8214, 8230), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(-1)'], {}), '(0, -1)\n', (8223, 8230), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8232, 8251), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(1)', '(-1)'], {}), '(0, 1, -1)\n', (8241, 8251), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8267, 8292), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(-1000000007)'], {}), '(5, -1000000007)\n', (8276, 8292), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8294, 8317), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(3)', '(999999993)'], {}), '(3, 999999993)\n', (8303, 8317), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8558, 8573), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(2)'], {}), '(1, 2)\n', (8567, 8573), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8596, 8619), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(200000000)'], {}), '(1, 200000000)\n', (8605, 8619), False, 'from mediatimestamp.immutable import Timestamp, 
TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8640, 8655), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1)', '(0)'], {}), '(1, 0)\n', (8649, 8655), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8697, 8729), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1424177663)', '(500000000)'], {}), '(1424177663, 500000000)\n', (8706, 8729), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((8779, 8808), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1424177663)', '(102003)'], {}), '(1424177663, 102003)\n', (8788, 8808), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((9168, 9184), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(0)'], {}), '(0, 0)\n', (9178, 9184), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((9214, 9230), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(1)'], {}), '(0, 1)\n', (9224, 9230), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((9261, 9281), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(1)', '(-1)'], {}), '(0, 1, -1)\n', (9271, 9281), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((9310, 9326), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(0)'], {}), '(5, 0)\n', (9320, 9326), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((9356, 9372), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(1)'], {}), '(5, 1)\n', (9366, 9372), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((9403, 9423), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(1)', '(-1)'], {}), '(5, 1, -1)\n', (9413, 9423), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((9462, 9486), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(999999999)'], {}), '(5, 999999999)\n', (9472, 9486), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((10150, 10165), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(0)'], {}), '(0, 0)\n', (10159, 10165), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((10195, 10210), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(1)'], {}), '(0, 1)\n', (10204, 10210), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, 
SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((10241, 10260), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(1)', '(-1)'], {}), '(0, 1, -1)\n', (10250, 10260), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((10289, 10304), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(0)'], {}), '(5, 0)\n', (10298, 10304), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((10334, 10349), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(1)'], {}), '(5, 1)\n', (10343, 10349), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((10380, 10399), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(1)', '(-1)'], {}), '(5, 1, -1)\n', (10389, 10399), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((10438, 10461), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(999999999)'], {}), '(5, 999999999)\n', (10447, 10461), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11390, 11406), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(0)'], {}), '(0, 0)\n', (11400, 11406), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11436, 11467), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(1000000000 // 10)'], {}), '(0, 1000000000 // 10)\n', (11446, 11467), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11498, 11533), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(0)', '(1000000000 // 10)', '(-1)'], {}), '(0, 1000000000 // 10, -1)\n', (11508, 11533), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11562, 11578), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(0)'], {}), '(5, 0)\n', (11572, 11578), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11608, 11639), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(1000000000 // 10)'], {}), '(5, 1000000000 // 10)\n', (11618, 11639), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11670, 11705), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(1000000000 // 10)', '(-1)'], {}), '(5, 1000000000 // 10, -1)\n', (11680, 11705), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11743, 11774), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(1000000000 // 10)'], {}), '(5, 1000000000 // 10)\n', (11753, 
11774), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11812, 11836), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(123456789)'], {}), '(5, 123456789)\n', (11822, 11836), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11882, 11898), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(1)'], {}), '(5, 1)\n', (11892, 11898), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((11945, 11961), 'mediatimestamp.immutable.TimeOffset', 'TimeOffset', (['(5)', '(0)'], {}), '(5, 0)\n', (11955, 11961), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12577, 12592), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(0)'], {}), '(0, 0)\n', (12586, 12592), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12622, 12652), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(1000000000 // 10)'], {}), '(0, 1000000000 // 10)\n', (12631, 12652), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12683, 12710), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(100000000)', '(-1)'], {}), '(0, 100000000, -1)\n', (12692, 12710), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12739, 12754), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(0)'], {}), '(5, 0)\n', (12748, 12754), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12784, 12814), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(1000000000 // 10)'], {}), '(5, 1000000000 // 10)\n', (12793, 12814), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12845, 12872), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(100000000)', '(-1)'], {}), '(5, 100000000, -1)\n', (12854, 12872), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12910, 12940), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(1000000000 // 10)'], {}), '(5, 1000000000 // 10)\n', (12919, 12940), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((12978, 13001), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(123456789)'], {}), '(5, 123456789)\n', (12987, 13001), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((13047, 13062), 
'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(1)'], {}), '(5, 1)\n', (13056, 13062), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((13109, 13124), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(5)', '(0)'], {}), '(5, 0)\n', (13118, 13124), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((13975, 14004), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1424177663)', '(102003)'], {}), '(1424177663, 102003)\n', (13984, 14004), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14184, 14206), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(78796809)', '(0)'], {}), '(78796809, 0)\n', (14193, 14206), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14343, 14365), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(78796810)', '(0)'], {}), '(78796810, 0)\n', (14352, 14365), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14494, 14516), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(78796811)', '(0)'], {}), '(78796811, 0)\n', (14503, 14516), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14567, 14591), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1341100833)', '(0)'], {}), '(1341100833, 0)\n', (14576, 14591), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14641, 14665), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1341100834)', '(0)'], {}), '(1341100834, 0)\n', (14650, 14665), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14715, 14739), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1341100835)', '(0)'], {}), '(1341100835, 0)\n', (14724, 14739), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14790, 14814), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1341100835)', '(1)'], {}), '(1341100835, 1)\n', (14799, 14814), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14864, 14896), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1341100835)', '(100000000)'], {}), '(1341100835, 100000000)\n', (14873, 14896), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((14946, 14978), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1341100835)', '(999999999)'], {}), '(1341100835, 999999999)\n', (14955, 14978), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, 
SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((15029, 15052), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(283996818)', '(0)'], {}), '(283996818, 0)\n', (15038, 15052), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((15581, 15611), 'mediatimestamp.immutable.Timestamp.from_iso8601_utc', 'Timestamp.from_iso8601_utc', (['*p'], {}), '(*p)\n', (15607, 15611), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((17741, 17775), 'mediatimestamp.immutable.Timestamp.from_smpte_timelabel', 'Timestamp.from_smpte_timelabel', (['*p'], {}), '(*p)\n', (17771, 17775), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((18051, 18079), 'unittest.mock.patch', 'mock.patch', (['"""time.localtime"""'], {}), "('time.localtime')\n", (18061, 18079), False, 'from unittest import mock\n'), ((18167, 18256), 'mediatimestamp.immutable.Timestamp.from_smpte_timelabel', 'Timestamp.from_smpte_timelabel', (['"""2015-07-01T00:59:59F00 30000/1001 UTC+01:00 TAI-35"""'], {}), "(\n '2015-07-01T00:59:59F00 30000/1001 UTC+01:00 TAI-35')\n", (18197, 18256), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((18582, 18597), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(0)'], {}), '(0, 0)\n', (18591, 18597), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((18667, 18690), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(417800721)', '(0)'], {}), '(417800721, 0)\n', (18676, 18690), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((18763, 18792), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1512491629)', '(196000)'], {}), '(1512491629, 196000)\n', (18772, 18792), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((18859, 18888), 'mediatimestamp.immutable.Timestamp.from_datetime', 'Timestamp.from_datetime', (['t[0]'], {}), '(t[0])\n', (18882, 18888), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((19066, 19081), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(0)'], {}), '(0, 0)\n', (19075, 19081), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((19151, 19174), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(417800721)', '(0)'], {}), '(417800721, 0)\n', (19160, 19174), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((19247, 19276), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1512491629)', '(196000)'], {}), '(1512491629, 196000)\n', (19256, 19276), False, 'from 
mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((19347, 19379), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1512491629)', '(999999999)'], {}), '(1512491629, 999999999)\n', (19356, 19379), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((19634, 19665), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1422034531)', '(17100000)'], {}), '(1422034531, 17100000)\n', (19643, 19665), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((19707, 19731), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1422016531)', '(0)'], {}), '(1422016531, 0)\n', (19716, 19731), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((19754, 19769), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(0)'], {}), '(0, 0)\n', (19763, 19769), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((19824, 19865), 'unittest.mock.patch', 'mock.patch', (['"""time.time"""'], {'return_value': '(0.0)'}), "('time.time', return_value=0.0)\n", (19834, 19865), False, 'from unittest import mock\n'), ((20107, 20137), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(63072008)', '(999999999)'], {}), '(63072008, 999999999)\n', (20116, 20137), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20156, 20178), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(63072009)', '(0)'], {}), '(63072009, 0)\n', (20165, 20178), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20198, 20228), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(78796809)', '(999999999)'], {}), '(78796809, 999999999)\n', (20207, 20228), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20248, 20270), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(78796810)', '(0)'], {}), '(78796810, 0)\n', (20257, 20270), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20290, 20320), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(94694410)', '(999999999)'], {}), '(94694410, 999999999)\n', (20299, 20320), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20340, 20362), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(94694411)', '(0)'], {}), '(94694411, 0)\n', (20349, 20362), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20382, 20405), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(417800721)', '(0)'], {}), '(417800721, 0)\n', (20391, 
20405), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20425, 20456), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(773020827)', '(999999999)'], {}), '(773020827, 999999999)\n', (20434, 20456), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20476, 20499), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(773020828)', '(0)'], {}), '(773020828, 0)\n', (20485, 20499), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((20519, 20543), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(1512491629)', '(0)'], {}), '(1512491629, 0)\n', (20528, 20543), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((18564, 18579), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (18572, 18579), False, 'from dateutil import tz\n'), ((18649, 18664), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (18657, 18664), False, 'from dateutil import tz\n'), ((18745, 18760), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (18753, 18760), False, 'from dateutil import tz\n'), ((19048, 19063), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (19056, 19063), False, 'from dateutil import tz\n'), ((19133, 19148), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (19141, 19148), False, 'from dateutil import tz\n'), ((19229, 19244), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (19237, 19244), False, 'from dateutil import tz\n'), ((19329, 19344), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (19337, 19344), False, 'from dateutil import tz\n'), ((19900, 19924), 'mediatimestamp.immutable.Timestamp.from_str', 'Timestamp.from_str', (['t[0]'], {}), '(t[0])\n', (19918, 19924), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n'), ((17949, 17964), 'mediatimestamp.immutable.Timestamp', 'Timestamp', (['(0)', '(0)'], {}), '(0, 0)\n', (17958, 17964), False, 'from mediatimestamp.immutable import Timestamp, TimeOffset, TsValueError, mediatimestamp, SupportsMediaTimestamp, SupportsMediaTimeOffset, mediatimeoffset\n')] |
"""Testing the module delta tables."""
from pyspark.sql import types as T
from getl.common.delta_table import DeltaTable
def create_dataframe(spark_session, data):
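    # Helper: build a two-column (file_path, file_desc) string DataFrame with an explicit schema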
schema = T.StructType(
[
T.StructField("file_path", T.StringType(), True),
T.StructField("file_desc", T.StringType(), True),
]
)
return spark_session.createDataFrame(data, schema)
def test_upsert_all(spark_session, tmp_dir):
"""Correct parameters are passed to the upsert all fuction."""
# ARRANGE
create_dataframe(
spark_session,
[
("path/to/file1", "about stuff"),
("path/to/file2", "gloomhaven is a nice place"),
],
).write.save(tmp_dir, format="delta")
update_df = create_dataframe(
spark_session,
[
("path/to/file2", "gloomhaven is a bad place"),
("path/to/file3", "my little haven"),
],
)
delta_table = DeltaTable(path=tmp_dir, spark=spark_session)
# ACT
dataframe = delta_table.upsert_all(
update_df, merge_statement="source.file_path = updates.file_path"
)
    # ASSERT
assert dataframe.collect()[0][1] == "gloomhaven is a bad place"
assert dataframe.collect()[1][1] == "my little haven"
assert dataframe.collect()[2][1] == "about stuff"
| [
"pyspark.sql.types.StringType",
"getl.common.delta_table.DeltaTable"
] | [((957, 1002), 'getl.common.delta_table.DeltaTable', 'DeltaTable', ([], {'path': 'tmp_dir', 'spark': 'spark_session'}), '(path=tmp_dir, spark=spark_session)\n', (967, 1002), False, 'from getl.common.delta_table import DeltaTable\n'), ((243, 257), 'pyspark.sql.types.StringType', 'T.StringType', ([], {}), '()\n', (255, 257), True, 'from pyspark.sql import types as T\n'), ((305, 319), 'pyspark.sql.types.StringType', 'T.StringType', ([], {}), '()\n', (317, 319), True, 'from pyspark.sql import types as T\n')] |
import sys
import os
import datetime
from pkg_resources import resource_filename
import textwrap
from io import StringIO
import nose.tools as nt
from nose.plugins.attrib import attr
from unittest import mock
import numpy.testing as nptest
import pandas.util.testing as pdtest
import numpy as np
import pandas
import pyodbc
import wqio
from wqio import utils
from pycvc import dataAccess, external
def test__fix_nsqd_bacteria_units():
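    # Bacteria results reported as MPN/100 mL should be relabelled CFU/100 mL; non-bacteria units are left unchanged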
cols = ['param', 'conc_units', 'res']
inputdf = pandas.DataFrame({
'conc_units': ['MPN/100 mL', 'MPN/100 mL', 'CFU/100 mL', 'ug/L'],
'param': ['E Coli', 'E Coli', 'Fecal', 'Copper'],
'res': [1, 2, 3, 4]
})
outputdf = external._fix_nsqd_bacteria_units(inputdf, unitscol='conc_units')
expected = pandas.DataFrame({
'conc_units': ['CFU/100 mL', 'CFU/100 mL', 'CFU/100 mL', 'ug/L'],
'param': ['E Coli', 'E Coli', 'Fecal', 'Copper'],
'res': [1, 2, 3, 4]
})
pdtest.assert_frame_equal(outputdf[cols], expected[cols])
| [
"pandas.DataFrame",
"pycvc.external._fix_nsqd_bacteria_units",
"pandas.util.testing.assert_frame_equal"
] | [((523, 686), 'pandas.DataFrame', 'pandas.DataFrame', (["{'conc_units': ['MPN/100 mL', 'MPN/100 mL', 'CFU/100 mL', 'ug/L'], 'param':\n ['E Coli', 'E Coli', 'Fecal', 'Copper'], 'res': [1, 2, 3, 4]}"], {}), "({'conc_units': ['MPN/100 mL', 'MPN/100 mL', 'CFU/100 mL',\n 'ug/L'], 'param': ['E Coli', 'E Coli', 'Fecal', 'Copper'], 'res': [1, 2,\n 3, 4]})\n", (539, 686), False, 'import pandas\n'), ((725, 790), 'pycvc.external._fix_nsqd_bacteria_units', 'external._fix_nsqd_bacteria_units', (['inputdf'], {'unitscol': '"""conc_units"""'}), "(inputdf, unitscol='conc_units')\n", (758, 790), False, 'from pycvc import dataAccess, external\n'), ((806, 969), 'pandas.DataFrame', 'pandas.DataFrame', (["{'conc_units': ['CFU/100 mL', 'CFU/100 mL', 'CFU/100 mL', 'ug/L'], 'param':\n ['E Coli', 'E Coli', 'Fecal', 'Copper'], 'res': [1, 2, 3, 4]}"], {}), "({'conc_units': ['CFU/100 mL', 'CFU/100 mL', 'CFU/100 mL',\n 'ug/L'], 'param': ['E Coli', 'E Coli', 'Fecal', 'Copper'], 'res': [1, 2,\n 3, 4]})\n", (822, 969), False, 'import pandas\n'), ((997, 1054), 'pandas.util.testing.assert_frame_equal', 'pdtest.assert_frame_equal', (['outputdf[cols]', 'expected[cols]'], {}), '(outputdf[cols], expected[cols])\n', (1022, 1054), True, 'import pandas.util.testing as pdtest\n')] |
import warnings
from datetime import datetime, date
import pandas as pd
import numpy as np
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import itertools
import streamlit as st
import yfinance as yf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
from yahooquery import Ticker
import src.tools.functions as f0
warnings.filterwarnings("ignore")
pd.plotting.register_matplotlib_converters()
plt.style.use("seaborn-poster")
sm, med, lg = "20", "25", "30"
plt.rcParams["font.size"] = sm # controls default text sizes
plt.rc("axes", titlesize=med) # fontsize of the axes title
plt.rc("axes", labelsize=med) # fontsize of the x & y labels
plt.rc("xtick", labelsize=sm) # fontsize of the tick labels
plt.rc("ytick", labelsize=sm) # fontsize of the tick labels
plt.rc("legend", fontsize=sm) # legend fontsize
plt.rc("figure", titlesize=lg) # fontsize of the figure title
plt.rc("axes", linewidth=2) # linewidth of plot lines
plt.rcParams["figure.figsize"] = [20, 10]
plt.rcParams["figure.dpi"] = 100
plt.rcParams["axes.facecolor"] = "silver"
class The_SARIMA_Model(object):
def __init__(self, stock):
self.sss = stock
self.company = f0.company_longName(self.sss)
def dataHull(self):
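        # Pull the full daily price history, keep the Close series and resample it to business-month-end for modelling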
self.start = "2011-10-01"
self.end = "2021-10-19"
self.x_data = yf.download(self.sss, start=self.end)["Adj Close"]
self.x_data.columns = [self.company]
self.spData = yf.download(self.sss, period='max')
self.spData = pd.DataFrame(self.spData.loc[:self.end])
self.dataSP = pd.DataFrame(self.spData["Close"])
self.dataSP.columns = [self.sss]
self.dataSP.index = pd.to_datetime(self.dataSP.index)
self.df_settle = self.spData["Close"].resample("BM").ffill().dropna()
self.df_rolling = self.df_settle.rolling(12)
self.df_mean = self.df_rolling.mean()
self.df_std = self.df_rolling.std()
def adf(self):
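        # Augmented Dickey-Fuller stationarity checks on the settle series and its log-detrended form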
self.dataHull()
self.result = adfuller(self.df_settle)
self.critical_values = self.result[4]
self.df_log = np.log(self.df_settle)
self.df_log_ma = self.df_log.rolling(2).mean()
self.df_detrend = self.df_log - self.df_log_ma
self.df_detrend.dropna(inplace=True)
# Mean and standard deviation of detrended data
self.df_detrend_rolling = self.df_detrend.rolling(12)
self.df_detrend_ma = self.df_detrend_rolling.mean()
self.df_detrend_std = self.df_detrend_rolling.std()
self.result2 = adfuller(self.df_detrend)
self.critical_values2 = self.result2[4]
self.df_log_diff = self.df_log.diff(periods=3).dropna()
# Mean and standard deviation of differenced data
self.df_diff_rolling = self.df_log_diff.rolling(12)
self.df_diff_ma = self.df_diff_rolling.mean()
self.df_diff_std = self.df_diff_rolling.std()
def seasonal_decomp(self):
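        # Decompose the log series into trend/seasonal/residual components and re-test the residual for stationarity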
self.adf()
self.decompose_result = seasonal_decompose(self.df_log.dropna(), period=12)
self.df_trend = self.decompose_result.trend
self.df_season = self.decompose_result.seasonal
self.df_residual = self.decompose_result.resid
self.df_log_diff = self.df_residual.diff().dropna()
# Mean and standard deviation of differenced data
self.df_diff_rolling = self.df_log_diff.rolling(12)
self.df_diff_ma = self.df_diff_rolling.mean()
self.df_diff_std = self.df_diff_rolling.std()
self.result = adfuller(self.df_residual.dropna())
self.critical_values = self.result[4]
def arima_grid_search(self, s=12):
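        # Exhaustive search over (p, d, q) in {0, 1} with seasonal period s, keeping the combination with the lowest AIC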
self.seasonal_decomp()
self.s = s
self.p = self.d = self.q = range(2)
self.param_combinations = list(itertools.product(self.p, self.d, self.q))
self.lowest_aic, self.pdq, self.pdqs = None, None, None
self.total_iterations = 0
for order in self.param_combinations:
for (self.p, self.q, self.d) in self.param_combinations:
self.seasonal_order = (self.p, self.q, self.d, self.s)
self.total_iterations += 1
try:
self.model = SARIMAX(
self.df_settle,
order=order,
seasonal_order=self.seasonal_order,
enforce_stationarity=False,
enforce_invertibility=False,
disp=False,
)
self.model_result = self.model.fit(maxiter=200, disp=False)
if not self.lowest_aic or self.model_result.aic < self.lowest_aic:
self.lowest_aic = self.model_result.aic
self.pdq, self.pdqs = order, self.seasonal_order
except Exception:
continue
return self.lowest_aic, self.pdq, self.pdqs
def fitModel_to_SARIMAX(self):
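        # Refit SARIMAX on the monthly settle series using the order selected by the grid search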
self.arima_grid_search()
self.model = SARIMAX(
self.df_settle,
order=self.pdq,
            seasonal_order=self.pdqs,  # use the best seasonal order found by the grid search, not the last one tried
enforce_stationarity=True,
enforce_invertibility=True,
disp=False,
)
self.model_results = self.model.fit(maxiter=200, disp=False)
return self.model_results
def predict(self):
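        # Forecast 12 months beyond the sample, then plot the series with its 95% confidence band and training/testing/forecast shading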
self.fitModel_to_SARIMAX()
self.n = len(self.df_settle.index)
self.prediction = self.model_results.get_prediction(start=self.n - 12 * 5, end=self.n + 12)
self.prediction_ci = self.prediction.conf_int()
self.prediction_ci.columns=['Lower_Confidence_Boundary', 'Upper_Confidence_Boundary']
fig, ax = plt.subplots()
ax = self.df_settle['2019':].plot(label='Live_Price', color='k')
self.prediction_ci['2019':].plot(
ax=ax,
style=['--', '--'],
color=['r','g'],
label='predicted/forecasted',
)
ci_index = self.prediction_ci.index
lower_ci = self.prediction_ci.iloc[:, 0]
upper_ci = self.prediction_ci.iloc[:, 1]
ax.fill_between(
ci_index,
lower_ci,
upper_ci,
color='c',
alpha=.01,
label='95% Confidence Interval'
)
ax.fill_between(
ci_index,
(self.prediction_ci.iloc[:, 0]),
(self.prediction_ci.iloc[:, 1]),
color='r',
where=ci_index<'2020 11/30',
alpha=.2,
label='Training'
)
ax.fill_between(
ci_index,
(self.prediction_ci.iloc[:, 0]),
(self.prediction_ci.iloc[:, 1]),
color='gold',
where=ci_index.isin(ci_index[43:60]),
alpha=.2,
label='Testing'
)
ax.fill_between(
ci_index,
(self.prediction_ci.iloc[:, 0]),
(self.prediction_ci.iloc[:, 1]),
color='darkgreen',
where=ci_index.isin(ci_index[59:]),
alpha=.2,
label='Forecast'
)
ax.set_xlabel('Time (years)')
ax.set_ylabel('Prices')
ax.axvline(x='2020 06/25', color = 'k')
ax.axvline(x='2021 10/25', color = 'k')
ax.set_facecolor('white')
plt.grid(True, which='major', axis='both', color='k', alpha=.34)
ax.legend()
plt.title('SARIMA FORECAST')
l = plt.legend(loc='best', shadow=True, fontsize='x-large')
for text in l.get_texts():
text.set_color("k")
text.set_fontweight(13)
text.set_fontsize(13)
l.get_frame().set_facecolor('white');
st.pyplot(fig)
| [
"matplotlib.pyplot.grid",
"streamlit.pyplot",
"statsmodels.tsa.stattools.adfuller",
"matplotlib.pyplot.legend",
"pandas.to_datetime",
"numpy.log",
"matplotlib.pyplot.style.use",
"itertools.product",
"yfinance.download",
"src.tools.functions.company_longName",
"matplotlib.pyplot.subplots",
"pandas.plotting.register_matplotlib_converters",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"warnings.filterwarnings",
"matplotlib.pyplot.rc",
"statsmodels.tsa.statespace.sarimax.SARIMAX"
] | [((459, 492), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (482, 492), False, 'import warnings\n'), ((493, 537), 'pandas.plotting.register_matplotlib_converters', 'pd.plotting.register_matplotlib_converters', ([], {}), '()\n', (535, 537), True, 'import pandas as pd\n'), ((538, 569), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-poster"""'], {}), "('seaborn-poster')\n", (551, 569), True, 'import matplotlib.pyplot as plt\n'), ((663, 692), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'med'}), "('axes', titlesize=med)\n", (669, 692), True, 'import matplotlib.pyplot as plt\n'), ((723, 752), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'med'}), "('axes', labelsize=med)\n", (729, 752), True, 'import matplotlib.pyplot as plt\n'), ((785, 814), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'sm'}), "('xtick', labelsize=sm)\n", (791, 814), True, 'import matplotlib.pyplot as plt\n'), ((846, 875), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'sm'}), "('ytick', labelsize=sm)\n", (852, 875), True, 'import matplotlib.pyplot as plt\n'), ((907, 936), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'sm'}), "('legend', fontsize=sm)\n", (913, 936), True, 'import matplotlib.pyplot as plt\n'), ((956, 986), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'lg'}), "('figure', titlesize=lg)\n", (962, 986), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1046), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'linewidth': '(2)'}), "('axes', linewidth=2)\n", (1025, 1046), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1337), 'src.tools.functions.company_longName', 'f0.company_longName', (['self.sss'], {}), '(self.sss)\n', (1327, 1337), True, 'import src.tools.functions as f0\n'), ((1571, 1606), 'yfinance.download', 'yf.download', (['self.sss'], {'period': '"""max"""'}), "(self.sss, period='max')\n", (1582, 1606), True, 'import yfinance as yf\n'), ((1629, 1669), 'pandas.DataFrame', 'pd.DataFrame', (['self.spData.loc[:self.end]'], {}), '(self.spData.loc[:self.end])\n', (1641, 1669), True, 'import pandas as pd\n'), ((1692, 1726), 'pandas.DataFrame', 'pd.DataFrame', (["self.spData['Close']"], {}), "(self.spData['Close'])\n", (1704, 1726), True, 'import pandas as pd\n'), ((1796, 1829), 'pandas.to_datetime', 'pd.to_datetime', (['self.dataSP.index'], {}), '(self.dataSP.index)\n', (1810, 1829), True, 'import pandas as pd\n'), ((2119, 2143), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (['self.df_settle'], {}), '(self.df_settle)\n', (2127, 2143), False, 'from statsmodels.tsa.stattools import adfuller\n'), ((2212, 2234), 'numpy.log', 'np.log', (['self.df_settle'], {}), '(self.df_settle)\n', (2218, 2234), True, 'import numpy as np\n'), ((2653, 2678), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (['self.df_detrend'], {}), '(self.df_detrend)\n', (2661, 2678), False, 'from statsmodels.tsa.stattools import adfuller\n'), ((5125, 5271), 'statsmodels.tsa.statespace.sarimax.SARIMAX', 'SARIMAX', (['self.df_settle'], {'order': 'self.pdq', 'seasonal_order': 'self.seasonal_order', 'enforce_stationarity': '(True)', 'enforce_invertibility': '(True)', 'disp': '(False)'}), '(self.df_settle, order=self.pdq, seasonal_order=self.seasonal_order,\n enforce_stationarity=True, enforce_invertibility=True, disp=False)\n', (5132, 5271), False, 'from statsmodels.tsa.statespace.sarimax import SARIMAX\n'), ((5843, 5857), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {}), '()\n', (5855, 5857), True, 'import matplotlib.pyplot as plt\n'), ((7488, 7553), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""major"""', 'axis': '"""both"""', 'color': '"""k"""', 'alpha': '(0.34)'}), "(True, which='major', axis='both', color='k', alpha=0.34)\n", (7496, 7553), True, 'import matplotlib.pyplot as plt\n'), ((7581, 7609), 'matplotlib.pyplot.title', 'plt.title', (['"""SARIMA FORECAST"""'], {}), "('SARIMA FORECAST')\n", (7590, 7609), True, 'import matplotlib.pyplot as plt\n'), ((7622, 7677), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'shadow': '(True)', 'fontsize': '"""x-large"""'}), "(loc='best', shadow=True, fontsize='x-large')\n", (7632, 7677), True, 'import matplotlib.pyplot as plt\n'), ((7869, 7883), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (7878, 7883), True, 'import streamlit as st\n'), ((1452, 1489), 'yfinance.download', 'yf.download', (['self.sss'], {'start': 'self.end'}), '(self.sss, start=self.end)\n', (1463, 1489), True, 'import yfinance as yf\n'), ((3882, 3923), 'itertools.product', 'itertools.product', (['self.p', 'self.d', 'self.q'], {}), '(self.p, self.d, self.q)\n', (3899, 3923), False, 'import itertools\n'), ((4306, 4451), 'statsmodels.tsa.statespace.sarimax.SARIMAX', 'SARIMAX', (['self.df_settle'], {'order': 'order', 'seasonal_order': 'self.seasonal_order', 'enforce_stationarity': '(False)', 'enforce_invertibility': '(False)', 'disp': '(False)'}), '(self.df_settle, order=order, seasonal_order=self.seasonal_order,\n enforce_stationarity=False, enforce_invertibility=False, disp=False)\n', (4313, 4451), False, 'from statsmodels.tsa.statespace.sarimax import SARIMAX\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 14:02:54 2017 - v1.0 Finalised Fri Apr 13
@author: michaelhodge
"""
#A script to perform a misfit analysis between manual and algorithm methods
#to identify the best performing parameter space
#Loads packages required
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import copy
from Algorithm_misfit import algorithm_misfit
#Creates blank variables
prof_height_subsample=np.zeros((num_subsample,nump))
prof_distance_subsample=np.zeros((num_subsample,nump))
# Creates subsample of data for analysis
n=-1
for i in range (0,num_profiles,subsample):
n=n+1
prof_height_subsample[n,:]=prof_height[i,:]
prof_distance_subsample[n,:]=prof_distance[i,:]
iterations=num_subsample
print ('Running Algorithm on Sub Sampled Catalog of size %d, Please Wait...' % (iterations))
#Run the smoothing and misfit analysis on the subsampled data set
#Choose minimum and maximum filter bin size (bin_min, bin_max) and step between (bin_step).
#Choose minimum and maximum slope threshold (theta_T_min, theta_T_max) and step between (theta_T_step)
#Choose derivative of slope threshold (phi_T)
bin_max = 40
bin_min = 9 #needs to be an odd integer
bin_step = 4 #needs to be an even integer
theta_T_max = 40 #insert positive integer here, turns to negative later
theta_T_min = 7 #insert positive integer here, turns to negative later
theta_T_step = 4 #insert positive integer here, turns to negative later
phi_T = 5
#---IMPORTANT---
#Choose two types of filter method to compare: 1 - None; 2 - Average;
#3 - Sav Gol; 4 - Median; 5 - Lowess
#Comment out filters not needed
#---ANALYSIS 1----
#method = 1 #No smoothing
#method_name_1 = 'None'
#analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (No Filter), Please Wait...')
#
#method = 2 #Average smoothing
#method_name_1 = 'Average'
#analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Average Filter), Please Wait...')
method = 3 #Sav Gol smoothing
method_name_1 = 'Sav Gol'
analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
print ('Finished Analysis (Savitzky-Golay Filter), Please Wait...')
#method = 4 #Median smoothing
#method_name_1 = 'Median'
#analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Median Filter), Please Wait...')
#
#method = 5 #Lowess smoothing
#method_name_1 = 'Lowess'
#analysis_1=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Lowess Filter), Please Wait...')
analysis_number_1=method
#---IMPORTANT---
#---ANALYSIS 2----
#method = 1 #No smoothing
#method_name_2 = 'None'
#analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (No Filter), Please Wait...')
#
#method = 2 #Average smoothing
#method_name_2 = 'Average'
#analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Average Filter), Please Wait...')
#
#method = 3 #Sav Gol smoothing
#method_name_2 = 'Sav Gol'
#analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Savitzky-Golay Filter), Please Wait...')
#
#method = 4 #Median smoothing
#method_name_2 = 'Median'
#analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
#print ('Finished Analysis (Median Filter), Please Wait...')
method = 5 #Lowess smoothing
method_name_2 = 'Lowess'
analysis_2=algorithm_misfit(prof_distance_subsample,prof_height_subsample,h_manual,w_manual,slope_manual,nump,iterations,method,bin_max,bin_min,bin_step,theta_T_max,theta_T_min,theta_T_step,phi_T)
print ('Finished Analysis (Lowess Filter), Please Wait...')
analysis_number_2=method
#Output values for ANALYSIS 1
h_1=analysis_1[0] #scarp height
w_1=analysis_1[1] #scarp width
slope_1=analysis_1[2] #scarp slope
misfit_height_1=analysis_1[3] #misfit height
misfit_width_1=analysis_1[4] #misfit width
misfit_slope_1=analysis_1[5] #misfit slope
misfit_height_average_1=analysis_1[6] #average misfit height
misfit_width_average_1=analysis_1[7] #average misfit width
misfit_slope_average_1=analysis_1[8] #average misfit slope
#Output values for ANALYSIS 2
h_2=analysis_2[0] #scarp height
w_2=analysis_2[1] #scarp width
slope_2=analysis_2[2] #scarp slope
misfit_height_2=analysis_2[3] #misfit height
misfit_width_2=analysis_2[4] #misfit width
misfit_slope_2=analysis_2[5] #misfit slope
misfit_height_average_2=analysis_2[6] #average misfit height
misfit_width_average_2=analysis_2[7] #average misfit width
misfit_slope_average_2=analysis_2[8] #average misfit slope
#Grid setup
gridx=analysis_1[9]
gridy=analysis_1[10]
#Dump save analysis
with open('Misfit_Analysis.pickle', 'wb') as f:
pickle.dump(h_1, f)
pickle.dump(h_2, f)
pickle.dump(w_1, f)
pickle.dump(w_2, f)
pickle.dump(slope_1, f)
pickle.dump(slope_2, f)
pickle.dump(misfit_height_1, f)
pickle.dump(misfit_height_2, f)
pickle.dump(misfit_width_1, f)
pickle.dump(misfit_width_2, f)
pickle.dump(misfit_slope_1, f)
pickle.dump(misfit_slope_2, f)
pickle.dump(misfit_height_average_1, f)
pickle.dump(misfit_height_average_2, f)
pickle.dump(misfit_width_average_1, f)
pickle.dump(misfit_width_average_2, f)
pickle.dump(misfit_slope_average_1, f)
pickle.dump(misfit_slope_average_2, f)
pickle.dump(gridx, f)
pickle.dump(gridy, f)
#Count the number of profiles where the scarp parameters were successfully calculated
misfit_height_1_min=np.zeros((iterations,1))
misfit_height_2_min=np.zeros((iterations,1))
misfit_height_1_count=np.zeros((len(misfit_height_average_1[:,1]),(len(misfit_height_average_1[1,:]))))
misfit_height_2_count=np.zeros((len(misfit_height_average_2[:,1]),(len(misfit_height_average_2[1,:]))))
for i in range (0,iterations):
misfit_height_1_min[i]=np.ndarray.min(abs(misfit_height_1[i,:,:]))
misfit_height_2_min[i]=np.ndarray.min(abs(misfit_height_2[i,:,:]))
misfit_height_1_count_all=np.count_nonzero(~np.isnan(misfit_height_1_min))
misfit_height_2_count_all=np.count_nonzero(~np.isnan(misfit_height_2_min))
for m in range (0,(len(misfit_height_average_1[:,1]))):
for n in range (0,(len(misfit_height_average_1[1,:]))):
misfit_height_1_count[m,n]=np.count_nonzero(~np.isnan(misfit_height_1[:,m,n]))
for m in range (0,(len(misfit_height_average_1[:,1]))):
for n in range (0,(len(misfit_height_average_1[1,:]))):
misfit_height_2_count[m,n]=np.count_nonzero(~np.isnan(misfit_height_2[:,m,n]))
#Determining the best parameter space
value = 0.0
count_min=0.5 #Minimum fraction of profiles that must be measured successfully (normally 50%, i.e. 0.5)
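#Combined objective: total average |misfit| (height + width + slope) penalised by the fraction of profiles successfully measured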
A=(abs(misfit_height_average_1)+abs(misfit_width_average_1)+abs(misfit_slope_average_1))/(misfit_height_1_count/num_subsample)
where_are_NaNs = np.isnan(A)
A[where_are_NaNs] = 9999
where_less_than_mincount=misfit_height_1_count/num_subsample<count_min
A[where_less_than_mincount] = 9999
X_1 = np.abs(A-value)
idx_1 = np.where( X_1 == X_1.min() )
B=(abs(misfit_height_average_2)+abs(misfit_width_average_2)+abs(misfit_slope_average_2))/(misfit_height_2_count/num_subsample)
where_are_NaNs = np.isnan(B)
B[where_are_NaNs] = 9999
where_less_than_mincount=misfit_height_2_count/num_subsample<count_min
B[where_less_than_mincount] = 9999
X_2 = np.abs(B-value)
idx_2 = np.where( X_2 == X_2.min() )
#Prints the best parameter space: method name (e.g. Sav Gol), bin size, slope threshold, average height/width/slope misfits and the misfit count
if abs(A[idx_1[0], idx_1[1]])<abs(B[idx_2[0], idx_2[1]]):
print('Best Parameter Space:')
print('method = %s' %method_name_1)
print('bin size = %s' %gridy[idx_1[0], idx_1[1]])
print('slope threshold = %s' %gridx[idx_1[0], idx_1[1]])
print('average misfit height (m) = %s' %misfit_height_average_1[idx_1[0], idx_1[1]])
print('average misfit width (m) = %s' %misfit_width_average_1[idx_1[0], idx_1[1]])
print('average misfit slope (degrees) = %s' %misfit_slope_average_1[idx_1[0], idx_1[1]])
print('misfit count = %s' %misfit_height_1_count[idx_1[0], idx_1[1]])
method=analysis_number_1
theta_T=np.int(gridx[idx_1[0], idx_1[1]])
idx_theta=np.int(idx_1[1])
bin_size=np.int(gridy[idx_1[0], idx_1[1]])
idx_b=np.int(idx_1[0])
else:
print('Best Parameter Space:')
print('method = %s' %method_name_2)
print('bin size = %s' %gridy[idx_2[0], idx_2[1]])
print('slope threshold = %s' %gridx[idx_2[0], idx_2[1]])
print('average misfit height (m) = %s' %misfit_height_average_2[idx_2[0], idx_2[1]])
print('average misfit width (m) = %s' %misfit_width_average_2[idx_2[0], idx_2[1]])
print('average misfit slope (degrees) = %s' %misfit_slope_average_2[idx_2[0], idx_2[1]])
print('misfit count = %s' %misfit_height_2_count[idx_2[0], idx_2[1]])
method=analysis_number_2
theta_T=np.int(gridx[idx_2[0], idx_2[1]])
idx_theta=np.int(idx_2[1])
bin_size=np.int(gridy[idx_2[0], idx_2[1]])
idx_b=np.int(idx_2[0])
###
#Set levels for misfit plots
levels_height=[-10, -7.5, -5, -2.5, 0, 2.5, 5, 7.5, 10]
levels_width=[-20, -15, -5, 0, 5, 10, 15, 20]
levels_slope=[-40, -35, -30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40]
levels_count=[2,4,6,8,10,12,14,16,18,20]
#Plot figures
plt.figure(3)
#Plot for analysis number 1
plt.subplot(4,2,1)
plt.contourf(gridx,gridy,misfit_height_average_1,levels_height,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit height (m)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit height using %s filter' %method_name_1, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,3)
plt.contourf(gridx,gridy,misfit_width_average_1,levels_width,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit width (m)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit width using %s filter' %method_name_1, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,5)
plt.contourf(gridx,gridy,misfit_slope_average_1,levels_slope,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit slope ($^\circ$)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit slope using %s filter' %method_name_1, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,7)
cmap = plt.cm.get_cmap("winter")
plt.contourf(gridx,gridy,misfit_height_1_count,levels_count,cmap=cmap, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit count', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.xlabel('$\mathit{b}$ value (m)', fontsize=8)
plt.ylabel('${\\theta}_T$ ($^\circ$)', fontsize=8)
plt.title('Misfit count using %s filter' %method_name_1, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
#Plot for analysis number 2
plt.subplot(4,2,2)
plt.contourf(gridx,gridy,misfit_height_average_2,levels_height,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit height (m)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit height using %s filter' %method_name_2, fontsize=8)
#plt.subplots_adjust(hspace=1)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,4)
plt.contourf(gridx,gridy,misfit_width_average_2,levels_width,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit width (m)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit width using %s filter' %method_name_2, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,6)
plt.contourf(gridx,gridy,misfit_slope_average_2,levels_slope,cmap=plt.cm.bwr, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit slope ($^\circ$)', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit slope using %s filter' %method_name_2, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(4,2,8)
cmap = plt.cm.get_cmap("winter")
plt.contourf(gridx,gridy,misfit_height_2_count,levels_count,cmap=cmap, extend ='both')
plt.gca().patch.set_color('.25')
cbar=plt.colorbar()
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=8)
cbar.set_label('misfit count', rotation=270, fontsize=8)
plt.scatter(gridx,gridy,s=2,facecolors='none', edgecolors='w')
plt.title('Misfit count using %s filter' %method_name_2, fontsize=8)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.tight_layout()
if method==analysis_number_1:
h_subsample=h_1[:,idx_b,idx_theta]
w_subsample=w_1[:,idx_b,idx_theta]
slope_subsample=slope_1[:,idx_b,idx_theta]
else:
h_subsample=h_2[:,idx_b,idx_theta]
w_subsample=w_2[:,idx_b,idx_theta]
slope_subsample=slope_2[:,idx_b,idx_theta]
#Plot against manual plot
plt.figure(4) #plot manual data
plt.subplot(3,1,1)
plt.scatter(dist_along_fault,h_manual,s=5,color='black')
plt.scatter(dist_along_fault,h_subsample,s=5,color='red')
plt.ylabel('Scarp Height (m)', fontsize=8)
plt.title('Manual (black) v Algorithm (red) Scarp Height Profile', fontsize=8)
#plt.ylim([0, np.int(math.ceil(np.amax(h_manual)/10.0))*10])
plt.subplots_adjust(hspace=1)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(3,1,2)
plt.scatter(dist_along_fault,w_manual,s=5,color='black')
plt.scatter(dist_along_fault,w_subsample,s=5,color='red')
plt.ylabel('Scarp Width (m)', fontsize=8)
plt.title('Manual (black) v Algorithm (red) Scarp Width Profile', fontsize=8)
#plt.ylim([0, np.int(math.ceil(np.amax(w_manual)/10.0))*10])
plt.subplots_adjust(hspace=1)
plt.xticks(size = 8)
plt.yticks(size = 8)
plt.subplot(3,1,3)
plt.scatter(dist_along_fault,slope_manual,s=5,color='black')
plt.scatter(dist_along_fault,slope_subsample,s=5,color='red')
plt.xlabel('Distance along fault (km)', fontsize=8)
plt.ylabel('Scarp Slope ($^\circ$)', fontsize=8)
plt.title('Manual (black) v Algorithm (red) Scarp Slope Profile', fontsize=8)
plt.subplots_adjust(hspace=1)
plt.xticks(size = 8)
plt.yticks(size = 8)
#plt.ylim([(np.int(math.ceil(np.amin(slope_manual)/10.0))*10)-10,0])
### END
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.xlabel",
"Algorithm_misfit.algorithm_misfit",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"numpy.abs",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"numpy.isnan",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.title",
"numpy.int",
"matplotlib.pyplot.subplots_adjust",
"pickle.dump",
"matplotlib.pyplot.colorbar",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot"
] | [((511, 542), 'numpy.zeros', 'np.zeros', (['(num_subsample, nump)'], {}), '((num_subsample, nump))\n', (519, 542), True, 'import numpy as np\n'), ((566, 597), 'numpy.zeros', 'np.zeros', (['(num_subsample, nump)'], {}), '((num_subsample, nump))\n', (574, 597), True, 'import numpy as np\n'), ((2427, 2634), 'Algorithm_misfit.algorithm_misfit', 'algorithm_misfit', (['prof_distance_subsample', 'prof_height_subsample', 'h_manual', 'w_manual', 'slope_manual', 'nump', 'iterations', 'method', 'bin_max', 'bin_min', 'bin_step', 'theta_T_max', 'theta_T_min', 'theta_T_step', 'phi_T'], {}), '(prof_distance_subsample, prof_height_subsample, h_manual,\n w_manual, slope_manual, nump, iterations, method, bin_max, bin_min,\n bin_step, theta_T_max, theta_T_min, theta_T_step, phi_T)\n', (2443, 2634), False, 'from Algorithm_misfit import algorithm_misfit\n'), ((4715, 4922), 'Algorithm_misfit.algorithm_misfit', 'algorithm_misfit', (['prof_distance_subsample', 'prof_height_subsample', 'h_manual', 'w_manual', 'slope_manual', 'nump', 'iterations', 'method', 'bin_max', 'bin_min', 'bin_step', 'theta_T_max', 'theta_T_min', 'theta_T_step', 'phi_T'], {}), '(prof_distance_subsample, prof_height_subsample, h_manual,\n w_manual, slope_manual, nump, iterations, method, bin_max, bin_min,\n bin_step, theta_T_max, theta_T_min, theta_T_step, phi_T)\n', (4731, 4922), False, 'from Algorithm_misfit import algorithm_misfit\n'), ((6762, 6787), 'numpy.zeros', 'np.zeros', (['(iterations, 1)'], {}), '((iterations, 1))\n', (6770, 6787), True, 'import numpy as np\n'), ((6807, 6832), 'numpy.zeros', 'np.zeros', (['(iterations, 1)'], {}), '((iterations, 1))\n', (6815, 6832), True, 'import numpy as np\n'), ((8062, 8073), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (8070, 8073), True, 'import numpy as np\n'), ((8211, 8228), 'numpy.abs', 'np.abs', (['(A - value)'], {}), '(A - value)\n', (8217, 8228), True, 'import numpy as np\n'), ((8409, 8420), 'numpy.isnan', 'np.isnan', (['B'], {}), '(B)\n', (8417, 8420), True, 'import numpy as np\n'), ((8558, 8575), 'numpy.abs', 'np.abs', (['(B - value)'], {}), '(B - value)\n', (8564, 8575), True, 'import numpy as np\n'), ((10566, 10579), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (10576, 10579), True, 'import matplotlib.pyplot as plt\n'), ((10610, 10630), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(2)', '(1)'], {}), '(4, 2, 1)\n', (10621, 10630), True, 'import matplotlib.pyplot as plt\n'), ((10629, 10732), 'matplotlib.pyplot.contourf', 'plt.contourf', (['gridx', 'gridy', 'misfit_height_average_1', 'levels_height'], {'cmap': 'plt.cm.bwr', 'extend': '"""both"""'}), "(gridx, gridy, misfit_height_average_1, levels_height, cmap=plt\n .cm.bwr, extend='both')\n", (10641, 10732), True, 'import matplotlib.pyplot as plt\n'), ((10763, 10777), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (10775, 10777), True, 'import matplotlib.pyplot as plt\n'), ((10903, 10968), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gridx', 'gridy'], {'s': '(2)', 'facecolors': '"""none"""', 'edgecolors': '"""w"""'}), "(gridx, gridy, s=2, facecolors='none', edgecolors='w')\n", (10914, 10968), True, 'import matplotlib.pyplot as plt\n'), ((10966, 11036), 'matplotlib.pyplot.title', 'plt.title', (["('Misfit height using %s filter' % method_name_1)"], {'fontsize': '(8)'}), "('Misfit height using %s filter' % method_name_1, fontsize=8)\n", (10975, 11036), True, 'import matplotlib.pyplot as plt\n'), ((11036, 11054), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), 
'(size=8)\n', (11046, 11054), True, 'import matplotlib.pyplot as plt\n'), ((11057, 11075), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (11067, 11075), True, 'import matplotlib.pyplot as plt\n'), ((11079, 11099), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(2)', '(3)'], {}), '(4, 2, 3)\n', (11090, 11099), True, 'import matplotlib.pyplot as plt\n'), ((11098, 11199), 'matplotlib.pyplot.contourf', 'plt.contourf', (['gridx', 'gridy', 'misfit_width_average_1', 'levels_width'], {'cmap': 'plt.cm.bwr', 'extend': '"""both"""'}), "(gridx, gridy, misfit_width_average_1, levels_width, cmap=plt.\n cm.bwr, extend='both')\n", (11110, 11199), True, 'import matplotlib.pyplot as plt\n'), ((11230, 11244), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11242, 11244), True, 'import matplotlib.pyplot as plt\n'), ((11369, 11434), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gridx', 'gridy'], {'s': '(2)', 'facecolors': '"""none"""', 'edgecolors': '"""w"""'}), "(gridx, gridy, s=2, facecolors='none', edgecolors='w')\n", (11380, 11434), True, 'import matplotlib.pyplot as plt\n'), ((11432, 11501), 'matplotlib.pyplot.title', 'plt.title', (["('Misfit width using %s filter' % method_name_1)"], {'fontsize': '(8)'}), "('Misfit width using %s filter' % method_name_1, fontsize=8)\n", (11441, 11501), True, 'import matplotlib.pyplot as plt\n'), ((11501, 11519), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (11511, 11519), True, 'import matplotlib.pyplot as plt\n'), ((11522, 11540), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (11532, 11540), True, 'import matplotlib.pyplot as plt\n'), ((11544, 11564), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(2)', '(5)'], {}), '(4, 2, 5)\n', (11555, 11564), True, 'import matplotlib.pyplot as plt\n'), ((11563, 11664), 'matplotlib.pyplot.contourf', 'plt.contourf', (['gridx', 'gridy', 'misfit_slope_average_1', 'levels_slope'], {'cmap': 'plt.cm.bwr', 'extend': '"""both"""'}), "(gridx, gridy, misfit_slope_average_1, levels_slope, cmap=plt.\n cm.bwr, extend='both')\n", (11575, 11664), True, 'import matplotlib.pyplot as plt\n'), ((11695, 11709), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11707, 11709), True, 'import matplotlib.pyplot as plt\n'), ((11841, 11906), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gridx', 'gridy'], {'s': '(2)', 'facecolors': '"""none"""', 'edgecolors': '"""w"""'}), "(gridx, gridy, s=2, facecolors='none', edgecolors='w')\n", (11852, 11906), True, 'import matplotlib.pyplot as plt\n'), ((11904, 11973), 'matplotlib.pyplot.title', 'plt.title', (["('Misfit slope using %s filter' % method_name_1)"], {'fontsize': '(8)'}), "('Misfit slope using %s filter' % method_name_1, fontsize=8)\n", (11913, 11973), True, 'import matplotlib.pyplot as plt\n'), ((11973, 11991), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (11983, 11991), True, 'import matplotlib.pyplot as plt\n'), ((11994, 12012), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (12004, 12012), True, 'import matplotlib.pyplot as plt\n'), ((12016, 12036), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(2)', '(7)'], {}), '(4, 2, 7)\n', (12027, 12036), True, 'import matplotlib.pyplot as plt\n'), ((12042, 12067), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""winter"""'], {}), "('winter')\n", (12057, 12067), True, 'import matplotlib.pyplot as plt\n'), ((12068, 12161), 
'matplotlib.pyplot.contourf', 'plt.contourf', (['gridx', 'gridy', 'misfit_height_1_count', 'levels_count'], {'cmap': 'cmap', 'extend': '"""both"""'}), "(gridx, gridy, misfit_height_1_count, levels_count, cmap=cmap,\n extend='both')\n", (12080, 12161), True, 'import matplotlib.pyplot as plt\n'), ((12193, 12207), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (12205, 12207), True, 'import matplotlib.pyplot as plt\n'), ((12328, 12393), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gridx', 'gridy'], {'s': '(2)', 'facecolors': '"""none"""', 'edgecolors': '"""w"""'}), "(gridx, gridy, s=2, facecolors='none', edgecolors='w')\n", (12339, 12393), True, 'import matplotlib.pyplot as plt\n'), ((12391, 12440), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathit{b}$ value (m)"""'], {'fontsize': '(8)'}), "('$\\\\mathit{b}$ value (m)', fontsize=8)\n", (12401, 12440), True, 'import matplotlib.pyplot as plt\n'), ((12440, 12491), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\theta}_T$ ($^\\\\circ$)"""'], {'fontsize': '(8)'}), "('${\\\\theta}_T$ ($^\\\\circ$)', fontsize=8)\n", (12450, 12491), True, 'import matplotlib.pyplot as plt\n'), ((12491, 12560), 'matplotlib.pyplot.title', 'plt.title', (["('Misfit count using %s filter' % method_name_1)"], {'fontsize': '(8)'}), "('Misfit count using %s filter' % method_name_1, fontsize=8)\n", (12500, 12560), True, 'import matplotlib.pyplot as plt\n'), ((12560, 12578), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (12570, 12578), True, 'import matplotlib.pyplot as plt\n'), ((12581, 12599), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (12591, 12599), True, 'import matplotlib.pyplot as plt\n'), ((12632, 12652), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(2)', '(2)'], {}), '(4, 2, 2)\n', (12643, 12652), True, 'import matplotlib.pyplot as plt\n'), ((12651, 12754), 'matplotlib.pyplot.contourf', 'plt.contourf', (['gridx', 'gridy', 'misfit_height_average_2', 'levels_height'], {'cmap': 'plt.cm.bwr', 'extend': '"""both"""'}), "(gridx, gridy, misfit_height_average_2, levels_height, cmap=plt\n .cm.bwr, extend='both')\n", (12663, 12754), True, 'import matplotlib.pyplot as plt\n'), ((12785, 12799), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (12797, 12799), True, 'import matplotlib.pyplot as plt\n'), ((12925, 12990), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gridx', 'gridy'], {'s': '(2)', 'facecolors': '"""none"""', 'edgecolors': '"""w"""'}), "(gridx, gridy, s=2, facecolors='none', edgecolors='w')\n", (12936, 12990), True, 'import matplotlib.pyplot as plt\n'), ((12988, 13058), 'matplotlib.pyplot.title', 'plt.title', (["('Misfit height using %s filter' % method_name_2)"], {'fontsize': '(8)'}), "('Misfit height using %s filter' % method_name_2, fontsize=8)\n", (12997, 13058), True, 'import matplotlib.pyplot as plt\n'), ((13089, 13107), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (13099, 13107), True, 'import matplotlib.pyplot as plt\n'), ((13110, 13128), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (13120, 13128), True, 'import matplotlib.pyplot as plt\n'), ((13132, 13152), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(2)', '(4)'], {}), '(4, 2, 4)\n', (13143, 13152), True, 'import matplotlib.pyplot as plt\n'), ((13151, 13252), 'matplotlib.pyplot.contourf', 'plt.contourf', (['gridx', 'gridy', 'misfit_width_average_2', 'levels_width'], {'cmap': 'plt.cm.bwr', 'extend': 
'"""both"""'}), "(gridx, gridy, misfit_width_average_2, levels_width, cmap=plt.\n cm.bwr, extend='both')\n", (13163, 13252), True, 'import matplotlib.pyplot as plt\n'), ((13283, 13297), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13295, 13297), True, 'import matplotlib.pyplot as plt\n'), ((13422, 13487), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gridx', 'gridy'], {'s': '(2)', 'facecolors': '"""none"""', 'edgecolors': '"""w"""'}), "(gridx, gridy, s=2, facecolors='none', edgecolors='w')\n", (13433, 13487), True, 'import matplotlib.pyplot as plt\n'), ((13485, 13554), 'matplotlib.pyplot.title', 'plt.title', (["('Misfit width using %s filter' % method_name_2)"], {'fontsize': '(8)'}), "('Misfit width using %s filter' % method_name_2, fontsize=8)\n", (13494, 13554), True, 'import matplotlib.pyplot as plt\n'), ((13554, 13572), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (13564, 13572), True, 'import matplotlib.pyplot as plt\n'), ((13575, 13593), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (13585, 13593), True, 'import matplotlib.pyplot as plt\n'), ((13597, 13617), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(2)', '(6)'], {}), '(4, 2, 6)\n', (13608, 13617), True, 'import matplotlib.pyplot as plt\n'), ((13616, 13717), 'matplotlib.pyplot.contourf', 'plt.contourf', (['gridx', 'gridy', 'misfit_slope_average_2', 'levels_slope'], {'cmap': 'plt.cm.bwr', 'extend': '"""both"""'}), "(gridx, gridy, misfit_slope_average_2, levels_slope, cmap=plt.\n cm.bwr, extend='both')\n", (13628, 13717), True, 'import matplotlib.pyplot as plt\n'), ((13748, 13762), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13760, 13762), True, 'import matplotlib.pyplot as plt\n'), ((13894, 13959), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gridx', 'gridy'], {'s': '(2)', 'facecolors': '"""none"""', 'edgecolors': '"""w"""'}), "(gridx, gridy, s=2, facecolors='none', edgecolors='w')\n", (13905, 13959), True, 'import matplotlib.pyplot as plt\n'), ((13957, 14026), 'matplotlib.pyplot.title', 'plt.title', (["('Misfit slope using %s filter' % method_name_2)"], {'fontsize': '(8)'}), "('Misfit slope using %s filter' % method_name_2, fontsize=8)\n", (13966, 14026), True, 'import matplotlib.pyplot as plt\n'), ((14026, 14044), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (14036, 14044), True, 'import matplotlib.pyplot as plt\n'), ((14047, 14065), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (14057, 14065), True, 'import matplotlib.pyplot as plt\n'), ((14069, 14089), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(2)', '(8)'], {}), '(4, 2, 8)\n', (14080, 14089), True, 'import matplotlib.pyplot as plt\n'), ((14095, 14120), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""winter"""'], {}), "('winter')\n", (14110, 14120), True, 'import matplotlib.pyplot as plt\n'), ((14121, 14214), 'matplotlib.pyplot.contourf', 'plt.contourf', (['gridx', 'gridy', 'misfit_height_2_count', 'levels_count'], {'cmap': 'cmap', 'extend': '"""both"""'}), "(gridx, gridy, misfit_height_2_count, levels_count, cmap=cmap,\n extend='both')\n", (14133, 14214), True, 'import matplotlib.pyplot as plt\n'), ((14246, 14260), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (14258, 14260), True, 'import matplotlib.pyplot as plt\n'), ((14381, 14446), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gridx', 'gridy'], {'s': '(2)', 'facecolors': '"""none"""', 'edgecolors': 
'"""w"""'}), "(gridx, gridy, s=2, facecolors='none', edgecolors='w')\n", (14392, 14446), True, 'import matplotlib.pyplot as plt\n'), ((14444, 14513), 'matplotlib.pyplot.title', 'plt.title', (["('Misfit count using %s filter' % method_name_2)"], {'fontsize': '(8)'}), "('Misfit count using %s filter' % method_name_2, fontsize=8)\n", (14453, 14513), True, 'import matplotlib.pyplot as plt\n'), ((14513, 14531), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (14523, 14531), True, 'import matplotlib.pyplot as plt\n'), ((14534, 14552), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (14544, 14552), True, 'import matplotlib.pyplot as plt\n'), ((14556, 14574), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14572, 14574), True, 'import matplotlib.pyplot as plt\n'), ((14895, 14908), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (14905, 14908), True, 'import matplotlib.pyplot as plt\n'), ((14928, 14948), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (14939, 14948), True, 'import matplotlib.pyplot as plt\n'), ((14947, 15006), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dist_along_fault', 'h_manual'], {'s': '(5)', 'color': '"""black"""'}), "(dist_along_fault, h_manual, s=5, color='black')\n", (14958, 15006), True, 'import matplotlib.pyplot as plt\n'), ((15005, 15065), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dist_along_fault', 'h_subsample'], {'s': '(5)', 'color': '"""red"""'}), "(dist_along_fault, h_subsample, s=5, color='red')\n", (15016, 15065), True, 'import matplotlib.pyplot as plt\n'), ((15067, 15109), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scarp Height (m)"""'], {'fontsize': '(8)'}), "('Scarp Height (m)', fontsize=8)\n", (15077, 15109), True, 'import matplotlib.pyplot as plt\n'), ((15110, 15188), 'matplotlib.pyplot.title', 'plt.title', (['"""Manual (black) v Algorithm (red) Scarp Height Profile"""'], {'fontsize': '(8)'}), "('Manual (black) v Algorithm (red) Scarp Height Profile', fontsize=8)\n", (15119, 15188), True, 'import matplotlib.pyplot as plt\n'), ((15250, 15279), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1)'}), '(hspace=1)\n', (15269, 15279), True, 'import matplotlib.pyplot as plt\n'), ((15280, 15298), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (15290, 15298), True, 'import matplotlib.pyplot as plt\n'), ((15301, 15319), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (15311, 15319), True, 'import matplotlib.pyplot as plt\n'), ((15322, 15342), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (15333, 15342), True, 'import matplotlib.pyplot as plt\n'), ((15341, 15400), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dist_along_fault', 'w_manual'], {'s': '(5)', 'color': '"""black"""'}), "(dist_along_fault, w_manual, s=5, color='black')\n", (15352, 15400), True, 'import matplotlib.pyplot as plt\n'), ((15400, 15460), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dist_along_fault', 'w_subsample'], {'s': '(5)', 'color': '"""red"""'}), "(dist_along_fault, w_subsample, s=5, color='red')\n", (15411, 15460), True, 'import matplotlib.pyplot as plt\n'), ((15464, 15505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scarp Width (m)"""'], {'fontsize': '(8)'}), "('Scarp Width (m)', fontsize=8)\n", (15474, 15505), True, 'import matplotlib.pyplot as plt\n'), ((15506, 15583), 'matplotlib.pyplot.title', 
'plt.title', (['"""Manual (black) v Algorithm (red) Scarp Width Profile"""'], {'fontsize': '(8)'}), "('Manual (black) v Algorithm (red) Scarp Width Profile', fontsize=8)\n", (15515, 15583), True, 'import matplotlib.pyplot as plt\n'), ((15645, 15674), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1)'}), '(hspace=1)\n', (15664, 15674), True, 'import matplotlib.pyplot as plt\n'), ((15675, 15693), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (15685, 15693), True, 'import matplotlib.pyplot as plt\n'), ((15696, 15714), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (15706, 15714), True, 'import matplotlib.pyplot as plt\n'), ((15717, 15737), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (15728, 15737), True, 'import matplotlib.pyplot as plt\n'), ((15736, 15799), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dist_along_fault', 'slope_manual'], {'s': '(5)', 'color': '"""black"""'}), "(dist_along_fault, slope_manual, s=5, color='black')\n", (15747, 15799), True, 'import matplotlib.pyplot as plt\n'), ((15797, 15861), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dist_along_fault', 'slope_subsample'], {'s': '(5)', 'color': '"""red"""'}), "(dist_along_fault, slope_subsample, s=5, color='red')\n", (15808, 15861), True, 'import matplotlib.pyplot as plt\n'), ((15867, 15918), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance along fault (km)"""'], {'fontsize': '(8)'}), "('Distance along fault (km)', fontsize=8)\n", (15877, 15918), True, 'import matplotlib.pyplot as plt\n'), ((15919, 15968), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scarp Slope ($^\\\\circ$)"""'], {'fontsize': '(8)'}), "('Scarp Slope ($^\\\\circ$)', fontsize=8)\n", (15929, 15968), True, 'import matplotlib.pyplot as plt\n'), ((15968, 16045), 'matplotlib.pyplot.title', 'plt.title', (['"""Manual (black) v Algorithm (red) Scarp Slope Profile"""'], {'fontsize': '(8)'}), "('Manual (black) v Algorithm (red) Scarp Slope Profile', fontsize=8)\n", (15977, 16045), True, 'import matplotlib.pyplot as plt\n'), ((16046, 16075), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1)'}), '(hspace=1)\n', (16065, 16075), True, 'import matplotlib.pyplot as plt\n'), ((16076, 16094), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(8)'}), '(size=8)\n', (16086, 16094), True, 'import matplotlib.pyplot as plt\n'), ((16097, 16115), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(8)'}), '(size=8)\n', (16107, 16115), True, 'import matplotlib.pyplot as plt\n'), ((6001, 6020), 'pickle.dump', 'pickle.dump', (['h_1', 'f'], {}), '(h_1, f)\n', (6012, 6020), False, 'import pickle\n'), ((6025, 6044), 'pickle.dump', 'pickle.dump', (['h_2', 'f'], {}), '(h_2, f)\n', (6036, 6044), False, 'import pickle\n'), ((6049, 6068), 'pickle.dump', 'pickle.dump', (['w_1', 'f'], {}), '(w_1, f)\n', (6060, 6068), False, 'import pickle\n'), ((6073, 6092), 'pickle.dump', 'pickle.dump', (['w_2', 'f'], {}), '(w_2, f)\n', (6084, 6092), False, 'import pickle\n'), ((6097, 6120), 'pickle.dump', 'pickle.dump', (['slope_1', 'f'], {}), '(slope_1, f)\n', (6108, 6120), False, 'import pickle\n'), ((6125, 6148), 'pickle.dump', 'pickle.dump', (['slope_2', 'f'], {}), '(slope_2, f)\n', (6136, 6148), False, 'import pickle\n'), ((6153, 6184), 'pickle.dump', 'pickle.dump', (['misfit_height_1', 'f'], {}), '(misfit_height_1, f)\n', (6164, 6184), False, 'import pickle\n'), ((6189, 6220), 'pickle.dump', 'pickle.dump', 
(['misfit_height_2', 'f'], {}), '(misfit_height_2, f)\n', (6200, 6220), False, 'import pickle\n'), ((6225, 6255), 'pickle.dump', 'pickle.dump', (['misfit_width_1', 'f'], {}), '(misfit_width_1, f)\n', (6236, 6255), False, 'import pickle\n'), ((6260, 6290), 'pickle.dump', 'pickle.dump', (['misfit_width_2', 'f'], {}), '(misfit_width_2, f)\n', (6271, 6290), False, 'import pickle\n'), ((6295, 6325), 'pickle.dump', 'pickle.dump', (['misfit_slope_1', 'f'], {}), '(misfit_slope_1, f)\n', (6306, 6325), False, 'import pickle\n'), ((6330, 6360), 'pickle.dump', 'pickle.dump', (['misfit_slope_2', 'f'], {}), '(misfit_slope_2, f)\n', (6341, 6360), False, 'import pickle\n'), ((6365, 6404), 'pickle.dump', 'pickle.dump', (['misfit_height_average_1', 'f'], {}), '(misfit_height_average_1, f)\n', (6376, 6404), False, 'import pickle\n'), ((6409, 6448), 'pickle.dump', 'pickle.dump', (['misfit_height_average_2', 'f'], {}), '(misfit_height_average_2, f)\n', (6420, 6448), False, 'import pickle\n'), ((6453, 6491), 'pickle.dump', 'pickle.dump', (['misfit_width_average_1', 'f'], {}), '(misfit_width_average_1, f)\n', (6464, 6491), False, 'import pickle\n'), ((6496, 6534), 'pickle.dump', 'pickle.dump', (['misfit_width_average_2', 'f'], {}), '(misfit_width_average_2, f)\n', (6507, 6534), False, 'import pickle\n'), ((6539, 6577), 'pickle.dump', 'pickle.dump', (['misfit_slope_average_1', 'f'], {}), '(misfit_slope_average_1, f)\n', (6550, 6577), False, 'import pickle\n'), ((6582, 6620), 'pickle.dump', 'pickle.dump', (['misfit_slope_average_2', 'f'], {}), '(misfit_slope_average_2, f)\n', (6593, 6620), False, 'import pickle\n'), ((6625, 6646), 'pickle.dump', 'pickle.dump', (['gridx', 'f'], {}), '(gridx, f)\n', (6636, 6646), False, 'import pickle\n'), ((6651, 6672), 'pickle.dump', 'pickle.dump', (['gridy', 'f'], {}), '(gridy, f)\n', (6662, 6672), False, 'import pickle\n'), ((9417, 9450), 'numpy.int', 'np.int', (['gridx[idx_1[0], idx_1[1]]'], {}), '(gridx[idx_1[0], idx_1[1]])\n', (9423, 9450), True, 'import numpy as np\n'), ((9465, 9481), 'numpy.int', 'np.int', (['idx_1[1]'], {}), '(idx_1[1])\n', (9471, 9481), True, 'import numpy as np\n'), ((9495, 9528), 'numpy.int', 'np.int', (['gridy[idx_1[0], idx_1[1]]'], {}), '(gridy[idx_1[0], idx_1[1]])\n', (9501, 9528), True, 'import numpy as np\n'), ((9539, 9555), 'numpy.int', 'np.int', (['idx_1[0]'], {}), '(idx_1[0])\n', (9545, 9555), True, 'import numpy as np\n'), ((10136, 10169), 'numpy.int', 'np.int', (['gridx[idx_2[0], idx_2[1]]'], {}), '(gridx[idx_2[0], idx_2[1]])\n', (10142, 10169), True, 'import numpy as np\n'), ((10184, 10200), 'numpy.int', 'np.int', (['idx_2[1]'], {}), '(idx_2[1])\n', (10190, 10200), True, 'import numpy as np\n'), ((10214, 10247), 'numpy.int', 'np.int', (['gridy[idx_2[0], idx_2[1]]'], {}), '(gridy[idx_2[0], idx_2[1]])\n', (10220, 10247), True, 'import numpy as np\n'), ((10258, 10274), 'numpy.int', 'np.int', (['idx_2[0]'], {}), '(idx_2[0])\n', (10264, 10274), True, 'import numpy as np\n'), ((7264, 7293), 'numpy.isnan', 'np.isnan', (['misfit_height_1_min'], {}), '(misfit_height_1_min)\n', (7272, 7293), True, 'import numpy as np\n'), ((7339, 7368), 'numpy.isnan', 'np.isnan', (['misfit_height_2_min'], {}), '(misfit_height_2_min)\n', (7347, 7368), True, 'import numpy as np\n'), ((10725, 10734), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10732, 10734), True, 'import matplotlib.pyplot as plt\n'), ((11192, 11201), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11199, 11201), True, 'import matplotlib.pyplot as plt\n'), ((11657, 11666), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11664, 11666), True, 'import matplotlib.pyplot as plt\n'), ((12155, 12164), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12162, 12164), True, 'import matplotlib.pyplot as plt\n'), ((12747, 12756), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12754, 12756), True, 'import matplotlib.pyplot as plt\n'), ((13245, 13254), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13252, 13254), True, 'import matplotlib.pyplot as plt\n'), ((13710, 13719), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13717, 13719), True, 'import matplotlib.pyplot as plt\n'), ((14208, 14217), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14215, 14217), True, 'import matplotlib.pyplot as plt\n'), ((7540, 7574), 'numpy.isnan', 'np.isnan', (['misfit_height_1[:, m, n]'], {}), '(misfit_height_1[:, m, n])\n', (7548, 7574), True, 'import numpy as np\n'), ((7743, 7777), 'numpy.isnan', 'np.isnan', (['misfit_height_2[:, m, n]'], {}), '(misfit_height_2[:, m, n])\n', (7751, 7777), True, 'import numpy as np\n')] |
#!/usr/bin/python
#coding: utf-8
'''
	REFERENCES:
<http://acodigo.blogspot.com.br/2013/05/procesamiento-de-imagenes-en-opencv.html>
@Author RodriguesFAS
@Date 26/09/2017
@Email <<EMAIL>> || <<EMAIL>>
	@site <http://rodriguesfas.com.br>
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import numpy as np
src = 'src/lena.jpg'
img_org = cv2.imread(src)
# Apply a Gaussian blur with a 13x7 kernel and sigmaX=8; cv2.GaussianBlur returns the blurred image.
dst1 = cv2.GaussianBlur(img_org, (13, 7), 8)
cv2.imshow('GaussianBlur', dst1)
cv2.waitKey(0) | [
"cv2.waitKey",
"cv2.imread",
"cv2.imshow"
] | [((373, 388), 'cv2.imread', 'cv2.imread', (['src'], {}), '(src)\n', (383, 388), False, 'import cv2\n'), ((443, 475), 'cv2.imshow', 'cv2.imshow', (['"""GaussianBlur"""', 'dst1'], {}), "('GaussianBlur', dst1)\n", (453, 475), False, 'import cv2\n'), ((477, 491), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (488, 491), False, 'import cv2\n')] |
from asyncio import sleep
from os import mkdir, getcwd
from os.path import join
from sys import platform
from .logger import logger
from .criptografia import fingerprint
from .descargar import _extraer_links, aio_descargar, filtro, _crear_archivo, Reglas
from csv import writer
# Crear loop para async
# Create loop for async
if platform == 'win32':
from asyncio import ProactorEventLoop
_loop = ProactorEventLoop()
else:
from asyncio import get_event_loop
_loop = get_event_loop()
# ESPANOL
# Funcion para guardar los links encontrados en un archivo .txt
# ENGLISH
# Function to save all the links found in a .txt file
def _guardar_links(directorio, links):
with open(join(directorio, 'lista_links.txt'), 'w') as archivo:
archivo.write('\n'.join(x for x in links))
archivo.close()
# ESPANOL
# Funcion para guardar hashes en un archivo .csv
# ENGLISH
# Function to create a .csv file and save the hashes there
def _guardar_hashes(directorio, hashes):
with open(join(directorio, 'hashes.csv'), 'w') as archivo:
escritor = writer(archivo)
escritor.writerow(['Archivo', 'Hash'])
for hash in hashes:
escritor.writerow(hash)
archivo.close()
### Creates the directory, logging the exception if it already exists
def crear_mantener_directorio(directorio, loger):
try:
mkdir(directorio)
except Exception as e:
loger.logear(e)
# ESPANOL#
###Funcion que crea una ruta a un directorio de una carpeta
# ENGLISH#
###This function builds the path to a file inside the working directory
def conseguir_directorio(directorio, archivo):
if directorio != None:
return join(directorio, archivo)
# ESPANOL#
###Funcion que fuciona la creacion de un hash con la descarga de un archivo
# ENGLISH#
###This function creates a file with the received bytes and calculates its fingerprint hash; the resulting
###[name_of_file, hash] pair is appended to the caller's hash list, which is then returned
def hash_y_archivo(hashes, nombre, contenido, directorio, parametros, descargar_archivos, guardar_hashes, loger,
reglas):
try:
if not filtro(nombre, contenido, parametros, reglas):
if descargar_archivos:
_crear_archivo(join(directorio, nombre), contenido, logger=loger)
if guardar_hashes:
try:
hashes.append([nombre, fingerprint(contenido)])
except Exception as e:
loger.logear(e)
except:
print('error en hashyarchivo')
return hashes
# ESPANOL
# Funcion que analiza un link para luego extraerle el hash,, descargarlo o determinar los links que tiene
# ENGLISH
# This function performs the whole scraping process
async def mapear(url, profundidad, parametros, descargar_archivos, guardar_links, guardar_hashes, loger, reglas,
velocidad,
informacion=None, directorio_a=getcwd()):
hashes = [] # This variable store all the hashes found
try:
if profundidad > 0:
            ### Only fetched here for the first (root) link; recursive calls pass informacion in
if informacion == None:
informacion = await aio_descargar(url, logger=loger)
# Get the directory to work
directorio = conseguir_directorio(directorio_a, informacion[0])
            # Try to create the working directory
crear_mantener_directorio(directorio, loger)
            # Extract all the links from the html bytes
links = _extraer_links(informacion[2])
            # Try to download the file and extract the hash
hashes = hash_y_archivo(hashes, informacion[1], informacion[2], directorio, parametros, descargar_archivos,
guardar_hashes, loger, reglas)
            # Continue if someone wants to save all the links found in a file ('lista_links.txt')
if guardar_links:
_guardar_links(directorio, links)
# Work with all links in the links list
for numero, link in enumerate(links):
try:
                    # Without this check the program sometimes makes duplications or uses unnecessary resources
if link != url:
# Get information to mapear function
informacion = await aio_descargar(link, logger=loger)
await sleep(profundidad / velocidad) # Go to other process in this time
# Extract the hash and download the file
hashes = hash_y_archivo(hashes, informacion[1], informacion[2], directorio, parametros,
descargar_archivos,
guardar_hashes, loger, reglas)
                        # Start mapping the link
_loop.create_task(
mapear(link, profundidad - 1, parametros, descargar_archivos, guardar_links, guardar_hashes,
loger, reglas, velocidad=velocidad,
informacion=informacion, directorio_a=directorio))
except:
pass
# This is the progress of analysis in the current 'url'
loger.logear('{}% en {}'.format(100 * (numero + 1) / len(links), url))
# Save all the hashes found in a .csv file
_guardar_hashes(directorio, hashes)
except Exception as e:
        # Exception debugging
loger.logear(e)
try:
# Try to create the file
_guardar_hashes(directorio, hashes)
except Exception as e:
            # Debugging
loger.logear('[ERROR] Se re-intento es cribir el hash pero no se logro')
# ESPANOL
# Funcion final que se utiliza para hacer el analisis de un url, (esta es la que debe usarce para el scraping)
# ENGLISH
# This function is the only one intended to be used from this file; it wraps mapear to run the full scraping
def scrap(url, debug_file=None, debug=True, profundidad=2, parametros={}, descargar_archivos=True, guardar_links=True,
guardar_hashes=True, reglas=Reglas, velocidad=3):
loger = logger(debug_file, debug)
_loop.run_until_complete(
mapear(url, profundidad, parametros, descargar_archivos, guardar_links, guardar_hashes, loger, reglas,
velocidad=velocidad))
| [
"csv.writer",
"os.path.join",
"os.getcwd",
"os.mkdir",
"asyncio.sleep",
"asyncio.get_event_loop",
"asyncio.ProactorEventLoop"
] | [((406, 425), 'asyncio.ProactorEventLoop', 'ProactorEventLoop', ([], {}), '()\n', (423, 425), False, 'from asyncio import ProactorEventLoop\n'), ((483, 499), 'asyncio.get_event_loop', 'get_event_loop', ([], {}), '()\n', (497, 499), False, 'from asyncio import get_event_loop\n'), ((2910, 2918), 'os.getcwd', 'getcwd', ([], {}), '()\n', (2916, 2918), False, 'from os import mkdir, getcwd\n'), ((1073, 1088), 'csv.writer', 'writer', (['archivo'], {}), '(archivo)\n', (1079, 1088), False, 'from csv import writer\n'), ((1332, 1349), 'os.mkdir', 'mkdir', (['directorio'], {}), '(directorio)\n', (1337, 1349), False, 'from os import mkdir, getcwd\n'), ((1620, 1645), 'os.path.join', 'join', (['directorio', 'archivo'], {}), '(directorio, archivo)\n', (1624, 1645), False, 'from os.path import join\n'), ((692, 727), 'os.path.join', 'join', (['directorio', '"""lista_links.txt"""'], {}), "(directorio, 'lista_links.txt')\n", (696, 727), False, 'from os.path import join\n'), ((1005, 1035), 'os.path.join', 'join', (['directorio', '"""hashes.csv"""'], {}), "(directorio, 'hashes.csv')\n", (1009, 1035), False, 'from os.path import join\n'), ((2227, 2251), 'os.path.join', 'join', (['directorio', 'nombre'], {}), '(directorio, nombre)\n', (2231, 2251), False, 'from os.path import join\n'), ((4393, 4423), 'asyncio.sleep', 'sleep', (['(profundidad / velocidad)'], {}), '(profundidad / velocidad)\n', (4398, 4423), False, 'from asyncio import sleep\n')] |
import pytest
import torch.cuda
from torch import nn
from torch.optim import SGD
from yann.callbacks import (
History, HistoryPlotter, HistoryWriter, Logger, Checkpoint
)
from yann.datasets import TinyDigits
from yann.datasets.wrappers import Slice
from yann.modules import Flatten
from yann.train import Trainer
devices = ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']
@pytest.mark.slow
@pytest.mark.parametrize('device', devices)
def test_train(tmpdir, device):
"""Sanity check train run"""
model = nn.Sequential(
nn.Conv2d(1, 20, 3),
nn.ReLU(inplace=True),
nn.Conv2d(20, 20, 3),
nn.ReLU(inplace=True),
Flatten(),
nn.Linear(320, 10)
)
train = Trainer(
root=tmpdir,
model=model,
dataset=Slice(TinyDigits(), 0, 256),
device=device,
optimizer=SGD(model.parameters(), lr=.01, momentum=0.9, weight_decay=.001),
loss=nn.CrossEntropyLoss(),
callbacks=[
History(),
HistoryPlotter(save=True),
HistoryWriter(),
Logger(batch_freq=20),
Checkpoint()
]
)
train(2)
assert train.paths.checkpoints.is_dir()
assert train.history.metrics
export_path = train.export()
assert export_path
assert export_path.is_dir()
# @pytest.mark.slow
# @pytest.mark.parametrize('device', devices)
# def test_train_resolved(tmpdir, device):
# from yann.data.transform import ImageTransformer
#
# train = Trainer(
# root=tmpdir,
# model='densenet121',
# dataset='CIFAR10',
# loss='CrossEntropy',
# optimizer='SGD',
# transform=ImageTransformer(resize=224)
# )
#
# # train(1)
# def test_transforms():
#
# t = Trainer(
# transform={
# 'mask': 'foo',
# 'label': 'foo'
# }
# ) | [
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"yann.callbacks.HistoryWriter",
"yann.callbacks.HistoryPlotter",
"torch.nn.Conv2d",
"yann.callbacks.History",
"pytest.mark.parametrize",
"yann.modules.Flatten",
"yann.callbacks.Checkpoint",
"torch.nn.Linear",
"yann.datasets.TinyDigits",
"yann.callbacks.Logger"
] | [((407, 449), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""device"""', 'devices'], {}), "('device', devices)\n", (430, 449), False, 'import pytest\n'), ((543, 562), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(20)', '(3)'], {}), '(1, 20, 3)\n', (552, 562), False, 'from torch import nn\n'), ((568, 589), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (575, 589), False, 'from torch import nn\n'), ((595, 615), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(20)', '(3)'], {}), '(20, 20, 3)\n', (604, 615), False, 'from torch import nn\n'), ((621, 642), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (628, 642), False, 'from torch import nn\n'), ((648, 657), 'yann.modules.Flatten', 'Flatten', ([], {}), '()\n', (655, 657), False, 'from yann.modules import Flatten\n'), ((663, 681), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(10)'], {}), '(320, 10)\n', (672, 681), False, 'from torch import nn\n'), ((889, 910), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (908, 910), False, 'from torch import nn\n'), ((758, 770), 'yann.datasets.TinyDigits', 'TinyDigits', ([], {}), '()\n', (768, 770), False, 'from yann.datasets import TinyDigits\n'), ((934, 943), 'yann.callbacks.History', 'History', ([], {}), '()\n', (941, 943), False, 'from yann.callbacks import History, HistoryPlotter, HistoryWriter, Logger, Checkpoint\n'), ((951, 976), 'yann.callbacks.HistoryPlotter', 'HistoryPlotter', ([], {'save': '(True)'}), '(save=True)\n', (965, 976), False, 'from yann.callbacks import History, HistoryPlotter, HistoryWriter, Logger, Checkpoint\n'), ((984, 999), 'yann.callbacks.HistoryWriter', 'HistoryWriter', ([], {}), '()\n', (997, 999), False, 'from yann.callbacks import History, HistoryPlotter, HistoryWriter, Logger, Checkpoint\n'), ((1007, 1028), 'yann.callbacks.Logger', 'Logger', ([], {'batch_freq': '(20)'}), '(batch_freq=20)\n', (1013, 1028), False, 'from yann.callbacks import History, HistoryPlotter, HistoryWriter, Logger, Checkpoint\n'), ((1036, 1048), 'yann.callbacks.Checkpoint', 'Checkpoint', ([], {}), '()\n', (1046, 1048), False, 'from yann.callbacks import History, HistoryPlotter, HistoryWriter, Logger, Checkpoint\n')] |
# Load pickled data
import pickle
import numpy as np
import tensorflow as tf
tf.python.control_flow_ops = tf
#from generator import generator
# WAITING FOR CODE PACKAGE TO SYNC UP
with open('train.p', mode='rb') as f:
data = pickle.load(f)
X_train, y_train = data['features'], data['labels']
# Initial Setup for Keras
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.convolutional import Conv2D,MaxPooling2D
from keras.utils.np_utils import to_categorical
# # Build Convolutional Pooling Neural Network with Dropout in Keras Here
# model = Sequential()
# model.add(Convolution2D(32, 3, 3, input_shape=(32, 32, 3)))
# model.add(MaxPooling2D((2, 2)))
# model.add(Dropout(0.5))
# model.add(Activation('relu'))
# model.add(Flatten())
# model.add(Dense(128))
# model.add(Activation('relu'))
# model.add(Dense(43))
# model.add(Activation('softmax'))
#LeNet
def LeNet(X_train,Y_train):
model=Sequential()
model.add(Conv2D(filters=5,kernel_size=(3,3),strides=(1,1),input_shape=X_train.shape[1:],padding='same',
data_format='channels_last',activation='relu',kernel_initializer='uniform')) #[None,28,28,5]
model.add(Dropout(0.2))
    model.add(MaxPooling2D((2,2)))         #pooling kernel size [None,14,14,5]
model.add(Conv2D(16,(3,3),strides=(1,1),data_format='channels_last',padding='same',activation='relu',kernel_initializer='uniform'))#[None,12,12,16]
model.add(Dropout(0.3))
model.add(MaxPooling2D(2,2)) #output_shape=[None,6,6,16]
model.add(Conv2D(32, (3, 3), strides=(1, 1), data_format='channels_last', padding='same', activation='relu',
kernel_initializer='uniform')) #[None,4,4,32]
model.add(Dropout(0.2))
# model.add(MaxPooling2D(2, 2))
model.add(Conv2D(100,(3,3),strides=(1,1),data_format='channels_last',activation='relu',kernel_initializer='uniform')) #[None,2,2,100]
model.add(Flatten(data_format='channels_last')) #[None,400]
model.add(Dense(168,activation='relu')) #[None,168]
model.add(Dense(84,activation='relu')) #[None,84]
    model.add(Dense(43,activation='softmax'))      #[None,43]
    # print the model summary
model.summary()
    # compile the model
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
return model
# preprocess data
X_normalized = np.array(X_train / 255.0 - 0.5 )
from sklearn.preprocessing import LabelBinarizer
label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_train)
model = LeNet(X_normalized,y_one_hot)
model.fit(X_normalized, y_one_hot, epochs=10, validation_split=0.2)
# model.compile('adam', 'categorical_crossentropy', ['accuracy'])
# history = model.fit(X_normalized, y_one_hot, epochs=10, validation_split=0.2)
with open('test.p', 'rb') as f:
data_test = pickle.load(f)
X_test = data_test['features']
y_test = data_test['labels']
# preprocess data
X_normalized_test = np.array(X_test / 255.0 - 0.5 )
y_one_hot_test = label_binarizer.fit_transform(y_test)
print("Testing")
metrics = model.evaluate(X_normalized_test, y_one_hot_test)
for metric_i in range(len(model.metrics_names)):
metric_name = model.metrics_names[metric_i]
metric_value = metrics[metric_i]
print('{}: {}'.format(metric_name, metric_value)) | [
"sklearn.preprocessing.LabelBinarizer",
"keras.layers.core.Flatten",
"pickle.load",
"keras.models.Sequential",
"numpy.array",
"keras.layers.convolutional.Conv2D",
"keras.layers.convolutional.MaxPooling2D",
"keras.layers.core.Dropout",
"keras.layers.core.Dense"
] | [((2525, 2556), 'numpy.array', 'np.array', (['(X_train / 255.0 - 0.5)'], {}), '(X_train / 255.0 - 0.5)\n', (2533, 2556), True, 'import numpy as np\n'), ((2626, 2642), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (2640, 2642), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((3111, 3141), 'numpy.array', 'np.array', (['(X_test / 255.0 - 0.5)'], {}), '(X_test / 255.0 - 0.5)\n', (3119, 3141), True, 'import numpy as np\n'), ((230, 244), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (241, 244), False, 'import pickle\n'), ((1068, 1080), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1078, 1080), False, 'from keras.models import Sequential\n'), ((2996, 3010), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3007, 3010), False, 'import pickle\n'), ((1095, 1283), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(5)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'input_shape': 'X_train.shape[1:]', 'padding': '"""same"""', 'data_format': '"""channels_last"""', 'activation': '"""relu"""', 'kernel_initializer': '"""uniform"""'}), "(filters=5, kernel_size=(3, 3), strides=(1, 1), input_shape=X_train.\n shape[1:], padding='same', data_format='channels_last', activation=\n 'relu', kernel_initializer='uniform')\n", (1101, 1283), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((1319, 1331), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1326, 1331), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((1347, 1367), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (1359, 1367), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((1405, 1538), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'strides': '(1, 1)', 'data_format': '"""channels_last"""', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""uniform"""'}), "(16, (3, 3), strides=(1, 1), data_format='channels_last', padding=\n 'same', activation='relu', kernel_initializer='uniform')\n", (1411, 1538), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((1557, 1569), 'keras.layers.core.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1564, 1569), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((1585, 1603), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1597, 1603), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((1648, 1781), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)', 'data_format': '"""channels_last"""', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""uniform"""'}), "(32, (3, 3), strides=(1, 1), data_format='channels_last', padding=\n 'same', activation='relu', kernel_initializer='uniform')\n", (1654, 1781), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((1830, 1842), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1837, 1842), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((1894, 2012), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(100)', '(3, 3)'], {'strides': '(1, 1)', 'data_format': '"""channels_last"""', 'activation': '"""relu"""', 'kernel_initializer': '"""uniform"""'}), "(100, (3, 3), strides=(1, 1), data_format='channels_last', activation\n ='relu', kernel_initializer='uniform')\n", (1900, 
2012), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((2033, 2069), 'keras.layers.core.Flatten', 'Flatten', ([], {'data_format': '"""channels_last"""'}), "(data_format='channels_last')\n", (2040, 2069), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((2098, 2127), 'keras.layers.core.Dense', 'Dense', (['(168)'], {'activation': '"""relu"""'}), "(168, activation='relu')\n", (2103, 2127), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((2156, 2184), 'keras.layers.core.Dense', 'Dense', (['(84)'], {'activation': '"""relu"""'}), "(84, activation='relu')\n", (2161, 2184), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((2213, 2244), 'keras.layers.core.Dense', 'Dense', (['(43)'], {'activation': '"""softmax"""'}), "(43, activation='softmax')\n", (2218, 2244), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n')] |
import numpy as np
import tensorflow as tf
def tensor_to_image(tensor):
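    # Clip to [0, 1], scale to [0, 255] and cast to uint8 so the tensor can be saved or displayed as an image.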
return (np.squeeze(tensor.numpy()).clip(0, 1) * 255).astype(np.uint8)
@tf.function
def denormalize_tensor(tensor):
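    # Reverse a (pixel - 127.5) / 128 style normalization, mapping values back toward the [0, 255] range.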
tensor = tf.math.multiply(tensor, 128)
return tf.math.add(tensor, 127.5)
@tf.function
def tensor_to_uint8(tensor):
return tf.image.convert_image_dtype(tensor, dtype=tf.uint8, saturate=True)
| [
"tensorflow.math.add",
"tensorflow.math.multiply",
"tensorflow.image.convert_image_dtype"
] | [((208, 237), 'tensorflow.math.multiply', 'tf.math.multiply', (['tensor', '(128)'], {}), '(tensor, 128)\n', (224, 237), True, 'import tensorflow as tf\n'), ((249, 275), 'tensorflow.math.add', 'tf.math.add', (['tensor', '(127.5)'], {}), '(tensor, 127.5)\n', (260, 275), True, 'import tensorflow as tf\n'), ((331, 398), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['tensor'], {'dtype': 'tf.uint8', 'saturate': '(True)'}), '(tensor, dtype=tf.uint8, saturate=True)\n', (359, 398), True, 'import tensorflow as tf\n')] |
from Jumpscale import j
import krakenex
from pykrakenapi import KrakenAPI
JSConfigClient = j.application.JSBaseConfigClass
class KrakenClient(JSConfigClient):
_SCHEMATEXT = """
@url = jumpscale.kraken.clients
name* = "" (S)
api_key_ = "" (S)
private_key_ = "" (S)
"""
def _init(self):
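        # Build the low-level krakenex client from the stored credentials and wrap it with the pykrakenapi helper.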
kraken_api = krakenex.API()
kraken_api.key = self.api_key_
kraken_api.secret = self.private_key_
self.api = KrakenAPI(kraken_api)
def test(self):
k = self.api
self._log_debug("open orders")
self._log_debug(k.get_open_orders())
self._log_debug("get account balance")
self._log_debug(k.get_account_balance())
| [
"krakenex.API",
"pykrakenapi.KrakenAPI"
] | [((358, 372), 'krakenex.API', 'krakenex.API', ([], {}), '()\n', (370, 372), False, 'import krakenex\n'), ((477, 498), 'pykrakenapi.KrakenAPI', 'KrakenAPI', (['kraken_api'], {}), '(kraken_api)\n', (486, 498), False, 'from pykrakenapi import KrakenAPI\n')] |
from queue import Queue
from base import NeighborSolutionLocator
from base.Gate import Gate
from base.MySolution import MySolution
from base.NeighborSolutionLocator import findBestNeighbor, findAmnestyNeighbor
import sys
class TabuSearch:
threshold = 1000
p = 0.9
def __init__(self, tabulist: list, maxIterations: int):
self.tabulist = tabulist
self.maxIterations = maxIterations
def mustStop(self, currentIteration, bestSolutionFound):
if currentIteration > self.maxIterations:
print('the search is bounded %s times' %self.maxIterations)
sys.exit(-1)
return True
shortpaths = bestSolutionFound.dist
currentLayers = bestSolutionFound.currentLayers
i = 0
while i < len(currentLayers):
q1 = currentLayers[i].control
q2 = currentLayers[i].target
l1 = bestSolutionFound.locations[q1]
l2 = bestSolutionFound.locations[q2]
paths = shortpaths[l1][l2].distance
if paths > 3:
return False
else:
g = Gate()
g.type = currentLayers[i].type
g.control = l1
g.target = l2
bestSolutionFound.circuits.append(g)
del currentLayers[i]
return True
def run(self, initialSolution, type, delta):
bestSolution = initialSolution
currentIteration = 0
while not self.mustStop(currentIteration, bestSolution):
candidateNeighbors = list(bestSolution.getNeighbors(type,delta))
solutionsInTabu = self.tabulist
bestneighborfound = findBestNeighbor(candidateNeighbors, solutionsInTabu,type)
if bestneighborfound == None:
neighbor = bestSolution.getNeighbors(type, delta)
if neighbor == None or len(neighbor) <= 0:
break
else:
bestneighborfound = findAmnestyNeighbor(
neighbor, solutionsInTabu,type)
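            # Keep the tabu list bounded to the three most recent swapped edges before recording this move.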
if len(self.tabulist)>=3:
self.tabulist.pop(0)
self.tabulist.append(bestSolution.swapped_edge)
bestSolution = MySolution(graph=bestneighborfound.graph, dist=bestneighborfound.dist,locations=bestneighborfound.locations,qubits=bestneighborfound.qubits, currentLayers=bestneighborfound.currentLayers,nextLayers_1=bestneighborfound.nextLayers_1)
bestSolution.circuits = bestneighborfound.circuits
bestSolution.swaps = bestneighborfound.swaps
bestSolution.score = bestneighborfound.score
bestSolution.subscore = bestneighborfound.subscore
bestSolution.swapped_edge = bestneighborfound.swapped_edge
# bestSolution=bestneighborfound
# currentSolution =bestSolution
currentIteration+=1
if self.mustStop(currentIteration, bestSolution):
return bestSolution
return None
| [
"base.NeighborSolutionLocator.findBestNeighbor",
"base.NeighborSolutionLocator.findAmnestyNeighbor",
"base.Gate.Gate",
"sys.exit",
"base.MySolution.MySolution"
] | [((607, 619), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (615, 619), False, 'import sys\n'), ((1681, 1740), 'base.NeighborSolutionLocator.findBestNeighbor', 'findBestNeighbor', (['candidateNeighbors', 'solutionsInTabu', 'type'], {}), '(candidateNeighbors, solutionsInTabu, type)\n', (1697, 1740), False, 'from base.NeighborSolutionLocator import findBestNeighbor, findAmnestyNeighbor\n'), ((2234, 2481), 'base.MySolution.MySolution', 'MySolution', ([], {'graph': 'bestneighborfound.graph', 'dist': 'bestneighborfound.dist', 'locations': 'bestneighborfound.locations', 'qubits': 'bestneighborfound.qubits', 'currentLayers': 'bestneighborfound.currentLayers', 'nextLayers_1': 'bestneighborfound.nextLayers_1'}), '(graph=bestneighborfound.graph, dist=bestneighborfound.dist,\n locations=bestneighborfound.locations, qubits=bestneighborfound.qubits,\n currentLayers=bestneighborfound.currentLayers, nextLayers_1=\n bestneighborfound.nextLayers_1)\n', (2244, 2481), False, 'from base.MySolution import MySolution\n'), ((1119, 1125), 'base.Gate.Gate', 'Gate', ([], {}), '()\n', (1123, 1125), False, 'from base.Gate import Gate\n'), ((1995, 2047), 'base.NeighborSolutionLocator.findAmnestyNeighbor', 'findAmnestyNeighbor', (['neighbor', 'solutionsInTabu', 'type'], {}), '(neighbor, solutionsInTabu, type)\n', (2014, 2047), False, 'from base.NeighborSolutionLocator import findBestNeighbor, findAmnestyNeighbor\n')] |
#!/usr/bin/env python3
import sys, json
import whiplash
try:
assert len(sys.argv) == 4
except:
print("Usage: ./commit_result.py input.json executable_id output.json")
sys.exit()
# Login as test user
db = whiplash.db("localhost", 1337, username="test", password="<PASSWORD>")
# Split input into model and property
input = json.load(open(sys.argv[1],'r'))
params = input.pop('params')
input_model = input
# Commit input model
input_model_id = db.models.commit([input_model])['ids'][0]
# Submit query (property creation)
executable_id = sys.argv[2]
filters = {
'input_model': {'_id': input_model_id},
'executable': {'_id': executable_id},
'params': params,
'output_model': {}
}
settings = {
'manual': 1
}
print(db.submit(filters, settings))
# Get property ID
filter = {
'input_model_id': input_model_id,
'executable_id': executable_id
}
for key in params:
filter['params.'+key] = params[key]
print(db.properties.query(filter, ['_id']))
property_id = db.properties.query(filter, ['_id'])[0]['_id']
# Commit output model
output_model = json.load(open(sys.argv[3],'r'))
output_model['property_id'] = property_id
output_model_id = db.models.commit([output_model])['ids'][0]
# Update property with output model ID and set as resolved
db.properties.update({'_id': property_id}, {'output_model_id': output_model_id, 'status': 'resolved'})
| [
"whiplash.db",
"sys.exit"
] | [((218, 288), 'whiplash.db', 'whiplash.db', (['"""localhost"""', '(1337)'], {'username': '"""test"""', 'password': '"""<PASSWORD>"""'}), "('localhost', 1337, username='test', password='<PASSWORD>')\n", (229, 288), False, 'import whiplash\n'), ((180, 190), 'sys.exit', 'sys.exit', ([], {}), '()\n', (188, 190), False, 'import sys, json\n')] |
"""
X.509 Certificate Tests
"""
import base64
import subprocess
import datetime
from asn1crypto import pem
from asn1crypto.x509 import Certificate, TbsCertificate, Time, Name
from asn1crypto.keys import RSAPublicKey
from asn1crypto.csr import CertificationRequest, CertificationRequestInfo
import pkcs11
from pkcs11.util.rsa import encode_rsa_public_key
from pkcs11.util.dsa import decode_dsa_signature
from pkcs11.util.ec import decode_ecdsa_signature
from pkcs11.util.x509 import decode_x509_certificate, decode_x509_public_key
from pkcs11 import (
Attribute,
KeyType,
Mechanism,
)
from . import TestCase, Not, Only, requires, OPENSSL
# X.509 self-signed certificate (generated with OpenSSL)
# openssl req -x509 \
# -newkey rsa:512 \
# -keyout key.pem \
# -out cert.pem \
# -days 365 \
# -nodes
_, _, CERT = pem.unarmor(b"""
-----BEGIN CERTIFICATE-----
MI<KEY>IB<KEY>
-----END CERTIFICATE-----
""")
class X509Tests(TestCase):
def test_import_ca_certificate_easy(self):
cert = self.session.create_object(decode_x509_certificate(CERT))
self.assertIsInstance(cert, pkcs11.Certificate)
@Not.nfast
@Not.opencryptoki
def test_import_ca_certificate(self):
cert = self.session.create_object(
decode_x509_certificate(CERT, extended_set=True))
self.assertIsInstance(cert, pkcs11.Certificate)
self.assertEqual(cert[Attribute.HASH_OF_ISSUER_PUBLIC_KEY],
b'\xf9\xc1\xb6\xe3\x43\xf3\xcf\x4c\xba\x8a'
b'\x0b\x66\x86\x79\x35\xfb\x52\x85\xbf\xa8')
# Cert is self signed
self.assertEqual(cert[Attribute.HASH_OF_SUBJECT_PUBLIC_KEY],
b'\xf9\xc1\xb6\xe3\x43\xf3\xcf\x4c\xba\x8a'
b'\x0b\x66\x86\x79\x35\xfb\x52\x85\xbf\xa8')
@requires(Mechanism.SHA1_RSA_PKCS)
def test_verify_certificate_rsa(self):
# Warning: proof of concept code only!
x509 = Certificate.load(CERT)
key = self.session.create_object(decode_x509_public_key(CERT))
self.assertIsInstance(key, pkcs11.PublicKey)
value = x509['tbs_certificate'].dump()
signature = x509.signature
assert x509.signature_algo == 'rsassa_pkcs1v15'
assert x509.hash_algo == 'sha1'
self.assertTrue(key.verify(value, signature,
mechanism=Mechanism.SHA1_RSA_PKCS))
@requires(Mechanism.DSA_SHA1)
def test_verify_certificate_dsa(self):
# Warning: proof of concept code only!
CERT = base64.b64decode("""
MIIDbjCCAy6gAwIBAgIJAKPBInGiPjXNMAkGByqGSM44BAMwRTELMAkGA1UEBhMC
QVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdp
dHMgUHR5IEx0ZDAeFw0xNzA3MDMxMjI1MTBaFw0xOTA3MDMxMjI1MTBaMEUxCzAJ
BgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5l
dCBXaWRnaXRzIFB0eSBMdGQ<KEY>MSEwHwYD
VQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGSCCQCjwSJxoj41zTAMBgNVHRME
BTADAQH/MAkGByqGSM44BAMDLwAwLAIUNE+zTuFe01v0BRTLarPtGK8ZHHcCFB9Y
YAwtpblAgUEdGuoAtnoEQ2tc
""")
x509 = Certificate.load(CERT)
key = self.session.create_object(decode_x509_public_key(CERT))
self.assertIsInstance(key, pkcs11.PublicKey)
value = x509['tbs_certificate'].dump()
assert x509.signature_algo == 'dsa'
assert x509.hash_algo == 'sha1'
signature = decode_dsa_signature(x509.signature)
self.assertTrue(key.verify(value, signature,
mechanism=Mechanism.DSA_SHA1))
@requires(Mechanism.ECDSA_SHA1)
def test_verify_certificate_ecdsa(self):
# Warning: proof of concept code only!
CERT = base64.b64decode("""
MIIDGjCCAsKgAwIBAgIJAL+PbwiJUZB1MAkGByqGSM49BAEwRTELMAkGA1UEBhMC
QVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdp
dHMgUHR5IEx0ZDAeFw0xNzA3MDMxMTUxMTBaFw0xOTA3MDMxMTUxMTBaMEUxCzAJ
BgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5l
dCBXaWRnaXRzIFB0eSBMdGQwggFLMIIBAwYHKoZIzj0CATCB9wIBATAsBgcqhkjO
PQEBAiEA/////wAAAAEAAAAAAAAAAAAAAAD///////////////<KEY>
""")
x509 = Certificate.load(CERT)
key = self.session.create_object(decode_x509_public_key(CERT))
self.assertIsInstance(key, pkcs11.PublicKey)
value = x509['tbs_certificate'].dump()
assert x509.signature_algo == 'ecdsa'
assert x509.hash_algo == 'sha1'
signature = decode_ecdsa_signature(x509.signature)
self.assertTrue(key.verify(value, signature,
mechanism=Mechanism.ECDSA_SHA1))
@Only.openssl
@requires(Mechanism.RSA_PKCS_KEY_PAIR_GEN, Mechanism.SHA1_RSA_PKCS)
def test_self_sign_certificate(self):
# Warning: proof of concept code only!
pub, priv = self.session.generate_keypair(KeyType.RSA, 1024)
tbs = TbsCertificate({
'version': 'v1',
'serial_number': 1,
'issuer': Name.build({
'common_name': 'Test Certificate',
}),
'subject': Name.build({
'common_name': 'Test Certificate',
}),
'signature': {
'algorithm': 'sha1_rsa',
'parameters': None,
},
'validity': {
'not_before': Time({
'utc_time': datetime.datetime(2017, 1, 1, 0, 0),
}),
'not_after': Time({
'utc_time': datetime.datetime(2038, 12, 31, 23, 59),
}),
},
'subject_public_key_info': {
'algorithm': {
'algorithm': 'rsa',
'parameters': None,
},
'public_key': RSAPublicKey.load(encode_rsa_public_key(pub)),
}
})
# Sign the TBS Certificate
value = priv.sign(tbs.dump(),
mechanism=Mechanism.SHA1_RSA_PKCS)
cert = Certificate({
'tbs_certificate': tbs,
'signature_algorithm': {
'algorithm': 'sha1_rsa',
'parameters': None,
},
'signature_value': value,
})
# Pipe our certificate to OpenSSL to verify it
with subprocess.Popen((OPENSSL, 'verify'),
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL) as proc:
proc.stdin.write(pem.armor('CERTIFICATE', cert.dump()))
proc.stdin.close()
self.assertEqual(proc.wait(), 0)
@Only.openssl
@requires(Mechanism.RSA_PKCS_KEY_PAIR_GEN, Mechanism.SHA1_RSA_PKCS)
def test_sign_csr(self):
# Warning: proof of concept code only!
pub, priv = self.session.generate_keypair(KeyType.RSA, 1024)
info = CertificationRequestInfo({
'version': 0,
'subject': Name.build({
'common_name': 'Test Certificate',
}),
'subject_pk_info': {
'algorithm': {
'algorithm': 'rsa',
'parameters': None,
},
'public_key': RSAPublicKey.load(encode_rsa_public_key(pub)),
},
})
# Sign the CSR Info
value = priv.sign(info.dump(),
mechanism=Mechanism.SHA1_RSA_PKCS)
csr = CertificationRequest({
'certification_request_info': info,
'signature_algorithm': {
'algorithm': 'sha1_rsa',
'parameters': None,
},
'signature': value,
})
# Pipe our CSR to OpenSSL to verify it
with subprocess.Popen((OPENSSL, 'req',
'-inform', 'der',
'-noout',
'-verify'),
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL) as proc:
proc.stdin.write(csr.dump())
proc.stdin.close()
self.assertEqual(proc.wait(), 0)
| [
"datetime.datetime",
"asn1crypto.pem.unarmor",
"asn1crypto.x509.Certificate",
"asn1crypto.csr.CertificationRequest",
"subprocess.Popen",
"asn1crypto.x509.Name.build",
"pkcs11.util.rsa.encode_rsa_public_key",
"base64.b64decode",
"pkcs11.util.dsa.decode_dsa_signature",
"pkcs11.util.x509.decode_x509_public_key",
"pkcs11.util.ec.decode_ecdsa_signature",
"pkcs11.util.x509.decode_x509_certificate",
"asn1crypto.x509.Certificate.load"
] | [((838, 938), 'asn1crypto.pem.unarmor', 'pem.unarmor', (["b'\\n-----BEGIN CERTIFICATE-----\\nMI<KEY>IB<KEY>\\n-----END CERTIFICATE-----\\n'"], {}), "(\n b'\\n-----BEGIN CERTIFICATE-----\\nMI<KEY>IB<KEY>\\n-----END CERTIFICATE-----\\n'\n )\n", (849, 938), False, 'from asn1crypto import pem\n'), ((1967, 1989), 'asn1crypto.x509.Certificate.load', 'Certificate.load', (['CERT'], {}), '(CERT)\n', (1983, 1989), False, 'from asn1crypto.x509 import Certificate, TbsCertificate, Time, Name\n'), ((2559, 3118), 'base64.b64decode', 'base64.b64decode', (['"""\n MIIDbjCCAy6gAwIBAgIJAKPBInGiPjXNMAkGByqGSM44BAMwRTELMAkGA1UEBhMC\n QVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdp\n dHMgUHR5IEx0ZDAeFw0xNzA3MDMxMjI1MTBaFw0xOTA3MDMxMjI1MTBaMEUxCzAJ\n BgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5l\n dCBXaWRnaXRzIFB0eSBMdGQ<KEY>MSEwHwYD\n VQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGSCCQCjwSJxoj41zTAMBgNVHRME\n BTADAQH/MAkGByqGSM44BAMDLwAwLAIUNE+zTuFe01v0BRTLarPtGK8ZHHcCFB9Y\n YAwtpblAgUEdGuoAtnoEQ2tc\n """'], {}), '(\n """\n MIIDbjCCAy6gAwIBAgIJAKPBInGiPjXNMAkGByqGSM44BAMwRTELMAkGA1UEBhMC\n QVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdp\n dHMgUHR5IEx0ZDAeFw0xNzA3MDMxMjI1MTBaFw0xOTA3MDMxMjI1MTBaMEUxCzAJ\n BgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5l\n dCBXaWRnaXRzIFB0eSBMdGQ<KEY>MSEwHwYD\n VQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGSCCQCjwSJxoj41zTAMBgNVHRME\n BTADAQH/MAkGByqGSM44BAMDLwAwLAIUNE+zTuFe01v0BRTLarPtGK8ZHHcCFB9Y\n YAwtpblAgUEdGuoAtnoEQ2tc\n """\n )\n', (2575, 3118), False, 'import base64\n'), ((3125, 3147), 'asn1crypto.x509.Certificate.load', 'Certificate.load', (['CERT'], {}), '(CERT)\n', (3141, 3147), False, 'from asn1crypto.x509 import Certificate, TbsCertificate, Time, Name\n'), ((3426, 3462), 'pkcs11.util.dsa.decode_dsa_signature', 'decode_dsa_signature', (['x509.signature'], {}), '(x509.signature)\n', (3446, 3462), False, 'from pkcs11.util.dsa import decode_dsa_signature\n'), ((3727, 4199), 'base64.b64decode', 'base64.b64decode', (['"""\n MIIDGjCCAsKgAwIBAgIJAL+PbwiJUZB1MAkGByqGSM49BAEwRTELMAkGA1UEBhMC\n QVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdp\n dHMgUHR5IEx0ZDAeFw0xNzA3MDMxMTUxMTBaFw0xOTA3MDMxMTUxMTBaMEUxCzAJ\n BgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5l\n dCBXaWRnaXRzIFB0eSBMdGQwggFLMIIBAwYHKoZIzj0CATCB9wIBATAsBgcqhkjO\n PQEBAiEA/////wAAAAEAAAAAAAAAAAAAAAD///////////////<KEY>\n """'], {}), '(\n """\n MIIDGjCCAsKgAwIBAgIJAL+PbwiJUZB1MAkGByqGSM49BAEwRTELMAkGA1UEBhMC\n QVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdp\n dHMgUHR5IEx0ZDAeFw0xNzA3MDMxMTUxMTBaFw0xOTA3MDMxMTUxMTBaMEUxCzAJ\n BgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5l\n dCBXaWRnaXRzIFB0eSBMdGQwggFLMIIBAwYHKoZIzj0CATCB9wIBATAsBgcqhkjO\n PQEBAiEA/////wAAAAEAAAAAAAAAAAAAAAD///////////////<KEY>\n """\n )\n', (3743, 4199), False, 'import base64\n'), ((4206, 4228), 'asn1crypto.x509.Certificate.load', 'Certificate.load', (['CERT'], {}), '(CERT)\n', (4222, 4228), False, 'from asn1crypto.x509 import Certificate, TbsCertificate, Time, Name\n'), ((4509, 4547), 'pkcs11.util.ec.decode_ecdsa_signature', 'decode_ecdsa_signature', (['x509.signature'], {}), '(x509.signature)\n', (4531, 4547), False, 'from pkcs11.util.ec import decode_ecdsa_signature\n'), ((6057, 6194), 'asn1crypto.x509.Certificate', 'Certificate', (["{'tbs_certificate': tbs, 'signature_algorithm': {'algorithm': 'sha1_rsa',\n 'parameters': None}, 'signature_value': value}"], {}), "({'tbs_certificate': tbs, 'signature_algorithm': {'algorithm':\n 
'sha1_rsa', 'parameters': None}, 'signature_value': value})\n", (6068, 6194), False, 'from asn1crypto.x509 import Certificate, TbsCertificate, Time, Name\n'), ((7474, 7630), 'asn1crypto.csr.CertificationRequest', 'CertificationRequest', (["{'certification_request_info': info, 'signature_algorithm': {'algorithm':\n 'sha1_rsa', 'parameters': None}, 'signature': value}"], {}), "({'certification_request_info': info,\n 'signature_algorithm': {'algorithm': 'sha1_rsa', 'parameters': None},\n 'signature': value})\n", (7494, 7630), False, 'from asn1crypto.csr import CertificationRequest, CertificationRequestInfo\n'), ((1048, 1077), 'pkcs11.util.x509.decode_x509_certificate', 'decode_x509_certificate', (['CERT'], {}), '(CERT)\n', (1071, 1077), False, 'from pkcs11.util.x509 import decode_x509_certificate, decode_x509_public_key\n'), ((1270, 1318), 'pkcs11.util.x509.decode_x509_certificate', 'decode_x509_certificate', (['CERT'], {'extended_set': '(True)'}), '(CERT, extended_set=True)\n', (1293, 1318), False, 'from pkcs11.util.x509 import decode_x509_certificate, decode_x509_public_key\n'), ((2031, 2059), 'pkcs11.util.x509.decode_x509_public_key', 'decode_x509_public_key', (['CERT'], {}), '(CERT)\n', (2053, 2059), False, 'from pkcs11.util.x509 import decode_x509_certificate, decode_x509_public_key\n'), ((3189, 3217), 'pkcs11.util.x509.decode_x509_public_key', 'decode_x509_public_key', (['CERT'], {}), '(CERT)\n', (3211, 3217), False, 'from pkcs11.util.x509 import decode_x509_certificate, decode_x509_public_key\n'), ((4270, 4298), 'pkcs11.util.x509.decode_x509_public_key', 'decode_x509_public_key', (['CERT'], {}), '(CERT)\n', (4292, 4298), False, 'from pkcs11.util.x509 import decode_x509_certificate, decode_x509_public_key\n'), ((6354, 6446), 'subprocess.Popen', 'subprocess.Popen', (["(OPENSSL, 'verify')"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.DEVNULL'}), "((OPENSSL, 'verify'), stdin=subprocess.PIPE, stdout=\n subprocess.DEVNULL)\n", (6370, 6446), False, 'import subprocess\n'), ((7778, 7905), 'subprocess.Popen', 'subprocess.Popen', (["(OPENSSL, 'req', '-inform', 'der', '-noout', '-verify')"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.DEVNULL'}), "((OPENSSL, 'req', '-inform', 'der', '-noout', '-verify'),\n stdin=subprocess.PIPE, stdout=subprocess.DEVNULL)\n", (7794, 7905), False, 'import subprocess\n'), ((5034, 5081), 'asn1crypto.x509.Name.build', 'Name.build', (["{'common_name': 'Test Certificate'}"], {}), "({'common_name': 'Test Certificate'})\n", (5044, 5081), False, 'from asn1crypto.x509 import Certificate, TbsCertificate, Time, Name\n'), ((5137, 5184), 'asn1crypto.x509.Name.build', 'Name.build', (["{'common_name': 'Test Certificate'}"], {}), "({'common_name': 'Test Certificate'})\n", (5147, 5184), False, 'from asn1crypto.x509 import Certificate, TbsCertificate, Time, Name\n'), ((6984, 7031), 'asn1crypto.x509.Name.build', 'Name.build', (["{'common_name': 'Test Certificate'}"], {}), "({'common_name': 'Test Certificate'})\n", (6994, 7031), False, 'from asn1crypto.x509 import Certificate, TbsCertificate, Time, Name\n'), ((5852, 5878), 'pkcs11.util.rsa.encode_rsa_public_key', 'encode_rsa_public_key', (['pub'], {}), '(pub)\n', (5873, 5878), False, 'from pkcs11.util.rsa import encode_rsa_public_key\n'), ((7275, 7301), 'pkcs11.util.rsa.encode_rsa_public_key', 'encode_rsa_public_key', (['pub'], {}), '(pub)\n', (7296, 7301), False, 'from pkcs11.util.rsa import encode_rsa_public_key\n'), ((5431, 5466), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)', '(0)', '(0)'], {}), 
'(2017, 1, 1, 0, 0)\n', (5448, 5466), False, 'import datetime\n'), ((5557, 5596), 'datetime.datetime', 'datetime.datetime', (['(2038)', '(12)', '(31)', '(23)', '(59)'], {}), '(2038, 12, 31, 23, 59)\n', (5574, 5596), False, 'import datetime\n')] |
from django import forms
class SignUpIn(forms.Form):
username = forms.CharField(label="Username", max_length=25,
widget=forms.TextInput(attrs={'placeholder': 'Username'}))
password = forms.CharField(label="Password", max_length=100,
widget=forms.TextInput(attrs={'placeholder': 'Password'}))
class ChooseRoom(forms.Form):
room = forms.CharField(label="Room name", max_length=25,
widget=forms.TextInput(attrs={'placeholder': 'Room name'}))
class DelaySend(forms.Form):
date = forms.DateField(widget=forms.widgets.DateInput(attrs={'type': 'date'}))
# time = forms.TimeField(widget=forms.widgets.TimeInput(attrs={'type': 'time'}), format='%Y-%m-%d %H:%M')
time = forms.TimeField(widget=forms.widgets.TimeInput(attrs={'type': 'time', 'format': '%H:%M:%S'}))
message = forms.CharField(label="Message", max_length=500,
widget=forms.Textarea(attrs={'placeholder': 'Message'}))
CHOICES = [('Anon', 'Anon'), ('NotAnon', 'NotAnon')]
anon = forms.ChoiceField(widget=forms.RadioSelect, choices=CHOICES)
| [
"django.forms.widgets.DateInput",
"django.forms.widgets.TimeInput",
"django.forms.ChoiceField",
"django.forms.Textarea",
"django.forms.TextInput"
] | [((1093, 1153), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'widget': 'forms.RadioSelect', 'choices': 'CHOICES'}), '(widget=forms.RadioSelect, choices=CHOICES)\n', (1110, 1153), False, 'from django import forms\n'), ((157, 207), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Username'}"}), "(attrs={'placeholder': 'Username'})\n", (172, 207), False, 'from django import forms\n'), ((312, 362), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Password'}"}), "(attrs={'placeholder': 'Password'})\n", (327, 362), False, 'from django import forms\n'), ((491, 542), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Room name'}"}), "(attrs={'placeholder': 'Room name'})\n", (506, 542), False, 'from django import forms\n'), ((609, 656), 'django.forms.widgets.DateInput', 'forms.widgets.DateInput', ([], {'attrs': "{'type': 'date'}"}), "(attrs={'type': 'date'})\n", (632, 656), False, 'from django import forms\n'), ((803, 872), 'django.forms.widgets.TimeInput', 'forms.widgets.TimeInput', ([], {'attrs': "{'type': 'time', 'format': '%H:%M:%S'}"}), "(attrs={'type': 'time', 'format': '%H:%M:%S'})\n", (826, 872), False, 'from django import forms\n'), ((975, 1023), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'placeholder': 'Message'}"}), "(attrs={'placeholder': 'Message'})\n", (989, 1023), False, 'from django import forms\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Learning.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1046, 507)
self.resultSave_Button = QtWidgets.QPushButton(Dialog)
self.resultSave_Button.setEnabled(False)
self.resultSave_Button.setGeometry(QtCore.QRect(330, 460, 150, 40))
self.resultSave_Button.setCheckable(False)
self.resultSave_Button.setObjectName("resultSave_Button")
self.pause_Button = QtWidgets.QPushButton(Dialog)
self.pause_Button.setEnabled(False)
self.pause_Button.setGeometry(QtCore.QRect(170, 460, 150, 40))
self.pause_Button.setObjectName("pause_Button")
self.Status_GroupBox = QtWidgets.QGroupBox(Dialog)
self.Status_GroupBox.setGeometry(QtCore.QRect(10, 10, 1021, 441))
self.Status_GroupBox.setObjectName("Status_GroupBox")
self.displayMode_Label = QtWidgets.QLabel(self.Status_GroupBox)
self.displayMode_Label.setGeometry(QtCore.QRect(890, 100, 81, 16))
self.displayMode_Label.setObjectName("displayMode_Label")
self.displayMode_ComboBox = QtWidgets.QComboBox(self.Status_GroupBox)
self.displayMode_ComboBox.setGeometry(QtCore.QRect(890, 120, 121, 22))
self.displayMode_ComboBox.setObjectName("displayMode_ComboBox")
self.displayMode_ComboBox.addItem("")
self.displayMode_ComboBox.addItem("")
self.displayMode_ComboBox.addItem("")
self.yAxis_label = QtWidgets.QLabel(self.Status_GroupBox)
self.yAxis_label.setGeometry(QtCore.QRect(890, 150, 47, 13))
self.yAxis_label.setObjectName("yAxis_label")
self.label_3 = QtWidgets.QLabel(self.Status_GroupBox)
self.label_3.setGeometry(QtCore.QRect(940, 170, 21, 16))
self.label_3.setObjectName("label_3")
self.yAxisMin_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
self.yAxisMin_LineEdit.setGeometry(QtCore.QRect(890, 170, 41, 20))
self.yAxisMin_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.yAxisMin_LineEdit.setObjectName("yAxisMin_LineEdit")
self.yAxisMax_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
self.yAxisMax_LineEdit.setGeometry(QtCore.QRect(960, 170, 41, 20))
self.yAxisMax_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.yAxisMax_LineEdit.setObjectName("yAxisMax_LineEdit")
self.cycle_CheckBox = QtWidgets.QCheckBox(self.Status_GroupBox)
self.cycle_CheckBox.setGeometry(QtCore.QRect(890, 200, 101, 17))
self.cycle_CheckBox.setObjectName("cycle_CheckBox")
self.result_Display_Button = QtWidgets.QPushButton(self.Status_GroupBox)
self.result_Display_Button.setGeometry(QtCore.QRect(942, 217, 70, 23))
self.result_Display_Button.setObjectName("result_Display_Button")
self.totalEpoch_Label = QtWidgets.QLabel(self.Status_GroupBox)
self.totalEpoch_Label.setGeometry(QtCore.QRect(890, 290, 120, 13))
self.totalEpoch_Label.setObjectName("totalEpoch_Label")
self.currentLearningSetup_Label = QtWidgets.QLabel(self.Status_GroupBox)
self.currentLearningSetup_Label.setGeometry(QtCore.QRect(890, 340, 120, 13))
self.currentLearningSetup_Label.setObjectName("currentLearningSetup_Label")
self.currentEpoch_Label = QtWidgets.QLabel(self.Status_GroupBox)
self.currentEpoch_Label.setGeometry(QtCore.QRect(890, 390, 120, 13))
self.currentEpoch_Label.setObjectName("currentEpoch_Label")
self.currentEpoch_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
self.currentEpoch_LineEdit.setGeometry(QtCore.QRect(890, 410, 121, 20))
self.currentEpoch_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.currentEpoch_LineEdit.setReadOnly(True)
self.currentEpoch_LineEdit.setObjectName("currentEpoch_LineEdit")
self.totalEpoch_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
self.totalEpoch_LineEdit.setGeometry(QtCore.QRect(890, 310, 121, 20))
self.totalEpoch_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.totalEpoch_LineEdit.setReadOnly(True)
self.totalEpoch_LineEdit.setObjectName("totalEpoch_LineEdit")
self.currentLearningSetup_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
self.currentLearningSetup_LineEdit.setGeometry(QtCore.QRect(890, 360, 121, 20))
self.currentLearningSetup_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.currentLearningSetup_LineEdit.setReadOnly(True)
self.currentLearningSetup_LineEdit.setObjectName("currentLearningSetup_LineEdit")
self.graph_Widget = QtWidgets.QWidget(self.Status_GroupBox)
self.graph_Widget.setGeometry(QtCore.QRect(10, 20, 871, 411))
self.graph_Widget.setObjectName("graph_Widget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.graph_Widget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 431, 411))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.weightGraphLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.weightGraphLayout.setContentsMargins(0, 0, 0, 0)
self.weightGraphLayout.setObjectName("weightGraphLayout")
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.graph_Widget)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(440, 0, 421, 411))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.progressGraphLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.progressGraphLayout.setContentsMargins(0, 0, 0, 0)
self.progressGraphLayout.setObjectName("progressGraphLayout")
self.macro_Label = QtWidgets.QLabel(self.Status_GroupBox)
self.macro_Label.setGeometry(QtCore.QRect(890, 240, 120, 13))
self.macro_Label.setObjectName("macro_Label")
self.macro_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
self.macro_LineEdit.setGeometry(QtCore.QRect(890, 260, 121, 20))
self.macro_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.macro_LineEdit.setReadOnly(True)
self.macro_LineEdit.setObjectName("macro_LineEdit")
self.displayWeight_Label = QtWidgets.QLabel(self.Status_GroupBox)
self.displayWeight_Label.setGeometry(QtCore.QRect(890, 20, 81, 16))
self.displayWeight_Label.setObjectName("displayWeight_Label")
self.weightName_ComboBox = QtWidgets.QComboBox(self.Status_GroupBox)
self.weightName_ComboBox.setGeometry(QtCore.QRect(890, 40, 121, 22))
self.weightName_ComboBox.setObjectName("weightName_ComboBox")
self.weight_Display_Button = QtWidgets.QPushButton(self.Status_GroupBox)
self.weight_Display_Button.setGeometry(QtCore.QRect(940, 70, 70, 23))
self.weight_Display_Button.setObjectName("weight_Display_Button")
self.start_Button = QtWidgets.QPushButton(Dialog)
self.start_Button.setGeometry(QtCore.QRect(10, 460, 150, 40))
self.start_Button.setObjectName("start_Button")
self.exit_Button = QtWidgets.QPushButton(Dialog)
self.exit_Button.setGeometry(QtCore.QRect(880, 460, 150, 40))
self.exit_Button.setObjectName("exit_Button")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.resultSave_Button.setText(_translate("Dialog", "Result Save"))
self.pause_Button.setText(_translate("Dialog", "Pause"))
self.Status_GroupBox.setTitle(_translate("Dialog", "Status"))
self.displayMode_Label.setText(_translate("Dialog", "Progress Display"))
self.displayMode_ComboBox.setItemText(0, _translate("Dialog", "Mean Squared Error"))
self.displayMode_ComboBox.setItemText(1, _translate("Dialog", "Cross Entropy"))
self.displayMode_ComboBox.setItemText(2, _translate("Dialog", "Semantic Stress"))
self.yAxis_label.setText(_translate("Dialog", "Y-Axis"))
self.label_3.setText(_translate("Dialog", "~"))
self.yAxisMin_LineEdit.setText(_translate("Dialog", "-0.01"))
self.yAxisMax_LineEdit.setText(_translate("Dialog", "1.01"))
self.cycle_CheckBox.setText(_translate("Dialog", "Use Cycle"))
self.result_Display_Button.setText(_translate("Dialog", "Display"))
self.totalEpoch_Label.setText(_translate("Dialog", "Total Epoch"))
self.currentLearningSetup_Label.setText(_translate("Dialog", "Current Learning Setup"))
self.currentEpoch_Label.setText(_translate("Dialog", "Current Epoch"))
self.currentEpoch_LineEdit.setText(_translate("Dialog", "-"))
self.totalEpoch_LineEdit.setText(_translate("Dialog", "-"))
self.currentLearningSetup_LineEdit.setText(_translate("Dialog", "-"))
self.macro_Label.setText(_translate("Dialog", "Macro Status"))
self.macro_LineEdit.setText(_translate("Dialog", "-"))
self.displayWeight_Label.setText(_translate("Dialog", "Weight Display"))
self.weight_Display_Button.setText(_translate("Dialog", "Display"))
self.start_Button.setText(_translate("Dialog", "Start"))
self.exit_Button.setText(_translate("Dialog", "Exit"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QDialog",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QCheckBox",
"PyQt5.QtWidgets.QPushButton"
] | [((9757, 9789), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (9779, 9789), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9804, 9823), 'PyQt5.QtWidgets.QDialog', 'QtWidgets.QDialog', ([], {}), '()\n', (9821, 9823), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((413, 442), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Dialog'], {}), '(Dialog)\n', (434, 442), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((718, 747), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Dialog'], {}), '(Dialog)\n', (739, 747), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((954, 981), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['Dialog'], {}), '(Dialog)\n', (973, 981), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1154, 1192), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (1170, 1192), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1373, 1414), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (1392, 1414), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1737, 1775), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (1753, 1775), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1925, 1963), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (1941, 1963), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2111, 2152), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (2130, 2152), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2398, 2439), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (2417, 2439), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2682, 2723), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (2701, 2723), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2897, 2940), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (2918, 2940), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3129, 3167), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (3145, 3167), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3352, 3390), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (3368, 3390), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3597, 3635), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (3613, 3635), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3821, 3862), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (3840, 3862), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4181, 4222), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (4200, 4222), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4541, 4582), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (4560, 4582), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((4934, 4973), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (4951, 4973), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5139, 5175), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.graph_Widget'], {}), '(self.graph_Widget)\n', (5156, 5175), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5360, 5408), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (5381, 5408), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5578, 5614), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.graph_Widget'], {}), '(self.graph_Widget)\n', (5595, 5614), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5809, 5859), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.verticalLayoutWidget_2'], {}), '(self.verticalLayoutWidget_2)\n', (5830, 5859), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6024, 6062), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (6040, 6062), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6220, 6261), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (6239, 6261), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6545, 6583), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (6561, 6583), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6768, 6809), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (6787, 6809), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6997, 7040), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.Status_GroupBox'], {}), '(self.Status_GroupBox)\n', (7018, 7040), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7224, 7253), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Dialog'], {}), '(Dialog)\n', (7245, 7253), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7410, 7439), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Dialog'], {}), '(Dialog)\n', (7431, 7439), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7613, 7658), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Dialog'], {}), '(Dialog)\n', (7650, 7658), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((537, 568), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(330)', '(460)', '(150)', '(40)'], {}), '(330, 460, 150, 40)\n', (549, 568), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((832, 863), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(170)', '(460)', '(150)', '(40)'], {}), '(170, 460, 150, 40)\n', (844, 863), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1024, 1055), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(1021)', '(441)'], {}), '(10, 10, 1021, 441)\n', (1036, 1055), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1237, 1267), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(100)', '(81)', '(16)'], {}), '(890, 100, 81, 16)\n', (1249, 1267), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1462, 1493), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(120)', '(121)', '(22)'], {}), '(890, 120, 121, 22)\n', (1474, 1493), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1814, 1844), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(150)', 
'(47)', '(13)'], {}), '(890, 150, 47, 13)\n', (1826, 1844), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1998, 2028), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(940)', '(170)', '(21)', '(16)'], {}), '(940, 170, 21, 16)\n', (2010, 2028), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2197, 2227), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(170)', '(41)', '(20)'], {}), '(890, 170, 41, 20)\n', (2209, 2227), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2484, 2514), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(960)', '(170)', '(41)', '(20)'], {}), '(960, 170, 41, 20)\n', (2496, 2514), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2765, 2796), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(200)', '(101)', '(17)'], {}), '(890, 200, 101, 17)\n', (2777, 2796), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2989, 3019), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(942)', '(217)', '(70)', '(23)'], {}), '(942, 217, 70, 23)\n', (3001, 3019), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3211, 3242), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(290)', '(120)', '(13)'], {}), '(890, 290, 120, 13)\n', (3223, 3242), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3444, 3475), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(340)', '(120)', '(13)'], {}), '(890, 340, 120, 13)\n', (3456, 3475), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3681, 3712), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(390)', '(120)', '(13)'], {}), '(890, 390, 120, 13)\n', (3693, 3712), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3911, 3942), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(410)', '(121)', '(20)'], {}), '(890, 410, 121, 20)\n', (3923, 3942), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4269, 4300), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(310)', '(121)', '(20)'], {}), '(890, 310, 121, 20)\n', (4281, 4300), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4639, 4670), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(360)', '(121)', '(20)'], {}), '(890, 360, 121, 20)\n', (4651, 4670), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5013, 5043), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(20)', '(871)', '(411)'], {}), '(10, 20, 871, 411)\n', (5025, 5043), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5223, 5251), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(431)', '(411)'], {}), '(0, 0, 431, 411)\n', (5235, 5251), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5664, 5694), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(440)', '(0)', '(421)', '(411)'], {}), '(440, 0, 421, 411)\n', (5676, 5694), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6101, 6132), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(240)', '(120)', '(13)'], {}), '(890, 240, 120, 13)\n', (6113, 6132), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6303, 6334), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(260)', '(121)', '(20)'], {}), '(890, 260, 121, 20)\n', (6315, 6334), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6630, 6659), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(20)', '(81)', '(16)'], {}), '(890, 20, 81, 16)\n', (6642, 6659), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6856, 6886), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(890)', '(40)', '(121)', '(22)'], {}), '(890, 40, 121, 22)\n', (6868, 6886), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7089, 7118), 
'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(940)', '(70)', '(70)', '(23)'], {}), '(940, 70, 70, 23)\n', (7101, 7118), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7293, 7323), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(460)', '(150)', '(40)'], {}), '(10, 460, 150, 40)\n', (7305, 7323), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7478, 7509), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(880)', '(460)', '(150)', '(40)'], {}), '(880, 460, 150, 40)\n', (7490, 7509), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import sys
import re
from collections import defaultdict
import numpy
if len(sys.argv) >= 2:
FILE = sys.argv[1]
else:
FILE = "20190817T1454-1cc19b18.csv"
if len(sys.argv) >= 3:
OUTPUT = sys.argv[2]
else:
# E.g. 20190817T1454-1cc19b18.csv to
# 20190817T1454-1cc19b18-medians.csv
OUTPUT = re.sub(r"(\.[^.]+)$", r"-medians\1", FILE)
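# parse_file reads the raw timing CSV and returns a CSV string with the 25th/50th/75th percentile of run times for each (variant, t, a) combination.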
def parse_file(filename):
results = defaultdict(list) # (variant, t, a) -> [time]
with open(filename, "r", encoding="utf-8") as file:
file.readline() # skip header
for line in file:
if line.strip() == "":
continue
try:
variant, t, a, i, time, n_attempts = line.split(",")
# n_attempts is suffixed with \n
except ValueError:
print("Error: could not read line:\n%s" % line)
t = int(t)
if a != "None":
a = int(a)
time = float(str(time))
results[(variant, t, a)].append(time)
quartiles = {} # (variant, t, a) -> {25: x, 50: y, 75: z}
for k, times in results.items():
quartiles[k] = {
25: numpy.percentile(times, 25),
50: numpy.median(times),
75: numpy.percentile(times, 75),
}
out = "variant,t,a,25,median,75\n"
for k in sorted(quartiles.keys()):
out += "{},{},{},{},{},{}\n".format(k[0], k[1], k[2], quartiles[k][25],
quartiles[k][50], quartiles[k][75])
return out
out = parse_file(FILE)
with open(OUTPUT, "x") as f:
f.write(out)
| [
"re.sub",
"numpy.median",
"collections.defaultdict",
"numpy.percentile"
] | [((313, 355), 're.sub', 're.sub', (['"""(\\\\.[^.]+)$"""', '"""-medians\\\\1"""', 'FILE'], {}), "('(\\\\.[^.]+)$', '-medians\\\\1', FILE)\n", (319, 355), False, 'import re\n'), ((397, 414), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (408, 414), False, 'from collections import defaultdict\n'), ((1161, 1188), 'numpy.percentile', 'numpy.percentile', (['times', '(25)'], {}), '(times, 25)\n', (1177, 1188), False, 'import numpy\n'), ((1206, 1225), 'numpy.median', 'numpy.median', (['times'], {}), '(times)\n', (1218, 1225), False, 'import numpy\n'), ((1243, 1270), 'numpy.percentile', 'numpy.percentile', (['times', '(75)'], {}), '(times, 75)\n', (1259, 1270), False, 'import numpy\n')] |
import argparse
import json
import datetime
import time
import re
from tqdm import tqdm
from dataset_utils import loop_and_process, name_to_file_name, read_list_from_file
from get_songs import raw_songs_dir
# parser = argparse.ArgumentParser()
# parser.add_argument("--dir_path", type=str, help="Directory will all files")
# parser.add_argument("--list_file", type=str, help="List file with list of all songs")
# args = parser.parse_args()
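# Non-alphanumeric characters that are allowed in lyrics and should not be recorded.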
char_allow_list = {'\n', ' '}
def get_context(lyrics, i):
return lyrics[max(i - 10, 0): min(i + 10, len(lyrics))]
def get_lyric_blocks(song, input_format):
if input_format == "raw_song":
return [song['lyrics']]
elif input_format == "verses":
return [verse['lyrics'] for verse in song['verses']]
return []
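# Walk every song in the list, load its JSON, and record each unexpected (non-alphanumeric, non-allowed) character with its count and surrounding context.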
def analyze_characters(dir_path, list_file, input_format, out_file):
song_list = read_list_from_file("{}/{}".format(dir_path, list_file))
character_dict = {}
j = 1
start = time.time()
bar = tqdm(song_list)
for song_name in bar:
# bar.write("starting {}, {} out of {}".format(song_name, j, len(song_list)))
song_file_name = name_to_file_name(song_name.strip())
with open('{}/{}.json'.format(dir_path, song_file_name)) as jsonfile:
song = json.load(jsonfile)
lyric_blocks = get_lyric_blocks(song, input_format)
for lyrics in lyric_blocks:
for i in range(0, len(lyrics)):
c = lyrics[i]
if re.search(r'[^a-zA-Z0-9]+', c) is not None and c not in char_allow_list:
# add to characters dictionary
if c not in character_dict.keys():
character_dict[c] = {
"count" : 1,
"context":
[{"song": song_name, "line": get_context(lyrics, i)}]
}
else:
character_dict[c]['count'] = character_dict[c]['count'] + 1
character_dict[c]['context'].append({"song": song_name, "line": get_context(lyrics, i)})
j = j + 1
with open("{}.json".format(out_file), "w") as openfile:
json.dump(character_dict, openfile)
time_taken = str(datetime.timedelta(seconds=time.time() - start))
print("{} for {}".format(time_taken, len(song_list)))
| [
"tqdm.tqdm",
"json.load",
"time.time",
"json.dump",
"re.search"
] | [((970, 981), 'time.time', 'time.time', ([], {}), '()\n', (979, 981), False, 'import time\n'), ((992, 1007), 'tqdm.tqdm', 'tqdm', (['song_list'], {}), '(song_list)\n', (996, 1007), False, 'from tqdm import tqdm\n'), ((2274, 2309), 'json.dump', 'json.dump', (['character_dict', 'openfile'], {}), '(character_dict, openfile)\n', (2283, 2309), False, 'import json\n'), ((1279, 1298), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (1288, 1298), False, 'import json\n'), ((2358, 2369), 'time.time', 'time.time', ([], {}), '()\n', (2367, 2369), False, 'import time\n'), ((1508, 1537), 're.search', 're.search', (['"""[^a-zA-Z0-9]+"""', 'c'], {}), "('[^a-zA-Z0-9]+', c)\n", (1517, 1537), False, 'import re\n')] |
import numpy as np
import tensorflow as tf
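# Trains a simple linear model on data.csv using single-sample gradient descent (TensorFlow 1.x graph API).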
# Data
with open('data.csv', 'r') as f:
xxx=np.loadtxt(f, delimiter=',', skiprows=1)
with open('data.csv', 'r') as f:
yyy=np.loadtxt(f, delimiter=',', skiprows=1).T[0].T
xx=xxx.T
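# Overwrite the first column (its values were already copied into yyy as the targets) with ones so it acts as a bias/intercept feature.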
for i in range(len(xx[0])):
xx[0][i]=1
xx=xx.T
qw=[]
for i in yyy:
qw.append([float(i)])
yy=np.array(qw)
# Declare the input value x, the weight w, and the target value y
x=tf.placeholder(tf.float32, shape=(len(xx[0]),))
y=tf.placeholder(tf.float32, shape=(1,))
w=tf.Variable(tf.zeros([1, len(xx[0])]))
# Compute the output value
y2=tf.multiply(x, w)
# Calculate the error of the output
loss=tf.reduce_mean(tf.square(y2-y))
optimizer=tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# Run the training
with tf.Session() as session:
tf.global_variables_initializer().run()
for i in range(len(xx)):
feed_dict={x: xx[i:(i+1)][0], y: yy[i:(i+1)][0]}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
print("ошибка: %f" % (l, ))
iii=session.run(w)[0]
np.savetxt('weights.csv', iii, delimiter=',')
print(iii) | [
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.multiply",
"tensorflow.train.GradientDescentOptimizer",
"numpy.array",
"tensorflow.global_variables_initializer",
"numpy.savetxt",
"tensorflow.square",
"numpy.loadtxt"
] | [((322, 334), 'numpy.array', 'np.array', (['qw'], {}), '(qw)\n', (330, 334), True, 'import numpy as np\n'), ((459, 497), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(1,)'}), '(tf.float32, shape=(1,))\n', (473, 497), True, 'import tensorflow as tf\n'), ((571, 588), 'tensorflow.multiply', 'tf.multiply', (['x', 'w'], {}), '(x, w)\n', (582, 588), True, 'import tensorflow as tf\n'), ((90, 130), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(f, delimiter=',', skiprows=1)\n", (100, 130), True, 'import numpy as np\n'), ((647, 664), 'tensorflow.square', 'tf.square', (['(y2 - y)'], {}), '(y2 - y)\n', (656, 664), True, 'import tensorflow as tf\n'), ((752, 764), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (762, 764), True, 'import tensorflow as tf\n'), ((1011, 1056), 'numpy.savetxt', 'np.savetxt', (['"""weights.csv"""', 'iii'], {'delimiter': '""","""'}), "('weights.csv', iii, delimiter=',')\n", (1021, 1056), True, 'import numpy as np\n'), ((674, 713), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.05)'], {}), '(0.05)\n', (707, 713), True, 'import tensorflow as tf\n'), ((778, 811), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (809, 811), True, 'import tensorflow as tf\n'), ((169, 209), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(f, delimiter=',', skiprows=1)\n", (179, 209), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Main entry point for running BayesOpt experiments.
Defines high-level parameters that will be fed into bo_exp.
"""
from exps.defs import exp_async_synch
from exps.exp_utils import get_math_exp_task, \
get_commit_hash, create_default_parser, \
get_interface, get_iters, \
generate_starting_data, create_intial_busy_jobs, \
get_default_hp_priors_and_bounds
from ml_utils import timed_print as print
# ********* Parser ******** #
parser = create_default_parser(synth_time=True)
parser.add_argument('-f', '--func', help='Function index.',
default=10, type=int)
args = parser.parse_args()
print(f"Got arguments: \n{args}")
# ********* Exp settings ******** #
debug = False
force_run = False
foldername = get_commit_hash(with_date=True)
# ********* Overrides ******** #
# args.func = 0
# args.proc = 'synch-LP'
# args.workers = 4
# args.batch = 4
# ********* Task and data ******** #
f, x_lim, true_min_loc, true_min_val = get_math_exp_task(args.func)
hp_bounds, hyper_priors, restart_bounds = get_default_hp_priors_and_bounds(
args, x_lim)
sampler = f
async_interface = get_interface(args.workers, synth=args.timer)
n_iter = get_iters(args, max_num_queries=200)
n_init = 3 * len(x_lim)
x_init, y_init = generate_starting_data(n_init, x_lim, sampler,
async_interface=async_interface,
seed=args.seed)
n_busy = args.workers
starting_jobs = create_intial_busy_jobs(n_busy, x_lim, sampler, args.seed)
# ********* Run ******** #
exp_async_synch(args=args,
sampler=sampler,
true_min_val=true_min_val,
x_init=x_init,
y_init=y_init,
x_lim=x_lim,
foldername=foldername,
hp_bounds=hp_bounds,
restart_bounds=restart_bounds,
hyper_priors=hyper_priors,
n_iter=n_iter,
starting_jobs=starting_jobs,
async_interface=async_interface,
force_run=force_run,
debug=debug)
| [
"exps.exp_utils.create_default_parser",
"exps.exp_utils.get_default_hp_priors_and_bounds",
"exps.defs.exp_async_synch",
"exps.exp_utils.generate_starting_data",
"ml_utils.timed_print",
"exps.exp_utils.get_math_exp_task",
"exps.exp_utils.get_interface",
"exps.exp_utils.get_iters",
"exps.exp_utils.get_commit_hash",
"exps.exp_utils.create_intial_busy_jobs"
] | [((487, 525), 'exps.exp_utils.create_default_parser', 'create_default_parser', ([], {'synth_time': '(True)'}), '(synth_time=True)\n', (508, 525), False, 'from exps.exp_utils import get_math_exp_task, get_commit_hash, create_default_parser, get_interface, get_iters, generate_starting_data, create_intial_busy_jobs, get_default_hp_priors_and_bounds\n'), ((656, 692), 'ml_utils.timed_print', 'print', (['f"""Got arguments: \n{args}"""'], {}), '(f"""Got arguments: \n{args}""")\n', (661, 692), True, 'from ml_utils import timed_print as print\n'), ((777, 808), 'exps.exp_utils.get_commit_hash', 'get_commit_hash', ([], {'with_date': '(True)'}), '(with_date=True)\n', (792, 808), False, 'from exps.exp_utils import get_math_exp_task, get_commit_hash, create_default_parser, get_interface, get_iters, generate_starting_data, create_intial_busy_jobs, get_default_hp_priors_and_bounds\n'), ((1005, 1033), 'exps.exp_utils.get_math_exp_task', 'get_math_exp_task', (['args.func'], {}), '(args.func)\n', (1022, 1033), False, 'from exps.exp_utils import get_math_exp_task, get_commit_hash, create_default_parser, get_interface, get_iters, generate_starting_data, create_intial_busy_jobs, get_default_hp_priors_and_bounds\n'), ((1076, 1121), 'exps.exp_utils.get_default_hp_priors_and_bounds', 'get_default_hp_priors_and_bounds', (['args', 'x_lim'], {}), '(args, x_lim)\n', (1108, 1121), False, 'from exps.exp_utils import get_math_exp_task, get_commit_hash, create_default_parser, get_interface, get_iters, generate_starting_data, create_intial_busy_jobs, get_default_hp_priors_and_bounds\n'), ((1158, 1203), 'exps.exp_utils.get_interface', 'get_interface', (['args.workers'], {'synth': 'args.timer'}), '(args.workers, synth=args.timer)\n', (1171, 1203), False, 'from exps.exp_utils import get_math_exp_task, get_commit_hash, create_default_parser, get_interface, get_iters, generate_starting_data, create_intial_busy_jobs, get_default_hp_priors_and_bounds\n'), ((1214, 1250), 'exps.exp_utils.get_iters', 'get_iters', (['args'], {'max_num_queries': '(200)'}), '(args, max_num_queries=200)\n', (1223, 1250), False, 'from exps.exp_utils import get_math_exp_task, get_commit_hash, create_default_parser, get_interface, get_iters, generate_starting_data, create_intial_busy_jobs, get_default_hp_priors_and_bounds\n'), ((1293, 1393), 'exps.exp_utils.generate_starting_data', 'generate_starting_data', (['n_init', 'x_lim', 'sampler'], {'async_interface': 'async_interface', 'seed': 'args.seed'}), '(n_init, x_lim, sampler, async_interface=\n async_interface, seed=args.seed)\n', (1315, 1393), False, 'from exps.exp_utils import get_math_exp_task, get_commit_hash, create_default_parser, get_interface, get_iters, generate_starting_data, create_intial_busy_jobs, get_default_hp_priors_and_bounds\n'), ((1508, 1566), 'exps.exp_utils.create_intial_busy_jobs', 'create_intial_busy_jobs', (['n_busy', 'x_lim', 'sampler', 'args.seed'], {}), '(n_busy, x_lim, sampler, args.seed)\n', (1531, 1566), False, 'from exps.exp_utils import get_math_exp_task, get_commit_hash, create_default_parser, get_interface, get_iters, generate_starting_data, create_intial_busy_jobs, get_default_hp_priors_and_bounds\n'), ((1599, 1942), 'exps.defs.exp_async_synch', 'exp_async_synch', ([], {'args': 'args', 'sampler': 'sampler', 'true_min_val': 'true_min_val', 'x_init': 'x_init', 'y_init': 'y_init', 'x_lim': 'x_lim', 'foldername': 'foldername', 'hp_bounds': 'hp_bounds', 'restart_bounds': 'restart_bounds', 'hyper_priors': 'hyper_priors', 'n_iter': 'n_iter', 'starting_jobs': 'starting_jobs', 
'async_interface': 'async_interface', 'force_run': 'force_run', 'debug': 'debug'}), '(args=args, sampler=sampler, true_min_val=true_min_val,\n x_init=x_init, y_init=y_init, x_lim=x_lim, foldername=foldername,\n hp_bounds=hp_bounds, restart_bounds=restart_bounds, hyper_priors=\n hyper_priors, n_iter=n_iter, starting_jobs=starting_jobs,\n async_interface=async_interface, force_run=force_run, debug=debug)\n', (1614, 1942), False, 'from exps.defs import exp_async_synch\n')] |
from dataclasses import dataclass, field
import numpy as np
@dataclass
class AnnParameters:
"""Dataclass for storage of network parameters."""
n_layers: int
nodes_per_layer: np.ndarray
input_dim: int
output_dim: int
input_bounds: np.ndarray = None
output_bounds: np.ndarray = None
weights: list = field(default_factory=list)
bias: list = field(default_factory=list)
M_plus: list = field(default_factory=list)
M_minus: list = field(default_factory=list)
UB: list = field(default_factory=list)
LB: list = field(default_factory=list)
redundancy_matrix: list = field(default_factory=list)
| [
"dataclasses.field"
] | [((307, 334), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (312, 334), False, 'from dataclasses import dataclass, field\n'), ((352, 379), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (357, 379), False, 'from dataclasses import dataclass, field\n'), ((399, 426), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (404, 426), False, 'from dataclasses import dataclass, field\n'), ((447, 474), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (452, 474), False, 'from dataclasses import dataclass, field\n'), ((490, 517), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (495, 517), False, 'from dataclasses import dataclass, field\n'), ((533, 560), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (538, 560), False, 'from dataclasses import dataclass, field\n'), ((591, 618), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (596, 618), False, 'from dataclasses import dataclass, field\n')] |