| code | apis | extract_api |
|---|---|---|
| string, 22 – 1.05M chars | list, 1 – 3.31k items | string, 75 – 3.25M chars |
# Generated by Django 2.2.2 on 2019-11-13 13:52
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='users',
            name='site_key',
            field=models.CharField(blank=True, default='<KEY>', max_length=32, unique=True),
        ),
    ]
| [
"django.db.models.CharField"
]
| [((324, 397), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '"""<KEY>"""', 'max_length': '(32)', 'unique': '(True)'}), "(blank=True, default='<KEY>', max_length=32, unique=True)\n", (340, 397), False, 'from django.db import migrations, models\n')] |
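
Each row above pairs a source file (`code`) with the fully qualified APIs it uses (`apis`) and a list of offset-annotated call tuples (`extract_api`). As an illustration only — this is not an official loader, and the field layout is inferred from the rows shown here — such a cell can be read with `ast.literal_eval`:

```python
import ast


def iter_extract_api(cell: str):
    """Yield (api_name, call_span, import_stmt) from one extract_api cell.

    Assumes each entry is laid out as in the rows above: call span,
    fully-qualified API name, call expression, parsed args, call source
    text, name span, a flag, and the import statement. Only fields 0, 1
    and -1 are used here.
    """
    for entry in ast.literal_eval(cell):
        yield entry[1], entry[0], entry[-1].strip()


# Hypothetical single-entry cell mirroring the first row above.
cell = ("[((324, 397), 'django.db.models.CharField', 'models.CharField', "
        "([], {'max_length': '(32)'}), '(max_length=32)', (340, 397), False, "
        "'from django.db import migrations, models\\n')]")
for api, span, imp in iter_extract_api(cell):
    print(api, span, imp)
```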
# -*- coding: UTF-8 -*-
import sys
import socket
import time
import threading
import select
HOST = '192.168.11.98'
PORT = int(sys.argv[1])
queue = []
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
queue.append(s)
print("add client to queue")
def socketRecv():
    # Receive loop; runs in a background thread until do_run is cleared.
    t = threading.current_thread()
    while getattr(t, "do_run", True):
        data = s.recv(1024).decode("utf-8")
        print(data)
        time.sleep(0.1)


def inputJob():
    while True:
        data = input()
        s.send(bytes(data, "utf-8"))
        time.sleep(0.1)


socketThread = threading.Thread(target=socketRecv)
socketThread.start()
# inputThread = Thread(target=inputJob)
# inputThread.start()

try:
    while True:
        data = input()
        s.send(bytes(data, "utf-8"))
        time.sleep(0.1)
except (KeyboardInterrupt, EOFError):
    print("in except")
    # s.close()  # close the connection
    socketThread.do_run = False  # signal the receive thread to stop
    # socketThread.join()
    # inputThread.join()
    print("close thread")
    sys.exit(0)
| [
"threading.Thread",
"time.sleep",
"socket.socket",
"sys.exit"
]
| [((156, 205), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (169, 205), False, 'import socket\n'), ((535, 570), 'threading.Thread', 'threading.Thread', ([], {'target': 'socketRecv'}), '(target=socketRecv)\n', (551, 570), False, 'import threading\n'), ((384, 399), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (394, 399), False, 'import time\n'), ((502, 517), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (512, 517), False, 'import time\n'), ((743, 758), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (753, 758), False, 'import time\n'), ((957, 968), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (965, 968), False, 'import sys\n')] |
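
For completeness, a peer the client above could be exercised against — a hypothetical one-connection echo server, not part of the row above; the bind address and port are placeholders (the client dials `192.168.11.98` on the port given in `sys.argv[1]`):

```python
import socket

HOST, PORT = "0.0.0.0", 5000  # placeholders

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((HOST, PORT))
    srv.listen(1)
    conn, addr = srv.accept()
    print("client connected from", addr)
    with conn:
        while True:
            data = conn.recv(1024)
            if not data:        # client closed the connection
                break
            conn.sendall(data)  # echo the line back
```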
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    DESCRIPTION = "Import observable(s) into Anomali ThreatStream with approval"


class Input:
    FILE = "file"
    OBSERVABLE_SETTINGS = "observable_settings"


class Output:
    RESULTS = "results"


class ImportObservableInput(komand.Input):
    schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"file": {
"$ref": "#/definitions/file",
"title": "File",
"description": "File of data to be imported into Anomali ThreatStream",
"order": 1
},
"observable_settings": {
"$ref": "#/definitions/observable_settings",
"title": "Observable Settings",
"description": "Settings needed for importing an observable that needs approval",
"order": 2
}
},
"required": [
"file"
],
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"observable_settings": {
"type": "object",
"title": "observable_settings",
"properties": {
"classification": {
"type": "string",
"title": "Classification",
"description": "Classification of the observable",
"default": "private",
"enum": [
"public",
"private"
],
"order": 4
},
"confidence": {
"type": "integer",
"title": "Confidence",
"description": "Confidence value assigned to the observable. Confidence score can range from 0-100, in increasing order of confidence",
"order": 1
},
"domain_mapping": {
"type": "string",
"title": "Domain Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 8
},
"email_mapping": {
"type": "string",
"title": "Email Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 10
},
"expiration_ts": {
"type": "string",
"title": "Expiration Time Stamp",
"displayType": "date",
"description": "Time stamp of when intelligence will expire on ThreatStream",
"format": "date-time",
"order": 5
},
"ip_mapping": {
"type": "string",
"title": "IP Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 7
},
"md5_mapping": {
"type": "string",
"title": "MD5 Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 11
},
"notes": {
"type": "array",
"title": "Notes",
"description": "Additional details for the observable. This information is displayed in the Tags column of the ThreatStream UI e.g ['note1', 'note2', 'note3']",
"items": {
"type": "string"
},
"order": 6
},
"severity": {
"type": "string",
"title": "Severity",
"description": "Severity you want to assign to the observable when it is imported",
"default": "",
"enum": [
"low",
"medium",
"high",
"very-high",
""
],
"order": 3
},
"source_confidence_weight": {
"type": "integer",
"title": "Source Confidence Weight",
"description": "Specifies the ratio between the amount of the source confidence of each observable and the ThreatStream confidence",
"order": 2
},
"threat_type": {
"type": "string",
"title": "Threat Type",
"description": "Type of threat associated with the imported observables",
"order": 13
},
"trustedcircles": {
"type": "array",
"title": "Trusted Circles",
"description": "ID of the trusted circle to which this threat data should be imported. If you want to import the threat data to multiple trusted circles, enter the list of comma-separated IDs e.g [1,2,3]",
"items": {
"type": "integer"
},
"order": 12
},
"url_mapping": {
"type": "string",
"title": "URL Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 9
}
},
"required": [
"classification"
]
}
}
}
""")
    def __init__(self):
        super(self.__class__, self).__init__(self.schema)


class ImportObservableOutput(komand.Output):
    schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"results": {
"$ref": "#/definitions/import_observable_response",
"title": "Results",
"description": "Results from importing observable(s)",
"order": 1
}
},
"definitions": {
"import_observable_response": {
"type": "object",
"title": "import_observable_response",
"properties": {
"import_session_id": {
"type": "string",
"title": "Import Session ID",
"description": "ID for import session",
"order": 3
},
"job_id": {
"type": "string",
"title": "Job ID",
"description": "Job ID",
"order": 1
},
"success": {
"type": "boolean",
"title": "Success",
"description": "If import was successful",
"order": 2
}
}
}
}
}
""")
    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
| [
"json.loads"
]
| [((353, 5143), 'json.loads', 'json.loads', (['"""\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "file": {\n "$ref": "#/definitions/file",\n "title": "File",\n "description": "File of data to be imported into Anomali ThreatStream",\n "order": 1\n },\n "observable_settings": {\n "$ref": "#/definitions/observable_settings",\n "title": "Observable Settings",\n "description": "Settings needed for importing an observable that needs approval",\n "order": 2\n }\n },\n "required": [\n "file"\n ],\n "definitions": {\n "file": {\n "id": "file",\n "type": "object",\n "title": "File",\n "description": "File Object",\n "properties": {\n "content": {\n "type": "string",\n "title": "Content",\n "description": "File contents",\n "format": "bytes"\n },\n "filename": {\n "type": "string",\n "title": "Filename",\n "description": "Name of file"\n }\n }\n },\n "observable_settings": {\n "type": "object",\n "title": "observable_settings",\n "properties": {\n "classification": {\n "type": "string",\n "title": "Classification",\n "description": "Classification of the observable",\n "default": "private",\n "enum": [\n "public",\n "private"\n ],\n "order": 4\n },\n "confidence": {\n "type": "integer",\n "title": "Confidence",\n "description": "Confidence value assigned to the observable. Confidence score can range from 0-100, in increasing order of confidence",\n "order": 1\n },\n "domain_mapping": {\n "type": "string",\n "title": "Domain Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 8\n },\n "email_mapping": {\n "type": "string",\n "title": "Email Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 10\n },\n "expiration_ts": {\n "type": "string",\n "title": "Expiration Time Stamp",\n "displayType": "date",\n "description": "Time stamp of when intelligence will expire on ThreatStream",\n "format": "date-time",\n "order": 5\n },\n "ip_mapping": {\n "type": "string",\n "title": "IP Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 7\n },\n "md5_mapping": {\n "type": "string",\n "title": "MD5 Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 11\n },\n "notes": {\n "type": "array",\n "title": "Notes",\n "description": "Additional details for the observable. This information is displayed in the Tags column of the ThreatStream UI e.g [\'note1\', \'note2\', \'note3\']",\n "items": {\n "type": "string"\n },\n "order": 6\n },\n "severity": {\n "type": "string",\n "title": "Severity",\n "description": "Severity you want to assign to the observable when it is imported",\n "default": "",\n "enum": [\n "low",\n "medium",\n "high",\n "very-high",\n ""\n ],\n "order": 3\n },\n "source_confidence_weight": {\n "type": "integer",\n "title": "Source Confidence Weight",\n "description": "Specifies the ratio between the amount of the source confidence of each observable and the ThreatStream confidence",\n "order": 2\n },\n "threat_type": {\n "type": "string",\n "title": "Threat Type",\n "description": "Type of threat associated with the imported observables",\n "order": 13\n },\n "trustedcircles": {\n "type": "array",\n "title": "Trusted Circles",\n "description": "ID of the trusted circle to which this threat data should be imported. 
If you want to import the threat data to multiple trusted circles, enter the list of comma-separated IDs e.g [1,2,3]",\n "items": {\n "type": "integer"\n },\n "order": 12\n },\n "url_mapping": {\n "type": "string",\n "title": "URL Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 9\n }\n },\n "required": [\n "classification"\n ]\n }\n }\n}\n """'], {}), '(\n """\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "file": {\n "$ref": "#/definitions/file",\n "title": "File",\n "description": "File of data to be imported into Anomali ThreatStream",\n "order": 1\n },\n "observable_settings": {\n "$ref": "#/definitions/observable_settings",\n "title": "Observable Settings",\n "description": "Settings needed for importing an observable that needs approval",\n "order": 2\n }\n },\n "required": [\n "file"\n ],\n "definitions": {\n "file": {\n "id": "file",\n "type": "object",\n "title": "File",\n "description": "File Object",\n "properties": {\n "content": {\n "type": "string",\n "title": "Content",\n "description": "File contents",\n "format": "bytes"\n },\n "filename": {\n "type": "string",\n "title": "Filename",\n "description": "Name of file"\n }\n }\n },\n "observable_settings": {\n "type": "object",\n "title": "observable_settings",\n "properties": {\n "classification": {\n "type": "string",\n "title": "Classification",\n "description": "Classification of the observable",\n "default": "private",\n "enum": [\n "public",\n "private"\n ],\n "order": 4\n },\n "confidence": {\n "type": "integer",\n "title": "Confidence",\n "description": "Confidence value assigned to the observable. Confidence score can range from 0-100, in increasing order of confidence",\n "order": 1\n },\n "domain_mapping": {\n "type": "string",\n "title": "Domain Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 8\n },\n "email_mapping": {\n "type": "string",\n "title": "Email Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 10\n },\n "expiration_ts": {\n "type": "string",\n "title": "Expiration Time Stamp",\n "displayType": "date",\n "description": "Time stamp of when intelligence will expire on ThreatStream",\n "format": "date-time",\n "order": 5\n },\n "ip_mapping": {\n "type": "string",\n "title": "IP Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 7\n },\n "md5_mapping": {\n "type": "string",\n "title": "MD5 Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 11\n },\n "notes": {\n "type": "array",\n "title": "Notes",\n "description": "Additional details for the observable. 
This information is displayed in the Tags column of the ThreatStream UI e.g [\'note1\', \'note2\', \'note3\']",\n "items": {\n "type": "string"\n },\n "order": 6\n },\n "severity": {\n "type": "string",\n "title": "Severity",\n "description": "Severity you want to assign to the observable when it is imported",\n "default": "",\n "enum": [\n "low",\n "medium",\n "high",\n "very-high",\n ""\n ],\n "order": 3\n },\n "source_confidence_weight": {\n "type": "integer",\n "title": "Source Confidence Weight",\n "description": "Specifies the ratio between the amount of the source confidence of each observable and the ThreatStream confidence",\n "order": 2\n },\n "threat_type": {\n "type": "string",\n "title": "Threat Type",\n "description": "Type of threat associated with the imported observables",\n "order": 13\n },\n "trustedcircles": {\n "type": "array",\n "title": "Trusted Circles",\n "description": "ID of the trusted circle to which this threat data should be imported. If you want to import the threat data to multiple trusted circles, enter the list of comma-separated IDs e.g [1,2,3]",\n "items": {\n "type": "integer"\n },\n "order": 12\n },\n "url_mapping": {\n "type": "string",\n "title": "URL Mapping",\n "description": "Indicator type to assign if a specific type is not associated with an observable",\n "order": 9\n }\n },\n "required": [\n "classification"\n ]\n }\n }\n}\n """\n )\n', (363, 5143), False, 'import json\n'), ((5277, 6222), 'json.loads', 'json.loads', (['"""\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "results": {\n "$ref": "#/definitions/import_observable_response",\n "title": "Results",\n "description": "Results from importing observable(s)",\n "order": 1\n }\n },\n "definitions": {\n "import_observable_response": {\n "type": "object",\n "title": "import_observable_response",\n "properties": {\n "import_session_id": {\n "type": "string",\n "title": "Import Session ID",\n "description": "ID for import session",\n "order": 3\n },\n "job_id": {\n "type": "string",\n "title": "Job ID",\n "description": "Job ID",\n "order": 1\n },\n "success": {\n "type": "boolean",\n "title": "Success",\n "description": "If import was successful",\n "order": 2\n }\n }\n }\n }\n}\n """'], {}), '(\n """\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "results": {\n "$ref": "#/definitions/import_observable_response",\n "title": "Results",\n "description": "Results from importing observable(s)",\n "order": 1\n }\n },\n "definitions": {\n "import_observable_response": {\n "type": "object",\n "title": "import_observable_response",\n "properties": {\n "import_session_id": {\n "type": "string",\n "title": "Import Session ID",\n "description": "ID for import session",\n "order": 3\n },\n "job_id": {\n "type": "string",\n "title": "Job ID",\n "description": "Job ID",\n "order": 1\n },\n "success": {\n "type": "boolean",\n "title": "Success",\n "description": "If import was successful",\n "order": 2\n }\n }\n }\n }\n}\n """\n )\n', (5287, 6222), False, 'import json\n')] |
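
The two generated classes above only wrap JSON Schemas for the action's input and output. As a side note — this uses the third-party `jsonschema` package, not the Komand SDK shown above — a payload can be sanity-checked against such a schema like this (the payload values are made up):

```python
import json
import jsonschema  # third-party: pip install jsonschema

# Trimmed-down version of the input schema above.
schema = json.loads("""
{
  "type": "object",
  "required": ["file"],
  "properties": {
    "file": {
      "type": "object",
      "properties": {
        "content": {"type": "string"},
        "filename": {"type": "string"}
      }
    }
  }
}
""")

payload = {"file": {"filename": "observables.csv", "content": "aGVsbG8="}}
jsonschema.validate(instance=payload, schema=schema)  # raises ValidationError on mismatch
```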
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from mockito import mock, when, unstub, any, verify, never, times
from mock import Mock
from trove.quota.quota import DbQuotaDriver
from trove.quota.models import Resource
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
from trove.db.models import DatabaseModelBase
from trove.extensions.mgmt.quota.service import QuotaController
from trove.common import exception
from trove.common import cfg
from trove.quota.quota import run_with_quotas
from trove.quota.quota import QUOTAS
"""
Unit tests for the classes and functions in DbQuotaDriver.py.
"""
CONF = cfg.CONF
resources = {
Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'),
Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'),
}
FAKE_TENANT1 = "123456"
FAKE_TENANT2 = "654321"
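# Exercises run_with_quotas() with the global QUOTAS driver's reserve/commit/rollback mocked out.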
class Run_with_quotasTest(testtools.TestCase):
def setUp(self):
super(Run_with_quotasTest, self).setUp()
self.quota_reserve_orig = QUOTAS.reserve
self.quota_rollback_orig = QUOTAS.rollback
self.quota_commit_orig = QUOTAS.commit
QUOTAS.reserve = Mock()
QUOTAS.rollback = Mock()
QUOTAS.commit = Mock()
def tearDown(self):
super(Run_with_quotasTest, self).tearDown()
QUOTAS.reserve = self.quota_reserve_orig
QUOTAS.rollback = self.quota_rollback_orig
QUOTAS.commit = self.quota_commit_orig
def test_run_with_quotas(self):
f = Mock()
run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)
self.assertTrue(QUOTAS.reserve.called)
self.assertTrue(QUOTAS.commit.called)
self.assertFalse(QUOTAS.rollback.called)
self.assertTrue(f.called)
def test_run_with_quotas_error(self):
f = Mock(side_effect=Exception())
self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1,
{'instances': 1, 'volumes': 5}, f)
self.assertTrue(QUOTAS.reserve.called)
self.assertTrue(QUOTAS.rollback.called)
self.assertFalse(QUOTAS.commit.called)
self.assertTrue(f.called)
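# Exercises QuotaController.update() against a mockito-stubbed request context and Quota models.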
class QuotaControllerTest(testtools.TestCase):
def setUp(self):
super(QuotaControllerTest, self).setUp()
context = mock()
context.is_admin = True
req = mock()
req.environ = mock()
when(req.environ).get(any()).thenReturn(context)
self.req = req
self.controller = QuotaController()
def tearDown(self):
super(QuotaControllerTest, self).tearDown()
unstub()
def test_update_unknown_resource(self):
body = {'quotas': {'unknown_resource': 5}}
self.assertRaises(exception.QuotaResourceUnknown,
self.controller.update, self.req, body,
FAKE_TENANT1, FAKE_TENANT2)
def test_update_resource_no_value(self):
quota = mock(Quota)
when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(quota)
body = {'quotas': {'instances': None}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(quota, never).save()
self.assertEquals(200, result.status)
def test_update_resource_instance(self):
instance_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(instance_quota)
body = {'quotas': {'instances': 2}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(instance_quota, times=1).save()
self.assertTrue('instances' in result._data['quotas'])
self.assertEquals(200, result.status)
self.assertEquals(2, result._data['quotas']['instances'])
@testtools.skipIf(not CONF.trove_volume_support,
'Volume support is not enabled')
def test_update_resource_volume(self):
instance_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(instance_quota)
volume_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='volumes').thenReturn(volume_quota)
body = {'quotas': {'instances': None, 'volumes': 10}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(instance_quota, never).save()
self.assertFalse('instances' in result._data['quotas'])
verify(volume_quota, times=1).save()
self.assertEquals(200, result.status)
self.assertEquals(10, result._data['quotas']['volumes'])
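# Exercises DbQuotaDriver defaults, per-tenant quota/usage lookups, reserve, commit and rollback with mocked model classes.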
class DbQuotaDriverTest(testtools.TestCase):
def setUp(self):
super(DbQuotaDriverTest, self).setUp()
self.driver = DbQuotaDriver(resources)
self.orig_Quota_find_all = Quota.find_all
self.orig_QuotaUsage_find_all = QuotaUsage.find_all
self.orig_QuotaUsage_find_by = QuotaUsage.find_by
self.orig_Reservation_create = Reservation.create
self.orig_QuotaUsage_create = QuotaUsage.create
self.orig_QuotaUsage_save = QuotaUsage.save
self.orig_Reservation_save = Reservation.save
self.mock_quota_result = Mock()
self.mock_usage_result = Mock()
Quota.find_all = Mock(return_value=self.mock_quota_result)
QuotaUsage.find_all = Mock(return_value=self.mock_usage_result)
def tearDown(self):
super(DbQuotaDriverTest, self).tearDown()
Quota.find_all = self.orig_Quota_find_all
QuotaUsage.find_all = self.orig_QuotaUsage_find_all
QuotaUsage.find_by = self.orig_QuotaUsage_find_by
Reservation.create = self.orig_Reservation_create
QuotaUsage.create = self.orig_QuotaUsage_create
QuotaUsage.save = self.orig_QuotaUsage_save
Reservation.save = self.orig_Reservation_save
def test_get_defaults(self):
defaults = self.driver.get_defaults(resources)
self.assertEqual(CONF.max_instances_per_user,
defaults[Resource.INSTANCES])
self.assertEqual(CONF.max_volumes_per_user,
defaults[Resource.VOLUMES])
def test_get_quota_by_tenant(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=12)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, quota.tenant_id)
self.assertEquals(Resource.INSTANCES, quota.resource)
self.assertEquals(12, quota.hard_limit)
def test_get_quota_by_tenant_default(self):
self.mock_quota_result.all = Mock(return_value=[])
quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, quota.tenant_id)
self.assertEquals(Resource.VOLUMES, quota.resource)
self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit)
def test_get_all_quotas_by_tenant(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=22),
Quota(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
hard_limit=15)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit)
def test_get_all_quotas_by_tenant_with_all_default(self):
self.mock_quota_result.all = Mock(return_value=[])
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(CONF.max_instances_per_user,
quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(CONF.max_volumes_per_user,
quotas[Resource.VOLUMES].hard_limit)
def test_get_all_quotas_by_tenant_with_one_default(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=22)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(CONF.max_volumes_per_user,
quotas[Resource.VOLUMES].hard_limit)
def test_get_quota_usage_by_tenant(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=3,
reserved=1)]
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, usage.tenant_id)
self.assertEquals(Resource.VOLUMES, usage.resource)
self.assertEquals(3, usage.in_use)
self.assertEquals(1, usage.reserved)
def test_get_quota_usage_by_tenant_default(self):
FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)
self.mock_usage_result.all = Mock(return_value=[])
QuotaUsage.create = Mock(return_value=FAKE_QUOTA)
usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, usage.tenant_id)
self.assertEquals(Resource.VOLUMES, usage.resource)
self.assertEquals(0, usage.in_use)
self.assertEquals(0, usage.reserved)
def test_get_all_quota_usages_by_tenant(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=2,
reserved=1),
QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(2, usages[Resource.INSTANCES].in_use)
self.assertEquals(1, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(1, usages[Resource.VOLUMES].in_use)
self.assertEquals(1, usages[Resource.VOLUMES].reserved)
def test_get_all_quota_usages_by_tenant_with_all_default(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0),
QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_usage_result.all = Mock(return_value=[])
QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(0, usages[Resource.INSTANCES].in_use)
self.assertEquals(0, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(0, usages[Resource.VOLUMES].in_use)
self.assertEquals(0, usages[Resource.VOLUMES].reserved)
def test_get_all_quota_usages_by_tenant_with_one_default(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0)]
NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(0, usages[Resource.INSTANCES].in_use)
self.assertEquals(0, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(0, usages[Resource.VOLUMES].in_use)
self.assertEquals(0, usages[Resource.VOLUMES].reserved)
def test_reserve(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.save = Mock()
Reservation.create = Mock()
delta = {'instances': 2, 'volumes': 3}
self.driver.reserve(FAKE_TENANT1, resources, delta)
_, kw = Reservation.create.call_args_list[0]
self.assertEquals(1, kw['usage_id'])
self.assertEquals(2, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
_, kw = Reservation.create.call_args_list[1]
self.assertEquals(2, kw['usage_id'])
self.assertEquals(3, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
def test_reserve_resource_unknown(self):
delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123}
self.assertRaises(exception.QuotaResourceUnknown,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user + 1}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_with_usage(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 5, 'volumes': 3}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_with_reserved(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 4, 'volumes': 2}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_but_can_apply_negative_deltas(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=10,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=50,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.save = Mock()
Reservation.create = Mock()
delta = {'instances': -1, 'volumes': -3}
self.driver.reserve(FAKE_TENANT1, resources, delta)
_, kw = Reservation.create.call_args_list[0]
self.assertEquals(1, kw['usage_id'])
self.assertEquals(-1, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
_, kw = Reservation.create.call_args_list[1]
self.assertEquals(2, kw['usage_id'])
self.assertEquals(-3, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
def test_commit(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=5,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=2)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=1,
status=Reservation.Statuses.RESERVED),
Reservation(usage_id=2,
delta=2,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.commit(FAKE_RESERVATIONS)
self.assertEqual(6, FAKE_QUOTAS[0].in_use)
self.assertEqual(1, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[0].status)
self.assertEqual(3, FAKE_QUOTAS[1].in_use)
self.assertEqual(0, FAKE_QUOTAS[1].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[1].status)
def test_rollback(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=5,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=2)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=1,
status=Reservation.Statuses.RESERVED),
Reservation(usage_id=2,
delta=2,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.rollback(FAKE_RESERVATIONS)
self.assertEqual(5, FAKE_QUOTAS[0].in_use)
self.assertEqual(1, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.ROLLEDBACK,
FAKE_RESERVATIONS[0].status)
self.assertEqual(1, FAKE_QUOTAS[1].in_use)
self.assertEqual(0, FAKE_QUOTAS[1].reserved)
self.assertEqual(Reservation.Statuses.ROLLEDBACK,
FAKE_RESERVATIONS[1].status)
| [
"trove.quota.models.Reservation",
"trove.quota.quota.DbQuotaDriver",
"testtools.skipIf",
"mockito.mock",
"mock.Mock",
"mockito.unstub",
"trove.extensions.mgmt.quota.service.QuotaController",
"mockito.verify",
"trove.quota.models.Quota",
"mockito.when",
"trove.quota.quota.run_with_quotas",
"trove.quota.models.Resource",
"mockito.any",
"trove.quota.models.QuotaUsage"
]
| [((1308, 1362), 'trove.quota.models.Resource', 'Resource', (['Resource.INSTANCES', '"""max_instances_per_user"""'], {}), "(Resource.INSTANCES, 'max_instances_per_user')\n", (1316, 1362), False, 'from trove.quota.models import Resource\n'), ((1386, 1436), 'trove.quota.models.Resource', 'Resource', (['Resource.VOLUMES', '"""max_volumes_per_user"""'], {}), "(Resource.VOLUMES, 'max_volumes_per_user')\n", (1394, 1436), False, 'from trove.quota.models import Resource\n'), ((4582, 4667), 'testtools.skipIf', 'testtools.skipIf', (['(not CONF.trove_volume_support)', '"""Volume support is not enabled"""'], {}), "(not CONF.trove_volume_support, 'Volume support is not enabled'\n )\n", (4598, 4667), False, 'import testtools\n'), ((1781, 1787), 'mock.Mock', 'Mock', ([], {}), '()\n', (1785, 1787), False, 'from mock import Mock\n'), ((1814, 1820), 'mock.Mock', 'Mock', ([], {}), '()\n', (1818, 1820), False, 'from mock import Mock\n'), ((1845, 1851), 'mock.Mock', 'Mock', ([], {}), '()\n', (1849, 1851), False, 'from mock import Mock\n'), ((2126, 2132), 'mock.Mock', 'Mock', ([], {}), '()\n', (2130, 2132), False, 'from mock import Mock\n'), ((2141, 2205), 'trove.quota.quota.run_with_quotas', 'run_with_quotas', (['FAKE_TENANT1', "{'instances': 1, 'volumes': 5}", 'f'], {}), "(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)\n", (2156, 2205), False, 'from trove.quota.quota import run_with_quotas\n'), ((2913, 2919), 'mockito.mock', 'mock', ([], {}), '()\n', (2917, 2919), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((2966, 2972), 'mockito.mock', 'mock', ([], {}), '()\n', (2970, 2972), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((2995, 3001), 'mockito.mock', 'mock', ([], {}), '()\n', (2999, 3001), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((3108, 3125), 'trove.extensions.mgmt.quota.service.QuotaController', 'QuotaController', ([], {}), '()\n', (3123, 3125), False, 'from trove.extensions.mgmt.quota.service import QuotaController\n'), ((3211, 3219), 'mockito.unstub', 'unstub', ([], {}), '()\n', (3217, 3219), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((3556, 3567), 'mockito.mock', 'mock', (['Quota'], {}), '(Quota)\n', (3560, 3567), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((4036, 4047), 'mockito.mock', 'mock', (['Quota'], {}), '(Quota)\n', (4040, 4047), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((4753, 4764), 'mockito.mock', 'mock', (['Quota'], {}), '(Quota)\n', (4757, 4764), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((4926, 4937), 'mockito.mock', 'mock', (['Quota'], {}), '(Quota)\n', (4930, 4937), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((5662, 5686), 'trove.quota.quota.DbQuotaDriver', 'DbQuotaDriver', (['resources'], {}), '(resources)\n', (5675, 5686), False, 'from trove.quota.quota import DbQuotaDriver\n'), ((6108, 6114), 'mock.Mock', 'Mock', ([], {}), '()\n', (6112, 6114), False, 'from mock import Mock\n'), ((6148, 6154), 'mock.Mock', 'Mock', ([], {}), '()\n', (6152, 6154), False, 'from mock import Mock\n'), ((6180, 6221), 'mock.Mock', 'Mock', ([], {'return_value': 'self.mock_quota_result'}), '(return_value=self.mock_quota_result)\n', (6184, 6221), False, 'from mock import Mock\n'), ((6252, 6293), 'mock.Mock', 'Mock', ([], {'return_value': 'self.mock_usage_result'}), '(return_value=self.mock_usage_result)\n', (6256, 6293), 
False, 'from mock import Mock\n'), ((7296, 7326), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (7300, 7326), False, 'from mock import Mock\n'), ((7711, 7732), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (7715, 7732), False, 'from mock import Mock\n'), ((8446, 8476), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (8450, 8476), False, 'from mock import Mock\n'), ((9198, 9219), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (9202, 9219), False, 'from mock import Mock\n'), ((10198, 10228), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (10202, 10228), False, 'from mock import Mock\n'), ((11194, 11224), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (11198, 11224), False, 'from mock import Mock\n'), ((11649, 11736), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(0)', 'reserved': '(0)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,\n reserved=0)\n', (11659, 11736), False, 'from trove.quota.models import QuotaUsage\n'), ((11867, 11888), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (11871, 11888), False, 'from mock import Mock\n'), ((11917, 11946), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTA'}), '(return_value=FAKE_QUOTA)\n', (11921, 11946), False, 'from mock import Mock\n'), ((12807, 12837), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (12811, 12837), False, 'from mock import Mock\n'), ((14120, 14141), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (14124, 14141), False, 'from mock import Mock\n'), ((14170, 14199), 'mock.Mock', 'Mock', ([], {'side_effect': 'FAKE_QUOTAS'}), '(side_effect=FAKE_QUOTAS)\n', (14174, 14199), False, 'from mock import Mock\n'), ((15260, 15347), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(0)', 'reserved': '(0)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,\n reserved=0)\n', (15270, 15347), False, 'from trove.quota.models import QuotaUsage\n'), ((15489, 15519), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (15493, 15519), False, 'from mock import Mock\n'), ((15548, 15581), 'mock.Mock', 'Mock', ([], {'return_value': 'NEW_FAKE_QUOTA'}), '(return_value=NEW_FAKE_QUOTA)\n', (15552, 15581), False, 'from mock import Mock\n'), ((16904, 16925), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (16908, 16925), False, 'from mock import Mock\n'), ((16963, 16993), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (16967, 16993), False, 'from mock import Mock\n'), ((17020, 17026), 'mock.Mock', 'Mock', ([], {}), '()\n', (17024, 17026), False, 'from mock import Mock\n'), ((17056, 17062), 'mock.Mock', 'Mock', ([], {}), '()\n', (17060, 17062), False, 'from mock import Mock\n'), ((18509, 18530), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (18513, 18530), False, 'from mock import Mock\n'), ((18568, 18598), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (18572, 18598), False, 'from mock import Mock\n'), ((19475, 19496), 'mock.Mock', 'Mock', ([], {'return_value': 
'[]'}), '(return_value=[])\n', (19479, 19496), False, 'from mock import Mock\n'), ((19534, 19564), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (19538, 19564), False, 'from mock import Mock\n'), ((20416, 20437), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (20420, 20437), False, 'from mock import Mock\n'), ((20475, 20505), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (20479, 20505), False, 'from mock import Mock\n'), ((21375, 21396), 'mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (21379, 21396), False, 'from mock import Mock\n'), ((21434, 21464), 'mock.Mock', 'Mock', ([], {'return_value': 'FAKE_QUOTAS'}), '(return_value=FAKE_QUOTAS)\n', (21438, 21464), False, 'from mock import Mock\n'), ((21492, 21498), 'mock.Mock', 'Mock', ([], {}), '()\n', (21496, 21498), False, 'from mock import Mock\n'), ((21528, 21534), 'mock.Mock', 'Mock', ([], {}), '()\n', (21532, 21534), False, 'from mock import Mock\n'), ((22125, 22131), 'mock.Mock', 'Mock', ([], {}), '()\n', (22129, 22131), False, 'from mock import Mock\n'), ((22158, 22164), 'mock.Mock', 'Mock', ([], {}), '()\n', (22162, 22164), False, 'from mock import Mock\n'), ((23065, 23094), 'mock.Mock', 'Mock', ([], {'side_effect': 'FAKE_QUOTAS'}), '(side_effect=FAKE_QUOTAS)\n', (23069, 23094), False, 'from mock import Mock\n'), ((23631, 23637), 'mock.Mock', 'Mock', ([], {}), '()\n', (23635, 23637), False, 'from mock import Mock\n'), ((23664, 23670), 'mock.Mock', 'Mock', ([], {}), '()\n', (23668, 23670), False, 'from mock import Mock\n'), ((24571, 24600), 'mock.Mock', 'Mock', ([], {'side_effect': 'FAKE_QUOTAS'}), '(side_effect=FAKE_QUOTAS)\n', (24575, 24600), False, 'from mock import Mock\n'), ((7125, 7198), 'trove.quota.models.Quota', 'Quota', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'hard_limit': '(12)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)\n', (7130, 7198), False, 'from trove.quota.models import Quota\n'), ((8121, 8194), 'trove.quota.models.Quota', 'Quota', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'hard_limit': '(22)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)\n', (8126, 8194), False, 'from trove.quota.models import Quota\n'), ((8277, 8348), 'trove.quota.models.Quota', 'Quota', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'hard_limit': '(15)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)\n', (8282, 8348), False, 'from trove.quota.models import Quota\n'), ((10027, 10100), 'trove.quota.models.Quota', 'Quota', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'hard_limit': '(22)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)\n', (10032, 10100), False, 'from trove.quota.models import Quota\n'), ((10969, 11056), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(3)', 'reserved': '(1)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3,\n reserved=1)\n', (10979, 11056), False, 'from trove.quota.models import QuotaUsage\n'), ((12370, 12459), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(2)', 'reserved': '(1)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2,\n reserved=1)\n', (12380, 12459), False, 'from 
trove.quota.models import QuotaUsage\n'), ((12582, 12669), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(1)', 'reserved': '(1)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1,\n reserved=1)\n', (12592, 12669), False, 'from trove.quota.models import QuotaUsage\n'), ((13683, 13772), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(0)', 'reserved': '(0)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0,\n reserved=0)\n', (13693, 13772), False, 'from trove.quota.models import QuotaUsage\n'), ((13895, 13982), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(0)', 'reserved': '(0)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,\n reserved=0)\n', (13905, 13982), False, 'from trove.quota.models import QuotaUsage\n'), ((15045, 15134), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(0)', 'reserved': '(0)'}), '(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0,\n reserved=0)\n', (15055, 15134), False, 'from trove.quota.models import QuotaUsage\n'), ((16387, 16482), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(1)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(1)', 'reserved': '(2)'}), '(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,\n in_use=1, reserved=2)\n', (16397, 16482), False, 'from trove.quota.models import QuotaUsage\n'), ((16639, 16733), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(2)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(1)', 'reserved': '(1)'}), '(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=\n 1, reserved=1)\n', (16649, 16733), False, 'from trove.quota.models import QuotaUsage\n'), ((17992, 18087), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(1)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(0)', 'reserved': '(0)'}), '(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,\n in_use=0, reserved=0)\n', (18002, 18087), False, 'from trove.quota.models import QuotaUsage\n'), ((18244, 18338), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(2)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(0)', 'reserved': '(0)'}), '(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=\n 0, reserved=0)\n', (18254, 18338), False, 'from trove.quota.models import QuotaUsage\n'), ((18958, 19053), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(1)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(1)', 'reserved': '(0)'}), '(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,\n in_use=1, reserved=0)\n', (18968, 19053), False, 'from trove.quota.models import QuotaUsage\n'), ((19210, 19304), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(2)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(0)', 'reserved': '(0)'}), '(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=\n 0, reserved=0)\n', (19220, 19304), False, 'from trove.quota.models import QuotaUsage\n'), ((19899, 19994), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(1)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(1)', 
'reserved': '(2)'}), '(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,\n in_use=1, reserved=2)\n', (19909, 19994), False, 'from trove.quota.models import QuotaUsage\n'), ((20151, 20245), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(2)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(0)', 'reserved': '(0)'}), '(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=\n 0, reserved=0)\n', (20161, 20245), False, 'from trove.quota.models import QuotaUsage\n'), ((20856, 20952), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(1)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(10)', 'reserved': '(0)'}), '(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,\n in_use=10, reserved=0)\n', (20866, 20952), False, 'from trove.quota.models import QuotaUsage\n'), ((21109, 21204), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(2)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(50)', 'reserved': '(0)'}), '(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=\n 50, reserved=0)\n', (21119, 21204), False, 'from trove.quota.models import QuotaUsage\n'), ((22189, 22284), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(1)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(5)', 'reserved': '(2)'}), '(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,\n in_use=5, reserved=2)\n', (22199, 22284), False, 'from trove.quota.models import QuotaUsage\n'), ((22441, 22535), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(2)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(1)', 'reserved': '(2)'}), '(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=\n 1, reserved=2)\n', (22451, 22535), False, 'from trove.quota.models import QuotaUsage\n'), ((22698, 22768), 'trove.quota.models.Reservation', 'Reservation', ([], {'usage_id': '(1)', 'delta': '(1)', 'status': 'Reservation.Statuses.RESERVED'}), '(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED)\n', (22709, 22768), False, 'from trove.quota.models import Reservation\n'), ((22881, 22951), 'trove.quota.models.Reservation', 'Reservation', ([], {'usage_id': '(2)', 'delta': '(2)', 'status': 'Reservation.Statuses.RESERVED'}), '(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)\n', (22892, 22951), False, 'from trove.quota.models import Reservation\n'), ((23695, 23790), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(1)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.INSTANCES', 'in_use': '(5)', 'reserved': '(2)'}), '(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,\n in_use=5, reserved=2)\n', (23705, 23790), False, 'from trove.quota.models import QuotaUsage\n'), ((23947, 24041), 'trove.quota.models.QuotaUsage', 'QuotaUsage', ([], {'id': '(2)', 'tenant_id': 'FAKE_TENANT1', 'resource': 'Resource.VOLUMES', 'in_use': '(1)', 'reserved': '(2)'}), '(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=\n 1, reserved=2)\n', (23957, 24041), False, 'from trove.quota.models import QuotaUsage\n'), ((24204, 24274), 'trove.quota.models.Reservation', 'Reservation', ([], {'usage_id': '(1)', 'delta': '(1)', 'status': 'Reservation.Statuses.RESERVED'}), '(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED)\n', (24215, 24274), False, 'from trove.quota.models import Reservation\n'), ((24387, 24457), 'trove.quota.models.Reservation', 'Reservation', ([], {'usage_id': '(2)', 'delta': '(2)', 
'status': 'Reservation.Statuses.RESERVED'}), '(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)\n', (24398, 24457), False, 'from trove.quota.models import Reservation\n'), ((3891, 3911), 'mockito.verify', 'verify', (['quota', 'never'], {}), '(quota, never)\n', (3897, 3911), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((4362, 4393), 'mockito.verify', 'verify', (['instance_quota'], {'times': '(1)'}), '(instance_quota, times=1)\n', (4368, 4393), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((5266, 5295), 'mockito.verify', 'verify', (['instance_quota', 'never'], {}), '(instance_quota, never)\n', (5272, 5295), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((5375, 5404), 'mockito.verify', 'verify', (['volume_quota'], {'times': '(1)'}), '(volume_quota, times=1)\n', (5381, 5404), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((3032, 3037), 'mockito.any', 'any', ([], {}), '()\n', (3035, 3037), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((3010, 3027), 'mockito.when', 'when', (['req.environ'], {}), '(req.environ)\n', (3014, 3027), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((3576, 3599), 'mockito.when', 'when', (['DatabaseModelBase'], {}), '(DatabaseModelBase)\n', (3580, 3599), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((4056, 4079), 'mockito.when', 'when', (['DatabaseModelBase'], {}), '(DatabaseModelBase)\n', (4060, 4079), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((4773, 4796), 'mockito.when', 'when', (['DatabaseModelBase'], {}), '(DatabaseModelBase)\n', (4777, 4796), False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((4946, 4969), 'mockito.when', 'when', (['DatabaseModelBase'], {}), '(DatabaseModelBase)\n', (4950, 4969), False, 'from mockito import mock, when, unstub, any, verify, never, times\n')] |
from flask_restful import reqparse
def retornar_parser():
    parser = reqparse.RequestParser()
    parser.add_argument('sentenca', type=str, required=True)
    return parser
| [
"flask_restful.reqparse.RequestParser"
]
| [((72, 96), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (94, 96), False, 'from flask_restful import reqparse\n')] |
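
A hypothetical way to wire the parser above into a flask_restful resource — the endpoint path, resource name and app wiring are illustrative assumptions, not part of the snippet:

```python
from flask import Flask
from flask_restful import Api, Resource, reqparse


def retornar_parser():
    parser = reqparse.RequestParser()
    parser.add_argument('sentenca', type=str, required=True)
    return parser


class Sentenca(Resource):
    def post(self):
        args = retornar_parser().parse_args()  # responds 400 if 'sentenca' is missing
        return {'sentenca': args['sentenca']}, 200


app = Flask(__name__)
api = Api(app)
api.add_resource(Sentenca, '/sentenca')

if __name__ == '__main__':
    app.run(debug=True)
```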
from django.contrib import admin
from dicoms.models import Subject
from dicoms.models import Session
from dicoms.models import Series
admin.site.register(Session)
admin.site.register(Subject)
admin.site.register(Series)
| [
"django.contrib.admin.site.register"
]
| [((135, 163), 'django.contrib.admin.site.register', 'admin.site.register', (['Session'], {}), '(Session)\n', (154, 163), False, 'from django.contrib import admin\n'), ((164, 192), 'django.contrib.admin.site.register', 'admin.site.register', (['Subject'], {}), '(Subject)\n', (183, 192), False, 'from django.contrib import admin\n'), ((193, 220), 'django.contrib.admin.site.register', 'admin.site.register', (['Series'], {}), '(Series)\n', (212, 220), False, 'from django.contrib import admin\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2018
# FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de)
# KIT, Institute of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import roslib
import rospy
import tf
import tf2_ros
import geometry_msgs.msg
import lanelet2
stb = None
static_transform = None
lat_origin = None
lon_origin = None
map_frame_id = None
actual_utm_with_no_offset_frame_id = None
def timer_callback(event):
global stb, static_transform
static_transform.header.stamp = rospy.Time.now()
stb.sendTransform(static_transform)
def wait_for_params_successful():
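    # Poll the parameter server until the lanelet2_interface_ros parameters are
    # available, retrying up to 3000 times before giving up.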
global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id
for i in range(3000):
try:
lat_origin = float(rospy.get_param("/lanelet2_interface_ros/lat_origin"))
lon_origin = float(rospy.get_param("/lanelet2_interface_ros/lon_origin"))
map_frame_id = rospy.get_param("/lanelet2_interface_ros/map_frame_id")
actual_utm_with_no_offset_frame_id = rospy.get_param(
"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id")
except Exception:
rospy.sleep(0.01)
continue
return True
return False
if __name__ == '__main__':
rospy.init_node('map_frame_to_utm_tf_publisher')
if not wait_for_params_successful():
rospy.logerr("map_frame_to_utm_tf_publisher: Could not initialize")
exit()
origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin)
projector = lanelet2.projection.UtmProjector(
lanelet2.io.Origin(origin_latlon), False, False)
origin_xy = projector.forward(origin_latlon)
stb = tf2_ros.TransformBroadcaster()
static_transform = geometry_msgs.msg.TransformStamped()
static_transform.header.stamp = rospy.Time.now()
static_transform.header.frame_id = map_frame_id
static_transform.child_frame_id = actual_utm_with_no_offset_frame_id
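    # The lat/lon origin projected to UTM gives the offset between the local map
    # frame and the true (no-offset) UTM frame, so the translation is its negation.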
static_transform.transform.translation.x = -origin_xy.x
static_transform.transform.translation.y = -origin_xy.y
static_transform.transform.translation.z = 0.0
q = tf.transformations.quaternion_from_euler(0, 0, 0)
static_transform.transform.rotation.x = q[0]
static_transform.transform.rotation.y = q[1]
static_transform.transform.rotation.z = q[2]
static_transform.transform.rotation.w = q[3]
rospy.Timer(rospy.Duration(1.), timer_callback)
rospy.spin()
| [
"rospy.logerr",
"lanelet2.io.Origin",
"rospy.init_node",
"rospy.get_param",
"tf2_ros.TransformBroadcaster",
"rospy.Time.now",
"tf.transformations.quaternion_from_euler",
"lanelet2.core.GPSPoint",
"rospy.spin",
"rospy.Duration",
"rospy.sleep"
]
| [((2032, 2048), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2046, 2048), False, 'import rospy\n'), ((2798, 2846), 'rospy.init_node', 'rospy.init_node', (['"""map_frame_to_utm_tf_publisher"""'], {}), "('map_frame_to_utm_tf_publisher')\n", (2813, 2846), False, 'import rospy\n'), ((3001, 3047), 'lanelet2.core.GPSPoint', 'lanelet2.core.GPSPoint', (['lat_origin', 'lon_origin'], {}), '(lat_origin, lon_origin)\n', (3023, 3047), False, 'import lanelet2\n'), ((3215, 3245), 'tf2_ros.TransformBroadcaster', 'tf2_ros.TransformBroadcaster', ([], {}), '()\n', (3243, 3245), False, 'import tf2_ros\n'), ((3343, 3359), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3357, 3359), False, 'import rospy\n'), ((3664, 3713), 'tf.transformations.quaternion_from_euler', 'tf.transformations.quaternion_from_euler', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3704, 3713), False, 'import tf\n'), ((3968, 3980), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3978, 3980), False, 'import rospy\n'), ((2897, 2964), 'rospy.logerr', 'rospy.logerr', (['"""map_frame_to_utm_tf_publisher: Could not initialize"""'], {}), "('map_frame_to_utm_tf_publisher: Could not initialize')\n", (2909, 2964), False, 'import rospy\n'), ((3106, 3139), 'lanelet2.io.Origin', 'lanelet2.io.Origin', (['origin_latlon'], {}), '(origin_latlon)\n', (3124, 3139), False, 'import lanelet2\n'), ((3927, 3946), 'rospy.Duration', 'rospy.Duration', (['(1.0)'], {}), '(1.0)\n', (3941, 3946), False, 'import rospy\n'), ((2448, 2503), 'rospy.get_param', 'rospy.get_param', (['"""/lanelet2_interface_ros/map_frame_id"""'], {}), "('/lanelet2_interface_ros/map_frame_id')\n", (2463, 2503), False, 'import rospy\n'), ((2553, 2630), 'rospy.get_param', 'rospy.get_param', (['"""/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id"""'], {}), "('/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id')\n", (2568, 2630), False, 'import rospy\n'), ((2280, 2333), 'rospy.get_param', 'rospy.get_param', (['"""/lanelet2_interface_ros/lat_origin"""'], {}), "('/lanelet2_interface_ros/lat_origin')\n", (2295, 2333), False, 'import rospy\n'), ((2366, 2419), 'rospy.get_param', 'rospy.get_param', (['"""/lanelet2_interface_ros/lon_origin"""'], {}), "('/lanelet2_interface_ros/lon_origin')\n", (2381, 2419), False, 'import rospy\n'), ((2686, 2703), 'rospy.sleep', 'rospy.sleep', (['(0.01)'], {}), '(0.01)\n', (2697, 2703), False, 'import rospy\n')] |
# Import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.kratos_utilities as KratosUtilities
from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis
class SodShockTubeTest(KratosUnittest.TestCase):
def testSodShockTubeExplicitASGS(self):
self.solver_type = "CompressibleExplicit"
self.use_oss = False
self.shock_capturing = False
self._CustomizeSimulationSettings()
def testSodShockTubeExplicitASGSShockCapturing(self):
self.solver_type = "CompressibleExplicit"
self.use_oss = False
self.shock_capturing = True
self._CustomizeSimulationSettings()
def testSodShockTubeExplicitOSS(self):
self.solver_type = "CompressibleExplicit"
self.use_oss = True
self.shock_capturing = False
self._CustomizeSimulationSettings()
def testSodShockTubeExplicitOSSShockCapturing(self):
self.solver_type = "CompressibleExplicit"
self.use_oss = True
self.shock_capturing = True
self._CustomizeSimulationSettings()
def setUp(self):
self.print_output = False
self.print_reference_values = False
self.check_absolute_tolerance = 1.0e-8
self.check_relative_tolerance = 1.0e-10
self.work_folder = "sod_shock_tube_test"
settings_filename = "ProjectParameters.json"
# Read the simulation settings
with KratosUnittest.WorkFolderScope(self.work_folder,__file__):
with open(settings_filename,'r') as parameter_file:
self.parameters = KratosMultiphysics.Parameters(parameter_file.read())
def runTest(self):
# If required, add the output process to the test settings
if self.print_output:
self._AddOutput()
# If required, add the reference values output process to the test settings
if self.print_reference_values:
self._AddReferenceValuesOutput()
else:
self._AddReferenceValuesCheck()
# Create the test simulation
with KratosUnittest.WorkFolderScope(self.work_folder,__file__):
self.model = KratosMultiphysics.Model()
simulation = FluidDynamicsAnalysis(self.model, self.parameters)
simulation.Run()
def tearDown(self):
with KratosUnittest.WorkFolderScope(self.work_folder, __file__):
KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time')
def _CustomizeSimulationSettings(self):
# Customize simulation settings
self.parameters["solver_settings"]["solver_type"].SetString(self.solver_type)
self.parameters["solver_settings"]["use_oss"].SetBool(self.use_oss)
self.parameters["solver_settings"]["shock_capturing"].SetBool(self.shock_capturing)
def _AddOutput(self):
gid_output_settings = KratosMultiphysics.Parameters("""{
"python_module" : "gid_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "GiDOutputProcess",
"help" : "This process writes postprocessing files for GiD",
"Parameters" : {
"model_part_name" : "FluidModelPart",
"output_name" : "TO_BE_DEFINED",
"postprocess_parameters" : {
"result_file_configuration" : {
"gidpost_flags" : {
"GiDPostMode" : "GiD_PostBinary",
"WriteDeformedMeshFlag" : "WriteDeformed",
"WriteConditionsFlag" : "WriteConditions",
"MultiFileFlag" : "SingleFile"
},
"file_label" : "step",
"output_control_type" : "step",
"output_frequency" : 1.0,
"body_output" : true,
"node_output" : false,
"skin_output" : false,
"plane_output" : [],
"nodal_results" : ["DENSITY","MOMENTUM","TOTAL_ENERGY"],
"gauss_point_results" : ["SHOCK_SENSOR","THERMAL_SENSOR","SHEAR_SENSOR"],
"nodal_nonhistorical_results" : ["ARTIFICIAL_BULK_VISCOSITY","ARTIFICIAL_CONDUCTIVITY","ARTIFICIAL_DYNAMIC_VISCOSITY"]
},
"point_data_configuration" : []
}
}
}""")
output_name = "sod_shock_tube{0}{1}{2}".format(
"_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit",
"_ASGS" if self.use_oss == False else "_OSS",
"_SC" if self.shock_capturing else "")
gid_output_settings["Parameters"]["output_name"].SetString(output_name)
self.parameters["output_processes"]["gid_output"].Append(gid_output_settings)
def _AddReferenceValuesOutput(self):
json_output_settings = KratosMultiphysics.Parameters("""{
"python_module" : "json_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "JsonOutputProcess",
"Parameters" : {
"output_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"],
"output_file_name" : "TO_BE_DEFINED",
"model_part_name" : "FluidModelPart.FluidParts_Fluid",
"time_frequency" : 0.025
}
}""")
output_file_name = "sod_shock_tube{0}{1}{2}_results.json".format(
"_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit",
"_ASGS" if self.use_oss == False else "_OSS",
"_SC" if self.shock_capturing else "")
json_output_settings["Parameters"]["output_file_name"].SetString(output_file_name)
self.parameters["processes"]["json_check_process_list"].Append(json_output_settings)
def _AddReferenceValuesCheck(self):
json_check_settings = KratosMultiphysics.Parameters("""{
"python_module" : "from_json_check_result_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "FromJsonCheckResultProcess",
"Parameters" : {
"check_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"],
"input_file_name" : "TO_BE_DEFINED",
"model_part_name" : "FluidModelPart.FluidParts_Fluid",
"tolerance" : 0.0,
"relative_tolerance" : 0.0,
"time_frequency" : 0.025
}
}""")
input_file_name = "sod_shock_tube{0}{1}{2}_results.json".format(
"_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit",
"_ASGS" if self.use_oss == False else "_OSS",
"_SC" if self.shock_capturing else "")
json_check_settings["Parameters"]["input_file_name"].SetString(input_file_name)
json_check_settings["Parameters"]["tolerance"].SetDouble(self.check_absolute_tolerance)
json_check_settings["Parameters"]["relative_tolerance"].SetDouble(self.check_relative_tolerance)
self.parameters["processes"]["json_check_process_list"].Append(json_check_settings)
if __name__ == '__main__':
test = SodShockTubeTest()
test.setUp()
# test.testSodShockTubeExplicitASGS()
test.testSodShockTubeExplicitASGSShockCapturing()
# test.testSodShockTubeExplicitOSS()
# test.testSodShockTubeExplicitOSSShockCapturing()
test.runTest()
test.tearDown()
| [
"KratosMultiphysics.kratos_utilities.DeleteFileIfExisting",
"KratosMultiphysics.KratosUnittest.WorkFolderScope",
"KratosMultiphysics.Parameters",
"KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis.FluidDynamicsAnalysis",
"KratosMultiphysics.Model"
]
| [((2962, 4763), 'KratosMultiphysics.Parameters', 'KratosMultiphysics.Parameters', (['"""{\n "python_module" : "gid_output_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "GiDOutputProcess",\n "help" : "This process writes postprocessing files for GiD",\n "Parameters" : {\n "model_part_name" : "FluidModelPart",\n "output_name" : "TO_BE_DEFINED",\n "postprocess_parameters" : {\n "result_file_configuration" : {\n "gidpost_flags" : {\n "GiDPostMode" : "GiD_PostBinary",\n "WriteDeformedMeshFlag" : "WriteDeformed",\n "WriteConditionsFlag" : "WriteConditions",\n "MultiFileFlag" : "SingleFile"\n },\n "file_label" : "step",\n "output_control_type" : "step",\n "output_frequency" : 1.0,\n "body_output" : true,\n "node_output" : false,\n "skin_output" : false,\n "plane_output" : [],\n "nodal_results" : ["DENSITY","MOMENTUM","TOTAL_ENERGY"],\n "gauss_point_results" : ["SHOCK_SENSOR","THERMAL_SENSOR","SHEAR_SENSOR"],\n "nodal_nonhistorical_results" : ["ARTIFICIAL_BULK_VISCOSITY","ARTIFICIAL_CONDUCTIVITY","ARTIFICIAL_DYNAMIC_VISCOSITY"]\n },\n "point_data_configuration" : []\n }\n }\n }"""'], {}), '(\n """{\n "python_module" : "gid_output_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "GiDOutputProcess",\n "help" : "This process writes postprocessing files for GiD",\n "Parameters" : {\n "model_part_name" : "FluidModelPart",\n "output_name" : "TO_BE_DEFINED",\n "postprocess_parameters" : {\n "result_file_configuration" : {\n "gidpost_flags" : {\n "GiDPostMode" : "GiD_PostBinary",\n "WriteDeformedMeshFlag" : "WriteDeformed",\n "WriteConditionsFlag" : "WriteConditions",\n "MultiFileFlag" : "SingleFile"\n },\n "file_label" : "step",\n "output_control_type" : "step",\n "output_frequency" : 1.0,\n "body_output" : true,\n "node_output" : false,\n "skin_output" : false,\n "plane_output" : [],\n "nodal_results" : ["DENSITY","MOMENTUM","TOTAL_ENERGY"],\n "gauss_point_results" : ["SHOCK_SENSOR","THERMAL_SENSOR","SHEAR_SENSOR"],\n "nodal_nonhistorical_results" : ["ARTIFICIAL_BULK_VISCOSITY","ARTIFICIAL_CONDUCTIVITY","ARTIFICIAL_DYNAMIC_VISCOSITY"]\n },\n "point_data_configuration" : []\n }\n }\n }"""\n )\n', (2991, 4763), False, 'import KratosMultiphysics\n'), ((5246, 5766), 'KratosMultiphysics.Parameters', 'KratosMultiphysics.Parameters', (['"""{\n "python_module" : "json_output_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "JsonOutputProcess",\n "Parameters" : {\n "output_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"],\n "output_file_name" : "TO_BE_DEFINED",\n "model_part_name" : "FluidModelPart.FluidParts_Fluid",\n "time_frequency" : 0.025\n }\n }"""'], {}), '(\n """{\n "python_module" : "json_output_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "JsonOutputProcess",\n "Parameters" : {\n "output_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"],\n "output_file_name" : "TO_BE_DEFINED",\n "model_part_name" : "FluidModelPart.FluidParts_Fluid",\n "time_frequency" : 0.025\n }\n }"""\n )\n', (5275, 5766), False, 'import KratosMultiphysics\n'), ((6283, 6931), 'KratosMultiphysics.Parameters', 'KratosMultiphysics.Parameters', (['"""{\n "python_module" : "from_json_check_result_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "FromJsonCheckResultProcess",\n "Parameters" : {\n "check_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"],\n "input_file_name" : "TO_BE_DEFINED",\n "model_part_name" : "FluidModelPart.FluidParts_Fluid",\n "tolerance" : 0.0,\n 
"relative_tolerance" : 0.0,\n "time_frequency" : 0.025\n }\n }"""'], {}), '(\n """{\n "python_module" : "from_json_check_result_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "FromJsonCheckResultProcess",\n "Parameters" : {\n "check_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"],\n "input_file_name" : "TO_BE_DEFINED",\n "model_part_name" : "FluidModelPart.FluidParts_Fluid",\n "tolerance" : 0.0,\n "relative_tolerance" : 0.0,\n "time_frequency" : 0.025\n }\n }"""\n )\n', (6312, 6931), False, 'import KratosMultiphysics\n'), ((1528, 1586), 'KratosMultiphysics.KratosUnittest.WorkFolderScope', 'KratosUnittest.WorkFolderScope', (['self.work_folder', '__file__'], {}), '(self.work_folder, __file__)\n', (1558, 1586), True, 'import KratosMultiphysics.KratosUnittest as KratosUnittest\n'), ((2168, 2226), 'KratosMultiphysics.KratosUnittest.WorkFolderScope', 'KratosUnittest.WorkFolderScope', (['self.work_folder', '__file__'], {}), '(self.work_folder, __file__)\n', (2198, 2226), True, 'import KratosMultiphysics.KratosUnittest as KratosUnittest\n'), ((2252, 2278), 'KratosMultiphysics.Model', 'KratosMultiphysics.Model', ([], {}), '()\n', (2276, 2278), False, 'import KratosMultiphysics\n'), ((2304, 2354), 'KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis.FluidDynamicsAnalysis', 'FluidDynamicsAnalysis', (['self.model', 'self.parameters'], {}), '(self.model, self.parameters)\n', (2325, 2354), False, 'from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis\n'), ((2422, 2480), 'KratosMultiphysics.KratosUnittest.WorkFolderScope', 'KratosUnittest.WorkFolderScope', (['self.work_folder', '__file__'], {}), '(self.work_folder, __file__)\n', (2452, 2480), True, 'import KratosMultiphysics.KratosUnittest as KratosUnittest\n'), ((2494, 2565), 'KratosMultiphysics.kratos_utilities.DeleteFileIfExisting', 'KratosUtilities.DeleteFileIfExisting', (['"""sod_shock_tube_geom_coarse.time"""'], {}), "('sod_shock_tube_geom_coarse.time')\n", (2530, 2565), True, 'import KratosMultiphysics.kratos_utilities as KratosUtilities\n')] |
import asyncio
import json
import logging
import websockets
logging.basicConfig()
# Connected clients; tracked so the handler can remove a socket when it closes.
USERS = set()
async def counter(websocket, path):
    try:
        print("connect")
        USERS.add(websocket)
        async for message in websocket:
            print(message)
    finally:
        USERS.remove(websocket)
async def main():
async with websockets.serve(counter, "localhost", 5000):
await asyncio.Future() # run forever
if __name__ == "__main__":
asyncio.run(main())
| [
"logging.basicConfig",
"websockets.serve",
"asyncio.Future"
]
| [((61, 82), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (80, 82), False, 'import logging\n'), ((302, 346), 'websockets.serve', 'websockets.serve', (['counter', '"""localhost"""', '(5000)'], {}), "(counter, 'localhost', 5000)\n", (318, 346), False, 'import websockets\n'), ((362, 378), 'asyncio.Future', 'asyncio.Future', ([], {}), '()\n', (376, 378), False, 'import asyncio\n')] |
import re
import numbers
import collections
import logging
from collections.abc import Iterable
import itertools
import aws_error_utils
from .lookup import Ids, lookup_accounts_for_ou
from .format import format_account_id
LOGGER = logging.getLogger(__name__)
_Context = collections.namedtuple("_Context", [
"session",
"ids",
"principal",
"principal_filter",
"permission_set",
"permission_set_filter",
"target",
"target_filter",
"get_principal_names",
"get_permission_set_names",
"get_target_names",
"ou_recursive",
"cache",
"filter_cache"
])
def _filter(filter_cache, key, func, args):
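    # Apply the optional filter callable, memoizing its verdict per key.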
if not func:
return True
if key not in filter_cache:
filter_cache[key] = func(*args)
return filter_cache[key]
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
def _is_principal_tuple(principal):
try:
return all([
len(principal) == 2,
isinstance(principal[0], str),
principal[0] in ["GROUP", "USER"],
isinstance(principal[1], str),
])
except:
return False
def _process_principal(principal):
if not principal:
return None
if isinstance(principal, str):
return [(None, principal)]
if _is_principal_tuple(principal):
return [tuple(principal)]
else:
return _flatten(_process_principal(p) for p in principal)
def _process_permission_set(ids, permission_set):
if not permission_set:
return None
if not isinstance(permission_set, str) and isinstance(permission_set, Iterable):
return _flatten(_process_permission_set(ids, ps) for ps in permission_set)
if permission_set.startswith("arn"):
permission_set_arn = permission_set
elif permission_set.startswith("ssoins-") or permission_set.startswith("ins-"):
permission_set_arn = f"arn:aws:sso:::permissionSet/{permission_set}"
elif permission_set.startswith("ps-"):
permission_set_arn = f"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}"
else:
raise TypeError(f"Invalid permission set id {permission_set}")
return [permission_set_arn]
def _is_target_tuple(target):
try:
return all([
len(target) == 2,
isinstance(target[0], str),
target[0] in ["AWS_OU", "AWS_ACCOUNT"],
isinstance(target[1], str),
])
except:
return False
def _process_target(target):
if not target:
return None
if isinstance(target, numbers.Number):
return [("AWS_ACCOUNT", format_account_id(target))]
if isinstance(target, str):
if re.match(r"^\d+$", target):
return [("AWS_ACCOUNT", format_account_id(target))]
elif re.match(r"^r-[a-z0-9]{4,32}$", target) or re.match(r"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$", target):
return [("AWS_OU", target)]
else:
raise TypeError(f"Invalid target {target}")
elif _is_target_tuple(target):
target_type, target_id = target
if target_type not in ["AWS_ACCOUNT", "AWS_OU"]:
raise TypeError(f"Invalid target type {target_type}")
return [(target_type, target_id)]
else:
value = _flatten(_process_target(t) for t in target)
return value
def _get_account_iterator(target, context: _Context):
def target_iterator():
target_name = None
if context.get_target_names:
organizations_client = context.session.client("organizations")
account = organizations_client.describe_account(AccountId=target[1])["Account"]
if account.get("Name"):
target_name = account["Name"]
value = (*target, target_name)
if not _filter(context.filter_cache, value[1], context.target_filter, value):
LOGGER.debug(f"Account is filtered: {value}")
else:
LOGGER.debug(f"Visiting single account: {value}")
yield value
return target_iterator
def _get_ou_iterator(target, context: _Context):
def target_iterator():
target_name = None
# if context.get_target_names:
# organizations_client = context.session.client("organizations")
# ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])["OrganizationalUnit"]
# if ou.get("Name"):
# target_name = ou("Name")
value = (*target, target_name)
accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive)
for account in accounts:
yield "AWS_ACCOUNT", account["Id"], account["Name"]
return target_iterator
def _get_single_target_iterator(target, context: _Context):
target_type = target[0]
if target_type == "AWS_ACCOUNT":
return _get_account_iterator(target, context)
elif target_type == "AWS_OU":
return _get_ou_iterator(target, context)
else:
raise TypeError(f"Invalid target type {target_type}")
def _get_all_accounts_iterator(context: _Context):
def target_iterator():
organizations_client = context.session.client("organizations")
accounts_paginator = organizations_client.get_paginator("list_accounts")
for response in accounts_paginator.paginate():
LOGGER.debug(f"ListAccounts page: {response}")
for account in response["Accounts"]:
account_id = account["Id"]
account_name = account["Name"]
value = ("AWS_ACCOUNT", account_id, account_name)
if not _filter(context.filter_cache, account_id, context.target_filter, value):
LOGGER.debug(f"Account is filtered: {value}")
continue
LOGGER.debug(f"Visiting account: {value}")
yield value
return target_iterator
def _get_target_iterator(context: _Context):
if context.target:
iterables = [_get_single_target_iterator(t, context) for t in context.target]
def target_iterator():
return itertools.chain(*[it() for it in iterables])
return target_iterator
else:
LOGGER.debug(f"Iterating for all accounts")
return _get_all_accounts_iterator(context)
def _get_single_permission_set_iterator(permission_set, context: _Context):
permission_set_arn = permission_set
permission_set_id = permission_set_arn.split("/")[-1]
def permission_set_iterator(target_type, target_id, target_name):
if not context.get_permission_set_names:
permission_set_name = None
else:
sso_admin_client = context.session.client("sso-admin")
response = sso_admin_client.describe_permission_set(
InstanceArn=context.ids.instance_arn,
PermissionSetArn=permission_set_arn
)
LOGGER.debug(f"DescribePermissionSet response: {response}")
permission_set_name = response["PermissionSet"]["Name"]
if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):
LOGGER.debug(f"Single permission set is filtered: {(permission_set_id, permission_set_name)}")
else:
LOGGER.debug(f"Visiting single permission set {(permission_set_id, permission_set_name)}")
yield permission_set_arn, permission_set_id, permission_set_name
return permission_set_iterator
def _get_all_permission_sets_iterator(context: _Context):
def permission_set_iterator(target_type, target_id, target_name):
if target_type != "AWS_ACCOUNT":
raise TypeError(f"Unsupported target type {target_type}")
sso_admin_client = context.session.client("sso-admin")
permission_sets_paginator = sso_admin_client.get_paginator("list_permission_sets_provisioned_to_account")
for response in permission_sets_paginator.paginate(
InstanceArn=context.ids.instance_arn,
AccountId=target_id):
LOGGER.debug(f"ListPermissionSetsProvisionedToAccount {target_id} page: {response}")
if "PermissionSets" not in response:
continue
for permission_set_arn in response["PermissionSets"]:
permission_set_id = permission_set_arn.split("/", 2)[-1]
if not context.get_permission_set_names:
permission_set_name = None
else:
if permission_set_arn not in context.cache:
response = sso_admin_client.describe_permission_set(
InstanceArn=context.ids.instance_arn,
PermissionSetArn=permission_set_arn
)
LOGGER.debug(f"DescribePermissionSet response: {response}")
context.cache[permission_set_arn] = response["PermissionSet"]["Name"]
permission_set_name = context.cache[permission_set_arn]
if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):
LOGGER.debug(f"Permission set is filtered: {(permission_set_id, permission_set_name)}")
continue
LOGGER.debug(f"Visiting permission set: {(permission_set_id, permission_set_name)}")
yield permission_set_arn, permission_set_id, permission_set_name
return permission_set_iterator
def _get_permission_set_iterator(context: _Context):
if context.permission_set:
iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set]
def permission_set_iterator(target_type, target_id, target_name):
return itertools.chain(*[it(target_type, target_id, target_name) for it in iterables])
return permission_set_iterator
else:
LOGGER.debug("Iterating for all permission sets")
return _get_all_permission_sets_iterator(context)
def _get_principal_iterator(context: _Context):
def principal_iterator(
target_type, target_id, target_name,
permission_set_arn, permission_set_id, permission_set_name):
if target_type != "AWS_ACCOUNT":
raise TypeError(f"Unsupported target type {target_type}")
sso_admin_client = context.session.client("sso-admin")
identity_store_client = context.session.client("identitystore")
assignments_paginator = sso_admin_client.get_paginator("list_account_assignments")
for response in assignments_paginator.paginate(
InstanceArn=context.ids.instance_arn,
AccountId=target_id,
PermissionSetArn=permission_set_arn):
LOGGER.debug(f"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}")
if not response["AccountAssignments"] and not "NextToken" in response:
LOGGER.debug(f"No assignments for {target_id} {permission_set_arn.split('/')[-1]}")
for assignment in response["AccountAssignments"]:
principal_type = assignment["PrincipalType"]
principal_id = assignment["PrincipalId"]
LOGGER.debug(f"Visiting principal {principal_type}:{principal_id}")
if context.principal:
for principal in context.principal:
                        type_matches = (principal[0] is None or principal[0] == principal_type)
if type_matches and principal[1] == principal_id:
LOGGER.debug(f"Found principal {principal_type}:{principal_id}")
break
else:
LOGGER.debug(f"Principal {principal_type}:{principal_id} does not match principals")
continue
principal_key = (principal_type, principal_id)
if not context.get_principal_names:
principal_name = None
else:
if principal_key not in context.cache:
if principal_type == "GROUP":
try:
response = identity_store_client.describe_group(
IdentityStoreId=context.ids.identity_store_id,
GroupId=principal_id
)
LOGGER.debug(f"DescribeGroup response: {response}")
context.cache[principal_key] = response["DisplayName"]
except aws_error_utils.catch_aws_error("ResourceNotFoundException"):
context.cache[principal_key] = None
elif principal_type == "USER":
try:
response = identity_store_client.describe_user(
IdentityStoreId=context.ids.identity_store_id,
UserId=principal_id
)
LOGGER.debug(f"DescribeUser response: {response}")
context.cache[principal_key] = response["UserName"]
except aws_error_utils.catch_aws_error("ResourceNotFoundException"):
context.cache[principal_key] = None
else:
raise ValueError(f"Unknown principal type {principal_type}")
principal_name = context.cache[principal_key]
if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)):
                    LOGGER.debug(f"Principal is filtered: {principal_type}:{principal_id}")
                    continue
LOGGER.debug(f"Visiting principal: {principal_type}:{principal_id}")
yield principal_type, principal_id, principal_name
return principal_iterator
Assignment = collections.namedtuple("Assignment", [
"instance_arn",
"principal_type",
"principal_id",
"principal_name",
"permission_set_arn",
"permission_set_name",
"target_type",
"target_id",
"target_name",
])
def list_assignments(
session,
instance_arn=None,
identity_store_id=None,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
"""Iterate over AWS SSO assignments.
Args:
session (boto3.Session): boto3 session to use
instance_arn (str): The SSO instance to use, or it will be looked up using ListInstances
identity_store_id (str): The identity store to use if principal names are being retrieved
or it will be looked up using ListInstances
principal: A principal specification or list of principal specifications.
A principal specification is a principal id or a 2-tuple of principal type and id.
principal_filter: A callable taking principal type, principal id, and principal name
(which may be None), and returning True if the principal should be included.
permission_set: A permission set arn or id, or a list of the same.
permission_set_filter: A callable taking permission set arn and name (name may be None),
returning True if the permission set should be included.
target: A target specification or list of target specifications.
A target specification is an account or OU id, or a 2-tuple of target type, which
is either AWS_ACCOUNT or AWS_OU, and target id.
target_filter: A callable taking target type, target id, and target name
(which may be None), and returning True if the target should be included.
get_principal_names (bool): Retrieve names for principals in assignments.
get_permission_set_names (bool): Retrieve names for permission sets in assignments.
get_target_names (bool): Retrieve names for targets in assignments.
ou_recursive (bool): Set to True if an OU is provided as a target to get all accounts
including those in child OUs.
Returns:
An iterator over Assignment namedtuples
"""
ids = Ids(lambda: session, instance_arn, identity_store_id)
return _list_assignments(
session,
ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
)
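# A minimal usage sketch for list_assignments. The account id is a hypothetical
# placeholder, and this assumes the session has the SSO/Organizations read
# permissions the calls made here require:
#
#   import boto3
#   session = boto3.Session()
#   for assignment in list_assignments(
#           session,
#           target=123456789012,
#           get_principal_names=True,
#           get_permission_set_names=True):
#       print(assignment.principal_name,
#             assignment.permission_set_name,
#             assignment.target_id)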
def _list_assignments(
session,
ids,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
principal = _process_principal(principal)
permission_set = _process_permission_set(ids, permission_set)
target = _process_target(target)
cache = {}
filter_cache = {}
context = _Context(
session = session,
ids=ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
cache=cache,
filter_cache=filter_cache,
)
target_iterator = _get_target_iterator(context)
permission_set_iterator = _get_permission_set_iterator(context)
principal_iterator = _get_principal_iterator(context)
for target_type, target_id, target_name in target_iterator():
for permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id, target_name):
for principal_type, principal_id, principal_name in principal_iterator(
target_type, target_id, target_name,
permission_set_arn, permission_set_id, permission_set_name):
assignment = Assignment(
ids.instance_arn,
principal_type,
principal_id,
principal_name,
permission_set_arn,
permission_set_name,
target_type,
target_id,
target_name,
)
LOGGER.debug(f"Visiting assignment: {assignment}")
yield assignment
if __name__ == "__main__":
import boto3
import sys
import json
logging.basicConfig(level=logging.INFO)
kwargs = {}
for v in sys.argv[1:]:
if hasattr(logging, v):
LOGGER.setLevel(getattr(logging, v))
else:
kwargs = json.loads(v)
def fil(*args):
print(args)
return True
kwargs["target_filter"] = fil
try:
session = boto3.Session()
print(",".join(Assignment._fields))
for value in list_assignments(session, **kwargs):
print(",".join(v or "" for v in value))
except KeyboardInterrupt:
pass
| [
"logging.getLogger",
"itertools.chain",
"logging.basicConfig",
"collections.namedtuple",
"json.loads",
"aws_error_utils.catch_aws_error",
"boto3.Session",
"re.match"
]
| [((234, 261), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (251, 261), False, 'import logging\n'), ((274, 555), 'collections.namedtuple', 'collections.namedtuple', (['"""_Context"""', "['session', 'ids', 'principal', 'principal_filter', 'permission_set',\n 'permission_set_filter', 'target', 'target_filter',\n 'get_principal_names', 'get_permission_set_names', 'get_target_names',\n 'ou_recursive', 'cache', 'filter_cache']"], {}), "('_Context', ['session', 'ids', 'principal',\n 'principal_filter', 'permission_set', 'permission_set_filter', 'target',\n 'target_filter', 'get_principal_names', 'get_permission_set_names',\n 'get_target_names', 'ou_recursive', 'cache', 'filter_cache'])\n", (296, 555), False, 'import collections\n'), ((14385, 14587), 'collections.namedtuple', 'collections.namedtuple', (['"""Assignment"""', "['instance_arn', 'principal_type', 'principal_id', 'principal_name',\n 'permission_set_arn', 'permission_set_name', 'target_type', 'target_id',\n 'target_name']"], {}), "('Assignment', ['instance_arn', 'principal_type',\n 'principal_id', 'principal_name', 'permission_set_arn',\n 'permission_set_name', 'target_type', 'target_id', 'target_name'])\n", (14407, 14587), False, 'import collections\n'), ((19629, 19668), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (19648, 19668), False, 'import logging\n'), ((831, 862), 'itertools.chain', 'itertools.chain', (['*list_of_lists'], {}), '(*list_of_lists)\n', (846, 862), False, 'import itertools\n'), ((2684, 2710), 're.match', 're.match', (['"""^\\\\d+$"""', 'target'], {}), "('^\\\\d+$', target)\n", (2692, 2710), False, 'import re\n'), ((19967, 19982), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (19980, 19982), False, 'import boto3\n'), ((19829, 19842), 'json.loads', 'json.loads', (['v'], {}), '(v)\n', (19839, 19842), False, 'import json\n'), ((2789, 2827), 're.match', 're.match', (['"""^r-[a-z0-9]{4,32}$"""', 'target'], {}), "('^r-[a-z0-9]{4,32}$', target)\n", (2797, 2827), False, 'import re\n'), ((2832, 2886), 're.match', 're.match', (['"""^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$"""', 'target'], {}), "('^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$', target)\n", (2840, 2886), False, 'import re\n'), ((12767, 12827), 'aws_error_utils.catch_aws_error', 'aws_error_utils.catch_aws_error', (['"""ResourceNotFoundException"""'], {}), "('ResourceNotFoundException')\n", (12798, 12827), False, 'import aws_error_utils\n'), ((13440, 13500), 'aws_error_utils.catch_aws_error', 'aws_error_utils.catch_aws_error', (['"""ResourceNotFoundException"""'], {}), "('ResourceNotFoundException')\n", (13471, 13500), False, 'import aws_error_utils\n')] |
#!/usr/bin/env python3
import os
import argparse
import subprocess
if __name__ == '__main__':
from version import __version__
from configParser import ConfigParser
else:
from .version import __version__
from .configParser import ConfigParser
def command(cmd):
"""Run a shell command"""
subprocess.call(cmd, shell=True)
"""
cmd_split = cmd.split()
process = subprocess.Popen(cmd_split,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = process.communicate()
return stdout, stderr
"""
def main():
absFilePath = os.path.dirname(os.path.abspath(__file__))
cwdPath = os.path.abspath(os.getcwd())
parser = argparse.ArgumentParser(
prog="buildutil",
description="Assembly/C/C++ utility to build embedded systems",
epilog="Author: <NAME>",
fromfile_prefix_chars='@')
# parser.add_argument('-v', '--verbose',
# action='store_true',
# help='an optional argument')
"""
parser.add_argument('Path',
metavar='path',
type=str,
default=cwdPath,
help='the config filepath')
"""
parser.add_argument(
'-d', '--directory',
type=str,
default=cwdPath,
help='the config filepath')
parser.add_argument(
'-v', '--version',
action='store_true',
help='get the version of the build system')
# parser.add_argument(
# '-f',
# '--file',
# help='A readable file',
# metavar='FILE',
# type=argparse.FileType('r'),
# default=None)
cmd_parser = parser.add_subparsers(dest='cmd', description="")
parser_build = cmd_parser.add_parser(
'build',
help="build the project")
parser_get_version = cmd_parser.add_parser(
'get_version',
help="try to get the version from git")
# parser_get_version.add_argument(
# '-a', '--alpha',
# dest='alpha',
# help='try to get the version')
# Execute parse_args()
args = parser.parse_args()
subcommand = parser.parse_args().cmd
if args.version is True:
print(f"version: {__version__}")
exit(0)
# if subcommand is None or subcommand == "build":
if subcommand == "build":
makefilePath = os.path.join(absFilePath, "conf/make/Makefile")
command(f"make -f {makefilePath}")
elif subcommand == "get_version":
print("version")
else:
ConfigParser()
print("fuck")
return
# Working directory
wd = os.path.abspath(args.directory)
print(f"File: {absFilePath}")
print(F"CWD: {cwdPath}")
print(F"Working directory: {wd}")
print(F"makefile path: {makefilePath}")
print()
command(f"make -f {makefilePath}")
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"os.path.join",
"os.getcwd",
"configParser.ConfigParser",
"subprocess.call",
"os.path.abspath"
]
| [((297, 329), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (312, 329), False, 'import subprocess\n'), ((686, 853), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""buildutil"""', 'description': '"""Assembly/C/C++ utility to build embedded systems"""', 'epilog': '"""Author: <NAME>"""', 'fromfile_prefix_chars': '"""@"""'}), "(prog='buildutil', description=\n 'Assembly/C/C++ utility to build embedded systems', epilog=\n 'Author: <NAME>', fromfile_prefix_chars='@')\n", (709, 853), False, 'import argparse\n'), ((2313, 2344), 'os.path.abspath', 'os.path.abspath', (['args.directory'], {}), '(args.directory)\n', (2328, 2344), False, 'import os\n'), ((608, 633), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (623, 633), False, 'import os\n'), ((662, 673), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (671, 673), False, 'import os\n'), ((2097, 2144), 'os.path.join', 'os.path.join', (['absFilePath', '"""conf/make/Makefile"""'], {}), "(absFilePath, 'conf/make/Makefile')\n", (2109, 2144), False, 'import os\n'), ((2245, 2259), 'configParser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (2257, 2259), False, 'from configParser import ConfigParser\n')] |
import json
import copy
import pdb
import numpy as np
import pickle
def listify_mat(matrix):
matrix = np.array(matrix).astype(str)
if len(matrix.shape) > 1:
matrix_list = []
for row in matrix:
try:
matrix_list.append(list(row))
except:
pdb.set_trace()
return matrix_list
else:
return list(matrix)
class Recorder():
def __init__(self):
self._traj, self._cur_traj = [], []
return
def pack_traj(self):
self._traj.append(copy.deepcopy(self._cur_traj))
self._cur_traj = []
return
def add(self, o, a, r, d):
# self._cur_traj.append((o, a, r, d))
self._cur_traj.append(
(listify_mat(o), listify_mat(a), listify_mat(r), d))
return
def export_pickle(self, filename='traj'):
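        # Convert each recorded trajectory into arrays of observations, actions,
        # rewards and terminals, with next_observations shifted by one step.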
if filename == '':
raise ValueError('incorrect file name')
traj = []
for t in self._traj:
obs = np.array([tt[0] for tt in t]).astype(np.float32)
act = np.array([tt[1] for tt in t]).astype(np.float32)
rwd = np.array([tt[2] for tt in t]).astype(np.float32)
done = np.array([tt[3] for tt in t])
# pdb.set_trace()
traj.append({
'observations': obs[:-1],
'next_observations': obs[1:],
'actions': act[:-1],
'rewards': rwd[:-1],
'terminals': done[:-1]
})
with open('{}.pkl'.format(filename), 'wb') as outfile:
pickle.dump(traj, outfile)
return
def export(self, filename='traj'):
if filename == '':
raise ValueError('incorrect file name')
traj = {'traj': []}
for t in self._traj:
traj['traj'].append(t)
# json.dumps(traj, sort_keys=True, indent=4)
pdb.set_trace()
with open('{}.json'.format(filename), 'w') as outfile:
json.dump(traj, outfile)
        return
| [
"pickle.dump",
"numpy.array",
"pdb.set_trace",
"copy.deepcopy",
"json.dump"
]
| [((1905, 1920), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1918, 1920), False, 'import pdb\n'), ((108, 124), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (116, 124), True, 'import numpy as np\n'), ((555, 584), 'copy.deepcopy', 'copy.deepcopy', (['self._cur_traj'], {}), '(self._cur_traj)\n', (568, 584), False, 'import copy\n'), ((1212, 1241), 'numpy.array', 'np.array', (['[tt[3] for tt in t]'], {}), '([tt[3] for tt in t])\n', (1220, 1241), True, 'import numpy as np\n'), ((1590, 1616), 'pickle.dump', 'pickle.dump', (['traj', 'outfile'], {}), '(traj, outfile)\n', (1601, 1616), False, 'import pickle\n'), ((1997, 2021), 'json.dump', 'json.dump', (['traj', 'outfile'], {}), '(traj, outfile)\n', (2006, 2021), False, 'import json\n'), ((318, 333), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (331, 333), False, 'import pdb\n'), ((1010, 1039), 'numpy.array', 'np.array', (['[tt[0] for tt in t]'], {}), '([tt[0] for tt in t])\n', (1018, 1039), True, 'import numpy as np\n'), ((1077, 1106), 'numpy.array', 'np.array', (['[tt[1] for tt in t]'], {}), '([tt[1] for tt in t])\n', (1085, 1106), True, 'import numpy as np\n'), ((1144, 1173), 'numpy.array', 'np.array', (['[tt[2] for tt in t]'], {}), '([tt[2] for tt in t])\n', (1152, 1173), True, 'import numpy as np\n')] |
# Generated by Django 2.2.4 on 2019-10-03 21:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ingreso', '0003_auto_20190907_2152'),
]
operations = [
migrations.AlterField(
model_name='detalle_ingreso',
name='id_prod',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='producto.Producto'),
),
]
| [
"django.db.models.ForeignKey"
]
| [((379, 480), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""producto.Producto"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='producto.Producto')\n", (396, 480), False, 'from django.db import migrations, models\n')] |
from __future__ import print_function, division
import os,unittest
from pyscf.nao import tddft_iter
dname = os.path.dirname(os.path.abspath(__file__))
td = tddft_iter(label='water', cd=dname)
try:
from pyscf.lib import misc
libnao_gpu = misc.load_library("libnao_gpu")
td_gpu = tddft_iter(label='water', cd=dname, GPU=True)
except:
td_gpu = None
class KnowValues(unittest.TestCase):
def test_tddft_iter(self):
""" This is iterative TDDFT with SIESTA starting point """
self.assertTrue(hasattr(td, 'xocc'))
self.assertTrue(hasattr(td, 'xvrt'))
self.assertTrue(td.ksn2f.sum()==8.0) # water: O -- 6 electrons in the valence + H2 -- 2 electrons
self.assertEqual(td.xocc[0].shape[0], 4)
self.assertEqual(td.xvrt[0].shape[0], 19)
dn0 = td.apply_rf0(td.moms1[:,0])
def test_tddft_iter_gpu(self):
""" Test GPU version """
if td_gpu is not None:
self.assertTrue(hasattr(td_gpu, 'xocc'))
self.assertTrue(hasattr(td_gpu, 'xvrt'))
self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O -- 6 electrons in the valence + H2 -- 2 electrons
self.assertEqual(td_gpu.xocc[0].shape[0], 4)
self.assertEqual(td_gpu.xvrt[0].shape[0], 19)
dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0])
if __name__ == "__main__": unittest.main()
| [
"os.path.abspath",
"pyscf.lib.misc.load_library",
"pyscf.nao.tddft_iter",
"unittest.main"
]
| [((158, 193), 'pyscf.nao.tddft_iter', 'tddft_iter', ([], {'label': '"""water"""', 'cd': 'dname'}), "(label='water', cd=dname)\n", (168, 193), False, 'from pyscf.nao import tddft_iter\n'), ((125, 150), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (140, 150), False, 'import os, unittest\n'), ((247, 278), 'pyscf.lib.misc.load_library', 'misc.load_library', (['"""libnao_gpu"""'], {}), "('libnao_gpu')\n", (264, 278), False, 'from pyscf.lib import misc\n'), ((292, 337), 'pyscf.nao.tddft_iter', 'tddft_iter', ([], {'label': '"""water"""', 'cd': 'dname', 'GPU': '(True)'}), "(label='water', cd=dname, GPU=True)\n", (302, 337), False, 'from pyscf.nao import tddft_iter\n'), ((1286, 1301), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1299, 1301), False, 'import os, unittest\n')] |
import io
from os import path
from setuptools import setup
dirname = path.abspath(path.dirname(__file__))
with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def parse_requirements(filename):
lines = (line.strip() for line in open(path.join(dirname, filename)))
return [line for line in lines if line and not line.startswith("#")]
setup(
name='osm2geojson',
version='0.1.27',
license='MIT',
description='Parse OSM and Overpass JSON',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='geometry gis osm parsing',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/aspectumapp/osm2geojson',
packages=['osm2geojson'],
include_package_data=True,
install_requires=parse_requirements("requirements.txt")
)
| [
"os.path.dirname",
"os.path.join"
]
| [((83, 105), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (95, 105), False, 'from os import path\n'), ((120, 151), 'os.path.join', 'path.join', (['dirname', '"""README.md"""'], {}), "(dirname, 'README.md')\n", (129, 151), False, 'from os import path\n'), ((287, 315), 'os.path.join', 'path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (296, 315), False, 'from os import path\n')] |
import sqlite3
from contextlib import closing
nome = input('Nome do produto: ').lower().capitalize()
with sqlite3.connect('precos.db') as conexao:
with closing(conexao.cursor()) as cursor:
cursor.execute('SELECT * FROM Precos WHERE nome_produto = ?', (nome,))
registro = cursor.fetchone()
if not(registro is None):
print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}')
valor = float(input('Novo valor: R$'))
cursor.execute('UPDATE Precos SET preco = ? WHERE nome_produto = ?', (valor, registro[0]))
if cursor.rowcount == 1:
conexao.commit()
print('Alteração gravada.')
else:
conexao.rollback()
print('Alteração abortada.')
else:
        print(f'Produto {nome} não encontrado.')
| [
"sqlite3.connect"
]
| [((108, 136), 'sqlite3.connect', 'sqlite3.connect', (['"""precos.db"""'], {}), "('precos.db')\n", (123, 136), False, 'import sqlite3\n')] |
import torch
from mmdet.datasets.pipelines.transforms import Pad
from mmdet.datasets.pipelines.transforms import FilterBox
import numpy as np
import cv2
def test_pad():
raw = dict(
img=np.zeros((200, 401, 3), dtype=np.uint8)
)
cv2.imshow('raw', raw['img'])
pad = Pad(square=True, pad_val=255)
r = pad(raw)
print(r['img'].shape)
cv2.imshow('draw', r['img'])
cv2.waitKey()
raw = dict(
img=np.zeros((402, 401, 3), dtype=np.uint8)
)
cv2.imshow('raw', raw['img'])
pad = Pad(square=True, pad_val=255)
r = pad(raw)
print(r['img'].shape)
cv2.imshow('draw', r['img'])
cv2.waitKey()
def test_filter_box():
bboxes = np.array([[0, 0, 10, 10],
[10, 10, 20, 20],
[10, 10, 19, 20],
[10, 10, 20, 19],
[10, 10, 19, 19]])
gt_bboxes = np.array([[0, 0, 10, 9]])
result = dict(gt_bboxes=bboxes)
fb = FilterBox((10, 10))
fb(result)
if __name__ == '__main__':
# test_pad()
test_filter_box()
| [
"cv2.imshow",
"numpy.array",
"numpy.zeros",
"mmdet.datasets.pipelines.transforms.FilterBox",
"mmdet.datasets.pipelines.transforms.Pad",
"cv2.waitKey"
]
| [((249, 278), 'cv2.imshow', 'cv2.imshow', (['"""raw"""', "raw['img']"], {}), "('raw', raw['img'])\n", (259, 278), False, 'import cv2\n'), ((289, 318), 'mmdet.datasets.pipelines.transforms.Pad', 'Pad', ([], {'square': '(True)', 'pad_val': '(255)'}), '(square=True, pad_val=255)\n', (292, 318), False, 'from mmdet.datasets.pipelines.transforms import Pad\n'), ((367, 395), 'cv2.imshow', 'cv2.imshow', (['"""draw"""', "r['img']"], {}), "('draw', r['img'])\n", (377, 395), False, 'import cv2\n'), ((400, 413), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (411, 413), False, 'import cv2\n'), ((493, 522), 'cv2.imshow', 'cv2.imshow', (['"""raw"""', "raw['img']"], {}), "('raw', raw['img'])\n", (503, 522), False, 'import cv2\n'), ((533, 562), 'mmdet.datasets.pipelines.transforms.Pad', 'Pad', ([], {'square': '(True)', 'pad_val': '(255)'}), '(square=True, pad_val=255)\n', (536, 562), False, 'from mmdet.datasets.pipelines.transforms import Pad\n'), ((611, 639), 'cv2.imshow', 'cv2.imshow', (['"""draw"""', "r['img']"], {}), "('draw', r['img'])\n", (621, 639), False, 'import cv2\n'), ((644, 657), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (655, 657), False, 'import cv2\n'), ((696, 799), 'numpy.array', 'np.array', (['[[0, 0, 10, 10], [10, 10, 20, 20], [10, 10, 19, 20], [10, 10, 20, 19], [10,\n 10, 19, 19]]'], {}), '([[0, 0, 10, 10], [10, 10, 20, 20], [10, 10, 19, 20], [10, 10, 20, \n 19], [10, 10, 19, 19]])\n', (704, 799), True, 'import numpy as np\n'), ((903, 928), 'numpy.array', 'np.array', (['[[0, 0, 10, 9]]'], {}), '([[0, 0, 10, 9]])\n', (911, 928), True, 'import numpy as np\n'), ((974, 993), 'mmdet.datasets.pipelines.transforms.FilterBox', 'FilterBox', (['(10, 10)'], {}), '((10, 10))\n', (983, 993), False, 'from mmdet.datasets.pipelines.transforms import FilterBox\n'), ((199, 238), 'numpy.zeros', 'np.zeros', (['(200, 401, 3)'], {'dtype': 'np.uint8'}), '((200, 401, 3), dtype=np.uint8)\n', (207, 238), True, 'import numpy as np\n'), ((443, 482), 'numpy.zeros', 'np.zeros', (['(402, 401, 3)'], {'dtype': 'np.uint8'}), '((402, 401, 3), dtype=np.uint8)\n', (451, 482), True, 'import numpy as np\n')] |
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from waflib import Errors
import lumberyard_modules
import unittest
import pytest
import utils
class FakeContext(object):
pass
class FakeIncludeSettings(object):
pass
class FakePlatformSettings(object):
def __init__(self, platform_name, aliases=set()):
self.platform = platform_name
self.aliases = aliases
class FakeConfigurationSettings(object):
def __init__(self, settings_name, base_config=None):
self.base_config = base_config
self.name = settings_name
class FakeConfiguration(object):
def __init__(self, settings, is_test=False, is_server=False):
self.settings = settings
self.is_test = is_test
self.is_server = is_server
@pytest.fixture()
def mock_parse_json(mock_json_map):
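    # Temporarily replace utils.parse_json_file with a lookup into mock_json_map,
    # restoring the real implementation once the test completes.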
if not mock_json_map:
mock_json_map = {'path': {}}
def _mock_parse_json(path, _):
return mock_json_map[path]
old_parse_json_file = utils.parse_json_file
utils.parse_json_file = _mock_parse_json
yield
utils.parse_json_file = old_parse_json_file
@pytest.fixture()
def fake_context():
return FakeContext()
def test_SanitizeKWInput_SimpleKwDictionary_Success():
kw = dict(
libpath='mylib'
)
lumberyard_modules.sanitize_kw_input(kw)
assert isinstance(kw['libpath'], list)
assert kw['libpath'][0] == 'mylib'
def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success():
kw = dict(
libpath='mylib',
additional_settings=dict(stlibpath='mystlib')
)
lumberyard_modules.sanitize_kw_input(kw)
assert isinstance(kw['libpath'], list)
assert kw['libpath'][0] == 'mylib'
assert isinstance(kw['additional_settings'], list)
assert isinstance(kw['additional_settings'][0], dict)
assert isinstance(kw['additional_settings'][0]['stlibpath'], list)
assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib'
@pytest.mark.parametrize(
"target, kw_key, source_section, additional_aliases, merge_dict, expected", [
pytest.param('test_target', 'fake_key', {}, {}, {}, {}, id='MissingKeyInSourceNoChange'),
pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'}, {}, {}, {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'),
pytest.param('test_target', 'copyright_org', {'copyright_org': False}, {}, {'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'),
pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'),
pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'),
pytest.param('test_target', 'client_only', {'client_only': 'False'}, {}, {'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'),
pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'),
pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'),
])
def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section, additional_aliases, merge_dict, expected):
fake_context = FakeContext()
test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)
if isinstance(expected,dict):
test_settings.merge_kw_key(target=target,
kw_key=kw_key,
source_section=source_section,
merge_kw=merge_dict)
assert merge_dict == expected
elif isinstance(expected, type(Errors.WafError)):
with pytest.raises(Errors.WafError):
test_settings.merge_kw_key(target=target,
kw_key=kw_key,
source_section=source_section,
merge_kw=merge_dict)
@pytest.mark.parametrize(
"test_dict, fake_include_settings, mock_json_map, additional_aliases, expected", [
pytest.param({}, None, None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'),
pytest.param({}, 'include_test',
{
'path': {
'includes': ['include_test']
},'include_test': {}
}, {}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes')
])
def test_ProjectSettingsFileIncludes_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings, mock_json_map, additional_aliases, expected):
if fake_include_settings:
def _mock_get_project_settings_file(include_settings_file, additional_aliases):
assert fake_include_settings == include_settings_file
fake_settings = FakeIncludeSettings()
return fake_settings
fake_context.get_project_settings_file = _mock_get_project_settings_file
test = lumberyard_modules.ProjectSettingsFile(fake_context,
'path',
additional_aliases)
assert test.dict == expected
@pytest.mark.parametrize(
"mock_json_map, additional_aliases, section_key, expected", [
pytest.param(None, {}, 'no_section', {}, id='SimpleNoChange'),
pytest.param({
'path': {
"test_section": {
"key1": "value1"
}
}
}, {}, 'test_section', {'key1': 'value1'}, id='SimpleChanges')
])
def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases, section_key, expected):
test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)
merge_dict = {}
test_settings.merge_kw_section(section_key=section_key,
target='test_target',
merge_kw=merge_dict)
assert expected == merge_dict
class ProjectSettingsTest(unittest.TestCase):
def setUp(self):
self.old_parse_json = utils.parse_json_file
utils.parse_json_file = self.mockParseJson
self.mock_json_map = {}
def tearDown(self):
utils.parse_json_file = self.old_parse_json
def mockParseJson(self, path, _):
return self.mock_json_map[path]
def createSimpleSettings(self, fake_context = FakeContext(), test_dict={}, additional_aliases={}):
self.mock_json_map = {'path': test_dict}
test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)
return test_settings
def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self):
"""
Test scenario:
        Set up a project settings file that includes other project settings, so that merge_kw_dict is
        called recursively
"""
include_settings_file = 'include_test'
test_settings_single_include = {'includes': [include_settings_file]}
test_empty_settings = {}
test_merge_kw_key = 'passed'
test_merge_kw_value = True
self.mock_json_map = {'path': test_settings_single_include,
include_settings_file: test_empty_settings}
# Prepare a mock include settings object
test_include_settings = self.createSimpleSettings()
def _mock_merge_kw_dict(target, merge_kw, platform, configuration):
merge_kw[test_merge_kw_key] = test_merge_kw_value
pass
test_include_settings.merge_kw_dict = _mock_merge_kw_dict
# Prepare a mock context
fake_context = FakeContext()
def _mock_get_project_settings_file(_a, _b):
return test_include_settings
fake_context.get_project_settings_file = _mock_get_project_settings_file
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_settings_single_include)
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=None,
configuration=None)
self.assertIn(test_merge_kw_key, test_merge_kw)
self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value)
def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when only platform is set and not any configurations
"""
test_platform = 'test_platform'
test_alias = 'alias_1'
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform',
aliases={test_alias})
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform,
configuration=None)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform), sections_merged)
self.assertIn('{}/*'.format(test_alias), sections_merged)
self.assertEqual(len(sections_merged), 2)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self):
"""
Test scenario:
        Test the merge_kw_dict when the platform + configuration is set, and the configuration is neither a test
        nor a server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name))
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 2)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self):
"""
Test scenario:
        Test the merge_kw_dict when the platform + configuration is set, and the configuration is neither a test
        nor a server configuration, but is derived from another configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
base_test_configuration_name = 'base_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name,
base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name))))
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 3)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a test and a
server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=True,
is_server=True)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 8)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a test but not a
server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=True,
is_server=False)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test', sections_merged)
self.assertIn('{}/*/test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 11)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a server but not a
test configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=False,
is_server=True)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated', sections_merged)
self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 11)
| [
"lumberyard_modules.ProjectSettingsFile",
"pytest.param",
"lumberyard_modules.sanitize_kw_input",
"pytest.raises",
"pytest.fixture"
]
| [((1263, 1279), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1277, 1279), False, 'import pytest\n'), ((1632, 1648), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1646, 1648), False, 'import pytest\n'), ((1800, 1840), 'lumberyard_modules.sanitize_kw_input', 'lumberyard_modules.sanitize_kw_input', (['kw'], {}), '(kw)\n', (1836, 1840), False, 'import lumberyard_modules\n'), ((2104, 2144), 'lumberyard_modules.sanitize_kw_input', 'lumberyard_modules.sanitize_kw_input', (['kw'], {}), '(kw)\n', (2140, 2144), False, 'import lumberyard_modules\n'), ((4375, 4460), 'lumberyard_modules.ProjectSettingsFile', 'lumberyard_modules.ProjectSettingsFile', (['fake_context', '"""path"""', 'additional_aliases'], {}), "(fake_context, 'path', additional_aliases\n )\n", (4413, 4460), False, 'import lumberyard_modules\n'), ((6171, 6256), 'lumberyard_modules.ProjectSettingsFile', 'lumberyard_modules.ProjectSettingsFile', (['fake_context', '"""path"""', 'additional_aliases'], {}), "(fake_context, 'path', additional_aliases\n )\n", (6209, 6256), False, 'import lumberyard_modules\n'), ((7028, 7113), 'lumberyard_modules.ProjectSettingsFile', 'lumberyard_modules.ProjectSettingsFile', (['fake_context', '"""path"""', 'additional_aliases'], {}), "(fake_context, 'path', additional_aliases\n )\n", (7066, 7113), False, 'import lumberyard_modules\n'), ((2657, 2750), 'pytest.param', 'pytest.param', (['"""test_target"""', '"""fake_key"""', '{}', '{}', '{}', '{}'], {'id': '"""MissingKeyInSourceNoChange"""'}), "('test_target', 'fake_key', {}, {}, {}, {}, id=\n 'MissingKeyInSourceNoChange')\n", (2669, 2750), False, 'import pytest\n'), ((2847, 2987), 'pytest.param', 'pytest.param', (['"""test_target"""', '"""fake_key"""', "{'fake_key': 'fake_value'}", '{}', '{}', "{'fake_key': 'fake_value'}"], {'id': '"""MissingKeyInTargetKeyAdded"""'}), "('test_target', 'fake_key', {'fake_key': 'fake_value'}, {}, {},\n {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded')\n", (2859, 2987), False, 'import pytest\n'), ((3421, 3590), 'pytest.param', 'pytest.param', (['"""test_target"""', '"""copyright_org"""', "{'copyright_org': 'AMZN'}", '{}', "{'copyright_org': 'A2Z'}", "{'copyright_org': 'AMZN'}"], {'id': '"""MergeStringReplaceSuccess"""'}), "('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {},\n {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id=\n 'MergeStringReplaceSuccess')\n", (3433, 3590), False, 'import pytest\n'), ((3990, 4149), 'pytest.param', 'pytest.param', (['"""test_target"""', '"""client_only"""', "{'client_only': False}", '{}', "{'client_only': True}", "{'client_only': False}"], {'id': '"""MergeBoolReplaceKwSuccess"""'}), "('test_target', 'client_only', {'client_only': False}, {}, {\n 'client_only': True}, {'client_only': False}, id=\n 'MergeBoolReplaceKwSuccess')\n", (4002, 4149), False, 'import pytest\n'), ((5211, 5301), 'pytest.param', 'pytest.param', (['{}', 'None', 'None', '{}', '{}'], {'id': '"""BasicNoAdditionalAliasNoAdditionalIncludes"""'}), "({}, None, None, {}, {}, id=\n 'BasicNoAdditionalAliasNoAdditionalIncludes')\n", (5223, 5301), False, 'import pytest\n'), ((5306, 5498), 'pytest.param', 'pytest.param', (['{}', '"""include_test"""', "{'path': {'includes': ['include_test']}, 'include_test': {}}", '{}', "{'includes': ['include_test']}"], {'id': '"""BasicNoAdditionalAliasSingleAdditionalIncludes"""'}), "({}, 'include_test', {'path': {'includes': ['include_test']},\n 'include_test': {}}, {}, {'includes': ['include_test']}, id=\n 'BasicNoAdditionalAliasSingleAdditionalIncludes')\n", 
(5318, 5498), False, 'import pytest\n'), ((6492, 6553), 'pytest.param', 'pytest.param', (['None', '{}', '"""no_section"""', '{}'], {'id': '"""SimpleNoChange"""'}), "(None, {}, 'no_section', {}, id='SimpleNoChange')\n", (6504, 6553), False, 'import pytest\n'), ((6563, 6687), 'pytest.param', 'pytest.param', (["{'path': {'test_section': {'key1': 'value1'}}}", '{}', '"""test_section"""', "{'key1': 'value1'}"], {'id': '"""SimpleChanges"""'}), "({'path': {'test_section': {'key1': 'value1'}}}, {},\n 'test_section', {'key1': 'value1'}, id='SimpleChanges')\n", (6575, 6687), False, 'import pytest\n'), ((7928, 8013), 'lumberyard_modules.ProjectSettingsFile', 'lumberyard_modules.ProjectSettingsFile', (['fake_context', '"""path"""', 'additional_aliases'], {}), "(fake_context, 'path', additional_aliases\n )\n", (7966, 8013), False, 'import lumberyard_modules\n'), ((4818, 4848), 'pytest.raises', 'pytest.raises', (['Errors.WafError'], {}), '(Errors.WafError)\n', (4831, 4848), False, 'import pytest\n')] |
#!/usr/bin/env python3
"""Curve fitting with linear programming.
Minimizes the sum of error for each fit point to find the optimal coefficients
for a given polynomial.
Overview:
Objective: Sum of errors
Subject to: Bounds on coefficients
Credit: "Curve Fitting with Linear Programming", <NAME> and <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import string
from ortools.linear_solver import pywraplp
class ErrorDefinition(enum.Enum):
SUM_ABS_DEV = enum.auto()
SUM_MAX_DEVIATION = enum.auto()
def _generate_variables(solver, points, coeff_ranges, err_max, error_def):
"""Create coefficient variables.
    Initial version works for polynomials with up to 26 coefficients. One letter of
    the English alphabet is used per coefficient name.
TODO(drofp): Figure out naming scheme for arbitrary number of variables.
"""
num_of_coeff = len(coeff_ranges)
variables = []
coeff_names = []
# Add coefficients to variable list.
if num_of_coeff == 2:
coeff_names.append('m')
coeff_names.append('b')
else:
for letter_cnt in range(num_of_coeff):
coeff_names.append(string.ascii_lowercase[letter_cnt])
for coeff_num in range(num_of_coeff):
if coeff_ranges[coeff_num][0] is None:
lower_bound = -solver.Infinity()
else:
lower_bound = coeff_ranges[coeff_num][0]
if coeff_ranges[coeff_num][1] is None:
upper_bound = solver.Infinity()
else:
upper_bound = coeff_ranges[coeff_num][1]
variables.append(
solver.NumVar(lower_bound, upper_bound, coeff_names[coeff_num]))
# Add absolute error variables to variable list
for point_cnt in range(len(points)):
positive_err_var = solver.NumVar(
0, err_max, 'e' + str(point_cnt + 1) + '_plus')
negative_err_var = solver.NumVar(
0, err_max, 'e' + str(point_cnt + 1) + '_minus')
variables.append(positive_err_var)
variables.append(negative_err_var)
return variables
def _generate_objective_fn(
solver, num_of_coeff, variables, error_def=ErrorDefinition.SUM_ABS_DEV):
"""Generate objective function for given error definition."""
objective = solver.Objective()
for variable in variables[num_of_coeff:]:
objective.SetCoefficient(variable, 1)
return objective
def _generate_constraints(solver, points, num_of_coeff, variables):
constraints = []
for point_num, point in enumerate(points):
# Equivalency constraint
constraint = solver.Constraint(point[1], point[1])
# Resultant Coefficient terms
for coeff_num, coeff in enumerate(variables[:num_of_coeff]):
power = num_of_coeff - coeff_num - 1
x_val = point[0] ** power
constraint.SetCoefficient(coeff, x_val)
# Error terms
ex_plus = variables[num_of_coeff + 2 * point_num]
ex_minus = variables[num_of_coeff + 2 * point_num + 1]
constraint.SetCoefficient(ex_plus, -1)
constraint.SetCoefficient(ex_minus, 1)
constraints.append(constraint)
return constraints
def get_optimal_polynomial(
points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV,
err_max=10000, solver=None):
"""Optimize coefficients for any order polynomial.
Args:
points: A tuple of points, represented as tuples (x, y)
    coeff_ranges: A tuple of valid coefficient ranges, represented as tuples
      (min, max). Number of elements in the tuple determines order of polynomial,
      from highest order (0th index) to lowest order (nth index).
    error_def: An ErrorDefinition enum, specifying the definition for error.
err_max: An Integer, specifying the maximum error allowable.
    solver: an ortools.pywraplp.Solver object, if a specific solver instance is
requested by caller.
Returns:
    A Dictionary, the desired coefficients mapped to their values.
"""
if coeff_ranges is None:
raise ValueError('Please provide appropriate coefficient range.')
if solver is None:
solver = pywraplp.Solver(
'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
variables = _generate_variables(
solver, points, coeff_ranges, err_max=err_max,
error_def=error_def)
num_of_coeff = len(coeff_ranges)
_generate_objective_fn(solver, num_of_coeff, variables)
_generate_constraints(solver, points, num_of_coeff, variables)
solver.Solve()
var_to_val = dict()
for coeff in variables[:num_of_coeff]:
var_to_val[coeff.name()] = coeff.solution_value()
return var_to_val
def demo_optimal_linear_5points():
"""Demonstration of getting optimal linear polynomial.
Uses 5 points from Swanson's curve fitting paper.
"""
print('STARTING LINEAR DEMO WITH 5 POINTS FROM SWANSON PAPER')
points = (0,1), (1,3), (2,2), (3,4), (4,5)
coeff_ranges = ((None, None), (None, None))
# solver = pywraplp.Solver(
# 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
optimized_coefficients = get_optimal_polynomial(
points=points, coeff_ranges=coeff_ranges)
for elm in optimized_coefficients:
print('elm: {}'.format(elm))
print(
'type(optimized_coefficients): {}'.format(
type(optimized_coefficients)))
print('optimized_coefficients: {}'.format(optimized_coefficients))
# m, b = optimized_coefficients
# print('Optimized m: {}, b: {}'.format(m, b))
def demo_optimal_linear_10points():
print('STARTING LINEAR DEMO WITH 10 POINTS FROM WILLIAMS')
x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
points = tuple(zip(x_vals, y_vals))
coeff_ranges = ((None, None), (None, None))
print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))
def demo_optimal_quadratic_10points():
print('STARTING QUADRATIC DEMO WITH 10 POINTS FROM WILLIAMS')
x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
points = tuple(zip(x_vals, y_vals))
coeff_ranges = ((None, None), (None, None), (None, None))
print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))
def demo_optimal_quadratic_19points():
print('STARTING QUADRATIC DEMO WITH 19 POINTS FROM WILLIAMS')
x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
x_vals.extend([5.0, 5.5, 6.0, 6.6, 7.0, 7.6, 8.5, 9.0, 10.0])
y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
y_vals.extend([1.0, 4.0, 3.6, 2.7, 5.7, 4.6, 6.0, 6.8, 7.3])
points = tuple(zip(x_vals, y_vals))
coeff_ranges = ((None, None), (None, None), (None, None))
print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))
def demo_optimal_cubic_10points():
print('STARTING CUBIC DEMO WITH 10 POINTS FROM WILLIAMS')
x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
points = tuple(zip(x_vals, y_vals))
coeff_ranges = ((None, None), (None, None), (None, None), (None, None))
print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))
def main():
demo_optimal_quadratic_19points()
if __name__ == '__main__':
main() | [
"enum.auto",
"ortools.linear_solver.pywraplp.Solver"
]
| [((555, 566), 'enum.auto', 'enum.auto', ([], {}), '()\n', (564, 566), False, 'import enum\n'), ((591, 602), 'enum.auto', 'enum.auto', ([], {}), '()\n', (600, 602), False, 'import enum\n'), ((4198, 4275), 'ortools.linear_solver.pywraplp.Solver', 'pywraplp.Solver', (['"""polynomial_solver"""', 'pywraplp.Solver.GLOP_LINEAR_PROGRAMMING'], {}), "('polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n", (4213, 4275), False, 'from ortools.linear_solver import pywraplp\n')] |
from pathlib import PosixPath
import configparser
from typing import Dict, Optional, Any, List
from inspect import cleandoc
import shutil
import tensorhive
import os
import logging
log = logging.getLogger(__name__)
class CONFIG_FILES:
# Where to copy files
# (TensorHive tries to load these by default)
config_dir = PosixPath.home() / '.config/TensorHive'
MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini')
HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini')
MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini')
# Where to get file templates from
# (Clone file when it's not found in config directory)
tensorhive_package_dir = PosixPath(__file__).parent
MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini')
HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini')
MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini')
ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini')
MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations')
class ConfigInitilizer:
'''Makes sure that all default config files exist'''
def __init__(self):
# 1. Check if all config files exist
all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \
PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \
PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists()
if not all_exist:
log.warning('[•] Detected missing default config file(s), recreating...')
self.recreate_default_configuration_files()
        else:
            log.info('[•] All configs already exist, skipping...')
def recreate_default_configuration_files(self) -> None:
try:
            # 1. Create directory for storing config files
CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True)
# 2. Clone templates safely from `tensorhive` package
self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH)
self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH)
self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH)
# 3. Change config files permission
rw_owner_only = 0o600
os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only)
os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only)
os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only)
except Exception:
log.error('[✘] Unable to recreate configuration files.')
def safe_copy(self, src: str, dst: str) -> None:
'''Safe means that it won't override existing configuration'''
if PosixPath(dst).exists():
log.info('Skipping, file already exists: {}'.format(dst))
else:
shutil.copy(src, dst)
log.info('Copied {} to {}'.format(src, dst))
class ConfigLoader:
@staticmethod
def load(path, displayed_title=''):
config = configparser.ConfigParser(strict=False)
full_path = PosixPath(path).expanduser()
if config.read(str(full_path)):
log.info('[•] Reading {} config from {}'.format(displayed_title, full_path))
else:
log.warning('[✘] Configuration file not found ({})'.format(full_path))
log.info('Using default {} settings from config.py'.format(displayed_title))
return config
ConfigInitilizer()
config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main')
def display_config(cls):
'''
    Displays all uppercase class attributes (class must be defined first)
Example usage: display_config(API_SERVER)
'''
print('[{class_name}]'.format(class_name=cls.__name__))
for key, value in cls.__dict__.items():
if key.isupper():
print('{} = {}'.format(key, value))
def check_env_var(name: str):
'''Makes sure that env variable is declared'''
if not os.getenv(name):
msg = cleandoc(
'''
{env} - undeclared environment variable!
Try this: `export {env}="..."`
''').format(env=name).split('\n')
log.warning(msg[0])
log.warning(msg[1])
class SSH:
section = 'ssh'
HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH)
TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True)
TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0)
NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1)
KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key')
def hosts_config_to_dict(path: str) -> Dict: # type: ignore
'''Parses sections containing hostnames'''
hosts_config = ConfigLoader.load(path, displayed_title='hosts')
result = {}
for section in hosts_config.sections():
# We want to parse only sections which describe target hosts
if section == 'proxy_tunneling':
continue
hostname = section
result[hostname] = {
'user': hosts_config.get(hostname, 'user'),
'port': hosts_config.getint(hostname, 'port', fallback=22)
}
return result
def proxy_config_to_dict(path: str) -> Optional[Dict]: # type: ignore
'''Parses [proxy_tunneling] section'''
config = ConfigLoader.load(path, displayed_title='proxy')
section = 'proxy_tunneling'
# Check if section is present and if yes, check if tunneling is enabled
if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False):
return {
'proxy_host': config.get(section, 'proxy_host'),
'proxy_user': config.get(section, 'proxy_user'),
'proxy_port': config.getint(section, 'proxy_port', fallback=22)
}
else:
return None
AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE)
PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE)
class DB:
section = 'database'
default_path = '~/.config/TensorHive/database.sqlite'
def uri_for_path(path: str) -> str: # type: ignore
return 'sqlite:///{}'.format(PosixPath(path).expanduser())
SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path))
TEST_DATABASE_URI = 'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite)
class API:
section = 'api'
TITLE = config.get(section, 'title', fallback='TensorHive API')
URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0')
URL_PREFIX = config.get(section, 'url_prefix', fallback='api')
SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml')
IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers')
import yaml
respones_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml')
with open(respones_file_path, 'r') as file:
RESPONSES = yaml.safe_load(file)
class APP_SERVER:
section = 'web_app.server'
BACKEND = config.get(section, 'backend', fallback='gunicorn')
HOST = config.get(section, 'host', fallback='0.0.0.0')
PORT = config.getint(section, 'port', fallback=5000)
WORKERS = config.getint(section, 'workers', fallback=4)
LOG_LEVEL = config.get(section, 'loglevel', fallback='warning')
class API_SERVER:
section = 'api.server'
BACKEND = config.get(section, 'backend', fallback='gevent')
HOST = config.get(section, 'host', fallback='0.0.0.0')
PORT = config.getint(section, 'port', fallback=1111)
DEBUG = config.getboolean(section, 'debug', fallback=False)
class MONITORING_SERVICE:
section = 'monitoring_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
class PROTECTION_SERVICE:
section = 'protection_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True)
NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False)
class MAILBOT:
mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot')
section = 'general'
INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0)
MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section,
'max_emails_per_protection_interval', fallback=50)
NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True)
NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False)
ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None)
section = 'smtp'
SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None)
SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None)
SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None)
SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587)
section = 'template/intruder'
INTRUDER_SUBJECT = mailbot_config.get(section, 'subject')
INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')
section = 'template/admin'
ADMIN_SUBJECT = mailbot_config.get(section, 'subject')
ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')
class USAGE_LOGGING_SERVICE:
section = 'usage_logging_service'
default_path = '~/.config/TensorHive/logs/'
def full_path(path: str) -> str: # type: ignore
return str(PosixPath(path).expanduser())
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path))
LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2)
class JOB_SCHEDULING_SERVICE:
section = 'job_scheduling_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0)
STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0)
SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, "schedule_queued_jobs_when_free_mins", fallback=30)
class AUTH:
from datetime import timedelta
section = 'auth'
def config_get_parsed(option: str, fallback: Any) -> List[str]: # type: ignore
'''
Parses value for option from string to a valid python list.
Fallback value is returned when anything goes wrong (e.g. option or value not present)
Example .ini file, function called with arguments: option='some_option', fallback=None
[some_section]
some_option = ['foo', 'bar']
Will return:
['foo', 'bar']
'''
import ast
try:
raw_arguments = config.get('auth', option)
parsed_arguments = ast.literal_eval(raw_arguments)
return parsed_arguments
except (configparser.Error, ValueError):
log.warning('Parsing [auth] config section failed for option "{}", using fallback value: {}'.format(
option, fallback))
return fallback
FLASK_JWT = {
'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'),
'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True),
'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']),
'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True),
'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes',
fallback=1)),
'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days',
fallback=1)),
'JWT_TOKEN_LOCATION': config_get_parsed('jwt_token_location', fallback=['headers'])
}
| [
"logging.getLogger",
"configparser.ConfigParser",
"os.getenv",
"pathlib.PosixPath",
"os.chmod",
"ast.literal_eval",
"yaml.safe_load",
"inspect.cleandoc",
"shutil.copy",
"pathlib.PosixPath.home"
]
| [((187, 214), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (204, 214), False, 'import logging\n'), ((330, 346), 'pathlib.PosixPath.home', 'PosixPath.home', ([], {}), '()\n', (344, 346), False, 'from pathlib import PosixPath\n'), ((683, 702), 'pathlib.PosixPath', 'PosixPath', (['__file__'], {}), '(__file__)\n', (692, 702), False, 'from pathlib import PosixPath\n'), ((3137, 3176), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'strict': '(False)'}), '(strict=False)\n', (3162, 3176), False, 'import configparser\n'), ((4100, 4115), 'os.getenv', 'os.getenv', (['name'], {}), '(name)\n', (4109, 4115), False, 'import os\n'), ((7245, 7265), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (7259, 7265), False, 'import yaml\n'), ((2388, 2442), 'os.chmod', 'os.chmod', (['CONFIG_FILES.MAIN_CONFIG_PATH', 'rw_owner_only'], {}), '(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only)\n', (2396, 2442), False, 'import os\n'), ((2455, 2510), 'os.chmod', 'os.chmod', (['CONFIG_FILES.HOSTS_CONFIG_PATH', 'rw_owner_only'], {}), '(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only)\n', (2463, 2510), False, 'import os\n'), ((2523, 2580), 'os.chmod', 'os.chmod', (['CONFIG_FILES.MAILBOT_CONFIG_PATH', 'rw_owner_only'], {}), '(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only)\n', (2531, 2580), False, 'import os\n'), ((2933, 2954), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (2944, 2954), False, 'import shutil\n'), ((11531, 11562), 'ast.literal_eval', 'ast.literal_eval', (['raw_arguments'], {}), '(raw_arguments)\n', (11547, 11562), False, 'import ast\n'), ((2812, 2826), 'pathlib.PosixPath', 'PosixPath', (['dst'], {}), '(dst)\n', (2821, 2826), False, 'from pathlib import PosixPath\n'), ((3197, 3212), 'pathlib.PosixPath', 'PosixPath', (['path'], {}), '(path)\n', (3206, 3212), False, 'from pathlib import PosixPath\n'), ((7119, 7138), 'pathlib.PosixPath', 'PosixPath', (['__file__'], {}), '(__file__)\n', (7128, 7138), False, 'from pathlib import PosixPath\n'), ((1274, 1314), 'pathlib.PosixPath', 'PosixPath', (['CONFIG_FILES.MAIN_CONFIG_PATH'], {}), '(CONFIG_FILES.MAIN_CONFIG_PATH)\n', (1283, 1314), False, 'from pathlib import PosixPath\n'), ((1342, 1383), 'pathlib.PosixPath', 'PosixPath', (['CONFIG_FILES.HOSTS_CONFIG_PATH'], {}), '(CONFIG_FILES.HOSTS_CONFIG_PATH)\n', (1351, 1383), False, 'from pathlib import PosixPath\n'), ((1411, 1454), 'pathlib.PosixPath', 'PosixPath', (['CONFIG_FILES.MAILBOT_CONFIG_PATH'], {}), '(CONFIG_FILES.MAILBOT_CONFIG_PATH)\n', (1420, 1454), False, 'from pathlib import PosixPath\n'), ((6431, 6446), 'pathlib.PosixPath', 'PosixPath', (['path'], {}), '(path)\n', (6440, 6446), False, 'from pathlib import PosixPath\n'), ((10078, 10093), 'pathlib.PosixPath', 'PosixPath', (['path'], {}), '(path)\n', (10087, 10093), False, 'from pathlib import PosixPath\n'), ((4131, 4266), 'inspect.cleandoc', 'cleandoc', (['"""\n {env} - undeclared environment variable!\n Try this: `export {env}="..."`\n """'], {}), '(\n """\n {env} - undeclared environment variable!\n Try this: `export {env}="..."`\n """\n )\n', (4139, 4266), False, 'from inspect import cleandoc\n')] |
import random
from typing import Tuple
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch import Tensor
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):
super().__init__()
self.input_dim = input_dim
self.emb_dim = emb_dim
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
self.dropout = dropout
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True)
self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
embedded = self.dropout(self.embedding(src))
outputs, hidden = self.rnn(embedded)
# output of bi-directional rnn should be concatenated
hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)))
return outputs, hidden
class Attention(nn.Module):
def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim):
super().__init__()
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
self.attn_in = (enc_hid_dim * 2) + dec_hid_dim
self.attn = nn.Linear(self.attn_in, attn_dim)
def forward(self, decoder_hidden, encoder_outputs):
src_len = encoder_outputs.shape[0]
repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1)
encoder_outputs = encoder_outputs.permute(1, 0, 2)
energy = torch.tanh(self.attn(torch.cat((
repeated_decoder_hidden,
encoder_outputs),
dim = 2)))
attention = torch.sum(energy, dim=2)
return F.softmax(attention, dim=1)
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
super().__init__()
self.emb_dim = emb_dim
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
self.output_dim = output_dim
self.dropout = dropout
self.attention = attention
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs):
a = self.attention(decoder_hidden, encoder_outputs)
a = a.unsqueeze(1)
encoder_outputs = encoder_outputs.permute(1, 0, 2)
weighted_encoder_rep = torch.bmm(a, encoder_outputs)
weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2)
return weighted_encoder_rep
def forward(self, input, decoder_hidden, encoder_outputs):
input = input.unsqueeze(0)
embedded = self.dropout(self.embedding(input))
weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden,
encoder_outputs)
rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2)
output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0))
embedded = embedded.squeeze(0)
output = output.squeeze(0)
weighted_encoder_rep = weighted_encoder_rep.squeeze(0)
output = self.out(torch.cat((output,
weighted_encoder_rep,
embedded), dim = 1))
return output, decoder_hidden.squeeze(0)
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
def forward(self, src, trg, teacher_forcing_ratio=0.5):
batch_size = src.shape[1]
max_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)
encoder_outputs, hidden = self.encoder(src)
# first input to the decoder is the <sos> token
output = trg[0,:]
for t in range(1, max_len):
output, hidden = self.decoder(output, hidden, encoder_outputs)
outputs[t] = output
teacher_force = random.random() < teacher_forcing_ratio
top1 = output.max(1)[1]
output = (trg[t] if teacher_force else top1)
return outputs
| [
"torch.nn.Dropout",
"torch.cat",
"torch.sum",
"torch.nn.Linear",
"torch.bmm",
"torch.zeros",
"random.random",
"torch.nn.functional.softmax",
"torch.nn.Embedding",
"torch.nn.GRU"
]
| [((494, 526), 'torch.nn.Embedding', 'nn.Embedding', (['input_dim', 'emb_dim'], {}), '(input_dim, emb_dim)\n', (506, 526), True, 'import torch.nn as nn\n'), ((546, 594), 'torch.nn.GRU', 'nn.GRU', (['emb_dim', 'enc_hid_dim'], {'bidirectional': '(True)'}), '(emb_dim, enc_hid_dim, bidirectional=True)\n', (552, 594), True, 'import torch.nn as nn\n'), ((615, 654), 'torch.nn.Linear', 'nn.Linear', (['(enc_hid_dim * 2)', 'dec_hid_dim'], {}), '(enc_hid_dim * 2, dec_hid_dim)\n', (624, 654), True, 'import torch.nn as nn\n'), ((678, 697), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (688, 697), True, 'import torch.nn as nn\n'), ((1287, 1320), 'torch.nn.Linear', 'nn.Linear', (['self.attn_in', 'attn_dim'], {}), '(self.attn_in, attn_dim)\n', (1296, 1320), True, 'import torch.nn as nn\n'), ((1725, 1749), 'torch.sum', 'torch.sum', (['energy'], {'dim': '(2)'}), '(energy, dim=2)\n', (1734, 1749), False, 'import torch\n'), ((1765, 1792), 'torch.nn.functional.softmax', 'F.softmax', (['attention'], {'dim': '(1)'}), '(attention, dim=1)\n', (1774, 1792), True, 'import torch.nn.functional as F\n'), ((2185, 2218), 'torch.nn.Embedding', 'nn.Embedding', (['output_dim', 'emb_dim'], {}), '(output_dim, emb_dim)\n', (2197, 2218), True, 'import torch.nn as nn\n'), ((2238, 2284), 'torch.nn.GRU', 'nn.GRU', (['(enc_hid_dim * 2 + emb_dim)', 'dec_hid_dim'], {}), '(enc_hid_dim * 2 + emb_dim, dec_hid_dim)\n', (2244, 2284), True, 'import torch.nn as nn\n'), ((2306, 2361), 'torch.nn.Linear', 'nn.Linear', (['(self.attention.attn_in + emb_dim)', 'output_dim'], {}), '(self.attention.attn_in + emb_dim, output_dim)\n', (2315, 2361), True, 'import torch.nn as nn\n'), ((2385, 2404), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2395, 2404), True, 'import torch.nn as nn\n'), ((2655, 2684), 'torch.bmm', 'torch.bmm', (['a', 'encoder_outputs'], {}), '(a, encoder_outputs)\n', (2664, 2684), False, 'import torch\n'), ((3116, 3166), 'torch.cat', 'torch.cat', (['(embedded, weighted_encoder_rep)'], {'dim': '(2)'}), '((embedded, weighted_encoder_rep), dim=2)\n', (3125, 3166), False, 'import torch\n'), ((3414, 3472), 'torch.cat', 'torch.cat', (['(output, weighted_encoder_rep, embedded)'], {'dim': '(1)'}), '((output, weighted_encoder_rep, embedded), dim=1)\n', (3423, 3472), False, 'import torch\n'), ((923, 977), 'torch.cat', 'torch.cat', (['(hidden[-2, :, :], hidden[-1, :, :])'], {'dim': '(1)'}), '((hidden[-2, :, :], hidden[-1, :, :]), dim=1)\n', (932, 977), False, 'import torch\n'), ((1603, 1663), 'torch.cat', 'torch.cat', (['(repeated_decoder_hidden, encoder_outputs)'], {'dim': '(2)'}), '((repeated_decoder_hidden, encoder_outputs), dim=2)\n', (1612, 1663), False, 'import torch\n'), ((4006, 4054), 'torch.zeros', 'torch.zeros', (['max_len', 'batch_size', 'trg_vocab_size'], {}), '(max_len, batch_size, trg_vocab_size)\n', (4017, 4054), False, 'import torch\n'), ((4377, 4392), 'random.random', 'random.random', ([], {}), '()\n', (4390, 4392), False, 'import random\n')] |
from typing import Dict, Any, List
import string
from parlai.core.agents import Agent
from parlai.core.message import Message
from random import sample
import pathlib
path = pathlib.Path(__file__).parent.absolute()
class LightImitateMixin(Agent):
"""Abstract class that handles passing expert trajectories alongside self-play sampling
"""
def __init__(self, opt: Dict[str, Any], shared: Dict[str, Any] = None):
self.id = "LightChatbotSelfPlay"
self.train_step = 0
self.self_speaker_token = "<speaker_self>"
self.other_speaker_token = "<speaker_other>"
def act(self):
raise NotImplementedError()
def batch_act(self, observations):
self.train_step += 1
# Add generated histories to data ones
imitate = []
sample = []
for i, observation in enumerate(observations):
sample.extend(
[
(dialog[0], dialog[1][:-1])
for dialog in observation["text"] if len(dialog[1]) > 0
]
)
imitate.extend(
[
dialog
for dialog in observation["text"] if len(dialog[1]) > 0
]
)
self.batch_imitate(imitate)
utterances = self.batch_sample(sample)
if (
self.train_step % self.episode_num_dialog_dump == 0
) and self.train_step != 0:
self.checkpoint([sample, utterances])
return [{"id": self.id} for _ in observations]
def batch_imitate(self, dialogs):
"""Implement sampling utterances and memorization here"""
pass
def batch_sample(self, dialogs) -> List[str]:
"""Implement update here"""
pass
def batch_update(self):
"""Update weights here"""
pass
def _update_histories(self, utterances, other=False):
for i in range(len(utterances)):
history = self.histories[i]
history.append(
(self.self_speaker_token if not other else self.other_speaker_token)
+ utterances[i]
)
self.histories[i] = history
def _convert_history_to_other(self, history):
history = [
turn.replace(self.self_speaker_token, self.other_speaker_token)
if self.self_speaker_token in turn
else turn.replace(self.other_speaker_token, self.self_speaker_token)
for turn in history
]
return history
| [
"pathlib.Path"
]
| [((179, 201), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'import pathlib\n')] |
from git import Repo
from pf_pweb_sourceman.common.console import console
from pf_py_file.pfpf_file_util import PFPFFileUtil
class GitRepoMan:
def get_repo_name_from_url(self, url: str):
if not url:
return None
last_slash_index = url.rfind("/")
last_suffix_index = url.rfind(".git")
if last_suffix_index < 0:
last_suffix_index = len(url)
if last_slash_index < 0 or last_suffix_index <= last_slash_index:
raise Exception("Invalid repo url {}".format(url))
return url[last_slash_index + 1:last_suffix_index]
def clone_or_pull_project(self, path, url, branch):
repo_name = self.get_repo_name_from_url(url)
if not repo_name:
raise Exception("Invalid repo")
if not PFPFFileUtil.is_exist(path):
console.success("Cloning project: " + repo_name + ", Branch: " + branch)
Repo.clone_from(url, branch=branch, to_path=path)
else:
console.success(repo_name + " Taking pull...")
repo = Repo(path)
repo.git.checkout(branch)
origin = repo.remotes.origin
origin.pull()
| [
"git.Repo.clone_from",
"pf_py_file.pfpf_file_util.PFPFFileUtil.is_exist",
"pf_pweb_sourceman.common.console.console.success",
"git.Repo"
]
| [((795, 822), 'pf_py_file.pfpf_file_util.PFPFFileUtil.is_exist', 'PFPFFileUtil.is_exist', (['path'], {}), '(path)\n', (816, 822), False, 'from pf_py_file.pfpf_file_util import PFPFFileUtil\n'), ((836, 908), 'pf_pweb_sourceman.common.console.console.success', 'console.success', (["('Cloning project: ' + repo_name + ', Branch: ' + branch)"], {}), "('Cloning project: ' + repo_name + ', Branch: ' + branch)\n", (851, 908), False, 'from pf_pweb_sourceman.common.console import console\n'), ((921, 970), 'git.Repo.clone_from', 'Repo.clone_from', (['url'], {'branch': 'branch', 'to_path': 'path'}), '(url, branch=branch, to_path=path)\n', (936, 970), False, 'from git import Repo\n'), ((997, 1043), 'pf_pweb_sourceman.common.console.console.success', 'console.success', (["(repo_name + ' Taking pull...')"], {}), "(repo_name + ' Taking pull...')\n", (1012, 1043), False, 'from pf_pweb_sourceman.common.console import console\n'), ((1063, 1073), 'git.Repo', 'Repo', (['path'], {}), '(path)\n', (1067, 1073), False, 'from git import Repo\n')] |
# Install all examples to connected device(s)
import subprocess
import sys
answer = input("Install all vulkan examples to attached device, this may take some time! (Y/N)").lower() == 'y'
if answer:
BUILD_ARGUMENTS = ""
for arg in sys.argv[1:]:
if arg == "-validation":
BUILD_ARGUMENTS += "-validation"
if subprocess.call(("python build-all.py -deploy %s" % BUILD_ARGUMENTS).split(' ')) != 0:
print("Error: Not all examples may have been installed!")
sys.exit(-1)
| [
"sys.exit"
]
| [((499, 511), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (507, 511), False, 'import sys\n')] |
from generators.ahoughton import AhoughtonGenerator
from render_config import RendererConfig
from problem_renderer import ProblemRenderer
from moonboard import get_moonboard
from adapters.default import DefaultProblemAdapter
from adapters.crg import CRGProblemAdapter
from adapters.ahoughton import AhoughtonAdapter
import json
def main():
# Create Renderer
config = RendererConfig()
renderer = ProblemRenderer(
get_moonboard(2017),
DefaultProblemAdapter(),
config
)
crg_renderer = ProblemRenderer(
get_moonboard(2017),
CRGProblemAdapter(),
config
)
ahoughton_renderer_2016 = ProblemRenderer(
get_moonboard(2016),
AhoughtonAdapter(),
config
)
ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe')
ahoughton_renderer_2017 = ProblemRenderer(
get_moonboard(2017),
AhoughtonAdapter(),
config
)
ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe')
# Load data
with open('data/problems.json', 'r') as f:
problems = json.load(f)
renderer.render_problem(problems['339318'], with_info=True)
with open('data/crg.json', 'r') as f:
crg_problems = json.load(f)
crg_renderer.render_problem(crg_problems['1'])
# Ahoughton generator and adapter test
# 2016
problem = ahoughton_generator_2016.generate()
ahoughton_renderer_2016.render_problem(problem)
# 2017
problem = ahoughton_generator_2017.generate()
ahoughton_renderer_2017.render_problem(problem)
if __name__ == "__main__":
main()
| [
"adapters.default.DefaultProblemAdapter",
"moonboard.get_moonboard",
"json.load",
"adapters.crg.CRGProblemAdapter",
"generators.ahoughton.AhoughtonGenerator",
"adapters.ahoughton.AhoughtonAdapter",
"render_config.RendererConfig"
]
| [((377, 393), 'render_config.RendererConfig', 'RendererConfig', ([], {}), '()\n', (391, 393), False, 'from render_config import RendererConfig\n'), ((789, 876), 'generators.ahoughton.AhoughtonGenerator', 'AhoughtonGenerator', ([], {'year': '(2016)', 'driver_path': '"""C:/.selenium_drivers/chromedriver.exe"""'}), "(year=2016, driver_path=\n 'C:/.selenium_drivers/chromedriver.exe')\n", (807, 876), False, 'from generators.ahoughton import AhoughtonGenerator\n'), ((1029, 1116), 'generators.ahoughton.AhoughtonGenerator', 'AhoughtonGenerator', ([], {'year': '(2017)', 'driver_path': '"""C:/.selenium_drivers/chromedriver.exe"""'}), "(year=2017, driver_path=\n 'C:/.selenium_drivers/chromedriver.exe')\n", (1047, 1116), False, 'from generators.ahoughton import AhoughtonGenerator\n'), ((434, 453), 'moonboard.get_moonboard', 'get_moonboard', (['(2017)'], {}), '(2017)\n', (447, 453), False, 'from moonboard import get_moonboard\n'), ((464, 487), 'adapters.default.DefaultProblemAdapter', 'DefaultProblemAdapter', ([], {}), '()\n', (485, 487), False, 'from adapters.default import DefaultProblemAdapter\n'), ((555, 574), 'moonboard.get_moonboard', 'get_moonboard', (['(2017)'], {}), '(2017)\n', (568, 574), False, 'from moonboard import get_moonboard\n'), ((585, 604), 'adapters.crg.CRGProblemAdapter', 'CRGProblemAdapter', ([], {}), '()\n', (602, 604), False, 'from adapters.crg import CRGProblemAdapter\n'), ((688, 707), 'moonboard.get_moonboard', 'get_moonboard', (['(2016)'], {}), '(2016)\n', (701, 707), False, 'from moonboard import get_moonboard\n'), ((717, 735), 'adapters.ahoughton.AhoughtonAdapter', 'AhoughtonAdapter', ([], {}), '()\n', (733, 735), False, 'from adapters.ahoughton import AhoughtonAdapter\n'), ((928, 947), 'moonboard.get_moonboard', 'get_moonboard', (['(2017)'], {}), '(2017)\n', (941, 947), False, 'from moonboard import get_moonboard\n'), ((957, 975), 'adapters.ahoughton.AhoughtonAdapter', 'AhoughtonAdapter', ([], {}), '()\n', (973, 975), False, 'from adapters.ahoughton import AhoughtonAdapter\n'), ((1199, 1211), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1208, 1211), False, 'import json\n'), ((1345, 1357), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1354, 1357), False, 'import json\n')] |
import discord
from Util import Utils, Emoji, Translator
page_handlers = dict()
known_messages = dict()
def on_ready(bot):
load_from_disc()
def register(type, init, update, sender_only=False):
page_handlers[type] = {
"init": init,
"update": update,
"sender_only": sender_only
}
def unregister(type_handler):
if type_handler in page_handlers.keys():
del page_handlers[type_handler]
async def create_new(type, ctx, **kwargs):
text, embed, has_pages, emoji = await page_handlers[type]["init"](ctx, **kwargs)
message: discord.Message = await ctx.channel.send(text, embed=embed)
if has_pages or len(emoji) > 0:
data = {
"type": type,
"page": 0,
"trigger": ctx.message.id,
"sender": ctx.author.id
}
for k, v in kwargs.items():
data[k] = v
known_messages[str(message.id)] = data
try:
if has_pages: await message.add_reaction(Emoji.get_emoji('LEFT'))
for e in emoji: await message.add_reaction(e)
if has_pages: await message.add_reaction(Emoji.get_emoji('RIGHT'))
except discord.Forbidden:
await ctx.send(
f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('paginator_missing_perms', ctx, prev=Emoji.get_chat_emoji('LEFT'), next=Emoji.get_chat_emoji('RIGHT'))} {Emoji.get_chat_emoji('WARNING')}")
if len(known_messages.keys()) > 500:
del known_messages[list(known_messages.keys())[0]]
save_to_disc()
async def update(bot, message, action, user):
message_id = str(message.id)
if message_id in known_messages.keys():
type = known_messages[message_id]["type"]
if type in page_handlers.keys():
data = known_messages[message_id]
if data["sender"] == user or page_handlers[type]["sender_only"] is False:
page_num = data["page"]
try:
trigger_message = await message.channel.get_message(data["trigger"])
except discord.NotFound:
trigger_message = None
ctx = await bot.get_context(trigger_message) if trigger_message is not None else None
text, embed, page = await page_handlers[type]["update"](ctx, message, page_num, action, data)
await message.edit(content=text, embed=embed)
known_messages[message_id]["page"] = page
save_to_disc()
return True
return False
def basic_pages(pages, page_num, action):
if action == "PREV":
page_num -= 1
elif action == "NEXT":
page_num += 1
if page_num < 0:
page_num = len(pages) - 1
if page_num >= len(pages):
page_num = 0
page = pages[page_num]
return page, page_num
def paginate(input, max_lines=20, max_chars=1900, prefix="", suffix=""):
max_chars -= len(prefix) + len(suffix)
lines = str(input).splitlines(keepends=True)
pages = []
page = ""
count = 0
for line in lines:
if len(page) + len(line) > max_chars or count == max_lines:
if page == "":
# single 2k line, split smaller
words = line.split(" ")
for word in words:
if len(page) + len(word) > max_chars:
pages.append(f"{prefix}{page}{suffix}")
page = f"{word} "
else:
page += f"{word} "
else:
pages.append(f"{prefix}{page}{suffix}")
page = line
count = 1
else:
page += line
count += 1
pages.append(f"{prefix}{page}{suffix}")
return pages
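# Illustrative usage sketch (not part of the original module): wrapping long
# command output in code fences before sending it to Discord.
#   for chunk in paginate(long_output, prefix="```\n", suffix="```"):
#       await ctx.send(chunk)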
def paginate_fields(input):
pages = []
for page in input:
page_fields = dict()
for name, content in page.items():
page_fields[name] = paginate(content, max_chars=1024)
pages.append(page_fields)
real_pages = []
for page in pages:
page_count = 0
page_fields = dict()
for name, parts in page.items():
base_name = name
            if len(parts) == 1:
if page_count + len(name) + len(parts[0]) > 4000:
real_pages.append(page_fields)
page_fields = dict()
page_count = 0
page_fields[name] = parts[0]
page_count += len(name) + len(parts[0])
else:
for i in range(len(parts)):
part = parts[i]
name = f"{base_name} ({i+1}/{len(parts)})"
if page_count + len(name) + len(part) > 3000:
real_pages.append(page_fields)
page_fields = dict()
page_count = 0
page_fields[name] = part
page_count += len(name) + len(part)
real_pages.append(page_fields)
return real_pages
def save_to_disc():
Utils.saveToDisk("known_messages", known_messages)
def load_from_disc():
global known_messages
known_messages = Utils.fetch_from_disk("known_messages")
| [
"Util.Emoji.get_emoji",
"Util.Emoji.get_chat_emoji",
"Util.Utils.fetch_from_disk",
"Util.Utils.saveToDisk"
]
| [((5066, 5116), 'Util.Utils.saveToDisk', 'Utils.saveToDisk', (['"""known_messages"""', 'known_messages'], {}), "('known_messages', known_messages)\n", (5082, 5116), False, 'from Util import Utils, Emoji, Translator\n'), ((5188, 5227), 'Util.Utils.fetch_from_disk', 'Utils.fetch_from_disk', (['"""known_messages"""'], {}), "('known_messages')\n", (5209, 5227), False, 'from Util import Utils, Emoji, Translator\n'), ((1001, 1024), 'Util.Emoji.get_emoji', 'Emoji.get_emoji', (['"""LEFT"""'], {}), "('LEFT')\n", (1016, 1024), False, 'from Util import Utils, Emoji, Translator\n'), ((1137, 1161), 'Util.Emoji.get_emoji', 'Emoji.get_emoji', (['"""RIGHT"""'], {}), "('RIGHT')\n", (1152, 1161), False, 'from Util import Utils, Emoji, Translator\n'), ((1244, 1275), 'Util.Emoji.get_chat_emoji', 'Emoji.get_chat_emoji', (['"""WARNING"""'], {}), "('WARNING')\n", (1264, 1275), False, 'from Util import Utils, Emoji, Translator\n'), ((1404, 1435), 'Util.Emoji.get_chat_emoji', 'Emoji.get_chat_emoji', (['"""WARNING"""'], {}), "('WARNING')\n", (1424, 1435), False, 'from Util import Utils, Emoji, Translator\n'), ((1336, 1364), 'Util.Emoji.get_chat_emoji', 'Emoji.get_chat_emoji', (['"""LEFT"""'], {}), "('LEFT')\n", (1356, 1364), False, 'from Util import Utils, Emoji, Translator\n'), ((1371, 1400), 'Util.Emoji.get_chat_emoji', 'Emoji.get_chat_emoji', (['"""RIGHT"""'], {}), "('RIGHT')\n", (1391, 1400), False, 'from Util import Utils, Emoji, Translator\n')] |
from __future__ import annotations
from dataclasses import dataclass, field, InitVar
from typing import List, Tuple, Iterator, Iterable, Optional
from random import choice
import pyxel
# -------------------------------------------------------
# Types
# -------------------------------------------------------
Maze = Tuple[int, ...]
# -------------------------------------------------------
# Constants
# -------------------------------------------------------
SCALE = 3
BOARD_WIDTH = 32
BOARD_HEIGHT = 32
CELL_SIZE = 6
CELL_COLOR = 15
WALL_SIZE = 1
WALL_COLOR = 5
# Flags
UP = 1 << 0
LEFT = 1 << 1
DOWN = 1 << 2
RIGHT = 1 << 3
VISTED = 1 << 4
# Calculated
N_CELLS = BOARD_WIDTH * BOARD_HEIGHT
BLOCK_SIZE = CELL_SIZE + WALL_SIZE * 2
WINDOW_WIDTH = BOARD_WIDTH * BLOCK_SIZE
WINDOW_HEIGHT = BOARD_HEIGHT * BLOCK_SIZE
NEIGHBORS = ((0, -1), (-1, 0), (0, 1), (1, 0))
# -------------------------------------------------------
# Maze
# -------------------------------------------------------
@dataclass
class Generator:
width: int
height: int
start_pos: InitVar[Optional[Tuple[int, int]]] = None
_visited_cells: int = field(init=False, default=0)
_stack: List[Tuple[int, int]] = field(init=False, default_factory=list)
_maze: List[int] = field(init=False)
def __post_init__(self, start_pos: Optional[Tuple[int, int]]):
x, y = start_pos = start_pos or (0, 0)
self._stack.append(start_pos)
self._visited_cells = 1
self._maze = [0 for _ in range(self.width * self.height)]
self._maze[y * self.width + x] |= VISTED
def _get_neighbors(self, x: int, y: int) -> List[int]:
return [
(i, dx, dy)
for i, (dx, dy) in enumerate(NEIGHBORS)
if (
0 <= x + dx < self.width and
0 <= y + dy < self.height and
self._maze[(y + dy) * self.width + (x + dx)] & VISTED == 0
)
]
def step(self) -> Tuple[Maze, Tuple[int, int], bool]:
if self._visited_cells < self.width * self.height:
x, y = self._stack[-1]
neighbors = self._get_neighbors(x, y)
if neighbors:
d, dx, dy = choice(neighbors)
self._maze[y * self.width + x] |= 1 << d
x_, y_ = x + dx, y + dy
self._maze[y_ * self.width + x_] |= 1 << ((d + 2) % 4) | VISTED
self._stack.append((x_, y_))
self._visited_cells += 1
else:
del self._stack[-1]
return tuple(self._maze), self._stack[-1], False
else:
return tuple(self._maze), (0, 0), True
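    # Note: step() advances the classic depth-first "recursive backtracker"
    # maze algorithm by one iteration, using an explicit stack instead of
    # recursion so the caller can redraw the board after every carving step.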
# -------------------------------------------------------
# Application
# -------------------------------------------------------
@dataclass
class App:
maze: Maze = field(init=False, default=tuple(0 for _ in range(N_CELLS)))
generator: Optional[Generator] = field(init=False, default=None)
running: bool = field(init=False, default=False)
pos: Tuple[int, int] = field(init=False, default=(0, 0))
def run(self):
pyxel.init(
WINDOW_WIDTH, WINDOW_HEIGHT,
scale=SCALE, caption="Mazes",
border_width=SCALE, border_color=pyxel.DEFAULT_PALETTE[5],
fps=100
)
pyxel.mouse(True)
pyxel.run(self.update, self.draw)
def draw(self):
pyxel.cls(0)
for i, cell in enumerate(self.maze):
x, y = i % BOARD_WIDTH, i // BOARD_WIDTH
scr_x, scr_y = x * BLOCK_SIZE, y * BLOCK_SIZE
pyxel.rect(
scr_x, scr_y,
BLOCK_SIZE, BLOCK_SIZE,
WALL_COLOR
)
if cell & VISTED:
pyxel.rect(
scr_x + WALL_SIZE, scr_y + WALL_SIZE,
CELL_SIZE, CELL_SIZE,
CELL_COLOR
)
if cell & UP:
pyxel.rect(
scr_x + WALL_SIZE, scr_y,
CELL_SIZE, WALL_SIZE,
CELL_COLOR
)
if cell & LEFT:
pyxel.rect(
scr_x, scr_y + WALL_SIZE,
WALL_SIZE, CELL_SIZE,
CELL_COLOR
)
if cell & DOWN:
pyxel.rect(
scr_x + WALL_SIZE, scr_y + WALL_SIZE + CELL_SIZE,
CELL_SIZE, WALL_SIZE,
CELL_COLOR
)
if cell & RIGHT:
pyxel.rect(
scr_x + WALL_SIZE + CELL_SIZE, scr_y + WALL_SIZE,
WALL_SIZE, CELL_SIZE,
CELL_COLOR
)
x, y = self.pos
pyxel.rectb(
x * BLOCK_SIZE + WALL_SIZE, y * BLOCK_SIZE + WALL_SIZE,
CELL_SIZE, CELL_SIZE,
2 if self.running else 1
)
def update(self):
if pyxel.btnp(pyxel.KEY_SPACE) or pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON):
self.running = not self.running
if self.running and self.generator is None:
self.generator = Generator(BOARD_WIDTH, BOARD_HEIGHT, self.pos)
if self.running:
next_maze, pos, done = self.generator.step()
if done:
self.running = False
self.generator = None
self.maze = next_maze
self.pos = pos
else:
self.pos = (
max(0, min(BOARD_WIDTH-1, pyxel.mouse_x // BLOCK_SIZE)),
max(0, min(BOARD_HEIGHT-1, pyxel.mouse_y // BLOCK_SIZE))
)
if __name__ == '__main__':
App().run() | [
"random.choice",
"pyxel.btnp",
"pyxel.rect",
"pyxel.mouse",
"pyxel.init",
"pyxel.rectb",
"pyxel.run",
"pyxel.cls",
"dataclasses.field"
]
| [((1146, 1174), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '(0)'}), '(init=False, default=0)\n', (1151, 1174), False, 'from dataclasses import dataclass, field, InitVar\n'), ((1211, 1250), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default_factory': 'list'}), '(init=False, default_factory=list)\n', (1216, 1250), False, 'from dataclasses import dataclass, field, InitVar\n'), ((1274, 1291), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (1279, 1291), False, 'from dataclasses import dataclass, field, InitVar\n'), ((2942, 2973), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': 'None'}), '(init=False, default=None)\n', (2947, 2973), False, 'from dataclasses import dataclass, field, InitVar\n'), ((2994, 3026), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '(False)'}), '(init=False, default=False)\n', (2999, 3026), False, 'from dataclasses import dataclass, field, InitVar\n'), ((3054, 3087), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '(0, 0)'}), '(init=False, default=(0, 0))\n', (3059, 3087), False, 'from dataclasses import dataclass, field, InitVar\n'), ((3116, 3257), 'pyxel.init', 'pyxel.init', (['WINDOW_WIDTH', 'WINDOW_HEIGHT'], {'scale': 'SCALE', 'caption': '"""Mazes"""', 'border_width': 'SCALE', 'border_color': 'pyxel.DEFAULT_PALETTE[5]', 'fps': '(100)'}), "(WINDOW_WIDTH, WINDOW_HEIGHT, scale=SCALE, caption='Mazes',\n border_width=SCALE, border_color=pyxel.DEFAULT_PALETTE[5], fps=100)\n", (3126, 3257), False, 'import pyxel\n'), ((3320, 3337), 'pyxel.mouse', 'pyxel.mouse', (['(True)'], {}), '(True)\n', (3331, 3337), False, 'import pyxel\n'), ((3346, 3379), 'pyxel.run', 'pyxel.run', (['self.update', 'self.draw'], {}), '(self.update, self.draw)\n', (3355, 3379), False, 'import pyxel\n'), ((3409, 3421), 'pyxel.cls', 'pyxel.cls', (['(0)'], {}), '(0)\n', (3418, 3421), False, 'import pyxel\n'), ((4868, 4987), 'pyxel.rectb', 'pyxel.rectb', (['(x * BLOCK_SIZE + WALL_SIZE)', '(y * BLOCK_SIZE + WALL_SIZE)', 'CELL_SIZE', 'CELL_SIZE', '(2 if self.running else 1)'], {}), '(x * BLOCK_SIZE + WALL_SIZE, y * BLOCK_SIZE + WALL_SIZE,\n CELL_SIZE, CELL_SIZE, 2 if self.running else 1)\n', (4879, 4987), False, 'import pyxel\n'), ((3590, 3650), 'pyxel.rect', 'pyxel.rect', (['scr_x', 'scr_y', 'BLOCK_SIZE', 'BLOCK_SIZE', 'WALL_COLOR'], {}), '(scr_x, scr_y, BLOCK_SIZE, BLOCK_SIZE, WALL_COLOR)\n', (3600, 3650), False, 'import pyxel\n'), ((5064, 5091), 'pyxel.btnp', 'pyxel.btnp', (['pyxel.KEY_SPACE'], {}), '(pyxel.KEY_SPACE)\n', (5074, 5091), False, 'import pyxel\n'), ((5095, 5130), 'pyxel.btnp', 'pyxel.btnp', (['pyxel.MOUSE_LEFT_BUTTON'], {}), '(pyxel.MOUSE_LEFT_BUTTON)\n', (5105, 5130), False, 'import pyxel\n'), ((2213, 2230), 'random.choice', 'choice', (['neighbors'], {}), '(neighbors)\n', (2219, 2230), False, 'from random import choice\n'), ((3759, 3845), 'pyxel.rect', 'pyxel.rect', (['(scr_x + WALL_SIZE)', '(scr_y + WALL_SIZE)', 'CELL_SIZE', 'CELL_SIZE', 'CELL_COLOR'], {}), '(scr_x + WALL_SIZE, scr_y + WALL_SIZE, CELL_SIZE, CELL_SIZE,\n CELL_COLOR)\n', (3769, 3845), False, 'import pyxel\n'), ((3970, 4040), 'pyxel.rect', 'pyxel.rect', (['(scr_x + WALL_SIZE)', 'scr_y', 'CELL_SIZE', 'WALL_SIZE', 'CELL_COLOR'], {}), '(scr_x + WALL_SIZE, scr_y, CELL_SIZE, WALL_SIZE, CELL_COLOR)\n', (3980, 4040), False, 'import pyxel\n'), ((4187, 4257), 'pyxel.rect', 'pyxel.rect', (['scr_x', '(scr_y + WALL_SIZE)', 'WALL_SIZE', 'CELL_SIZE', 'CELL_COLOR'], {}), '(scr_x, scr_y + WALL_SIZE, WALL_SIZE, CELL_SIZE, 
CELL_COLOR)\n', (4197, 4257), False, 'import pyxel\n'), ((4404, 4502), 'pyxel.rect', 'pyxel.rect', (['(scr_x + WALL_SIZE)', '(scr_y + WALL_SIZE + CELL_SIZE)', 'CELL_SIZE', 'WALL_SIZE', 'CELL_COLOR'], {}), '(scr_x + WALL_SIZE, scr_y + WALL_SIZE + CELL_SIZE, CELL_SIZE,\n WALL_SIZE, CELL_COLOR)\n', (4414, 4502), False, 'import pyxel\n'), ((4646, 4744), 'pyxel.rect', 'pyxel.rect', (['(scr_x + WALL_SIZE + CELL_SIZE)', '(scr_y + WALL_SIZE)', 'WALL_SIZE', 'CELL_SIZE', 'CELL_COLOR'], {}), '(scr_x + WALL_SIZE + CELL_SIZE, scr_y + WALL_SIZE, WALL_SIZE,\n CELL_SIZE, CELL_COLOR)\n', (4656, 4744), False, 'import pyxel\n')] |
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
class Profile(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg',
upload_to='profile_pic/')
def __str__(self):
return f'{self.user.username} Profile'
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        img = Image.open(self.image.path)
        if img.height > 300 and img.width > 300:
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.image.path)
| [
"django.db.models.ImageField",
"PIL.Image",
"django.db.models.ForeignKey"
]
| [((136, 185), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (153, 185), False, 'from django.db import models\n'), ((198, 264), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '"""default.jpg"""', 'upload_to': '"""profile_pic/"""'}), "(default='default.jpg', upload_to='profile_pic/')\n", (215, 264), False, 'from django.db import models\n'), ((470, 495), 'PIL.Image', 'Image', (['self.prof_pic.path'], {}), '(self.prof_pic.path)\n', (475, 495), False, 'from PIL import Image\n')] |
## MODULE WITH UTIL FUNCTIONS - NOTION
"----------------------------------------------------------------------------------------------------------------------"
####################################################### Imports ########################################################
"----------------------------------------------------------------------------------------------------------------------"
## Standard library imports
import requests
## Third party imports
import pandas as pd
## Local application imports
from pkg_dir.config.config import (
creds_file_path as crds_loc,
)
from pkg_dir.src.utils.general_utils import (
read_yaml,
)
"----------------------------------------------------------------------------------------------------------------------"
####################################################### Functions ######################################################
"----------------------------------------------------------------------------------------------------------------------"
## Read notion database with api
def notion_api_call(db_api_url, db_id, headers):
"""
Read notion database with api
:param db_api_url (string): base url provided by Notion to make api calls
:param db_id (string): unique id of the database that will be read
:param headers (dictionary): dict with authorization and version info
    :return req (requests.Response): response object returned by Notion's API
"""
## Configuring reading URL
read_url = db_api_url + db_id + "/query"
## Requesting info via the API
req = requests.request(
"POST",
read_url,
headers=headers
)
## Verifying API call status
print("API interaction status code: ", req.status_code)
return req
## Calling a Notion database as a json via Notion's API
def get_notion_db_json(db_id):
"""
Calling a Notion database as a json via Notion's API
:param db_id (string): unique id of the database that will be called
:return db_json (json): json with the notion's db contents
"""
## Reading credentials from yaml file
yaml_file = read_yaml(crds_loc)
notion_version = yaml_file["notion_api"]["notion_version"]
db_api_url = yaml_file["notion_api"]["db_api_url"]
api_key = yaml_file["notion_api"]["api_key"]
## Building headers for the API call
headers = {
"Authorization": "Bearer " + api_key,
"Notion-Version": notion_version
}
## Calling notion's api
req = notion_api_call(db_api_url, db_id, headers)
## Converting the api response to a json
db_json = req.json()
return db_json
## Creating a schema of the notion database that was read
def create_notion_db_schema(db_json, relevant_properties):
"""
    Creating a schema of the notion database that was read
:param db_json (json): json object obtained by calling notion's api
:param relevant_properties (list): list of string with the names of the relevant properties
:return db_schema (dictionary): schema of the table that includes the properties' data type
"""
## Selecting a sample entry to go over all of it's properties
sample_entry = db_json["results"][0]["properties"]
    ## Building dictionary (schema) of the relevant properties and their datatypes
db_schema = {
prop: {
"data_type": sample_entry[prop]["type"]
}
for prop in sample_entry
if prop in relevant_properties
}
# print(db_schema)
return db_schema
## Building the blueprint dictionary for the dataframe (orient=index)
def notion_db_blueprint_df(db_json, db_schema, index_prop):
"""
    Building the blueprint dictionary for the dataframe (orient=index)
    :param db_json (json): json object obtained by calling notion's api
    :param db_schema (dictionary): schema of the table that includes the properties' data type
:param index_prop (string): name of the property that will serve as the df's index
:return df_dict (dict): dictionary that will be used to create a dataframe with the json contents
"""
## Empty dictionary that will store all the results
df_dict = {}
## Iterating over every row in the dataframe
for row in db_json["results"]:
## Defining the table's base attributes
#### All properties contained in the notion db
row_props = row["properties"]
#### Name of the index; key attribute in the notion db
row_name = row_props[index_prop]["title"][0]["plain_text"]
#### Empty list to store all the row contents
row_contents = []
## Iterating over every relevant property in the table
for col in db_schema:
## Identifying the datatype of the property
data_type = db_schema[col]["data_type"]
## Set of conditions to determine how the row will be treated
#### Skipping the index row
if data_type == "title":
continue
#### Searching for data in specific locations for special data types (1)
elif data_type in ["select", "person", "created_by"]:
try:
row_contents.append(row_props[col][data_type]["name"])
except:
row_contents.append("No_data")
#### Searching for data in specific locations for special data types (2)
elif data_type in ["rich_text"]:
try:
row_contents.append(row_props[col][data_type][0]["text"]["content"])
except:
row_contents.append("No_data")
            #### Searching for data in specific locations for special data types (3)
elif data_type in ["formula"]:
try:
#### Applying conditions based on the type of formula result
if row_props[col][data_type]["type"] == "string":
row_contents.append(row_props[col][data_type]["string"])
elif row_props[col][data_type]["type"] == "number":
row_contents.append(row_props[col][data_type]["number"])
except:
row_contents.append("No_data")
#### General procedure to find data
else:
row_contents.append(row_props[col][db_schema[col]["data_type"]])
## Saving the row contents gathered
df_dict[row_name] = row_contents
return df_dict
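## Illustrative note: the returned blueprint is shaped like
## {"Row title": [value_for_col_1, value_for_col_2, ...], ...}, which is what
## pd.DataFrame.from_dict(df_dict, orient="index") consumes downstream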
## Obtaining a dataframe from a notion database
def notion_json_to_df(db_json, relevant_properties):
"""
Obtaining a dataframe from a notion database
:param db_json (json): json object obtained by calling notion's api
:param relevant_properties (list): list of string with the names of the relevant properties
    :return df_n (dataframe): resulting dataframe created based on the blueprint generated
"""
## General parameters needed to build the dataframe
#### Database schema
db_schema = create_notion_db_schema(db_json, relevant_properties)
#### Property that will be used as the dataframe's index
index_prop = [prop for prop in db_schema if db_schema[prop]["data_type"] == "title"][0]
    ## Building the blueprint dictionary for the dataframe (orient=index)
df_dict = notion_db_blueprint_df(db_json, db_schema, index_prop)
## Creating dataframe with the resulting blueprint dictionary
    #### Creating dataframe
df_n = pd.DataFrame.from_dict(df_dict, orient="index")
#### Inserting the table's index as a column at the end of the df
df_n.insert(
df_n.shape[1],
index_prop,
df_n.index
)
#### Resetting index
df_n.reset_index(inplace=True, drop=True)
#### Adjusting column names
df_n.columns = [col_n for col_n in db_schema]
return df_n
## Obtaining a Notion database as dataframe with the selected columns
def notion_db_to_df(db_id, relevant_properties):
"""
Obtaining a Notion database as dataframe with the selected columns
:param db_id (string): unique id to identify the notion database
:param relevant_properties (list): list of string with the names of the relevant properties
    :return df_n (dataframe): resulting dataframe created based on the blueprint generated
"""
## Calling a Notion database as a json via Notion's API
db_json = get_notion_db_json(db_id)
## Obtaining a dataframe from a notion database
df_n = notion_json_to_df(db_json, relevant_properties)
return df_n
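## Example usage (hypothetical database id and property names, for illustration only):
##   df = notion_db_to_df("<database-id>", ["Name", "Status", "Owner"])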
"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
## END OF FILE ##
"----------------------------------------------------------------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------" | [
"pandas.DataFrame.from_dict",
"pkg_dir.src.utils.general_utils.read_yaml",
"requests.request"
]
| [((1577, 1628), 'requests.request', 'requests.request', (['"""POST"""', 'read_url'], {'headers': 'headers'}), "('POST', read_url, headers=headers)\n", (1593, 1628), False, 'import requests\n'), ((2130, 2149), 'pkg_dir.src.utils.general_utils.read_yaml', 'read_yaml', (['crds_loc'], {}), '(crds_loc)\n', (2139, 2149), False, 'from pkg_dir.src.utils.general_utils import read_yaml\n'), ((7512, 7559), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['df_dict'], {'orient': '"""index"""'}), "(df_dict, orient='index')\n", (7534, 7559), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
#
from conans import python_requires
import conans.tools as tools
import os
base = python_requires("Eigen3ToPython/latest@multi-contact/dev")
class MCRTCDataConan(base.Eigen3ToPythonConan):
name = "mc_rtc_data"
version = "1.0.4"
description = "Environments/Robots description for mc_rtc"
topics = ("robotics", "data")
url = "https://github.com/jrl-umi3218/mc_rtc_data"
homepage = "https://github.com/jrl-umi3218/mc_rtc_data"
author = "<NAME> <<EMAIL>>"
license = "BSD-2-Clause"
exports = ["LICENSE"]
exports_sources = ["CMakeLists.txt", "conan/CMakeLists.txt", "cmake/*", "jvrc_description/*", "mc_env_description/*", "mc_int_obj_description/*", "mc_rtc_data/*"]
generators = "cmake"
settings = "os", "arch"
requires = ()
def config_options(self):
del self.options.python2_version
del self.options.python3_version
def package_id(self):
pass
def package(self):
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
for f in [".catkin", "_setup_util.py", "env.sh", "setup.bash", "local_setup.bash", "setup.sh", "local_setup.sh", "setup.zsh", "local_setup.zsh", ".rosinstall"]:
p = os.path.join(self.package_folder, f)
if os.path.exists(p):
os.remove(p)
| [
"conans.python_requires",
"os.path.exists",
"os.path.join",
"os.remove"
]
| [((109, 167), 'conans.python_requires', 'python_requires', (['"""Eigen3ToPython/latest@multi-contact/dev"""'], {}), "('Eigen3ToPython/latest@multi-contact/dev')\n", (124, 167), False, 'from conans import python_requires\n'), ((1062, 1115), 'os.path.join', 'os.path.join', (['self.package_folder', '"""lib"""', '"""pkgconfig"""'], {}), "(self.package_folder, 'lib', 'pkgconfig')\n", (1074, 1115), False, 'import os\n'), ((1302, 1338), 'os.path.join', 'os.path.join', (['self.package_folder', 'f'], {}), '(self.package_folder, f)\n', (1314, 1338), False, 'import os\n'), ((1354, 1371), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (1368, 1371), False, 'import os\n'), ((1387, 1399), 'os.remove', 'os.remove', (['p'], {}), '(p)\n', (1396, 1399), False, 'import os\n')] |
import torch
import json
import numpy as np
from torch.autograd import Variable
import gzip
import yaml
from re import split
from matplotlib import pyplot
def showImg( im ):
pyplot.imshow( im )
pyplot.show()
def myOpen( fname, mode ):
return open( fname, mode, encoding="utf-8" )
def readFile( fname ):
opener, mode = ( gzip.open, 'rt' ) if fname[-3:] == '.gz' else ( open, 'r' )
with opener( fname, mode ) as f:
return f.read()
def readLines( fname ):
return split('[\r\n]', readFile( fname ) )
def readJson( fname ):
with myOpen( fname, 'r' ) as f:
return json.load( f )
def writeFile( fname, contents ):
with myOpen( fname, 'w' ) as f:
f.write( contents )
def writeJson( fname, data ):
with myOpen( fname, 'w') as outfile:
json.dump(data, outfile)
def readYaml( fname ):
with myOpen(fname, 'r') as fp:
return yaml.load( fp )
config = readYaml('./config.yaml')
class averager(object):
"""Compute average for `torch.Variable` and `torch.Tensor`. """
def __init__(self):
self.reset()
def add(self, v):
if isinstance(v, Variable):
count = v.data.numel()
v = v.data.sum()
elif isinstance(v, torch.Tensor):
count = v.numel()
v = v.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
def loadTrainedModel( model, opt ):
"""Load a pretrained model into given model"""
print('loading pretrained model from %s' % opt.crnn)
if( opt.cuda ):
stateDict = torch.load(opt.crnn )
else:
stateDict = torch.load(opt.crnn, map_location={'cuda:0': 'cpu'} )
# Handle the case of some old torch version. It will save the data as module.<xyz> . Handle it
if( list( stateDict.keys() )[0][:7] == 'module.' ):
for key in list(stateDict.keys()):
stateDict[ key[ 7:] ] = stateDict[key]
del stateDict[ key ]
model.load_state_dict( stateDict )
print('Completed loading pre trained model')
| [
"matplotlib.pyplot.imshow",
"torch.load",
"yaml.load",
"json.load",
"json.dump",
"matplotlib.pyplot.show"
]
| [((180, 197), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['im'], {}), '(im)\n', (193, 197), False, 'from matplotlib import pyplot\n'), ((204, 217), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (215, 217), False, 'from matplotlib import pyplot\n'), ((609, 621), 'json.load', 'json.load', (['f'], {}), '(f)\n', (618, 621), False, 'import json\n'), ((803, 827), 'json.dump', 'json.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (812, 827), False, 'import json\n'), ((902, 915), 'yaml.load', 'yaml.load', (['fp'], {}), '(fp)\n', (911, 915), False, 'import yaml\n'), ((1757, 1777), 'torch.load', 'torch.load', (['opt.crnn'], {}), '(opt.crnn)\n', (1767, 1777), False, 'import torch\n'), ((1809, 1861), 'torch.load', 'torch.load', (['opt.crnn'], {'map_location': "{'cuda:0': 'cpu'}"}), "(opt.crnn, map_location={'cuda:0': 'cpu'})\n", (1819, 1861), False, 'import torch\n')] |
#coding:utf-8
# 0. Import modules and generate the simulated dataset
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import opt4_8_generateds
import opt4_8_forward
STEPS = 40000
BATCH_SIZE = 30
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.999
REGULARIZER = 0.01
def backward():
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
X, Y_, Y_c = opt4_8_generateds.generateds()
y = opt4_8_forward.forward(x, REGULARIZER)
global_step = tf.Variable(0,trainable=False)
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
300/BATCH_SIZE,
LEARNING_RATE_DECAY,
staircase=True)
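	# With staircase=True the decayed rate equals
	# LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** floor(global_step / (300 / BATCH_SIZE))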
	# Define the loss function
loss_mse = tf.reduce_mean(tf.square(y-y_))
loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
	# Define the backpropagation method: includes regularization
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
for i in range(STEPS):
start = (i*BATCH_SIZE) % 300
end = start + BATCH_SIZE
sess.run(train_step, feed_dict={x: X[start:end], y_:Y_[start:end]})
if i % 2000 == 0:
loss_v = sess.run(loss_total, feed_dict={x:X,y_:Y_})
print("After %d steps, loss is: %f" %(i, loss_v))
xx, yy = np.mgrid[-3:3:.01, -3:3:.01]
grid = np.c_[xx.ravel(), yy.ravel()]
probs = sess.run(y, feed_dict={x:grid})
probs = probs.reshape(xx.shape)
plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c))
plt.contour(xx, yy, probs, levels=[.5])
plt.show()
if __name__=='__main__':
backward()
| [
"tensorflow.Variable",
"opt4_8_generateds.generateds",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.get_collection",
"opt4_8_forward.forward",
"numpy.squeeze",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.contour",
"tensorflow.train.exponential_decay",
"tensorflow.train.AdamOptimizer",
"tensorflow.square",
"matplotlib.pyplot.show"
]
| [((280, 323), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 2)'}), '(tf.float32, shape=(None, 2))\n', (294, 323), True, 'import tensorflow as tf\n'), ((330, 373), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)'}), '(tf.float32, shape=(None, 1))\n', (344, 373), True, 'import tensorflow as tf\n'), ((389, 419), 'opt4_8_generateds.generateds', 'opt4_8_generateds.generateds', ([], {}), '()\n', (417, 419), False, 'import opt4_8_generateds\n'), ((426, 464), 'opt4_8_forward.forward', 'opt4_8_forward.forward', (['x', 'REGULARIZER'], {}), '(x, REGULARIZER)\n', (448, 464), False, 'import opt4_8_forward\n'), ((482, 513), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (493, 513), True, 'import tensorflow as tf\n'), ((532, 650), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['LEARNING_RATE_BASE', 'global_step', '(300 / BATCH_SIZE)', 'LEARNING_RATE_DECAY'], {'staircase': '(True)'}), '(LEARNING_RATE_BASE, global_step, 300 /\n BATCH_SIZE, LEARNING_RATE_DECAY, staircase=True)\n', (558, 650), True, 'import tensorflow as tf\n'), ((1457, 1497), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'probs'], {'levels': '[0.5]'}), '(xx, yy, probs, levels=[0.5])\n', (1468, 1497), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1508), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1506, 1508), True, 'import matplotlib.pyplot as plt\n'), ((694, 711), 'tensorflow.square', 'tf.square', (['(y - y_)'], {}), '(y - y_)\n', (703, 711), True, 'import tensorflow as tf\n'), ((873, 885), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (883, 885), True, 'import tensorflow as tf\n'), ((907, 940), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (938, 940), True, 'import tensorflow as tf\n'), ((745, 772), 'tensorflow.get_collection', 'tf.get_collection', (['"""losses"""'], {}), "('losses')\n", (762, 772), True, 'import tensorflow as tf\n'), ((807, 844), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (829, 844), True, 'import tensorflow as tf\n'), ((1438, 1453), 'numpy.squeeze', 'np.squeeze', (['Y_c'], {}), '(Y_c)\n', (1448, 1453), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2020 New York University A<NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for camel_tools.transliterate.
"""
from __future__ import absolute_import
import pytest
from camel_tools.utils.charmap import CharMapper
from camel_tools.utils.transliterate import Transliterator
# A mapper that translates lower-case English characters to a lower-case x and
# upper-case English characters to an upper-case X. This makes it easy to
# predict what the transliteration should be.
TEST_MAP = {
u'A-Z': u'X',
u'a-z': u'x',
}
TEST_MAPPER = CharMapper(TEST_MAP, None)
class TestTransliteratorInit(object):
"""Test class for Transliterator.__init__.
"""
def test_init_none_mapper(self):
"""Test that init raises a TypeError when given a mapper that is None.
"""
with pytest.raises(TypeError):
Transliterator(None)
def test_init_invalid_type_mapper(self):
"""Test that init raises a TypeError when given a mapper that is not a
CharMapper instance.
"""
with pytest.raises(TypeError):
Transliterator({})
def test_init_valid_mapper(self):
"""Test that init doesn't raise an error when given a valid mapper.
"""
assert Transliterator(TEST_MAPPER)
def test_init_none_marker(self):
"""Test that init raises a TypeError when given a marker that is None.
"""
with pytest.raises(TypeError):
Transliterator(TEST_MAPPER, None)
def test_init_invalid_type_marker(self):
"""Test that init raises a TypeError when given a marker that is not a
string.
"""
with pytest.raises(TypeError):
Transliterator(TEST_MAPPER, [])
def test_init_empty_marker(self):
"""Test that init raises a ValueError when given a marker that is an
empty string.
"""
with pytest.raises(ValueError):
Transliterator(TEST_MAPPER, '')
def test_init_invalid_marker1(self):
"""Test that init raises a ValueError when given an invalid marker (
        whitespace in the middle).
"""
with pytest.raises(ValueError):
Transliterator(TEST_MAPPER, '@@LAT @@')
def test_init_invalid_marker2(self):
"""Test that init raises a ValueError when given an invalid marker (
whitespace at the end).
"""
with pytest.raises(ValueError):
Transliterator(TEST_MAPPER, '@@LAT@@ ')
def test_init_invalid_marker3(self):
"""Test that init raises a ValueError when given an invalid marker (
whitespace at the beginning).
"""
with pytest.raises(ValueError):
Transliterator(TEST_MAPPER, ' @@LAT@@')
def test_init_valid_marker1(self):
"""Test that init doesn't raise an error when given a valid marker.
"""
assert Transliterator(TEST_MAPPER, '@@LAT@@')
def test_init_valid_marker2(self):
"""Test that init doesn't raise an error when given a valid marker.
"""
assert Transliterator(TEST_MAPPER, u'@@LAT@@')
class TestTransliteratorTranslate(object):
"""Test class for Transliterator.translate.
"""
def test_trans_empty(self):
"""Test that transliterating an empty string returns an empty string.
"""
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(u'') == u''
def test_trans_single_no_markers(self):
"""Test that a single word with no markers gets transliterated.
"""
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(u'Hello') == u'Xxxxx'
def test_trans_single_with_markers(self):
"""Test that a single word with markers does not get transliterated.
"""
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(u'@@Hello') == u'@@Hello'
def test_trans_single_strip(self):
"""Test that a single word with markers does not get transliterated
but markers do get stripped when strip_markers is set to True.
"""
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(u'@@Hello', True) == u'Hello'
def test_trans_single_ignore(self):
"""Test that a single word with markers gets transliterated when ignore
markers is set to True.
"""
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(u'@@Hello', False, True) == u'@@Xxxxx'
def test_trans_single_ignore_strip(self):
"""Test that a single word with markers gets transliterated with
markers stripped when both strip_markers and ignore_markers are set to
True.
"""
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(u'@@Hello', True, True) == u'Xxxxx'
def test_trans_sent_no_markers(self):
"""Test that a sentence with no markers gets transliterated.
"""
sent_orig = u'Hello World, this is a sentence!'
sent_out = u'Xxxxx Xxxxx, xxxx xx x xxxxxxxx!'
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(sent_orig) == sent_out
def test_trans_sent_with_markers(self):
"""Test that tokens with markers in a sentence do not get
transliterated.
"""
sent_orig = u'Hello @@World, this is a @@sentence!'
sent_out = u'Xxxxx @@World, xxxx xx x @@sentence!'
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(sent_orig) == sent_out
def test_trans_sent_strip(self):
"""Test that tokens with markers in a sentence do not get
transliterated but markers do get stripped when strip_markers is set
to True.
"""
sent_orig = u'Hello @@World, this is a @@sentence!'
sent_out = u'Xxxxx World, xxxx xx x sentence!'
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(sent_orig, True) == sent_out
def test_trans_sent_ignore(self):
"""Test that tokens with markers in a sentence get transliterated
when ignore markers is set to True.
"""
sent_orig = u'Hello @@World, this is a @@sentence!'
sent_out = u'Xxxxx @@Xxxxx, xxxx xx x @@xxxxxxxx!'
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(sent_orig, False, True) == sent_out
def test_trans_sent_ignore_strip(self):
"""Test that tokens with markers in a sentence get transliterated with
markers stripped when both strip_markers and ignore_markers are set to
True.
"""
sent_orig = u'Hello @@World, this is a @@sentence!'
sent_out = u'Xxxxx Xxxxx, xxxx xx x xxxxxxxx!'
trans = Transliterator(TEST_MAPPER, '@@')
assert trans.transliterate(sent_orig, True, True) == sent_out
| [
"camel_tools.utils.transliterate.Transliterator",
"pytest.raises",
"camel_tools.utils.charmap.CharMapper"
]
| [((1624, 1650), 'camel_tools.utils.charmap.CharMapper', 'CharMapper', (['TEST_MAP', 'None'], {}), '(TEST_MAP, None)\n', (1634, 1650), False, 'from camel_tools.utils.charmap import CharMapper\n'), ((2328, 2355), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER'], {}), '(TEST_MAPPER)\n', (2342, 2355), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((3964, 4002), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@LAT@@"""'], {}), "(TEST_MAPPER, '@@LAT@@')\n", (3978, 4002), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((4147, 4186), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', 'u"""@@LAT@@"""'], {}), "(TEST_MAPPER, u'@@LAT@@')\n", (4161, 4186), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((4428, 4461), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (4442, 4461), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((4655, 4688), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (4669, 4688), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((4899, 4932), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (4913, 4932), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((5210, 5243), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (5224, 5243), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((5491, 5524), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (5505, 5524), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((5841, 5874), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (5855, 5874), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((6199, 6232), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (6213, 6232), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((6575, 6608), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (6589, 6608), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((7010, 7043), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (7024, 7043), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((7414, 7447), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (7428, 7447), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((7881, 7914), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@"""'], {}), "(TEST_MAPPER, '@@')\n", (7895, 7914), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((1889, 1913), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1902, 1913), False, 'import pytest\n'), ((1927, 1947), 
'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['None'], {}), '(None)\n', (1941, 1947), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((2128, 2152), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2141, 2152), False, 'import pytest\n'), ((2166, 2184), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['{}'], {}), '({})\n', (2180, 2184), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((2499, 2523), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2512, 2523), False, 'import pytest\n'), ((2537, 2570), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', 'None'], {}), '(TEST_MAPPER, None)\n', (2551, 2570), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((2738, 2762), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2751, 2762), False, 'import pytest\n'), ((2776, 2807), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '[]'], {}), '(TEST_MAPPER, [])\n', (2790, 2807), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((2972, 2997), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2985, 2997), False, 'import pytest\n'), ((3011, 3042), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '""""""'], {}), "(TEST_MAPPER, '')\n", (3025, 3042), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((3223, 3248), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3236, 3248), False, 'import pytest\n'), ((3262, 3301), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@LAT @@"""'], {}), "(TEST_MAPPER, '@@LAT @@')\n", (3276, 3301), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((3479, 3504), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3492, 3504), False, 'import pytest\n'), ((3518, 3557), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '"""@@LAT@@ """'], {}), "(TEST_MAPPER, '@@LAT@@ ')\n", (3532, 3557), False, 'from camel_tools.utils.transliterate import Transliterator\n'), ((3741, 3766), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3754, 3766), False, 'import pytest\n'), ((3780, 3819), 'camel_tools.utils.transliterate.Transliterator', 'Transliterator', (['TEST_MAPPER', '""" @@LAT@@"""'], {}), "(TEST_MAPPER, ' @@LAT@@')\n", (3794, 3819), False, 'from camel_tools.utils.transliterate import Transliterator\n')] |
######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import json
import sys
from collections import OrderedDict
def get_versioned_template(template_filename, bucket, solution, version, region):
with open(template_filename, "rt") as f:
template_text = "".join(f.readlines())
template_text = template_text.replace("%bucket%", bucket)
template_text = template_text.replace("%solution%", solution)
template_text = template_text.replace("%version%", version)
if region == 'cn-north-1' or region == 'cn-northwest-1':
arn_prefix = "arn:aws-cn"
else:
arn_prefix = "arn:aws"
template_text = template_text.replace("%arn_prefix%", arn_prefix)
return json.loads(template_text, object_pairs_hook=OrderedDict)
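# Note: the %bucket%, %solution%, %version% and %arn_prefix% placeholders are
# substituted in the raw template text before JSON parsing, so they may appear
# anywhere in the template body.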
def main(template_file, bucket, solution, version, region):
template = get_versioned_template(template_file, bucket, solution, version, region)
print(json.dumps(template, indent=4))
main(template_file=sys.argv[1], bucket=sys.argv[2], solution=sys.argv[3], version=sys.argv[4], region=sys.argv[5])
exit(0)
| [
"json.loads",
"json.dumps"
]
| [((2110, 2166), 'json.loads', 'json.loads', (['template_text'], {'object_pairs_hook': 'OrderedDict'}), '(template_text, object_pairs_hook=OrderedDict)\n', (2120, 2166), False, 'import json\n'), ((2327, 2357), 'json.dumps', 'json.dumps', (['template'], {'indent': '(4)'}), '(template, indent=4)\n', (2337, 2357), False, 'import json\n')] |
#!/usr/bin/env python3
#
# Copyright (C) 2018 <NAME> <<EMAIL>>
# Copyright (C) 2019,2020 <NAME> <<EMAIL>>
# Copyright (C) 2020, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import concurrent.futures
import os
import pathlib
import subprocess
import sys
import time
from logging import getLogger
import py7zr
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from aqt.archives import QtPackage
from aqt.helper import altlink, versiontuple
from aqt.qtpatch import Updater
from aqt.settings import Settings
class ExtractionError(Exception):
pass
class QtInstaller:
"""
    Installer class to download packages and extract them.
"""
def __init__(self, qt_archives, logging=None, command=None, target_dir=None):
self.qt_archives = qt_archives
if logging:
self.logger = logging
else:
self.logger = getLogger('aqt')
self.command = command
if target_dir is None:
self.base_dir = os.getcwd()
else:
self.base_dir = target_dir
self.settings = Settings()
def retrieve_archive(self, package: QtPackage):
archive = package.archive
url = package.url
name = package.name
start_time = time.perf_counter()
self.logger.info("Downloading {}...".format(name))
self.logger.debug("Download URL: {}".format(url))
session = requests.Session()
retry = Retry(connect=5, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
try:
r = session.get(url, allow_redirects=False, stream=True)
if r.status_code == 302:
newurl = altlink(r.url, r.headers['Location'], logger=self.logger)
self.logger.info('Redirected URL: {}'.format(newurl))
r = session.get(newurl, stream=True)
except requests.exceptions.ConnectionError as e:
self.logger.error("Connection error: %s" % e.args)
raise e
else:
try:
with open(archive, 'wb') as fd:
for chunk in r.iter_content(chunk_size=8196):
fd.write(chunk)
fd.flush()
if self.command is None:
with open(archive, 'rb') as fd:
self.extract_archive(fd)
except Exception as e:
exc = sys.exc_info()
self.logger.error("Download error: %s" % exc[1])
raise e
else:
if self.command is not None:
self.extract_archive_ext(archive)
os.unlink(archive)
self.logger.info("Finish installation of {} in {}".format(archive, time.perf_counter() - start_time))
def extract_archive(self, archive):
szf = py7zr.SevenZipFile(archive)
szf.extractall(path=self.base_dir)
szf.close()
def extract_archive_ext(self, archive):
if self.base_dir is not None:
command_args = [self.command, 'x', '-aoa', '-bd', '-y', '-o{}'.format(self.base_dir), archive]
else:
command_args = [self.command, 'x', '-aoa', '-bd', '-y', archive]
try:
proc = subprocess.run(command_args, stdout=subprocess.PIPE, check=True)
self.logger.debug(proc.stdout)
except subprocess.CalledProcessError as cpe:
self.logger.error("Extraction error: %d" % cpe.returncode)
if cpe.stdout is not None:
self.logger.error(cpe.stdout)
if cpe.stderr is not None:
self.logger.error(cpe.stderr)
raise cpe
def get_arch_dir(self, arch):
if arch.startswith('win64_mingw'):
arch_dir = arch[6:] + '_64'
elif arch.startswith('win32_mingw'):
arch_dir = arch[6:] + '_32'
elif arch.startswith('win'):
arch_dir = arch[6:]
else:
arch_dir = arch
return arch_dir
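    # Illustrative examples (assumed arch names): 'win64_mingw73' -> 'mingw73_64',
    # 'win32_mingw73' -> 'mingw73_32', 'win64_msvc2019_64' -> 'msvc2019_64'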
def make_conf_files(self, qt_version, arch):
"""Make Qt configuration files, qt.conf and qtconfig.pri"""
arch_dir = self.get_arch_dir(arch)
try:
# prepare qt.conf
with open(os.path.join(self.base_dir, qt_version, arch_dir, 'bin', 'qt.conf'), 'w') as f:
f.write("[Paths]\n")
f.write("Prefix=..\n")
# update qtconfig.pri only as OpenSource
with open(os.path.join(self.base_dir, qt_version, arch_dir, 'mkspecs', 'qconfig.pri'), 'r+') as f:
lines = f.readlines()
f.seek(0)
f.truncate()
for line in lines:
if line.startswith('QT_EDITION ='):
line = 'QT_EDITION = OpenSource\n'
if line.startswith('QT_LICHECK ='):
line = 'QT_LICHECK =\n'
f.write(line)
except IOError as e:
self.logger.error("Configuration file generation error: %s\n", e.args, exc_info=True)
raise e
def install(self):
with concurrent.futures.ThreadPoolExecutor(self.settings.concurrency) as executor:
futures = [executor.submit(self.retrieve_archive, ar) for ar in self.qt_archives.get_archives()]
done, not_done = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_EXCEPTION)
if len(not_done) > 0:
self.logger.error("Installation error detected.")
exit(1)
try:
for feature in done:
feature.result()
except Exception:
exit(1)
def finalize(self):
target = self.qt_archives.get_target_config()
self.make_conf_files(target.version, target.arch)
prefix = pathlib.Path(self.base_dir) / target.version / target.arch
updater = Updater(prefix, self.logger)
if versiontuple(target.version) < (5, 14, 2):
updater.patch_qt(target)
| [
"logging.getLogger",
"requests.Session",
"py7zr.SevenZipFile",
"urllib3.util.retry.Retry",
"pathlib.Path",
"requests.adapters.HTTPAdapter",
"subprocess.run",
"time.perf_counter",
"aqt.settings.Settings",
"os.getcwd",
"os.path.join",
"sys.exc_info",
"os.unlink",
"aqt.helper.versiontuple",
"aqt.qtpatch.Updater",
"aqt.helper.altlink"
]
| [((2119, 2129), 'aqt.settings.Settings', 'Settings', ([], {}), '()\n', (2127, 2129), False, 'from aqt.settings import Settings\n'), ((2292, 2311), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2309, 2311), False, 'import time\n'), ((2447, 2465), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2463, 2465), False, 'import requests\n'), ((2482, 2518), 'urllib3.util.retry.Retry', 'Retry', ([], {'connect': '(5)', 'backoff_factor': '(0.5)'}), '(connect=5, backoff_factor=0.5)\n', (2487, 2518), False, 'from urllib3.util.retry import Retry\n'), ((2537, 2567), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'retry'}), '(max_retries=retry)\n', (2548, 2567), False, 'from requests.adapters import HTTPAdapter\n'), ((3766, 3784), 'os.unlink', 'os.unlink', (['archive'], {}), '(archive)\n', (3775, 3784), False, 'import os\n'), ((3950, 3977), 'py7zr.SevenZipFile', 'py7zr.SevenZipFile', (['archive'], {}), '(archive)\n', (3968, 3977), False, 'import py7zr\n'), ((7024, 7052), 'aqt.qtpatch.Updater', 'Updater', (['prefix', 'self.logger'], {}), '(prefix, self.logger)\n', (7031, 7052), False, 'from aqt.qtpatch import Updater\n'), ((1923, 1939), 'logging.getLogger', 'getLogger', (['"""aqt"""'], {}), "('aqt')\n", (1932, 1939), False, 'from logging import getLogger\n'), ((2030, 2041), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2039, 2041), False, 'import os\n'), ((4354, 4418), 'subprocess.run', 'subprocess.run', (['command_args'], {'stdout': 'subprocess.PIPE', 'check': '(True)'}), '(command_args, stdout=subprocess.PIPE, check=True)\n', (4368, 4418), False, 'import subprocess\n'), ((7064, 7092), 'aqt.helper.versiontuple', 'versiontuple', (['target.version'], {}), '(target.version)\n', (7076, 7092), False, 'from aqt.helper import altlink, versiontuple\n'), ((2797, 2854), 'aqt.helper.altlink', 'altlink', (['r.url', "r.headers['Location']"], {'logger': 'self.logger'}), "(r.url, r.headers['Location'], logger=self.logger)\n", (2804, 2854), False, 'from aqt.helper import altlink, versiontuple\n'), ((6947, 6974), 'pathlib.Path', 'pathlib.Path', (['self.base_dir'], {}), '(self.base_dir)\n', (6959, 6974), False, 'import pathlib\n'), ((3537, 3551), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3549, 3551), False, 'import sys\n'), ((3860, 3879), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3877, 3879), False, 'import time\n'), ((5342, 5409), 'os.path.join', 'os.path.join', (['self.base_dir', 'qt_version', 'arch_dir', '"""bin"""', '"""qt.conf"""'], {}), "(self.base_dir, qt_version, arch_dir, 'bin', 'qt.conf')\n", (5354, 5409), False, 'import os\n'), ((5573, 5648), 'os.path.join', 'os.path.join', (['self.base_dir', 'qt_version', 'arch_dir', '"""mkspecs"""', '"""qconfig.pri"""'], {}), "(self.base_dir, qt_version, arch_dir, 'mkspecs', 'qconfig.pri')\n", (5585, 5648), False, 'import os\n')] |
"""Utility Modules."""
from typing import Any, Callable, Dict, List, Optional, Sequence, TypeVar, Union
import jax
import jax.numpy as jnp
from .module import Module, parameters_method
T = TypeVar("T", bound=Module)
O = TypeVar("O")
class ParameterModule(Module):
"""A PAX module that registers attributes as parameters by default."""
def parameters(self):
return self.apply_submodules(lambda x: x.parameters())
class StateModule(Module):
"""A PAX module that registers attributes as states by default."""
parameters = parameters_method()
class LazyModule(Module):
"""A lazy module is a module that only creates submodules when needed.
Example:
>>> from dataclasses import dataclass
>>> @dataclass
... class MLP(pax.experimental.LazyModule):
... features: list
...
... def __call__(self, x):
... sizes = zip(self.features[:-1], self.features[1:])
... for i, (in_dim, out_dim) in enumerate(sizes):
... fc = self.get_or_create(f"fc_{i}", lambda: pax.Linear(in_dim, out_dim))
... x = jax.nn.relu(fc(x))
... return x
...
...
>>> mlp, _ = MLP([1, 2, 3]) % jnp.ones((1, 1))
>>> print(mlp.summary())
MLP(features=[1, 2, 3])
├── Linear(in_dim=1, out_dim=2, with_bias=True)
└── Linear(in_dim=2, out_dim=3, with_bias=True)
"""
def get_or_create(self, name, create_fn: Callable[[], T]) -> T:
"""Create and register a new attribute when it is not exist.
Return the attribute.
"""
if hasattr(self, name):
value = getattr(self, name)
else:
assert callable(create_fn), "Expect a callable function"
value = create_fn()
setattr(self, name, value)
return value
class Lambda(Module):
"""Convert a function to a module.
Example:
>>> net = pax.Lambda(jax.nn.relu)
>>> print(net.summary())
x => relu(x)
>>> y = net(jnp.array(-1))
>>> y
DeviceArray(0, dtype=int32, weak_type=True)
"""
func: Callable
def __init__(self, func: Callable, name: Optional[str] = None):
super().__init__(name=name)
self.func = func
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __repr__(self) -> str:
if self.name is not None:
return super().__repr__()
else:
return f"{self.__class__.__qualname__}({self.func.__name__})"
def summary(self, return_list: bool = False) -> Union[str, List[str]]:
if self.name is not None:
name = self.name
elif isinstance(self.func, jax.custom_jvp) and hasattr(self.func, "fun"):
if hasattr(self.func.fun, "__name__"):
name = self.func.fun.__name__
else:
name = f"{self.func.fun}"
elif hasattr(self.func, "__name__"):
name = self.func.__name__
else:
name = f"{self.func}"
output = f"x => {name}(x)"
return [output] if return_list else output
class Flattener(Module):
"""Flatten PAX modules for better performance.
Example:
>>> net = pax.Linear(3, 3)
>>> opt = opax.adam(1e-3)(net.parameters())
>>> flat_mods = pax.experimental.Flattener(model=net, optimizer=opt)
>>> net, opt = flat_mods.model, flat_mods.optimizer
>>> print(net.summary())
Linear(in_dim=3, out_dim=3, with_bias=True)
>>> print(opt.summary())
chain.<locals>.Chain
├── scale_by_adam.<locals>.ScaleByAdam
│ ├── Linear(in_dim=3, out_dim=3, with_bias=True)
│ └── Linear(in_dim=3, out_dim=3, with_bias=True)
└── scale.<locals>.Scale
"""
treedef_dict: Dict[str, Any]
leaves_dict: Dict[str, Sequence[jnp.ndarray]]
def __init__(self, **kwargs):
"""Create a new flattener."""
super().__init__()
self.treedef_dict = {}
self.leaves_dict = {}
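# Each value is stored as (treedef, ndarray leaves); __getattr__ rebuilds the original pytree on access.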
for name, value in kwargs.items():
leaves, treedef = jax.tree_flatten(value)
self.treedef_dict[name] = treedef
self.leaves_dict[name] = leaves
def __getattr__(self, name: str) -> Any:
if name in self.treedef_dict:
treedef = self.treedef_dict[name]
leaves = self.leaves_dict[name]
value = jax.tree_unflatten(treedef, leaves)
return value
else:
raise AttributeError()
def update(self: T, **kwargs) -> T:
"""Update the flattener.
Example:
>>> net = pax.Linear(3, 3)
>>> flats = pax.experimental.Flattener(net=net)
>>> flats = flats.update(net=pax.Linear(4, 4))
>>> print(flats.net.summary())
Linear(in_dim=4, out_dim=4, with_bias=True)
"""
new_self = self.copy()
for name, value in kwargs.items():
leaves, treedef = jax.tree_flatten(value)
new_self.treedef_dict[name] = treedef
new_self.leaves_dict[name] = leaves
return new_self
def parameters(self: T) -> T:
"""Raise an error.
Need to reconstruct the original module before getting parameters.
"""
raise ValueError(
"A flattener only stores ndarray leaves as non-trainable states.\n"
"Reconstruct the original module before getting parameters."
)
| [
"jax.tree_flatten",
"jax.tree_unflatten",
"typing.TypeVar"
]
| [((194, 220), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'Module'}), "('T', bound=Module)\n", (201, 220), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, TypeVar, Union\n'), ((225, 237), 'typing.TypeVar', 'TypeVar', (['"""O"""'], {}), "('O')\n", (232, 237), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, TypeVar, Union\n'), ((4048, 4071), 'jax.tree_flatten', 'jax.tree_flatten', (['value'], {}), '(value)\n', (4064, 4071), False, 'import jax\n'), ((4356, 4391), 'jax.tree_unflatten', 'jax.tree_unflatten', (['treedef', 'leaves'], {}), '(treedef, leaves)\n', (4374, 4391), False, 'import jax\n'), ((4912, 4935), 'jax.tree_flatten', 'jax.tree_flatten', (['value'], {}), '(value)\n', (4928, 4935), False, 'import jax\n')] |
import base64
import mimetypes
import os
from django.conf import settings
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.cache import cache_control
from django.views.static import serve as django_serve
from database_files.models import File
@cache_control(max_age=86400)
def serve(request, name):
"""
Retrieves the file from the database.
"""
f = get_object_or_404(File, name=name)
f.dump()
mimetype = mimetypes.guess_type(name)[0] or 'application/octet-stream'
response = HttpResponse(f.content, content_type=mimetype)
response['Content-Length'] = f.size
return response
def serve_mixed(request, *args, **kwargs):
"""
First attempts to serve the file from the filesystem,
then tries the database.
"""
name = kwargs.get('name') or kwargs.get('path')
document_root = kwargs.get('document_root')
document_root = document_root or settings.MEDIA_ROOT
try:
# First attempt to serve from filesystem.
return django_serve(request, name, document_root)
except Http404:
# Then try serving from database.
return serve(request, name)
| [
"django.http.HttpResponse",
"django.shortcuts.get_object_or_404",
"django.views.decorators.cache.cache_control",
"mimetypes.guess_type",
"django.views.static.serve"
]
| [((321, 349), 'django.views.decorators.cache.cache_control', 'cache_control', ([], {'max_age': '(86400)'}), '(max_age=86400)\n', (334, 349), False, 'from django.views.decorators.cache import cache_control\n'), ((442, 476), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['File'], {'name': 'name'}), '(File, name=name)\n', (459, 476), False, 'from django.shortcuts import get_object_or_404\n'), ((580, 626), 'django.http.HttpResponse', 'HttpResponse', (['f.content'], {'content_type': 'mimetype'}), '(f.content, content_type=mimetype)\n', (592, 626), False, 'from django.http import Http404, HttpResponse\n'), ((1066, 1108), 'django.views.static.serve', 'django_serve', (['request', 'name', 'document_root'], {}), '(request, name, document_root)\n', (1078, 1108), True, 'from django.views.static import serve as django_serve\n'), ((505, 531), 'mimetypes.guess_type', 'mimetypes.guess_type', (['name'], {}), '(name)\n', (525, 531), False, 'import mimetypes\n')] |
# coding: utf-8
'''Provides the front controller.
'''
from math import ceil
import os
from flask import json
from flask import Flask
from flask import request
from flask import send_from_directory
from flask import render_template
# from json_loader import load_locations
# from json_loader import prepare_locations
from models import Location
# Number of locations to display per page
LOCATION_ITEMS_PER_PAGE = 20
app = Flask(__name__)
app.config['GOOGLE_API_KEY'] = os.environ['GOOGLE_API_KEY']
app.config['ROOT'] = (app.config['APPLICATION_ROOT']
if app.config['APPLICATION_ROOT'] else '')
@app.route('/static/<path:path>')
def send_js(path):
return send_from_directory('static', path)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/location')
def location():
req_title = request.args.get('title', None)
try:
req_page = int(request.args.get('page', 1))
except ValueError as e:
req_page = 1
query = Location.selectbase()
if req_title:
query = query.where(Location.title ** '%{}%'.format(req_title))
total_items = query.count()
total_pages = ceil(total_items / LOCATION_ITEMS_PER_PAGE)
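# Clamp the requested page to the last available page so out-of-range requests still resolve to a valid page.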
current_page = req_page if req_page <= total_pages else total_pages
query = query.paginate(current_page, LOCATION_ITEMS_PER_PAGE)
locations = [l.as_dict() for l in query]
return json.jsonify({
'meta': {
'pager_data': {
'totalItems': total_items,
'totalPages': total_pages,
'currentItems': len(locations),
'currentPage': current_page,
'itemsPerPage': LOCATION_ITEMS_PER_PAGE,
},
},
'entities': {
'locations': locations,
},
})
@app.route('/movie')
def movie():
req_title = request.args.get('title', None)
if not req_title:
return json.jsonify([])
query = (Location.select(Location.title)
.distinct()
.where(Location.title ** '%{}%'.format(req_title)))
movies = [{'id': index, 'title': l.title} for index, l in enumerate(query)]
return json.jsonify(movies)
| [
"flask.render_template",
"flask.request.args.get",
"flask.send_from_directory",
"math.ceil",
"flask.json.jsonify",
"flask.Flask",
"models.Location.selectbase",
"models.Location.select"
]
| [((387, 402), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (392, 402), False, 'from flask import Flask\n'), ((647, 682), 'flask.send_from_directory', 'send_from_directory', (['"""static"""', 'path'], {}), "('static', path)\n", (666, 682), False, 'from flask import send_from_directory\n'), ((725, 754), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (740, 754), False, 'from flask import render_template\n'), ((813, 844), 'flask.request.args.get', 'request.args.get', (['"""title"""', 'None'], {}), "('title', None)\n", (829, 844), False, 'from flask import request\n'), ((968, 989), 'models.Location.selectbase', 'Location.selectbase', ([], {}), '()\n', (987, 989), False, 'from models import Location\n'), ((1132, 1175), 'math.ceil', 'ceil', (['(total_items / LOCATION_ITEMS_PER_PAGE)'], {}), '(total_items / LOCATION_ITEMS_PER_PAGE)\n', (1136, 1175), False, 'from math import ceil\n'), ((1828, 1859), 'flask.request.args.get', 'request.args.get', (['"""title"""', 'None'], {}), "('title', None)\n", (1844, 1859), False, 'from flask import request\n'), ((2160, 2180), 'flask.json.jsonify', 'json.jsonify', (['movies'], {}), '(movies)\n', (2172, 2180), False, 'from flask import json\n'), ((1898, 1914), 'flask.json.jsonify', 'json.jsonify', (['[]'], {}), '([])\n', (1910, 1914), False, 'from flask import json\n'), ((877, 904), 'flask.request.args.get', 'request.args.get', (['"""page"""', '(1)'], {}), "('page', 1)\n", (893, 904), False, 'from flask import request\n'), ((1929, 1960), 'models.Location.select', 'Location.select', (['Location.title'], {}), '(Location.title)\n', (1944, 1960), False, 'from models import Location\n')] |
import serial
import serial.tools.list_ports
from hale_hub.constants import STARTING_OUTLET_COMMAND, SERIAL_BAUD_RATE, SERIAL_TIMEOUT
from hale_hub.ifttt_logger import send_ifttt_log
class _Outlet:
def __init__(self, name):
self.state = 0
self.name = name
class _OutletInterface:
def __init__(self):
self.outlets = [_Outlet('Outlet 0'), _Outlet('Outlet 1'), _Outlet('Outlet 2')]
self.serial_interface = None
self.serial_interface_string = None
def set_outlet_name(self, name, outlet_id):
if outlet_id < len(self.outlets):
self.outlets[outlet_id].name = name
def set_serial_interface(self, serial_interface_string):
try:
print('Setting serial interface with description: {}'.format(serial_interface_string))
self.serial_interface_string = serial_interface_string
ports = [p.device for p in serial.tools.list_ports.comports() if self.serial_interface_string in p.description]
self.serial_interface = serial.Serial(ports[0], SERIAL_BAUD_RATE, timeout=SERIAL_TIMEOUT)
except IndexError:
send_ifttt_log(__name__, 'No serial ports could be opened!')
def _send_outlet_command(self, outlet_id, outlet_state):
try:
print('Changing outlet {0} to {1} state'.format(outlet_id, outlet_state))
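# The command byte packs the outlet id into the upper bits (outlet_id << 1) and the on/off state into bit 0, offset by STARTING_OUTLET_COMMAND.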
command = bytearray([STARTING_OUTLET_COMMAND + (outlet_id << 1) + outlet_state])
print('Writing {0} to serial'.format(command))
self.serial_interface.write(command)
except (serial.SerialException, AttributeError):
send_ifttt_log(__name__, 'No serial bytes could be written')
if self.serial_interface and self.serial_interface.is_open:  # is_open is a property in pySerial 3.x, not a method
self.serial_interface.close()
self.set_serial_interface(self.serial_interface_string)
def toggle_outlet(self, outlet_id):
if outlet_id < len(self.outlets):
self.outlets[outlet_id].state ^= 1
self._send_outlet_command(outlet_id, self.outlets[outlet_id].state)
def turn_on_outlet(self, outlet_id):
if outlet_id < len(self.outlets):
self.outlets[outlet_id].state = 1
self._send_outlet_command(outlet_id, self.outlets[outlet_id].state)
def turn_off_outlet(self, outlet_id):
if outlet_id < len(self.outlets):
self.outlets[outlet_id].state = 0
self._send_outlet_command(outlet_id, self.outlets[outlet_id].state)
def get_outlets(self):
return self.outlets
_outlet_interface = _OutletInterface()
set_outlet_serial_interface = _outlet_interface.set_serial_interface
toggle_outlet = _outlet_interface.toggle_outlet
turn_on_outlet = _outlet_interface.turn_on_outlet
turn_off_outlet = _outlet_interface.turn_off_outlet
get_outlets = _outlet_interface.get_outlets
set_outlet_name = _outlet_interface.set_outlet_name
| [
"hale_hub.ifttt_logger.send_ifttt_log",
"serial.tools.list_ports.comports",
"serial.Serial"
]
| [((1036, 1101), 'serial.Serial', 'serial.Serial', (['ports[0]', 'SERIAL_BAUD_RATE'], {'timeout': 'SERIAL_TIMEOUT'}), '(ports[0], SERIAL_BAUD_RATE, timeout=SERIAL_TIMEOUT)\n', (1049, 1101), False, 'import serial\n'), ((1141, 1199), 'hale_hub.ifttt_logger.send_ifttt_log', 'send_ifttt_log', (['__name__', '"""No serial ports could be upon!"""'], {}), "(__name__, 'No serial ports could be upon!')\n", (1155, 1199), False, 'from hale_hub.ifttt_logger import send_ifttt_log\n'), ((1631, 1691), 'hale_hub.ifttt_logger.send_ifttt_log', 'send_ifttt_log', (['__name__', '"""No serial bytes could be written"""'], {}), "(__name__, 'No serial bytes could be written')\n", (1645, 1691), False, 'from hale_hub.ifttt_logger import send_ifttt_log\n'), ((915, 949), 'serial.tools.list_ports.comports', 'serial.tools.list_ports.comports', ([], {}), '()\n', (947, 949), False, 'import serial\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
def remove_nan(xyzs): return xyzs[~np.isnan(xyzs).any(axis = 1)]
def measure_twocores(core_xyz_ref, core_xyz_tar):
''' Measure the following aspects of two helical cores.
- Interhelical distance vector between the centers.
- Interhelical angle (0-90 degree)
'''
# Obtain the centers...
center_ref = np.nanmean(core_xyz_ref, axis = 0)
center_tar = np.nanmean(core_xyz_tar, axis = 0)
# Construct the interhelical distance vector...
ih_dvec = center_tar - center_ref
# Calculate the length of interhelical distance vector...
norm_ih_dvec = np.linalg.norm(ih_dvec)
# Obtain the helical core vectors...
core_xyz_ref_nonan = remove_nan(core_xyz_ref)
core_xyz_tar_nonan = remove_nan(core_xyz_tar)
core_vec_ref = core_xyz_ref_nonan[-1] - core_xyz_ref_nonan[0]
core_vec_tar = core_xyz_tar_nonan[-1] - core_xyz_tar_nonan[0]
# Calculate the interhelical angle...
core_vec_ref_unit = core_vec_ref / np.linalg.norm(core_vec_ref)
core_vec_tar_unit = core_vec_tar / np.linalg.norm(core_vec_tar)
ih_ang = np.arccos( np.dot(core_vec_ref_unit, core_vec_tar_unit) )
return ih_dvec, norm_ih_dvec, core_vec_ref_unit, core_vec_tar_unit, ih_ang
def calc_interangle(core_xyz_ref, core_xyz_tar):
''' Measure the following aspects of two helical cores.
- Interhelical angle (0-90 degree)
'''
# Obtain the helical core vectors...
core_xyz_ref_nonan = remove_nan(core_xyz_ref)
core_xyz_tar_nonan = remove_nan(core_xyz_tar)
core_vec_ref = core_xyz_ref_nonan[-1] - core_xyz_ref_nonan[0]
core_vec_tar = core_xyz_tar_nonan[-1] - core_xyz_tar_nonan[0]
# Calculate the interhelical angle...
core_vec_ref_unit = core_vec_ref / np.linalg.norm(core_vec_ref)
core_vec_tar_unit = core_vec_tar / np.linalg.norm(core_vec_tar)
inter_angle = np.arccos( np.dot(core_vec_ref_unit, core_vec_tar_unit) )
if inter_angle > np.pi / 2.0: inter_angle = np.pi - inter_angle
return inter_angle
def calc_interdist(core_xyz_ref, core_xyz_tar):
''' Measure the following aspects of two helical cores.
- Interhelical distance vector between the centers.
Refers to http://geomalgorithms.com/a07-_distance.html for the method.
Q is ref, P is tar.
'''
# Obtain the helical core vectors...
core_xyz_ref_nonan = remove_nan(core_xyz_ref)
core_xyz_tar_nonan = remove_nan(core_xyz_tar)
core_vec_ref = core_xyz_ref_nonan[-1] - core_xyz_ref_nonan[0]
core_vec_tar = core_xyz_tar_nonan[-1] - core_xyz_tar_nonan[0]
# Obtain the starting point...
q0 = core_xyz_ref_nonan[0]
p0 = core_xyz_tar_nonan[0]
w0 = p0 - q0
# Obtain the directional vector with magnitude...
v = core_vec_ref
u = core_vec_tar
# Math part...
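# Closest-approach computation between two lines (see the geomalgorithms reference above):
# a..e are the dot products of the standard formula; de == 0 means the two core axes are parallel.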
a = np.dot(u, u)
b = np.dot(u, v)
c = np.dot(v, v)
d = np.dot(u, w0)
e = np.dot(v, w0)
de = a * c - b * b # Denominator
if de == 0: sc, tc = 0, d / b
else: sc, tc = (b * e - c * d) / de, (a * e - b * d) / de
# Calculate distance...
wc = w0 + sc * u - tc * v
inter_dist = np.linalg.norm(wc)
return inter_dist
| [
"numpy.nanmean",
"numpy.dot",
"numpy.isnan",
"numpy.linalg.norm"
]
| [((402, 434), 'numpy.nanmean', 'np.nanmean', (['core_xyz_ref'], {'axis': '(0)'}), '(core_xyz_ref, axis=0)\n', (412, 434), True, 'import numpy as np\n'), ((454, 486), 'numpy.nanmean', 'np.nanmean', (['core_xyz_tar'], {'axis': '(0)'}), '(core_xyz_tar, axis=0)\n', (464, 486), True, 'import numpy as np\n'), ((662, 685), 'numpy.linalg.norm', 'np.linalg.norm', (['ih_dvec'], {}), '(ih_dvec)\n', (676, 685), True, 'import numpy as np\n'), ((2874, 2886), 'numpy.dot', 'np.dot', (['u', 'u'], {}), '(u, u)\n', (2880, 2886), True, 'import numpy as np\n'), ((2895, 2907), 'numpy.dot', 'np.dot', (['u', 'v'], {}), '(u, v)\n', (2901, 2907), True, 'import numpy as np\n'), ((2916, 2928), 'numpy.dot', 'np.dot', (['v', 'v'], {}), '(v, v)\n', (2922, 2928), True, 'import numpy as np\n'), ((2937, 2950), 'numpy.dot', 'np.dot', (['u', 'w0'], {}), '(u, w0)\n', (2943, 2950), True, 'import numpy as np\n'), ((2959, 2972), 'numpy.dot', 'np.dot', (['v', 'w0'], {}), '(v, w0)\n', (2965, 2972), True, 'import numpy as np\n'), ((3191, 3209), 'numpy.linalg.norm', 'np.linalg.norm', (['wc'], {}), '(wc)\n', (3205, 3209), True, 'import numpy as np\n'), ((1042, 1070), 'numpy.linalg.norm', 'np.linalg.norm', (['core_vec_ref'], {}), '(core_vec_ref)\n', (1056, 1070), True, 'import numpy as np\n'), ((1110, 1138), 'numpy.linalg.norm', 'np.linalg.norm', (['core_vec_tar'], {}), '(core_vec_tar)\n', (1124, 1138), True, 'import numpy as np\n'), ((1163, 1207), 'numpy.dot', 'np.dot', (['core_vec_ref_unit', 'core_vec_tar_unit'], {}), '(core_vec_ref_unit, core_vec_tar_unit)\n', (1169, 1207), True, 'import numpy as np\n'), ((1807, 1835), 'numpy.linalg.norm', 'np.linalg.norm', (['core_vec_ref'], {}), '(core_vec_ref)\n', (1821, 1835), True, 'import numpy as np\n'), ((1875, 1903), 'numpy.linalg.norm', 'np.linalg.norm', (['core_vec_tar'], {}), '(core_vec_tar)\n', (1889, 1903), True, 'import numpy as np\n'), ((1933, 1977), 'numpy.dot', 'np.dot', (['core_vec_ref_unit', 'core_vec_tar_unit'], {}), '(core_vec_ref_unit, core_vec_tar_unit)\n', (1939, 1977), True, 'import numpy as np\n'), ((104, 118), 'numpy.isnan', 'np.isnan', (['xyzs'], {}), '(xyzs)\n', (112, 118), True, 'import numpy as np\n')] |
"""
Unit tests for module `homework_1.tasks.task_3`.
"""
from tempfile import NamedTemporaryFile
from typing import Tuple
import pytest
from homework_1.tasks.task_3 import find_maximum_and_minimum
@pytest.mark.parametrize(
["file_content", "expected_result"],
[
pytest.param(
"0\n",
(0, 0),
id="'0\n', result is (0, 0).",
),
pytest.param(
"1\n2\n3\n4\n5\n",
(1, 5),
id="'1\n2\n3\n4\n5\n', result is (1, 5).",
),
pytest.param(
"1\n-2\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n11\n-12\n",
(-12, 11),
id="'1\n-2\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n11\n-12\n', result: (11,-12).",
),
pytest.param(
"11\n-12\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n1\n-2\n",
(-12, 11),
id="'11\n-12\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n1\n-2\n', result: (11,-12).",
),
pytest.param(
"\n".join(str(num) for num in range(0, 667000)),
(0, 666999),
id="Integers from 0 to 666999 delimited by '\n'.",
),
],
)
def test_find_maximum_and_minimum(file_content: str, expected_result: Tuple[int, int]):
"""
Mocks file using `NamedTemporaryFile` instance with writed
`file_content` inside, where `file_name` == `file.name`.
Passes test if `find_maximum_and_minimum`(`file.name`)
is equal to `expected_result`.
"""
with NamedTemporaryFile(mode="wt") as file:
file.write(file_content)
file.seek(0)
assert find_maximum_and_minimum(file.name) == expected_result
| [
"homework_1.tasks.task_3.find_maximum_and_minimum",
"pytest.param",
"tempfile.NamedTemporaryFile"
]
| [((1456, 1485), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'mode': '"""wt"""'}), "(mode='wt')\n", (1474, 1485), False, 'from tempfile import NamedTemporaryFile\n'), ((283, 344), 'pytest.param', 'pytest.param', (['"""0\n"""', '(0, 0)'], {'id': '"""\'0\n\', result is (0, 0)."""'}), '(\'0\\n\', (0, 0), id="""\'0\n\', result is (0, 0).""")\n', (295, 344), False, 'import pytest\n'), ((398, 484), 'pytest.param', 'pytest.param', (['"""1\n2\n3\n4\n5\n"""', '(1, 5)'], {'id': '"""\'1\n2\n3\n4\n5\n\', result is (1, 5)."""'}), '(\'1\\n2\\n3\\n4\\n5\\n\', (1, 5), id=\n """\'1\n2\n3\n4\n5\n\', result is (1, 5).""")\n', (410, 484), False, 'import pytest\n'), ((537, 671), 'pytest.param', 'pytest.param', (['"""1\n-2\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n11\n-12\n"""', '(-12, 11)'], {'id': '"""\'1\n-2\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n11\n-12\n\', result: (11,-12)."""'}), '("""1\n-2\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n11\n-12\n""", (-12, 11), id=\n """\'1\n-2\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n11\n-12\n\', result: (11,-12).""")\n', (549, 671), False, 'import pytest\n'), ((739, 873), 'pytest.param', 'pytest.param', (['"""11\n-12\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n1\n-2\n"""', '(-12, 11)'], {'id': '"""\'11\n-12\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n1\n-2\n\', result: (11,-12)."""'}), '("""11\n-12\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n1\n-2\n""", (-12, 11), id=\n """\'11\n-12\n3\n-4\n5\n-6\n7\n-8\n9\n-10\n1\n-2\n\', result: (11,-12).""")\n', (751, 873), False, 'import pytest\n'), ((1564, 1599), 'homework_1.tasks.task_3.find_maximum_and_minimum', 'find_maximum_and_minimum', (['file.name'], {}), '(file.name)\n', (1588, 1599), False, 'from homework_1.tasks.task_3 import find_maximum_and_minimum\n')] |
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gtfsobjectbase import GtfsObjectBase
import problems as problems_module
import util
class Transfer(GtfsObjectBase):
"""Represents a transfer in a schedule"""
_REQUIRED_FIELD_NAMES = ['from_stop_id', 'to_stop_id', 'transfer_type']
_FIELD_NAMES = _REQUIRED_FIELD_NAMES + ['min_transfer_time']
_TABLE_NAME = 'transfers'
_ID_COLUMNS = ['from_stop_id', 'to_stop_id']
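# A transfer is keyed by (from_stop_id, to_stop_id); validation is split into ValidateBeforeAdd (field-level checks) and ValidateAfterAdd (checks that need the schedule's stops).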
def __init__(self, schedule=None, from_stop_id=None, to_stop_id=None, transfer_type=None,
min_transfer_time=None, field_dict=None):
self._schedule = None
if field_dict:
self.__dict__.update(field_dict)
else:
self.from_stop_id = from_stop_id
self.to_stop_id = to_stop_id
self.transfer_type = transfer_type
self.min_transfer_time = min_transfer_time
if getattr(self, 'transfer_type', None) in ("", None):
# Use the default, recommended transfer, if attribute is not set or blank
self.transfer_type = 0
else:
try:
self.transfer_type = util.NonNegIntStringToInt(self.transfer_type)
except (TypeError, ValueError):
pass
if hasattr(self, 'min_transfer_time'):
try:
self.min_transfer_time = util.NonNegIntStringToInt(self.min_transfer_time)
except (TypeError, ValueError):
pass
else:
self.min_transfer_time = None
if schedule is not None:
# Note from Tom, Nov 25, 2009: Maybe calling __init__ with a schedule
# should output a DeprecationWarning. A schedule factory probably won't
# use it and other GenericGTFSObject subclasses don't support it.
schedule.AddTransferObject(self)
def ValidateFromStopIdIsPresent(self, problems):
if util.IsEmpty(self.from_stop_id):
problems.MissingValue('from_stop_id')
return False
return True
def ValidateToStopIdIsPresent(self, problems):
if util.IsEmpty(self.to_stop_id):
problems.MissingValue('to_stop_id')
return False
return True
def ValidateTransferType(self, problems):
if not util.IsEmpty(self.transfer_type):
if (not isinstance(self.transfer_type, int)) or \
(self.transfer_type not in range(0, 4)):
problems.InvalidValue('transfer_type', self.transfer_type)
return False
return True
def ValidateMinimumTransferTime(self, problems):
if not util.IsEmpty(self.min_transfer_time):
if self.transfer_type != 2:
problems.MinimumTransferTimeSetWithInvalidTransferType(
self.transfer_type)
# If min_transfer_time is negative, equal to or bigger than 24h, issue
# an error. If smaller than 24h but bigger than 3h issue a warning.
# These errors are not blocking, and should not prevent the transfer
# from being added to the schedule.
if (isinstance(self.min_transfer_time, int)):
if self.min_transfer_time < 0:
problems.InvalidValue('min_transfer_time', self.min_transfer_time,
reason="This field cannot contain a negative " \
"value.")
elif self.min_transfer_time >= 24*3600:
problems.InvalidValue('min_transfer_time', self.min_transfer_time,
reason="The value is very large for a " \
"transfer time and most likely " \
"indicates an error.")
elif self.min_transfer_time >= 3*3600:
problems.InvalidValue('min_transfer_time', self.min_transfer_time,
type=problems_module.TYPE_WARNING,
reason="The value is large for a transfer " \
"time and most likely indicates " \
"an error.")
else:
# It has a value, but it is not an integer
problems.InvalidValue('min_transfer_time', self.min_transfer_time,
reason="If present, this field should contain " \
"an integer value.")
return False
return True
def GetTransferDistance(self):
from_stop = self._schedule.stops[self.from_stop_id]
to_stop = self._schedule.stops[self.to_stop_id]
distance = util.ApproximateDistanceBetweenStops(from_stop, to_stop)
return distance
def ValidateFromStopIdIsValid(self, problems):
if self.from_stop_id not in self._schedule.stops.keys():
problems.InvalidValue('from_stop_id', self.from_stop_id)
return False
return True
def ValidateToStopIdIsValid(self, problems):
if self.to_stop_id not in self._schedule.stops.keys():
problems.InvalidValue('to_stop_id', self.to_stop_id)
return False
return True
def ValidateTransferDistance(self, problems):
distance = self.GetTransferDistance()
if distance > 10000:
problems.TransferDistanceTooBig(self.from_stop_id,
self.to_stop_id,
distance)
elif distance > 2000:
problems.TransferDistanceTooBig(self.from_stop_id,
self.to_stop_id,
distance,
type=problems_module.TYPE_WARNING)
def ValidateTransferWalkingTime(self, problems):
if util.IsEmpty(self.min_transfer_time):
return
if self.min_transfer_time < 0:
# Error has already been reported, and it does not make sense
# to calculate walking speed with negative times.
return
distance = self.GetTransferDistance()
# If min_transfer_time + 120s isn't enough for someone walking very fast
# (2m/s) then issue a warning.
#
# Stops that are close together (less than 240m apart) never trigger this
# warning, regardless of min_transfer_time.
FAST_WALKING_SPEED= 2 # 2m/s
if self.min_transfer_time + 120 < distance / FAST_WALKING_SPEED:
problems.TransferWalkingSpeedTooFast(from_stop_id=self.from_stop_id,
to_stop_id=self.to_stop_id,
transfer_time=self.min_transfer_time,
distance=distance)
def ValidateBeforeAdd(self, problems):
result = True
result = self.ValidateFromStopIdIsPresent(problems) and result
result = self.ValidateToStopIdIsPresent(problems) and result
result = self.ValidateTransferType(problems) and result
result = self.ValidateMinimumTransferTime(problems) and result
return result
def ValidateAfterAdd(self, problems):
valid_stop_ids = True
valid_stop_ids = self.ValidateFromStopIdIsValid(problems) and valid_stop_ids
valid_stop_ids = self.ValidateToStopIdIsValid(problems) and valid_stop_ids
# We need both stop IDs to be valid to able to validate their distance and
# the walking time between them
if valid_stop_ids:
self.ValidateTransferDistance(problems)
self.ValidateTransferWalkingTime(problems)
def Validate(self,
problems=problems_module.default_problem_reporter):
if self.ValidateBeforeAdd(problems) and self._schedule:
self.ValidateAfterAdd(problems)
def _ID(self):
return tuple(self[i] for i in self._ID_COLUMNS)
def AddToSchedule(self, schedule, problems):
schedule.AddTransferObject(self, problems)
| [
"util.ApproximateDistanceBetweenStops",
"util.NonNegIntStringToInt",
"util.IsEmpty"
]
| [((2293, 2324), 'util.IsEmpty', 'util.IsEmpty', (['self.from_stop_id'], {}), '(self.from_stop_id)\n', (2305, 2324), False, 'import util\n'), ((2462, 2491), 'util.IsEmpty', 'util.IsEmpty', (['self.to_stop_id'], {}), '(self.to_stop_id)\n', (2474, 2491), False, 'import util\n'), ((4865, 4921), 'util.ApproximateDistanceBetweenStops', 'util.ApproximateDistanceBetweenStops', (['from_stop', 'to_stop'], {}), '(from_stop, to_stop)\n', (4901, 4921), False, 'import util\n'), ((5947, 5983), 'util.IsEmpty', 'util.IsEmpty', (['self.min_transfer_time'], {}), '(self.min_transfer_time)\n', (5959, 5983), False, 'import util\n'), ((2626, 2658), 'util.IsEmpty', 'util.IsEmpty', (['self.transfer_type'], {}), '(self.transfer_type)\n', (2638, 2658), False, 'import util\n'), ((2934, 2970), 'util.IsEmpty', 'util.IsEmpty', (['self.min_transfer_time'], {}), '(self.min_transfer_time)\n', (2946, 2970), False, 'import util\n'), ((1608, 1653), 'util.NonNegIntStringToInt', 'util.NonNegIntStringToInt', (['self.transfer_type'], {}), '(self.transfer_type)\n', (1633, 1653), False, 'import util\n'), ((1793, 1842), 'util.NonNegIntStringToInt', 'util.NonNegIntStringToInt', (['self.min_transfer_time'], {}), '(self.min_transfer_time)\n', (1818, 1842), False, 'import util\n')] |
from django.urls import path
from . import views
from . views import UserPostListView, PostDetailView, PostDeleteview, PostCreateView, PostUpdateView,CommentUpdateView, VideoCreateView, video_update
urlpatterns = [
path('',views.base, name='base'),
path('login',views.login, name='login'),
path('register',views.register, name='register'),
path('index',views.index, name='index'),
path('logout',views.logout, name='logout'),
path('like_post', views.like_post, name='like_post'),
path('find_friends',views.find_friends, name='find_friends'),
path('profile',views.profile, name='profile'),
path('profile_update', views.profile_update, name='profile_update'),
path('user/<str:username>', UserPostListView.as_view(), name='user_posts'),
path('post/<int:pk>/',PostDetailView.as_view(), name='post_details' ),
path('post/<int:pk>/delete/',PostDeleteview.as_view(), name='post_delete' ),
path('profile_posts',views.profile_posts, name='profile_posts'),
path('results',views.results, name='results'),
path('post/new/',PostCreateView.as_view(), name='post-create' ),
path('post_update',views.post_update, name='post_update'),
path('post/<int:pk>/update',PostUpdateView.as_view(), name='post-update' ),
path('profile_photos',views.profile_photos, name='profile_photos'),
path('comment_update/<int:id>',views.comment_update, name='comment_update'),
path('comment/<int:pk>/update',CommentUpdateView.as_view(), name='comment-update' ),
path('delete/<int:id>',views.delete, name='delete'),
path('favourite',views.favourite, name='favourite'),
path('favourite_posts',views.favourite_posts, name='favourite_posts'),
path('video/new/',VideoCreateView.as_view(), name='video-create' ),
path('post/<int:pk>/video',video_update.as_view(), name='video_update' ),
# path('<str:username>',views.userprofile, name='userprofile'),
path('video_posts',views.video_posts, name='video_posts'),
path('user_videos',views.user_videos,name='user_videos'),
]
| [
"django.urls.path"
]
| [((220, 253), 'django.urls.path', 'path', (['""""""', 'views.base'], {'name': '"""base"""'}), "('', views.base, name='base')\n", (224, 253), False, 'from django.urls import path\n'), ((258, 298), 'django.urls.path', 'path', (['"""login"""', 'views.login'], {'name': '"""login"""'}), "('login', views.login, name='login')\n", (262, 298), False, 'from django.urls import path\n'), ((303, 352), 'django.urls.path', 'path', (['"""register"""', 'views.register'], {'name': '"""register"""'}), "('register', views.register, name='register')\n", (307, 352), False, 'from django.urls import path\n'), ((357, 397), 'django.urls.path', 'path', (['"""index"""', 'views.index'], {'name': '"""index"""'}), "('index', views.index, name='index')\n", (361, 397), False, 'from django.urls import path\n'), ((402, 445), 'django.urls.path', 'path', (['"""logout"""', 'views.logout'], {'name': '"""logout"""'}), "('logout', views.logout, name='logout')\n", (406, 445), False, 'from django.urls import path\n'), ((450, 502), 'django.urls.path', 'path', (['"""like_post"""', 'views.like_post'], {'name': '"""like_post"""'}), "('like_post', views.like_post, name='like_post')\n", (454, 502), False, 'from django.urls import path\n'), ((508, 569), 'django.urls.path', 'path', (['"""find_friends"""', 'views.find_friends'], {'name': '"""find_friends"""'}), "('find_friends', views.find_friends, name='find_friends')\n", (512, 569), False, 'from django.urls import path\n'), ((574, 620), 'django.urls.path', 'path', (['"""profile"""', 'views.profile'], {'name': '"""profile"""'}), "('profile', views.profile, name='profile')\n", (578, 620), False, 'from django.urls import path\n'), ((626, 693), 'django.urls.path', 'path', (['"""profile_update"""', 'views.profile_update'], {'name': '"""profile_update"""'}), "('profile_update', views.profile_update, name='profile_update')\n", (630, 693), False, 'from django.urls import path\n'), ((940, 1004), 'django.urls.path', 'path', (['"""profile_posts"""', 'views.profile_posts'], {'name': '"""profile_posts"""'}), "('profile_posts', views.profile_posts, name='profile_posts')\n", (944, 1004), False, 'from django.urls import path\n'), ((1009, 1055), 'django.urls.path', 'path', (['"""results"""', 'views.results'], {'name': '"""results"""'}), "('results', views.results, name='results')\n", (1013, 1055), False, 'from django.urls import path\n'), ((1129, 1187), 'django.urls.path', 'path', (['"""post_update"""', 'views.post_update'], {'name': '"""post_update"""'}), "('post_update', views.post_update, name='post_update')\n", (1133, 1187), False, 'from django.urls import path\n'), ((1272, 1339), 'django.urls.path', 'path', (['"""profile_photos"""', 'views.profile_photos'], {'name': '"""profile_photos"""'}), "('profile_photos', views.profile_photos, name='profile_photos')\n", (1276, 1339), False, 'from django.urls import path\n'), ((1349, 1425), 'django.urls.path', 'path', (['"""comment_update/<int:id>"""', 'views.comment_update'], {'name': '"""comment_update"""'}), "('comment_update/<int:id>', views.comment_update, name='comment_update')\n", (1353, 1425), False, 'from django.urls import path\n'), ((1519, 1571), 'django.urls.path', 'path', (['"""delete/<int:id>"""', 'views.delete'], {'name': '"""delete"""'}), "('delete/<int:id>', views.delete, name='delete')\n", (1523, 1571), False, 'from django.urls import path\n'), ((1577, 1629), 'django.urls.path', 'path', (['"""favourite"""', 'views.favourite'], {'name': '"""favourite"""'}), "('favourite', views.favourite, name='favourite')\n", (1581, 1629), False, 'from django.urls import path\n'), ((1634, 1704), 'django.urls.path', 'path', (['"""favourite_posts"""', 'views.favourite_posts'], {'name': '"""favourite_posts"""'}), "('favourite_posts', views.favourite_posts, name='favourite_posts')\n", (1638, 1704), False, 'from django.urls import path\n'), ((1929, 1987), 'django.urls.path', 'path', (['"""video_posts"""', 'views.video_posts'], {'name': '"""video_posts"""'}), "('video_posts', views.video_posts, name='video_posts')\n", (1933, 1987), False, 'from django.urls import path\n'), ((1993, 2051), 'django.urls.path', 'path', (['"""user_videos"""', 'views.user_videos'], {'name': '"""user_videos"""'}), "('user_videos', views.user_videos, name='user_videos')\n", (1997, 2051), False, 'from django.urls import path\n')]
# https://medium.com/@kumon/how-to-realize-similarity-search-with-elasticsearch-3dd5641b9adb
# https://docs.aws.amazon.com/opensearch-service/latest/developerguide/knn.html
import sys
import requests
import h5py
import numpy as np
import json
import aiohttp
import asyncio
import time
import httpx
from requests.auth import HTTPBasicAuth
from statistics import mean
# if len(sys.argv) != 2:
# print("Type in the efSearch!")
# sys.exit()
# path = '/tmp/sift-128-euclidean.hdf5.1M' # float dataset
# path = '/tmp/sift-128-euclidean.hdf5' # float dataset
path = '/home/ubuntu/sift-128-euclidean.hdf5' # float dataset
output_csv = '/tmp/sift-es.csv'
# url = 'http://127.0.0.1:9200/sift-index/'
host = 'https://vpc-....ap-southeast-1.es.amazonaws.com/' # single node
# host = 'https://vpc-....ap-southeast-1.es.amazonaws.com/' # two nodes
url = host + 'sift-index/'
requestHeaders = {'content-type': 'application/json'} # https://stackoverflow.com/questions/51378099/content-type-header-not-supported
auth = HTTPBasicAuth('admin', '<PASSWORD>')
# Build an index
#https://stackoverflow.com/questions/17301938/making-a-request-to-a-restful-api-using-python
# PUT sift-index
data = '''{
"settings": {
"index": {
"knn": true,
"knn.space_type": "l2",
"knn.algo_param.m": 6,
"knn.algo_param.ef_construction": 50,
"knn.algo_param.ef_search": 50,
"refresh_interval": -1,
"translog.flush_threshold_size": "10gb",
"number_of_replicas": 0
}
},
"mappings": {
"properties": {
"sift_vector": {
"type": "knn_vector",
"dimension": 128
}
}
}
}'''
# https://medium.com/@kumon/how-to-realize-similarity-search-with-elasticsearch-3dd5641b9adb
response = requests.put(url, data=data, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'))
# response = requests.put(url, data=data, verify=False, headers=requestHeaders, auth=auth)
assert response.status_code==requests.codes.ok
# cluster_url = 'http://127.0.0.1:9200/_cluster/settings'
cluster_url = host + '_cluster/settings'
cluster_data = '''{
"persistent" : {
"knn.algo_param.index_thread_qty": 16
}
}
'''
response = requests.put(cluster_url, data=cluster_data, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'), headers=requestHeaders)
assert response.status_code==requests.codes.ok
# Bulkload into index
bulk_template = '{ "index": { "_index": "sift-index", "_id": "%s" } }\n{ "sift_vector": [%s] }\n'
hf = h5py.File(path, 'r')
for key in hf.keys():
print("A key of hf is %s" % key) #Names of the groups in HDF5 file.
vectors = np.array(hf["train"][:])
num_vectors, dim = vectors.shape
print("num_vectors: %d" % num_vectors)
print("dim: %d" % dim)
bulk_data = ""
start = time.time()
for (id,vector) in enumerate(vectors):
assert len(vector)==dim
vector_str = ""
for num in vector:
vector_str += str(num) + ','
vector_str = vector_str[:-1]
id_str = str(id)
single_bulk_done = bulk_template % (id_str, vector_str)
bulk_data += single_bulk_done
if (id+1) % 100000 == 0:
print(str(id+1))
# POST _bulk
response = requests.put(url + '_bulk', data=bulk_data, auth=HTTPBasicAuth('admin', 'I#<PASSWORD>TAHB'), headers=requestHeaders)
assert response.status_code==requests.codes.ok
bulk_data = ""
end = time.time()
print("Insert Time: %d mins" % ((end - start) / 60.0)) # Unit: min
# refresh_url = 'http://127.0.0.1:9200/sift-index/_settings'
refresh_url = host + 'sift-index/_settings'
refresh_data = '''{
"index" : {
"refresh_interval": "1s"
}
}
'''
response = requests.put(refresh_url, data=refresh_data, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#vu7bTAHB'))
assert response.status_code==requests.codes.ok
# response = requests.post('http://127.0.0.1:9200/sift-index/_refresh', verify=False, headers=requestHeaders)
# assert response.status_code==requests.codes.ok
# merge_url = 'http://127.0.0.1:9200/sift-index/_forcemerge?max_num_segments=1'
merge_url = host + 'sift-index/_forcemerge?max_num_segments=1'
merge_response = requests.post(merge_url, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I#<PASSWORD>'), timeout=600)
assert merge_response.status_code==requests.codes.ok
# warmup_url = 'http://127.0.0.1:9200/_opendistro/_knn/warmup/sift-index'
warmup_url = host + '_opendistro/_knn/warmup/sift-index'
warmup_response = requests.get(warmup_url, headers=requestHeaders, auth=HTTPBasicAuth('admin', 'I<PASSWORD>'))
assert warmup_response.status_code==requests.codes.ok
# Send queries
total_time = 0 # in ms
hits = 0 # for recall calculation
query_template = '''
{
"size": 50,
"query": {"knn": {"sift_vector": {"vector": [%s],"k": 50}}}
}
'''
queries = np.array(hf["test"][:])
nq = len(queries)
neighbors = np.array(hf["neighbors"][:])
# distances = np.array(hf["distances"][:])
num_queries, q_dim = queries.shape
print("num_queries: %d" % num_queries)
print("q_dim: %d" % q_dim)
assert q_dim==dim
ef_search_list = [50, 100, 150, 200, 250, 300]
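# For each ef_search value: update the index setting, run the full query batch 5 times via _msearch,
# then average the timings of the middle runs (discarding warm-up runs) to report QPS and recall@50.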
for ef_search in ef_search_list:
ef_data = '''{
"index": {
"knn.algo_param.ef_search": %d
}
}'''
ef_data = ef_data % ef_search
### Update Index Setting: efSearch
response = requests.put(url + '_settings', data=ef_data, headers=requestHeaders, auth=HTTPBasicAuth('admin', '<PASSWORD>'))
assert response.status_code==requests.codes.ok
total_time_list = []
hits_list = []
for count in range(5):
total_time = 0 # in ms
hits = 0 # for recall calculation
query_template = '''
'''
single_query = '''{}\n{"size": 50, "query": {"knn": {"sift_vector": {"vector": [%s],"k": 50}}}}\n'''
for (id,query) in enumerate(queries):
assert len(query)==dim
query_str = ""
for num in query:
query_str += str(num) + ','
query_str = query_str[:-1]
# GET sift-index/_search
single_query_done = single_query % (query_str)
query_template += single_query_done
query_data = query_template
# print(query_data)
response = requests.get(url + '_msearch', data=query_data, headers=requestHeaders, auth=HTTPBasicAuth('admin', '<PASSWORD>'), stream=True)
assert response.status_code==requests.codes.ok
# print(response.text)
result = json.loads(response.text)
# QPS
total_time = result['took']
# tooks = []
# for i in range(len(queries)):
# for ele in result['responses']:
# tooks.append(int(ele['took']))
for id in range(len(queries)):
# Recall
neighbor_id_from_result = []
for ele in result['responses'][id]['hits']['hits']:
neighbor_id_from_result.append(int(ele['_id']))
assert len(neighbor_id_from_result)==50
# print("neighbor_id_from_result: ")
# print(neighbor_id_from_result)
neighbor_id_gt = neighbors[id][0:50] # topK=50
# print("neighbor_id_gt")
# print(neighbor_id_gt)
hits_q = len(list(set(neighbor_id_from_result) & set(neighbor_id_gt)))
# print("# hits of this query with topk=50: %d" % hits_q)
hits += hits_q
total_time_list.append(total_time)
hits_list.append(hits)
print(total_time_list)
total_time_avg = mean(total_time_list[2:-1])
hits_avg = mean(hits_list)
QPS = 1.0 * nq / (total_time_avg / 1000.0)
recall = 1.0 * hits_avg / (nq * 50)
print(ef_search, QPS, recall)
| [
"statistics.mean",
"json.loads",
"requests.auth.HTTPBasicAuth",
"h5py.File",
"numpy.array",
"time.time"
]
| [((1014, 1050), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""<PASSWORD>"""'], {}), "('admin', '<PASSWORD>')\n", (1027, 1050), False, 'from requests.auth import HTTPBasicAuth\n'), ((2465, 2485), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (2474, 2485), False, 'import h5py\n'), ((2592, 2616), 'numpy.array', 'np.array', (["hf['train'][:]"], {}), "(hf['train'][:])\n", (2600, 2616), True, 'import numpy as np\n'), ((2737, 2748), 'time.time', 'time.time', ([], {}), '()\n', (2746, 2748), False, 'import time\n'), ((3302, 3313), 'time.time', 'time.time', ([], {}), '()\n', (3311, 3313), False, 'import time\n'), ((4703, 4726), 'numpy.array', 'np.array', (["hf['test'][:]"], {}), "(hf['test'][:])\n", (4711, 4726), True, 'import numpy as np\n'), ((4757, 4785), 'numpy.array', 'np.array', (["hf['neighbors'][:]"], {}), "(hf['neighbors'][:])\n", (4765, 4785), True, 'import numpy as np\n'), ((7146, 7173), 'statistics.mean', 'mean', (['total_time_list[2:-1]'], {}), '(total_time_list[2:-1])\n', (7150, 7173), False, 'from statistics import mean\n'), ((7187, 7202), 'statistics.mean', 'mean', (['hits_list'], {}), '(hits_list)\n', (7191, 7202), False, 'from statistics import mean\n'), ((1801, 1837), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""I#vu7bTAHB"""'], {}), "('admin', 'I#vu7bTAHB')\n", (1814, 1837), False, 'from requests.auth import HTTPBasicAuth\n'), ((2229, 2265), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""I#vu7bTAHB"""'], {}), "('admin', 'I#vu7bTAHB')\n", (2242, 2265), False, 'from requests.auth import HTTPBasicAuth\n'), ((3647, 3683), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""I#vu7bTAHB"""'], {}), "('admin', 'I#vu7bTAHB')\n", (3660, 3683), False, 'from requests.auth import HTTPBasicAuth\n'), ((4107, 4145), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""I#<PASSWORD>"""'], {}), "('admin', 'I#<PASSWORD>')\n", (4120, 4145), False, 'from requests.auth import HTTPBasicAuth\n'), ((4417, 4454), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""I<PASSWORD>"""'], {}), "('admin', 'I<PASSWORD>')\n", (4430, 4454), False, 'from requests.auth import HTTPBasicAuth\n'), ((6226, 6251), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (6236, 6251), False, 'import json\n'), ((5270, 5306), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""<PASSWORD>"""'], {}), "('admin', '<PASSWORD>')\n", (5283, 5306), False, 'from requests.auth import HTTPBasicAuth\n'), ((3157, 3199), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""I#<PASSWORD>TAHB"""'], {}), "('admin', 'I#<PASSWORD>TAHB')\n", (3170, 3199), False, 'from requests.auth import HTTPBasicAuth\n'), ((6084, 6120), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""admin"""', '"""<PASSWORD>"""'], {}), "('admin', '<PASSWORD>')\n", (6097, 6120), False, 'from requests.auth import HTTPBasicAuth\n')] |
# coding: utf-8
import math
import dateutil
import dateutil.parser
import json
from ChartBars import Chart
from ChartUpdaterByCCWebsocket import ChartUpdaterByCoincheckWS
from Util import BitcoinUtil
def adjust_price_to_tick(price, tick):
return price - math.fmod(price, tick)
def adjust_amount_to_tick(amount, tick):
return amount - math.fmod(amount, tick)
# a class for one position
class OnePositionTrader(object):
def __init__(self, price_decide_algorithm, api, pair="btc_jpy", use_leverage = True):
self.max_total_position_price_base = 0 # total maximum position size in base currency
self.positioned_price_base = 0 # total position price in base currency (actually paired currency)
self.positioned_value_in_qty = 0 # used only for genbutsu
self.max_free_margin_of_base_currency = 0 # max free margin. we cannot use orders that exceed this margin
self.positions = []
self.position_id_to_sellids = {}
self.got_all_order_ids = []
self.got_close_order_ids = []
self.exist_order_info_list = None
self.exist_close_order_info_list = None
self.last_checked_transaction_id = 0
self.api = api # api: e.g. instance of CoinCheck
self.use_leverage = use_leverage
self.timelimit_to_grouping_transaction = 2 # transactions whose execution times are within this many seconds are treated as the same position (only when use_leverage == False)
self.__pair = pair
self.price_decide_algorithm = price_decide_algorithm
print("PositionTrader: inst=" + str(self) + ", pair=" + str(pair))
@property
def pair(self):
return self.__pair
def get_base_currency(self):
return self.pair.split("_")[1].lower()
def get_qty_currency(self):
return self.pair.split("_")[0].lower()
# set usable jpy (available_margin + reserved_margin + (positioned))
def set_max_total_position_price_base(self, p):
self.set_max_total_position_price_of_base_currency(p)
def set_max_total_position_price_of_base_currency(self, p):
self.max_total_position_price_base = p
def set_max_free_margin_of_base_currency(self, p):
self.max_free_margin_of_base_currency = p
def get_max_total_position_price_base(self):
return self.get_max_total_position_price_of_base_currency()
def get_max_total_position_price_of_base_currency(self):
return self.max_total_position_price_base
def get_positioned_price_base(self):
return self.positioned_price_base
def set_timelimit_to_grouping_transaction(self, timelimit_to_grouping_transaction):
self.timelimit_to_grouping_transaction = timelimit_to_grouping_transaction
# check current status and make new positions according to algorithm
# notice: this method should be called after update_status
def update_new_orders(self, chart, do_not_create_new_order=False):
assert (self.price_decide_algorithm is not None)
position_type = None
target_value = None
stoploss_rate = None
decide_make_ret = self.price_decide_algorithm.decide_make_position_order(chart)
if len(decide_make_ret) == 3:
(position_type, target_value, stoploss_rate) = decide_make_ret
else:
(position_type, target_value) = decide_make_ret
if target_value is None or position_type is None:
# algorithm says this instance should not make order. cancel all
if self.exist_order_info_list is not None:
for exist_order_info in self.exist_order_info_list:
self._cancel_order(exist_order_info["id"])
self.exist_order_info_list = None
return False
# round to possible price
tick = self.api.order.tick_price(self.pair)
target_value = adjust_price_to_tick(target_value, tick)
if stoploss_rate is not None:
stoploss_rate = adjust_price_to_tick(stoploss_rate, tick)
# !!round to possible amount
possible_make_total_price_base_cur = self.get_max_total_position_price_of_base_currency() - self.positioned_price_base
possible_make_total_price_base_cur = min(possible_make_total_price_base_cur, self.max_free_margin_of_base_currency)
amount_tick = self.api.order.tick_amount(self.pair)
possible_amount = 1.0 * possible_make_total_price_base_cur / target_value
possible_amount = adjust_amount_to_tick(possible_amount,amount_tick)
print("possible_create_in_base = %f, want to make amount in base = %f, possible amount = %f" %
(self.get_max_total_position_price_of_base_currency() - self.positioned_price_base,
possible_make_total_price_base_cur, possible_amount))
#print("base_cur = %f, positioned = %f, others = %f" % (self.get_max_total_position_price_of_base_currency(), self.positioned_price_base, self.other_reserved_base,))
#print("target_value = %f, possible_base = %f" % (target_value, possible_make_total_price_base_cur,))
if possible_amount <= 0.000001:
# too few btc
print("want to make (price,amount) = (%f,%f) but too few amount" % (target_value, possible_amount))
return False
if not do_not_create_new_order:
success, new_order_created = self._update_or_create_order(position_type, target_value, possible_amount, stop_loss_rate=stoploss_rate)
return new_order_created
else:
self._cancel_exist_all_buy_orders()
print("algorithm wants to create a new order but DO_NOT_CREATE_NEW flag = true")
return False
# update close orders according to current positions
# this class should be called after update_status
def update_close_orders(self, chart, current_time_timezone_aware):
for position in self.positions:
open_rate = float(position["open_rate"])
amount = float(position["amount"])
created_time = position["created_at_datetime"]
target_value = None
if self.price_decide_algorithm.market_sell_decide_algorithm(chart, open_rate, created_time, current_time_timezone_aware) is True:
# market order close
pass
else:
target_value = self.price_decide_algorithm.sell_price_decide_algorithm(open_rate)
target_value = adjust_price_to_tick(target_value, self.api.order.tick_price(self.pair))
self._update_or_create_close_order(position, target_value)
# interface to update internal position & order status
def update_status(self, valid_position_info, valid_transaction_info, valid_order_info):
# update position/order status (assume: pagenations are already cleared)
self._update_order_id_status(valid_order_info)
if self.use_leverage:
self._update_position_status(valid_position_info)
else:
self._update_transaction_status(valid_transaction_info)
def _update_position_status(self, valid_position_info):
# apply real positions status to this instance
# Leverage (margin) trading only
if not self.use_leverage:
return
"""
position example (array of "data" will be passed)
{
"data": [
{
"id": 10,
"pair": "btc_jpy",
"status": "open",
"created_at": "2015-12-02T05:27:53.000Z",
"closed_at": null,
"open_rate": "43553.0",
"closed_rate": null,
"amount": "1.51347797",
"all_amount": "1.51045705",
"side": "sell",
"pl": "-8490.81029287",
"new_order": {
"id": 23104033,
"side": "sell",
"rate": null,
"amount": null,
"pending_amount": "0",
"status": "complete",
"created_at": "2015-12-02T05:27:52.000Z"
},
"close_orders": [
{
"id": 23755132,
"side": "buy",
"rate": "10000.0",
"amount": "1.0",
"pending_amount": "0.0",
"status": "cancel",
"created_at": "2015-12-05T05:03:56.000Z"
}
]
}
]
}
"""
####
# parse positions
####
self.positions = []
self.position_id_to_sellids = {}
all_positions = valid_position_info
positioned_value_in_base = 0
for position in all_positions:
status = position["status"]
if status != "open":
continue
pair = position["pair"]
if pair != self.pair:
continue
position_id = position["id"]
# check position that is created by the new_order that is self.order_id:
new_order = position["new_order"]
if new_order["status"] == "cancel":
print("new order: " + str(new_order["id"]) + " state is 'cancel'. probably partially contracted and remain is canceled. this position is not ignored")
#continue
new_order_id = new_order["id"]
if new_order_id in self.got_all_order_ids:
# this position is created by this class's order
created_time = dateutil.parser.parse(position["created_at"])
position["created_at_datetime"] = created_time
amount = position["amount"]
all_amount = position["all_amount"]
if all_amount is not None and all_amount < amount:
amount = all_amount
position["amount"] = position["all_amount"] = amount
self.positions.append(position)
open_rate = position["open_rate"]
positioned_value_in_base += float(amount) * float(open_rate)
# check close orders
self.position_id_to_sellids[position_id] = \
list(map(lambda x:x["id"], filter(lambda x:x["status"] != "cancel", position["close_orders"])))
self.positioned_price_base = positioned_value_in_base
def _update_transaction_status(self, valid_transaction_info):
if self.use_leverage:
return
# For spot trading: derives the position state from transaction results. Behaves essentially the same as
# update_position_status but parses a different JSON.
# * Unlike update_position_status, it carries over information from the previous frame
#   (spot trading has no concept of a position).
positions = self.positions
position_id_to_sellids = self.position_id_to_sellids
close_transactions = []
all_transactions = valid_transaction_info
positioned_value_in_qty = self.positioned_value_in_qty
qty_cur = self.get_qty_currency()
base_cur = self.get_base_currency()
last_transaction_id_in_this_frame = self.last_checked_transaction_id
for transaction in all_transactions:
transaction_id = int(transaction["id"]) # transaction_id means position_id
transaction["id"] = transaction_id
# check only new id
if self.last_checked_transaction_id >= transaction_id:
continue
last_transaction_id_in_this_frame = max(last_transaction_id_in_this_frame, transaction_id)
# check pair
this_pair = transaction["pair"]
if this_pair != self.pair:
continue
# check position that is created by the new_order that is self.order_id:
new_order_id = int(transaction["order_id"])
transaction["order_id"] = new_order_id
is_position_transaction = new_order_id in self.got_all_order_ids
is_close_transaction = new_order_id in self.got_close_order_ids
if not is_position_transaction and not is_close_transaction:
continue
# other pair
if qty_cur not in transaction["funds"] or base_cur not in transaction["funds"]:
continue
# this position is created by this class's order
qty_amount = float(transaction["funds"][qty_cur])
transaction["amount"] = transaction["amount"] = qty_amount
transaction["open_rate"] = float(transaction["rate"])
open_rate = float(transaction["open_rate"])
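# funds[<qty currency>] is assumed to be signed (positive for buys, negative for sells), so the sum below accumulates the net open quantity across opening and closing transactions.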
positioned_value_in_qty += float(qty_amount)
created_time = dateutil.parser.parse(transaction["created_at"])
transaction["created_at_datetime"] = created_time
if is_position_transaction:
# check close orders
# Missing a link here would be bad (the order would never get cancelled), so for now link every close order we have
position_id_to_sellids[transaction_id] = []
transaction["close_orders"] = []
positions.append(transaction)
else:
close_transactions.append(transaction)
# in next frame, only transaction_id > self.last_checked_transaction_id will be checked
self.last_checked_transaction_id = last_transaction_id_in_this_frame
print("last_checked_transaction_id = ", self.last_checked_transaction_id)
print("self.exist_close_order_info_list", self.exist_close_order_info_list)
if self.exist_close_order_info_list is not None:
for pos_i, position in enumerate(positions):
transaction_id = position["id"]
position_id_to_sellids[transaction_id] = list(map(lambda x:x["id"], self.exist_close_order_info_list))
position["close_orders"] = self.exist_close_order_info_list
for i, order in enumerate(position["close_orders"]):
order["status"] = "open"
order["side"] = order["order_type"]
if "amount" not in order:
order["amount"] = float(order["pending_amount"])
position["close_orders"][i] = order
positions[pos_i] = position
# round very small value
if abs(positioned_value_in_qty) < self.api.order.min_create_amount(self.pair)*0.1:
positioned_value_in_qty = 0
positions = sorted(positions, key=lambda x:-x["id"]) # order by desc
# concat very near created_at transactions
grouped_positions = self._group_near_transactions(positions)
# remove closed position & update positioned_value_in_jpy
valid_positions, positioned_value_in_base = self._remain_non_closed_transactions(grouped_positions, positioned_value_in_qty)
if abs(positioned_value_in_base) < self.api.order.tick_price(self.pair) * self.api.order.min_create_amount(self.pair) * 0.1:
positioned_value_in_base = 0
# merge position_id_to_sellids
self.position_id_to_sellids = {}
for position in valid_positions:
pos_id = position["id"]
self.position_id_to_sellids[pos_id] = position_id_to_sellids[pos_id]
self.positioned_price_base = positioned_value_in_base
self.positioned_value_in_qty = positioned_value_in_qty
self.position_id_to_sellids = position_id_to_sellids
self.positions = valid_positions
print("position_count=%d, positioned_%s=%f, positioned_%s=%f" % (len(self.positions), base_cur, self.positioned_price_base, qty_cur, self.positioned_value_in_qty,))
        # Walking back through the entire history just to work out what was closed and how much position remains is not realistic.
        # Instead, use the position state that can already be resolved at this point (look at the close order ids; if one exists, the offsetting trade has been filled),
        # erase the already-offset entries (by amount) from the positions accumulated above (including positions left over from the previous frame), treat only what remains as live positions, and consolidate them into one (this is spot trading, so separate ids carry no meaning).
        # Keep the remaining position IDs and the consumed close-order IDs, and from the next frame on only reflect IDs newer than those.
        # However, rounding error accumulates if this runs forever, so once the total in JPY or BTC terms becomes extremely small, round it off and treat it as having no position.
        # Hmm... spot and leverage need rather different bookkeeping, so sharing one class may have been a mistake; this has become messy.
    # Group positions whose execution times are close to each other.
def _group_near_transactions(self, target_transactions):
grouped_positions = []
positions = target_transactions
if len(positions) > 0:
def grouping(desced_position_array):
ret_pos = dict(desced_position_array[0])
total_amount = 0
total_jpy = 0
for p in desced_position_array:
total_amount += p["amount"]
total_jpy += p["amount"] * p["open_rate"]
ret_pos["amount"] = total_amount
ret_pos["open_rate"] = total_jpy / total_amount
return ret_pos
concat_start_index = 0
prev_created_at = positions[0]["created_at_datetime"]
for idx, pos in enumerate(positions):
cur_created_at = pos["created_at_datetime"]
if abs((cur_created_at - prev_created_at).total_seconds()) <= self.timelimit_to_grouping_transaction:
# can group
prev_created_at = cur_created_at
continue
# this position cannot be grouped. make a new group from pos[start_index] - pos[idx-1]
grouped_positions.append(grouping(positions[concat_start_index:idx]))
#print(grouped_positions[-1])
concat_start_index = idx
prev_created_at = cur_created_at
            # flush the remaining positions that have not been grouped yet
grouped_positions.append(grouping(positions[concat_start_index:]))
return grouped_positions
    # Keep only the transactions that have not been closed yet.
def _remain_non_closed_transactions(self, target_transactions, positioned_value_in_qty):
valid_positions = []
remain_qty = positioned_value_in_qty
total_base = 0
for position in target_transactions:
if remain_qty <= 0: break
amount = position["amount"]
if remain_qty >= amount:
remain_qty -= amount
else:
position["amount"] = remain_qty
remain_qty = 0
valid_positions.append(position)
total_base += position["amount"] * position["open_rate"]
return valid_positions, total_base
def _update_order_id_status(self, valid_order_info):
####
# parse orders
####
"""
orders example (array of "orders" will be passed)
{
"success": true,
"orders": [
{
"id": 202835,
"order_type": "buy",
"rate": 26890,
"pair": "btc_jpy",
"pending_amount": "0.5527",
"pending_market_buy_amount": null,
"stop_loss_rate": null,
"created_at": "2015-01-10T05:55:38.000Z"
},
{
"id": 202836,
"order_type": "sell",
"rate": 26990,
"pair": "btc_jpy",
"pending_amount": "0.77",
"pending_market_buy_amount": null,
"stop_loss_rate": null,
"created_at": "2015-01-10T05:55:38.000Z"
},
{
"id": 38632107,
"order_type": "buy",
"rate": null,
"pair": "btc_jpy",
"pending_amount": null,
"pending_market_buy_amount": "10000.0",
"stop_loss_rate": "50000.0",
"created_at": "2016-02-23T12:14:50.000Z"
}
]
}
"""
#exist_order_ids = list(map(lambda x:x["id"], valid_order_info))
exist_orders = []
exist_close_orders = []
other_orders = []
for idx, order in enumerate(valid_order_info):
order_id = order["id"]
order_pair = order["pair"]
is_added = False
if order_pair == self.pair:
if order_id in self.got_all_order_ids:
is_added = True
exist_orders.append(order)
elif order_id in self.got_close_order_ids:
is_added = True
exist_close_orders.append(order)
if not is_added:
other_orders.append(order)
print("exist_create_orders", exist_orders)
print("exist_close_orders", exist_close_orders)
self.exist_order_info_list = exist_orders if len(exist_orders) > 0 else None
self.exist_close_order_info_list = exist_close_orders if len(exist_close_orders) > 0 else None
#self.other_reserved_base = 0
#if not self.use_leverage:
# for o in other_orders:
# if o["order_type"] == "buy":
# self.other_reserved_base += float(o["pending_amount"]) * float(o["rate"])
# returns: (is_success, is_new_order_created)
def _update_or_create_order(self, position_type, target_value, possible_qty, stop_loss_rate = None):
assert (self.api is not None)
        # the order list format is the same for spot and leverage trading
if self.exist_order_info_list is not None:
# check the same value or not
if len(self.exist_order_info_list) == 1:
exist_order_info = self.exist_order_info_list[0]
cur_rate = exist_order_info["rate"] if "rate" in exist_order_info else None
# get current stoploss
cur_stoploss = exist_order_info["stop_loss_rate"] if "stop_loss_rate" in exist_order_info else None
cur_stoploss_float_or_none = None
if cur_stoploss is not None:
cur_stoploss_float_or_none = float(cur_stoploss)
target_stoploss_float_or_none = None
if stop_loss_rate is not None:
target_stoploss_float_or_none = float(stop_loss_rate)
cur_amount = None
if "amount" in exist_order_info:
cur_amount = exist_order_info["amount"]
elif "pending_amount" in exist_order_info:
cur_amount = exist_order_info["pending_amount"]
order_type = None
if "order_type" in exist_order_info:
if exist_order_info["order_type"] == "buy" or\
exist_order_info["order_type"] == "leverage_buy":
order_type = "long"
if exist_order_info["order_type"] == "sell" or \
exist_order_info["order_type"] == "leverage_sell":
order_type = "short"
if cur_rate is not None and cur_amount is not None and order_type is not None:
if abs(float(cur_rate)-float(target_value)) < 0.00001 and \
abs(float(cur_amount)-float(possible_qty)) < 0.00001 and \
cur_stoploss_float_or_none == target_stoploss_float_or_none and \
order_type == position_type:
# same order. do nothing
print("You already ordered this order: rate=%.1f, amount=%f, stoploss_rate=%s, position_type=%s" % (target_value, possible_qty, str(stop_loss_rate), position_type,))
return True, False
# cancel all exist orders
if not self._cancel_exist_all_buy_orders():
return False, False
# check minimum btc
min_qty = self.api.order.min_create_amount(self.pair)
if possible_qty < min_qty:
print("Minimum order btc = %f, you requested = %f" % (min_qty, possible_qty,))
return False, False
# make new order
"""
ret val example
"success": true,
"id": 12345,
"rate": "30010.0",
"amount": "1.3",
"order_type": "sell",
"stop_loss_rate": null,
"pair": "btc_jpy",
"created_at": "2015-01-10T05:55:38.000Z"
"""
is_long = position_type == "long"
order_type = 'leverage_buy' if is_long else 'leverage_sell'
if not self.use_leverage:
order_type = 'buy' if is_long else 'sell'
order = {
'rate': "%.8f" % target_value,
'amount': "%.8f" % possible_qty,
'order_type': order_type,
'pair': self.pair
}
# not correct
# this "stop_loss_rate" means: if a value >= stop_loss_rate, sashine will be placed at "rate"
if stop_loss_rate is not None:
order["stop_loss_rate"] = stop_loss_rate
ret_str = self.api.order.create(order)
ret = None
if ret_str is not None:
try:
ret = json.loads(ret_str)
except:
print("failed to parse api.order.create result")
try:
print(ret_str)
except Exception as e:
print("failed to show returned json str")
print(e)
if ret is None or ret["success"] is not True or "id" not in ret:
print("Failed to create order!!")
try:
print(ret_str)
except Exception as e:
print("failed to show returned json str")
print(e)
return False, False
self.exist_order_info_list = [ret]
self.got_all_order_ids.append(ret["id"])
# remove very old orders
if len(self.got_all_order_ids) > 500:
self.got_all_order_ids = self.got_all_order_ids[-500:]
print("order success!", ret_str)
return True, True
def _cancel_exist_all_buy_orders(self):
failed_to_cancel = False
exist_order_i = 0
while exist_order_i < len(self.exist_order_info_list):
exist_order_info = self.exist_order_info_list[exist_order_i]
if self._cancel_order(exist_order_info["id"]) is False:
# something error happened!!
print("order cancel failed %d even if there is a valid order in internal state" % (exist_order_info["id"],))
failed_to_cancel = True
del self.exist_order_info_list[exist_order_i]
else:
exist_order_i += 1
if len(self.exist_order_info_list) == 0:
self.exist_order_info_list = None
if failed_to_cancel:
return False
return True
# target_value: sashine value. if None, market-make
def _update_or_create_close_order(self, position, target_value):
position_id = position["id"]
if position_id not in self.position_id_to_sellids:
return False
sell_qty = float(position["amount"])
sell_ids = self.position_id_to_sellids[position_id]
position_type = position["side"]
# convert position type name
if position_type == "buy": position_type = "long"
if position_type == "sell": position_type = "short"
is_close_long = True
if position_type == "long": is_close_long = True
if position_type == "short": is_close_long = False
# check exist sell-orders. if target value and amount are completely same, do not pass new order
valid_close_orders = list(filter(lambda x:x["status"] != "cancel" and x["id"] in sell_ids, position["close_orders"]))
print("valid_close_order count = %d" % len(valid_close_orders))
if len(valid_close_orders) == 1 and target_value is not None:
# check the order is already created on exchanger
valid_close_order = valid_close_orders[0]
print("your order: rate=%f, amount=%f" % (target_value, sell_qty,))
print("valid_close_order[0]:")
print(valid_close_order)
rate = None
if "rate" in valid_close_order:
rate = float(valid_close_order["rate"])
amount = valid_close_order["amount"]
is_cur_close_long = False
if "side" in valid_close_order:
is_cur_close_long = valid_close_order["side"] == "sell"
elif "order_type" in valid_close_order:
is_cur_close_long = valid_close_order["order_type"] == "sell"
if abs(float(rate)-float(target_value)) < 0.00001 and \
abs(float(amount)-float(sell_qty)) < 0.00001 and \
is_close_long == is_cur_close_long:
# completely same!!
print("requested close order is already ordered on server:")
print(" position id:%s, target_value:%s, amount:%s, close_long:%s" % (str(position_id), str(target_value), str(amount), str(is_cur_close_long),))
return True
min_qty = self.api.order.min_create_amount(self.pair)
if sell_qty < min_qty:
qty_cur = self.get_qty_currency()
print("Minimum order %s = %f, you requested = %f" % (qty_cur, min_qty, sell_qty,))
return False
# cancel all
for sell_id in sell_ids:
self._cancel_order(sell_id)
self.position_id_to_sellids[position_id] = []
# make new order
order = {}
if self.use_leverage:
order = {
'amount': '%.8f' % BitcoinUtil.roundBTCby1satoshi(sell_qty),
'position_id': position_id,
'order_type': 'close_long' if is_close_long else 'close_short',
'pair': 'btc_jpy',
}
if target_value is not None:
order['rate'] = target_value
else:
# if not leverage order, close order is always "sell"
if not is_close_long:
print("normal order cannot make short position!")
print("you passed close 'short' for normal order")
return False
order = {
'amount': '%.8f' % BitcoinUtil.roundBTCby1satoshi(sell_qty),
'order_type': 'sell',
'pair': self.pair,
}
if target_value is None:
# market_sell
order['order_type'] = "market_sell"
else:
order['rate'] = target_value
ret = self.api.order.create(order)
ret_str = ret
if ret is not None:
try:
ret = json.loads(ret)
except:
print("failed to parse close_long order result")
try:
print(ret_str)
except Exception as e:
print("failed to print error")
print(e)
if ret is None or ret["success"] is not True or "id" not in ret or ret["id"] is None:
print("sell order canceled but failed to create new sell order!!: position id: %s" % (str(position_id),))
try:
print(ret_str)
except Exception as e:
print("failed to print error")
print(e)
return False
sell_ids = [ret["id"]]
self.position_id_to_sellids[position_id] = sell_ids
self.got_close_order_ids.append(ret["id"])
if len(self.got_close_order_ids) > 500:
self.got_close_order_ids = self.got_close_order_ids[-500:]
return True
def _cancel_order(self, order_id):
# call apis for current orders
if order_id is None:
print("order is already canceled")
return True
# do something
ret_str = self.api.order.cancel({"id": order_id, "pair": self.pair})
ret = None
if ret_str is not None:
try:
ret = json.loads(ret_str)
except:
print("failed to parse cancel order ret str")
try:
print(ret_str)
except Exception as e:
print("failed to print returned error json")
print(e)
if ret is None or ret["success"] is not True or "id" not in ret:
print("Failed to cancel order %s: %s" % (str(order_id), str(ret_str),))
return False
return True
| [
"dateutil.parser.parse",
"json.loads",
"Util.BitcoinUtil.roundBTCby1satoshi",
"math.fmod"
]
| [((261, 283), 'math.fmod', 'math.fmod', (['price', 'tick'], {}), '(price, tick)\n', (270, 283), False, 'import math\n'), ((346, 369), 'math.fmod', 'math.fmod', (['amount', 'tick'], {}), '(amount, tick)\n', (355, 369), False, 'import math\n'), ((12727, 12775), 'dateutil.parser.parse', 'dateutil.parser.parse', (["transaction['created_at']"], {}), "(transaction['created_at'])\n", (12748, 12775), False, 'import dateutil\n'), ((9614, 9659), 'dateutil.parser.parse', 'dateutil.parser.parse', (["position['created_at']"], {}), "(position['created_at'])\n", (9635, 9659), False, 'import dateutil\n'), ((25075, 25094), 'json.loads', 'json.loads', (['ret_str'], {}), '(ret_str)\n', (25085, 25094), False, 'import json\n'), ((30729, 30744), 'json.loads', 'json.loads', (['ret'], {}), '(ret)\n', (30739, 30744), False, 'import json\n'), ((32052, 32071), 'json.loads', 'json.loads', (['ret_str'], {}), '(ret_str)\n', (32062, 32071), False, 'import json\n'), ((29649, 29689), 'Util.BitcoinUtil.roundBTCby1satoshi', 'BitcoinUtil.roundBTCby1satoshi', (['sell_qty'], {}), '(sell_qty)\n', (29679, 29689), False, 'from Util import BitcoinUtil\n'), ((30285, 30325), 'Util.BitcoinUtil.roundBTCby1satoshi', 'BitcoinUtil.roundBTCby1satoshi', (['sell_qty'], {}), '(sell_qty)\n', (30315, 30325), False, 'from Util import BitcoinUtil\n')] |
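# Standalone sketch of the time-proximity grouping described in the comments above; the dict shape
# mirrors the positions built there, and the 60-second window is a made-up stand-in for
# self.timelimit_to_grouping_transaction rather than the class's real setting.
def group_near_transactions_sketch(positions, window_seconds=60):
    if not positions:
        return []
    groups, current = [], [positions[0]]
    for pos in positions[1:]:
        gap = abs((pos["created_at_datetime"] - current[-1]["created_at_datetime"]).total_seconds())
        if gap <= window_seconds:
            current.append(pos)  # close enough in time: treat as part of the same fill
        else:
            groups.append(current)
            current = [pos]
    groups.append(current)
    merged = []
    for group in groups:
        amount = sum(p["amount"] for p in group)
        open_rate = sum(p["amount"] * p["open_rate"] for p in group) / amount  # volume-weighted entry price
        merged.append({"amount": amount, "open_rate": open_rate})
    return merged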
import unittest
from test import test_support
import base64
class LegacyBase64TestCase(unittest.TestCase):
def test_encodestring(self):
eq = self.assertEqual
eq(base64.encodestring("www.python.org"), "d3d3LnB5dGhvbi5vcmc=\n")
eq(base64.encodestring("a"), "YQ==\n")
eq(base64.encodestring("ab"), "YWI=\n")
eq(base64.encodestring("abc"), "YWJj\n")
eq(base64.encodestring(""), "")
eq(base64.encodestring("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}"),
"<KEY>"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
# Non-bytes
eq(base64.encodestring(bytearray('abc')), 'YWJj\n')
def test_decodestring(self):
eq = self.assertEqual
eq(base64.decodestring("d3d3LnB5dGhvbi5vcmc=\n"), "www.python.org")
eq(base64.decodestring("YQ==\n"), "a")
eq(base64.decodestring("YWI=\n"), "ab")
eq(base64.decodestring("YWJj\n"), "abc")
eq(base64.decodestring("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}")
eq(base64.decodestring(''), '')
# Non-bytes
eq(base64.decodestring(bytearray("YWJj\n")), "abc")
def test_encode(self):
eq = self.assertEqual
from cStringIO import StringIO
infp = StringIO('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789!@#0^&*();:<>,. []{}')
outfp = StringIO()
base64.encode(infp, outfp)
eq(outfp.getvalue(),
'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')
def test_decode(self):
from cStringIO import StringIO
infp = StringIO('d3d3LnB5dGhvbi5vcmc=')
outfp = StringIO()
base64.decode(infp, outfp)
self.assertEqual(outfp.getvalue(), 'www.python.org')
class BaseXYTestCase(unittest.TestCase):
def test_b64encode(self):
eq = self.assertEqual
# Test default alphabet
eq(base64.b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
eq(base64.b64encode('\x00'), 'AA==')
eq(base64.b64encode("a"), "YQ==")
eq(base64.b64encode("ab"), "YWI=")
eq(base64.b64encode("abc"), "YWJj")
eq(base64.b64encode(""), "")
eq(base64.b64encode("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}"),
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with arbitrary alternative characters
eq(base64.b64encode('\xd3V\xbeo\xf7\x1d', altchars='*$'), '01a*b$cd')
# Non-bytes
eq(base64.b64encode(bytearray('abcd')), 'YWJjZA==')
self.assertRaises(TypeError, base64.b64encode,
'\xd3V\xbeo\xf7\x1d', altchars=bytearray('*$'))
# Test standard alphabet
eq(base64.standard_b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
eq(base64.standard_b64encode("a"), "YQ==")
eq(base64.standard_b64encode("ab"), "YWI=")
eq(base64.standard_b64encode("abc"), "YWJj")
eq(base64.standard_b64encode(""), "")
eq(base64.standard_b64encode("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}"),
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Non-bytes
eq(base64.standard_b64encode(bytearray('abcd')), 'YWJjZA==')
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode('\xd3V\xbeo\xf7\x1d'), '01a-b_cd')
# Non-bytes
eq(base64.urlsafe_b64encode(bytearray('\xd3V\xbeo\xf7\x1d')), '01a-b_cd')
def test_b64decode(self):
eq = self.assertEqual
eq(base64.b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
eq(base64.b64decode('AA=='), '\x00')
eq(base64.b64decode("YQ=="), "a")
eq(base64.b64decode("YWI="), "ab")
eq(base64.b64decode("YWJj"), "abc")
eq(base64.b64decode("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}")
eq(base64.b64decode(''), '')
# Test with arbitrary alternative characters
eq(base64.b64decode('01a*b$cd', altchars='*$'), '\xd3V\xbeo\xf7\x1d')
# Non-bytes
eq(base64.b64decode(bytearray("YWJj")), "abc")
# Test standard alphabet
eq(base64.standard_b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
eq(base64.standard_b64decode("YQ=="), "a")
eq(base64.standard_b64decode("YWI="), "ab")
eq(base64.standard_b64decode("YWJj"), "abc")
eq(base64.standard_b64decode(""), "")
eq(base64.standard_b64decode("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}")
# Non-bytes
eq(base64.standard_b64decode(bytearray("YWJj")), "abc")
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64decode('01a-b_cd'), '\xd3V\xbeo\xf7\x1d')
# Non-bytes
eq(base64.urlsafe_b64decode(bytearray('01a-b_cd')), '\xd3V\xbeo\xf7\x1d')
def test_b64decode_error(self):
self.assertRaises(TypeError, base64.b64decode, 'abc')
def test_b32encode(self):
eq = self.assertEqual
eq(base64.b32encode(''), '')
eq(base64.b32encode('\x00'), 'AA======')
eq(base64.b32encode('a'), 'ME======')
eq(base64.b32encode('ab'), 'MFRA====')
eq(base64.b32encode('abc'), 'MFRGG===')
eq(base64.b32encode('abcd'), 'MFRGGZA=')
eq(base64.b32encode('abcde'), 'MFRGGZDF')
# Non-bytes
eq(base64.b32encode(bytearray('abcd')), 'MFRGGZA=')
def test_b32decode(self):
eq = self.assertEqual
eq(base64.b32decode(''), '')
eq(base64.b32decode('AA======'), '\x00')
eq(base64.b32decode('ME======'), 'a')
eq(base64.b32decode('MFRA===='), 'ab')
eq(base64.b32decode('MFRGG==='), 'abc')
eq(base64.b32decode('MFRGGZA='), 'abcd')
eq(base64.b32decode('MFRGGZDF'), 'abcde')
# Non-bytes
self.assertRaises(TypeError, base64.b32decode, bytearray('MFRGG==='))
def test_b32decode_casefold(self):
eq = self.assertEqual
eq(base64.b32decode('', True), '')
eq(base64.b32decode('ME======', True), 'a')
eq(base64.b32decode('MFRA====', True), 'ab')
eq(base64.b32decode('MFRGG===', True), 'abc')
eq(base64.b32decode('MFRGGZA=', True), 'abcd')
eq(base64.b32decode('MFRGGZDF', True), 'abcde')
# Lower cases
eq(base64.b32decode('me======', True), 'a')
eq(base64.b32decode('mfra====', True), 'ab')
eq(base64.b32decode('mfrgg===', True), 'abc')
eq(base64.b32decode('mfrggza=', True), 'abcd')
eq(base64.b32decode('mfrggzdf', True), 'abcde')
# Expected exceptions
self.assertRaises(TypeError, base64.b32decode, 'me======')
# Mapping zero and one
eq(base64.b32decode('MLO23456'), 'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('M1023456', map01='L'), 'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('M1023456', map01='I'), 'b\x1d\xad\xf3\xbe')
def test_b32decode_error(self):
self.assertRaises(TypeError, base64.b32decode, 'abc')
self.assertRaises(TypeError, base64.b32decode, 'ABCDEF==')
def test_b16encode(self):
eq = self.assertEqual
eq(base64.b16encode('\x01\x02\xab\xcd\xef'), '0102ABCDEF')
eq(base64.b16encode('\x00'), '00')
# Non-bytes
eq(base64.b16encode(bytearray('\x01\x02\xab\xcd\xef')), '0102ABCDEF')
def test_b16decode(self):
eq = self.assertEqual
eq(base64.b16decode('0102ABCDEF'), '\x01\x02\xab\xcd\xef')
eq(base64.b16decode('00'), '\x00')
# Lower case is not allowed without a flag
self.assertRaises(TypeError, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode('0102abcdef', True), '\x01\x02\xab\xcd\xef')
# Non-bytes
eq(base64.b16decode(bytearray("0102ABCDEF")), '\x01\x02\xab\xcd\xef')
def test_main():
test_support.run_unittest(__name__)
if __name__ == '__main__':
test_main()
| [
"cStringIO.StringIO",
"base64.urlsafe_b64decode",
"base64.urlsafe_b64encode",
"base64.b32decode",
"base64.b16decode",
"base64.b64encode",
"base64.b64decode",
"test.test_support.run_unittest",
"base64.standard_b64decode",
"base64.encode",
"base64.encodestring",
"base64.standard_b64encode",
"base64.decodestring",
"base64.b16encode",
"base64.decode",
"base64.b32encode"
]
| [((9275, 9310), 'test.test_support.run_unittest', 'test_support.run_unittest', (['__name__'], {}), '(__name__)\n', (9300, 9310), False, 'from test import test_support\n'), ((1685, 1789), 'cStringIO.StringIO', 'StringIO', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}"""'], {}), "(\n 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'\n )\n", (1693, 1789), False, 'from cStringIO import StringIO\n'), ((1850, 1860), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (1858, 1860), False, 'from cStringIO import StringIO\n'), ((1869, 1895), 'base64.encode', 'base64.encode', (['infp', 'outfp'], {}), '(infp, outfp)\n', (1882, 1895), False, 'import base64\n'), ((2166, 2198), 'cStringIO.StringIO', 'StringIO', (['"""d3d3LnB5dGhvbi5vcmc="""'], {}), "('d3d3LnB5dGhvbi5vcmc=')\n", (2174, 2198), False, 'from cStringIO import StringIO\n'), ((2215, 2225), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (2223, 2225), False, 'from cStringIO import StringIO\n'), ((2234, 2260), 'base64.decode', 'base64.decode', (['infp', 'outfp'], {}), '(infp, outfp)\n', (2247, 2260), False, 'import base64\n'), ((185, 222), 'base64.encodestring', 'base64.encodestring', (['"""www.python.org"""'], {}), "('www.python.org')\n", (204, 222), False, 'import base64\n'), ((261, 285), 'base64.encodestring', 'base64.encodestring', (['"""a"""'], {}), "('a')\n", (280, 285), False, 'import base64\n'), ((308, 333), 'base64.encodestring', 'base64.encodestring', (['"""ab"""'], {}), "('ab')\n", (327, 333), False, 'import base64\n'), ((356, 382), 'base64.encodestring', 'base64.encodestring', (['"""abc"""'], {}), "('abc')\n", (375, 382), False, 'import base64\n'), ((405, 428), 'base64.encodestring', 'base64.encodestring', (['""""""'], {}), "('')\n", (424, 428), False, 'import base64\n'), ((445, 560), 'base64.encodestring', 'base64.encodestring', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}"""'], {}), "(\n 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. 
[]{}'\n )\n", (464, 560), False, 'import base64\n'), ((899, 944), 'base64.decodestring', 'base64.decodestring', (['"""d3d3LnB5dGhvbi5vcmc=\n"""'], {}), "('d3d3LnB5dGhvbi5vcmc=\\n')\n", (918, 944), False, 'import base64\n'), ((975, 1004), 'base64.decodestring', 'base64.decodestring', (['"""YQ==\n"""'], {}), "('YQ==\\n')\n", (994, 1004), False, 'import base64\n'), ((1022, 1051), 'base64.decodestring', 'base64.decodestring', (['"""YWI=\n"""'], {}), "('YWI=\\n')\n", (1041, 1051), False, 'import base64\n'), ((1070, 1099), 'base64.decodestring', 'base64.decodestring', (['"""YWJj\n"""'], {}), "('YWJj\\n')\n", (1089, 1099), False, 'import base64\n'), ((1119, 1270), 'base64.decodestring', 'base64.decodestring', (['"""YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"""'], {}), '(\n """YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"""\n )\n', (1138, 1270), False, 'import base64\n'), ((1464, 1487), 'base64.decodestring', 'base64.decodestring', (['""""""'], {}), "('')\n", (1483, 1487), False, 'import base64\n'), ((2470, 2504), 'base64.b64encode', 'base64.b64encode', (['"""www.python.org"""'], {}), "('www.python.org')\n", (2486, 2504), False, 'import base64\n'), ((2541, 2565), 'base64.b64encode', 'base64.b64encode', (["'\\x00'"], {}), "('\\x00')\n", (2557, 2565), False, 'import base64\n'), ((2586, 2607), 'base64.b64encode', 'base64.b64encode', (['"""a"""'], {}), "('a')\n", (2602, 2607), False, 'import base64\n'), ((2628, 2650), 'base64.b64encode', 'base64.b64encode', (['"""ab"""'], {}), "('ab')\n", (2644, 2650), False, 'import base64\n'), ((2671, 2694), 'base64.b64encode', 'base64.b64encode', (['"""abc"""'], {}), "('abc')\n", (2687, 2694), False, 'import base64\n'), ((2715, 2735), 'base64.b64encode', 'base64.b64encode', (['""""""'], {}), "('')\n", (2731, 2735), False, 'import base64\n'), ((2752, 2864), 'base64.b64encode', 'base64.b64encode', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}"""'], {}), "(\n 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}'\n )\n", (2768, 2864), False, 'import base64\n'), ((3137, 3181), 'base64.b64encode', 'base64.b64encode', (['"""ÓV¾o÷\x1d"""'], {'altchars': '"""*$"""'}), "('ÓV¾o÷\\x1d', altchars='*$')\n", (3153, 3181), False, 'import base64\n'), ((3457, 3500), 'base64.standard_b64encode', 'base64.standard_b64encode', (['"""www.python.org"""'], {}), "('www.python.org')\n", (3482, 3500), False, 'import base64\n'), ((3537, 3567), 'base64.standard_b64encode', 'base64.standard_b64encode', (['"""a"""'], {}), "('a')\n", (3562, 3567), False, 'import base64\n'), ((3588, 3619), 'base64.standard_b64encode', 'base64.standard_b64encode', (['"""ab"""'], {}), "('ab')\n", (3613, 3619), False, 'import base64\n'), ((3640, 3672), 'base64.standard_b64encode', 'base64.standard_b64encode', (['"""abc"""'], {}), "('abc')\n", (3665, 3672), False, 'import base64\n'), ((3693, 3722), 'base64.standard_b64encode', 'base64.standard_b64encode', (['""""""'], {}), "('')\n", (3718, 3722), False, 'import base64\n'), ((3739, 3860), 'base64.standard_b64encode', 'base64.standard_b64encode', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}"""'], {}), "(\n 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. 
[]{}'\n )\n", (3764, 3860), False, 'import base64\n'), ((4241, 4278), 'base64.urlsafe_b64encode', 'base64.urlsafe_b64encode', (['"""ÓV¾o÷\x1d"""'], {}), "('ÓV¾o÷\\x1d')\n", (4265, 4278), False, 'import base64\n'), ((4475, 4515), 'base64.b64decode', 'base64.b64decode', (['"""d3d3LnB5dGhvbi5vcmc="""'], {}), "('d3d3LnB5dGhvbi5vcmc=')\n", (4491, 4515), False, 'import base64\n'), ((4546, 4570), 'base64.b64decode', 'base64.b64decode', (['"""AA=="""'], {}), "('AA==')\n", (4562, 4570), False, 'import base64\n'), ((4591, 4615), 'base64.b64decode', 'base64.b64decode', (['"""YQ=="""'], {}), "('YQ==')\n", (4607, 4615), False, 'import base64\n'), ((4633, 4657), 'base64.b64decode', 'base64.b64decode', (['"""YWI="""'], {}), "('YWI=')\n", (4649, 4657), False, 'import base64\n'), ((4676, 4700), 'base64.b64decode', 'base64.b64decode', (['"""YWJj"""'], {}), "('YWJj')\n", (4692, 4700), False, 'import base64\n'), ((4720, 4867), 'base64.b64decode', 'base64.b64decode', (['"""YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="""'], {}), '(\n """YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="""\n )\n', (4736, 4867), False, 'import base64\n'), ((5054, 5074), 'base64.b64decode', 'base64.b64decode', (['""""""'], {}), "('')\n", (5070, 5074), False, 'import base64\n'), ((5144, 5187), 'base64.b64decode', 'base64.b64decode', (['"""01a*b$cd"""'], {'altchars': '"""*$"""'}), "('01a*b$cd', altchars='*$')\n", (5160, 5187), False, 'import base64\n'), ((5330, 5379), 'base64.standard_b64decode', 'base64.standard_b64decode', (['"""d3d3LnB5dGhvbi5vcmc="""'], {}), "('d3d3LnB5dGhvbi5vcmc=')\n", (5355, 5379), False, 'import base64\n'), ((5410, 5443), 'base64.standard_b64decode', 'base64.standard_b64decode', (['"""YQ=="""'], {}), "('YQ==')\n", (5435, 5443), False, 'import base64\n'), ((5461, 5494), 'base64.standard_b64decode', 'base64.standard_b64decode', (['"""YWI="""'], {}), "('YWI=')\n", (5486, 5494), False, 'import base64\n'), ((5513, 5546), 'base64.standard_b64decode', 'base64.standard_b64decode', (['"""YWJj"""'], {}), "('YWJj')\n", (5538, 5546), False, 'import base64\n'), ((5566, 5595), 'base64.standard_b64decode', 'base64.standard_b64decode', (['""""""'], {}), "('')\n", (5591, 5595), False, 'import base64\n'), ((5612, 5763), 'base64.standard_b64decode', 'base64.standard_b64decode', (['"""YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="""'], {}), "(\n 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=='\n )\n", (5637, 5763), False, 'import base64\n'), ((6109, 6145), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (['"""01a-b_cd"""'], {}), "('01a-b_cd')\n", (6133, 6145), False, 'import base64\n'), ((6442, 6462), 'base64.b32encode', 'base64.b32encode', (['""""""'], {}), "('')\n", (6458, 6462), False, 'import base64\n'), ((6479, 6503), 'base64.b32encode', 'base64.b32encode', (["'\\x00'"], {}), "('\\x00')\n", (6495, 6503), False, 'import base64\n'), ((6528, 6549), 'base64.b32encode', 'base64.b32encode', (['"""a"""'], {}), "('a')\n", (6544, 6549), False, 'import base64\n'), ((6574, 6596), 'base64.b32encode', 'base64.b32encode', (['"""ab"""'], {}), "('ab')\n", (6590, 6596), False, 'import base64\n'), ((6621, 6644), 'base64.b32encode', 'base64.b32encode', (['"""abc"""'], {}), "('abc')\n", (6637, 6644), False, 'import base64\n'), ((6669, 6693), 'base64.b32encode', 
'base64.b32encode', (['"""abcd"""'], {}), "('abcd')\n", (6685, 6693), False, 'import base64\n'), ((6718, 6743), 'base64.b32encode', 'base64.b32encode', (['"""abcde"""'], {}), "('abcde')\n", (6734, 6743), False, 'import base64\n'), ((6909, 6929), 'base64.b32decode', 'base64.b32decode', (['""""""'], {}), "('')\n", (6925, 6929), False, 'import base64\n'), ((6946, 6974), 'base64.b32decode', 'base64.b32decode', (['"""AA======"""'], {}), "('AA======')\n", (6962, 6974), False, 'import base64\n'), ((6995, 7023), 'base64.b32decode', 'base64.b32decode', (['"""ME======"""'], {}), "('ME======')\n", (7011, 7023), False, 'import base64\n'), ((7041, 7069), 'base64.b32decode', 'base64.b32decode', (['"""MFRA===="""'], {}), "('MFRA====')\n", (7057, 7069), False, 'import base64\n'), ((7088, 7116), 'base64.b32decode', 'base64.b32decode', (['"""MFRGG==="""'], {}), "('MFRGG===')\n", (7104, 7116), False, 'import base64\n'), ((7136, 7164), 'base64.b32decode', 'base64.b32decode', (['"""MFRGGZA="""'], {}), "('MFRGGZA=')\n", (7152, 7164), False, 'import base64\n'), ((7185, 7213), 'base64.b32decode', 'base64.b32decode', (['"""MFRGGZDF"""'], {}), "('MFRGGZDF')\n", (7201, 7213), False, 'import base64\n'), ((7403, 7429), 'base64.b32decode', 'base64.b32decode', (['""""""', '(True)'], {}), "('', True)\n", (7419, 7429), False, 'import base64\n'), ((7446, 7480), 'base64.b32decode', 'base64.b32decode', (['"""ME======"""', '(True)'], {}), "('ME======', True)\n", (7462, 7480), False, 'import base64\n'), ((7498, 7532), 'base64.b32decode', 'base64.b32decode', (['"""MFRA===="""', '(True)'], {}), "('MFRA====', True)\n", (7514, 7532), False, 'import base64\n'), ((7551, 7585), 'base64.b32decode', 'base64.b32decode', (['"""MFRGG==="""', '(True)'], {}), "('MFRGG===', True)\n", (7567, 7585), False, 'import base64\n'), ((7605, 7639), 'base64.b32decode', 'base64.b32decode', (['"""MFRGGZA="""', '(True)'], {}), "('MFRGGZA=', True)\n", (7621, 7639), False, 'import base64\n'), ((7660, 7694), 'base64.b32decode', 'base64.b32decode', (['"""MFRGGZDF"""', '(True)'], {}), "('MFRGGZDF', True)\n", (7676, 7694), False, 'import base64\n'), ((7738, 7772), 'base64.b32decode', 'base64.b32decode', (['"""me======"""', '(True)'], {}), "('me======', True)\n", (7754, 7772), False, 'import base64\n'), ((7790, 7824), 'base64.b32decode', 'base64.b32decode', (['"""mfra===="""', '(True)'], {}), "('mfra====', True)\n", (7806, 7824), False, 'import base64\n'), ((7843, 7877), 'base64.b32decode', 'base64.b32decode', (['"""mfrgg==="""', '(True)'], {}), "('mfrgg===', True)\n", (7859, 7877), False, 'import base64\n'), ((7897, 7931), 'base64.b32decode', 'base64.b32decode', (['"""mfrggza="""', '(True)'], {}), "('mfrggza=', True)\n", (7913, 7931), False, 'import base64\n'), ((7952, 7986), 'base64.b32decode', 'base64.b32decode', (['"""mfrggzdf"""', '(True)'], {}), "('mfrggzdf', True)\n", (7968, 7986), False, 'import base64\n'), ((8136, 8164), 'base64.b32decode', 'base64.b32decode', (['"""MLO23456"""'], {}), "('MLO23456')\n", (8152, 8164), False, 'import base64\n'), ((8198, 8237), 'base64.b32decode', 'base64.b32decode', (['"""M1023456"""'], {'map01': '"""L"""'}), "('M1023456', map01='L')\n", (8214, 8237), False, 'import base64\n'), ((8271, 8310), 'base64.b32decode', 'base64.b32decode', (['"""M1023456"""'], {'map01': '"""I"""'}), "('M1023456', map01='I')\n", (8287, 8310), False, 'import base64\n'), ((8571, 8602), 'base64.b16encode', 'base64.b16encode', (['"""\x01\x02«Íï"""'], {}), "('\\x01\\x02«Íï')\n", (8587, 8602), False, 'import base64\n'), ((8638, 8662), 
'base64.b16encode', 'base64.b16encode', (["'\\x00'"], {}), "('\\x00')\n", (8654, 8662), False, 'import base64\n'), ((8840, 8870), 'base64.b16decode', 'base64.b16decode', (['"""0102ABCDEF"""'], {}), "('0102ABCDEF')\n", (8856, 8870), False, 'import base64\n'), ((8907, 8929), 'base64.b16decode', 'base64.b16decode', (['"""00"""'], {}), "('00')\n", (8923, 8929), False, 'import base64\n'), ((9090, 9126), 'base64.b16decode', 'base64.b16decode', (['"""0102abcdef"""', '(True)'], {}), "('0102abcdef', True)\n", (9106, 9126), False, 'import base64\n')] |
import numpy as np
from ctypes import c_void_p
from .Shader import Shader
from .transforms import *
from OpenGL.GL import *
class Path:
# position=[x1, y1, z1, ..., xn, yn, zn] ; rotation = [[Rx1, Ry1, Rz1], ..., [Rxn, Ryn, Rzn]]
def __init__(self, position, rotation=None):
self.loadPath(position)
if rotation:
assert len(position) == len(rotation) * 3
self.loadRotation(rotation)
else:
            self.rotation = 'Pio è un figo'  # sentinel value meaning "no rotation data was given"
def loadPath(self, position):
# compiling shader
self.path_shader = Shader('src\\shaders\\path\\pathvert.glsl',
'src\\shaders\\path\\pathfrag.glsl').shaderProgram
# setting path buffer
self.vertices = position
self.patharray = glGenVertexArrays(1)
glBindVertexArray(self.patharray)
self.lineBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.lineBuffer)
glBufferData(GL_ARRAY_BUFFER, np.array(self.vertices, dtype='float32'), GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, c_void_p(0))
def loadRotation(self, rotation):
self.rotation = rotation
# compiling shader
self.xpath_shader = Shader('src\\shaders\\path\\pathvert.glsl',
'src\\shaders\\path\\xpathfrag.glsl').shaderProgram
self.ypath_shader = Shader('src\\shaders\\path\\pathvert.glsl',
'src\\shaders\\path\\ypathfrag.glsl').shaderProgram
self.zpath_shader = Shader('src\\shaders\\path\\pathvert.glsl',
'src\\shaders\\path\\zpathfrag.glsl').shaderProgram
# setting versors
self.xvertices = []
self.yvertices = []
self.zvertices = []
for pos in range(len(rotation)):
xversor = self.getVersorAtTime(np.array([1, 0, 0, 1], dtype='float32'), pos)
yversor = self.getVersorAtTime(np.array([0, 1, 0, 1], dtype='float32'), pos)
zversor = self.getVersorAtTime(np.array([0, 0, 1, 1], dtype='float32'), pos)
pos = [self.vertices[pos*3], self.vertices[pos*3 + 1], self.vertices[pos*3 + 2]]
self.xvertices.extend(pos)
self.xvertices.extend([xversor[0], xversor[1], xversor[2]])
self.yvertices.extend(pos)
self.yvertices.extend([yversor[0], yversor[1], yversor[2]])
self.zvertices.extend(pos)
self.zvertices.extend([zversor[0], zversor[1], zversor[2]])
        # setting xline buffer
self.xpatharray = glGenVertexArrays(1)
glBindVertexArray(self.xpatharray)
self.xlineBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.xlineBuffer)
glBufferData(GL_ARRAY_BUFFER, np.array(self.xvertices, dtype='float32'), GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, c_void_p(0))
# setting yline buffer
self.ypatharray = glGenVertexArrays(1)
glBindVertexArray(self.ypatharray)
self.ylineBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.ylineBuffer)
glBufferData(GL_ARRAY_BUFFER, np.array(self.yvertices, dtype='float32'), GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, c_void_p(0))
        # setting zline buffer
self.zpatharray = glGenVertexArrays(1)
glBindVertexArray(self.zpatharray)
self.zlineBuffer = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.zlineBuffer)
glBufferData(GL_ARRAY_BUFFER, np.array(self.zvertices, dtype='float32'), GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, c_void_p(0))
def getVersorAtTime(self, versor, index):
r_versor = np.dot(get_rot(self.rotation[index][0], 0), versor)
r_versor = np.dot(get_rot(self.rotation[index][1], 1), r_versor)
r_versor = np.dot(get_rot(self.rotation[index][2], 2), r_versor)
t_versor = np.dot(get_traslation(self.vertices[index*3], self.vertices[index*3 + 1], self.vertices[index*3 + 2]), r_versor)
return t_versor
def renderPath(self, camera):
model = np.identity(4)
view = camera.view
proj = camera.proj
# rendering the path
glBindVertexArray(self.patharray)
glUseProgram(self.path_shader)
modelLocation = glGetUniformLocation(self.path_shader, 'model')
viewLocation = glGetUniformLocation(self.path_shader, 'view')
projectionLocation = glGetUniformLocation(self.path_shader, 'projection')
glUniformMatrix4fv(modelLocation, 1, GL_TRUE, model)
glUniformMatrix4fv(viewLocation, 1, GL_TRUE, view)
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, proj)
glEnableVertexAttribArray(0)
glDrawArrays(GL_LINE_STRIP, 0, int(len(self.vertices)/3))
glDisableVertexAttribArray(0)
# rendering the xlines
        if self.rotation != 'Pio è un figo':  # i.e. rotation data was provided
glBindVertexArray(self.xpatharray)
glUseProgram(self.xpath_shader)
modelLocation = glGetUniformLocation(self.xpath_shader, 'model')
viewLocation = glGetUniformLocation(self.xpath_shader, 'view')
projectionLocation = glGetUniformLocation(self.xpath_shader, 'projection')
glUniformMatrix4fv(modelLocation, 1, GL_TRUE, model)
glUniformMatrix4fv(viewLocation, 1, GL_TRUE, view)
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, proj)
glEnableVertexAttribArray(0)
glDrawArrays(GL_LINES, 0, int(len(self.xvertices)/3))
glDisableVertexAttribArray(0)
# rendering the ylines
glBindVertexArray(self.ypatharray)
glUseProgram(self.ypath_shader)
modelLocation = glGetUniformLocation(self.ypath_shader, 'model')
viewLocation = glGetUniformLocation(self.ypath_shader, 'view')
projectionLocation = glGetUniformLocation(self.ypath_shader, 'projection')
glUniformMatrix4fv(modelLocation, 1, GL_TRUE, model)
glUniformMatrix4fv(viewLocation, 1, GL_TRUE, view)
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, proj)
glEnableVertexAttribArray(0)
glDrawArrays(GL_LINES, 0, int(len(self.xvertices)/3))
glDisableVertexAttribArray(0)
# rendering the zlines
glBindVertexArray(self.zpatharray)
glUseProgram(self.zpath_shader)
modelLocation = glGetUniformLocation(self.zpath_shader, 'model')
viewLocation = glGetUniformLocation(self.zpath_shader, 'view')
projectionLocation = glGetUniformLocation(self.zpath_shader, 'projection')
glUniformMatrix4fv(modelLocation, 1, GL_TRUE, model)
glUniformMatrix4fv(viewLocation, 1, GL_TRUE, view)
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, proj)
glEnableVertexAttribArray(0)
glDrawArrays(GL_LINES, 0, int(len(self.xvertices)/3))
glDisableVertexAttribArray(0)
| [
"numpy.identity",
"numpy.array",
"ctypes.c_void_p"
]
| [((4234, 4248), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (4245, 4248), True, 'import numpy as np\n'), ((1016, 1056), 'numpy.array', 'np.array', (['self.vertices'], {'dtype': '"""float32"""'}), "(self.vertices, dtype='float32')\n", (1024, 1056), True, 'import numpy as np\n'), ((1137, 1148), 'ctypes.c_void_p', 'c_void_p', (['(0)'], {}), '(0)\n', (1145, 1148), False, 'from ctypes import c_void_p\n'), ((2842, 2883), 'numpy.array', 'np.array', (['self.xvertices'], {'dtype': '"""float32"""'}), "(self.xvertices, dtype='float32')\n", (2850, 2883), True, 'import numpy as np\n'), ((2964, 2975), 'ctypes.c_void_p', 'c_void_p', (['(0)'], {}), '(0)\n', (2972, 2975), False, 'from ctypes import c_void_p\n'), ((3236, 3277), 'numpy.array', 'np.array', (['self.yvertices'], {'dtype': '"""float32"""'}), "(self.yvertices, dtype='float32')\n", (3244, 3277), True, 'import numpy as np\n'), ((3358, 3369), 'ctypes.c_void_p', 'c_void_p', (['(0)'], {}), '(0)\n', (3366, 3369), False, 'from ctypes import c_void_p\n'), ((3628, 3669), 'numpy.array', 'np.array', (['self.zvertices'], {'dtype': '"""float32"""'}), "(self.zvertices, dtype='float32')\n", (3636, 3669), True, 'import numpy as np\n'), ((3750, 3761), 'ctypes.c_void_p', 'c_void_p', (['(0)'], {}), '(0)\n', (3758, 3761), False, 'from ctypes import c_void_p\n'), ((1922, 1961), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {'dtype': '"""float32"""'}), "([1, 0, 0, 1], dtype='float32')\n", (1930, 1961), True, 'import numpy as np\n'), ((2011, 2050), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {'dtype': '"""float32"""'}), "([0, 1, 0, 1], dtype='float32')\n", (2019, 2050), True, 'import numpy as np\n'), ((2100, 2139), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {'dtype': '"""float32"""'}), "([0, 0, 1, 1], dtype='float32')\n", (2108, 2139), True, 'import numpy as np\n')] |
from argparse import (
ArgumentParser
)
from os import getcwd as os_getcwd
DEFAULT_OUTPUT_FOLDER = os_getcwd()
DEFAULT_SAMPLE_VOLUME = 10000
def build_args_parser(
program,
description):
    parser = ArgumentParser(
        prog=program,
        description=description,
    )
parser = add_arguments(parser)
return parser
def add_arguments(parser):
parser.add_argument(
'cfps',
type=str,
help='Path to a .tsv file containing CFPS parameters and features',
)
parser.add_argument(
'init_tset',
type=str,
help='Path to a .tsv file containing initial training set',
)
parser.add_argument(
'norm_set',
type=str,
help='Path to a .tsv file containing normalizer set',
)
parser.add_argument(
'autofluo_set',
type=str,
help='Path to a .tsv file containing autofluorescence set',
)
parser.add_argument(
'-v', '--sample_volume',
type=int,
default=DEFAULT_SAMPLE_VOLUME,
help=('Final sample volume in each well in nL'
f' (default: {DEFAULT_SAMPLE_VOLUME})')
)
parser.add_argument(
'-of', '--output-folder',
type=str,
default=DEFAULT_OUTPUT_FOLDER,
help=('Output folder to write output files'
f' (default: {DEFAULT_OUTPUT_FOLDER})')
)
return parser
| [
"argparse.ArgumentParser",
"os.getcwd"
]
| [((108, 119), 'os.getcwd', 'os_getcwd', ([], {}), '()\n', (117, 119), True, 'from os import getcwd as os_getcwd\n'), ((228, 264), 'argparse.ArgumentParser', 'ArgumentParser', (['program', 'description'], {}), '(program, description)\n', (242, 264), False, 'from argparse import ArgumentParser\n')] |
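# Usage sketch for build_args_parser above; the program name, description text and .tsv file
# names are placeholders, not files that ship with this code.
parser = build_args_parser('sampler', 'Prepare CFPS sampling runs')
args = parser.parse_args(['cfps.tsv', 'init_tset.tsv', 'norm_set.tsv', 'autofluo_set.tsv', '-v', '5000'])
print(args.sample_volume, args.output_folder)  # -> 5000 and the current working directory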
import math
x = float(input())
prop_2 = -(x**2) / math.factorial(2)
prop_3 = (x**4) / math.factorial(4)
prop_4 = -(x**6) / math.factorial(6)
cos_x = float(1 + prop_2 + prop_3 + prop_4)
print(prop_2)
print(prop_3)
print(prop_4)
print(cos_x)
| [
"math.factorial"
]
| [((52, 69), 'math.factorial', 'math.factorial', (['(2)'], {}), '(2)\n', (66, 69), False, 'import math\n'), ((89, 106), 'math.factorial', 'math.factorial', (['(4)'], {}), '(4)\n', (103, 106), False, 'import math\n'), ((127, 144), 'math.factorial', 'math.factorial', (['(6)'], {}), '(6)\n', (141, 144), False, 'import math\n')] |
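# Quick illustrative check of the truncated Taylor series above against math.cos; the sample
# points are arbitrary.
import math
for x_test in (0.0, 0.5, 1.0):
    approx = sum((-1) ** n * x_test ** (2 * n) / math.factorial(2 * n) for n in range(4))
    print(x_test, approx, math.cos(x_test), abs(approx - math.cos(x_test)))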
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst
# class SpiderItem(scrapy.Item):
# # define the fields for your item here like:
# # name = scrapy.Field()
# pass
#
#
#
# class TorrentItem(scrapy.Item):
# url = scrapy.Field()
# name = scrapy.Field()
# description = scrapy.Field()
# size = scrapy.Field()
#
# import scrapy
class StockstarItemLoader(ItemLoader):
    # Custom ItemLoader used to store the field content scraped by the spider
default_output_processor = TakeFirst()
class StockstarItem(scrapy.Item):  # define the corresponding fields
# define the fields for your item here like:
# name = scrapy.Field()
    code = scrapy.Field()  # stock code
    abbr = scrapy.Field()  # stock name abbreviation
    last_trade = scrapy.Field()  # latest price
    chg_ratio = scrapy.Field()  # change percentage
    chg_amt = scrapy.Field()  # change amount
    chg_ratio_5min = scrapy.Field()  # 5-minute change percentage
    volumn = scrapy.Field()  # trading volume
    turn_over = scrapy.Field()  # turnover (traded value)
| [
"scrapy.loader.processors.TakeFirst",
"scrapy.Field"
]
| [((665, 676), 'scrapy.loader.processors.TakeFirst', 'TakeFirst', ([], {}), '()\n', (674, 676), False, 'from scrapy.loader.processors import TakeFirst\n'), ((812, 826), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (824, 826), False, 'import scrapy\n'), ((846, 860), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (858, 860), False, 'import scrapy\n'), ((886, 900), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (898, 900), False, 'import scrapy\n'), ((924, 938), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (936, 938), False, 'import scrapy\n'), ((960, 974), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (972, 974), False, 'import scrapy\n'), ((1003, 1017), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1015, 1017), False, 'import scrapy\n'), ((1040, 1054), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1052, 1054), False, 'import scrapy\n'), ((1078, 1092), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1090, 1092), False, 'import scrapy\n')] |
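# Minimal sketch of how StockstarItemLoader and StockstarItem are typically used inside a spider
# callback; the field values here are invented for illustration.
loader = StockstarItemLoader(item=StockstarItem())
loader.add_value('code', ['600000'])  # TakeFirst() keeps only the first value per field
loader.add_value('abbr', ['Example Co.'])
stock_item = loader.load_item()
print(dict(stock_item))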
#
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
""" Context holding multiple subcontexts.
"""
from __future__ import absolute_import
from itertools import chain
from collections import MutableMapping as DictMixin
from traits.api import (Bool, List, Str, Undefined, Supports,
adapt, provides, on_trait_change)
from .data_context import DataContext, ListenableMixin, PersistableMixin
from .i_context import ICheckpointable, IDataContext, IRestrictedContext
from .utils import safe_repr
@provides(IDataContext)
class MultiContext(ListenableMixin, PersistableMixin, DictMixin):
""" Wrap several subcontexts.
"""
#: The name of the context.
name = Str("multidummy")
#: The underlying dictionary.
subcontexts = List(Supports(IRestrictedContext, factory=DataContext))
#: Suppress subcontext modified events
veto_subcontext_modified = Bool(True)
def __init__(self, *subcontexts, **traits):
subcontexts = list(subcontexts)
super(MultiContext, self).__init__(subcontexts=subcontexts, **traits)
#### IContext interface ####################################################
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(list(self.keys()))
def __contains__(self, key):
for c in self.subcontexts:
if key in c:
return True
return False
def __delitem__(self, key):
""" Remove the given key with [] access.
Only deletes the first instance of the key.
Parameters
----------
key : str
Raises
------
        KeyError if the key is not available in the context.
"""
for c in self.subcontexts:
try:
del c[key]
return
except KeyError:
continue
raise KeyError(key)
def __getitem__(self, key):
for c in self.subcontexts:
try:
return c[key]
except KeyError:
continue
raise KeyError(key)
def __setitem__(self, key, value):
""" Set item with [] access.
The first subcontext which allows the key/value pair will get it. If an
earlier subcontext has the key, but does not allow the assignment, then
that key will be deleted. Later contexts with the key will be untouched.
If the key/value pair cannot be assigned to anything, no deletion will
take place.
Parameters
----------
key : str
value : object
Raises
------
ValueError if the key is not permitted to be assigned that value.
"""
# Let subtypes dictate compatibility independently of contained contexts
if not self.allows(value, key):
raise ValueError('Disallowed mapping: %s = %s' % (key, safe_repr(value)))
set = False
blocking_contexts = []
for c in self.subcontexts:
if not set:
if c.allows(value, key):
if key in c:
added = []
current_value = c[key]
try:
is_modified = bool(current_value != value)
except Exception:
is_modified = current_value is not value
if is_modified:
modified = [key]
c[key] = value
else:
modified = []
else:
added = [key]
modified = []
c[key] = value
set = True
break
elif key in c:
# Record this context as blocking access to the final
# location of the value.
blocking_contexts.append(c)
# Remove all blocking instances.
for c in blocking_contexts:
del c[key]
if not set:
raise ValueError('Disallowed mapping: %s = %s' % (key, safe_repr(value)))
def keys(self):
return list(set(chain(*[list(c.keys()) for c in self.subcontexts])))
# Expose DictMixin's get method over HasTraits'.
get = DictMixin.get
def __str__(self):
# Maybe a good default string
subcontext_str = '[%s]' % ', '.join([str(x) for x in self.subcontexts])
return '%s(name=%r, subcontexts=%s)' % (type(self).__name__, self.name,
subcontext_str)
def __repr__(self):
# Maybe a good default representation
return '%s(name=%r)' % (type(self).__name__, self.name)
#### IRestrictedContext interface ##########################################
def allows(self, value, name=None):
for c in self.subcontexts:
if c.allows(value, name=name):
return True
return False
#### Trait Event Handlers ##################################################
@on_trait_change('subcontexts:items_modified')
def subcontexts_items_modified(self, event):
""" Pass events up.
"""
if event is Undefined:
# Nothing to do.
return
event.veto = self.veto_subcontext_modified
self._fire_event(added=event.added, removed=event.removed,
modified=event.modified, context=event.context)
def _subcontexts_items_changed(self, event):
""" Trait listener for items of subcontexts list.
"""
added = []
removed = []
# Add to the list of items added
if len(event.added):
for context in event.added:
added.extend(list(context.keys()))
# Add to the list of items removed
if len(event.removed):
for context in event.removed:
removed.extend(list(context.keys()))
self._fire_event(added=added, removed=removed)
#### ICheckpointable interface ############################################
def checkpoint(self):
""" Make a shallow copy of the context.
Technically, this is actually a fairly deep copy. All of the object
structure should be replicated, but the actual dictionary storage will
be shallowly copied::
copy = context.shallow_copy()
copy[key] is context[key] for key in context.keys()
These semantics are useful for saving out checkpointed versions of the
context for implementing an undo/redo stack. They may not be useful for
other purposes.
Returns
-------
copy : IContext
"""
copy = self.clone_traits()
new_subcontexts = []
for context in self.subcontexts:
checkpointable_subcontext = adapt(context, ICheckpointable)
new_subcontexts.append(checkpointable_subcontext.checkpoint())
copy.subcontexts = new_subcontexts
return copy
| [
"traits.api.Str",
"traits.api.on_trait_change",
"traits.api.provides",
"traits.api.Bool",
"traits.api.Supports",
"traits.api.adapt"
]
| [((613, 635), 'traits.api.provides', 'provides', (['IDataContext'], {}), '(IDataContext)\n', (621, 635), False, 'from traits.api import Bool, List, Str, Undefined, Supports, adapt, provides, on_trait_change\n'), ((788, 805), 'traits.api.Str', 'Str', (['"""multidummy"""'], {}), "('multidummy')\n", (791, 805), False, 'from traits.api import Bool, List, Str, Undefined, Supports, adapt, provides, on_trait_change\n'), ((990, 1000), 'traits.api.Bool', 'Bool', (['(True)'], {}), '(True)\n', (994, 1000), False, 'from traits.api import Bool, List, Str, Undefined, Supports, adapt, provides, on_trait_change\n'), ((5200, 5245), 'traits.api.on_trait_change', 'on_trait_change', (['"""subcontexts:items_modified"""'], {}), "('subcontexts:items_modified')\n", (5215, 5245), False, 'from traits.api import Bool, List, Str, Undefined, Supports, adapt, provides, on_trait_change\n'), ((864, 913), 'traits.api.Supports', 'Supports', (['IRestrictedContext'], {'factory': 'DataContext'}), '(IRestrictedContext, factory=DataContext)\n', (872, 913), False, 'from traits.api import Bool, List, Str, Undefined, Supports, adapt, provides, on_trait_change\n'), ((6989, 7020), 'traits.api.adapt', 'adapt', (['context', 'ICheckpointable'], {}), '(context, ICheckpointable)\n', (6994, 7020), False, 'from traits.api import Bool, List, Str, Undefined, Supports, adapt, provides, on_trait_change\n')] |
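# Sketch of the layering semantics documented in __setitem__ and checkpoint above, assuming the
# accompanying DataContext behaves as an ordinary dict-like context.
upper = DataContext(name='upper')
lower = DataContext(name='lower')
multi = MultiContext(upper, lower, name='layered')
multi['x'] = 1  # stored in the first subcontext that allows it
print(multi['x'], 'x' in upper, 'x' in lower)
snapshot = multi.checkpoint()  # shallow copy suitable for an undo/redo stack
print(sorted(snapshot.keys()))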
"""Data pipelines."""
from collections import defaultdict, OrderedDict
from tqdm import tqdm
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import torch
def get_info(examples, vocab=None, max_seq_len=256):
"""Gathers info on and creats a featurized example generator for a list of raw examples.
Args:
examples: list(list, float, or string). Examples to create generator for.
vocab: list(str). A vocabulary for discrete datatypes (e.g. text or categorical).
max_seq_len: int. maximum sequence length for text examples.
Returns:
        A featurizer function for the examples, the example type ('input', 'continuous',
        or 'categorical'), and the vocabulary used.
"""
assert isinstance(examples, list), 'examples must be list; got ' + str(type(examples))
assert len(examples) > 0, 'Empty example list!'
# Text
if isinstance(examples[0], list):
assert vocab is not None, 'ERROR: must provide a vocab.'
example_type = 'input'
vocab = ['UNK', 'PAD'] + vocab
tok2id = {tok: i for i, tok in enumerate(vocab)}
ngrams = max(len(x.split()) for x in vocab)
unk_id = 0
def featurizer(example):
ids = []
for n in range(1, ngrams + 1):
toks = [' '.join(example[i: i + n]) for i in range(len(example) - n + 1)]
ids += [tok2id.get(x, 0) for x in toks]
ids = ids[:max_seq_len]
padded_ids = ids + ([1] * (max_seq_len - len(ids))) # pad idx = 1
return padded_ids
# Continuous
elif isinstance(examples[0], float) or isinstance(examples[0], int):
example_type = 'continuous'
vocab = ['N/A']
if isinstance(examples[0], int):
featurizer = lambda ex: float(ex)
else:
featurizer = lambda ex: ex
# Categorical
elif isinstance(examples[0], str):
example_type = 'categorical'
if not vocab:
vocab = ['UNK'] + sorted(list(set(examples)))
tok2id = {tok: i for i, tok in enumerate(vocab)}
featurizer = lambda ex: tok2id.get(ex, 0) # 0 is the unk id.
else:
print("ERROR: unrecognized example type: ", examples[0])
quit()
return featurizer, example_type, vocab
def get_iterator(vocab, df, name_to_type, batch_size=32, max_seq_len=256):
"""Builds a data iterator for text, confounds, and outcomes.
Args:
vocab: list(str). The vocabulary to use.
df: pandas.df. The data we want to iterate over. The columns of
these data should be a superset of the keys in name_to_type.
name_to_type: dict. A mapping from variable names to whether they are
"input", "predict", or "control" variables.
batch_size: int. The batch size to use.
max_seq_len: int. Maximum length of text sequences.
Returns:
        An iterator-builder function whose generator yields dictionaries mapping variable
        names to tensors of batched data, plus a dict of info about each variable.
"""
def featurize(featurizer):
return [featurizer(ex) for ex in examples]
var_info = defaultdict(lambda: OrderedDict())
featurized_data = defaultdict(list)
for var_name, var_type in name_to_type.items():
examples = list(df[var_name])
if var_type == 'input':
examples = [x.split() for x in examples]
featurizer, _, vocab = get_info(examples, vocab, max_seq_len)
var_info[var_name] = {
'control': False, 'name': var_name,
'type': var_type, 'vocab': vocab
}
else:
featurizer, varType, vocab = get_info(examples)
var_info[var_name] = {
'control': var_type == 'control',
'name': var_name, 'type': varType, 'vocab': vocab
}
featurized_data[var_name] = [featurizer(ex) for ex in examples]
def to_tensor(var_name):
dtype = torch.float
if var_info[var_name]['type'] in {'categorical', 'input'}:
dtype = torch.long
return torch.tensor(featurized_data[var_name], dtype=dtype)
feature_names = sorted(featurized_data.keys())
data = TensorDataset(*[to_tensor(name) for name in feature_names])
dataloader = DataLoader(
dataset=data,
sampler=RandomSampler(data),
collate_fn=lambda batch: [torch.stack(x) for x in zip(*batch)], # group by datatype.
batch_size=batch_size)
def iterator():
for batch in dataloader:
yield dict(zip(feature_names, batch))
return iterator, var_info
| [
"collections.OrderedDict",
"torch.stack",
"torch.utils.data.RandomSampler",
"torch.tensor",
"collections.defaultdict"
]
| [((3172, 3189), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3183, 3189), False, 'from collections import defaultdict, OrderedDict\n'), ((4077, 4129), 'torch.tensor', 'torch.tensor', (['featurized_data[var_name]'], {'dtype': 'dtype'}), '(featurized_data[var_name], dtype=dtype)\n', (4089, 4129), False, 'import torch\n'), ((3135, 3148), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3146, 3148), False, 'from collections import defaultdict, OrderedDict\n'), ((4320, 4339), 'torch.utils.data.RandomSampler', 'RandomSampler', (['data'], {}), '(data)\n', (4333, 4339), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4375, 4389), 'torch.stack', 'torch.stack', (['x'], {}), '(x)\n', (4386, 4389), False, 'import torch\n')] |
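
# Usage sketch for the data-pipeline record above: it builds an iterator over a
# tiny DataFrame. The column names, vocabulary, and values are invented for
# illustration and are not part of the original record.
import pandas as pd

toy_df = pd.DataFrame({
    'text': ['good movie', 'bad movie', 'great plot twist'],
    'rating': [1.0, 0.0, 1.0],
    'genre': ['drama', 'comedy', 'drama'],
})
iterator, var_info = get_iterator(
    vocab=['good', 'bad', 'great', 'movie', 'plot', 'twist'],
    df=toy_df,
    name_to_type={'text': 'input', 'rating': 'predict', 'genre': 'control'},
    batch_size=2,
    max_seq_len=8)
for batch in iterator():
    # e.g. {'genre': torch.Size([2]), 'rating': torch.Size([2]), 'text': torch.Size([2, 8])}
    print({name: tensor.shape for name, tensor in batch.items()})
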
import discord
from discord.ext import commands
import os
import json
from datetime import date, datetime, timedelta
from .utils import helper_functions as hf
from copy import deepcopy
dir_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))).replace('\\', '/')
class Jpserv(commands.Cog):
"""Modules unique for the Japanese server"""
def __init__(self, bot):
self.bot = bot
async def cog_check(self, ctx):
if not ctx.guild:
return
return ctx.guild.id == 189571157446492161 or ctx.guild.id == 275146036178059265
# these commands are only useable on Japanese server or my testing server
@commands.command()
@hf.is_admin()
async def swap(self, ctx):
"""Swaps JHO/JHO2's names and positions in the lists, for if we temporarily want welcome messages to go to
JHO2"""
jpJHO = self.bot.get_channel(189571157446492161)
jpJHO2 = self.bot.get_channel(326263874770829313)
if jpJHO.position == 4:
await jpJHO.edit(position=5, name='just_hanging_out_2')
await jpJHO2.edit(position=4, name='just_hanging_out')
else:
await jpJHO.edit(position=4, name='just_hanging_out')
await jpJHO2.edit(position=5, name='just_hanging_out_2')
@commands.group(invoke_without_command=True, aliases=['uhc'])
async def ultrahardcore(self, ctx, *, member=None):
"""Irreversible hardcore mode. Must talk to an admin to have this undone."""
# if ctx.guild.id != 189571157446492161:
# return
role = ctx.guild.get_role(486851965121331200)
config = self.bot.db['ultraHardcore']['users']
if member: # if you specified someone else's ID, then remove UHC from them
member = await hf.member_converter(ctx, member)
if not member:
return
if hf.admin_check(ctx) and ctx.author.id != member.id:
if str(member.id) in config:
if config[str(member.id)][0]:
config[str(member.id)][0] = False
else:
await ctx.send("That user is not in UHC")
return
else:
await ctx.send("That user is not in UHC mode.")
return
await hf.dump_json()
try:
await member.remove_roles(role)
except discord.errors.Forbidden:
await ctx.send("I couldn't remove the ultra hardcore role")
await ctx.send(f'Undid ultra hardcore mode for {member.name}')
else:
await ctx.send("You can not remove UHC. Ask a mod/admin to help you.")
else:
if str(ctx.author.id) in config:
if config[str(ctx.author.id)][0]:
await ctx.invoke(self.explanation)
return
await ctx.send(f"This is ultra hardcore mode. It means you must speak in the language you are learning"
f" (for example, if you are learning Japanese, any messages in English will be deleted)."
f" This can not be undone unless you ask a mod to remove it for you. \n\n"
f"To enable ultra hardcore mode, type `;uhc on` or `;uhc enable`. ")
@ultrahardcore.command(aliases=['enable'])
async def on(self, ctx):
"""Enables UHC"""
if ctx.guild.id != 189571157446492161:
return
role = ctx.guild.get_role(486851965121331200)
config = self.bot.db['ultraHardcore']['users']
if str(ctx.author.id) in config: # if not enabled
user = config[str(ctx.author.id)]
if user[0]:
await ctx.send("You're already in ultra hardcore mode.")
return
else:
user[0] = True
else:
config[str(ctx.author.id)] = [True, date.today().strftime("%Y/%m/%d"), 0]
await hf.dump_json()
try:
await ctx.author.add_roles(role)
except discord.errors.Forbidden:
await ctx.send("I couldn't add the ultra hardcore role")
await ctx.send(f"{ctx.author.name} has chosen to enable ultra hardcore mode. It works the same as "
"normal hardcore mode except that you can't undo it and asterisks don't change "
"anything. Talk to a mod to undo this.")
@ultrahardcore.command()
async def list(self, ctx):
"""Lists the people currently in ultra hardcore mode"""
if ctx.guild.id != 189571157446492161:
return
string = 'The members in ultra hardcore mode right now are '
guild = self.bot.get_guild(189571157446492161)
members = []
config = self.bot.db['ultraHardcore']['users']
for member_id in config.copy():
if config[member_id][0]:
member = guild.get_member(int(member_id))
if member is not None: # in case a member leaves
members.append(member.name)
else:
del config[member_id]
await ctx.send(f'Removed <@{member_id}> from the list, as they seem to have left the server')
await ctx.send(string + ', '.join(members))
@ultrahardcore.command()
async def explanation(self, ctx):
"""Explains ultra hardcore mode for those who are using it and can't explain it"""
if ctx.guild.id != 189571157446492161:
return
if str(ctx.author.id) in self.bot.db['ultraHardcore']['users']:
if self.bot.db['ultraHardcore']['users'][str(ctx.author.id)][0]:
await ctx.send(f"{ctx.author.mention} is currently using ultra hardcore mode. In this mode, they can't"
f" speak their native language, and they also cannot undo this mode themselves.")
return
await ctx.send(f"{ctx.author.mention} is currently NOT using hardcore mode, so I don't know why "
f"they're trying to use this command. But, ultra hardcore mode means a user can't speak "
f"any English, and can't undo this mode themselves no matter what.")
@ultrahardcore.command(aliases=['lb'])
async def leaderboard(self, ctx):
"""Shows a leaderboard of who has had UHC on for the longest"""
if ctx.guild.id != 189571157446492161:
return
time_dict = deepcopy(self.bot.db['ultraHardcore']['users'])
for i in time_dict:
if time_dict[i][0]:
time_dict[i][2] += (datetime.today() - datetime.strptime(time_dict[i][1], "%Y/%m/%d")).days
# {('243703909166612480', [True, '2019/02/14', 124]),
# ('219617844973797376', [False, '2018/11/30', 122]), ...}
to_sort = [[i[0], i[1][0], i[1][2]] for i in list(time_dict.items())]
# to_sort: [['243703909166612480', True, 162], ['219617844973797376', False, 122], ...]
sorted_dict = sorted(to_sort, key=lambda x: x[2], reverse=True)
leaderboard = f"The number of days each user has had UHC enabled " \
f"(Bold = This user currently has UHC enabled)\n\n"
for i in sorted_dict:
user = ctx.guild.get_member(int(i[0]))
if (i[2] < 10 and not i[1]) or (not user):
continue
if user.nick:
name_str = f"{user.mention} ({user.name})"
else:
name_str = f"{user.name}"
if i[1]:
leaderboard += f"**{i[2]}: {name_str}**\n"
else:
leaderboard += f"{i[2]}: {name_str}\n"
emb = discord.Embed(title="UHC Leaderboard", description=leaderboard,
color=discord.Color(int('ff5500', 16)))
await ctx.send(embed=emb)
@ultrahardcore.command()
@hf.is_admin()
async def ignore(self, ctx):
"""Ignores a channel for UHC"""
if ctx.guild.id != 189571157446492161:
return
config = self.bot.db['ultraHardcore']
try:
if ctx.channel.id not in config['ignore']:
config['ignore'].append(ctx.channel.id)
await ctx.send(f"Added {ctx.channel.name} to list of ignored channels for UHC")
else:
config['ignore'].remove(ctx.channel.id)
await ctx.send(f"Removed {ctx.channel.name} from list of ignored channels for UHC")
except KeyError:
config['ignore'] = [ctx.channel.id]
await ctx.send(f"Added {ctx.channel.name} to list of ignored channels for UHC")
await hf.dump_json()
def setup(bot):
bot.add_cog(Jpserv(bot))
| [
"datetime.datetime.strptime",
"discord.ext.commands.group",
"os.path.realpath",
"copy.deepcopy",
"datetime.datetime.today",
"datetime.date.today",
"discord.ext.commands.command"
]
| [((668, 686), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (684, 686), False, 'from discord.ext import commands\n'), ((1305, 1365), 'discord.ext.commands.group', 'commands.group', ([], {'invoke_without_command': '(True)', 'aliases': "['uhc']"}), "(invoke_without_command=True, aliases=['uhc'])\n", (1319, 1365), False, 'from discord.ext import commands\n'), ((6572, 6619), 'copy.deepcopy', 'deepcopy', (["self.bot.db['ultraHardcore']['users']"], {}), "(self.bot.db['ultraHardcore']['users'])\n", (6580, 6619), False, 'from copy import deepcopy\n'), ((229, 255), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (245, 255), False, 'import os\n'), ((4004, 4016), 'datetime.date.today', 'date.today', ([], {}), '()\n', (4014, 4016), False, 'from datetime import date, datetime, timedelta\n'), ((6716, 6732), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (6730, 6732), False, 'from datetime import date, datetime, timedelta\n'), ((6735, 6781), 'datetime.datetime.strptime', 'datetime.strptime', (['time_dict[i][1]', '"""%Y/%m/%d"""'], {}), "(time_dict[i][1], '%Y/%m/%d')\n", (6752, 6781), False, 'from datetime import date, datetime, timedelta\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private base class for pooling 3D layers."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import conv_utils
class Pooling3D(Layer):
"""Pooling layer for arbitrary pooling functions, for 3D inputs.
This class only exists for code reuse. It will never be an exposed API.
Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or
`channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)`
while `channels_first` corresponds to
inputs with shape `(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(
self,
pool_function,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
super().__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 3, "pool_size")
self.strides = conv_utils.normalize_tuple(
strides, 3, "strides", allow_zero=True
)
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def call(self, inputs):
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
if self.data_format == "channels_first":
# TF does not support `channels_first` with 3D pooling operations,
# so we must handle this case manually.
# TODO(fchollet): remove this when TF pooling is feature-complete.
inputs = tf.transpose(inputs, (0, 2, 3, 4, 1))
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper(),
)
if self.data_format == "channels_first":
outputs = tf.transpose(outputs, (0, 4, 1, 2, 3))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_first":
len_dim1 = input_shape[2]
len_dim2 = input_shape[3]
len_dim3 = input_shape[4]
else:
len_dim1 = input_shape[1]
len_dim2 = input_shape[2]
len_dim3 = input_shape[3]
len_dim1 = conv_utils.conv_output_length(
len_dim1, self.pool_size[0], self.padding, self.strides[0]
)
len_dim2 = conv_utils.conv_output_length(
len_dim2, self.pool_size[1], self.padding, self.strides[1]
)
len_dim3 = conv_utils.conv_output_length(
len_dim3, self.pool_size[2], self.padding, self.strides[2]
)
if self.data_format == "channels_first":
return tf.TensorShape(
[input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3]
)
else:
return tf.TensorShape(
[input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]]
)
def get_config(self):
config = {
"pool_size": self.pool_size,
"padding": self.padding,
"strides": self.strides,
"data_format": self.data_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| [
"keras.backend.image_data_format",
"keras.utils.conv_utils.normalize_padding",
"keras.engine.input_spec.InputSpec",
"keras.utils.conv_utils.conv_output_length",
"tensorflow.compat.v2.TensorShape",
"keras.utils.conv_utils.normalize_data_format",
"tensorflow.compat.v2.transpose",
"keras.utils.conv_utils.normalize_tuple"
]
| [((2611, 2664), 'keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['pool_size', '(3)', '"""pool_size"""'], {}), "(pool_size, 3, 'pool_size')\n", (2637, 2664), False, 'from keras.utils import conv_utils\n'), ((2688, 2754), 'keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['strides', '(3)', '"""strides"""'], {'allow_zero': '(True)'}), "(strides, 3, 'strides', allow_zero=True)\n", (2714, 2754), False, 'from keras.utils import conv_utils\n'), ((2800, 2837), 'keras.utils.conv_utils.normalize_padding', 'conv_utils.normalize_padding', (['padding'], {}), '(padding)\n', (2828, 2837), False, 'from keras.utils import conv_utils\n'), ((2865, 2910), 'keras.utils.conv_utils.normalize_data_format', 'conv_utils.normalize_data_format', (['data_format'], {}), '(data_format)\n', (2897, 2910), False, 'from keras.utils import conv_utils\n'), ((2937, 2954), 'keras.engine.input_spec.InputSpec', 'InputSpec', ([], {'ndim': '(5)'}), '(ndim=5)\n', (2946, 2954), False, 'from keras.engine.input_spec import InputSpec\n'), ((4122, 4215), 'keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['len_dim1', 'self.pool_size[0]', 'self.padding', 'self.strides[0]'], {}), '(len_dim1, self.pool_size[0], self.padding,\n self.strides[0])\n', (4151, 4215), False, 'from keras.utils import conv_utils\n'), ((4253, 4346), 'keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['len_dim2', 'self.pool_size[1]', 'self.padding', 'self.strides[1]'], {}), '(len_dim2, self.pool_size[1], self.padding,\n self.strides[1])\n', (4282, 4346), False, 'from keras.utils import conv_utils\n'), ((4384, 4477), 'keras.utils.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['len_dim3', 'self.pool_size[2]', 'self.padding', 'self.strides[2]'], {}), '(len_dim3, self.pool_size[2], self.padding,\n self.strides[2])\n', (4413, 4477), False, 'from keras.utils import conv_utils\n'), ((2455, 2482), 'keras.backend.image_data_format', 'backend.image_data_format', ([], {}), '()\n', (2480, 2482), False, 'from keras import backend\n'), ((3360, 3397), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['inputs', '(0, 2, 3, 4, 1)'], {}), '(inputs, (0, 2, 3, 4, 1))\n', (3372, 3397), True, 'import tensorflow.compat.v2 as tf\n'), ((3640, 3678), 'tensorflow.compat.v2.transpose', 'tf.transpose', (['outputs', '(0, 4, 1, 2, 3)'], {}), '(outputs, (0, 4, 1, 2, 3))\n', (3652, 3678), True, 'import tensorflow.compat.v2 as tf\n'), ((4564, 4642), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3]'], {}), '([input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3])\n', (4578, 4642), True, 'import tensorflow.compat.v2 as tf\n'), ((4706, 4784), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]]'], {}), '([input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]])\n', (4720, 4784), True, 'import tensorflow.compat.v2 as tf\n'), ((3774, 3801), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (3788, 3801), True, 'import tensorflow.compat.v2 as tf\n')] |
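
# The Pooling3D record above is a private base class intended only for reuse; a
# concrete layer simply binds a pooling op. The subclass below is an
# illustrative sketch, not the original Keras MaxPooling3D implementation.
import tensorflow.compat.v2 as tf

class SketchMaxPooling3D(Pooling3D):
    """Max pooling over 3D volumes, built on the Pooling3D base class above."""

    def __init__(self, pool_size=(2, 2, 2), strides=None, padding="valid",
                 data_format=None, **kwargs):
        super().__init__(
            tf.nn.max_pool3d,  # pool_function applied inside Pooling3D.call
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            **kwargs)

# layer = SketchMaxPooling3D(pool_size=2)
# layer(tf.zeros([1, 8, 8, 8, 3])).shape  # -> (1, 4, 4, 4, 3) with 'valid' padding
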
import os
import re
from setuptools import setup
version = re.search(
    r'^__version__\s*=\s*"(.*)"',
open('braumeister/braumeister.py').read(),
re.M
).group(1)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="braumeister",
packages=["braumeister", "braumeister.actions"],
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Easy release bulding, combining JIRA and git",
long_description=read('README.md'),
license="MIT",
keywords="git jira release",
url="https://www.talentsconnect.com",
include_package_data=True,
install_requires=['requests', 'colorama'],
entry_points={
'console_scripts': ["braumeister = braumeister.braumeister:main"]
},
python_requires='!=2.7, !=3.4, >=3.5',
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Topic :: Utilities",
"Topic :: Software Development :: Version Control :: Git"
],
)
| [
"os.path.dirname"
]
| [((220, 245), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (235, 245), False, 'import os\n')] |
# standard library imports
import os
# third party imports
import numpy as np
from PIL import Image
import torch.nn as nn
from torchvision import transforms
# local imports
import config
from . import utils
from . import geometric_transformer
class GeoTransformationInfer(nn.Module):
def __init__(self, output_dir="./output/results"):
super(GeoTransformationInfer, self).__init__()
self.output_dir = output_dir
utils.ensure_folder(self.output_dir)
def forward(self, model_apparel, warped_image, model_image, warped_model_image, random_product_image, random_product_image_warped, output_on_random_product, batch_index, epoch):
batch_size = warped_image.shape[0]
model_apparel = model_apparel.cpu().numpy()
warped_image = warped_image.cpu().numpy()
model_image = model_image.cpu().numpy()
warped_model_image = warped_model_image.cpu().numpy()
random_product_image = random_product_image.cpu().numpy()
random_product_image_warped = random_product_image_warped.cpu().numpy()
output_on_random_product = output_on_random_product.cpu().numpy()
for i in range(batch_size):
self._save_image_sheet(
batch_index*config.PARAMS["batch_size"] + i,
model_apparel[i],
warped_image[i],
model_image[i],
warped_model_image[i],
random_product_image[i],
random_product_image_warped[i],
output_on_random_product[i],
epoch)
def _save_image_sheet(self,
idx,
model_apparel,
warped_image,
model_image,
warped_model_image,
random_product_image,
random_product_image_warped,
output_on_random_product,
epoch):
# inverse normalization of the images along with channel first to channel last steps and finally converting np array to pillow format for saving
model_apparel = np.moveaxis(model_apparel, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
model_apparel = Image.fromarray(np.uint8(model_apparel * 255))
warped_image = np.moveaxis(warped_image, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
warped_image = Image.fromarray(np.uint8(warped_image * 255))
model_image = np.moveaxis(model_image, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
model_image = Image.fromarray(np.uint8(model_image * 255))
warped_model_image = np.moveaxis(warped_model_image, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
warped_model_image = Image.fromarray(np.uint8(warped_model_image * 255))
random_product_image = np.moveaxis(random_product_image, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
random_product_image = Image.fromarray(np.uint8(random_product_image * 255))
        random_product_image_warped = np.moveaxis(random_product_image_warped, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
random_product_image_warped = Image.fromarray(np.uint8(random_product_image_warped * 255))
output_on_random_product = np.moveaxis(output_on_random_product, 0, 2) * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
output_on_random_product = Image.fromarray(np.uint8(output_on_random_product * 255))
sheet = Image.new('RGB', (1568, 224), 'white')
sheet.paste(model_apparel, (0, 0))
sheet.paste(warped_image, (224, 0))
sheet.paste(model_image, (448, 0))
sheet.paste(warped_model_image, (672, 0))
sheet.paste(random_product_image, (896, 0))
sheet.paste(random_product_image_warped, (1120, 0))
sheet.paste(output_on_random_product, (1344, 0))
sheet.save(os.path.join(self.output_dir, "image_sheet_{}-epoch{}".format(idx, str(epoch).zfill(3)) + ".jpg"))
| [
"numpy.uint8",
"PIL.Image.new",
"numpy.moveaxis"
]
| [((3034, 3072), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(1568, 224)', '"""white"""'], {}), "('RGB', (1568, 224), 'white')\n", (3043, 3072), False, 'from PIL import Image\n'), ((1872, 1901), 'numpy.uint8', 'np.uint8', (['(model_apparel * 255)'], {}), '(model_apparel * 255)\n', (1880, 1901), True, 'import numpy as np\n'), ((2033, 2061), 'numpy.uint8', 'np.uint8', (['(warped_image * 255)'], {}), '(warped_image * 255)\n', (2041, 2061), True, 'import numpy as np\n'), ((2190, 2217), 'numpy.uint8', 'np.uint8', (['(model_image * 255)'], {}), '(model_image * 255)\n', (2198, 2217), True, 'import numpy as np\n'), ((2367, 2401), 'numpy.uint8', 'np.uint8', (['(warped_model_image * 255)'], {}), '(warped_model_image * 255)\n', (2375, 2401), True, 'import numpy as np\n'), ((2557, 2593), 'numpy.uint8', 'np.uint8', (['(random_product_image * 255)'], {}), '(random_product_image * 255)\n', (2565, 2593), True, 'import numpy as np\n'), ((2770, 2813), 'numpy.uint8', 'np.uint8', (['(random_product_image_warped * 255)'], {}), '(random_product_image_warped * 255)\n', (2778, 2813), True, 'import numpy as np\n'), ((2981, 3021), 'numpy.uint8', 'np.uint8', (['(output_on_random_product * 255)'], {}), '(output_on_random_product * 255)\n', (2989, 3021), True, 'import numpy as np\n'), ((1757, 1789), 'numpy.moveaxis', 'np.moveaxis', (['model_apparel', '(0)', '(2)'], {}), '(model_apparel, 0, 2)\n', (1768, 1789), True, 'import numpy as np\n'), ((1920, 1951), 'numpy.moveaxis', 'np.moveaxis', (['warped_image', '(0)', '(2)'], {}), '(warped_image, 0, 2)\n', (1931, 1951), True, 'import numpy as np\n'), ((2079, 2109), 'numpy.moveaxis', 'np.moveaxis', (['model_image', '(0)', '(2)'], {}), '(model_image, 0, 2)\n', (2090, 2109), True, 'import numpy as np\n'), ((2242, 2279), 'numpy.moveaxis', 'np.moveaxis', (['warped_model_image', '(0)', '(2)'], {}), '(warped_model_image, 0, 2)\n', (2253, 2279), True, 'import numpy as np\n'), ((2428, 2467), 'numpy.moveaxis', 'np.moveaxis', (['random_product_image', '(0)', '(2)'], {}), '(random_product_image, 0, 2)\n', (2439, 2467), True, 'import numpy as np\n'), ((2627, 2673), 'numpy.moveaxis', 'np.moveaxis', (['random_product_image_warped', '(0)', '(2)'], {}), '(random_product_image_warped, 0, 2)\n', (2638, 2673), True, 'import numpy as np\n'), ((2844, 2887), 'numpy.moveaxis', 'np.moveaxis', (['output_on_random_product', '(0)', '(2)'], {}), '(output_on_random_product, 0, 2)\n', (2855, 2887), True, 'import numpy as np\n')] |
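
# The sheet-building method above repeats the same inverse ImageNet
# normalization for every tensor; a small helper like this (an illustrative
# refactor, not part of the original module) expresses that step once.
import numpy as np
from PIL import Image

IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

def chw_array_to_pil(chw):
    """De-normalize a channel-first (C, H, W) float array and return a PIL image."""
    hwc = np.moveaxis(chw, 0, 2) * IMAGENET_STD + IMAGENET_MEAN
    return Image.fromarray(np.uint8(hwc * 255))
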
from pathlib import Path
import h5py
import numpy as np
from torchvision.datasets.vision import VisionDataset
from PIL import Image
import requests
import zipfile
from tqdm import tqdm
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(CHUNK_SIZE)):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
class Fonts(VisionDataset):
url_id = '0B0GtwTQ6IF9AU3NOdzFzUWZ0aDQ'
base_folder = 'fonts'
def __init__(self, root, split='train',
transform=None, target_transform=None, download=True,
denoise=False, denoise_transform=None, num_fonts_pi=None,
num_examples=2500):
'''
Args:
root (str): path
            split (str): 'train', 'val', 'test'
            transform: input transformation
            target_transform: target transformation
            download (bool): download or not
            denoise (bool): if True, __getitem__ returns (source, target) image pairs for a denoising task
            denoise_transform: transform applied to produce the source images when denoise is True
            num_fonts_pi (int, optional): number of held-out fonts to add when denoise is True
            num_examples (int): number of training examples
'''
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = split
self.transform = transform
self.target_transform = target_transform
self.denoise = denoise
self.denoise_transform = denoise_transform
self.path = Path(self.root) / self.base_folder
self.path.mkdir(parents=True, exist_ok=True)
self.download_path = self.path / 'fonts.hdf5'
if download:
self.download()
with h5py.File(str(self.download_path), 'r') as f:
data_by_domain = f['fonts'][()]
np.random.seed(484347)
# limit the number of fonts
num_fonts = 100
font_idxs = np.arange(len(data_by_domain))
np.random.shuffle(font_idxs)
if not denoise:
data_by_domain = data_by_domain[font_idxs[:num_fonts]]
print(f"NUM FONTS: {num_fonts}")
print(f"NUM CHARS: {data_by_domain.shape[1]}")
num_classes = data_by_domain.shape[1]
self.all_targets = np.concatenate(
[np.arange(num_classes)]*num_fonts, axis=0)
self.all_domain_labels = np.repeat(np.arange(num_fonts), num_classes)
self.all_data = data_by_domain.reshape(data_by_domain.shape[0]*data_by_domain.shape[1], data_by_domain.shape[2], data_by_domain.shape[3])
idxs = np.arange(len(self.all_data))
np.random.shuffle(idxs)
train_val_max = 2600
if num_examples > train_val_max:
# to be able to heuristically test what happens if we have more training data
train_val_max = 5000
if split == 'train':
idxs = idxs[:num_examples]
elif split == 'val':
idxs = idxs[num_examples: train_val_max]
else:
idxs = idxs[train_val_max:]
self.targets = self.all_targets[idxs]
self.domain_labels = self.all_domain_labels[idxs]
self.data = self.all_data[idxs]
else:
# get the train data
train_dbd = data_by_domain[font_idxs[:num_fonts]]
all_data = train_dbd.reshape(train_dbd.shape[0]*train_dbd.shape[1], train_dbd.shape[2], train_dbd.shape[3])
idxs = np.arange(len(all_data))
np.random.shuffle(idxs)
idxs = idxs[:num_examples]
train_data = all_data[idxs]
if num_fonts_pi is not None:
data_by_domain = data_by_domain[font_idxs[num_fonts:num_fonts+num_fonts_pi]]
else:
data_by_domain = data_by_domain[font_idxs[num_fonts:]]
self.data = data_by_domain.reshape(data_by_domain.shape[0]*data_by_domain.shape[1], data_by_domain.shape[2], data_by_domain.shape[3])
self.data = np.concatenate([train_data, self.data], axis=0)
def get_nearest_neighbor(self, all_imgs, x):
idx = np.argmin(np.sum(np.square(all_imgs - x), axis=(1,2)))
return self[idx]
def download(self):
if not self.download_path.exists():
download_file_from_google_drive(self.url_id, str(self.download_path))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.denoise:
img = self.data[index]
img = Image.fromarray(img)
if self.transform is not None:
tgt_img = self.transform(img)
if self.denoise_transform is not None:
src_img = self.denoise_transform(img)
return src_img, tgt_img
else:
img, target = self.data[index], self.targets[index]
domain_label = self.domain_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, domain_label
def get_item_from_all(self, index):
img, target = self.all_data[index], self.all_targets[index]
domain_label = self.all_domain_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, domain_label
def __len__(self):
return len(self.data)
| [
"PIL.Image.fromarray",
"requests.Session",
"pathlib.Path",
"numpy.square",
"numpy.random.seed",
"numpy.concatenate",
"numpy.arange",
"numpy.random.shuffle"
]
| [((312, 330), 'requests.Session', 'requests.Session', ([], {}), '()\n', (328, 330), False, 'import requests\n'), ((2396, 2418), 'numpy.random.seed', 'np.random.seed', (['(484347)'], {}), '(484347)\n', (2410, 2418), True, 'import numpy as np\n'), ((2538, 2566), 'numpy.random.shuffle', 'np.random.shuffle', (['font_idxs'], {}), '(font_idxs)\n', (2555, 2566), True, 'import numpy as np\n'), ((6284, 6304), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (6299, 6304), False, 'from PIL import Image\n'), ((2091, 2106), 'pathlib.Path', 'Path', (['self.root'], {}), '(self.root)\n', (2095, 2106), False, 'from pathlib import Path\n'), ((3215, 3238), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (3232, 3238), True, 'import numpy as np\n'), ((4117, 4140), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (4134, 4140), True, 'import numpy as np\n'), ((4614, 4661), 'numpy.concatenate', 'np.concatenate', (['[train_data, self.data]'], {'axis': '(0)'}), '([train_data, self.data], axis=0)\n', (4628, 4661), True, 'import numpy as np\n'), ((5235, 5255), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (5250, 5255), False, 'from PIL import Image\n'), ((5746, 5766), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (5761, 5766), False, 'from PIL import Image\n'), ((2968, 2988), 'numpy.arange', 'np.arange', (['num_fonts'], {}), '(num_fonts)\n', (2977, 2988), True, 'import numpy as np\n'), ((4743, 4766), 'numpy.square', 'np.square', (['(all_imgs - x)'], {}), '(all_imgs - x)\n', (4752, 4766), True, 'import numpy as np\n'), ((2878, 2900), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (2887, 2900), True, 'import numpy as np\n')] |
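
# Rough usage sketch for the Fonts dataset above; the data root, transform, and
# batch size are assumptions made for the example.
from torch.utils.data import DataLoader
from torchvision import transforms

transform = transforms.Compose([transforms.ToTensor()])
train_set = Fonts('./data', split='train', transform=transform,
                  download=True, num_examples=2500)
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
for img, target, domain_label in train_loader:
    # img: (64, 1, H, W) glyph batch; target: character class; domain_label: font index
    break
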
import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
def tmp(x):
y = (x.split(';')[7]).split(',')
return (y)
def forf(x):
for i in x:
yield (i,1)
def topprint(time,rdd):
res1=rdd.take(5)
count=0
for i in res1:
if(count==4):
print("%s" % i)
else:
print("%s" % i,end=',')
count = count +1
conf=SparkConf()
conf.setAppName("BigData")
sc=SparkContext(conf=conf)
ssc=StreamingContext(sc,int(sys.argv[1]))
ssc.checkpoint("/checkpoint_BIGDATA")
'''
#Selecting a window :
#outpu3:
inputStream=ssc.socketTextStream("localhost",9009)
dataStream = inputStream.window(int(sys.argv[1]),int(sys.argv[2]))
tweet=dataStream.map(tmp)
septweet=tweet.flatMap(forf)
count=septweet.reduceByKey(lambda x,y:x+y)
sortcount = count.transform(lambda rdd :rdd.sortBy(lambda a:a[1],ascending=False))
tweet1=sortcount.filter(lambda w:w[0] != '')
tweet1.pprint()
res = tweet1.map(lambda a : a[0])
res.foreachRDD(topprint)
#res.pprint(3)
'''
'''
#Selecting a datastream and then reducing by window:
#outpu2
dataStream=ssc.socketTextStream("localhost",9009)
tweet=dataStream.map(tmp)
septweet=tweet.flatMap(forf)
#septweet.pprint()
count=septweet.reduceByKeyAndWindow(lambda x,y:x+y,int(sys.argv[1]),int(sys.argv[2]))
sortcount = count.transform(lambda rdd :rdd.sortBy(lambda a:a[0],ascending=True))
sortcount = count.transform(lambda rdd :rdd.sortBy(lambda a:a[1],ascending=False))
tweet1=sortcount.filter(lambda w:w[0] != '')
#tweet1.pprint()
res = tweet1.map(lambda a : a[0])
res.foreachRDD(topprint)
'''
#Try in outpu1
inputStream=ssc.socketTextStream("localhost",9009)
dataStream = inputStream.window(int(sys.argv[2]),int(sys.argv[1]))
tweet=dataStream.map(tmp)
septweet=tweet.flatMap(forf)
count=septweet.reduceByKey(lambda x,y:x+y)
sortcount = count.transform(lambda rdd :rdd.sortBy(lambda a:a[0],ascending=True))
sortcount = sortcount.transform(lambda rdd :rdd.sortBy(lambda a:a[1],ascending=False))
tweet1=sortcount.filter(lambda w:w[0] != '')
#tweet1.pprint()
res = tweet1.map(lambda a : a[0])
res.foreachRDD(topprint)
#TO maintain state
# totalcount=tweet.updateStateByKey(aggregate_tweets_count)
# totalcount.pprint()
#To Perform operation on each RDD
# totalcount.foreachRDD(process_rdd)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
| [
"findspark.init",
"pyspark.SparkContext",
"pyspark.SparkConf"
]
| [((17, 33), 'findspark.init', 'findspark.init', ([], {}), '()\n', (31, 33), False, 'import findspark\n'), ((457, 468), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (466, 468), False, 'from pyspark import SparkConf, SparkContext\n'), ((499, 522), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (511, 522), False, 'from pyspark import SparkConf, SparkContext\n')] |
# Generated by Django 4.0.1 on 2022-01-20 13:10
import courses.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0002_video_text_image_file_content'),
]
operations = [
migrations.AlterModelOptions(
name='content',
options={'ordering': ['order']},
),
migrations.AlterModelOptions(
name='module',
options={'ordering': ['order']},
),
migrations.AddField(
model_name='content',
name='order',
field=courses.fields.OrderField(blank=True, default=0),
preserve_default=False,
),
migrations.AddField(
model_name='module',
name='order',
field=courses.fields.OrderField(blank=True, default=0),
preserve_default=False,
),
]
| [
"django.db.migrations.AlterModelOptions"
]
| [((260, 337), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""content"""', 'options': "{'ordering': ['order']}"}), "(name='content', options={'ordering': ['order']})\n", (288, 337), False, 'from django.db import migrations\n'), ((382, 458), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""module"""', 'options': "{'ordering': ['order']}"}), "(name='module', options={'ordering': ['order']})\n", (410, 458), False, 'from django.db import migrations\n')] |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library for retrieving and modifying configuration settings."""
import os
import textwrap
from google_compute_engine import file_utils
from google_compute_engine.compat import parser
CONFIG = '/etc/default/instance_configs.cfg'
class ConfigManager(object):
"""Process the configuration defaults."""
def __init__(self, config_file=None, config_header=None):
"""Constructor.
Args:
config_file: string, the location of the config file.
config_header: string, the message to write at the top of the config.
"""
self.config_file = config_file or CONFIG
self.config_header = config_header
self.config = parser.SafeConfigParser()
self.config.read(self.config_file)
def _AddHeader(self, fp):
"""Create a file header in the config.
Args:
fp: int, a file pointer for writing the header.
"""
text = textwrap.wrap(
textwrap.dedent(self.config_header), break_on_hyphens=False)
fp.write('\n'.join(['# ' + line for line in text]))
fp.write('\n\n')
def GetOptionString(self, section, option):
"""Get the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to retrieve the value of.
Returns:
string, the value of the option or None if the option doesn't exist.
"""
if self.config.has_option(section, option):
return self.config.get(section, option)
else:
return None
def GetOptionBool(self, section, option):
"""Get the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to retrieve the value of.
Returns:
bool, True if the option is enabled or not set.
"""
return (not self.config.has_option(section, option) or
self.config.getboolean(section, option))
def SetOption(self, section, option, value, overwrite=True):
"""Set the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to set the value of.
value: string, the value to set the option.
overwrite: bool, True to overwrite an existing value in the config file.
"""
if not overwrite and self.config.has_option(section, option):
return
if not self.config.has_section(section):
self.config.add_section(section)
self.config.set(section, option, str(value))
def WriteConfig(self, config_file=None):
"""Write the config values to a given file.
Args:
config_file: string, the file location of the config file to write.
"""
config_file = config_file or self.config_file
config_name = os.path.splitext(os.path.basename(config_file))[0]
config_lock = '/var/lock/google_%s.lock' % config_name
with file_utils.LockFile(config_lock):
with open(config_file, 'w') as config_fp:
if self.config_header:
self._AddHeader(config_fp)
self.config.write(config_fp)
| [
"google_compute_engine.file_utils.LockFile",
"textwrap.dedent",
"os.path.basename",
"google_compute_engine.compat.parser.SafeConfigParser"
]
| [((1260, 1285), 'google_compute_engine.compat.parser.SafeConfigParser', 'parser.SafeConfigParser', ([], {}), '()\n', (1283, 1285), False, 'from google_compute_engine.compat import parser\n'), ((1504, 1539), 'textwrap.dedent', 'textwrap.dedent', (['self.config_header'], {}), '(self.config_header)\n', (1519, 1539), False, 'import textwrap\n'), ((3485, 3517), 'google_compute_engine.file_utils.LockFile', 'file_utils.LockFile', (['config_lock'], {}), '(config_lock)\n', (3504, 3517), False, 'from google_compute_engine import file_utils\n'), ((3383, 3412), 'os.path.basename', 'os.path.basename', (['config_file'], {}), '(config_file)\n', (3399, 3412), False, 'import os\n')] |
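
# Illustrative use of the ConfigManager record above; the file path, section,
# and option names are invented for the example.
cm = ConfigManager(config_file='/tmp/instance_configs.cfg',
                   config_header='Example header written above the settings.')
cm.SetOption('Daemons', 'accounts_daemon', 'false', overwrite=False)
enabled = cm.GetOptionBool('Daemons', 'accounts_daemon')  # False once the option is set
cm.WriteConfig()  # locks /var/lock/google_instance_configs.lock, then writes the file
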
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import re
import numpy as np
import torch
ACTIVATION_AMAX_NUM = 72
INT8O_KERNEL_NUM = 5
INT8O_GEMM_NUM = 7
TRT_FUSED_MHA_AMAX_NUM = 3
SCALE_RESERVE_NUM = 8
def extract_amaxlist(init_dict, depths, ths_path='../lib/libpyt_swintransformer.so', verbose=True):
# print("Quantizing checkpoint ...")
torch.classes.load_library(ths_path)
weight_quantize = torch.ops.fastertransformer.swin_weight_quantize
layer_num = len(depths)
amaxTotalNum = ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM + INT8O_GEMM_NUM + 1 + TRT_FUSED_MHA_AMAX_NUM + SCALE_RESERVE_NUM
kernel_name_list = ["attn.qkv",
"attn.proj",
"mlp.fc1",
"mlp.fc2"]
amax_name_list = ["attn.qkv._input_quantizer",
"attn.qkv._aftergemm_quantizer",
"attn.proj._input_quantizer",
"attn.proj._aftergemm_quantizer",
"attn.matmul_q_input_quantizer",
"attn.matmul_k_input_quantizer",
"attn.matmul_v_input_quantizer",
"attn.matmul_a_input_quantizer",
"attn.softmax_input_quantizer",
"mlp.fc1._input_quantizer",
"mlp.fc1._aftergemm_quantizer",
"mlp.fc2._input_quantizer",
"mlp.fc2._aftergemm_quantizer",
"add1_residual_input_quantizer",
"add2_residual_input_quantizer"
]
int8O_gemm_weight_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_weight_list = ["attn.qkv",
"attn.proj",
"mlp.fc1",
"mlp.fc2",
"attn.matmul_k_input_quantizer",
"attn.matmul_v_input_quantizer"]
int8O_gemm_input_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_input_list = ["attn.qkv._input_quantizer",
"attn.proj._input_quantizer",
"mlp.fc1._input_quantizer",
"mlp.fc2._input_quantizer",
"attn.matmul_q_input_quantizer",
"attn.matmul_a_input_quantizer"]
int8O_gemm_output_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
int8O_gemm_output_list = ["attn.qkv._aftergemm_quantizer",
"attn.proj._aftergemm_quantizer",
"mlp.fc1._aftergemm_quantizer",
"mlp.fc2._aftergemm_quantizer",
"attn.softmax_input_quantizer",
"attn.proj._input_quantizer"]
downsample_input = "downsample.reduction._input_quantizer"
downsample_weight = "downsample.reduction._weight_quantizer"
downsample_out = "downsample.reduction._aftergemm_quantizer"
factor = 1000000.0
for i in range(layer_num):
for depth in range(depths[i]):
amaxList = np.zeros([amaxTotalNum]).astype(np.float32)
amax_id = 0
for amax_name in amax_name_list:
quant_max = init_dict["layers.{}.blocks.{}.{}._amax".format(i, depth, amax_name)].item()
amax = abs(quant_max)#round(abs(quant_max)*factor)/factor
if amax_name in int8O_gemm_input_list:
int8O_gemm_input_amax_list[int8O_gemm_input_list.index(amax_name)] = amax
if amax_name in int8O_gemm_output_list:
int8O_gemm_output_amax_list[int8O_gemm_output_list.index(amax_name)] = amax
if amax_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(amax_name)] = amax
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
# if verbose:
# print(i, amax_name)
# print('quant_max:', quant_max)
# print('amax:', amax)
if i != layer_num - 1:
amax = init_dict["layers.{}.{}._amax".format(i, downsample_input)].item()
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
amax = init_dict["layers.{}.{}._amax".format(i, downsample_out)].item()
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
else:
amax_id += 8
if verbose:
print("done process layer_{} block_{} activation amax".format(i, depth))
#kernel amax starts from ACTIVATION_AMAX_NUM
assert amax_id == 68
amax_id = ACTIVATION_AMAX_NUM
for kernel_id, kernel_name in enumerate(kernel_name_list):
kernel = init_dict["layers.{}.blocks.{}.{}.weight".format(i, depth, kernel_name)].transpose(-1, -2).contiguous()
quant_max2 = init_dict["layers.{}.blocks.{}.{}._weight_quantizer._amax".format(i, depth, kernel_name)]
amax2 = abs(quant_max2)
# if (amax2.dim() == 0):
# quant_max_processed = torch.full((kernel.size(1),), amax2.item(), dtype=amax2.dtype, device=amax2.device)
# else:
# quant_max_processed = amax2.view(-1)
kernel_processed = weight_quantize(kernel, amax2.cuda())
init_dict["layers.{}.blocks.{}.{}.weight".format(i, depth, kernel_name)] = kernel_processed
if kernel_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(kernel_name)] = amax2.item()
amaxList[amax_id] = amax2
amax_id += 1
# if verbose:
# print(i, kernel_name)
# print('kernel:', kernel)
# print('quant_max2:', quant_max2)
# print('quant_max_processed_:', quant_max_processed)
if i != layer_num - 1:
amaxList[amax_id] = init_dict["layers.{}.downsample.reduction._weight_quantizer._amax".format(i)].item()
amax_id += 1
assert amax_id == ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM
#for int8O gemm deQuant
for j in range(INT8O_GEMM_NUM - 1):
amaxList[amax_id] = (int8O_gemm_input_amax_list[j]*int8O_gemm_weight_amax_list[j])/(127.0*int8O_gemm_output_amax_list[j])
# print('layernum:', i, 'j:', j, ' gemm_int8IO_scale:',amaxList[amax_id])
# print(int8O_gemm_input_amax_list[j], int8O_gemm_weight_amax_list[j], int8O_gemm_output_amax_list[j])
amax_id += 1
if i != layer_num - 1:
patchMerge_i = init_dict["layers.{}.{}._amax".format(i, downsample_input)].item()
patchMerge_w = init_dict["layers.{}.{}._amax".format(i, downsample_weight)].item()
patchMerge_o = init_dict["layers.{}.{}._amax".format(i, downsample_out)].item()
amaxList[amax_id] = (patchMerge_i * patchMerge_w) / (127 * patchMerge_o)
amax_id += 1
assert amax_id == ACTIVATION_AMAX_NUM + INT8O_KERNEL_NUM + INT8O_GEMM_NUM
amax_id += 1
#for trt fused MHA amax
#### QKV_addBias_amax
# amaxList[amax_id] = np.maximum(np.maximum(amaxList[16],amaxList[20]), amaxList[24])
# amax_id += 1
# #### softmax amax
# amaxList[amax_id] = amaxList[28]
# amax_id += 1
# #### bmm2 amax
# amaxList[amax_id] = amaxList[8]
# amax_id += 1
qkvMax = np.maximum(np.maximum(amaxList[16],amaxList[20]), amaxList[24])
amaxList[amax_id] = amaxList[16] * amaxList[20] / (127.0 * 127.0)
amax_id += 1
amaxList[amax_id] = 127.0 / amaxList[28]
amax_id += 1
amaxList[amax_id] = amaxList[24] * amaxList[28] / (127.0 * amaxList[8])
amax_id += 1
init_dict["layers.{}.blocks.{}.amaxList".format(i, depth)] = torch.tensor(amaxList, dtype=torch.float32)
if verbose:
print("done process layer_{} block_{} kernel weight".format(i, depth))
if i != layer_num - 1:
kernel = init_dict["layers.{}.downsample.reduction.weight".format(i)]
quant_max2 = init_dict["layers.{}.downsample.reduction._weight_quantizer._amax".format(i)]
amax2 = abs(quant_max2)
kernel = kernel.transpose(-1, -2).contiguous()
kernel_processed = weight_quantize(kernel, amax2.cuda())
init_dict["layers.{}.downsample.reduction.weight".format(i)] = kernel_processed
# print("Quantizing checkpoint done.")
return init_dict
if __name__ == '__main__':
weights = torch.load('pytorch_model.bin')
    extract_amaxlist(weights, [2, 2, 6, 2])
| [
"torch.load",
"torch.tensor",
"numpy.zeros",
"torch.classes.load_library",
"numpy.maximum"
]
| [((946, 982), 'torch.classes.load_library', 'torch.classes.load_library', (['ths_path'], {}), '(ths_path)\n', (972, 982), False, 'import torch\n'), ((10265, 10296), 'torch.load', 'torch.load', (['"""pytorch_model.bin"""'], {}), "('pytorch_model.bin')\n", (10275, 10296), False, 'import torch\n'), ((9514, 9557), 'torch.tensor', 'torch.tensor', (['amaxList'], {'dtype': 'torch.float32'}), '(amaxList, dtype=torch.float32)\n', (9526, 9557), False, 'import torch\n'), ((9085, 9123), 'numpy.maximum', 'np.maximum', (['amaxList[16]', 'amaxList[20]'], {}), '(amaxList[16], amaxList[20])\n', (9095, 9123), True, 'import numpy as np\n'), ((3728, 3752), 'numpy.zeros', 'np.zeros', (['[amaxTotalNum]'], {}), '([amaxTotalNum])\n', (3736, 3752), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import time
from ...actors import new_client, FunctionActor
logger = logging.getLogger(__name__)
class K8SPodsIPWatcher(object):
"""
Pods watcher class, compatible with SchedulerDiscoverer
"""
dynamic = True
def __init__(self, k8s_config=None, k8s_namespace=None, label_selector=None):
from kubernetes import config, client
from gevent.threadpool import ThreadPool
if k8s_config is not None:
self._k8s_config = k8s_config
elif os.environ.get('KUBE_API_ADDRESS'):
self._k8s_config = client.Configuration()
self._k8s_config.host = os.environ['KUBE_API_ADDRESS']
else:
self._k8s_config = config.load_incluster_config()
self._k8s_namespace = k8s_namespace or os.environ.get('MARS_K8S_POD_NAMESPACE') or 'default'
self._label_selector = label_selector
self._client = client.CoreV1Api(client.ApiClient(self._k8s_config))
self._pool = ThreadPool(1)
self._pod_to_ep = None
def __reduce__(self):
return type(self), (self._k8s_config, self._k8s_namespace, self._label_selector)
def _extract_pod_name_ep(self, pod_data):
svc_port = pod_data['spec']['containers'][0]['ports'][0]['container_port']
return pod_data['metadata']['name'], '%s:%s' % (pod_data['status']['pod_ip'], svc_port)
@staticmethod
def _extract_pod_ready(obj_data):
# if conditions not supported, always return True
if 'status' not in obj_data or 'conditions' not in obj_data['status']:
return True
return any(cond['type'] == 'Ready' and cond['status'] == 'True'
for cond in obj_data['status']['conditions'])
def _get_pod_to_ep(self):
query = self._pool.spawn(self._client.list_namespaced_pod,
namespace=self._k8s_namespace,
label_selector=self._label_selector).result().to_dict()
result = dict()
for el in query['items']:
name, pod_ep = self._extract_pod_name_ep(el)
if pod_ep is not None and not self._extract_pod_ready(el):
pod_ep = None
result[name] = pod_ep
return result
def get(self, update=False):
if self._pod_to_ep is None or update:
self._pod_to_ep = self._get_pod_to_ep()
return sorted(a for a in self._pod_to_ep.values() if a is not None)
def is_all_ready(self):
self.get(True)
return all(a is not None for a in self._pod_to_ep.values())
def watch(self):
from urllib3.exceptions import ReadTimeoutError
from kubernetes import watch
cur_pods = set(self.get(True))
w = watch.Watch()
while True:
# when some schedulers are not ready, we refresh faster
linger = 10 if self.is_all_ready() else 1
streamer = w.stream(self._client.list_namespaced_pod,
namespace=self._k8s_namespace,
label_selector=self._label_selector,
timeout_seconds=linger)
while True:
try:
event = self._pool.spawn(next, streamer, StopIteration).result()
if event is StopIteration:
raise StopIteration
except (ReadTimeoutError, StopIteration):
new_pods = set(self.get(True))
if new_pods != cur_pods:
cur_pods = new_pods
yield self.get(False)
break
except: # noqa: E722
logger.exception('Unexpected error when watching on kubernetes')
break
obj_dict = event['object'].to_dict()
pod_name, endpoint = self._extract_pod_name_ep(obj_dict)
self._pod_to_ep[pod_name] = endpoint \
if endpoint and self._extract_pod_ready(obj_dict) else None
yield self.get(False)
class ReadinessActor(FunctionActor):
"""
Dummy actor indicating service start
"""
@classmethod
def default_uid(cls):
return 'k:0:%s' % cls.__name__
class K8SServiceMixin:
@staticmethod
def write_pid_file():
with open('/tmp/mars-service.pid', 'w') as pid_file:
pid_file.write(str(os.getpid()))
def wait_all_schedulers_ready(self):
"""
Wait till all containers are ready, both in kubernetes and in ClusterInfoActor
"""
from ...scheduler.utils import SchedulerClusterInfoActor
# check if all schedulers are ready using Kubernetes API
sleep_fun = (getattr(self, 'pool', None) or time).sleep
while not self.scheduler_discoverer.is_all_ready():
sleep_fun(1)
kube_schedulers = self.scheduler_discoverer.get()
logger.debug('Schedulers all ready in kubernetes, waiting ClusterInfoActor to be ready')
# check if all schedulers are registered in ClusterInfoActor
actor_client = new_client()
while True:
cluster_info = actor_client.actor_ref(
SchedulerClusterInfoActor.default_uid(), address=random.choice(kube_schedulers))
cluster_info_schedulers = cluster_info.get_schedulers()
if set(cluster_info_schedulers) == set(kube_schedulers):
from ...cluster_info import INITIAL_SCHEDULER_FILE
with open(INITIAL_SCHEDULER_FILE, 'w') as scheduler_file:
scheduler_file.write(','.join(cluster_info_schedulers))
logger.debug('Scheduler detection finished. Result: %r', kube_schedulers)
break
sleep_fun(1) # pragma: no cover
def create_scheduler_discoverer(self):
self.scheduler_discoverer = K8SPodsIPWatcher(label_selector='name=marsscheduler')
| [
"logging.getLogger",
"random.choice",
"kubernetes.config.load_incluster_config",
"kubernetes.watch.Watch",
"os.environ.get",
"kubernetes.client.Configuration",
"kubernetes.client.ApiClient",
"os.getpid",
"gevent.threadpool.ThreadPool"
]
| [((731, 758), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (748, 758), False, 'import logging\n'), ((1635, 1648), 'gevent.threadpool.ThreadPool', 'ThreadPool', (['(1)'], {}), '(1)\n', (1645, 1648), False, 'from gevent.threadpool import ThreadPool\n'), ((3396, 3409), 'kubernetes.watch.Watch', 'watch.Watch', ([], {}), '()\n', (3407, 3409), False, 'from kubernetes import watch\n'), ((1157, 1191), 'os.environ.get', 'os.environ.get', (['"""KUBE_API_ADDRESS"""'], {}), "('KUBE_API_ADDRESS')\n", (1171, 1191), False, 'import os\n'), ((1438, 1478), 'os.environ.get', 'os.environ.get', (['"""MARS_K8S_POD_NAMESPACE"""'], {}), "('MARS_K8S_POD_NAMESPACE')\n", (1452, 1478), False, 'import os\n'), ((1578, 1612), 'kubernetes.client.ApiClient', 'client.ApiClient', (['self._k8s_config'], {}), '(self._k8s_config)\n', (1594, 1612), False, 'from kubernetes import config, client\n'), ((1224, 1246), 'kubernetes.client.Configuration', 'client.Configuration', ([], {}), '()\n', (1244, 1246), False, 'from kubernetes import config, client\n'), ((1359, 1389), 'kubernetes.config.load_incluster_config', 'config.load_incluster_config', ([], {}), '()\n', (1387, 1389), False, 'from kubernetes import config, client\n'), ((5086, 5097), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5095, 5097), False, 'import os\n'), ((5930, 5960), 'random.choice', 'random.choice', (['kube_schedulers'], {}), '(kube_schedulers)\n', (5943, 5960), False, 'import random\n')] |
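
# Sketch of how the watcher in the record above is typically driven; the label
# selector matches create_scheduler_discoverer, everything else is assumed.
watcher = K8SPodsIPWatcher(label_selector='name=marsscheduler')
endpoints = watcher.get(update=True)       # e.g. ['10.0.0.12:7103', '10.0.0.13:7103']
if not watcher.is_all_ready():
    pass                                   # some pods exist but are not Ready yet
for current_endpoints in watcher.watch():  # generator; yields when pod membership changes
    print(current_endpoints)
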
from unittest import TestCase
import torch
import transformers
from model.bert_model import BertModel
class TestBertModel(TestCase):
def test_forward(self):
# Bert Config
vocab_size = 10
sequence_len = 20
batch = 32
num_classes = 3
expected_shape = (batch, sequence_len, num_classes)
input_batch = torch.randint(low=0, high=vocab_size-1, size=(batch, sequence_len))
config= transformers.BertConfig(vocab_size=vocab_size,hidden_size=10, num_hidden_layers=1, num_attention_heads=1,num_labels=num_classes)
sut = BertModel(None, None, bert_config=config)
# Act
actual = sut.forward(input_batch)[0]
# Assert
self.assertEqual(expected_shape, actual.shape)
| [
"torch.randint",
"transformers.BertConfig",
"model.bert_model.BertModel"
]
| [((365, 434), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(vocab_size - 1)', 'size': '(batch, sequence_len)'}), '(low=0, high=vocab_size - 1, size=(batch, sequence_len))\n', (378, 434), False, 'import torch\n'), ((449, 583), 'transformers.BertConfig', 'transformers.BertConfig', ([], {'vocab_size': 'vocab_size', 'hidden_size': '(10)', 'num_hidden_layers': '(1)', 'num_attention_heads': '(1)', 'num_labels': 'num_classes'}), '(vocab_size=vocab_size, hidden_size=10,\n num_hidden_layers=1, num_attention_heads=1, num_labels=num_classes)\n', (472, 583), False, 'import transformers\n'), ((592, 633), 'model.bert_model.BertModel', 'BertModel', (['None', 'None'], {'bert_config': 'config'}), '(None, None, bert_config=config)\n', (601, 633), False, 'from model.bert_model import BertModel\n')] |
import json
import os
import tempfile
from unittest import TestCase
import pytest
from donjuan import Dungeon, DungeonRandomizer, Renderer
class RendererTest(TestCase):
def setUp(self):
super().setUp()
self.TEMP_DIR = tempfile.mkdtemp()
def test_smoke(self):
r = Renderer()
assert r is not None
def test_scale(self):
r = Renderer(scale=3)
assert r.scale == 3
@pytest.mark.slow
def test_render_dummy_dungeon(self):
inpath = os.path.abspath(os.path.dirname(__file__))
inpath = os.path.join(inpath, "fixtures/dummy_dungeon.json")
with open(inpath, "r") as f:
darr = json.load(f)["dungeon"]
n_rows = len(darr)
n_cols = len(darr)
dungeon = Dungeon(n_rows=n_rows, n_cols=n_cols)
for i in range(n_rows):
for j in range(n_cols):
dungeon.grid.cells[i][j].filled = bool(darr[i][j])
# Render and check for the file
fp = os.path.join(self.TEMP_DIR, "rendered_dungeon.png")
r = Renderer()
r.render(dungeon, file_path=fp)
assert os.path.exists(fp)
@pytest.mark.slow
def test_render_dungeon_with_rooms(self):
randomizer = DungeonRandomizer()
dungeon = Dungeon(10, 10, randomizers=[randomizer])
dungeon.randomize()
dungeon.emplace_rooms()
renderer = Renderer()
# Render and check for the file
fp = os.path.join(self.TEMP_DIR, "rendered_dungeon.png")
renderer.render(dungeon, file_path=fp)
assert os.path.exists(fp)
| [
"donjuan.Renderer",
"os.path.exists",
"os.path.join",
"os.path.dirname",
"tempfile.mkdtemp",
"donjuan.DungeonRandomizer",
"json.load",
"donjuan.Dungeon"
]
| [((242, 260), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (258, 260), False, 'import tempfile\n'), ((300, 310), 'donjuan.Renderer', 'Renderer', ([], {}), '()\n', (308, 310), False, 'from donjuan import Dungeon, DungeonRandomizer, Renderer\n'), ((379, 396), 'donjuan.Renderer', 'Renderer', ([], {'scale': '(3)'}), '(scale=3)\n', (387, 396), False, 'from donjuan import Dungeon, DungeonRandomizer, Renderer\n'), ((566, 617), 'os.path.join', 'os.path.join', (['inpath', '"""fixtures/dummy_dungeon.json"""'], {}), "(inpath, 'fixtures/dummy_dungeon.json')\n", (578, 617), False, 'import os\n'), ((770, 807), 'donjuan.Dungeon', 'Dungeon', ([], {'n_rows': 'n_rows', 'n_cols': 'n_cols'}), '(n_rows=n_rows, n_cols=n_cols)\n', (777, 807), False, 'from donjuan import Dungeon, DungeonRandomizer, Renderer\n'), ((997, 1048), 'os.path.join', 'os.path.join', (['self.TEMP_DIR', '"""rendered_dungeon.png"""'], {}), "(self.TEMP_DIR, 'rendered_dungeon.png')\n", (1009, 1048), False, 'import os\n'), ((1061, 1071), 'donjuan.Renderer', 'Renderer', ([], {}), '()\n', (1069, 1071), False, 'from donjuan import Dungeon, DungeonRandomizer, Renderer\n'), ((1127, 1145), 'os.path.exists', 'os.path.exists', (['fp'], {}), '(fp)\n', (1141, 1145), False, 'import os\n'), ((1236, 1255), 'donjuan.DungeonRandomizer', 'DungeonRandomizer', ([], {}), '()\n', (1253, 1255), False, 'from donjuan import Dungeon, DungeonRandomizer, Renderer\n'), ((1274, 1315), 'donjuan.Dungeon', 'Dungeon', (['(10)', '(10)'], {'randomizers': '[randomizer]'}), '(10, 10, randomizers=[randomizer])\n', (1281, 1315), False, 'from donjuan import Dungeon, DungeonRandomizer, Renderer\n'), ((1395, 1405), 'donjuan.Renderer', 'Renderer', ([], {}), '()\n', (1403, 1405), False, 'from donjuan import Dungeon, DungeonRandomizer, Renderer\n'), ((1460, 1511), 'os.path.join', 'os.path.join', (['self.TEMP_DIR', '"""rendered_dungeon.png"""'], {}), "(self.TEMP_DIR, 'rendered_dungeon.png')\n", (1472, 1511), False, 'import os\n'), ((1574, 1592), 'os.path.exists', 'os.path.exists', (['fp'], {}), '(fp)\n', (1588, 1592), False, 'import os\n'), ((522, 547), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (537, 547), False, 'import os\n'), ((674, 686), 'json.load', 'json.load', (['f'], {}), '(f)\n', (683, 686), False, 'import json\n')] |
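Outside the test harness, the same donjuan calls chain into a short script; a sketch restricted to the API exercised in the tests above.

from donjuan import Dungeon, DungeonRandomizer, Renderer

# Randomize a 10x10 dungeon, emplace its rooms, and render it to a PNG,
# mirroring the sequence used in test_render_dungeon_with_rooms above.
dungeon = Dungeon(10, 10, randomizers=[DungeonRandomizer()])
dungeon.randomize()
dungeon.emplace_rooms()
Renderer(scale=3).render(dungeon, file_path="rendered_dungeon.png")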
# Copyright 2016 Rackspace Australia
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import jsonschema
import os
import requests
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
class fake_result(object):
def __init__(self, result):
self.status_code = 200
self.text = jsonutils.dumps(result)
real_request = requests.request
def fake_request(obj, url, method, **kwargs):
if url.startswith('http://127.0.0.1:123'):
return fake_result({'a': 1, 'b': 'foo'})
if url.startswith('http://127.0.0.1:124'):
return fake_result({'c': 3})
if url.startswith('http://127.0.0.1:125'):
return fake_result(jsonutils.loads(kwargs.get('data', '{}')))
return real_request(method, url, **kwargs)
class MetadataTest(test.TestCase, integrated_helpers.InstanceHelperMixin):
def setUp(self):
super(MetadataTest, self).setUp()
fake_image.stub_out_image_service(self)
self.addCleanup(fake_image.FakeImageService_reset)
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
self.start_service('conductor')
self.start_service('scheduler')
self.api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1')).api
self.start_service('compute')
# create a server for the tests
server = self._build_server(name='test')
server = self.api.post_server({'server': server})
self.server = self._wait_for_state_change(server, 'ACTIVE')
self.api_fixture = self.useFixture(nova_fixtures.OSMetadataServer())
self.md_url = self.api_fixture.md_url
# make sure that the metadata service returns information about the
# server we created above
def fake_get_fixed_ip_by_address(self, ctxt, address):
return {'instance_uuid': server['id']}
self.useFixture(
fixtures.MonkeyPatch(
'nova.network.neutron.API.get_fixed_ip_by_address',
fake_get_fixed_ip_by_address))
def test_lookup_metadata_root_url(self):
res = requests.request('GET', self.md_url, timeout=5)
self.assertEqual(200, res.status_code)
def test_lookup_metadata_openstack_url(self):
url = '%sopenstack' % self.md_url
res = requests.request('GET', url, timeout=5,
headers={'X-Forwarded-For': '127.0.0.2'})
self.assertEqual(200, res.status_code)
def test_lookup_metadata_data_url(self):
url = '%sopenstack/latest/meta_data.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
j = jsonutils.loads(res.text)
self.assertIn('hostname', j)
self.assertEqual('test.novalocal', j['hostname'])
def test_lookup_external_service(self):
self.flags(
vendordata_providers=['StaticJSON', 'DynamicJSON'],
vendordata_dynamic_targets=[
'testing@http://127.0.0.1:123',
'hamster@http://127.0.0.1:123'
],
group='api'
)
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.Session.request', fake_request))
url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
j = jsonutils.loads(res.text)
self.assertEqual({}, j['static'])
self.assertEqual(1, j['testing']['a'])
self.assertEqual('foo', j['testing']['b'])
self.assertEqual(1, j['hamster']['a'])
self.assertEqual('foo', j['hamster']['b'])
def test_lookup_external_service_no_overwrite(self):
self.flags(
vendordata_providers=['DynamicJSON'],
vendordata_dynamic_targets=[
'testing@http://127.0.0.1:123',
'testing@http://127.0.0.1:124'
],
group='api'
)
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.Session.request', fake_request))
url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
j = jsonutils.loads(res.text)
self.assertNotIn('static', j)
self.assertEqual(1, j['testing']['a'])
self.assertEqual('foo', j['testing']['b'])
self.assertNotIn('c', j['testing'])
def test_lookup_external_service_passes_data(self):
# Much of the data we pass to the REST service is missing because of
# the way we've created the fake instance, but we should at least try
# and ensure we're passing _some_ data through to the external REST
# service.
self.flags(
vendordata_providers=['DynamicJSON'],
vendordata_dynamic_targets=[
'testing@http://127.0.0.1:125'
],
group='api'
)
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.Session.request', fake_request))
url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
j = jsonutils.loads(res.text)
self.assertIn('instance-id', j['testing'])
self.assertTrue(uuidutils.is_uuid_like(j['testing']['instance-id']))
self.assertIn('hostname', j['testing'])
self.assertEqual(self.server['tenant_id'], j['testing']['project-id'])
self.assertIn('metadata', j['testing'])
self.assertIn('image-id', j['testing'])
self.assertIn('user-data', j['testing'])
def test_network_data_matches_schema(self):
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.Session.request', fake_request))
url = '%sopenstack/latest/network_data.json' % self.md_url
res = requests.request('GET', url, timeout=5)
self.assertEqual(200, res.status_code)
# load the jsonschema for network_data
schema_file = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../doc/api_schemas/network_data.json"))
with open(schema_file, 'rb') as f:
schema = jsonutils.load(f)
jsonschema.validate(res.json(), schema)
| [
"fixtures.MonkeyPatch",
"nova.tests.fixtures.OSAPIFixture",
"nova.tests.unit.image.fake.stub_out_image_service",
"nova.tests.functional.fixtures.PlacementFixture",
"nova.tests.fixtures.NeutronFixture",
"requests.request",
"oslo_utils.uuidutils.is_uuid_like",
"oslo_serialization.jsonutils.dumps",
"oslo_serialization.jsonutils.load",
"os.path.abspath",
"nova.tests.fixtures.OSMetadataServer",
"oslo_serialization.jsonutils.loads"
]
| [((1119, 1142), 'oslo_serialization.jsonutils.dumps', 'jsonutils.dumps', (['result'], {}), '(result)\n', (1134, 1142), False, 'from oslo_serialization import jsonutils\n'), ((1718, 1757), 'nova.tests.unit.image.fake.stub_out_image_service', 'fake_image.stub_out_image_service', (['self'], {}), '(self)\n', (1751, 1757), True, 'from nova.tests.unit.image import fake as fake_image\n'), ((2953, 3000), 'requests.request', 'requests.request', (['"""GET"""', 'self.md_url'], {'timeout': '(5)'}), "('GET', self.md_url, timeout=5)\n", (2969, 3000), False, 'import requests\n'), ((3155, 3240), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'timeout': '(5)', 'headers': "{'X-Forwarded-For': '127.0.0.2'}"}), "('GET', url, timeout=5, headers={'X-Forwarded-For':\n '127.0.0.2'})\n", (3171, 3240), False, 'import requests\n'), ((3439, 3478), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'timeout': '(5)'}), "('GET', url, timeout=5)\n", (3455, 3478), False, 'import requests\n'), ((3538, 3563), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (['res.text'], {}), '(res.text)\n', (3553, 3563), False, 'from oslo_serialization import jsonutils\n'), ((4186, 4225), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'timeout': '(5)'}), "('GET', url, timeout=5)\n", (4202, 4225), False, 'import requests\n'), ((4286, 4311), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (['res.text'], {}), '(res.text)\n', (4301, 4311), False, 'from oslo_serialization import jsonutils\n'), ((5076, 5115), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'timeout': '(5)'}), "('GET', url, timeout=5)\n", (5092, 5115), False, 'import requests\n'), ((5176, 5201), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (['res.text'], {}), '(res.text)\n', (5191, 5201), False, 'from oslo_serialization import jsonutils\n'), ((6110, 6149), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'timeout': '(5)'}), "('GET', url, timeout=5)\n", (6126, 6149), False, 'import requests\n'), ((6210, 6235), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (['res.text'], {}), '(res.text)\n', (6225, 6235), False, 'from oslo_serialization import jsonutils\n'), ((6886, 6925), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'timeout': '(5)'}), "('GET', url, timeout=5)\n", (6902, 6925), False, 'import requests\n'), ((1841, 1875), 'nova.tests.fixtures.NeutronFixture', 'nova_fixtures.NeutronFixture', (['self'], {}), '(self)\n', (1869, 1875), True, 'from nova.tests import fixtures as nova_fixtures\n'), ((1901, 1933), 'nova.tests.functional.fixtures.PlacementFixture', 'func_fixtures.PlacementFixture', ([], {}), '()\n', (1931, 1933), True, 'from nova.tests.functional import fixtures as func_fixtures\n'), ((2413, 2445), 'nova.tests.fixtures.OSMetadataServer', 'nova_fixtures.OSMetadataServer', ([], {}), '()\n', (2443, 2445), True, 'from nova.tests import fixtures as nova_fixtures\n'), ((2756, 2862), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""nova.network.neutron.API.get_fixed_ip_by_address"""', 'fake_get_fixed_ip_by_address'], {}), "('nova.network.neutron.API.get_fixed_ip_by_address',\n fake_get_fixed_ip_by_address)\n", (2776, 2862), False, 'import fixtures\n'), ((4006, 4081), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""keystoneauth1.session.Session.request"""', 'fake_request'], {}), "('keystoneauth1.session.Session.request', fake_request)\n", (4026, 4081), False, 'import fixtures\n'), ((4896, 4971), 'fixtures.MonkeyPatch', 
'fixtures.MonkeyPatch', (['"""keystoneauth1.session.Session.request"""', 'fake_request'], {}), "('keystoneauth1.session.Session.request', fake_request)\n", (4916, 4971), False, 'import fixtures\n'), ((5930, 6005), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""keystoneauth1.session.Session.request"""', 'fake_request'], {}), "('keystoneauth1.session.Session.request', fake_request)\n", (5950, 6005), False, 'import fixtures\n'), ((6311, 6362), 'oslo_utils.uuidutils.is_uuid_like', 'uuidutils.is_uuid_like', (["j['testing']['instance-id']"], {}), "(j['testing']['instance-id'])\n", (6333, 6362), False, 'from oslo_utils import uuidutils\n'), ((6709, 6784), 'fixtures.MonkeyPatch', 'fixtures.MonkeyPatch', (['"""keystoneauth1.session.Session.request"""', 'fake_request'], {}), "('keystoneauth1.session.Session.request', fake_request)\n", (6729, 6784), False, 'import fixtures\n'), ((7253, 7270), 'oslo_serialization.jsonutils.load', 'jsonutils.load', (['f'], {}), '(f)\n', (7267, 7270), False, 'from oslo_serialization import jsonutils\n'), ((2063, 2109), 'nova.tests.fixtures.OSAPIFixture', 'nova_fixtures.OSAPIFixture', ([], {'api_version': '"""v2.1"""'}), "(api_version='v2.1')\n", (2089, 2109), True, 'from nova.tests import fixtures as nova_fixtures\n'), ((7102, 7127), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (7117, 7127), False, 'import os\n')] |
import os
import sys
import json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../bert")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
import tokenization
from config import config
class Model_data_preparation(object):
def __init__(self, DATA_INPUT_DIR="raw_data", DATA_OUTPUT_DIR="SKE_2019_tokened_labeling",
vocab_file_path="vocab.txt", do_lower_case=True,General_Mode = False):
self.bert_tokenizer = tokenization.FullTokenizer(vocab_file=self.get_vocab_file_path(vocab_file_path),
do_lower_case=do_lower_case) # 初始化 bert_token 工具
self.DATA_INPUT_DIR = self.get_data_input_dir(DATA_INPUT_DIR)
self.DATA_OUTPUT_DIR = os.path.join(os.path.dirname(__file__), DATA_OUTPUT_DIR)
self.General_Mode = General_Mode
def get_data_input_dir(self, DATA_INPUT_DIR):
        DATA_INPUT_DIR = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")), DATA_INPUT_DIR)
return DATA_INPUT_DIR
def get_vocab_file_path(self, vocab_file_path):
print(vocab_file_path)
return vocab_file_path
def subject_object_labeling(self, spo_list, text):
def _spo_list_to_spo_predicate_dict(spo_list):
spo_predicate_dict = dict()
for spo_item in spo_list:
predicate = spo_item["predicate"]
subject = spo_item["subject"]
object = spo_item["object"]
spo_predicate_dict.setdefault(predicate, []).append((subject, object))
return spo_predicate_dict
def _gen_event_dic(spo_list):
res = []
res_d = {}
predicate = ""
for spo_item in spo_list:
predicate = spo_item["event"]
if 'time' in spo_item:
time = spo_item["time"]
res.append(('time',time))
if 'location' in spo_item:
location = spo_item["location"]
res.append(('location',location))
if 'participant' in spo_item:
participant = spo_item["participant"]
res.append(('participant',participant))
if 'denoter' in spo_item:
denoter = spo_item["denoter"]
res.append(('denoter',denoter))
if 'object' in spo_item:
object = spo_item["object"]
res.append(('object',object))
res_d[predicate] = res
return res_d
def _index_q_list_in_k_list(q_list, k_list):
"""Known q_list in k_list, find index(first time) of q_list in k_list"""
q_list_length = len(q_list)
k_list_length = len(k_list)
for idx in range(k_list_length - q_list_length + 1):
t = [q == k for q, k in zip(q_list, k_list[idx: idx + q_list_length])]
# print(idx, t)
if all(t):
# print(idx)
idx_start = idx
return idx_start
def _labeling_type(subject_object, so_type):
tokener_error_flag = False
so_tokened = self.bert_tokenizer.tokenize(subject_object)
so_tokened_length = len(so_tokened)
idx_start = _index_q_list_in_k_list(q_list=so_tokened, k_list=text_tokened)
if idx_start is None:
tokener_error_flag = True
'''
实体: "1981年" 原句: "●1981年2月27日,中国人口学会成立"
so_tokened ['1981', '年'] text_tokened ['●', '##19', '##81', '年', '2', '月', '27', '日', ',', '中', '国', '人', '口', '学', '会', '成', '立']
so_tokened 无法在 text_tokened 找到!原因是bert_tokenizer.tokenize 分词增添 “##” 所致!
'''
self.bert_tokener_error_log_f.write(subject_object + " @@ " + text + "\n")
self.bert_tokener_error_log_f.write(str(so_tokened) + " @@ " + str(text_tokened) + "\n")
            else: # mark the start of the entity with B and the remaining positions with I
labeling_list[idx_start] = "B-" + so_type
if so_tokened_length == 2:
labeling_list[idx_start + 1] = "I-" + so_type
elif so_tokened_length >= 3:
labeling_list[idx_start + 1: idx_start + so_tokened_length] = ["I-" + so_type] * (so_tokened_length - 1)
return tokener_error_flag
text_tokened = self.bert_tokenizer.tokenize(text)
text_tokened_not_UNK = self.bert_tokenizer.tokenize_not_UNK(text)
if not self.General_Mode:
spo_predicate_dict = _spo_list_to_spo_predicate_dict(spo_list)
else:
spo_predicate_dict = _gen_event_dic(spo_list)
for predicate, spo_list_form in spo_predicate_dict.items():
tokener_error_flag = False
labeling_list = ["O"] * len(text_tokened)
if not self.General_Mode:
for (spo_subject, spo_object) in spo_list_form:
flag_A = _labeling_type(spo_subject, "SUB")
#flag_B = _labeling_type(spo_object, "OBJ")
                    if flag_A:
tokener_error_flag = True
else:
for item in spo_list_form:
if item[1]== None:
continue
flag_A = _labeling_type(item[1],item[0])
if flag_A:
tokener_error_flag = True
            # tag tokens that were split apart by bert_tokenizer.tokenize with the special label [##WordPiece]
for idx, token in enumerate(text_tokened):
"""标注被 bert_tokenizer.tokenize 拆分的词语"""
if token.startswith("##"):
labeling_list[idx] = "[##WordPiece]"
if not tokener_error_flag:
self.token_label_and_one_prdicate_out_f.write(" ".join(labeling_list)+"\t"+predicate+"\n")
self.text_f.write(text + "\n")
self.token_in_f.write(" ".join(text_tokened)+"\t"+predicate+"\n")
self.token_in_not_UNK_f.write(" ".join(text_tokened_not_UNK) + "\n")
def separate_raw_data_and_token_labeling(self):
if not os.path.exists(self.DATA_OUTPUT_DIR):
os.makedirs(os.path.join(self.DATA_OUTPUT_DIR, "train"))
os.makedirs(os.path.join(self.DATA_OUTPUT_DIR, "valid"))
os.makedirs(os.path.join(self.DATA_OUTPUT_DIR, "test"))
for file_set_type in ["train", "valid"]:
print(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type)))
self.token_label_and_one_prdicate_out_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "token_label_and_one_prdicate_out.txt"), "w", encoding='utf-8')
self.bert_tokener_error_log_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "bert_tokener_error_log.txt"), "w", encoding='utf-8')
self.text_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "text.txt"), "w", encoding='utf-8')
self.token_in_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "token_in.txt"), "w", encoding='utf-8')
self.token_in_not_UNK_f = open(os.path.join(os.path.join(self.DATA_OUTPUT_DIR, file_set_type), "token_in_not_UNK.txt"), "w", encoding='utf-8')
if file_set_type == "train":
path_to_raw_data_file = "train.json"
elif file_set_type == "valid":
path_to_raw_data_file = "valid.json"
else:
pass
with open(os.path.join(self.DATA_INPUT_DIR, path_to_raw_data_file), 'r', encoding='utf-8') as f:
count_numbers = 0
while True:
line = f.readline()
if line:
count_numbers += 1
r = json.loads(line)
text = r["text"]
spo_list = r["spo_list"]
self.subject_object_labeling(spo_list=spo_list, text=text)
else:
break
print("all numbers", count_numbers)
self.text_f.close()
self.token_in_f.close()
self.token_in_not_UNK_f.close()
self.token_label_and_one_prdicate_out_f.close()
self.bert_tokener_error_log_f.close()
if __name__=="__main__":
DATA_INPUT_DIR = config.data_dir
DATA_OUTPUT_DIR = "sequence_labeling_data"
Vocab_Path = config.bert_vocab_dir
General_Mode = False
model_data = Model_data_preparation(General_Mode = General_Mode,DATA_INPUT_DIR=DATA_INPUT_DIR, DATA_OUTPUT_DIR=DATA_OUTPUT_DIR,vocab_file_path=Vocab_Path)
model_data.separate_raw_data_and_token_labeling()
| [
"os.path.dirname",
"json.loads",
"os.path.exists",
"os.path.join"
]
| [((78, 103), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (93, 103), False, 'import os\n'), ((166, 191), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (181, 191), False, 'import os\n'), ((807, 832), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (822, 832), False, 'import os\n'), ((6288, 6324), 'os.path.exists', 'os.path.exists', (['self.DATA_OUTPUT_DIR'], {}), '(self.DATA_OUTPUT_DIR)\n', (6302, 6324), False, 'import os\n'), ((6350, 6393), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', '"""train"""'], {}), "(self.DATA_OUTPUT_DIR, 'train')\n", (6362, 6393), False, 'import os\n'), ((6419, 6462), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', '"""valid"""'], {}), "(self.DATA_OUTPUT_DIR, 'valid')\n", (6431, 6462), False, 'import os\n'), ((6488, 6530), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', '"""test"""'], {}), "(self.DATA_OUTPUT_DIR, 'test')\n", (6500, 6530), False, 'import os\n'), ((1027, 1052), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1042, 1052), False, 'import os\n'), ((6613, 6662), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', 'file_set_type'], {}), '(self.DATA_OUTPUT_DIR, file_set_type)\n', (6625, 6662), False, 'import os\n'), ((6737, 6786), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', 'file_set_type'], {}), '(self.DATA_OUTPUT_DIR, file_set_type)\n', (6749, 6786), False, 'import os\n'), ((6914, 6963), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', 'file_set_type'], {}), '(self.DATA_OUTPUT_DIR, file_set_type)\n', (6926, 6963), False, 'import os\n'), ((7064, 7113), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', 'file_set_type'], {}), '(self.DATA_OUTPUT_DIR, file_set_type)\n', (7076, 7113), False, 'import os\n'), ((7199, 7248), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', 'file_set_type'], {}), '(self.DATA_OUTPUT_DIR, file_set_type)\n', (7211, 7248), False, 'import os\n'), ((7346, 7395), 'os.path.join', 'os.path.join', (['self.DATA_OUTPUT_DIR', 'file_set_type'], {}), '(self.DATA_OUTPUT_DIR, file_set_type)\n', (7358, 7395), False, 'import os\n'), ((7697, 7753), 'os.path.join', 'os.path.join', (['self.DATA_INPUT_DIR', 'path_to_raw_data_file'], {}), '(self.DATA_INPUT_DIR, path_to_raw_data_file)\n', (7709, 7753), False, 'import os\n'), ((7986, 8002), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (7996, 8002), False, 'import json\n')] |
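The labeling code above reduces to finding a tokenized entity span inside the tokenized sentence and writing B-/I- tags over it; below is a dependency-free sketch of that core step (the plain tokens are illustrative — the class itself uses the BERT WordPiece tokenizer).

def index_sublist(q_list, k_list):
    """Return the first index at which q_list occurs inside k_list, or None."""
    for idx in range(len(k_list) - len(q_list) + 1):
        if k_list[idx:idx + len(q_list)] == q_list:
            return idx
    return None

def bio_label(entity_tokens, text_tokens, tag):
    labels = ["O"] * len(text_tokens)
    start = index_sublist(entity_tokens, text_tokens)
    if start is not None:
        labels[start] = "B-" + tag
        for i in range(start + 1, start + len(entity_tokens)):
            labels[i] = "I-" + tag
    return labels

# bio_label(["New", "York"], ["I", "love", "New", "York"], "SUB")
# -> ['O', 'O', 'B-SUB', 'I-SUB']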
"""
GitLab webhook receiver - see http://doc.gitlab.com/ee/web_hooks/web_hooks.html
"""
import asyncio
import json
import logging
from sinks.base_bot_request_handler import AsyncRequestHandler
logger = logging.getLogger(__name__)
try:
import dateutil.parser
except ImportError:
logger.error("missing module python_dateutil: pip3 install python_dateutil")
raise
class webhookReceiver(AsyncRequestHandler):
"""Receive REST API posts from GitLab"""
_bot = None
@asyncio.coroutine
def process_request(self, path, dummy_query_string, content):
"""Process a received POST to a given converstation"""
path = path.split("/")
conv_or_user_id = path[1]
if conv_or_user_id is None:
logger.error("conversation or user id must be provided as part of path")
return
try:
payload = json.loads(content)
except json.JSONDecodeError as err:
            logger.exception("invalid payload @%d:%d: %s", err.lineno, err.colno, err)
            return
logger.error("GitLab message: %s", json.dumps(payload))
refs = payload.get("ref", '').split("/")
user = payload.get("user_name")
if not user:
user = payload["user"]["name"]
message = ["GitLab update for [{}]({}) by __{}__".format(
payload["project"]["name"], payload["project"]["web_url"], user)]
if payload["object_kind"] == "push":
message.append("Pushed {} commit(s) on {} branch:".format(
payload["total_commits_count"], "/".join(refs[2:])))
for commit in payload["commits"]:
message.append("{} -- {} at [{:%c}]({})".format(
commit["message"], commit["author"]["name"],
dateutil.parser.parse(commit["timestamp"]), commit["url"]))
elif payload["object_kind"] == "tag_push":
message.append("Pushed tag {}]".format("/".join(refs[2:])))
elif payload["object_kind"] == "issue":
issue = payload["object_attributes"]
message.append("Update {} issue {} at {:%c}\n[{}]({})".format(
issue["state"], issue["id"],
dateutil.parser.parse(issue["updated_at"]),
issue["title"], issue["url"]))
elif payload["object_kind"] == "note":
note = payload["object_attributes"]
message.append("{} note on {}: [{}]({})".format(
note["notable_type"], note["id"], note["note"], note["url"]))
elif payload["object_kind"] == "merge_request":
request = payload["object_attributes"]
message.append("Merge request {}: from [{}:{}]({}) to [{}:{}]({})".format(
request["id"],
request["source"]["name"], request["source_branch"], request["source"]["web_url"],
request["target"]["name"], request["target_branch"], request["target"]["web_url"]))
else:
message.append("{}: unknown gitlab webhook object kind".format(payload["object_kind"]))
logger.warning("%s: unknown gitlab webhook object kind", payload["object_kind"])
if message:
yield from self.send_data(conv_or_user_id, "\n".join(message))
| [
"logging.getLogger",
"json.loads",
"json.dumps"
]
| [((205, 232), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (222, 232), False, 'import logging\n'), ((877, 896), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (887, 896), False, 'import json\n'), ((1072, 1091), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1082, 1091), False, 'import json\n')] |
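The handler above reads only a handful of keys from GitLab's push payload; the illustrative, truncated payload below (key names taken from the code, values invented) shows what the push branch of process_request consumes.

example_push = {
    "object_kind": "push",
    "ref": "refs/heads/master",  # split on "/" to recover the branch name
    "user_name": "alice",
    "project": {"name": "demo", "web_url": "https://gitlab.example.com/demo"},
    "total_commits_count": 1,
    "commits": [{
        "message": "fix typo",
        "author": {"name": "alice"},
        "timestamp": "2021-01-01T12:00:00+00:00",
        "url": "https://gitlab.example.com/demo/commit/abc123",
    }],
}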
import os
from typing import Union, List
from tei_entity_enricher.interface.postprocessing.io import FileReader, FileWriter
from tei_entity_enricher.util.helper import local_save_path, makedir_if_necessary
from tei_entity_enricher.util.exceptions import FileNotFound
class GndConnector:
def __init__(
self,
gnd_id: Union[str, List[str], None] = None,
apiindex: int = 0,
check_connectivity: bool = True,
show_printmessages: bool = True,
) -> None:
"""establishes connection to api, from which norm data for entities of Deutsche Nationalbibliothek´s database is retrieved,
loaded data can be passed to an instance of Cache class for further processing or FileWriter class to save it
gnd_id:
gnd id number(s)
apiindex:
index of selected api in list defined in self.apilist
check_connectivity:
execute connectivity check in __init__() or not (see connectivitycheck_loop())
show_printmessages:
show class internal printmessages on runtime or not
apilist_filepath:
path to apilist config file
apilist:
list of dicts as configuration data set, delivers a mapping to be able to normalize data from different apis, defines api`s url and aliases for filtering purposes (see get_gnd_data())
connection_established:
data from an api has already been received or not
remaining_apis_to_check:
list of apiindex values, which have not been checked yet in connectivitycheck_loop()"""
print("initializing GndConnector..") if show_printmessages else None
self.show_printmessages: bool = show_printmessages
self.gnd_id: Union[str, List[str], None] = gnd_id
self.apiindex: int = apiindex
self.apilist_filepath: str = os.path.join(local_save_path, "config", "postprocessing", "gnd_apilist.json")
try:
self.apilist: Union[dict, None] = FileReader(
filepath=self.apilist_filepath, origin="local", internal_call=True, show_printmessages=False
).loadfile_json()
except FileNotFound:
print(
"GndConnector: could not find gnd_apilist.json in config dir. creating file with default settings..."
) if self.show_printmessages else None
self.apilist: List[dict] = [
{
"name": "culturegraph",
"baseUrl": "https://hub.culturegraph.org/entityfacts/{}",
"baseAliases": {
"type": [
"@type",
"str",
"categorial",
{
"person": "person",
"organisation": "organisation",
"place": "place",
},
],
"name": ["preferredName", "str", "nominal"],
"furtherNames": ["variantName", ["str"], "nominal"],
"sameAs": ["sameAs", [{"@id": "str"}], "nominal"],
"pseudonyms": [
"pseudonym",
[{"preferredName": "str"}],
"nominal",
],
},
"personAliases": {},
"placeAliases": {},
"organizationAliases": {},
},
{
"name": "lobid",
"baseUrl": "http://lobid.org/gnd/{}",
"baseAliases": {
"type": [
"type",
["str"],
"categorial",
{
"person": "Person",
"organisation": "CorporateBody",
"place": "PlaceOrGeographicName",
},
],
"name": ["preferredName", "str", "nominal"],
"furtherNames": ["variantName", ["str"], "nominal"],
"sameAs": ["sameAs", [{"id": "str"}], "nominal"],
"pseudonyms": [
"variantNameEntityForThePerson",
[{"forename": ["str"], "surname": ["str"]}],
"nominal",
],
},
"personAliases": {},
"placeAliases": {},
"organizationAliases": {},
},
]
self.apiindex: int = 0
try:
makedir_if_necessary(os.path.dirname(self.apilist_filepath))
FileWriter(data=self.apilist, filepath=self.apilist_filepath).writefile_json()
except:
print(
f"GndConnector __init__(): could not create default gnd_apilist.json in config folder."
) if self.show_printmessages == True else None
self.check_connectivity: bool = check_connectivity
self.connection_established: bool = False
self.remaining_apis_to_check: list = [i for i, _ in enumerate(self.apilist)]
if self.check_connectivity == True:
self.connectivitycheck_loop()
else:
print(
"GndConnector: initialization has been done without connectivity check."
) if self.show_printmessages else None
def connectivitycheck_single(self, index_to_test: int, gnd_id_to_test: str = "118540238") -> bool:
"""auxiliary method of connectivitycheck_loop(),
        checks a single api's (from self.apilist) response status code and whether the response data type is json,
preset gnd_id_to_test value refers to Goethe"""
try:
result: dict = FileReader(
filepath=self.apilist[index_to_test]["baseUrl"].format(gnd_id_to_test),
origin="web",
internal_call=True,
show_printmessages=self.show_printmessages,
).loadfile_json()
except:
return False
if type(result) == dict:
return True
return False
def connectivitycheck_loop(self) -> int:
"""recursive connectivity check, checking every single api in self.apilist (ascending)
        and setting self.apiindex to the index of the first api that passes the check successfully.
returns 0 or -1 for unittest purposes"""
if self.check_connectivity == False:
            self.check_connectivity = True
if len(self.remaining_apis_to_check) > 0:
if self.connectivitycheck_single(self.remaining_apis_to_check[0]) == True:
print(
f"GndConnector: connectivity check passed, connection to {self.apilist[self.remaining_apis_to_check[0]]['name']} api established."
) if self.show_printmessages else None
self.apiindex = self.remaining_apis_to_check[0]
self.remaining_apis_to_check = [i for i, _ in enumerate(self.apilist)]
self.connection_established = True
return 0
else:
print(
f"GndConnector connectivity check: {self.apilist[self.remaining_apis_to_check[0]]['name']} api is currently not responding as expected. checking for alternatives..."
) if self.show_printmessages else None
self.remaining_apis_to_check.remove(self.remaining_apis_to_check[0])
self.connectivitycheck_loop()
else:
print(
"GndConnector connectivity check error: none of the listed apis is responding as expected."
) if self.show_printmessages else None
return -1
def print_complete_url(self, index: int = 0) -> int:
"""print baseUrl string of the currently selected api defined in self.apilist,
formatted with a gnd id number of self.gnd_id (list or str) selected by index value.
returns 0 or -1 for unittest purposes"""
if self.apiindex not in [i for i, _ in enumerate(self.apilist)]:
print(
"GndConnector print_complete_url() error: apiindex is not defined correctly. using default api..."
) if self.show_printmessages else None
self.apiindex = 0
if self.gnd_id is not None:
if type(self.gnd_id) == str:
print(
f"GndConnector complete URL: {self.apilist[self.apiindex]['baseUrl'].format(self.gnd_id)}"
) if self.show_printmessages else None
elif type(self.gnd_id) == list:
print(
f"GndConnector complete URL of gnd id number {index + 1} in passed gnd id list: {self.apilist[self.apiindex]['baseUrl'].format(self.gnd_id[index])}"
) if self.show_printmessages else None
return 0
else:
print(
"GndConnector print_complete_url() internal error: no gnd id number has been passed to connector object yet."
) if self.show_printmessages else None
return -1
def return_complete_url(self, index: int = 0) -> Union[str, None]:
"""return baseUrl string of the currently selected api defined in self.apilist,
formatted with a gnd id number of self.gnd_id (list or str) selected by index value"""
if self.apiindex not in [i for i, _ in enumerate(self.apilist)]:
print(
"GndConnector return_complete_url() error: apiindex is not defined correctly. using default api..."
) if self.show_printmessages else None
self.apiindex = 0
if self.gnd_id is not None:
if type(self.gnd_id) == str:
return self.apilist[self.apiindex]["baseUrl"].format(self.gnd_id)
elif type(self.gnd_id) == list:
return self.apilist[self.apiindex]["baseUrl"].format(self.gnd_id[index])
else:
print(
"GndConnector return_complete_url() internal error: no gnd id number has been passed to connector object yet."
) if self.show_printmessages else None
return None
def get_gnd_data(self, data_selection: Union[str, List[str], None] = None) -> Union[dict, None]:
"""method to receive data from api with the possibility to filter results,
a dict is created, having gnd id numbers as keys and filtered or unfiltered response json data as values
data_selection:
if delivered, a normalized output is generated by renaming keys and re-sorting data from different keys from the raw data into new keys (purpose: json data delivered by different apis comes in different key-value-structures; normalization of this data is achieved with the help of key-value mapping information stored in self.apilist)
can be "base" (all baseAliases data is provided: "type", "name", "furtherNames", "sameAs", "pseudonyms")
can be a list of one or more baseAliases (i.e. ["type", "name"])
(not yet implemented: can be a "person", "place", "organization" or a custom string refering to a user-defined set of keys, for which the mapping is provided in self.apilist)
"""
if self.check_connectivity == False:
print(
f"GndConnector note: connections to apis have not been checked yet. to do so manually execute connectivitycheck_loop() method of the current connector object. continuing attempt to receive gnd data from {self.apilist[self.apiindex]['name']} api..."
) if self.show_printmessages else None
elif self.connection_established == False:
print(
"GndConnector connectivity error: after connectivity check no connection could has been established to any of the available apis. gnd data queries can not be executed at the moment."
) if self.show_printmessages else None
return None
result = {}
if type(self.gnd_id) == str:
_temp_data = {}
try:
filereader = FileReader(
filepath=self.return_complete_url(), origin="web", internal_call=True, show_printmessages=False
)
_temp_data = filereader.loadfile_json()
except:
print(
"GndConnector connectivity error in get_gnd_data() method: could not load resource from api as expected."
) if self.show_printmessages else None
return None
self.connection_established = True
if _temp_data != None and _temp_data != False:
result[self.gnd_id] = _temp_data
print(
f"GndConnector get_gnd_data() status: data for gnd id {self.gnd_id} received."
) if self.show_printmessages else None
else:
print(
f"GndConnector get_gnd_data() status: for gnd id {self.gnd_id} no data could be delivered by api"
) if self.show_printmessages else None
return None
elif type(self.gnd_id) == list:
for index, gnd in enumerate(self.gnd_id):
_temp_data = {}
try:
filereader = FileReader(
filepath=self.return_complete_url(index),
origin="web",
internal_call=True,
show_printmessages=True,
)
_temp_data = filereader.loadfile_json()
except:
print(
f"GndConnector get_gnd_data() status: for gnd id {index + 1} ({gnd}) of {len(self.gnd_id)} no data could be delivered by api"
) if self.show_printmessages else None
result[gnd] = _temp_data
print(
f"GndConnector get_gnd_data() status: gnd id {index + 1} ({gnd}) of {len(self.gnd_id)} processed"
) if self.show_printmessages else None
self.connection_established = True
# filtering: build new dict with selected values, which should be returned (base mode = all base aliases from apilist definition. list mode = select specific aliases from base set)
# defining sub method for filtering
def filter_received_data(gnd_id: str, mode: Union[str, List[str]]) -> dict:
"""sub method, which extracts the key-value pairs from the raw data received from api for one gnd id number and renames the keys and/or values.
alias definitions in self.apilist are used for this filtering process:
the keys of 'baseAliases' dict define the new key names, their value list denotates (in order of the list)
1. the original key name,
2. the original value type (python-wise: i.e. 'str' or '[str]'),
3. the original value type (logic-wise: 'categorial' or 'nominal'),
4. a categorization dict, if the original value type logic-wise is 'categorial':
it delivers mapping information to assign a category (defined keys of this mapping dict) based on specific values (defined in the values of this mapping dict) found in raw data,
example 1: using culturegraph api the value of the base category 'type' is assigned to 'person', if the raw data json object has a key '@type' with the value 'person' of type str,
example 2: using lobid api the value of the base category 'type' is assigned to 'person', if the raw data json object has a key 'type' with a list as a value, which has itself a value 'Person' of type str in it,
mode parameter accepts str 'base' (all base aliases will be extracted) or a list of str (specific aliases will be extracted)"""
# todo: handle additional alias definition sets in gnd_apilist.json by user
# category_sets = {'base': [list(self.apilist[self.apiindex]["baseAliases"].keys()), 'baseAliases'],
# 'custom': [list(self.apilist[self.apiindex]["custom"].keys()), 'custom']
# }
# selected_categories_list = category_sets.get(mode)[0] if type(mode) == str else mode
# selected_categories_alias = category_sets.get(mode)[1] if type(mode) == str else 'baseAliases'
# => allow parsing a list of categories to get_gnd_data() only if they are defined in baseAlias set?
base_categories = list(self.apilist[self.apiindex]["baseAliases"].keys())
selected_categories = base_categories if mode == "base" else mode
selected_categories_data = {}
for category in selected_categories:
_temp_data = []
try:
_temp_data = result[gnd_id][self.apilist[self.apiindex]["baseAliases"][category][0]]
except KeyError:
_temp_data = []
print(
f"GndConnector get_gnd_data() filtering note: could not find {category} information for {gnd_id} in raw data. continuing processing..."
) if self.show_printmessages else None
# handling of categorical data types
if (
len(_temp_data) > 0
and self.apilist[self.apiindex]["baseAliases"][category][2] == "categorial"
                    and type(self.apilist[self.apiindex]["baseAliases"][category][3]) == dict
):
_temp_category_data_form = self.apilist[self.apiindex]["baseAliases"][category][1]
_temp_categorial_values = self.apilist[self.apiindex]["baseAliases"][category][3]
# change found categorial string to selfdefined string (i.e. 'Person' to 'person')
if type(_temp_category_data_form) == str:
for _type in _temp_categorial_values:
if _temp_data == _temp_categorial_values[_type]:
_temp_data = _type
# replace found categorial list with selfdefined string (i.e. ['Person', 'PoliticalLeader'] to 'person')
elif type(_temp_category_data_form) == list:
for _type in _temp_categorial_values:
if _temp_categorial_values[_type] in _temp_data:
_temp_data = _type
selected_categories_data[category] = _temp_data
return selected_categories_data
# executing sub method for filtering
if data_selection is not None:
if type(self.gnd_id) == str:
_new_dict = {list(result.keys())[0]: filter_received_data(self.gnd_id, data_selection)}
elif type(self.gnd_id) == list:
_new_dict = {}
for key in result:
_new_dict[key] = filter_received_data(key, data_selection)
result = _new_dict
return result
| [
"os.path.dirname",
"tei_entity_enricher.interface.postprocessing.io.FileReader",
"tei_entity_enricher.interface.postprocessing.io.FileWriter",
"os.path.join"
]
| [((1901, 1978), 'os.path.join', 'os.path.join', (['local_save_path', '"""config"""', '"""postprocessing"""', '"""gnd_apilist.json"""'], {}), "(local_save_path, 'config', 'postprocessing', 'gnd_apilist.json')\n", (1913, 1978), False, 'import os\n'), ((2040, 2149), 'tei_entity_enricher.interface.postprocessing.io.FileReader', 'FileReader', ([], {'filepath': 'self.apilist_filepath', 'origin': '"""local"""', 'internal_call': '(True)', 'show_printmessages': '(False)'}), "(filepath=self.apilist_filepath, origin='local', internal_call=\n True, show_printmessages=False)\n", (2050, 2149), False, 'from tei_entity_enricher.interface.postprocessing.io import FileReader, FileWriter\n'), ((4990, 5028), 'os.path.dirname', 'os.path.dirname', (['self.apilist_filepath'], {}), '(self.apilist_filepath)\n', (5005, 5028), False, 'import os\n'), ((5047, 5108), 'tei_entity_enricher.interface.postprocessing.io.FileWriter', 'FileWriter', ([], {'data': 'self.apilist', 'filepath': 'self.apilist_filepath'}), '(data=self.apilist, filepath=self.apilist_filepath)\n', (5057, 5108), False, 'from tei_entity_enricher.interface.postprocessing.io import FileReader, FileWriter\n')] |
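A minimal usage sketch for the connector above, assuming the class is importable and one of the configured apis is reachable; the gnd id is the same Goethe id used by the connectivity check.

# Sketch: fetch normalized base data for a single gnd id.
connector = GndConnector(gnd_id="118540238", check_connectivity=True)
base_data = connector.get_gnd_data(data_selection="base")            # type, name, furtherNames, sameAs, pseudonyms
name_only = connector.get_gnd_data(data_selection=["type", "name"])  # subset of the base aliases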
#!/usr/bin/python
__version__ = '1.0'
__author__ = '<EMAIL>'
##
## Imports
import string
import os
import errno
import shutil
import sys
import glob
import datetime
import subprocess
import logging as log
import numpy as np
import csv
from io import StringIO
import PyPDF2
from sklearn import preprocessing
from collections import defaultdict
from xml.etree import cElementTree
from lxml import etree
from reportlab.pdfgen import canvas
class Colour:
def __init__(self):
pass
purple = '\033[95m'
cyan = '\033[96m'
darkcyan = '\033[36m'
blue = '\033[94m'
green = '\033[92m'
yellow = '\033[93m'
red = '\033[91m'
bold = '\033[1m'
underline = '\033[4m'
end = '\033[0m'
class ConfigReader(object):
"""
The configuration file reader.
Opens a configuration file, and if valid, converts the parameters within the file to a dictionary object,
reader to be viewed through accessing the config_dict variable.
"""
def __init__(self, scriptdir, config_filename=None):
##
## Instance variables
self.scriptdir = scriptdir
self.config_filename = config_filename
self.dtd_filename = scriptdir + "/config/config.dtd"
##
## Check for configuration file (just incase)
if self.config_filename is None:
log.error("No configuration file specified!")
else:
self.config_file = etree.parse(self.config_filename)
##
## Check config vs dtd, parse info to dictionary, validate vs ruleset
self.validate_against_dtd()
self.set_dictionary()
self.validate_config()
def validate_against_dtd(self):
"""
Validate input config against DTD ruleset
i.e. confirms conformation of XML structure
"""
##
## Open > etree.DTD object
dtd_file = open(self.dtd_filename, 'r')
dtd_object = etree.DTD(dtd_file)
##
## If validation fails, close the object (memory) and raise an error
if not dtd_object.validate(self.config_file):
dtd_file.close()
log.error("DTD validation failure {0}: {1}".format(self.config_filename, dtd_object.error_log.filter_from_errors()[0]))
sys.exit(2)
dtd_file.close()
def set_dictionary(self):
"""
Takes the now validated XML and extracts information from the tree into
a python dictionary {key: value}. This dictionary will be used for variables
within the pipeline. Recursion adapted from http://stackoverflow.com/a/9286702
"""
def recursive_generation(t):
d = {t.tag: {} if t.attrib else None}
children = list(t)
##
## If list was populated, create dictionary, Append keys
if children:
dd = defaultdict(list)
for dc in map(recursive_generation, children):
for k, v in dc.items():
dd[k].append(v)
d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
##
## Values for key
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())
if t.text:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]['#text'] = text
else:
d[t.tag] = text
return d
##
## Takes the formatted xml doc, puts through generator, returns dictionary
string_repr = etree.tostring(self.config_file, pretty_print=True)
element_tree = cElementTree.XML(string_repr)
self.config_dict = recursive_generation(element_tree)
self.config_dict = self.config_dict[list(self.config_dict.keys())[0]]
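    ## Illustrative example (not a real config) of what recursive_generation() returns:
    ##   <config data_dir="/data"><instance_flags demultiplex="True"/></config>
    ##   -> {'config': {'@data_dir': '/data', 'instance_flags': {'@demultiplex': 'True'}}}
    ## set_dictionary() then strips the root tag, leaving config_dict as
    ##   {'@data_dir': '/data', 'instance_flags': {'@demultiplex': 'True'}}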
def validate_config(self):
"""
Method which validates the configuration file's contents.
If all pass, guarantees that the settings dictionary is full of valid settings!
"""
trigger = False
##
## Main configuration instance settings
data_directory = self.config_dict['@data_dir']
if not os.path.exists(data_directory):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified data directory could not be found.'))
trigger = True
for fqfile in glob.glob(os.path.join(data_directory, '*')):
if not (fqfile.endswith('.fq') or fqfile.endswith('.fastq') or fqfile.endswith('.fq.gz') or fqfile.endswith('.fastq.gz')):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Non FastQ/GZ data detected in specified input directory.'))
trigger = True
forward_reference = self.config_dict['@forward_reference']
if not os.path.isfile(forward_reference):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified forward reference file could not be found.'))
trigger = True
if not (forward_reference.endswith('.fa') or forward_reference.endswith('.fasta')):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified forward reference file is not a fa/fas file.'))
trigger = True
reverse_reference = self.config_dict['@reverse_reference']
if not os.path.isfile(reverse_reference):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified reverse reference file could not be found.'))
trigger = True
if not (reverse_reference.endswith('fa') or reverse_reference.endswith('.fasta')):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified reverse reference file is not a fa/fas file.'))
trigger = True
if forward_reference.split('/')[-1] == reverse_reference.split('/')[-1]:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: FW and RV references have identical filenames. Will create indexing issue.'))
trigger = True
##
## Instance flag settings
demultiplexing_flag = self.config_dict['instance_flags']['@demultiplex']
if not (demultiplexing_flag == 'True' or demultiplexing_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Demultiplexing flag is not set to True/False.'))
trigger = True
sequence_qc_flag = self.config_dict['instance_flags']['@quality_control']
if not (sequence_qc_flag == 'True' or sequence_qc_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Sequence Quality control flag is not set to True/False.'))
trigger = True
alignment_flag = self.config_dict['instance_flags']['@sequence_alignment']
if not (alignment_flag == 'True' or alignment_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Sequence Alignment flag is not set to True/False.'))
trigger = True
atypical_flag = self.config_dict['instance_flags']['@atypical_realignment']
if not (atypical_flag == 'True' or atypical_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Atypical Realignment flag is not True/False.'))
trigger = True
genotype_flag = self.config_dict['instance_flags']['@genotype_prediction']
if not (genotype_flag == 'True' or genotype_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Genotype Prediction control flag is not True/False.'))
trigger = True
snpcall_flag = self.config_dict['instance_flags']['@snp_calling']
if not (snpcall_flag == 'True' or snpcall_flag == 'False'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: SNP Calling flag is not True/False.'))
trigger = True
##
## Demultiplexing flag settings
trim_adapter_base = ['A', 'G', 'C', 'T']
if demultiplexing_flag == 'True':
forward_adapter = self.config_dict['demultiplex_flags']['@forward_adapter']
for charbase in forward_adapter:
if charbase not in trim_adapter_base:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Invalid character detected in forward_adapter demultiplexing flag.'))
trigger = True
forward_position = self.config_dict['demultiplex_flags']['@forward_position']
if forward_position not in ['5P', '3P', 'AP']:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Given demultiplexing forward adapter position invalid! [5P, 3P, AP]'))
trigger = True
reverse_adapter = self.config_dict['demultiplex_flags']['@reverse_adapter']
for charbase in reverse_adapter:
if charbase not in trim_adapter_base:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Invalid character detected in reverse_adapter demultiplexing flag.'))
trigger = True
reverse_position = self.config_dict['demultiplex_flags']['@reverse_position']
if reverse_position not in ['5P', '3P', 'AP']:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Given demultiplexing reverse adapter position invalid! [5P, 3P, AP]'))
trigger = True
error_rate = self.config_dict['demultiplex_flags']['@error_rate']
if not error_rate.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified error_rate is not a valid integer.'))
trigger = True
minimum_overlap = self.config_dict['demultiplex_flags']['@min_overlap']
if not minimum_overlap.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified min_overlap is not a valid integer.'))
trigger = True
minimum_length = self.config_dict['demultiplex_flags']['@min_length']
if not minimum_length == '':
if not minimum_length.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified min_length is not a valid integer.'))
trigger = True
maximum_length = self.config_dict['demultiplex_flags']['@max_length']
if not maximum_length == '':
if not maximum_length.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified max_length is not a valid integer.'))
trigger = True
##
## Trimming flag settings
if sequence_qc_flag == 'True':
trimming_type = self.config_dict['trim_flags']['@trim_type']
if not (trimming_type == 'Quality' or trimming_type == 'Adapter' or trimming_type == 'Both'):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Trimming type is not Quality/Adapter/Both.'))
trigger = True
quality_threshold = self.config_dict['trim_flags']['@quality_threshold']
if not quality_threshold.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified quality threshold integer is invalid.'))
trigger = True
elif not int(quality_threshold) in range(0,39):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified quality threshold integer out of range (0-38).'))
trigger = True
trim_adapters = ['-a','-g','-a$','-g^','-b']
adapter_flag = self.config_dict['trim_flags']['@adapter_flag']
if not (adapter_flag in trim_adapters):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified trimming adapter not valid selection.'))
trigger = True
forward_adapter = self.config_dict['trim_flags']['@forward_adapter']
for charbase in forward_adapter:
if charbase not in trim_adapter_base:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Invalid character detected in FW adapter sequence.'))
trigger = True
reverse_adapter = self.config_dict['trim_flags']['@reverse_adapter']
for charbase in reverse_adapter:
if charbase not in trim_adapter_base:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Invalid character detected in RV adapter sequence.'))
trigger = True
error_tolerance = self.config_dict['trim_flags']['@error_tolerance']
if not isinstance(float(error_tolerance), float):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified error tolerance is not a valid float.'))
trigger = True
if not float(error_tolerance) in np.arange(0,1.1,0.01):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified error tolerance is not 0.0 < x < 1.0.'))
trigger = True
##
## Alignment flag settings
if alignment_flag == 'True':
min_seed_length = self.config_dict['alignment_flags']['@min_seed_length']
if not min_seed_length.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified min_seed_length integer is invalid.'))
trigger=True
band_width = self.config_dict['alignment_flags']['@band_width']
if not band_width.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified band_width integer is invalid.'))
trigger=True
seed_length_extension = self.config_dict['alignment_flags']['@seed_length_extension']
if not isinstance(float(seed_length_extension), float):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified seed_length_extension float is invalid.'))
trigger=True
skip_seed_with_occurrence = self.config_dict['alignment_flags']['@skip_seed_with_occurrence']
if not skip_seed_with_occurrence.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified skip_seed_with_occurrence integer is invalid.'))
trigger=True
chain_drop = self.config_dict['alignment_flags']['@chain_drop']
if not isinstance(float(chain_drop), float):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified chain_drop float is invalid.'))
trigger=True
seeded_chain_drop = self.config_dict['alignment_flags']['@seeded_chain_drop']
if not seeded_chain_drop.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified seeded_chain_drop integer is invalid.'))
trigger=True
seq_match_score = self.config_dict['alignment_flags']['@seq_match_score']
if not seq_match_score.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified seq_match_score integer is invalid.'))
trigger=True
mismatch_penalty = self.config_dict['alignment_flags']['@mismatch_penalty']
if not mismatch_penalty.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified mismatch_penalty integer is invalid.'))
trigger=True
indel_penalty_raw = self.config_dict['alignment_flags']['@indel_penalty']
indel_penalty = indel_penalty_raw.split(',')
for individual_indelpen in indel_penalty:
if not individual_indelpen.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified indel_penalty integer(s) is(are) invalid.'))
trigger=True
gap_extend_penalty_raw = self.config_dict['alignment_flags']['@gap_extend_penalty']
gap_extend_penalty = gap_extend_penalty_raw.split(',')
for individual_gaextend in gap_extend_penalty:
if not individual_gaextend.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified gap_extend_penalty integer(s) is(are) invalid.'))
trigger=True
prime_clipping_penalty_raw = self.config_dict['alignment_flags']['@prime_clipping_penalty']
prime_clipping_penalty = prime_clipping_penalty_raw.split(',')
for individual_prclip in prime_clipping_penalty:
if not individual_prclip.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified prime_clipping_penalty integer(s) is(are) invalid.'))
trigger=True
unpaired_pairing_penalty = self.config_dict['alignment_flags']['@unpaired_pairing_penalty']
if not unpaired_pairing_penalty.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Specified unpaired_pairing_penalty integer is invalid.'))
trigger=True
##
## Genotype prediction flag settings
if genotype_flag == 'True':
snp_observation_pcnt = self.config_dict['prediction_flags']['@snp_observation_threshold']
			if not snp_observation_pcnt.isdigit() or not int(snp_observation_pcnt) in range(1,5):
				log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: SNP Observation value invalid! Please use 1-10.'))
				trigger = True
quality_cutoff = self.config_dict['prediction_flags']['@quality_cutoff']
if not quality_cutoff.isdigit():
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: SNP Quality Cutoff value is not an integer.'))
trigger = True
if trigger:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'XML Config: Failure, exiting.'))
sys.exit(2)
else:
log.info('{}{}{}{}'.format(Colour.green, 'shd__ ', Colour.end, 'XML Config: Parsing parameters successful!'))
class DataClump(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
class DataLoader:
def __init__(self, database, descriptor):
self.database = database
self.descriptor = descriptor
def load_model(self):
## Loads description file for respective data set
modeldescr_name = self.descriptor
with open(modeldescr_name) as f:
descr_text = f.read()
## Loads data set from csv, into objects in preparation for bunch()
data_file_name = self.database
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
temp = next(data_file)
feature_names = np.array(temp)
labels = []
for i, d in enumerate(data_file):
data[i] = d[:-1]
label = d[-1]
labels.append(label)
le = preprocessing.LabelEncoder()
le.fit(labels)
hash_int_labels = le.transform(labels)
return DataClump(DATA=data,
TARGET=hash_int_labels,
FTRNAME=feature_names[:-1],
DESCR=descr_text,
ENCDR=le)
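# Illustrative usage sketch for DataLoader/DataClump (the file names are hypothetical and
# this helper is not called anywhere in the pipeline): load_model() expects a CSV whose
# first row holds n_samples,n_features, whose second row lists the feature names, and
# whose remaining rows end with a single label column; it returns a DataClump that
# exposes those arrays as attributes.
def _example_dataclump_usage():
	bunch = DataLoader('genotype_table.csv', 'genotype_table.descr').load_model()
	return bunch.DATA.shape, bunch.FTRNAME, list(bunch.ENCDR.classes_)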
def parse_boolean(boolean_value):
"""
Given a string (boolean_value), returns a boolean value representing the string contents.
For example, a string with 'true', 't', 'y' or 'yes' will yield True.
"""
	boolean_value = boolean_value.lower() in ('yes', 'y', 'true', 't', '1')
return boolean_value
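# Quick illustration (not part of the pipeline): any of 'yes'/'y'/'true'/'t'/'1',
# case-insensitively, maps to True and everything else to False, e.g.
# parse_boolean('Yes') -> True, parse_boolean('0') -> False.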
def empty_string_check(string, raise_exception=True):
"""
Simple check to see if the string provided by parameter string is empty. False indicates the string is NOT empty.
Parameter raise_exception determines if a ValueError exception should be raised if the string is empty.
If raise_exception is False and the string is empty, True is returned.
"""
if string != '':
return False
if raise_exception:
raise ValueError("Empty string detected!")
return True
def sanitise_inputs(parsed_arguments):
"""
	Utilises filesystem_exists_check and check_input_files.
	If either returns False, the path is invalid or unsupported files are present,
	so the run cannot continue.
"""
trigger = False
##
## Jobname prefix validity check
if parsed_arguments.jobname:
for character in parsed_arguments.jobname:
			if character == ' ' or character == '/':
log.error('{}{}{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'Specified Job Name has invalid characters: "', character, '"'))
trigger = True
##
## Config mode check
if parsed_arguments.config:
if not filesystem_exists_check(parsed_arguments.config[0]):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'Specified config file could not be found.'))
trigger = True
for xmlfile in parsed_arguments.config:
if not check_input_files('.xml',xmlfile):
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'Specified config file is not an XML file.'))
trigger = True
return trigger
def extract_data(input_data_directory):
target_files = glob.glob(os.path.join(input_data_directory, '*'))
for extract_target in target_files:
if extract_target.lower().endswith(('.fq.gz', '.fastq.gz')):
log.info('{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Detected compressed input data. Extracting!'))
break
for extract_target in target_files:
unzipd = subprocess.Popen(['gzip', '-q', '-f', '-d', extract_target], stderr=subprocess.PIPE)
unzipd.wait()
return True
def sequence_pairings(data_path, instance_rundir):
##
## Get input files from data path
## Sort so that ordering isn't screwy on linux
input_files = glob.glob(os.path.join(data_path, '*'))
sorted_input = sorted(input_files)
sequence_pairs = []
file_count = len(sorted_input)
if not file_count % 2 == 0:
log.error('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'I/O: Non-even number of input files specified. Cannot continue without pairing!'))
sys.exit(2)
##
## Optimise so code isn't recycled
for i in range(0, len(sorted_input), 2):
file_pair = {}
forward_data = sorted_input[i]
reverse_data = sorted_input[i+1]
##
## Check forward ends with R1
forward_data_name = sorted_input[i].split('/')[-1].split('.')[0]
if not forward_data_name.endswith('_R1'):
log.error('{}{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'I/O: Forward input file does not end in _R1. ', forward_data))
sys.exit(2)
##
## Check reverse ends with R2
reverse_data_name = sorted_input[i+1].split('/')[-1].split('.')[0]
if not reverse_data_name.endswith('_R2'):
log.error('{}{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'I/O: Reverse input file does not end in _R2. ', reverse_data))
sys.exit(2)
##
## Make Stage outputs for use in everywhere else in pipeline
sample_root = '_'.join(forward_data_name.split('_')[:-1])
instance_path = os.path.join(instance_rundir)
seq_qc_path = os.path.join(instance_rundir, sample_root, 'SeqQC')
align_path = os.path.join(instance_rundir, sample_root, 'Align')
predict_path = os.path.join(instance_rundir, sample_root, 'Predict')
file_pair[sample_root] = [forward_data, reverse_data, instance_path, seq_qc_path, align_path, predict_path]
sequence_pairs.append(file_pair)
return sequence_pairs
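# Shape of the structure returned by sequence_pairings(), shown for one hypothetical
# sample 'sampleA' (paths illustrative only):
#   [{'sampleA': ['<data>/sampleA_R1.fastq', '<data>/sampleA_R2.fastq',
#                 '<run>', '<run>/sampleA/SeqQC', '<run>/sampleA/Align',
#                 '<run>/sampleA/Predict']}]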
def filesystem_exists_check(path, raise_exception=True):
"""
Checks to see if the path, specified by parameter path, exists. Can be either a directory or file.
If the path exists, True is returned. If the path does not exist, and raise_exception is set to True,
an IOError is raised - else False is returned.
"""
if os.path.lexists(path):
return True
if raise_exception:
log.error('{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'Specified input path could not be found.'))
return False
def check_input_files(input_format, input_file):
if input_file.endswith(input_format):
return True
return False
def initialise_libraries(instance_params):
trigger = False
##
## Subfunction for recycling code
## Calls UNIX type for checking binaries present
## Changed from WHICH as apparently type functions over different shells/config files
def type_func(binary):
binary_result = []
binary_string = 'type {}'.format(binary)
binary_subprocess = subprocess.Popen([binary_string], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
binary_result = binary_subprocess.communicate()
binary_subprocess.wait()
if 'not found'.encode() in binary_result[0] or binary_result[1]:
log.critical('{}{}{}{}{}{}'.format(Colour.red,'shd__ ',Colour.end,'Missing binary: ', binary, '!'))
raise NameError
##
## To determine which binaries to check for
## AttributeError in the situation where instance_params origin differs
## try for -c style, except AttributeError for -b style
try:
quality_control = instance_params.config_dict['instance_flags']['@quality_control']
alignment = instance_params.config_dict['instance_flags']['@sequence_alignment']
genotyping = instance_params.config_dict['instance_flags']['@genotype_prediction']
snp_calling = instance_params.config_dict['instance_flags']['@snp_calling']
except AttributeError:
quality_control = instance_params['quality_control']
alignment = instance_params['sequence_alignment']
genotyping = instance_params['genotype_prediction']
snp_calling = instance_params['snp_calling']
if quality_control == 'True':
try:type_func('java')
except NameError: trigger=True
try:type_func('fastqc')
except NameError: trigger=True
try:type_func('cutadapt')
except NameError: trigger=True
if alignment == 'True':
try:type_func('seqtk')
except NameError: trigger=True
try:type_func('bwa')
except NameError: trigger=True
try:type_func('samtools')
except NameError: trigger=True
try:type_func('generatr')
except NameError: trigger=True
if genotyping == 'True':
try:type_func('samtools')
except NameError: trigger=True
try:type_func('generatr')
except NameError: trigger=True
if snp_calling == 'True':
try: type_func('picard')
except NameError: trigger=True
try: type_func('freebayes')
except NameError: trigger=True
return trigger
def sanitise_outputs(jobname, output_argument):
run_dir = ''
output_root = output_argument[0]
if jobname:
target_output = os.path.join(output_root, jobname)
if not os.path.exists(target_output):
log.info('{}{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Creating Output with prefix: ', jobname))
run_dir = os.path.join(output_root, jobname)
mkdir_p(run_dir)
else:
purge_choice = ''
while True:
purge_choice = input('{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Job folder already exists. Delete existing folder? Y/N: '))
if not (purge_choice.lower() == 'y') and not (purge_choice.lower() == 'n'):
log.info('{}{}{}{}'.format(Colour.red, 'shd__ ', Colour.end, 'Invalid input. Please input Y or N.'))
continue
else:
break
if purge_choice.lower() == 'y':
log.info('{}{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Clearing pre-existing Jobname Prefix: ', jobname))
run_dir = os.path.join(output_root, jobname)
if os.path.exists(run_dir):
shutil.rmtree(run_dir, ignore_errors=True)
mkdir_p(run_dir)
else:
raise Exception('User chose not to delete pre-existing Job folder. Cannot write output.')
else:
## Ensures root output is a real directory
## Generates folder name based on date (for run ident)
date = datetime.date.today().strftime('%d-%m-%Y')
walltime = datetime.datetime.now().strftime('%H%M%S')
today = date + '-' + walltime
## If the user specified root doesn't exist, make it
## Then make the run directory for datetime
if not os.path.exists(output_root):
log.info('{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Creating output root... '))
mkdir_p(output_root)
run_dir = os.path.join(output_root, 'ScaleHDRun_'+today)
log.info('{}{}{}{}'.format(Colour.bold, 'shd__ ', Colour.end, 'Creating instance run directory.. '))
mkdir_p(run_dir)
## Inform user it's all gonna be okaaaayyyy
log.info('{}{}{}{}'.format(Colour.green, 'shd__ ', Colour.end, 'Output directories OK!'))
return run_dir
def replace_fqfile(mutate_list, target_fqfile, altered_path):
if target_fqfile in mutate_list:
loc = mutate_list.index(target_fqfile)
mutate_list[loc] = altered_path
return mutate_list
def scrape_summary_data(stage, input_report_file):
##
## If the argument input_report_file is from trimming..
if stage == 'trim':
with open(input_report_file, 'r') as trpf:
trim_lines = trpf.readlines()
##
## Determine buffer size to slice from above array
scraping_buffer = 8
if '-q' in trim_lines[1]:
scraping_buffer += 1
##
## Get Anchor
summary_start = 0
for i in range(0, len(trim_lines)):
if '== Summary ==' in trim_lines[i]:
summary_start = i
##
## Slice and close
summary_data = trim_lines[summary_start:summary_start + scraping_buffer]
trpf.close()
return summary_data[2:]
##
## If the argument input_report_file is from alignment..
if stage == 'align':
with open(input_report_file, 'r') as alnrpf:
align_lines = alnrpf.readlines()
alnrpf.close()
##
## No ranges required, only skip first line
return align_lines[1:]
##
## No need to tidy up report for genotyping
## since we already have the data from our own objects
if stage == 'gtype':
pass
def generate_atypical_xml(label, allele_object, index_path, direction):
"""
:param allele_object:
:param index_path:
:return:
"""
##TODO docstring
atypical_path = os.path.join(index_path, '{}{}_{}.xml'.format(direction, label, allele_object.get_reflabel()))
fp_flank = 'GCGACCCTGGAAAAGCTGATGAAGGCCTTCGAGTCCCTCAAGTCCTTC'
cagstart = ''; cagend = ''
intv = allele_object.get_intervening()
ccgstart = ''; ccgend = ''
ccglen = allele_object.get_ccg()
cctlen = allele_object.get_cct()
tp_flank = 'CAGCTTCCTCAGCCGCCGCCGCAGGCACAGCCGCTGCT'
if direction == 'fw':
cagstart = '1'; cagend = '200'
ccgstart = '1'; ccgend = '20'
if direction == 'rv':
cagstart = '100'; cagend = '100'
ccgstart = '1'; ccgend = '20'
##
## Create XML
data_root = etree.Element('data')
loci_root = etree.Element('loci', label=allele_object.get_reflabel()); data_root.append(loci_root)
##
## Loci Nodes
fp_input = etree.Element('input', type='fiveprime', flank=fp_flank)
cag_region = etree.Element('input', type='repeat_region', order='1', unit='CAG', start=cagstart, end=cagend)
intervening = etree.Element('input', type='intervening', sequence=intv, prior='1')
ccg_region = etree.Element('input', type='repeat_region', order='2', unit='CCG', start=ccgstart, end=ccgend)
cct_region = etree.Element('input', type='repeat_region', order='3', unit='CCT', start=str(cctlen), end=str(cctlen))
tp_input = etree.Element('input', type='threeprime', flank=tp_flank)
for node in [fp_input, cag_region, intervening, ccg_region, cct_region, tp_input]:
loci_root.append(node)
s = etree.tostring(data_root, pretty_print=True)
with open(atypical_path, 'w') as xmlfi:
xmlfi.write(s.decode())
xmlfi.close()
return atypical_path
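# For reference, the XML written by generate_atypical_xml() has the following shape
# (attribute values illustrative; 'fw' direction shown):
#   <data>
#     <loci label="...">
#       <input type="fiveprime" flank="GCGACCCTGG..."/>
#       <input type="repeat_region" order="1" unit="CAG" start="1" end="200"/>
#       <input type="intervening" sequence="..." prior="1"/>
#       <input type="repeat_region" order="2" unit="CCG" start="1" end="20"/>
#       <input type="repeat_region" order="3" unit="CCT" start="..." end="..."/>
#       <input type="threeprime" flank="CAGCTTCCTC..."/>
#     </loci>
#   </data>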
def generate_reference(input_xml, index_path, ref_indexes, direction):
##TODO docstring
label = input_xml.split('/')[-1].split('.')[0]
target_output = os.path.join(index_path, label + '.fa')
temp_output = os.path.join(index_path, label + '_concat.fa')
gen_process = subprocess.Popen(['generatr', '-i', input_xml, '-o', target_output], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gen_process.wait()
##
## Join typical and atypical reference into one file
if direction == 'fw':
toutfi = open(temp_output, 'w')
cat_process = subprocess.Popen(['cat', target_output, ref_indexes[0]], stdout=toutfi, stderr=subprocess.PIPE)
cat_process.wait()
toutfi.close()
target_output = temp_output
return target_output
def seek_target(input_list, target):
for i in range(0, len(input_list)):
if target in input_list[i]:
return i
def sanitise_trimming_output(input_object, input_list):
if type(input_object) is int:
cleanse_target = input_list[input_object].split(':')[1].lstrip().rstrip()
return cleanse_target
else:
return '*'
def sanitise_alignment_output(input_object, input_list, stage):
if type(input_object) is int:
if stage == 3:
cleanse_target = input_list[input_object].lstrip().rstrip().split(' ')[0:1]
return ''.join(cleanse_target)
else:
cleanse_target = input_list[input_object].lstrip().rstrip().split(' ')[0:2]
return ' '.join(cleanse_target)
else:
return '*'
def mkdir_p(path):
try: os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path): pass
else: raise
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"sys.exit",
"logging.error",
"lxml.etree.tostring",
"numpy.arange",
"os.path.exists",
"xml.etree.cElementTree.XML",
"subprocess.Popen",
"os.path.lexists",
"os.path.isdir",
"numpy.empty",
"lxml.etree.DTD",
"csv.reader",
"os.path.isfile",
"datetime.date.today",
"lxml.etree.Element",
"os.makedirs",
"lxml.etree.parse",
"os.path.join",
"datetime.datetime.now",
"collections.defaultdict",
"shutil.rmtree",
"string.lower"
]
| [((22060, 22081), 'os.path.lexists', 'os.path.lexists', (['path'], {}), '(path)\n', (22075, 22081), False, 'import os\n'), ((28633, 28654), 'lxml.etree.Element', 'etree.Element', (['"""data"""'], {}), "('data')\n", (28646, 28654), False, 'from lxml import etree\n'), ((28787, 28843), 'lxml.etree.Element', 'etree.Element', (['"""input"""'], {'type': '"""fiveprime"""', 'flank': 'fp_flank'}), "('input', type='fiveprime', flank=fp_flank)\n", (28800, 28843), False, 'from lxml import etree\n'), ((28858, 28958), 'lxml.etree.Element', 'etree.Element', (['"""input"""'], {'type': '"""repeat_region"""', 'order': '"""1"""', 'unit': '"""CAG"""', 'start': 'cagstart', 'end': 'cagend'}), "('input', type='repeat_region', order='1', unit='CAG', start=\n cagstart, end=cagend)\n", (28871, 28958), False, 'from lxml import etree\n'), ((28969, 29037), 'lxml.etree.Element', 'etree.Element', (['"""input"""'], {'type': '"""intervening"""', 'sequence': 'intv', 'prior': '"""1"""'}), "('input', type='intervening', sequence=intv, prior='1')\n", (28982, 29037), False, 'from lxml import etree\n'), ((29052, 29152), 'lxml.etree.Element', 'etree.Element', (['"""input"""'], {'type': '"""repeat_region"""', 'order': '"""2"""', 'unit': '"""CCG"""', 'start': 'ccgstart', 'end': 'ccgend'}), "('input', type='repeat_region', order='2', unit='CCG', start=\n ccgstart, end=ccgend)\n", (29065, 29152), False, 'from lxml import etree\n'), ((29278, 29335), 'lxml.etree.Element', 'etree.Element', (['"""input"""'], {'type': '"""threeprime"""', 'flank': 'tp_flank'}), "('input', type='threeprime', flank=tp_flank)\n", (29291, 29335), False, 'from lxml import etree\n'), ((29452, 29496), 'lxml.etree.tostring', 'etree.tostring', (['data_root'], {'pretty_print': '(True)'}), '(data_root, pretty_print=True)\n', (29466, 29496), False, 'from lxml import etree\n'), ((29760, 29799), 'os.path.join', 'os.path.join', (['index_path', "(label + '.fa')"], {}), "(index_path, label + '.fa')\n", (29772, 29799), False, 'import os\n'), ((29815, 29861), 'os.path.join', 'os.path.join', (['index_path', "(label + '_concat.fa')"], {}), "(index_path, label + '_concat.fa')\n", (29827, 29861), False, 'import os\n'), ((29877, 29998), 'subprocess.Popen', 'subprocess.Popen', (["['generatr', '-i', input_xml, '-o', target_output]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['generatr', '-i', input_xml, '-o', target_output], stdout\n =subprocess.PIPE, stderr=subprocess.PIPE)\n", (29893, 29998), False, 'import subprocess\n'), ((1723, 1742), 'lxml.etree.DTD', 'etree.DTD', (['dtd_file'], {}), '(dtd_file)\n', (1732, 1742), False, 'from lxml import etree\n'), ((3064, 3115), 'lxml.etree.tostring', 'etree.tostring', (['self.config_file'], {'pretty_print': '(True)'}), '(self.config_file, pretty_print=True)\n', (3078, 3115), False, 'from lxml import etree\n'), ((3133, 3162), 'xml.etree.cElementTree.XML', 'cElementTree.XML', (['string_repr'], {}), '(string_repr)\n', (3149, 3162), False, 'from xml.etree import cElementTree\n'), ((17907, 17934), 'string.lower', 'string.lower', (['boolean_value'], {}), '(boolean_value)\n', (17919, 17934), False, 'import string\n'), ((19526, 19565), 'os.path.join', 'os.path.join', (['input_data_directory', '"""*"""'], {}), "(input_data_directory, '*')\n", (19538, 19565), False, 'import os\n'), ((19838, 19927), 'subprocess.Popen', 'subprocess.Popen', (["['gzip', '-q', '-f', '-d', extract_target]"], {'stderr': 'subprocess.PIPE'}), "(['gzip', '-q', '-f', '-d', extract_target], stderr=\n subprocess.PIPE)\n", (19854, 19927), False, 'import 
subprocess\n'), ((20118, 20146), 'os.path.join', 'os.path.join', (['data_path', '"""*"""'], {}), "(data_path, '*')\n", (20130, 20146), False, 'import os\n'), ((20417, 20428), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (20425, 20428), False, 'import sys\n'), ((21330, 21359), 'os.path.join', 'os.path.join', (['instance_rundir'], {}), '(instance_rundir)\n', (21342, 21359), False, 'import os\n'), ((21376, 21427), 'os.path.join', 'os.path.join', (['instance_rundir', 'sample_root', '"""SeqQC"""'], {}), "(instance_rundir, sample_root, 'SeqQC')\n", (21388, 21427), False, 'import os\n'), ((21443, 21494), 'os.path.join', 'os.path.join', (['instance_rundir', 'sample_root', '"""Align"""'], {}), "(instance_rundir, sample_root, 'Align')\n", (21455, 21494), False, 'import os\n'), ((21512, 21565), 'os.path.join', 'os.path.join', (['instance_rundir', 'sample_root', '"""Predict"""'], {}), "(instance_rundir, sample_root, 'Predict')\n", (21524, 21565), False, 'import os\n'), ((22705, 22802), 'subprocess.Popen', 'subprocess.Popen', (['[binary_string]'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '([binary_string], shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n', (22721, 22802), False, 'import subprocess\n'), ((24730, 24764), 'os.path.join', 'os.path.join', (['output_root', 'jobname'], {}), '(output_root, jobname)\n', (24742, 24764), False, 'import os\n'), ((26316, 26364), 'os.path.join', 'os.path.join', (['output_root', "('ScaleHDRun_' + today)"], {}), "(output_root, 'ScaleHDRun_' + today)\n", (26328, 26364), False, 'import os\n'), ((30146, 30245), 'subprocess.Popen', 'subprocess.Popen', (["['cat', target_output, ref_indexes[0]]"], {'stdout': 'toutfi', 'stderr': 'subprocess.PIPE'}), "(['cat', target_output, ref_indexes[0]], stdout=toutfi,\n stderr=subprocess.PIPE)\n", (30162, 30245), False, 'import subprocess\n'), ((31056, 31073), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (31067, 31073), False, 'import os\n'), ((1227, 1272), 'logging.error', 'log.error', (['"""No configuration file specified!"""'], {}), "('No configuration file specified!')\n", (1236, 1272), True, 'import logging as log\n'), ((1303, 1336), 'lxml.etree.parse', 'etree.parse', (['self.config_filename'], {}), '(self.config_filename)\n', (1314, 1336), False, 'from lxml import etree\n'), ((2014, 2025), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2022, 2025), False, 'import sys\n'), ((3600, 3630), 'os.path.exists', 'os.path.exists', (['data_directory'], {}), '(data_directory)\n', (3614, 3630), False, 'import os\n'), ((3802, 3835), 'os.path.join', 'os.path.join', (['data_directory', '"""*"""'], {}), "(data_directory, '*')\n", (3814, 3835), False, 'import os\n'), ((4192, 4225), 'os.path.isfile', 'os.path.isfile', (['forward_reference'], {}), '(forward_reference)\n', (4206, 4225), False, 'import os\n'), ((4689, 4722), 'os.path.isfile', 'os.path.isfile', (['reverse_reference'], {}), '(reverse_reference)\n', (4703, 4722), False, 'import os\n'), ((16299, 16310), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (16307, 16310), False, 'import sys\n'), ((17104, 17117), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (17114, 17117), False, 'import csv\n'), ((17215, 17248), 'numpy.empty', 'np.empty', (['(n_samples, n_features)'], {}), '((n_samples, n_features))\n', (17223, 17248), True, 'import numpy as np\n'), ((17296, 17310), 'numpy.array', 'np.array', (['temp'], {}), '(temp)\n', (17304, 17310), True, 'import numpy as np\n'), ((17443, 17471), 'sklearn.preprocessing.LabelEncoder', 
'preprocessing.LabelEncoder', ([], {}), '()\n', (17469, 17471), False, 'from sklearn import preprocessing\n'), ((20877, 20888), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (20885, 20888), False, 'import sys\n'), ((21171, 21182), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (21179, 21182), False, 'import sys\n'), ((24774, 24803), 'os.path.exists', 'os.path.exists', (['target_output'], {}), '(target_output)\n', (24788, 24803), False, 'import os\n'), ((24928, 24962), 'os.path.join', 'os.path.join', (['output_root', 'jobname'], {}), '(output_root, jobname)\n', (24940, 24962), False, 'import os\n'), ((26157, 26184), 'os.path.exists', 'os.path.exists', (['output_root'], {}), '(output_root)\n', (26171, 26184), False, 'import os\n'), ((2507, 2524), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2518, 2524), False, 'from collections import defaultdict\n'), ((25557, 25591), 'os.path.join', 'os.path.join', (['output_root', 'jobname'], {}), '(output_root, jobname)\n', (25569, 25591), False, 'import os\n'), ((25599, 25622), 'os.path.exists', 'os.path.exists', (['run_dir'], {}), '(run_dir)\n', (25613, 25622), False, 'import os\n'), ((25915, 25936), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (25934, 25936), False, 'import datetime\n'), ((25971, 25994), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25992, 25994), False, 'import datetime\n'), ((31133, 31152), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (31146, 31152), False, 'import os\n'), ((11661, 11684), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.01)'], {}), '(0, 1.1, 0.01)\n', (11670, 11684), True, 'import numpy as np\n'), ((25629, 25671), 'shutil.rmtree', 'shutil.rmtree', (['run_dir'], {'ignore_errors': '(True)'}), '(run_dir, ignore_errors=True)\n', (25642, 25671), False, 'import shutil\n')] |
# Generated by Django 2.2.13 on 2020-11-27 05:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mvp', '0003_hublocation'),
]
operations = [
migrations.RemoveField(
model_name='hublocation',
name='longitude',
),
migrations.AddField(
model_name='hublocation',
name='longi',
field=models.TextField(default=654433, max_length=90, unique=True, verbose_name='Longitude'),
preserve_default=False,
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.TextField"
]
| [((225, 291), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""hublocation"""', 'name': '"""longitude"""'}), "(model_name='hublocation', name='longitude')\n", (247, 291), False, 'from django.db import migrations, models\n'), ((439, 530), 'django.db.models.TextField', 'models.TextField', ([], {'default': '(654433)', 'max_length': '(90)', 'unique': '(True)', 'verbose_name': '"""Longitude"""'}), "(default=654433, max_length=90, unique=True, verbose_name=\n 'Longitude')\n", (455, 530), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
"""
tornadio2.tests.gen
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by the <NAME>, see AUTHORS for more details.
:license: Apache, see LICENSE for more details.
"""
from collections import deque
from nose.tools import eq_
from tornadio2 import gen
_queue = None
def init_environment():
global _queue
_queue = deque()
def run_sync(test, callback):
callback(test)
def queue_async(test, callback):
global _queue
_queue.append((callback, test))
def step_async():
callback = _queue.popleft()
callback[0](callback[1])
def run_async():
global _queue
while True:
try:
step_async()
except IndexError:
break
def run_async_oor():
global _queue
while True:
try:
callback = _queue.pop()
callback[0](callback[1])
except IndexError:
break
class Dummy():
def __init__(self, queue_type):
self.v = None
self.queue_type = queue_type
@gen.sync_engine
def test(self, value):
self.v = yield gen.Task(self.queue_type, value)
class DummyList():
def __init__(self, queue_type):
self.v = []
self.queue_type = queue_type
@gen.sync_engine
def test(self, value):
self.v.append((yield gen.Task(self.queue_type, value)))
class DummyListOutOfOrder():
def __init__(self, queue_type):
self.v = []
self.queue_type = queue_type
@gen.engine
def test(self, value):
self.v.append((yield gen.Task(self.queue_type, value)))
class DummyLoop():
def __init__(self, queue_type):
self.v = 0
self.queue_type = queue_type
@gen.sync_engine
def test(self, value):
for n in range(2):
self.v += (yield gen.Task(self.queue_type, value))
def test():
init_environment()
dummy = Dummy(run_sync)
dummy.test('test')
eq_(dummy.v, 'test')
def test_async():
init_environment()
dummy = Dummy(queue_async)
dummy.test('test')
run_async()
# Verify value
eq_(dummy.v, 'test')
def test_sync_queue():
init_environment()
dummy = DummyList(queue_async)
dummy.test('1')
dummy.test('2')
dummy.test('3')
run_async()
# Verify value
eq_(dummy.v, ['1', '2', '3'])
def test_sync_queue_oor():
init_environment()
dummy = DummyList(queue_async)
dummy.test('1')
dummy.test('2')
dummy.test('3')
run_async_oor()
# Verify value
eq_(dummy.v, ['1', '2', '3'])
def test_async_queue_oor():
init_environment()
dummy = DummyListOutOfOrder(queue_async)
dummy.test('1')
dummy.test('2')
dummy.test('3')
run_async_oor()
# Verify value
eq_(dummy.v, ['3', '2', '1'])
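# Taken together, the queue tests illustrate the contract of the two decorators:
# gen.sync_engine serialises yielded Tasks so results arrive in submission order even
# when the underlying callbacks fire out of order (run_async_oor), while the plain
# gen.engine resolves each call as its callback completes, so reversed callback
# delivery yields reversed results (['3', '2', '1']).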
| [
"tornadio2.gen.Task",
"collections.deque",
"nose.tools.eq_"
]
| [((361, 368), 'collections.deque', 'deque', ([], {}), '()\n', (366, 368), False, 'from collections import deque\n'), ((1937, 1957), 'nose.tools.eq_', 'eq_', (['dummy.v', '"""test"""'], {}), "(dummy.v, 'test')\n", (1940, 1957), False, 'from nose.tools import eq_\n'), ((2096, 2116), 'nose.tools.eq_', 'eq_', (['dummy.v', '"""test"""'], {}), "(dummy.v, 'test')\n", (2099, 2116), False, 'from nose.tools import eq_\n'), ((2301, 2330), 'nose.tools.eq_', 'eq_', (['dummy.v', "['1', '2', '3']"], {}), "(dummy.v, ['1', '2', '3'])\n", (2304, 2330), False, 'from nose.tools import eq_\n'), ((2523, 2552), 'nose.tools.eq_', 'eq_', (['dummy.v', "['1', '2', '3']"], {}), "(dummy.v, ['1', '2', '3'])\n", (2526, 2552), False, 'from nose.tools import eq_\n'), ((2756, 2785), 'nose.tools.eq_', 'eq_', (['dummy.v', "['3', '2', '1']"], {}), "(dummy.v, ['3', '2', '1'])\n", (2759, 2785), False, 'from nose.tools import eq_\n'), ((1100, 1132), 'tornadio2.gen.Task', 'gen.Task', (['self.queue_type', 'value'], {}), '(self.queue_type, value)\n', (1108, 1132), False, 'from tornadio2 import gen\n'), ((1325, 1357), 'tornadio2.gen.Task', 'gen.Task', (['self.queue_type', 'value'], {}), '(self.queue_type, value)\n', (1333, 1357), False, 'from tornadio2 import gen\n'), ((1557, 1589), 'tornadio2.gen.Task', 'gen.Task', (['self.queue_type', 'value'], {}), '(self.queue_type, value)\n', (1565, 1589), False, 'from tornadio2 import gen\n'), ((1810, 1842), 'tornadio2.gen.Task', 'gen.Task', (['self.queue_type', 'value'], {}), '(self.queue_type, value)\n', (1818, 1842), False, 'from tornadio2 import gen\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: <NAME>
# contact: <EMAIL>
import torch
import SimpleITK as sitk
import numpy as np
import nibabel as nib
from torch.autograd import Variable
from skimage.transform import resize
from torchvision import transforms
from time import gmtime, strftime
from tqdm import tqdm
import pdb
import os
from ..helpers.helper import *
from os.path import expanduser
home = expanduser("~")
#========================================================================================
# prediction functions.....................
bin_path = os.path.join('/opt/ANTs/bin/')
class tumorSeg():
"""
class performs segmentation for a given sequence of patient data.
    two main workflows for segmentation mask estimation:
        one for patient data in BraTS format,
        the other for data in any arbitrary layout
    steps followed in estimating the segmentation mask:
        1. ABLnet for reducing false positives outside the brain
            Air Brain Lesion model (2D model, 103 layered)
        2. BNet3Dnet 3D network for inner class classification
            Dual Pathway network
        3. MNet2D 57 layered convolutional network for inner class
            classification
        4. Tir3Dnet 57 layered 3D convolutional network for inner class
            classification
    more on training details and network information:
    (https://link.springer.com/chapter/10.1007/978-3-030-11726-9_43)
    =========================
    quick: True evaluates only the 2D MNet model,
    else computes an ensemble over all four networks
"""
def __init__(self,
quick = False,
ants_path = bin_path):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = "cpu"
map_location = device
#========================================================================================
ckpt_tir2D = os.path.join(home, '.DeepBrainSeg/BestModels/Tramisu_2D_FC57_best_loss.pth.tar')
ckpt_tir3D = os.path.join(home, '.DeepBrainSeg/BestModels/Tramisu_3D_FC57_best_acc.pth.tar')
ckpt_BNET3D = os.path.join(home, '.DeepBrainSeg/BestModels/BrainNet_3D_best_acc.pth.tar')
ckpt_ABL = os.path.join(home, '.DeepBrainSeg/BestModels/ABL_CE_best_model_loss_based.pth.tar')
#========================================================================================
# air brain lesion segmentation..............
from .models.modelABL import FCDenseNet103
self.ABLnclasses = 3
self.ABLnet = FCDenseNet103(n_classes = self.ABLnclasses) ## intialize the graph
saved_parms=torch.load(ckpt_ABL, map_location=map_location)
self.ABLnet.load_state_dict(saved_parms['state_dict']) ## fill the model with trained params
print ("=================================== ABLNET2D Loaded =================================")
self.ABLnet.eval()
self.ABLnet = self.ABLnet.to(device)
#========================================================================================
# Tir2D net.......................
from .models.modelTir2D import FCDenseNet57
self.Mnclasses = 4
self.MNET2D = FCDenseNet57(self.Mnclasses)
ckpt = torch.load(ckpt_tir2D, map_location=map_location)
self.MNET2D.load_state_dict(ckpt['state_dict'])
print ("=================================== MNET2D Loaded ===================================")
self.MNET2D.eval()
self.MNET2D = self.MNET2D.to(device)
#========================================================================================
if not quick:
# BrainNet3D model......................
from .models.model3DBNET import BrainNet_3D_Inception
self.B3Dnclasses = 5
self.BNET3Dnet = BrainNet_3D_Inception()
ckpt = torch.load(ckpt_BNET3D, map_location=map_location)
self.BNET3Dnet.load_state_dict(ckpt['state_dict'])
print ("=================================== KAMNET3D Loaded =================================")
self.BNET3Dnet.eval()
self.BNET3Dnet = self.BNET3Dnet.to(device)
#========================================================================================
# Tir3D model...................
from .models.modelTir3D import FCDenseNet57
self.T3Dnclasses = 5
self.Tir3Dnet = FCDenseNet57(self.T3Dnclasses)
ckpt = torch.load(ckpt_tir3D, map_location=map_location)
self.Tir3Dnet.load_state_dict(ckpt['state_dict'])
print ("================================== TIRNET2D Loaded =================================")
self.Tir3Dnet.eval()
self.Tir3Dnet = self.Tir3Dnet.to(device)
#========================================================================================
self.device = device
self.quick = quick
self.ants_path = ants_path
def get_ants_mask(self, t1_path):
"""
We make use of ants framework for generalized skull stripping
t1_path: t1 volume path (str)
        saves the mask in the same directory as the t1 volume
        returns: mask volume (numpy uint8 array)
"""
mask_path = os.path.join(os.path.dirname(t1_path), 'mask.nii.gz')
os.system(self.ants_path +'ImageMath 3 '+ mask_path +' Normalize '+ t1_path)
os.system(self.ants_path +'ThresholdImage 3 '+ mask_path +' '+ mask_path +' 0.01 1')
os.system(self.ants_path +'ImageMath 3 '+ mask_path +' MD '+ mask_path +' 1')
os.system(self.ants_path +'ImageMath 3 '+ mask_path +' ME '+ mask_path +' 1')
os.system(self.ants_path +'CopyImageHeaderInformation '+ t1_path+' '+ mask_path +' '+ mask_path +' 1 1 1')
mask = np.uint8(nib.load(mask_path).get_data())
return mask
def get_localization(self, t1_v, t1c_v, t2_v, flair_v, brain_mask):
"""
ABLnetwork output, finds the brain, Whole tumor region
t1_v = t1 volume (numpy array)
t1c_v = t1c volume (numpy array)
t2_v = t2 volume (numpy array)
flair_v = flair volume (numpy array)
brain_mask = brain, whole tumor mask (numpy array, output of ANTs pieline)
"""
t1_v = normalize(t1_v, brain_mask)
t1c_v = normalize(t1c_v, brain_mask)
t2_v = normalize(t2_v, brain_mask)
flair_v = normalize(flair_v, brain_mask)
generated_output_logits = np.empty((self.ABLnclasses, flair_v.shape[0],flair_v.shape[1],flair_v.shape[2]))
for slices in tqdm(range(flair_v.shape[2])):
flair_slice = np.transpose(flair_v[:,:,slices])
t2_slice = np.transpose(t2_v[:,:,slices])
t1ce_slice = np.transpose(t1c_v[:,:,slices])
t1_slice = np.transpose(t1_v[:,:,slices])
array = np.zeros((flair_slice.shape[0],flair_slice.shape[1],4))
array[:,:,0] = flair_slice
array[:,:,1] = t2_slice
array[:,:,2] = t1ce_slice
array[:,:,3] = t1_slice
transformed_array = torch.from_numpy(convert_image(array)).float()
transformed_array = transformed_array.unsqueeze(0) ## neccessary if batch size == 1
transformed_array = transformed_array.to(self.device)
logits = self.ABLnet(transformed_array).detach().cpu().numpy()# 3 x 240 x 240
generated_output_logits[:,:,:, slices] = logits.transpose(0, 1, 3, 2)
final_pred = apply_argmax_to_logits(generated_output_logits)
final_pred = perform_postprocessing(final_pred)
final_pred = adjust_classes_air_brain_tumour(np.uint8(final_pred))
return np.uint8(final_pred)
def inner_class_classification_with_logits_NCube(self, t1,
t1ce, t2, flair,
brain_mask, mask, N = 64):
"""
output of 3D tiramisu model (tir3Dnet)
mask = numpy array output of ABLnet
N = patch size during inference
"""
t1 = normalize(t1, brain_mask)
t1ce = normalize(t1ce, brain_mask)
t2 = normalize(t2, brain_mask)
flair = normalize(flair, brain_mask)
shape = t1.shape # to exclude batch_size
final_prediction = np.zeros((self.T3Dnclasses, shape[0], shape[1], shape[2]))
x_min, x_max, y_min, y_max, z_min, z_max = bbox(mask, pad = N)
x_min, x_max, y_min, y_max, z_min, z_max = x_min, min(shape[0] - N, x_max), y_min, min(shape[1] - N, y_max), z_min, min(shape[2] - N, z_max)
with torch.no_grad():
for x in tqdm(range(x_min, x_max, N//2)):
for y in range(y_min, y_max, N//2):
for z in range(z_min, z_max, N//2):
high = np.zeros((1, 4, N, N, N))
high[0, 0, :, :, :] = flair[x:x+N, y:y+N, z:z+N]
high[0, 1, :, :, :] = t2[x:x+N, y:y+N, z:z+N]
high[0, 2, :, :, :] = t1[x:x+N, y:y+N, z:z+N]
high[0, 3, :, :, :] = t1ce[x:x+N, y:y+N, z:z+N]
high = Variable(torch.from_numpy(high)).to(self.device).float()
pred = torch.nn.functional.softmax(self.Tir3Dnet(high).detach().cpu())
pred = pred.data.numpy()
final_prediction[:, x:x+N, y:y+N, z:z+N] = pred[0]
final_prediction = convert5class_logitsto_4class(final_prediction)
return final_prediction
def inner_class_classification_with_logits_DualPath(self, t1,
t1ce, t2, flair,
brain_mask, mask=None,
prediction_size = 9):
"""
output of BNet3D
prediction_size = mid inference patch size
"""
t1 = normalize(t1, brain_mask)
t1ce = normalize(t1ce, brain_mask)
t2 = normalize(t2, brain_mask)
flair = normalize(flair, brain_mask)
shape = t1.shape # to exclude batch_size
final_prediction = np.zeros((self.B3Dnclasses, shape[0], shape[1], shape[2]))
x_min, x_max, y_min, y_max, z_min, z_max = bbox(mask, pad = prediction_size)
# obtained by aspect ratio calculation
high_res_size = prediction_size + 16
resize_to = int(prediction_size ** 0.5) + 16
low_res_size = int(51*resize_to/19)
hl_pad = (high_res_size - prediction_size)//2
hr_pad = hl_pad + prediction_size
ll_pad = (low_res_size - prediction_size)//2
lr_pad = ll_pad + prediction_size
for x in tqdm(range(x_min, x_max - prediction_size, prediction_size)):
for y in (range(y_min, y_max - prediction_size, prediction_size)):
for z in (range(z_min, z_max - prediction_size, prediction_size)):
high = np.zeros((1, 4, high_res_size, high_res_size, high_res_size))
low = np.zeros((1, 4, low_res_size, low_res_size, low_res_size))
low1 = np.zeros((1, 4, resize_to, resize_to, resize_to))
high[0, 0], high[0, 1], high[0, 2], high[0, 3] = high[0, 0] + flair[0,0,0], high[0, 1] + t2[0,0,0], high[0, 2] + t1[0,0,0], high[0, 2] + t1ce[0,0,0]
low[0, 0], low[0, 1], low[0, 2], low[0, 3] = low[0, 0] + flair[0,0,0], low[0, 1] + t2[0,0,0], low[0, 2] + t1[0,0,0], low[0, 2] + t1ce[0,0,0]
low1[0, 0], low1[0, 1], low1[0, 2], low1[0, 3] = low1[0, 0] + flair[0,0,0], low1[0, 1] + t2[0,0,0], low1[0, 2] + t1[0,0,0], low1[0, 2] + t1ce[0,0,0]
# =========================================================================
vxf, vxt = max(0, x-hl_pad), min(shape[0], x+hr_pad)
vyf, vyt = max(0, y-hl_pad), min(shape[1], y+hr_pad)
vzf, vzt = max(0, z-hl_pad), min(shape[2], z+hr_pad)
txf, txt = max(0, hl_pad-x), max(0, hl_pad-x) + vxt - vxf
tyf, tyt = max(0, hl_pad-y), max(0, hl_pad-y) + vyt - vyf
tzf, tzt = max(0, hl_pad-z), max(0, hl_pad-z) + vzt - vzf
high[0, 0, txf:txt, tyf:tyt, tzf:tzt] = flair[vxf:vxt, vyf:vyt, vzf:vzt]
high[0, 1, txf:txt, tyf:tyt, tzf:tzt] = t2[vxf:vxt, vyf:vyt, vzf:vzt]
high[0, 2, txf:txt, tyf:tyt, tzf:tzt] = t1[vxf:vxt, vyf:vyt, vzf:vzt]
high[0, 3, txf:txt, tyf:tyt, tzf:tzt] = t1ce[vxf:vxt, vyf:vyt, vzf:vzt]
# =========================================================================
vxf, vxt = max(0, x-ll_pad), min(shape[0], x+lr_pad)
vyf, vyt = max(0, y-ll_pad), min(shape[1], y+lr_pad)
vzf, vzt = max(0, z-ll_pad), min(shape[2], z+lr_pad)
txf, txt = max(0, ll_pad-x), max(0, ll_pad-x) + vxt - vxf
tyf, tyt = max(0, ll_pad-y), max(0, ll_pad-y) + vyt - vyf
tzf, tzt = max(0, ll_pad-z), max(0, ll_pad-z) + vzt - vzf
low[0, 0, txf:txt, tyf:tyt, tzf:tzt] = flair[vxf:vxt, vyf:vyt, vzf:vzt]
low[0, 1, txf:txt, tyf:tyt, tzf:tzt] = t2[vxf:vxt, vyf:vyt, vzf:vzt]
low[0, 2, txf:txt, tyf:tyt, tzf:tzt] = t1[vxf:vxt, vyf:vyt, vzf:vzt]
low[0, 3, txf:txt, tyf:tyt, tzf:tzt] = t1ce[vxf:vxt, vyf:vyt, vzf:vzt]
# =========================================================================
low1[0] = [resize(low[0, i, :, :, :], (resize_to, resize_to, resize_to)) for i in range(4)]
high = Variable(torch.from_numpy(high)).to(self.device).float()
low1 = Variable(torch.from_numpy(low1)).to(self.device).float()
pred = torch.nn.functional.softmax(self.BNET3Dnet(high, low1, pred_size=prediction_size).detach().cpu())
pred = pred.numpy()
final_prediction[:, x:x+prediction_size, y:y+prediction_size, z:z+prediction_size] = pred[0]
final_prediction = convert5class_logitsto_4class(final_prediction)
return final_prediction
def inner_class_classification_with_logits_2D(self,
t1ce_volume,
t2_volume,
flair_volume):
"""
output of 2D tiramisu model (MNet)
"""
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transformList = []
transformList.append(transforms.ToTensor())
transformList.append(normalize)
transformSequence=transforms.Compose(transformList)
generated_output = np.empty((self.Mnclasses,flair_volume.shape[0],flair_volume.shape[1],flair_volume.shape[2]))
for slices in tqdm(range(flair_volume.shape[2])):
flair_slice = scale_every_slice_between_0_to_255(np.transpose(flair_volume[:,:,slices]))
t2_slice = scale_every_slice_between_0_to_255(np.transpose(t2_volume[:,:,slices]))
t1ce_slice = scale_every_slice_between_0_to_255(np.transpose(t1ce_volume[:,:,slices]))
array = np.zeros((flair_slice.shape[0],flair_slice.shape[1],3))
array[:,:,0] = flair_slice
array[:,:,1] = t2_slice
array[:,:,2] = t1ce_slice
array = np.uint8(array)
transformed_array = transformSequence(array)
transformed_array = transformed_array.unsqueeze(0)
transformed_array = transformed_array.to(self.device)
            outs = torch.nn.functional.softmax(self.MNET2D(transformed_array).detach().cpu(), dim=1).numpy()
            # store the per-slice class probabilities; without this the assembled volume stays empty
            generated_output[:, :, :, slices] = outs[0]
        return np.swapaxes(generated_output, 1, 2)
def get_segmentation(self,
t1_path,
t2_path,
t1ce_path,
flair_path,
save_path = None):
"""
        Generates segmentation for data not in BraTS format
        if save_path is provided, the prediction is saved as
        DeepBrainSeg_Prediction.nii.gz in the provided
        directory
        returns: segmentation mask
"""
t1 = nib.load(t1_path).get_data()
t2 = nib.load(t2_path).get_data()
t1ce = nib.load(t1ce_path).get_data()
flair = nib.load(flair_path).get_data()
affine = nib.load(flair_path).affine
brain_mask = self.get_ants_mask(t2_path)
mask = self.get_localization(t1, t1ce, t2, flair, brain_mask)
# mask = np.swapaxes(mask,1, 0)
if not self.quick:
final_predictionTir3D_logits = self.inner_class_classification_with_logits_NCube(t1, t1ce, t2, flair, brain_mask, mask)
final_predictionBNET3D_logits = self.inner_class_classification_with_logits_DualPath(t1, t1ce, t2, flair, brain_mask, mask)
final_predictionMnet_logits = self.inner_class_classification_with_logits_2D(t1, t2, flair).transpose(0, 2, 1, 3)
final_prediction_array = np.array([final_predictionTir3D_logits, final_predictionBNET3D_logits, final_predictionMnet_logits])
else:
final_predictionMnet_logits = self.inner_class_classification_with_logits_2D(t1, t2, flair)
final_prediction_array = np.array([final_predictionMnet_logits])
final_prediction_logits = combine_logits_AM(final_prediction_array)
final_pred = postprocessing_pydensecrf(final_prediction_logits)
final_pred = combine_mask_prediction(mask, final_pred)
final_pred = perform_postprocessing(final_pred)
final_pred = adjust_classes(final_pred)
if save_path:
os.makedirs(save_path, exist_ok=True)
save_volume(final_pred, affine, os.path.join(save_path, 'DeepBrainSeg_Prediction'))
return final_pred
def get_segmentation_brats(self,
path,
save = True):
"""
        Generates segmentation for data in BraTS format
        if save is True, the prediction is saved inside
        the patient's data directory
        returns: segmentation mask
"""
name = path.split("/")[-1] + "_"
flair = nib.load(os.path.join(path, name + 'flair.nii.gz')).get_data()
t1 = nib.load(os.path.join(path, name + 't1.nii.gz')).get_data()
t1ce = nib.load(os.path.join(path, name + 't1ce.nii.gz')).get_data()
t2 = nib.load(os.path.join(path, name + 't2.nii.gz')).get_data()
affine= nib.load(os.path.join(path, name + 'flair.nii.gz')).affine
print ("[INFO: DeepBrainSeg] (" + strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + ") Working on: ", path)
brain_mask = self.get_ants_mask(os.path.join(path, name + 't2.nii.gz'))
# brain_mask = get_brain_mask(t1)
mask = self.get_localization(t1, t1ce, t2, flair, brain_mask)
mask = np.swapaxes(mask,1, 0)
if not self.quick:
final_predictionTir3D_logits = self.inner_class_classification_with_logits_NCube(t1, t1ce, t2, flair, brain_mask, mask)
final_predictionBNET3D_logits = self.inner_class_classification_with_logits_DualPath(t1, t1ce, t2, flair, brain_mask, mask)
final_predictionMnet_logits = self.inner_class_classification_with_logits_2D(t1, t2, flair)
final_prediction_array = np.array([final_predictionTir3D_logits, final_predictionBNET3D_logits, final_predictionMnet_logits])
else:
final_predictionMnet_logits = self.inner_class_classification_with_logits_2D(t1, t2, flair)
final_prediction_array = np.array([final_predictionMnet_logits])
final_prediction_logits = combine_logits_AM(final_prediction_array)
final_pred = postprocessing_pydensecrf(final_prediction_logits)
final_pred = combine_mask_prediction(mask, final_pred)
final_pred = perform_postprocessing(final_pred)
final_pred = adjust_classes(final_pred)
if save:
save_volume(final_pred, affine, os.path.join(path, 'DeepBrainSeg_Prediction'))
return final_pred
# ========================================================================================
if __name__ == '__main__':
    ext = tumorSeg(quick=True)
ext.get_segmentation_brats('../../sample_volume/Brats18_CBICA_AVG_1/')
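    # For data that is not organised in the BraTS layout, get_segmentation() takes the
    # four modality paths explicitly (paths below are placeholders, not shipped samples):
    # ext.get_segmentation(t1_path='t1.nii.gz', t2_path='t2.nii.gz',
    #                      t1ce_path='t1ce.nii.gz', flair_path='flair.nii.gz',
    #                      save_path='./outputs')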
| [
"numpy.uint8",
"nibabel.load",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"numpy.empty",
"os.system",
"torchvision.transforms.ToTensor",
"os.path.expanduser",
"os.path.dirname",
"torchvision.transforms.Normalize",
"skimage.transform.resize",
"numpy.transpose",
"torchvision.transforms.Compose",
"time.gmtime",
"os.makedirs",
"torch.load",
"os.path.join",
"numpy.swapaxes",
"numpy.zeros",
"torch.no_grad"
]
| [((422, 437), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (432, 437), False, 'from os.path import expanduser\n'), ((584, 614), 'os.path.join', 'os.path.join', (['"""/opt/ANTs/bin/"""'], {}), "('/opt/ANTs/bin/')\n", (596, 614), False, 'import os\n'), ((2026, 2111), 'os.path.join', 'os.path.join', (['home', '""".DeepBrainSeg/BestModels/Tramisu_2D_FC57_best_loss.pth.tar"""'], {}), "(home, '.DeepBrainSeg/BestModels/Tramisu_2D_FC57_best_loss.pth.tar'\n )\n", (2038, 2111), False, 'import os\n'), ((2131, 2210), 'os.path.join', 'os.path.join', (['home', '""".DeepBrainSeg/BestModels/Tramisu_3D_FC57_best_acc.pth.tar"""'], {}), "(home, '.DeepBrainSeg/BestModels/Tramisu_3D_FC57_best_acc.pth.tar')\n", (2143, 2210), False, 'import os\n'), ((2235, 2310), 'os.path.join', 'os.path.join', (['home', '""".DeepBrainSeg/BestModels/BrainNet_3D_best_acc.pth.tar"""'], {}), "(home, '.DeepBrainSeg/BestModels/BrainNet_3D_best_acc.pth.tar')\n", (2247, 2310), False, 'import os\n'), ((2335, 2422), 'os.path.join', 'os.path.join', (['home', '""".DeepBrainSeg/BestModels/ABL_CE_best_model_loss_based.pth.tar"""'], {}), "(home,\n '.DeepBrainSeg/BestModels/ABL_CE_best_model_loss_based.pth.tar')\n", (2347, 2422), False, 'import os\n'), ((2762, 2809), 'torch.load', 'torch.load', (['ckpt_ABL'], {'map_location': 'map_location'}), '(ckpt_ABL, map_location=map_location)\n', (2772, 2809), False, 'import torch\n'), ((3376, 3425), 'torch.load', 'torch.load', (['ckpt_tir2D'], {'map_location': 'map_location'}), '(ckpt_tir2D, map_location=map_location)\n', (3386, 3425), False, 'import torch\n'), ((5487, 5572), 'os.system', 'os.system', (["(self.ants_path + 'ImageMath 3 ' + mask_path + ' Normalize ' + t1_path)"], {}), "(self.ants_path + 'ImageMath 3 ' + mask_path + ' Normalize ' + t1_path\n )\n", (5496, 5572), False, 'import os\n'), ((5572, 5665), 'os.system', 'os.system', (["(self.ants_path + 'ThresholdImage 3 ' + mask_path + ' ' + mask_path + ' 0.01 1'\n )"], {}), "(self.ants_path + 'ThresholdImage 3 ' + mask_path + ' ' +\n mask_path + ' 0.01 1')\n", (5581, 5665), False, 'import os\n'), ((5665, 5751), 'os.system', 'os.system', (["(self.ants_path + 'ImageMath 3 ' + mask_path + ' MD ' + mask_path + ' 1')"], {}), "(self.ants_path + 'ImageMath 3 ' + mask_path + ' MD ' + mask_path +\n ' 1')\n", (5674, 5751), False, 'import os\n'), ((5751, 5837), 'os.system', 'os.system', (["(self.ants_path + 'ImageMath 3 ' + mask_path + ' ME ' + mask_path + ' 1')"], {}), "(self.ants_path + 'ImageMath 3 ' + mask_path + ' ME ' + mask_path +\n ' 1')\n", (5760, 5837), False, 'import os\n'), ((5837, 5955), 'os.system', 'os.system', (["(self.ants_path + 'CopyImageHeaderInformation ' + t1_path + ' ' + mask_path +\n ' ' + mask_path + ' 1 1 1')"], {}), "(self.ants_path + 'CopyImageHeaderInformation ' + t1_path + ' ' +\n mask_path + ' ' + mask_path + ' 1 1 1')\n", (5846, 5955), False, 'import os\n'), ((6645, 6732), 'numpy.empty', 'np.empty', (['(self.ABLnclasses, flair_v.shape[0], flair_v.shape[1], flair_v.shape[2])'], {}), '((self.ABLnclasses, flair_v.shape[0], flair_v.shape[1], flair_v.\n shape[2]))\n', (6653, 6732), True, 'import numpy as np\n'), ((7937, 7957), 'numpy.uint8', 'np.uint8', (['final_pred'], {}), '(final_pred)\n', (7945, 7957), True, 'import numpy as np\n'), ((8571, 8629), 'numpy.zeros', 'np.zeros', (['(self.T3Dnclasses, shape[0], shape[1], shape[2])'], {}), '((self.T3Dnclasses, shape[0], shape[1], shape[2]))\n', (8579, 8629), True, 'import numpy as np\n'), ((10468, 10526), 'numpy.zeros', 'np.zeros', (['(self.B3Dnclasses, 
shape[0], shape[1], shape[2])'], {}), '((self.B3Dnclasses, shape[0], shape[1], shape[2]))\n', (10476, 10526), True, 'import numpy as np\n'), ((14979, 15045), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (14999, 15045), False, 'from torchvision import transforms\n'), ((15191, 15224), 'torchvision.transforms.Compose', 'transforms.Compose', (['transformList'], {}), '(transformList)\n', (15209, 15224), False, 'from torchvision import transforms\n'), ((15253, 15352), 'numpy.empty', 'np.empty', (['(self.Mnclasses, flair_volume.shape[0], flair_volume.shape[1], flair_volume\n .shape[2])'], {}), '((self.Mnclasses, flair_volume.shape[0], flair_volume.shape[1],\n flair_volume.shape[2]))\n', (15261, 15352), True, 'import numpy as np\n'), ((19604, 19627), 'numpy.swapaxes', 'np.swapaxes', (['mask', '(1)', '(0)'], {}), '(mask, 1, 0)\n', (19615, 19627), True, 'import numpy as np\n'), ((4017, 4067), 'torch.load', 'torch.load', (['ckpt_BNET3D'], {'map_location': 'map_location'}), '(ckpt_BNET3D, map_location=map_location)\n', (4027, 4067), False, 'import torch\n'), ((4644, 4693), 'torch.load', 'torch.load', (['ckpt_tir3D'], {'map_location': 'map_location'}), '(ckpt_tir3D, map_location=map_location)\n', (4654, 4693), False, 'import torch\n'), ((5438, 5462), 'os.path.dirname', 'os.path.dirname', (['t1_path'], {}), '(t1_path)\n', (5453, 5462), False, 'import os\n'), ((6806, 6841), 'numpy.transpose', 'np.transpose', (['flair_v[:, :, slices]'], {}), '(flair_v[:, :, slices])\n', (6818, 6841), True, 'import numpy as np\n'), ((6866, 6898), 'numpy.transpose', 'np.transpose', (['t2_v[:, :, slices]'], {}), '(t2_v[:, :, slices])\n', (6878, 6898), True, 'import numpy as np\n'), ((6923, 6956), 'numpy.transpose', 'np.transpose', (['t1c_v[:, :, slices]'], {}), '(t1c_v[:, :, slices])\n', (6935, 6956), True, 'import numpy as np\n'), ((6981, 7013), 'numpy.transpose', 'np.transpose', (['t1_v[:, :, slices]'], {}), '(t1_v[:, :, slices])\n', (6993, 7013), True, 'import numpy as np\n'), ((7068, 7125), 'numpy.zeros', 'np.zeros', (['(flair_slice.shape[0], flair_slice.shape[1], 4)'], {}), '((flair_slice.shape[0], flair_slice.shape[1], 4))\n', (7076, 7125), True, 'import numpy as np\n'), ((7899, 7919), 'numpy.uint8', 'np.uint8', (['final_pred'], {}), '(final_pred)\n', (7907, 7919), True, 'import numpy as np\n'), ((8872, 8887), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8885, 8887), False, 'import torch\n'), ((15102, 15123), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (15121, 15123), False, 'from torchvision import transforms\n'), ((15731, 15788), 'numpy.zeros', 'np.zeros', (['(flair_slice.shape[0], flair_slice.shape[1], 3)'], {}), '((flair_slice.shape[0], flair_slice.shape[1], 3))\n', (15739, 15788), True, 'import numpy as np\n'), ((15920, 15935), 'numpy.uint8', 'np.uint8', (['array'], {}), '(array)\n', (15928, 15935), True, 'import numpy as np\n'), ((16243, 16278), 'numpy.swapaxes', 'np.swapaxes', (['generated_output', '(1)', '(2)'], {}), '(generated_output, 1, 2)\n', (16254, 16278), True, 'import numpy as np\n'), ((16956, 16976), 'nibabel.load', 'nib.load', (['flair_path'], {}), '(flair_path)\n', (16964, 16976), True, 'import nibabel as nib\n'), ((17629, 17733), 'numpy.array', 'np.array', (['[final_predictionTir3D_logits, final_predictionBNET3D_logits,\n final_predictionMnet_logits]'], {}), '([final_predictionTir3D_logits, final_predictionBNET3D_logits,\n 
final_predictionMnet_logits])\n', (17637, 17733), True, 'import numpy as np\n'), ((17893, 17932), 'numpy.array', 'np.array', (['[final_predictionMnet_logits]'], {}), '([final_predictionMnet_logits])\n', (17901, 17932), True, 'import numpy as np\n'), ((18336, 18373), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (18347, 18373), False, 'import os\n'), ((19423, 19461), 'os.path.join', 'os.path.join', (['path', "(name + 't2.nii.gz')"], {}), "(path, name + 't2.nii.gz')\n", (19435, 19461), False, 'import os\n'), ((20074, 20178), 'numpy.array', 'np.array', (['[final_predictionTir3D_logits, final_predictionBNET3D_logits,\n final_predictionMnet_logits]'], {}), '([final_predictionTir3D_logits, final_predictionBNET3D_logits,\n final_predictionMnet_logits])\n', (20082, 20178), True, 'import numpy as np\n'), ((20338, 20377), 'numpy.array', 'np.array', (['[final_predictionMnet_logits]'], {}), '([final_predictionMnet_logits])\n', (20346, 20377), True, 'import numpy as np\n'), ((1809, 1834), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1832, 1834), False, 'import torch\n'), ((15465, 15505), 'numpy.transpose', 'np.transpose', (['flair_volume[:, :, slices]'], {}), '(flair_volume[:, :, slices])\n', (15477, 15505), True, 'import numpy as np\n'), ((15566, 15603), 'numpy.transpose', 'np.transpose', (['t2_volume[:, :, slices]'], {}), '(t2_volume[:, :, slices])\n', (15578, 15603), True, 'import numpy as np\n'), ((15664, 15703), 'numpy.transpose', 'np.transpose', (['t1ce_volume[:, :, slices]'], {}), '(t1ce_volume[:, :, slices])\n', (15676, 15703), True, 'import numpy as np\n'), ((16774, 16791), 'nibabel.load', 'nib.load', (['t1_path'], {}), '(t1_path)\n', (16782, 16791), True, 'import nibabel as nib\n'), ((16816, 16833), 'nibabel.load', 'nib.load', (['t2_path'], {}), '(t2_path)\n', (16824, 16833), True, 'import nibabel as nib\n'), ((16860, 16879), 'nibabel.load', 'nib.load', (['t1ce_path'], {}), '(t1ce_path)\n', (16868, 16879), True, 'import nibabel as nib\n'), ((16907, 16927), 'nibabel.load', 'nib.load', (['flair_path'], {}), '(flair_path)\n', (16915, 16927), True, 'import nibabel as nib\n'), ((18418, 18468), 'os.path.join', 'os.path.join', (['save_path', '"""DeepBrainSeg_Prediction"""'], {}), "(save_path, 'DeepBrainSeg_Prediction')\n", (18430, 18468), False, 'import os\n'), ((19204, 19245), 'os.path.join', 'os.path.join', (['path', "(name + 'flair.nii.gz')"], {}), "(path, name + 'flair.nii.gz')\n", (19216, 19245), False, 'import os\n'), ((20807, 20852), 'os.path.join', 'os.path.join', (['path', '"""DeepBrainSeg_Prediction"""'], {}), "(path, 'DeepBrainSeg_Prediction')\n", (20819, 20852), False, 'import os\n'), ((5968, 5987), 'nibabel.load', 'nib.load', (['mask_path'], {}), '(mask_path)\n', (5976, 5987), True, 'import nibabel as nib\n'), ((11280, 11341), 'numpy.zeros', 'np.zeros', (['(1, 4, high_res_size, high_res_size, high_res_size)'], {}), '((1, 4, high_res_size, high_res_size, high_res_size))\n', (11288, 11341), True, 'import numpy as np\n'), ((11369, 11427), 'numpy.zeros', 'np.zeros', (['(1, 4, low_res_size, low_res_size, low_res_size)'], {}), '((1, 4, low_res_size, low_res_size, low_res_size))\n', (11377, 11427), True, 'import numpy as np\n'), ((11456, 11505), 'numpy.zeros', 'np.zeros', (['(1, 4, resize_to, resize_to, resize_to)'], {}), '((1, 4, resize_to, resize_to, resize_to))\n', (11464, 11505), True, 'import numpy as np\n'), ((18891, 18932), 'os.path.join', 'os.path.join', (['path', "(name + 'flair.nii.gz')"], {}), 
"(path, name + 'flair.nii.gz')\n", (18903, 18932), False, 'import os\n'), ((18971, 19009), 'os.path.join', 'os.path.join', (['path', "(name + 't1.nii.gz')"], {}), "(path, name + 't1.nii.gz')\n", (18983, 19009), False, 'import os\n'), ((19048, 19088), 'os.path.join', 'os.path.join', (['path', "(name + 't1ce.nii.gz')"], {}), "(path, name + 't1ce.nii.gz')\n", (19060, 19088), False, 'import os\n'), ((19127, 19165), 'os.path.join', 'os.path.join', (['path', "(name + 't2.nii.gz')"], {}), "(path, name + 't2.nii.gz')\n", (19139, 19165), False, 'import os\n'), ((9082, 9107), 'numpy.zeros', 'np.zeros', (['(1, 4, N, N, N)'], {}), '((1, 4, N, N, N))\n', (9090, 9107), True, 'import numpy as np\n'), ((13995, 14056), 'skimage.transform.resize', 'resize', (['low[0, i, :, :, :]', '(resize_to, resize_to, resize_to)'], {}), '(low[0, i, :, :, :], (resize_to, resize_to, resize_to))\n', (14001, 14056), False, 'from skimage.transform import resize\n'), ((19345, 19353), 'time.gmtime', 'gmtime', ([], {}), '()\n', (19351, 19353), False, 'from time import gmtime, strftime\n'), ((14113, 14135), 'torch.from_numpy', 'torch.from_numpy', (['high'], {}), '(high)\n', (14129, 14135), False, 'import torch\n'), ((14198, 14220), 'torch.from_numpy', 'torch.from_numpy', (['low1'], {}), '(low1)\n', (14214, 14220), False, 'import torch\n'), ((9435, 9457), 'torch.from_numpy', 'torch.from_numpy', (['high'], {}), '(high)\n', (9451, 9457), False, 'import torch\n')] |
import numpy as np
from statsmodels.discrete.conditional_models import (
ConditionalLogit, ConditionalPoisson)
from statsmodels.tools.numdiff import approx_fprime
from numpy.testing import assert_allclose
import pandas as pd
def test_logit_1d():
y = np.r_[0, 1, 0, 1, 0, 1, 0, 1, 1, 1]
g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
x = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
x = x[:, None]
model = ConditionalLogit(y, x, groups=g)
# Check the gradient for the denominator of the partial likelihood
for x in -1, 0, 1, 2:
params = np.r_[x, ]
_, grad = model._denom_grad(0, params)
ngrad = approx_fprime(params, lambda x: model._denom(0, x))
assert_allclose(grad, ngrad)
# Check the gradient for the loglikelihood
for x in -1, 0, 1, 2:
grad = approx_fprime(np.r_[x, ], model.loglike)
score = model.score(np.r_[x, ])
assert_allclose(grad, score, rtol=1e-4)
result = model.fit()
# From Stata
assert_allclose(result.params, np.r_[0.9272407], rtol=1e-5)
assert_allclose(result.bse, np.r_[1.295155], rtol=1e-5)
def test_logit_2d():
y = np.r_[0, 1, 0, 1, 0, 1, 0, 1, 1, 1]
g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
x1 = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
x2 = np.r_[0, 0, 1, 0, 0, 1, 0, 1, 1, 1]
x = np.empty((10, 2))
x[:, 0] = x1
x[:, 1] = x2
model = ConditionalLogit(y, x, groups=g)
# Check the gradient for the denominator of the partial likelihood
for x in -1, 0, 1, 2:
params = np.r_[x, -1.5*x]
_, grad = model._denom_grad(0, params)
ngrad = approx_fprime(params, lambda x: model._denom(0, x))
assert_allclose(grad, ngrad, rtol=1e-5)
# Check the gradient for the loglikelihood
for x in -1, 0, 1, 2:
params = np.r_[-0.5*x, 0.5*x]
grad = approx_fprime(params, model.loglike)
score = model.score(params)
assert_allclose(grad, score, rtol=1e-4)
result = model.fit()
# From Stata
assert_allclose(result.params, np.r_[1.011074, 1.236758], rtol=1e-3)
assert_allclose(result.bse, np.r_[1.420784, 1.361738], rtol=1e-5)
result.summary()
def test_formula():
for j in 0, 1:
np.random.seed(34234)
n = 200
y = np.random.randint(0, 2, size=n)
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
g = np.random.randint(0, 25, size=n)
x = np.hstack((x1[:, None], x2[:, None]))
if j == 0:
model1 = ConditionalLogit(y, x, groups=g)
else:
model1 = ConditionalPoisson(y, x, groups=g)
result1 = model1.fit()
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "g": g})
if j == 0:
model2 = ConditionalLogit.from_formula(
"y ~ 0 + x1 + x2", groups="g", data=df)
else:
model2 = ConditionalPoisson.from_formula(
"y ~ 0 + x1 + x2", groups="g", data=df)
result2 = model2.fit()
assert_allclose(result1.params, result2.params, rtol=1e-5)
assert_allclose(result1.bse, result2.bse, rtol=1e-5)
assert_allclose(result1.cov_params(), result2.cov_params(), rtol=1e-5)
assert_allclose(result1.tvalues, result2.tvalues, rtol=1e-5)
def test_poisson_1d():
y = np.r_[3, 1, 1, 4, 5, 2, 0, 1, 6, 2]
g = np.r_[0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
x = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
x = x[:, None]
model = ConditionalPoisson(y, x, groups=g)
# Check the gradient for the loglikelihood
for x in -1, 0, 1, 2:
grad = approx_fprime(np.r_[x, ], model.loglike)
score = model.score(np.r_[x, ])
assert_allclose(grad, score, rtol=1e-4)
result = model.fit()
# From Stata
assert_allclose(result.params, np.r_[0.6466272], rtol=1e-4)
assert_allclose(result.bse, np.r_[0.4170918], rtol=1e-5)
def test_poisson_2d():
y = np.r_[3, 1, 4, 8, 2, 5, 4, 7, 2, 6]
g = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
x1 = np.r_[0, 1, 0, 0, 1, 1, 0, 0, 1, 0]
x2 = np.r_[2, 1, 0, 0, 1, 2, 3, 2, 0, 1]
x = np.empty((10, 2))
x[:, 0] = x1
x[:, 1] = x2
model = ConditionalPoisson(y, x, groups=g)
# Check the gradient for the loglikelihood
for x in -1, 0, 1, 2:
params = np.r_[-0.5*x, 0.5*x]
grad = approx_fprime(params, model.loglike)
score = model.score(params)
assert_allclose(grad, score, rtol=1e-4)
result = model.fit()
# From Stata
assert_allclose(result.params, np.r_[-.9478957, -.0134279], rtol=1e-3)
assert_allclose(result.bse, np.r_[.3874942, .1686712], rtol=1e-5)
result.summary()
def test_lasso_logistic():
np.random.seed(3423948)
n = 200
groups = np.arange(10)
groups = np.kron(groups, np.ones(n // 10))
group_effects = np.random.normal(size=10)
group_effects = np.kron(group_effects, np.ones(n // 10))
x = np.random.normal(size=(n, 4))
params = np.r_[0, 0, 1, 0]
lin_pred = np.dot(x, params) + group_effects
mean = 1 / (1 + np.exp(-lin_pred))
y = (np.random.uniform(size=n) < mean).astype(np.int)
model0 = ConditionalLogit(y, x, groups=groups)
result0 = model0.fit()
# Should be the same as model0
model1 = ConditionalLogit(y, x, groups=groups)
result1 = model1.fit_regularized(L1_wt=0, alpha=0)
assert_allclose(result0.params, result1.params, rtol=1e-3)
model2 = ConditionalLogit(y, x, groups=groups)
result2 = model2.fit_regularized(L1_wt=1, alpha=0.05)
# Rxegression test
assert_allclose(result2.params, np.r_[0, 0, 0.55235152, 0], rtol=1e-4)
# Test with formula
df = pd.DataFrame({"y": y, "x1": x[:, 0], "x2": x[:, 1], "x3": x[:, 2],
"x4": x[:, 3], "groups": groups})
fml = "y ~ 0 + x1 + x2 + x3 + x4"
model3 = ConditionalLogit.from_formula(fml, groups="groups", data=df)
result3 = model3.fit_regularized(L1_wt=1, alpha=0.05)
assert_allclose(result2.params, result3.params)
def test_lasso_poisson():
np.random.seed(342394)
n = 200
groups = np.arange(10)
groups = np.kron(groups, np.ones(n // 10))
group_effects = np.random.normal(size=10)
group_effects = np.kron(group_effects, np.ones(n // 10))
x = np.random.normal(size=(n, 4))
params = np.r_[0, 0, 1, 0]
lin_pred = np.dot(x, params) + group_effects
mean = np.exp(lin_pred)
y = np.random.poisson(mean)
model0 = ConditionalPoisson(y, x, groups=groups)
result0 = model0.fit()
# Should be the same as model0
model1 = ConditionalPoisson(y, x, groups=groups)
result1 = model1.fit_regularized(L1_wt=0, alpha=0)
assert_allclose(result0.params, result1.params, rtol=1e-3)
model2 = ConditionalPoisson(y, x, groups=groups)
result2 = model2.fit_regularized(L1_wt=1, alpha=0.2)
# Regression test
assert_allclose(result2.params, np.r_[0, 0, 0.91697508, 0], rtol=1e-4)
# Test with formula
df = pd.DataFrame({"y": y, "x1": x[:, 0], "x2": x[:, 1], "x3": x[:, 2],
"x4": x[:, 3], "groups": groups})
fml = "y ~ 0 + x1 + x2 + x3 + x4"
model3 = ConditionalPoisson.from_formula(fml, groups="groups", data=df)
result3 = model3.fit_regularized(L1_wt=1, alpha=0.2)
assert_allclose(result2.params, result3.params)
| [
"numpy.random.normal",
"statsmodels.discrete.conditional_models.ConditionalLogit",
"numpy.ones",
"numpy.random.poisson",
"numpy.hstack",
"numpy.testing.assert_allclose",
"statsmodels.discrete.conditional_models.ConditionalPoisson",
"statsmodels.discrete.conditional_models.ConditionalPoisson.from_formula",
"numpy.exp",
"numpy.random.randint",
"numpy.dot",
"numpy.empty",
"numpy.random.seed",
"statsmodels.tools.numdiff.approx_fprime",
"numpy.random.uniform",
"pandas.DataFrame",
"statsmodels.discrete.conditional_models.ConditionalLogit.from_formula",
"numpy.arange"
]
| [((420, 452), 'statsmodels.discrete.conditional_models.ConditionalLogit', 'ConditionalLogit', (['y', 'x'], {'groups': 'g'}), '(y, x, groups=g)\n', (436, 452), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((997, 1057), 'numpy.testing.assert_allclose', 'assert_allclose', (['result.params', 'np.r_[0.9272407]'], {'rtol': '(1e-05)'}), '(result.params, np.r_[0.9272407], rtol=1e-05)\n', (1012, 1057), False, 'from numpy.testing import assert_allclose\n'), ((1061, 1117), 'numpy.testing.assert_allclose', 'assert_allclose', (['result.bse', 'np.r_[1.295155]'], {'rtol': '(1e-05)'}), '(result.bse, np.r_[1.295155], rtol=1e-05)\n', (1076, 1117), False, 'from numpy.testing import assert_allclose\n'), ((1328, 1345), 'numpy.empty', 'np.empty', (['(10, 2)'], {}), '((10, 2))\n', (1336, 1345), True, 'import numpy as np\n'), ((1393, 1425), 'statsmodels.discrete.conditional_models.ConditionalLogit', 'ConditionalLogit', (['y', 'x'], {'groups': 'g'}), '(y, x, groups=g)\n', (1409, 1425), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((2017, 2086), 'numpy.testing.assert_allclose', 'assert_allclose', (['result.params', 'np.r_[1.011074, 1.236758]'], {'rtol': '(0.001)'}), '(result.params, np.r_[1.011074, 1.236758], rtol=0.001)\n', (2032, 2086), False, 'from numpy.testing import assert_allclose\n'), ((2090, 2156), 'numpy.testing.assert_allclose', 'assert_allclose', (['result.bse', 'np.r_[1.420784, 1.361738]'], {'rtol': '(1e-05)'}), '(result.bse, np.r_[1.420784, 1.361738], rtol=1e-05)\n', (2105, 2156), False, 'from numpy.testing import assert_allclose\n'), ((3488, 3522), 'statsmodels.discrete.conditional_models.ConditionalPoisson', 'ConditionalPoisson', (['y', 'x'], {'groups': 'g'}), '(y, x, groups=g)\n', (3506, 3522), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((3789, 3850), 'numpy.testing.assert_allclose', 'assert_allclose', (['result.params', 'np.r_[0.6466272]'], {'rtol': '(0.0001)'}), '(result.params, np.r_[0.6466272], rtol=0.0001)\n', (3804, 3850), False, 'from numpy.testing import assert_allclose\n'), ((3853, 3910), 'numpy.testing.assert_allclose', 'assert_allclose', (['result.bse', 'np.r_[0.4170918]'], {'rtol': '(1e-05)'}), '(result.bse, np.r_[0.4170918], rtol=1e-05)\n', (3868, 3910), False, 'from numpy.testing import assert_allclose\n'), ((4123, 4140), 'numpy.empty', 'np.empty', (['(10, 2)'], {}), '((10, 2))\n', (4131, 4140), True, 'import numpy as np\n'), ((4188, 4222), 'statsmodels.discrete.conditional_models.ConditionalPoisson', 'ConditionalPoisson', (['y', 'x'], {'groups': 'g'}), '(y, x, groups=g)\n', (4206, 4222), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((4519, 4592), 'numpy.testing.assert_allclose', 'assert_allclose', (['result.params', 'np.r_[-0.9478957, -0.0134279]'], {'rtol': '(0.001)'}), '(result.params, np.r_[-0.9478957, -0.0134279], rtol=0.001)\n', (4534, 4592), False, 'from numpy.testing import assert_allclose\n'), ((4594, 4662), 'numpy.testing.assert_allclose', 'assert_allclose', (['result.bse', 'np.r_[0.3874942, 0.1686712]'], {'rtol': '(1e-05)'}), '(result.bse, np.r_[0.3874942, 0.1686712], rtol=1e-05)\n', (4609, 4662), False, 'from numpy.testing import assert_allclose\n'), ((4716, 4739), 'numpy.random.seed', 'np.random.seed', (['(3423948)'], {}), '(3423948)\n', (4730, 4739), True, 'import numpy as np\n'), ((4766, 4779), 'numpy.arange', 'np.arange', (['(10)'], {}), 
'(10)\n', (4775, 4779), True, 'import numpy as np\n'), ((4847, 4872), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (4863, 4872), True, 'import numpy as np\n'), ((4943, 4972), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, 4)'}), '(size=(n, 4))\n', (4959, 4972), True, 'import numpy as np\n'), ((5165, 5202), 'statsmodels.discrete.conditional_models.ConditionalLogit', 'ConditionalLogit', (['y', 'x'], {'groups': 'groups'}), '(y, x, groups=groups)\n', (5181, 5202), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((5279, 5316), 'statsmodels.discrete.conditional_models.ConditionalLogit', 'ConditionalLogit', (['y', 'x'], {'groups': 'groups'}), '(y, x, groups=groups)\n', (5295, 5316), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((5377, 5436), 'numpy.testing.assert_allclose', 'assert_allclose', (['result0.params', 'result1.params'], {'rtol': '(0.001)'}), '(result0.params, result1.params, rtol=0.001)\n', (5392, 5436), False, 'from numpy.testing import assert_allclose\n'), ((5450, 5487), 'statsmodels.discrete.conditional_models.ConditionalLogit', 'ConditionalLogit', (['y', 'x'], {'groups': 'groups'}), '(y, x, groups=groups)\n', (5466, 5487), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((5574, 5646), 'numpy.testing.assert_allclose', 'assert_allclose', (['result2.params', 'np.r_[0, 0, 0.55235152, 0]'], {'rtol': '(0.0001)'}), '(result2.params, np.r_[0, 0, 0.55235152, 0], rtol=0.0001)\n', (5589, 5646), False, 'from numpy.testing import assert_allclose\n'), ((5679, 5784), 'pandas.DataFrame', 'pd.DataFrame', (["{'y': y, 'x1': x[:, 0], 'x2': x[:, 1], 'x3': x[:, 2], 'x4': x[:, 3],\n 'groups': groups}"], {}), "({'y': y, 'x1': x[:, 0], 'x2': x[:, 1], 'x3': x[:, 2], 'x4': x[\n :, 3], 'groups': groups})\n", (5691, 5784), True, 'import pandas as pd\n'), ((5854, 5914), 'statsmodels.discrete.conditional_models.ConditionalLogit.from_formula', 'ConditionalLogit.from_formula', (['fml'], {'groups': '"""groups"""', 'data': 'df'}), "(fml, groups='groups', data=df)\n", (5883, 5914), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((5977, 6024), 'numpy.testing.assert_allclose', 'assert_allclose', (['result2.params', 'result3.params'], {}), '(result2.params, result3.params)\n', (5992, 6024), False, 'from numpy.testing import assert_allclose\n'), ((6058, 6080), 'numpy.random.seed', 'np.random.seed', (['(342394)'], {}), '(342394)\n', (6072, 6080), True, 'import numpy as np\n'), ((6107, 6120), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (6116, 6120), True, 'import numpy as np\n'), ((6188, 6213), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (6204, 6213), True, 'import numpy as np\n'), ((6284, 6313), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, 4)'}), '(size=(n, 4))\n', (6300, 6313), True, 'import numpy as np\n'), ((6406, 6422), 'numpy.exp', 'np.exp', (['lin_pred'], {}), '(lin_pred)\n', (6412, 6422), True, 'import numpy as np\n'), ((6431, 6454), 'numpy.random.poisson', 'np.random.poisson', (['mean'], {}), '(mean)\n', (6448, 6454), True, 'import numpy as np\n'), ((6469, 6508), 'statsmodels.discrete.conditional_models.ConditionalPoisson', 'ConditionalPoisson', (['y', 'x'], {'groups': 'groups'}), '(y, x, groups=groups)\n', (6487, 6508), False, 'from statsmodels.discrete.conditional_models import 
ConditionalLogit, ConditionalPoisson\n'), ((6585, 6624), 'statsmodels.discrete.conditional_models.ConditionalPoisson', 'ConditionalPoisson', (['y', 'x'], {'groups': 'groups'}), '(y, x, groups=groups)\n', (6603, 6624), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((6685, 6744), 'numpy.testing.assert_allclose', 'assert_allclose', (['result0.params', 'result1.params'], {'rtol': '(0.001)'}), '(result0.params, result1.params, rtol=0.001)\n', (6700, 6744), False, 'from numpy.testing import assert_allclose\n'), ((6758, 6797), 'statsmodels.discrete.conditional_models.ConditionalPoisson', 'ConditionalPoisson', (['y', 'x'], {'groups': 'groups'}), '(y, x, groups=groups)\n', (6776, 6797), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((6882, 6954), 'numpy.testing.assert_allclose', 'assert_allclose', (['result2.params', 'np.r_[0, 0, 0.91697508, 0]'], {'rtol': '(0.0001)'}), '(result2.params, np.r_[0, 0, 0.91697508, 0], rtol=0.0001)\n', (6897, 6954), False, 'from numpy.testing import assert_allclose\n'), ((6987, 7092), 'pandas.DataFrame', 'pd.DataFrame', (["{'y': y, 'x1': x[:, 0], 'x2': x[:, 1], 'x3': x[:, 2], 'x4': x[:, 3],\n 'groups': groups}"], {}), "({'y': y, 'x1': x[:, 0], 'x2': x[:, 1], 'x3': x[:, 2], 'x4': x[\n :, 3], 'groups': groups})\n", (6999, 7092), True, 'import pandas as pd\n'), ((7162, 7224), 'statsmodels.discrete.conditional_models.ConditionalPoisson.from_formula', 'ConditionalPoisson.from_formula', (['fml'], {'groups': '"""groups"""', 'data': 'df'}), "(fml, groups='groups', data=df)\n", (7193, 7224), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((7286, 7333), 'numpy.testing.assert_allclose', 'assert_allclose', (['result2.params', 'result3.params'], {}), '(result2.params, result3.params)\n', (7301, 7333), False, 'from numpy.testing import assert_allclose\n'), ((702, 730), 'numpy.testing.assert_allclose', 'assert_allclose', (['grad', 'ngrad'], {}), '(grad, ngrad)\n', (717, 730), False, 'from numpy.testing import assert_allclose\n'), ((820, 859), 'statsmodels.tools.numdiff.approx_fprime', 'approx_fprime', (['np.r_[x,]', 'model.loglike'], {}), '(np.r_[x,], model.loglike)\n', (833, 859), False, 'from statsmodels.tools.numdiff import approx_fprime\n'), ((909, 950), 'numpy.testing.assert_allclose', 'assert_allclose', (['grad', 'score'], {'rtol': '(0.0001)'}), '(grad, score, rtol=0.0001)\n', (924, 950), False, 'from numpy.testing import assert_allclose\n'), ((1681, 1721), 'numpy.testing.assert_allclose', 'assert_allclose', (['grad', 'ngrad'], {'rtol': '(1e-05)'}), '(grad, ngrad, rtol=1e-05)\n', (1696, 1721), False, 'from numpy.testing import assert_allclose\n'), ((1848, 1884), 'statsmodels.tools.numdiff.approx_fprime', 'approx_fprime', (['params', 'model.loglike'], {}), '(params, model.loglike)\n', (1861, 1884), False, 'from statsmodels.tools.numdiff import approx_fprime\n'), ((1929, 1970), 'numpy.testing.assert_allclose', 'assert_allclose', (['grad', 'score'], {'rtol': '(0.0001)'}), '(grad, score, rtol=0.0001)\n', (1944, 1970), False, 'from numpy.testing import assert_allclose\n'), ((2229, 2250), 'numpy.random.seed', 'np.random.seed', (['(34234)'], {}), '(34234)\n', (2243, 2250), True, 'import numpy as np\n'), ((2279, 2310), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'n'}), '(0, 2, size=n)\n', (2296, 2310), True, 'import numpy as np\n'), ((2324, 2348), 'numpy.random.normal', 'np.random.normal', ([], 
{'size': 'n'}), '(size=n)\n', (2340, 2348), True, 'import numpy as np\n'), ((2362, 2386), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n'}), '(size=n)\n', (2378, 2386), True, 'import numpy as np\n'), ((2399, 2431), 'numpy.random.randint', 'np.random.randint', (['(0)', '(25)'], {'size': 'n'}), '(0, 25, size=n)\n', (2416, 2431), True, 'import numpy as np\n'), ((2445, 2482), 'numpy.hstack', 'np.hstack', (['(x1[:, None], x2[:, None])'], {}), '((x1[:, None], x2[:, None]))\n', (2454, 2482), True, 'import numpy as np\n'), ((2671, 2721), 'pandas.DataFrame', 'pd.DataFrame', (["{'y': y, 'x1': x1, 'x2': x2, 'g': g}"], {}), "({'y': y, 'x1': x1, 'x2': x2, 'g': g})\n", (2683, 2721), True, 'import pandas as pd\n'), ((3029, 3088), 'numpy.testing.assert_allclose', 'assert_allclose', (['result1.params', 'result2.params'], {'rtol': '(1e-05)'}), '(result1.params, result2.params, rtol=1e-05)\n', (3044, 3088), False, 'from numpy.testing import assert_allclose\n'), ((3096, 3149), 'numpy.testing.assert_allclose', 'assert_allclose', (['result1.bse', 'result2.bse'], {'rtol': '(1e-05)'}), '(result1.bse, result2.bse, rtol=1e-05)\n', (3111, 3149), False, 'from numpy.testing import assert_allclose\n'), ((3236, 3297), 'numpy.testing.assert_allclose', 'assert_allclose', (['result1.tvalues', 'result2.tvalues'], {'rtol': '(1e-05)'}), '(result1.tvalues, result2.tvalues, rtol=1e-05)\n', (3251, 3297), False, 'from numpy.testing import assert_allclose\n'), ((3612, 3651), 'statsmodels.tools.numdiff.approx_fprime', 'approx_fprime', (['np.r_[x,]', 'model.loglike'], {}), '(np.r_[x,], model.loglike)\n', (3625, 3651), False, 'from statsmodels.tools.numdiff import approx_fprime\n'), ((3701, 3742), 'numpy.testing.assert_allclose', 'assert_allclose', (['grad', 'score'], {'rtol': '(0.0001)'}), '(grad, score, rtol=0.0001)\n', (3716, 3742), False, 'from numpy.testing import assert_allclose\n'), ((4350, 4386), 'statsmodels.tools.numdiff.approx_fprime', 'approx_fprime', (['params', 'model.loglike'], {}), '(params, model.loglike)\n', (4363, 4386), False, 'from statsmodels.tools.numdiff import approx_fprime\n'), ((4431, 4472), 'numpy.testing.assert_allclose', 'assert_allclose', (['grad', 'score'], {'rtol': '(0.0001)'}), '(grad, score, rtol=0.0001)\n', (4446, 4472), False, 'from numpy.testing import assert_allclose\n'), ((4809, 4825), 'numpy.ones', 'np.ones', (['(n // 10)'], {}), '(n // 10)\n', (4816, 4825), True, 'import numpy as np\n'), ((4916, 4932), 'numpy.ones', 'np.ones', (['(n // 10)'], {}), '(n // 10)\n', (4923, 4932), True, 'import numpy as np\n'), ((5019, 5036), 'numpy.dot', 'np.dot', (['x', 'params'], {}), '(x, params)\n', (5025, 5036), True, 'import numpy as np\n'), ((6150, 6166), 'numpy.ones', 'np.ones', (['(n // 10)'], {}), '(n // 10)\n', (6157, 6166), True, 'import numpy as np\n'), ((6257, 6273), 'numpy.ones', 'np.ones', (['(n // 10)'], {}), '(n // 10)\n', (6264, 6273), True, 'import numpy as np\n'), ((6360, 6377), 'numpy.dot', 'np.dot', (['x', 'params'], {}), '(x, params)\n', (6366, 6377), True, 'import numpy as np\n'), ((2523, 2555), 'statsmodels.discrete.conditional_models.ConditionalLogit', 'ConditionalLogit', (['y', 'x'], {'groups': 'g'}), '(y, x, groups=g)\n', (2539, 2555), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((2591, 2625), 'statsmodels.discrete.conditional_models.ConditionalPoisson', 'ConditionalPoisson', (['y', 'x'], {'groups': 'g'}), '(y, x, groups=g)\n', (2609, 2625), False, 'from statsmodels.discrete.conditional_models import 
ConditionalLogit, ConditionalPoisson\n'), ((2762, 2831), 'statsmodels.discrete.conditional_models.ConditionalLogit.from_formula', 'ConditionalLogit.from_formula', (['"""y ~ 0 + x1 + x2"""'], {'groups': '"""g"""', 'data': 'df'}), "('y ~ 0 + x1 + x2', groups='g', data=df)\n", (2791, 2831), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((2892, 2963), 'statsmodels.discrete.conditional_models.ConditionalPoisson.from_formula', 'ConditionalPoisson.from_formula', (['"""y ~ 0 + x1 + x2"""'], {'groups': '"""g"""', 'data': 'df'}), "('y ~ 0 + x1 + x2', groups='g', data=df)\n", (2923, 2963), False, 'from statsmodels.discrete.conditional_models import ConditionalLogit, ConditionalPoisson\n'), ((5074, 5091), 'numpy.exp', 'np.exp', (['(-lin_pred)'], {}), '(-lin_pred)\n', (5080, 5091), True, 'import numpy as np\n'), ((5102, 5127), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (5119, 5127), True, 'import numpy as np\n')] |
from abc import ABCMeta, abstractmethod
import numpy as np
class Agent(object):
__metaclass__ = ABCMeta
def __init__(self, name, id_, action_num, env):
self.name = name
self.id_ = id_
self.action_num = action_num
# len(env.action_space[id_])
# self.opp_action_space = env.action_space[0:id_] + env.action_space[id_:-1]
def set_pi(self, pi):
        # assert len(pi) == self.action_num
self.pi = pi
def done(self, env):
pass
@abstractmethod
def act(self, s, exploration, env):
pass
def update(self, s, a, o, r, s2, env):
pass
@staticmethod
def format_time(n):
return ""
# s = humanfriendly.format_size(n)
# return s.replace(' ', '').replace('bytes', '').replace('byte', '').rstrip('B')
def full_name(self, env):
return "{}_{}_{}".format(env.name, self.name, self.id_)
class StationaryAgent(Agent):
def __init__(self, id_, action_num, env, pi=None):
super().__init__("stationary", id_, action_num, env)
if pi is None:
pi = np.random.dirichlet([1.0] * self.action_num)
self.pi = np.array(pi, dtype=np.double)
StationaryAgent.normalize(self.pi)
def act(self, s, exploration, env):
        if getattr(self, "verbose", False):  # verbose is never set in __init__, so default to quiet
print("pi of agent {}: {}".format(self.id_, self.pi))
return StationaryAgent.sample(self.pi)
@staticmethod
def normalize(pi):
minprob = np.min(pi)
if minprob < 0.0:
pi -= minprob
pi /= np.sum(pi)
@staticmethod
def sample(pi):
return np.random.choice(pi.size, size=1, p=pi)[0]
class RandomAgent(StationaryAgent):
def __init__(self, id_, action_num, env):
assert action_num > 0
        super().__init__(id_, action_num, env, pi=[1.0 / action_num] * action_num)
self.name = "random"
| [
"numpy.random.choice",
"numpy.array",
"numpy.random.dirichlet",
"numpy.sum",
"numpy.min"
]
| [((1170, 1199), 'numpy.array', 'np.array', (['pi'], {'dtype': 'np.double'}), '(pi, dtype=np.double)\n', (1178, 1199), True, 'import numpy as np\n'), ((1482, 1492), 'numpy.min', 'np.min', (['pi'], {}), '(pi)\n', (1488, 1492), True, 'import numpy as np\n'), ((1559, 1569), 'numpy.sum', 'np.sum', (['pi'], {}), '(pi)\n', (1565, 1569), True, 'import numpy as np\n'), ((1107, 1151), 'numpy.random.dirichlet', 'np.random.dirichlet', (['([1.0] * self.action_num)'], {}), '([1.0] * self.action_num)\n', (1126, 1151), True, 'import numpy as np\n'), ((1624, 1663), 'numpy.random.choice', 'np.random.choice', (['pi.size'], {'size': '(1)', 'p': 'pi'}), '(pi.size, size=1, p=pi)\n', (1640, 1663), True, 'import numpy as np\n')] |
#
# @lc app=leetcode id=290 lang=python3
#
# [290] Word Pattern
#
# https://leetcode.com/problems/word-pattern/description/
#
# algorithms
# Easy (35.86%)
# Likes: 825
# Dislikes: 113
# Total Accepted: 164K
# Total Submissions: 455.9K
# Testcase Example: '"abba"\n"dog cat cat dog"'
#
# Given a pattern and a string str, find if str follows the same pattern.
#
# Here follow means a full match, such that there is a bijection between a
# letter in pattern and a non-empty word in str.
#
# Example 1:
#
#
# Input: pattern = "abba", str = "dog cat cat dog"
# Output: true
#
# Example 2:
#
#
# Input:pattern = "abba", str = "dog cat cat fish"
# Output: false
#
# Example 3:
#
#
# Input: pattern = "aaaa", str = "dog cat cat dog"
# Output: false
#
# Example 4:
#
#
# Input: pattern = "abba", str = "dog dog dog dog"
# Output: false
#
# Notes:
# You may assume pattern contains only lowercase letters, and str contains
# lowercase letters that may be separated by a single space.
#
#
# @lc code=start
from collections import defaultdict
class Solution:
def wordPattern(self, pattern: str, str1: str) -> bool:
if len(pattern)!=len(str1.split()):
return False
abmap = defaultdict(str)
bamap = defaultdict(str)
for a,b in zip(pattern, str1.split()):
if abmap[a]=='' and bamap[b]=='':
abmap[a]=b
bamap[b]=a
elif abmap[a]!=b or bamap[b]!=a:
return False
return True
# @lc code=end
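
# Editor-added usage sketch (hedged; not part of the original submission): it simply
# re-runs the four examples from the problem statement above through the Solution class.
if __name__ == "__main__":
    solver = Solution()
    assert solver.wordPattern("abba", "dog cat cat dog")
    assert not solver.wordPattern("abba", "dog cat cat fish")
    assert not solver.wordPattern("aaaa", "dog cat cat dog")
    assert not solver.wordPattern("abba", "dog dog dog dog")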
| [
"collections.defaultdict"
]
| [((1216, 1232), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (1227, 1232), False, 'from collections import defaultdict\n'), ((1249, 1265), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (1260, 1265), False, 'from collections import defaultdict\n')] |
from torch import nn
class MyAwesomeModel(nn.Module):
def __init__(self):
super().__init__()
self.cnn = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=5, kernel_size=3),
nn.ReLU(),
nn.Conv2d(in_channels=5, out_channels=3, kernel_size=3, stride=2)
)
self.fc = nn.Sequential(nn.Linear(432, 100),
nn.ReLU(),
nn.Linear(100,10),
nn.LogSoftmax(dim=1)
)
def forward(self, x):
x = self.cnn(x).view(x.size(0), -1)
return self.fc(x)
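
# Editor-added smoke test (an assumption-laden sketch, not part of the original module):
# it presumes MNIST-style 1x28x28 inputs, which is what makes the flattened CNN output
# 3 * 12 * 12 = 432 and match the first fully-connected layer above.
if __name__ == "__main__":
    import torch
    model = MyAwesomeModel()
    out = model(torch.randn(4, 1, 28, 28))
    assert out.shape == (4, 10)  # batch of 4, log-probabilities over 10 classes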
| [
"torch.nn.ReLU",
"torch.nn.LogSoftmax",
"torch.nn.Linear",
"torch.nn.Conv2d"
]
| [((141, 196), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(5)', 'kernel_size': '(3)'}), '(in_channels=1, out_channels=5, kernel_size=3)\n', (150, 196), False, 'from torch import nn\n'), ((230, 239), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (237, 239), False, 'from torch import nn\n'), ((273, 338), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(5)', 'out_channels': '(3)', 'kernel_size': '(3)', 'stride': '(2)'}), '(in_channels=5, out_channels=3, kernel_size=3, stride=2)\n', (282, 338), False, 'from torch import nn\n'), ((405, 424), 'torch.nn.Linear', 'nn.Linear', (['(432)', '(100)'], {}), '(432, 100)\n', (414, 424), False, 'from torch import nn\n'), ((458, 467), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (465, 467), False, 'from torch import nn\n'), ((501, 519), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(10)'], {}), '(100, 10)\n', (510, 519), False, 'from torch import nn\n'), ((552, 572), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (565, 572), False, 'from torch import nn\n')] |
import unittest
from musket_core import coders
import numpy as np
import pandas as pd
import os
import math
fl=__file__
fl=os.path.dirname(fl)
class TestCoders(unittest.TestCase):
def test_binary_num(self):
a=np.array([0,1,0,1])
bc=coders.get_coder("binary",a, None)
self.assertEqual(bc[0], 0, "should be zero")
self.assertEqual(bc[1], 1, "should be one")
v=bc._decode(np.array([0.6]))
self.assertEqual(v, 1, "should be one")
v=bc._decode(np.array([0.2]))
self.assertEqual(v, 0, "should be zero")
pass
def test_binary_str(self):
a=np.array(["0","1","0","1"])
bc=coders.get_coder("binary",a, None)
self.assertEqual(bc[0], 0, "should be zero")
self.assertEqual(bc[1], 1, "should be one")
v=bc._decode(np.array([0.6]))
self.assertEqual(v, "1", "should be one")
v=bc._decode(np.array([0.2]))
self.assertEqual(v, "0", "should be zero")
pass
def test_binary_str2(self):
a=np.array(["","1","","1"])
bc=coders.get_coder("binary",a, None)
self.assertEqual(bc[0], 0, "should be zero")
self.assertEqual(bc[1], 1, "should be one")
v=bc._decode(np.array([0.6]))
self.assertEqual(v, "1", "should be one")
v=bc._decode(np.array([0.2]))
self.assertEqual(v, "", "should be zero")
pass
def test_binary_bool(self):
a=np.array([True,False,True,False])
bc=coders.get_coder("binary",a, None)
        self.assertEqual(bc[0], 1, "True should encode to one")
        self.assertEqual(bc[1], 0, "False should encode to zero")
v=bc._decode(np.array([0.6]))
self.assertEqual(v, True, "should be one")
v=bc._decode(np.array([0.2]))
self.assertEqual(v, False, "should be zero")
pass
def test_categorical_num(self):
a=np.array([0,1,2,1])
bc=coders.get_coder("categorical_one_hot",a, None)
        self.assertEqual(bc[0][0], True, "slot 0 should be hot for the first sample")
        self.assertEqual(bc[0][1], False, "slot 1 should be cold for the first sample")
v=bc._decode(np.array([0.3,0.4,0.45]))
self.assertEqual(v, 2, "should be one")
v=bc._decode(np.array([0.2,0.1,0.1]))
self.assertEqual(v, 0, "should be zero")
pass
def test_categorical_str(self):
a=np.array(["a","b","c","b"])
bc=coders.get_coder("categorical_one_hot",a, None)
        self.assertEqual(bc[0][0], True, "slot 0 should be hot for the first sample")
        self.assertEqual(bc[0][1], False, "slot 1 should be cold for the first sample")
v=bc._decode(np.array([0.3,0.4,0.45]))
self.assertEqual(v, "c", "should be one")
v=bc._decode(np.array([0.2,0.1,0.1]))
self.assertEqual(v, "a", "should be zero")
pass
def test_categorical_str2(self):
a=np.array(["","b","c","b"])
bc=coders.get_coder("categorical_one_hot",a, None)
        self.assertEqual(bc[0][0], True, "slot 0 should be hot for the first sample")
        self.assertEqual(bc[0][1], False, "slot 1 should be cold for the first sample")
v=bc._decode(np.array([0.3,0.4,0.45]))
self.assertEqual(v, "c", "should be one")
v=bc._decode(np.array([0.2,0.1,0.1]))
self.assertEqual(v, "", "should be zero")
pass
def test_categorical_pd(self):
a=np.array([math.nan,1,2,1])
bc=coders.get_coder("categorical_one_hot",a, None)
        self.assertEqual(bc[0][2], True, "slot 2 should be hot for the first sample")
        self.assertEqual(bc[0][1], False, "slot 1 should be cold for the first sample")
v=bc._decode(np.array([0.3,0.4,0.45]))
self.assertEqual(math.isnan(v),True, "should be one")
v=bc._decode(np.array([0.2,0.1,0.1]))
self.assertEqual(v, 1, "should be zero")
pass
def test_multiclass(self):
a=np.array(["1 2","0 2","0",""])
bc=coders.get_coder("multi_class",a, None)
val=bc[0]
self.assertEqual((val==np.array([False,True,True])).sum(), 3,"Fixing format")
for i in range(len(a)):
val=bc[i]
r=bc._decode(val)
self.assertEqual(r, a[i], "Decoding should work also")
pass
def test_multiclass1(self):
a=np.array(["1_2","0_2","0",""])
bc=coders.get_coder("multi_class",a, None)
val=bc[0]
self.assertEqual((val==np.array([False,True,True])).sum(), 3,"Fixing format")
for i in range(len(a)):
val=bc[i]
r=bc._decode(val)
self.assertEqual(r, a[i], "Decoding should work also")
pass
def test_multiclass2(self):
a=np.array(["1","","",""])
bc=coders.get_coder("multi_class",a, None)
val=bc[0]
self.assertEqual((val==np.array([True])).sum(), 1,"Fixing format")
for i in range(len(a)):
val=bc[i]
r=bc._decode(val)
self.assertEqual(r, a[i], "Decoding should work also")
pass | [
"os.path.dirname",
"numpy.array",
"musket_core.coders.get_coder",
"math.isnan"
]
| [((134, 153), 'os.path.dirname', 'os.path.dirname', (['fl'], {}), '(fl)\n', (149, 153), False, 'import os\n'), ((237, 259), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (245, 259), True, 'import numpy as np\n'), ((269, 304), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""binary"""', 'a', 'None'], {}), "('binary', a, None)\n", (285, 304), False, 'from musket_core import coders\n'), ((645, 675), 'numpy.array', 'np.array', (["['0', '1', '0', '1']"], {}), "(['0', '1', '0', '1'])\n", (653, 675), True, 'import numpy as np\n'), ((685, 720), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""binary"""', 'a', 'None'], {}), "('binary', a, None)\n", (701, 720), False, 'from musket_core import coders\n'), ((1066, 1094), 'numpy.array', 'np.array', (["['', '1', '', '1']"], {}), "(['', '1', '', '1'])\n", (1074, 1094), True, 'import numpy as np\n'), ((1104, 1139), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""binary"""', 'a', 'None'], {}), "('binary', a, None)\n", (1120, 1139), False, 'from musket_core import coders\n'), ((1484, 1520), 'numpy.array', 'np.array', (['[True, False, True, False]'], {}), '([True, False, True, False])\n', (1492, 1520), True, 'import numpy as np\n'), ((1530, 1565), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""binary"""', 'a', 'None'], {}), "('binary', a, None)\n", (1546, 1565), False, 'from musket_core import coders\n'), ((1924, 1946), 'numpy.array', 'np.array', (['[0, 1, 2, 1]'], {}), '([0, 1, 2, 1])\n', (1932, 1946), True, 'import numpy as np\n'), ((1956, 2004), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""categorical_one_hot"""', 'a', 'None'], {}), "('categorical_one_hot', a, None)\n", (1972, 2004), False, 'from musket_core import coders\n'), ((2396, 2426), 'numpy.array', 'np.array', (["['a', 'b', 'c', 'b']"], {}), "(['a', 'b', 'c', 'b'])\n", (2404, 2426), True, 'import numpy as np\n'), ((2436, 2484), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""categorical_one_hot"""', 'a', 'None'], {}), "('categorical_one_hot', a, None)\n", (2452, 2484), False, 'from musket_core import coders\n'), ((2881, 2910), 'numpy.array', 'np.array', (["['', 'b', 'c', 'b']"], {}), "(['', 'b', 'c', 'b'])\n", (2889, 2910), True, 'import numpy as np\n'), ((2920, 2968), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""categorical_one_hot"""', 'a', 'None'], {}), "('categorical_one_hot', a, None)\n", (2936, 2968), False, 'from musket_core import coders\n'), ((3362, 3391), 'numpy.array', 'np.array', (['[math.nan, 1, 2, 1]'], {}), '([math.nan, 1, 2, 1])\n', (3370, 3391), True, 'import numpy as np\n'), ((3401, 3449), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""categorical_one_hot"""', 'a', 'None'], {}), "('categorical_one_hot', a, None)\n", (3417, 3449), False, 'from musket_core import coders\n'), ((3850, 3883), 'numpy.array', 'np.array', (["['1 2', '0 2', '0', '']"], {}), "(['1 2', '0 2', '0', ''])\n", (3858, 3883), True, 'import numpy as np\n'), ((3893, 3933), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""multi_class"""', 'a', 'None'], {}), "('multi_class', a, None)\n", (3909, 3933), False, 'from musket_core import coders\n'), ((4268, 4301), 'numpy.array', 'np.array', (["['1_2', '0_2', '0', '']"], {}), "(['1_2', '0_2', '0', ''])\n", (4276, 4301), True, 'import numpy as np\n'), ((4311, 4351), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""multi_class"""', 'a', 'None'], {}), "('multi_class', a, None)\n", (4327, 4351), False, 'from musket_core import coders\n'), ((4686, 
4713), 'numpy.array', 'np.array', (["['1', '', '', '']"], {}), "(['1', '', '', ''])\n", (4694, 4713), True, 'import numpy as np\n'), ((4723, 4763), 'musket_core.coders.get_coder', 'coders.get_coder', (['"""multi_class"""', 'a', 'None'], {}), "('multi_class', a, None)\n", (4739, 4763), False, 'from musket_core import coders\n'), ((433, 448), 'numpy.array', 'np.array', (['[0.6]'], {}), '([0.6])\n', (441, 448), True, 'import numpy as np\n'), ((521, 536), 'numpy.array', 'np.array', (['[0.2]'], {}), '([0.2])\n', (529, 536), True, 'import numpy as np\n'), ((849, 864), 'numpy.array', 'np.array', (['[0.6]'], {}), '([0.6])\n', (857, 864), True, 'import numpy as np\n'), ((939, 954), 'numpy.array', 'np.array', (['[0.2]'], {}), '([0.2])\n', (947, 954), True, 'import numpy as np\n'), ((1268, 1283), 'numpy.array', 'np.array', (['[0.6]'], {}), '([0.6])\n', (1276, 1283), True, 'import numpy as np\n'), ((1358, 1373), 'numpy.array', 'np.array', (['[0.2]'], {}), '([0.2])\n', (1366, 1373), True, 'import numpy as np\n'), ((1694, 1709), 'numpy.array', 'np.array', (['[0.6]'], {}), '([0.6])\n', (1702, 1709), True, 'import numpy as np\n'), ((1785, 1800), 'numpy.array', 'np.array', (['[0.2]'], {}), '([0.2])\n', (1793, 1800), True, 'import numpy as np\n'), ((2156, 2182), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.45]'], {}), '([0.3, 0.4, 0.45])\n', (2164, 2182), True, 'import numpy as np\n'), ((2253, 2278), 'numpy.array', 'np.array', (['[0.2, 0.1, 0.1]'], {}), '([0.2, 0.1, 0.1])\n', (2261, 2278), True, 'import numpy as np\n'), ((2636, 2662), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.45]'], {}), '([0.3, 0.4, 0.45])\n', (2644, 2662), True, 'import numpy as np\n'), ((2735, 2760), 'numpy.array', 'np.array', (['[0.2, 0.1, 0.1]'], {}), '([0.2, 0.1, 0.1])\n', (2743, 2760), True, 'import numpy as np\n'), ((3120, 3146), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.45]'], {}), '([0.3, 0.4, 0.45])\n', (3128, 3146), True, 'import numpy as np\n'), ((3219, 3244), 'numpy.array', 'np.array', (['[0.2, 0.1, 0.1]'], {}), '([0.2, 0.1, 0.1])\n', (3227, 3244), True, 'import numpy as np\n'), ((3601, 3627), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.45]'], {}), '([0.3, 0.4, 0.45])\n', (3609, 3627), True, 'import numpy as np\n'), ((3653, 3666), 'math.isnan', 'math.isnan', (['v'], {}), '(v)\n', (3663, 3666), False, 'import math\n'), ((3712, 3737), 'numpy.array', 'np.array', (['[0.2, 0.1, 0.1]'], {}), '([0.2, 0.1, 0.1])\n', (3720, 3737), True, 'import numpy as np\n'), ((3994, 4023), 'numpy.array', 'np.array', (['[False, True, True]'], {}), '([False, True, True])\n', (4002, 4023), True, 'import numpy as np\n'), ((4412, 4441), 'numpy.array', 'np.array', (['[False, True, True]'], {}), '([False, True, True])\n', (4420, 4441), True, 'import numpy as np\n'), ((4824, 4840), 'numpy.array', 'np.array', (['[True]'], {}), '([True])\n', (4832, 4840), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
if __name__ == "__main__":
if 'posix' in os.name and os.geteuid() == 0:
print("manage.py should not be run as root. Use `su zulip` to drop root.")
sys.exit(1)
if (os.access('/etc/zulip/zulip.conf', os.R_OK) and not
os.access('/etc/zulip/zulip-secrets.conf', os.R_OK)):
# The best way to detect running manage.py as another user in
# production before importing anything that would require that
# access is to check for access to /etc/zulip/zulip.conf (in
# which case it's a production server, not a dev environment)
# and lack of access for /etc/zulip/zulip-secrets.conf (which
# should be only readable by root and zulip)
print("Error accessing Zulip secrets; manage.py in production must be run as the zulip user.")
sys.exit(1)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
from django.conf import settings
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from scripts.lib.zulip_tools import log_management_command
log_management_command(" ".join(sys.argv), settings.MANAGEMENT_LOG_PATH)
os.environ.setdefault("PYTHONSTARTUP", os.path.join(BASE_DIR, "scripts/lib/pythonrc.py"))
if "--no-traceback" not in sys.argv and len(sys.argv) > 1:
sys.argv.append("--traceback")
try:
execute_from_command_line(sys.argv)
except CommandError as e:
print(e, file=sys.stderr)
sys.exit(1)
| [
"os.environ.setdefault",
"django.core.management.execute_from_command_line",
"sys.argv.append",
"os.access",
"os.path.join",
"os.geteuid",
"sys.exit",
"os.path.abspath",
"sys.path.append"
]
| [((99, 124), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (114, 124), False, 'import sys\n'), ((72, 97), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'import os\n'), ((1003, 1071), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""zproject.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'zproject.settings')\n", (1024, 1071), False, 'import os\n'), ((334, 345), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (342, 345), False, 'import sys\n'), ((354, 397), 'os.access', 'os.access', (['"""/etc/zulip/zulip.conf"""', 'os.R_OK'], {}), "('/etc/zulip/zulip.conf', os.R_OK)\n", (363, 397), False, 'import os\n'), ((986, 997), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (994, 997), False, 'import sys\n'), ((1416, 1465), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""scripts/lib/pythonrc.py"""'], {}), "(BASE_DIR, 'scripts/lib/pythonrc.py')\n", (1428, 1465), False, 'import os\n'), ((1538, 1568), 'sys.argv.append', 'sys.argv.append', (['"""--traceback"""'], {}), "('--traceback')\n", (1553, 1568), False, 'import sys\n'), ((1586, 1621), 'django.core.management.execute_from_command_line', 'execute_from_command_line', (['sys.argv'], {}), '(sys.argv)\n', (1611, 1621), False, 'from django.core.management import execute_from_command_line\n'), ((223, 235), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (233, 235), False, 'import os\n'), ((418, 469), 'os.access', 'os.access', (['"""/etc/zulip/zulip-secrets.conf"""', 'os.R_OK'], {}), "('/etc/zulip/zulip-secrets.conf', os.R_OK)\n", (427, 469), False, 'import os\n'), ((1694, 1705), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1702, 1705), False, 'import sys\n')] |
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.intsights.parser import IntSightsParser
class TestIntSightsParser(DojoTestCase):
def test_intsights_parser_with_one_critical_vuln_has_one_findings_json(
self):
testfile = open("unittests/scans/intsights/intsights_one_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
finding = list(findings)[0]
self.assertEqual(
'5c80dbf83b4a3900078b6be6',
finding.unique_id_from_tool)
self.assertEqual(
'HTTP headers weakness in initech.com web server',
finding.title)
        self.assertEqual('Critical', finding.severity)
        self.assertEqual(
"https://dashboard.intsights.com/#/threat-command/alerts?search=5c80dbf83b4a3900078b6be6",
finding.references)
def test_intsights_parser_with_one_critical_vuln_has_one_findings_csv(
self):
testfile = open("unittests/scans/intsights/intsights_one_vuln.csv")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
finding = list(findings)[0]
self.assertEqual(
"mn7xy83finmmth4ja363rci9",
finding.unique_id_from_tool)
self.assertEqual(
"HTTP headers weakness in company-domain.com web server",
finding.title)
def test_intsights_parser_with_many_vuln_has_many_findings_json(self):
testfile = open("unittests/scans/intsights/intsights_many_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(3, len(findings))
def test_intsights_parser_with_many_vuln_has_many_findings_csv(self):
testfile = open("unittests/scans/intsights/intsights_many_vuln.csv")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(9, len(findings))
def test_intsights_parser_invalid_text_with_error_csv(self):
with self.assertRaises(ValueError):
testfile = open(
"unittests/scans/intsights/intsights_invalid_file.txt")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
| [
"dojo.models.Test",
"dojo.tools.intsights.parser.IntSightsParser"
]
| [((358, 375), 'dojo.tools.intsights.parser.IntSightsParser', 'IntSightsParser', ([], {}), '()\n', (373, 375), False, 'from dojo.tools.intsights.parser import IntSightsParser\n'), ((1169, 1186), 'dojo.tools.intsights.parser.IntSightsParser', 'IntSightsParser', ([], {}), '()\n', (1184, 1186), False, 'from dojo.tools.intsights.parser import IntSightsParser\n'), ((1750, 1767), 'dojo.tools.intsights.parser.IntSightsParser', 'IntSightsParser', ([], {}), '()\n', (1765, 1767), False, 'from dojo.tools.intsights.parser import IntSightsParser\n'), ((2062, 2079), 'dojo.tools.intsights.parser.IntSightsParser', 'IntSightsParser', ([], {}), '()\n', (2077, 2079), False, 'from dojo.tools.intsights.parser import IntSightsParser\n'), ((425, 431), 'dojo.models.Test', 'Test', ([], {}), '()\n', (429, 431), False, 'from dojo.models import Test\n'), ((1236, 1242), 'dojo.models.Test', 'Test', ([], {}), '()\n', (1240, 1242), False, 'from dojo.models import Test\n'), ((1817, 1823), 'dojo.models.Test', 'Test', ([], {}), '()\n', (1821, 1823), False, 'from dojo.models import Test\n'), ((2129, 2135), 'dojo.models.Test', 'Test', ([], {}), '()\n', (2133, 2135), False, 'from dojo.models import Test\n'), ((2437, 2454), 'dojo.tools.intsights.parser.IntSightsParser', 'IntSightsParser', ([], {}), '()\n', (2452, 2454), False, 'from dojo.tools.intsights.parser import IntSightsParser\n'), ((2508, 2514), 'dojo.models.Test', 'Test', ([], {}), '()\n', (2512, 2514), False, 'from dojo.models import Test\n')] |
from __future__ import unicode_literals
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIRequestFactory, force_authenticate
from django_comments_xtd import django_comments
from django_comments_xtd.api.views import CommentCreate
from django_comments_xtd.tests.models import Article, Diary
request_factory = APIRequestFactory()
def post_comment(data, auth_user=None):
request = request_factory.post(reverse('comments-xtd-api-create'), data)
if auth_user:
force_authenticate(request, user=auth_user)
view = CommentCreate.as_view()
return view(request)
class CommentCreateTestCase(TestCase):
def setUp(self):
patcher = patch('django_comments_xtd.views.send_mail')
self.mock_mailer = patcher.start()
self.article = Article.objects.create(
title="October", slug="october", body="What I did on October...")
self.form = django_comments.get_form()(self.article)
def test_post_returns_2xx_response(self):
data = {"name": "Bob", "email": "<EMAIL>",
"followup": True, "reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal eine kleine...",
"honeypot": ""}
data.update(self.form.initial)
response = post_comment(data)
self.assertEqual(response.status_code, 204)
self.assertEqual(self.mock_mailer.call_count, 1)
def test_post_returns_4xx_response(self):
# It uses an authenticated user, but the user has no mail address.
self.user = User.objects.create_user("bob", "", "pwd")
data = {"name": "", "email": "",
"followup": True, "reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal eine kleine...",
"honeypot": ""}
data.update(self.form.initial)
response = post_comment(data, auth_user=self.user)
self.assertEqual(response.status_code, 400)
self.assertTrue('name' in response.data)
self.assertTrue('email' in response.data)
self.assertEqual(self.mock_mailer.call_count, 0)
| [
"mock.patch",
"rest_framework.test.force_authenticate",
"django_comments_xtd.django_comments.get_form",
"django_comments_xtd.api.views.CommentCreate.as_view",
"django_comments_xtd.tests.models.Article.objects.create",
"django.urls.reverse",
"rest_framework.test.APIRequestFactory",
"django.contrib.auth.models.User.objects.create_user"
]
| [((554, 573), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (571, 573), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((774, 797), 'django_comments_xtd.api.views.CommentCreate.as_view', 'CommentCreate.as_view', ([], {}), '()\n', (795, 797), False, 'from django_comments_xtd.api.views import CommentCreate\n'), ((651, 685), 'django.urls.reverse', 'reverse', (['"""comments-xtd-api-create"""'], {}), "('comments-xtd-api-create')\n", (658, 685), False, 'from django.urls import reverse\n'), ((719, 762), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request'], {'user': 'auth_user'}), '(request, user=auth_user)\n', (737, 762), False, 'from rest_framework.test import APIRequestFactory, force_authenticate\n'), ((903, 947), 'mock.patch', 'patch', (['"""django_comments_xtd.views.send_mail"""'], {}), "('django_comments_xtd.views.send_mail')\n", (908, 947), False, 'from mock import patch\n'), ((1014, 1107), 'django_comments_xtd.tests.models.Article.objects.create', 'Article.objects.create', ([], {'title': '"""October"""', 'slug': '"""october"""', 'body': '"""What I did on October..."""'}), "(title='October', slug='october', body=\n 'What I did on October...')\n", (1036, 1107), False, 'from django_comments_xtd.tests.models import Article, Diary\n'), ((1767, 1809), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (['"""bob"""', '""""""', '"""pwd"""'], {}), "('bob', '', 'pwd')\n", (1791, 1809), False, 'from django.contrib.auth.models import User\n'), ((1136, 1162), 'django_comments_xtd.django_comments.get_form', 'django_comments.get_form', ([], {}), '()\n', (1160, 1162), False, 'from django_comments_xtd import django_comments\n')] |
'''
@file momentum_kinematics_optimizer.py
@package momentumopt
@author <NAME> (<EMAIL>)
@license License BSD-3-Clause
@copyright Copyright (c) 2019, New York University and Max Planck Gesellschaft.
@date 2019-10-08
'''
import os
import numpy as np
from momentumopt.kinoptpy.qp import QpSolver
from momentumopt.kinoptpy.inverse_kinematics import PointContactInverseKinematics
from pinocchio import RobotWrapper
import pinocchio as se3
from pinocchio.utils import zero
from pymomentum import *
from momentumopt.quadruped.quadruped_wrapper import QuadrupedWrapper
from momentumopt.kinoptpy.min_jerk_traj import *
from pymomentum import \
PlannerVectorParam_KinematicDefaultJointPositions, \
PlannerIntParam_NumTimesteps, \
PlannerDoubleParam_TimeStep
class Contact(object):
def __init__(self, position, start_time, end_time):
self.pos = position
self.init_time = start_time
self.final_time = end_time
def position(self):
return self.pos
def start_time(self):
return self.init_time
def end_time(self):
return self.final_time
def get_contact_plan(contact_states, effs):
contacts = {}
for i, eff in enumerate(effs):
num_contacts = len(contact_states(i))
contacts[eff] = []
for j in range(num_contacts):
contact_ = contact_states(i)[j]
start_time = contact_.start_time
end_time = contact_.end_time
position = contact_.position
contacts[eff].append(Contact(position, start_time, end_time))
return contacts
def generate_eff_traj(contacts, z_offset):
effs = contacts.keys()
eff_traj_poly = {}
for eff in effs:
cnt = contacts[eff]
num_contacts = len(cnt)
poly_traj = [
PolynominalList(), PolynominalList(), PolynominalList()
]
for i in range(num_contacts):
# Create a constant polynominal for endeffector on the ground.
t = [cnt[i].start_time(), cnt[i].end_time()]
for idx in range(3):
poly_traj[idx].append(t, constant_poly(cnt[i].position()[idx]))
# If there is a contact following, add the transition between
# the two contact points.
if i < num_contacts - 1:
t = [cnt[i].end_time(), cnt[i+1].start_time()]
for idx in range(3):
via = None
if idx == 2:
via = z_offset + cnt[i].position()[idx]
poly = poly_points(t, cnt[i].position()[idx], cnt[i+1].position()[idx], via)
poly_traj[idx].append(t, poly)
eff_traj_poly[eff] = poly_traj
# returns end eff trajectories
return eff_traj_poly
class EndeffectorTrajectoryGenerator(object):
def __init__(self):
self.z_offset = 0.1
def get_z_bound(self, mom_kin_optimizer):
z_max = min(max(mom_kin_optimizer.com_dyn[:, 2]), self.max_bound)
z_min = max(min(mom_kin_optimizer.com_dyn[:, 2]), self.min_bound)
return z_max, z_min
def __call__(self, mom_kin_optimizer):
'''
Computes the endeffector positions and velocities.
Returns endeff_pos_ref, endeff_vel_ref
[0]: endeff_pos_ref: np.array, shape=[num_time_steps, num_eff, 3={x, y, z}]
[1]: endeff_vel_ref: np.array, shape=[num_time_steps, num_eff, 3={x, y, z}]
'''
dt = mom_kin_optimizer.dt
num_eff = len(mom_kin_optimizer.eff_names)
num_time_steps = mom_kin_optimizer.num_time_steps
contacts = get_contact_plan(mom_kin_optimizer.contact_sequence.contact_states,
mom_kin_optimizer.eff_names)
# Generate minimum jerk trajectories
eff_traj_poly = generate_eff_traj(contacts, self.z_offset)
# Compute the endeffector position and velocity trajectories.
endeff_pos_ref = np.zeros((num_time_steps, num_eff, 3))
endeff_vel_ref = np.zeros((num_time_steps, num_eff, 3))
endeff_contact = np.zeros((num_time_steps, num_eff))
for it in range(num_time_steps):
for eff, name in enumerate(mom_kin_optimizer.eff_names):
endeff_pos_ref[it][eff] = [eff_traj_poly[name][i].eval(it * dt) for i in range(3)]
endeff_vel_ref[it][eff] = [eff_traj_poly[name][i].deval(it * dt) for i in range(3)]
# HACK: If the velocity is zero, assume the endeffector is in
# contact with the ground.
if np.all(endeff_vel_ref[it][eff] == 0.):
endeff_contact[it][eff] = 1.
else:
endeff_contact[it][eff] = 0.
return endeff_pos_ref, endeff_vel_ref, endeff_contact
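
    # Editor-added note (illustrative only, not from the original planner): the
    # zero-velocity contact heuristic above boils down to, per time step,
    #
    #     vel = np.array([[0., 0., 0.], [0.1, 0., 0.2]])     # two feet
    #     contact = np.all(vel == 0., axis=1).astype(float)  # -> array([1., 0.])
    #
    # i.e. a foot is flagged as "in contact" exactly when its reference velocity vanishes.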
class JointTrajectoryGenerator(object):
def __init__(self):
self.dt =.01
self.num_time_steps = None
self.q_init = None
self.poly_traj = None
def joint_traj(self, q_via):
self.poly_traj = []
for i in range(len(self.q_init)):
self.poly_traj = np.append(self.poly_traj, [PolynominalList()])
for j in range(len(self.q_init)):
for i in range (len(q_via[:,0])+1):
if i==0:
t = [0, q_via[0,0]/self.dt]
poly = poly_points(t, self.q_init[j], q_via[i,j+1])
self.poly_traj[j].append(t, poly)
elif(i==len(q_via[:,0])):
t = [q_via[i-1,0]/self.dt, self.num_time_steps]
poly = poly_points(t, q_via[i-1,j+1], self.q_init[j])
self.poly_traj[j].append(t, poly)
else:
t = [q_via[i-1,0]/self.dt, q_via[i,0]/self.dt]
poly = poly_points(t, q_via[i-1,j+1], q_via[i,j+1])
self.poly_traj[j].append(t, poly)
def eval_traj(self,t):
q = np.zeros((1,len(self.q_init)),float)
for j in range(len(self.q_init)):
q[0,j] = self.poly_traj[j].eval(t)
return np.matrix(q)
class MomentumKinematicsOptimizer(object):
def __init__(self):
self.q_init = None
self.dq_init = None
self.reg_orientation = 1e-2
self.reg_joint_position = 2.
self.joint_des = None
def reset(self):
self.kinematics_sequence = KinematicsSequence()
self.kinematics_sequence.resize(self.planner_setting.get(PlannerIntParam_NumTimesteps),
self.planner_setting.get(PlannerIntParam_NumDofs))
def initialize(self, planner_setting, max_iterations=50, eps=0.001, endeff_traj_generator=None,
RobotWrapper=QuadrupedWrapper):
self.planner_setting = planner_setting
if endeff_traj_generator is None:
endeff_traj_generator = EndeffectorTrajectoryGenerator()
self.endeff_traj_generator = endeff_traj_generator
self.dt = planner_setting.get(PlannerDoubleParam_TimeStep)
self.num_time_steps = planner_setting.get(PlannerIntParam_NumTimesteps)
self.max_iterations = max_iterations
self.eps = eps
self.robot = RobotWrapper()
self.reset()
# Holds dynamics and kinematics results
self.com_dyn = np.zeros((self.num_time_steps, 3))
self.lmom_dyn = np.zeros((self.num_time_steps, 3))
self.amom_dyn = np.zeros((self.num_time_steps, 3))
self.com_kin = np.zeros((self.num_time_steps, 3))
self.lmom_kin = np.zeros((self.num_time_steps, 3))
self.amom_kin = np.zeros((self.num_time_steps, 3))
self.q_kin = np.zeros((self.num_time_steps, self.robot.model.nq))
self.dq_kin = np.zeros((self.num_time_steps, self.robot.model.nv))
self.hip_names = ['{}_HFE'.format(eff) for eff in self.robot.effs]
self.hip_ids = [self.robot.model.getFrameId(name) for name in self.hip_names]
self.eff_names = ['{}_{}'.format(eff, self.robot.joints_list[-1]) for eff in self.robot.effs]
self.inv_kin = PointContactInverseKinematics(self.robot.model, self.eff_names)
self.motion_eff = {
'trajectory': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'velocity': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'trajectory_wrt_base': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'velocity_wrt_base': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne))
}
def fill_data_from_dynamics(self):
# The centroidal information
for it in range(self.num_time_steps):
self.com_dyn[it] = self.dynamic_sequence.dynamics_states[it].com
self.lmom_dyn[it] = self.dynamic_sequence.dynamics_states[it].lmom
self.amom_dyn[it] = self.dynamic_sequence.dynamics_states[it].amom
def fill_endeffector_trajectory(self):
self.endeff_pos_ref, self.endeff_vel_ref, self.endeff_contact = \
self.endeff_traj_generator(self)
def fill_kinematic_result(self, it, q, dq):
def framesPos(frames):
return np.vstack([data.oMf[idx].translation for idx in frames]).reshape(-1)
def framesVel(frames):
return np.vstack([
self.inv_kin.get_world_oriented_frame_jacobian(q, idx).dot(dq)[:3] for idx in frames
]).reshape(-1)
data = self.inv_kin.robot.data
hg = self.inv_kin.robot.centroidalMomentum(q, dq)
# Storing on the internal array.
self.com_kin[it] = self.inv_kin.robot.com(q).T
self.lmom_kin[it] = hg.linear.T
self.amom_kin[it] = hg.angular.T
self.q_kin[it] = q.T
self.dq_kin[it] = dq.T
# The endeffector informations as well.
self.motion_eff['trajectory'][it] = framesPos(self.inv_kin.endeff_ids)
self.motion_eff['velocity'][it] = self.inv_kin.J[6:(self.inv_kin.ne + 2) * 3].dot(dq).T
self.motion_eff['trajectory_wrt_base'][it] = \
self.motion_eff['trajectory'][it] - framesPos(self.hip_ids)
self.motion_eff['velocity_wrt_base'][it] = \
self.motion_eff['velocity'][it] - framesVel(self.hip_ids)
# Storing on the kinematic sequence.
kinematic_state = self.kinematics_sequence.kinematics_states[it]
kinematic_state.com = self.com_kin[it]
kinematic_state.lmom = self.lmom_kin[it]
kinematic_state.amom = self.amom_kin[it]
kinematic_state.robot_posture.base_position = q[:3]
kinematic_state.robot_posture.base_orientation = q[3:7]
kinematic_state.robot_posture.joint_positions = q[7:]
kinematic_state.robot_velocity.base_linear_velocity = dq[:3]
kinematic_state.robot_velocity.base_angular_velocity = dq[3:6]
kinematic_state.robot_velocity.joint_velocities = dq[6:]
def optimize_initial_position(self, init_state):
# Optimize the initial configuration
q = se3.neutral(self.robot.model)
plan_joint_init_pos = self.planner_setting.get(
PlannerVectorParam_KinematicDefaultJointPositions)
if len(plan_joint_init_pos) != self.robot.num_ctrl_joints:
raise ValueError(
'Number of joints in config file not same as required for robot\n' +
'Got %d joints but robot expects %d joints.' % (
len(plan_joint_init_pos), self.robot.num_ctrl_joints))
q[7:] = np.matrix(plan_joint_init_pos).T
q[2] = self.robot.floor_height + 0.32
dq = np.matrix(np.zeros(self.robot.robot.nv)).T
com_ref = init_state.com
lmom_ref = np.zeros(3)
amom_ref = np.zeros(3)
endeff_pos_ref = np.array([init_state.effPosition(i) for i in range(init_state.effNum())])
endeff_vel_ref = np.matrix(np.zeros((init_state.effNum(), 3)))
endeff_contact = np.ones(init_state.effNum())
quad_goal = se3.Quaternion(se3.rpy.rpyToMatrix(np.matrix([0.0, 0, 0.]).T))
q[3:7] = quad_goal.coeffs()
for iters in range(self.max_iterations):
# Adding small P controller for the base orientation to always start with flat
# oriented base.
quad_q = se3.Quaternion(float(q[6]), float(q[3]), float(q[4]), float(q[5]))
amom_ref = 1e-1 * se3.log((quad_goal * quad_q.inverse()).matrix())
res = self.inv_kin.compute(q, dq, com_ref, lmom_ref, amom_ref,
endeff_pos_ref, endeff_vel_ref, endeff_contact, None)
q = se3.integrate(self.robot.model, q, res)
if np.linalg.norm(res) < 1e-3:
print('Found initial configuration after {} iterations'.format(iters + 1))
break
if iters == self.max_iterations - 1:
print('Failed to converge for initial setup.')
print("initial configuration: \n", q)
self.q_init = q.copy()
self.dq_init = dq.copy()
def optimize(self, init_state, contact_sequence, dynamic_sequence, plotting=False):
self.init_state = init_state
self.contact_sequence = contact_sequence
self.dynamic_sequence = dynamic_sequence
self.q_via = None
# Create array with centroidal and endeffector informations.
self.fill_data_from_dynamics()
self.fill_endeffector_trajectory()
# Run the optimization for the initial configuration only once.
if self.q_init is None:
self.optimize_initial_position(init_state)
# Get the desired joint trajectory
# print "num_joint_via:",self.planner_setting.get(PlannerIntParam_NumJointViapoints)
# print "joint_via:",self.planner_setting.get(PlannerCVectorParam_JointViapoints)
# TODO: this is for jump, should go to config file
# q_jump = [1., 0.1, -0.2 ,0.1, -0.2 ,-0.1, 0.2 ,-0.1, 0.2]
# q_via = np.matrix([.75, np.pi/2, -np.pi, np.pi/2, -np.pi, -np.pi/2, np.pi, -np.pi/2, np.pi]).T
# q_max = np.matrix([1.35, .7*np.pi/2, -.7*np.pi, .7*np.pi/2, -.7*np.pi, -.7*np.pi/2, .7*np.pi, -.7*np.pi/2, .7*np.pi]).T
# q_via0 = np.vstack((q_via.T, q_jump))
# self.q_via = np.vstack((q_via0, q_max.T))
joint_traj_gen = JointTrajectoryGenerator()
joint_traj_gen.num_time_steps = self.num_time_steps
joint_traj_gen.q_init = self.q_init[7:]
self.joint_des = np.zeros((len(self.q_init[7:]),self.num_time_steps), float)
if self.q_via is None:
for i in range (self.num_time_steps):
self.joint_des[:,i] = self.q_init[7 : ].T
else:
joint_traj_gen.joint_traj(self.q_via)
for it in range(self.num_time_steps):
self.joint_des[:,it] = joint_traj_gen.eval_traj(it)
# Compute inverse kinematics over the full trajectory.
self.inv_kin.is_init_time = 0
q, dq = self.q_init.copy(), self.dq_init.copy()
for it in range(self.num_time_steps):
quad_goal = se3.Quaternion(se3.rpy.rpyToMatrix(np.matrix([0.0, 0, 0.]).T))
quad_q = se3.Quaternion(float(q[6]), float(q[3]), float(q[4]), float(q[5]))
amom_ref = (self.reg_orientation * se3.log((quad_goal * quad_q.inverse()).matrix()).T + self.amom_dyn[it]).reshape(-1)
joint_regularization_ref = self.reg_joint_position * (np.matrix(self.joint_des[:,it]).T - q[7 : ])
# joint_regularization_ref = self.reg_joint_position * (self.q_init[7 : ] - q[7 : ])
# Fill the kinematics results for it.
self.inv_kin.forward_robot(q, dq)
self.fill_kinematic_result(it, q, dq)
dq = self.inv_kin.compute(
q, dq, self.com_dyn[it], self.lmom_dyn[it], amom_ref,
self.endeff_pos_ref[it], self.endeff_vel_ref[it],
self.endeff_contact[it], joint_regularization_ref)
# Integrate to the next state.
q = se3.integrate(self.robot.model, q, dq * self.dt)
| [
"numpy.all",
"pinocchio.integrate",
"pinocchio.neutral",
"numpy.zeros",
"pinocchio.RobotWrapper",
"numpy.vstack",
"numpy.linalg.norm",
"numpy.matrix",
"momentumopt.kinoptpy.inverse_kinematics.PointContactInverseKinematics"
]
| [((3951, 3989), 'numpy.zeros', 'np.zeros', (['(num_time_steps, num_eff, 3)'], {}), '((num_time_steps, num_eff, 3))\n', (3959, 3989), True, 'import numpy as np\n'), ((4015, 4053), 'numpy.zeros', 'np.zeros', (['(num_time_steps, num_eff, 3)'], {}), '((num_time_steps, num_eff, 3))\n', (4023, 4053), True, 'import numpy as np\n'), ((4079, 4114), 'numpy.zeros', 'np.zeros', (['(num_time_steps, num_eff)'], {}), '((num_time_steps, num_eff))\n', (4087, 4114), True, 'import numpy as np\n'), ((6070, 6082), 'numpy.matrix', 'np.matrix', (['q'], {}), '(q)\n', (6079, 6082), True, 'import numpy as np\n'), ((7184, 7198), 'pinocchio.RobotWrapper', 'RobotWrapper', ([], {}), '()\n', (7196, 7198), False, 'from pinocchio import RobotWrapper\n'), ((7293, 7327), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (7301, 7327), True, 'import numpy as np\n'), ((7352, 7386), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (7360, 7386), True, 'import numpy as np\n'), ((7411, 7445), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (7419, 7445), True, 'import numpy as np\n'), ((7470, 7504), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (7478, 7504), True, 'import numpy as np\n'), ((7529, 7563), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (7537, 7563), True, 'import numpy as np\n'), ((7588, 7622), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (7596, 7622), True, 'import numpy as np\n'), ((7644, 7696), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, self.robot.model.nq)'], {}), '((self.num_time_steps, self.robot.model.nq))\n', (7652, 7696), True, 'import numpy as np\n'), ((7719, 7771), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, self.robot.model.nv)'], {}), '((self.num_time_steps, self.robot.model.nv))\n', (7727, 7771), True, 'import numpy as np\n'), ((8059, 8122), 'momentumopt.kinoptpy.inverse_kinematics.PointContactInverseKinematics', 'PointContactInverseKinematics', (['self.robot.model', 'self.eff_names'], {}), '(self.robot.model, self.eff_names)\n', (8088, 8122), False, 'from momentumopt.kinoptpy.inverse_kinematics import PointContactInverseKinematics\n'), ((10961, 10990), 'pinocchio.neutral', 'se3.neutral', (['self.robot.model'], {}), '(self.robot.model)\n', (10972, 10990), True, 'import pinocchio as se3\n'), ((11638, 11649), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (11646, 11649), True, 'import numpy as np\n'), ((11669, 11680), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (11677, 11680), True, 'import numpy as np\n'), ((8178, 8230), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3 * self.inv_kin.ne)'], {}), '((self.num_time_steps, 3 * self.inv_kin.ne))\n', (8186, 8230), True, 'import numpy as np\n'), ((8256, 8308), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3 * self.inv_kin.ne)'], {}), '((self.num_time_steps, 3 * self.inv_kin.ne))\n', (8264, 8308), True, 'import numpy as np\n'), ((8345, 8397), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3 * self.inv_kin.ne)'], {}), '((self.num_time_steps, 3 * self.inv_kin.ne))\n', (8353, 8397), True, 'import numpy as np\n'), ((8432, 8484), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3 * self.inv_kin.ne)'], {}), '((self.num_time_steps, 3 * self.inv_kin.ne))\n', (8440, 8484), True, 'import numpy as np\n'), ((11450, 11480), 'numpy.matrix', 
'np.matrix', (['plan_joint_init_pos'], {}), '(plan_joint_init_pos)\n', (11459, 11480), True, 'import numpy as np\n'), ((12545, 12584), 'pinocchio.integrate', 'se3.integrate', (['self.robot.model', 'q', 'res'], {}), '(self.robot.model, q, res)\n', (12558, 12584), True, 'import pinocchio as se3\n'), ((15958, 16006), 'pinocchio.integrate', 'se3.integrate', (['self.robot.model', 'q', '(dq * self.dt)'], {}), '(self.robot.model, q, dq * self.dt)\n', (15971, 16006), True, 'import pinocchio as se3\n'), ((4566, 4604), 'numpy.all', 'np.all', (['(endeff_vel_ref[it][eff] == 0.0)'], {}), '(endeff_vel_ref[it][eff] == 0.0)\n', (4572, 4604), True, 'import numpy as np\n'), ((11552, 11581), 'numpy.zeros', 'np.zeros', (['self.robot.robot.nv'], {}), '(self.robot.robot.nv)\n', (11560, 11581), True, 'import numpy as np\n'), ((12601, 12620), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {}), '(res)\n', (12615, 12620), True, 'import numpy as np\n'), ((9113, 9169), 'numpy.vstack', 'np.vstack', (['[data.oMf[idx].translation for idx in frames]'], {}), '([data.oMf[idx].translation for idx in frames])\n', (9122, 9169), True, 'import numpy as np\n'), ((11960, 11984), 'numpy.matrix', 'np.matrix', (['[0.0, 0, 0.0]'], {}), '([0.0, 0, 0.0])\n', (11969, 11984), True, 'import numpy as np\n'), ((15040, 15064), 'numpy.matrix', 'np.matrix', (['[0.0, 0, 0.0]'], {}), '([0.0, 0, 0.0])\n', (15049, 15064), True, 'import numpy as np\n'), ((15354, 15386), 'numpy.matrix', 'np.matrix', (['self.joint_des[:, it]'], {}), '(self.joint_des[:, it])\n', (15363, 15386), True, 'import numpy as np\n')] |
import json
import urllib
import os
import jupyterhub
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from traitlets import Unicode
from jupyterhub.auth import Authenticator
from tornado import gen
class HttpAuthenticator(Authenticator):
server = Unicode(
None,
allow_none=True,
config=True,
help="""
Http authentication server.
"""
)
appid = Unicode(
None,
allow_none=True,
config=True,
help="""
Application Id recognized by the http authentication server
"""
)
@gen.coroutine
def authenticate(self, handler, data):
http_client = AsyncHTTPClient()
headers = {
"Accept": "application/json",
"User-Agent": "JupyterHub",
}
params = dict(
type="json",
appid=self.appid,
ac=data['username'],
pw=data['password']
)
req = HTTPRequest(self.server,
method="POST",
headers=headers,
body=urllib.parse.urlencode(params),
validate_cert = False
)
resp = yield http_client.fetch(req)
reply = json.loads(resp.body.decode('utf8', 'replace'))
if reply.get("code") == 200:
return (reply.get("data").get("UserCN"))
else:
return None
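# A minimal jupyterhub_config.py sketch showing how this authenticator might be
# enabled. The module path, server URL and appid values below are placeholders,
# not values taken from this file:
#
#   c = get_config()  # provided by JupyterHub when it loads the config file
#   c.JupyterHub.authenticator_class = 'httpauthenticator.HttpAuthenticator'
#   c.HttpAuthenticator.server = 'https://auth.example.com/api/login'
#   c.HttpAuthenticator.appid = 'my-app-id'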
| [
"urllib.parse.urlencode",
"traitlets.Unicode",
"tornado.httpclient.AsyncHTTPClient"
]
| [((265, 373), 'traitlets.Unicode', 'Unicode', (['None'], {'allow_none': '(True)', 'config': '(True)', 'help': '"""\n Http authentication server.\n """'}), '(None, allow_none=True, config=True, help=\n """\n Http authentication server.\n """)\n', (272, 373), False, 'from traitlets import Unicode\n'), ((424, 567), 'traitlets.Unicode', 'Unicode', (['None'], {'allow_none': '(True)', 'config': '(True)', 'help': '"""\n Application Id recognized by the http authentication server\n """'}), '(None, allow_none=True, config=True, help=\n """\n Application Id recognized by the http authentication server\n """\n )\n', (431, 567), False, 'from traitlets import Unicode\n'), ((680, 697), 'tornado.httpclient.AsyncHTTPClient', 'AsyncHTTPClient', ([], {}), '()\n', (695, 697), False, 'from tornado.httpclient import HTTPRequest, AsyncHTTPClient\n'), ((1120, 1150), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['params'], {}), '(params)\n', (1142, 1150), False, 'import urllib\n')] |
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# NOT -> ParameterModule
# NOT -> children_and_parameters
# NOT -> flatten_model
# NOT -> lr_range
# NOT -> scheduling functions
# NOT -> SmoothenValue
# YES -> lr_find
# NOT -> plot_lr_find
# NOT TO BE MODIFIED
class ParameterModule(nn.Module):
"Register a lone parameter 'p' in a module"
def __init__(self, p:nn.Parameter):
super().__init__()
self.val = p
def forward(self, x):
return x
# NOT TO BE MODIFIED
# To be used to flatten_model
def children_and_parameters(m:nn.Module):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children
# NOT TO BE MODIFIED
flatten_model = lambda m: sum(map(flatten_model,children_and_parameters(m)),[]) if len(list(m.children())) else [m]
# NOT TO BE MODIFIED
def lr_range(model, lr):
"""
    Build differential learning rates from lr. When lr is a slice it returns an
    array with one learning rate per layer group; a plain float is returned unchanged.
Arguments:
model :- torch.nn.Module
lr :- float or slice
Returns:
        lr itself if it is a float, otherwise a numpy array of per-group learning rates
"""
if not isinstance(lr, slice):
return lr
num_layer = len([nn.Sequential(*flatten_model(model))])
if lr.start:
mult = lr.stop / lr.start
step = mult**(1/(num_layer-1))
res = np.array([lr.start*(step**i) for i in range(num_layer)])
else:
res = [lr.stop/10.]*(num_layer-1) + [lr.stop]
return np.array(res)
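# A small illustrative sketch of lr_range (assumed usage, not called anywhere in
# this file): a plain float passes through unchanged, while a slice is expanded
# from its stop value over the (single, default) layer group.
def _lr_range_sketch():
    model = nn.Sequential(nn.Linear(4, 2))
    single = lr_range(model, 1e-3)           # a float is returned unchanged -> 0.001
    spread = lr_range(model, slice(1e-3))    # -> np.array([0.001])
    return single, spread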
# NOT TO BE MODIFIED
# These are the functions that would give us the values of lr. Liks for linearly
# increasing lr we would use annealing_linear.
# You can add your own custom function, for producing lr.
# By defualt annealing_exp is used for both lr and momentum
def annealing_no(start, end, pct:float):
"No annealing, always return `start`."
return start
def annealing_linear(start, end, pct:float):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
def annealing_exp(start, end, pct:float):
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
def annealing_cos(start, end, pct:float):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
def do_annealing_poly(start, end, pct:float, degree):
return end + (start-end) * (1-pct)**degree
# NOT TO BE MODIFIED
class Stepper():
"""
Used to step from start, end ('vals') over 'n_iter' iterations on a schedule.
We will create a stepper object and then use one of the above annelaing functions,
to step from start lr to end lr.
"""
def __init__(self, vals, n_iter:int, func=None):
self.start, self.end = (vals[0], vals[1]) if isinstance(vals, tuple) else (vals,0)
self.n_iter = max(1, n_iter)
if func is None:
self.func = annealing_linear if isinstance(vals, tuple) else annealing_no
else:
self.func = func
self.n = 0
def step(self):
"Return next value along annealed schedule"
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
@property
def is_done(self)->bool:
"Return 'True' if schedule completed"
return self.n >= self.n_iter
# NOT TO BE MODIFIED
class SmoothenValue():
"Create a smooth moving average for a value (loss, etc) using `beta`."
def __init__(self, beta:float):
self.beta,self.n,self.mov_avg = beta,0,0
def add_value(self, val:float)->None:
"Add `val` to calculate updated smoothed value."
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
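# A short, self-contained sketch (not used elsewhere in this file) of how Stepper
# and SmoothenValue behave: the stepper walks exponentially from start to end over
# n_iter steps, and the smoothener keeps a bias-corrected moving average of the
# values fed to it. All numbers below are illustrative.
def _stepper_and_smoothener_sketch():
    sched = Stepper((1e-5, 1e-1), n_iter=4, func=annealing_exp)
    lrs = [sched.step() for _ in range(4)]      # 1e-4, 1e-3, 1e-2, 1e-1
    smoothener = SmoothenValue(beta=0.98)
    for value in (2.0, 1.5, 1.0):
        smoothener.add_value(value)
    return lrs, smoothener.smooth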
# TO BE MODIFIED IN SOME CASES
def lr_find(data_loader, model, loss_fn, opt, wd:int=0, start_lr:float=1e-7, end_lr:float=10,
num_it:int=100, stop_div:bool=True, smooth_beta:float=0.98, use_gpu:bool=True,
device=torch.device('cuda'), anneal_func=annealing_exp):
"""
The main function that you will call to plot learning_rate vs losses graph. It is
the only function from lr_find.py that you will call. By default it will use GPU. It
assumes your model is already on GPU if you use use_gpu.
Arguments:-
data_loader :- torch.utils.data.DataLoader
model :- torch.nn.Module
loss_fn :- torch.nn.LossFunction
opt :- torch.optim.Optimizer
wd :- weight decay (default=0).
start_lr :- The learning rate from where to start in lr_find (default=1e-7)
end_lr :- The learning rate at which to end lr_find (default=10)
num_it :- Number of iterations for lr_find (default=100)
stop_div :- If the loss diverges, then stop early (default=True)
        smooth_beta :- The beta value to smoothen the running average of the loss function (default=0.98)
use_gpu :- True (train on GPU) else CPU
anneal_func :- The step function you want to use (default exp)
device :- Torch device to use for training model (default GPU)
Returns:
losses :- list of smoothened version of losses
lrs :- list of all lrs that we test
"""
model.train()
stop = False
flag = False
best_loss = 0.
iteration = 0
losses = []
lrs = []
lrs.append(start_lr)
start_lr = lr_range(model, start_lr)
start_lr = np.array(start_lr) if isinstance(start_lr, (tuple, list)) else start_lr
end_lr = lr_range(model, end_lr)
end_lr = np.array(end_lr) if isinstance(end_lr, (tuple, list)) else end_lr
sched = Stepper((start_lr, end_lr), num_it, anneal_func)
smoothener = SmoothenValue(smooth_beta)
epochs = int(np.ceil(num_it/len(data_loader)))
# save model_dict
model_state = model.state_dict()
opt_state = opt.state_dict()
# Set optimizer learning_rate = start_lr
for group in opt.param_groups:
group['lr'] = sched.start
for i in range(epochs):
for data in data_loader:
opt.zero_grad()
################### TO BE MODIFIED ###################
# Depending on your model, you will have to modify your
# data pipeline and how you give inputs to your model.
inputs, labels = data
if use_gpu:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = loss_fn(outputs, labels)
#####################################################
if use_gpu:
smoothener.add_value(loss.detach().cpu())
else:
smoothener.add_value(loss.detach())
smooth_loss = smoothener.smooth
losses.append(smooth_loss)
loss.backward()
################### TO BE MODIFIED ###################
# For AdamW. If you want to use Adam, comment these lines
for group in opt.param_groups:
for param in group['params']:
param.data = param.data.add(-wd * group['lr'], param.data)
#####################################################
opt.step()
# Change lr
new_lr = sched.step()
lrs.append(new_lr)
for group in opt.param_groups:
group['lr'] = new_lr
################### TO BE MODIFIED ###################
# You necessarily don't want to change it. But in cases
# when you are maximizing the loss, then you will have
# to change it.
if iteration == 0 or smooth_loss < best_loss:
best_loss = smooth_loss
iteration += 1
if sched.is_done or (stop_div and (smooth_loss > 4*best_loss or torch.isnan(loss))):
flag = True
break
#####################################################
if iteration%10 == 0:
print(f'Iteration: {iteration}')
if flag:
break
# Load state dict
model.load_state_dict(model_state)
opt.load_state_dict(opt_state)
lrs.pop()
print(f'LR Finder is complete.')
return losses, lrs
# NOT TO BE MODIFIED
def plot_lr_find(losses, lrs, skip_start:int=10, skip_end:int=5, suggestion:bool=False, return_fig:bool=None):
"""
It will take the losses and lrs returned by lr_find as input.
Arguments:-
skip_start -> It will skip skip_start lrs from the start
skip_end -> It will skip skip_end lrs from the end
suggestion -> If you want to see the point where the gradient changes most
return_fig -> True then get the fig in the return statement
"""
lrs = lrs[skip_start:-skip_end] if skip_end > 0 else lrs[skip_start:]
losses = losses[skip_start:-skip_end] if skip_end > 0 else losses[skip_start:]
losses = [x.item() for x in losses]
fig, ax = plt.subplots(1, 1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
if suggestion:
try:
mg = (np.gradient(np.array(losses))).argmin()
except:
print("Failed to compute the gradients, there might not be enough points.")
return
print(f"Min numerical gradient: {lrs[mg]:.2E}")
ax.plot(lrs[mg], losses[mg], markersize=10, marker='o', color='red')
if return_fig is not None:
return fig
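# A hedged end-to-end usage sketch: a tiny synthetic regression problem run through
# lr_find and plot_lr_find. The model, data and optimizer below are illustrative
# placeholders, not part of the original module.
if __name__ == '__main__':
    from torch.utils.data import DataLoader, TensorDataset

    inputs = torch.randn(512, 10)
    targets = torch.randn(512, 1)
    loader = DataLoader(TensorDataset(inputs, targets), batch_size=32, shuffle=True)

    model = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 1))
    loss_fn = nn.MSELoss()
    opt = torch.optim.SGD(model.parameters(), lr=1e-7)

    # Run on CPU for the sketch; set use_gpu=True and move the model to CUDA otherwise.
    losses, lrs = lr_find(loader, model, loss_fn, opt, num_it=100, use_gpu=False)
    plot_lr_find(losses, lrs)
    plt.show()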
| [
"numpy.array",
"matplotlib.pyplot.FormatStrFormatter",
"numpy.cos",
"torch.isnan",
"matplotlib.pyplot.subplots",
"torch.device"
]
| [((1675, 1688), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (1683, 1688), True, 'import numpy as np\n'), ((4280, 4300), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4292, 4300), False, 'import torch\n'), ((9315, 9333), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (9327, 9333), True, 'import matplotlib.pyplot as plt\n'), ((2498, 2517), 'numpy.cos', 'np.cos', (['(np.pi * pct)'], {}), '(np.pi * pct)\n', (2504, 2517), True, 'import numpy as np\n'), ((5705, 5723), 'numpy.array', 'np.array', (['start_lr'], {}), '(start_lr)\n', (5713, 5723), True, 'import numpy as np\n'), ((5827, 5843), 'numpy.array', 'np.array', (['end_lr'], {}), '(end_lr)\n', (5835, 5843), True, 'import numpy as np\n'), ((9478, 9508), 'matplotlib.pyplot.FormatStrFormatter', 'plt.FormatStrFormatter', (['"""%.0e"""'], {}), "('%.0e')\n", (9500, 9508), True, 'import matplotlib.pyplot as plt\n'), ((8153, 8170), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (8164, 8170), False, 'import torch\n'), ((9573, 9589), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (9581, 9589), True, 'import numpy as np\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from pymortests.base import runmodule
if __name__ == "__main__":
runmodule(filename=__file__)
| [
"pymortests.base.runmodule"
]
| [((293, 321), 'pymortests.base.runmodule', 'runmodule', ([], {'filename': '__file__'}), '(filename=__file__)\n', (302, 321), False, 'from pymortests.base import runmodule\n')] |
# The MIT License
#
# Copyright 2014, 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
__all__ = ['PyJsParser', 'Node', 'WrappingNode', 'node_to_dict', 'parse', 'translate_js', 'translate', 'syntax_tree_translate',
'DEFAULT_HEADER']
__author__ = '<NAME>'
__version__ = '2.2.0'
from pyjsparser import PyJsParser, Node, WrappingNode, node_to_dict
# NOTE: the upstream translator module appears to spell this function 'trasnlate';
# alias it so the exported name matches 'translate' in __all__.
from translator import translate_js, trasnlate as translate, syntax_tree_translate, DEFAULT_HEADER
def parse(javascript_code):
"""Returns syntax tree of javascript_code.
Syntax tree has the same structure as syntax tree produced by esprima.js
Same as PyJsParser().parse For your convenience :) """
p = PyJsParser()
return p.parse(javascript_code)
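# A small usage sketch (assumed script-style usage, not part of the original module):
# parse a JavaScript snippet and inspect the resulting esprima-style syntax tree.
if __name__ == '__main__':
    tree = parse('var answer = 6 * 7;')
    print(tree['type'])             # 'Program'
    print(tree['body'][0]['type'])  # 'VariableDeclaration'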
| [
"pyjsparser.PyJsParser"
]
| [((1686, 1698), 'pyjsparser.PyJsParser', 'PyJsParser', ([], {}), '()\n', (1696, 1698), False, 'from pyjsparser import PyJsParser, Node, WrappingNode, node_to_dict\n')] |
import torch
def expanded_pairwise_distances(x, y):
'''
Input: x is a bxNxd matrix
           y is a bxMxd matrix
    Output: dist is a bxNxM matrix where dist[b,i,j] is the squared norm between x[b,i,:] and y[b,j,:],
            i.e. dist[b,i,j] = ||x[b,i,:] - y[b,j,:]||^2
'''
differences = x.unsqueeze(2) - y.unsqueeze(1)
distances = torch.sum(differences * differences, -1)
return distances
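# A small self-contained cross-check sketch (not used by the functions below): the
# same batched pairwise squared distances computed with plain NumPy broadcasting,
# useful for sanity-checking the torch implementation above.
def _expanded_pairwise_distances_numpy(x, y):
    import numpy as np
    x, y = np.asarray(x), np.asarray(y)
    differences = x[:, :, None, :] - y[:, None, :, :]   # b x N x M x d
    return (differences * differences).sum(-1)          # b x N x M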
def chamfer_distance(x, y):
'''
    Input: x and y are bxNxM tensors (b: batch size, N: number of points, M: point dimension, e.g. 2 for 2D or 3 for 3D)
    Output: a bx1 tensor with the value of the (one-sided) chamfer distance for each sample of the batch
'''
dist_vec = expanded_pairwise_distances(x, y)
min_distances = torch.topk(dist_vec, k=1, dim=2, largest=False).values
chamfer = torch.sum(min_distances, dim=1) / torch.tensor(x.shape[1])
return chamfer
class ChamferLoss(torch.nn.Module):
def forward(self, x, y):
chamfer = chamfer_distance(x, y)
return torch.sum(chamfer)
if __name__ == "__main__":
x = torch.tensor([
[
[0., 0., 0.],
[0., 1., 0.],
[0., 1., 0.],
],
[
[1., 1., 0.],
[1., 2., 0.],
[0., 1., 0.],
]
])
y = torch.tensor([
[
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
],
[
[1., 1., 0.],
[1., 2., 0.],
[0., 1., 0.],
]
])
chamfer = ChamferLoss()
print('chamfer loss torch (cpu):', chamfer(x, y))
print('chamfer loss torch (cuda):', chamfer(x.cuda(), y.cuda()))
# import sys
# sys.path.append("../distance/chamfer/")
# import dist_chamfer as cd
# CD = cd.chamferDist()
# dist1, dist2, _, _= CD(x, y)
# print('orig', dist1)
| [
"torch.tensor",
"torch.topk",
"torch.sum"
]
| [((390, 430), 'torch.sum', 'torch.sum', (['(differences * differences)', '(-1)'], {}), '(differences * differences, -1)\n', (399, 430), False, 'import torch\n'), ((1095, 1220), 'torch.tensor', 'torch.tensor', (['[[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], [[1.0, 1.0, 0.0], [\n 1.0, 2.0, 0.0], [0.0, 1.0, 0.0]]]'], {}), '([[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], [[1.0, \n 1.0, 0.0], [1.0, 2.0, 0.0], [0.0, 1.0, 0.0]]])\n', (1107, 1220), False, 'import torch\n'), ((1322, 1447), 'torch.tensor', 'torch.tensor', (['[[[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], [[1.0, 1.0, 0.0], [\n 1.0, 2.0, 0.0], [0.0, 1.0, 0.0]]]'], {}), '([[[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], [[1.0, \n 1.0, 0.0], [1.0, 2.0, 0.0], [0.0, 1.0, 0.0]]])\n', (1334, 1447), False, 'import torch\n'), ((766, 813), 'torch.topk', 'torch.topk', (['dist_vec'], {'k': '(1)', 'dim': '(2)', 'largest': '(False)'}), '(dist_vec, k=1, dim=2, largest=False)\n', (776, 813), False, 'import torch\n'), ((835, 866), 'torch.sum', 'torch.sum', (['min_distances'], {'dim': '(1)'}), '(min_distances, dim=1)\n', (844, 866), False, 'import torch\n'), ((869, 893), 'torch.tensor', 'torch.tensor', (['x.shape[1]'], {}), '(x.shape[1])\n', (881, 893), False, 'import torch\n'), ((1040, 1058), 'torch.sum', 'torch.sum', (['chamfer'], {}), '(chamfer)\n', (1049, 1058), False, 'import torch\n')] |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.contrib.admin.views.decorators import staff_member_required, user_passes_test
from rules.contrib.views import permission_required, objectgetter
from isisdata.models import *
from isisdata.utils import strip_punctuation, normalize
from isisdata import operations
from isisdata.filters import *
from isisdata import tasks as data_tasks
from curation import p3_port_utils
from curation.forms import *
from curation.contrib.views import check_rules
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
@check_rules('can_access_view_edit', fn=objectgetter(Authority, 'authority_id'))
def create_acrelation_for_authority(request, authority_id):
authority = get_object_or_404(Authority, pk=authority_id)
search_key = request.GET.get('search', request.POST.get('search'))
current_index = request.GET.get('current', request.POST.get('current'))
context = {
'curation_section': 'datasets',
'curation_subsection': 'authorities',
'instance': authority,
'search_key': search_key,
'current_index': current_index
}
if request.method == 'GET':
initial = {
'authority': authority.id,
'name_for_display_in_citation': authority.name
}
type_controlled = request.GET.get('type_controlled', None)
if type_controlled:
initial.update({'type_controlled': type_controlled.upper()})
form = ACRelationForm(prefix='acrelation', initial=initial)
elif request.method == 'POST':
form = ACRelationForm(request.POST, prefix='acrelation')
if form.is_valid():
form.save()
target = reverse('curation:curate_authority', args=(authority.id,)) + '?tab=acrelations'
if search_key and current_index:
target += '&search=%s¤t=%s' % (search_key, current_index)
return HttpResponseRedirect(target)
context.update({
'form': form,
})
template = 'curation/authority_acrelation_changeview.html'
return render(request, template, context)
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
@check_rules('can_access_view_edit', fn=objectgetter(Authority, 'authority_id'))
def create_aarelation_for_authority(request, authority_id):
authority = get_object_or_404(Authority, pk=authority_id)
search_key = request.GET.get('search', request.POST.get('search'))
current_index = request.GET.get('current', request.POST.get('current'))
context = {
'curation_section': 'datasets',
'curation_subsection': 'authorities',
'instance': authority,
'search_key': search_key,
'current_index': current_index
}
if request.method == 'GET':
initial = {
'subject': authority.id
}
        aarelation = AARelation()
        aarelation.subject = authority
        type_controlled = request.GET.get('type_controlled', None)
        if type_controlled:
            # Rebinding `aarelation` to the TYPE_CHOICES display label would pass a
            # plain string as `instance=` below; set the field on the instance
            # instead (assumed intent of the original lookup).
            aarelation.type_controlled = type_controlled
form = AARelationForm(prefix='aarelation', instance=aarelation)
elif request.method == 'POST':
form = AARelationForm(request.POST, prefix='aarelation')
if form.is_valid():
form.save()
target = reverse('curation:curate_authority', args=(authority.id,)) + '?tab=aarelations'
if search_key and current_index:
target += '&search=%s¤t=%s' % (search_key, current_index)
return HttpResponseRedirect(target)
context.update({
'form': form,
})
template = 'curation/authority_aarelation_changeview.html'
return render(request, template, context)
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
@check_rules('can_access_view_edit', fn=objectgetter(Authority, 'authority_id'))
def acrelation_for_authority(request, authority_id, acrelation_id):
authority = get_object_or_404(Authority, pk=authority_id)
acrelation = get_object_or_404(ACRelation, pk=acrelation_id)
search_key = request.GET.get('search', request.POST.get('search'))
current_index = request.GET.get('current', request.POST.get('current'))
context = {
'curation_section': 'datasets',
'curation_subsection': 'authorities',
'instance': authority,
'acrelation': acrelation,
'search_key': search_key,
'current_index': current_index
}
if request.method == 'GET':
form = ACRelationForm(instance=acrelation, prefix='acrelation')
elif request.method == 'POST':
form = ACRelationForm(request.POST, instance=acrelation, prefix='acrelation')
if form.is_valid():
form.save()
target = reverse('curation:curate_authority', args=(authority.id,)) + '?tab=acrelations'
if search_key and current_index:
target += '&search=%s¤t=%s' % (search_key, current_index)
return HttpResponseRedirect(target)
context.update({
'form': form,
})
template = 'curation/authority_acrelation_changeview.html'
return render(request, template, context)
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
@check_rules('can_access_view_edit', fn=objectgetter(Authority, 'authority_id'))
def aarelation_for_authority(request, authority_id, aarelation_id):
authority = get_object_or_404(Authority, pk=authority_id)
aarelation = get_object_or_404(AARelation, pk=aarelation_id)
search_key = request.GET.get('search', request.POST.get('search'))
current_index = request.GET.get('current', request.POST.get('current'))
context = {
'curation_section': 'datasets',
'curation_subsection': 'authorities',
'instance': authority,
'aarelation': aarelation,
'search_key': search_key,
'current_index': current_index
}
if request.method == 'GET':
form = AARelationForm(instance=aarelation, prefix='aarelation')
elif request.method == 'POST':
form = AARelationForm(request.POST, instance=aarelation, prefix='aarelation')
if form.is_valid():
form.save()
target = reverse('curation:curate_authority', args=(authority.id,)) + '?tab=aarelations'
if search_key and current_index:
target += '&search=%s¤t=%s' % (search_key, current_index)
return HttpResponseRedirect(target)
context.update({
'form': form,
})
template = 'curation/authority_aarelation_changeview.html'
return render(request, template, context)
@user_passes_test(lambda u: u.is_superuser or u.is_staff)
@check_rules('can_access_view_edit', fn=objectgetter(Authority, 'authority_id'))
def delete_aarelation_for_authority(request, authority_id, aarelation_id, format=None):
authority = get_object_or_404(Authority, pk=authority_id)
aarelation = get_object_or_404(AARelation, pk=aarelation_id)
search_key = request.GET.get('search', request.POST.get('search'))
current_index = request.GET.get('current', request.POST.get('current'))
context = {
'curation_section': 'datasets',
'curation_subsection': 'authorities',
'instance': authority,
'aarelation': aarelation,
'search_key': search_key,
'current_index': current_index
}
if request.POST.get('confirm', False) == 'true':
if not aarelation.modified_on:
aarelation.modified_on = datetime.datetime.now()
aarelation.delete()
if format == 'json':
return JsonResponse({'result': True})
target = reverse('curation:curate_authority', args=(authority.id,)) + '?tab=aarelations'
if search_key and current_index:
target += '&search=%s¤t=%s' % (search_key, current_index)
return HttpResponseRedirect(target)
if format == 'json':
return JsonResponse({'result': False})
template = 'curation/authority_aarelation_delete.html'
return render(request, template, context)
| [
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"rules.contrib.views.objectgetter",
"django.contrib.admin.views.decorators.user_passes_test",
"django.http.JsonResponse",
"django.shortcuts.get_object_or_404",
"django.urls.reverse"
]
| [((844, 900), 'django.contrib.admin.views.decorators.user_passes_test', 'user_passes_test', (['(lambda u: u.is_superuser or u.is_staff)'], {}), '(lambda u: u.is_superuser or u.is_staff)\n', (860, 900), False, 'from django.contrib.admin.views.decorators import staff_member_required, user_passes_test\n'), ((2451, 2507), 'django.contrib.admin.views.decorators.user_passes_test', 'user_passes_test', (['(lambda u: u.is_superuser or u.is_staff)'], {}), '(lambda u: u.is_superuser or u.is_staff)\n', (2467, 2507), False, 'from django.contrib.admin.views.decorators import staff_member_required, user_passes_test\n'), ((4070, 4126), 'django.contrib.admin.views.decorators.user_passes_test', 'user_passes_test', (['(lambda u: u.is_superuser or u.is_staff)'], {}), '(lambda u: u.is_superuser or u.is_staff)\n', (4086, 4126), False, 'from django.contrib.admin.views.decorators import staff_member_required, user_passes_test\n'), ((5512, 5568), 'django.contrib.admin.views.decorators.user_passes_test', 'user_passes_test', (['(lambda u: u.is_superuser or u.is_staff)'], {}), '(lambda u: u.is_superuser or u.is_staff)\n', (5528, 5568), False, 'from django.contrib.admin.views.decorators import staff_member_required, user_passes_test\n'), ((6954, 7010), 'django.contrib.admin.views.decorators.user_passes_test', 'user_passes_test', (['(lambda u: u.is_superuser or u.is_staff)'], {}), '(lambda u: u.is_superuser or u.is_staff)\n', (6970, 7010), False, 'from django.contrib.admin.views.decorators import staff_member_required, user_passes_test\n'), ((1058, 1103), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Authority'], {'pk': 'authority_id'}), '(Authority, pk=authority_id)\n', (1075, 1103), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((2414, 2448), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (2420, 2448), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((2665, 2710), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Authority'], {'pk': 'authority_id'}), '(Authority, pk=authority_id)\n', (2682, 2710), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((4032, 4066), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (4038, 4066), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((4292, 4337), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Authority'], {'pk': 'authority_id'}), '(Authority, pk=authority_id)\n', (4309, 4337), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((4355, 4402), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['ACRelation'], {'pk': 'acrelation_id'}), '(ACRelation, pk=acrelation_id)\n', (4372, 4402), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((5475, 5509), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (5481, 5509), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((5734, 5779), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Authority'], {'pk': 'authority_id'}), '(Authority, pk=authority_id)\n', (5751, 5779), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((5797, 5844), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['AARelation'], {'pk': 'aarelation_id'}), '(AARelation, 
pk=aarelation_id)\n', (5814, 5844), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((6917, 6951), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (6923, 6951), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((7196, 7241), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Authority'], {'pk': 'authority_id'}), '(Authority, pk=authority_id)\n', (7213, 7241), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((7259, 7306), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['AARelation'], {'pk': 'aarelation_id'}), '(AARelation, pk=aarelation_id)\n', (7276, 7306), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((8365, 8399), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (8371, 8399), False, 'from django.shortcuts import get_object_or_404, render, redirect\n'), ((941, 980), 'rules.contrib.views.objectgetter', 'objectgetter', (['Authority', '"""authority_id"""'], {}), "(Authority, 'authority_id')\n", (953, 980), False, 'from rules.contrib.views import permission_required, objectgetter\n'), ((2548, 2587), 'rules.contrib.views.objectgetter', 'objectgetter', (['Authority', '"""authority_id"""'], {}), "(Authority, 'authority_id')\n", (2560, 2587), False, 'from rules.contrib.views import permission_required, objectgetter\n'), ((4167, 4206), 'rules.contrib.views.objectgetter', 'objectgetter', (['Authority', '"""authority_id"""'], {}), "(Authority, 'authority_id')\n", (4179, 4206), False, 'from rules.contrib.views import permission_required, objectgetter\n'), ((5609, 5648), 'rules.contrib.views.objectgetter', 'objectgetter', (['Authority', '"""authority_id"""'], {}), "(Authority, 'authority_id')\n", (5621, 5648), False, 'from rules.contrib.views import permission_required, objectgetter\n'), ((8192, 8220), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['target'], {}), '(target)\n', (8212, 8220), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict\n'), ((8262, 8293), 'django.http.JsonResponse', 'JsonResponse', (["{'result': False}"], {}), "({'result': False})\n", (8274, 8293), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict\n'), ((7051, 7090), 'rules.contrib.views.objectgetter', 'objectgetter', (['Authority', '"""authority_id"""'], {}), "(Authority, 'authority_id')\n", (7063, 7090), False, 'from rules.contrib.views import permission_required, objectgetter\n'), ((7931, 7961), 'django.http.JsonResponse', 'JsonResponse', (["{'result': True}"], {}), "({'result': True})\n", (7943, 7961), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict\n'), ((7980, 8038), 'django.urls.reverse', 'reverse', (['"""curation:curate_authority"""'], {'args': '(authority.id,)'}), "('curation:curate_authority', args=(authority.id,))\n", (7987, 8038), False, 'from django.urls import reverse\n'), ((2260, 2288), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['target'], {}), '(target)\n', (2280, 2288), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict\n'), ((3878, 3906), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['target'], {}), '(target)\n', (3898, 3906), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict\n'), 
((5321, 5349), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['target'], {}), '(target)\n', (5341, 5349), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict\n'), ((6763, 6791), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['target'], {}), '(target)\n', (6783, 6791), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict\n'), ((2036, 2094), 'django.urls.reverse', 'reverse', (['"""curation:curate_authority"""'], {'args': '(authority.id,)'}), "('curation:curate_authority', args=(authority.id,))\n", (2043, 2094), False, 'from django.urls import reverse\n'), ((3654, 3712), 'django.urls.reverse', 'reverse', (['"""curation:curate_authority"""'], {'args': '(authority.id,)'}), "('curation:curate_authority', args=(authority.id,))\n", (3661, 3712), False, 'from django.urls import reverse\n'), ((5097, 5155), 'django.urls.reverse', 'reverse', (['"""curation:curate_authority"""'], {'args': '(authority.id,)'}), "('curation:curate_authority', args=(authority.id,))\n", (5104, 5155), False, 'from django.urls import reverse\n'), ((6539, 6597), 'django.urls.reverse', 'reverse', (['"""curation:curate_authority"""'], {'args': '(authority.id,)'}), "('curation:curate_authority', args=(authority.id,))\n", (6546, 6597), False, 'from django.urls import reverse\n')] |
#!/usr/bin/env python3
# coding: utf8
# /*##########################################################################
#
# Copyright (c) 2015-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Run the tests of the project.
The project is built (unless --installed is given) and its tests are then
run with pytest; any extra command-line arguments are forwarded to pytest.
Test coverage dependencies: coverage, lxml.
"""
__authors__ = ["<NAME>", "<NAME>"]
__date__ = "30/09/2020"
__license__ = "MIT"
import distutils.util
import logging
import os
import subprocess
import sys
import importlib
# Capture all default warnings
logging.captureWarnings(True)
import warnings
warnings.simplefilter('default')
logger = logging.getLogger("run_tests")
logger.setLevel(logging.WARNING)
logger.info("Python %s %s", sys.version, tuple.__itemsize__ * 8)
try:
import numpy
except Exception as error:
logger.warning("Numpy missing: %s", error)
else:
logger.info("Numpy %s", numpy.version.version)
try:
import h5py
except Exception as error:
logger.warning("h5py missing: %s", error)
else:
logger.info("h5py %s", h5py.version.version)
def get_project_name(root_dir):
"""Retrieve project name by running python setup.py --name in root_dir.
:param str root_dir: Directory where to run the command.
:return: The name of the project stored in root_dir
"""
logger.debug("Getting project name in %s", root_dir)
p = subprocess.Popen([sys.executable, "setup.py", "--name"],
shell=False, cwd=root_dir, stdout=subprocess.PIPE)
name, _stderr_data = p.communicate()
logger.debug("subprocess ended with rc= %s", p.returncode)
return name.split()[-1].decode('ascii')
def is_debug_python():
"""Returns true if the Python interpreter is in debug mode."""
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
if sysconfig.get_config_var("Py_DEBUG"):
return True
return hasattr(sys, "gettotalrefcount")
def build_project(name, root_dir):
"""Run python setup.py build for the project.
Build directory can be modified by environment variables.
:param str name: Name of the project.
:param str root_dir: Root directory of the project
:return: The path to the directory were build was performed
"""
platform = distutils.util.get_platform()
architecture = "lib.%s-%i.%i" % (platform,
sys.version_info[0], sys.version_info[1])
if is_debug_python():
architecture += "-pydebug"
if os.environ.get("PYBUILD_NAME") == name:
# we are in the debian packaging way
home = os.environ.get("PYTHONPATH", "").split(os.pathsep)[-1]
elif os.environ.get("BUILDPYTHONPATH"):
home = os.path.abspath(os.environ.get("BUILDPYTHONPATH", ""))
else:
home = os.path.join(root_dir, "build", architecture)
logger.warning("Building %s to %s", name, home)
p = subprocess.Popen([sys.executable, "setup.py", "build"],
shell=False, cwd=root_dir)
logger.debug("subprocess ended with rc= %s", p.wait())
if os.path.isdir(home):
return home
alt_home = os.path.join(os.path.dirname(home), "lib")
if os.path.isdir(alt_home):
return alt_home
def import_project_module(project_name, project_dir):
"""Import project module, from the system of from the project directory"""
if "--installed" in sys.argv:
try:
module = importlib.import_module(project_name)
except Exception:
logger.error("Cannot run tests on installed version: %s not installed or raising error.",
project_name)
raise
else: # Use built source
build_dir = build_project(project_name, project_dir)
if build_dir is None:
logging.error("Built project is not available !!! investigate")
sys.path.insert(0, build_dir)
logger.warning("Patched sys.path, added: '%s'", build_dir)
module = importlib.import_module(project_name)
return module
if __name__ == "__main__": # Needed for multiprocessing support on Windows
import pytest
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_NAME = get_project_name(PROJECT_DIR)
logger.info("Project name: %s", PROJECT_NAME)
project_module = import_project_module(PROJECT_NAME, PROJECT_DIR)
PROJECT_VERSION = getattr(project_module, 'version', '')
PROJECT_PATH = project_module.__path__[0]
def normalize_option(option):
option_parts = option.split(os.path.sep)
if option_parts == ["src", "silx"]:
return PROJECT_PATH
if option_parts[:2] == ["src", "silx"]:
return os.path.join(PROJECT_PATH, *option_parts[2:])
return option
args = [normalize_option(p) for p in sys.argv[1:] if p != "--installed"]
# Run test on PROJECT_PATH if nothing is specified
without_options = [a for a in args if not a.startswith("-")]
if len(without_options) == 0:
args += [PROJECT_PATH]
argv = ["--rootdir", PROJECT_PATH] + args
sys.exit(pytest.main(argv))
| [
"logging.getLogger",
"sys.path.insert",
"importlib.import_module",
"logging.captureWarnings",
"subprocess.Popen",
"os.environ.get",
"os.path.join",
"pytest.main",
"os.path.dirname",
"os.path.isdir",
"warnings.simplefilter",
"os.path.abspath",
"logging.error",
"distutils.sysconfig.get_config_var"
]
| [((1714, 1743), 'logging.captureWarnings', 'logging.captureWarnings', (['(True)'], {}), '(True)\n', (1737, 1743), False, 'import logging\n'), ((1760, 1792), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""'], {}), "('default')\n", (1781, 1792), False, 'import warnings\n'), ((1803, 1833), 'logging.getLogger', 'logging.getLogger', (['"""run_tests"""'], {}), "('run_tests')\n", (1820, 1833), False, 'import logging\n'), ((2539, 2651), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, 'setup.py', '--name']"], {'shell': '(False)', 'cwd': 'root_dir', 'stdout': 'subprocess.PIPE'}), "([sys.executable, 'setup.py', '--name'], shell=False, cwd=\n root_dir, stdout=subprocess.PIPE)\n", (2555, 2651), False, 'import subprocess\n'), ((3067, 3103), 'distutils.sysconfig.get_config_var', 'sysconfig.get_config_var', (['"""Py_DEBUG"""'], {}), "('Py_DEBUG')\n", (3091, 3103), True, 'import distutils.sysconfig as sysconfig\n'), ((4131, 4218), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, 'setup.py', 'build']"], {'shell': '(False)', 'cwd': 'root_dir'}), "([sys.executable, 'setup.py', 'build'], shell=False, cwd=\n root_dir)\n", (4147, 4218), False, 'import subprocess\n'), ((4306, 4325), 'os.path.isdir', 'os.path.isdir', (['home'], {}), '(home)\n', (4319, 4325), False, 'import os\n'), ((4412, 4435), 'os.path.isdir', 'os.path.isdir', (['alt_home'], {}), '(alt_home)\n', (4425, 4435), False, 'import os\n'), ((3730, 3760), 'os.environ.get', 'os.environ.get', (['"""PYBUILD_NAME"""'], {}), "('PYBUILD_NAME')\n", (3744, 3760), False, 'import os\n'), ((3894, 3927), 'os.environ.get', 'os.environ.get', (['"""BUILDPYTHONPATH"""'], {}), "('BUILDPYTHONPATH')\n", (3908, 3927), False, 'import os\n'), ((4375, 4396), 'os.path.dirname', 'os.path.dirname', (['home'], {}), '(home)\n', (4390, 4396), False, 'import os\n'), ((5092, 5121), 'sys.path.insert', 'sys.path.insert', (['(0)', 'build_dir'], {}), '(0, build_dir)\n', (5107, 5121), False, 'import sys\n'), ((5206, 5243), 'importlib.import_module', 'importlib.import_module', (['project_name'], {}), '(project_name)\n', (5229, 5243), False, 'import importlib\n'), ((5393, 5418), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5408, 5418), False, 'import os\n'), ((6316, 6333), 'pytest.main', 'pytest.main', (['argv'], {}), '(argv)\n', (6327, 6333), False, 'import pytest\n'), ((4024, 4069), 'os.path.join', 'os.path.join', (['root_dir', '"""build"""', 'architecture'], {}), "(root_dir, 'build', architecture)\n", (4036, 4069), False, 'import os\n'), ((4664, 4701), 'importlib.import_module', 'importlib.import_module', (['project_name'], {}), '(project_name)\n', (4687, 4701), False, 'import importlib\n'), ((5020, 5083), 'logging.error', 'logging.error', (['"""Built project is not available !!! investigate"""'], {}), "('Built project is not available !!! investigate')\n", (5033, 5083), False, 'import logging\n'), ((5924, 5969), 'os.path.join', 'os.path.join', (['PROJECT_PATH', '*option_parts[2:]'], {}), '(PROJECT_PATH, *option_parts[2:])\n', (5936, 5969), False, 'import os\n'), ((3960, 3997), 'os.environ.get', 'os.environ.get', (['"""BUILDPYTHONPATH"""', '""""""'], {}), "('BUILDPYTHONPATH', '')\n", (3974, 3997), False, 'import os\n'), ((3830, 3862), 'os.environ.get', 'os.environ.get', (['"""PYTHONPATH"""', '""""""'], {}), "('PYTHONPATH', '')\n", (3844, 3862), False, 'import os\n')] |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import traceback
from robot.errors import RobotError
from .platform import JYTHON, RERAISED_EXCEPTIONS
from .unic import unic
EXCLUDE_ROBOT_TRACES = not os.getenv('ROBOT_INTERNAL_TRACES')
if JYTHON:
from java.io import StringWriter, PrintWriter
from java.lang import Throwable, OutOfMemoryError
else:
Throwable = ()
def get_error_message():
"""Returns error message of the last occurred exception.
This method handles also exceptions containing unicode messages. Thus it
MUST be used to get messages from all exceptions originating outside the
framework.
"""
return ErrorDetails().message
def get_error_details(exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""Returns error message and details of the last occurred exception."""
details = ErrorDetails(exclude_robot_traces=exclude_robot_traces)
return details.message, details.traceback
def ErrorDetails(exc_info=None, exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""This factory returns an object that wraps the last occurred exception
It has attributes `message`, `traceback` and `error`, where `message`
contains type and message of the original error, `traceback` contains the
traceback/stack trace and `error` contains the original error instance.
"""
exc_type, exc_value, exc_traceback = exc_info or sys.exc_info()
if exc_type in RERAISED_EXCEPTIONS:
raise exc_value
details = PythonErrorDetails \
if not isinstance(exc_value, Throwable) else JavaErrorDetails
return details(exc_type, exc_value, exc_traceback, exclude_robot_traces)
class _ErrorDetails(object):
_generic_exception_names = ('AssertionError', 'AssertionFailedError',
'Exception', 'Error', 'RuntimeError',
'RuntimeException')
def __init__(self, exc_type, exc_value, exc_traceback,
exclude_robot_traces=True):
self.error = exc_value
self._exc_type = exc_type
self._exc_traceback = exc_traceback
self._exclude_robot_traces = exclude_robot_traces
self._message = None
self._traceback = None
@property
def message(self):
if self._message is None:
self._message = self._get_message()
return self._message
def _get_message(self):
raise NotImplementedError
@property
def traceback(self):
if self._traceback is None:
self._traceback = self._get_details()
return self._traceback
def _get_details(self):
raise NotImplementedError
def _get_name(self, exc_type):
try:
return exc_type.__name__
except AttributeError:
return unic(exc_type)
def _format_message(self, name, message):
message = unic(message or '')
message = self._clean_up_message(message, name)
name = name.split('.')[-1] # Use only last part of the name
if not message:
return name
if self._is_generic_exception(name):
return message
return '%s: %s' % (name, message)
def _is_generic_exception(self, name):
return (name in self._generic_exception_names or
isinstance(self.error, RobotError) or
getattr(self.error, 'ROBOT_SUPPRESS_NAME', False))
def _clean_up_message(self, message, name):
return message
class PythonErrorDetails(_ErrorDetails):
def _get_message(self):
name = self._get_name(self._exc_type)
return self._format_message(name, unic(self.error))
def _get_details(self):
if isinstance(self.error, RobotError):
return self.error.details
return 'Traceback (most recent call last):\n' + self._get_traceback()
def _get_traceback(self):
tb = self._exc_traceback
while tb and self._is_excluded_traceback(tb):
tb = tb.tb_next
return ''.join(traceback.format_tb(tb)).rstrip() or ' None'
def _is_excluded_traceback(self, traceback):
if not self._exclude_robot_traces:
return False
module = traceback.tb_frame.f_globals.get('__name__')
return module and module.startswith('robot.')
class JavaErrorDetails(_ErrorDetails):
_java_trace_re = re.compile('^\s+at (\w.+)')
_ignored_java_trace = ('org.python.', 'robot.running.', 'robot$py.',
'sun.reflect.', 'java.lang.reflect.')
def _get_message(self):
exc_name = self._get_name(self._exc_type)
# OOME.getMessage and even toString seem to throw NullPointerException
if not self._is_out_of_memory_error(self._exc_type):
exc_msg = self.error.getMessage()
else:
exc_msg = str(self.error)
return self._format_message(exc_name, exc_msg)
def _is_out_of_memory_error(self, exc_type):
return exc_type is OutOfMemoryError
def _get_details(self):
# OOME.printStackTrace seems to throw NullPointerException
if self._is_out_of_memory_error(self._exc_type):
return ''
output = StringWriter()
self.error.printStackTrace(PrintWriter(output))
details = '\n'.join(line for line in output.toString().splitlines()
if not self._is_ignored_stack_trace_line(line))
msg = unic(self.error.getMessage() or '')
if msg:
details = details.replace(msg, '', 1)
return details
def _is_ignored_stack_trace_line(self, line):
if not line:
return True
res = self._java_trace_re.match(line)
if res is None:
return False
location = res.group(1)
for entry in self._ignored_java_trace:
if location.startswith(entry):
return True
return False
def _clean_up_message(self, msg, name):
msg = self._remove_stack_trace_lines(msg)
return self._remove_exception_name(msg, name).strip()
def _remove_stack_trace_lines(self, msg):
lines = msg.splitlines()
while lines:
if self._java_trace_re.match(lines[-1]):
lines.pop()
else:
break
return '\n'.join(lines)
def _remove_exception_name(self, msg, name):
tokens = msg.split(':', 1)
if len(tokens) == 2 and tokens[0] == name:
msg = tokens[1]
return msg
| [
"traceback.format_tb",
"os.getenv",
"re.compile",
"sys.exc_info",
"java.io.PrintWriter",
"traceback.tb_frame.f_globals.get",
"java.io.StringWriter"
]
| [((796, 830), 'os.getenv', 'os.getenv', (['"""ROBOT_INTERNAL_TRACES"""'], {}), "('ROBOT_INTERNAL_TRACES')\n", (805, 830), False, 'import os\n'), ((4930, 4959), 're.compile', 're.compile', (['"""^\\\\s+at (\\\\w.+)"""'], {}), "('^\\\\s+at (\\\\w.+)')\n", (4940, 4959), False, 'import re\n'), ((1976, 1990), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1988, 1990), False, 'import sys\n'), ((4769, 4813), 'traceback.tb_frame.f_globals.get', 'traceback.tb_frame.f_globals.get', (['"""__name__"""'], {}), "('__name__')\n", (4801, 4813), False, 'import traceback\n'), ((5754, 5768), 'java.io.StringWriter', 'StringWriter', ([], {}), '()\n', (5766, 5768), False, 'from java.io import StringWriter, PrintWriter\n'), ((5804, 5823), 'java.io.PrintWriter', 'PrintWriter', (['output'], {}), '(output)\n', (5815, 5823), False, 'from java.io import StringWriter, PrintWriter\n'), ((4588, 4611), 'traceback.format_tb', 'traceback.format_tb', (['tb'], {}), '(tb)\n', (4607, 4611), False, 'import traceback\n')] |
import unittest
from unittest import TestCase
from misc import verify
class TestVerify(TestCase):
"""Tests misc.py verifies function."""
def test_verify__with_zero_threshold_and_expected_succeeds(self):
"""Test passes when expected rate, actual rate and threshold are all zero."""
result = verify(metric="Query failure rate", actual=0.0, expected=0.0, threshold=0.0)
self.assertEqual(result, 0)
def test_verify__fails_when_positive_delta_is_larger_than_postive_threshold(self):
"""Test fails when positive delta between actual rate and expected rate exceeds positive threshold."""
result = verify(metric="Update latency", actual=200, expected=100, threshold=0.1)
self.assertEqual(result, 1)
def test_verify__fails_when_negative_delta_is_smaller_than_negative_threshold(self):
"""Test fails when negative delta between actual rate and expected rate exceeds negative threshold."""
result = verify(metric="Update latency", actual=50, expected=100, threshold=-0.01)
self.assertEqual(result, 1)
def test_verify__fails_when_negative_delta_and_positive_threshold(self):
"""Test fails when delta between actual rate and expected rate exceeds threshold."""
result = verify(metric="Update latency", actual=50, expected=100, threshold=0.01)
self.assertEqual(result, 0)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"misc.verify"
]
| [((1414, 1429), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1427, 1429), False, 'import unittest\n'), ((318, 394), 'misc.verify', 'verify', ([], {'metric': '"""Query failure rate"""', 'actual': '(0.0)', 'expected': '(0.0)', 'threshold': '(0.0)'}), "(metric='Query failure rate', actual=0.0, expected=0.0, threshold=0.0)\n", (324, 394), False, 'from misc import verify\n'), ((647, 719), 'misc.verify', 'verify', ([], {'metric': '"""Update latency"""', 'actual': '(200)', 'expected': '(100)', 'threshold': '(0.1)'}), "(metric='Update latency', actual=200, expected=100, threshold=0.1)\n", (653, 719), False, 'from misc import verify\n'), ((974, 1047), 'misc.verify', 'verify', ([], {'metric': '"""Update latency"""', 'actual': '(50)', 'expected': '(100)', 'threshold': '(-0.01)'}), "(metric='Update latency', actual=50, expected=100, threshold=-0.01)\n", (980, 1047), False, 'from misc import verify\n'), ((1272, 1344), 'misc.verify', 'verify', ([], {'metric': '"""Update latency"""', 'actual': '(50)', 'expected': '(100)', 'threshold': '(0.01)'}), "(metric='Update latency', actual=50, expected=100, threshold=0.01)\n", (1278, 1344), False, 'from misc import verify\n')] |
from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
from django.contrib.auth.apps import AuthConfig
from django.contrib.contenttypes.apps import ContentTypesConfig
from django.contrib.sessions.apps import SessionsConfig
from django.db.models.signals import post_migrate
from django_celery_results.apps import CeleryResultConfig
from geotrek.common.utils.signals import check_srid_has_meter_unit, pm_callback
class GeotrekConfig(AppConfig):
"""
Base class to handle table move on right schemas, and load SQL files
!! WARNING !! need to create subclass in geotrek.myapp.apps for project apps,
and create subclasses here for external subclasses
"""
def ready(self):
post_migrate.connect(pm_callback, sender=self, dispatch_uid='geotrek.core.pm_callback')
check_srid_has_meter_unit()
class AuthGeotrekConfig(AuthConfig, GeotrekConfig):
"""
bind for django.contrib.auth
"""
pass
class ContenttypeGeotrekConfig(ContentTypesConfig, GeotrekConfig):
"""
bind for django.contrib.contenttype
"""
pass
class SessionsGeotrekConfig(SessionsConfig, GeotrekConfig):
pass
class AdminGeotrekConfig(AdminConfig, GeotrekConfig):
pass
class CeleryGeotrekConfig(GeotrekConfig, CeleryResultConfig):
pass
class EasyThumbnailsGeotrekConfig(GeotrekConfig):
name = 'easy_thumbnails'
verbose_name = 'Easy thumbnails'
| [
"django.db.models.signals.post_migrate.connect",
"geotrek.common.utils.signals.check_srid_has_meter_unit"
]
| [((730, 822), 'django.db.models.signals.post_migrate.connect', 'post_migrate.connect', (['pm_callback'], {'sender': 'self', 'dispatch_uid': '"""geotrek.core.pm_callback"""'}), "(pm_callback, sender=self, dispatch_uid=\n 'geotrek.core.pm_callback')\n", (750, 822), False, 'from django.db.models.signals import post_migrate\n'), ((826, 853), 'geotrek.common.utils.signals.check_srid_has_meter_unit', 'check_srid_has_meter_unit', ([], {}), '()\n', (851, 853), False, 'from geotrek.common.utils.signals import check_srid_has_meter_unit, pm_callback\n')] |
import sys, os
from subprocess import call
try:
from downloadPdb import downloadPDB
except ImportError:
from .downloadPdb import downloadPDB
pdbListFile="/home/rsanchez/Tesis/rriPredMethod/data/joanDimers/117_dimers_list.tsv"
outPath="/home/rsanchez/Tesis/rriPredMethod/data/joanDimers/pdbFiles/rawPDBs"
USE_BIO_UNIT=False
##def downloadPDB(pdbId, pdbOutPath, useBioUnit):
#### descargar pdb: wget ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/1i1q.pdb2.gz o ya descomprimido
#### wget -qO- ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/1i1q.pdb2.gz |zcat > 1i1q.pdb
## outName= os.path.join(pdbOutPath,pdbId+'.pdb')
## if not os.path.isfile(outName):
## if useBioUnit:
## cmd= 'wget -qO- ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/%s.pdb1.gz |zcat > %s'%(pdbId.lower(), outName)
## else:
## cmd= 'wget -qO- http://www.pdb.org/pdb/files/%s.pdb | cat > %s'%(pdbId.upper(), outName)
## print(cmd)
## call(cmd, shell= True)
def downloadInFile(fname, outPath, useBioUnit):
with open(fname) as f:
for line in f:
pdbId= line.split()[0]
print(pdbId)
downloadPDB(pdbId, outPath, bioUnit= 0 if useBioUnit else None)
if __name__=="__main__":
if len(sys.argv)==3:
pdbListFile= os.path.abspath(os.path.expanduser(sys.argv[1]))
outPath= os.path.abspath(os.path.expanduser(sys.argv[2]))
print( pdbListFile, outPath)
downloadInFile(pdbListFile, outPath, USE_BIO_UNIT)
| [
"downloadPdb.downloadPDB",
"os.path.expanduser"
]
| [((1146, 1208), 'downloadPdb.downloadPDB', 'downloadPDB', (['pdbId', 'outPath'], {'bioUnit': '(0 if useBioUnit else None)'}), '(pdbId, outPath, bioUnit=0 if useBioUnit else None)\n', (1157, 1208), False, 'from downloadPdb import downloadPDB\n'), ((1299, 1330), 'os.path.expanduser', 'os.path.expanduser', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1317, 1330), False, 'import sys, os\n'), ((1361, 1392), 'os.path.expanduser', 'os.path.expanduser', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (1379, 1392), False, 'import sys, os\n')] |
import re
from rest_framework import serializers
from .models import Collection, CollectionIcon
class CollectionSerializer(serializers.ModelSerializer):
"""Collections's serializer"""
class Meta:
model = Collection
read_only = ('token', )
class CollectionIconSerializer(serializers.ModelSerializer):
"""CollectionIcon's Serializer. """
class Meta:
model = CollectionIcon
def validate_width(self, attrs, source):
width = attrs[source]
if width < 1.0:
raise serializers.ValidationError('Width should be greater than 1.0')
return attrs
def validate_name(self, attrs, source):
name = attrs[source].lower()
name = re.sub(r'[^a-z0-9\-]', '-', name).strip('-')
name = re.sub(r'-+', '-', name)
if name:
attrs[source] = name
else:
raise serializers.ValidationError('Invalid name')
return attrs
def validate(self, attrs):
packicon = attrs.get('packicon')
svg_d = attrs.get('svg_d')
width = attrs.get('width')
if packicon or (svg_d and width): return attrs
raise serializers.ValidationError(
'Either a packicon or the shape of icon should be given'
)
| [
"re.sub",
"rest_framework.serializers.ValidationError"
]
| [((780, 803), 're.sub', 're.sub', (['"""-+"""', '"""-"""', 'name'], {}), "('-+', '-', name)\n", (786, 803), False, 'import re\n'), ((1164, 1254), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Either a packicon or the shape of icon should be given"""'], {}), "(\n 'Either a packicon or the shape of icon should be given')\n", (1191, 1254), False, 'from rest_framework import serializers\n'), ((538, 601), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Width should be greater than 1.0"""'], {}), "('Width should be greater than 1.0')\n", (565, 601), False, 'from rest_framework import serializers\n'), ((887, 930), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Invalid name"""'], {}), "('Invalid name')\n", (914, 930), False, 'from rest_framework import serializers\n'), ((720, 753), 're.sub', 're.sub', (['"""[^a-z0-9\\\\-]"""', '"""-"""', 'name'], {}), "('[^a-z0-9\\\\-]', '-', name)\n", (726, 753), False, 'import re\n')] |
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "readme.md").read_text(encoding="utf-8")
setup(
name="data_dashboard",
version="0.1.1",
description="Dashboard to explore the data and to create baseline Machine Learning model.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/maciek3000/data_dashboard",
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Visualization"
],
package_dir={"data_dashboard": "data_dashboard"},
packages=find_packages(),
python_requires=">=3.7",
install_requires=[
"pandas>=1.2.3",
"numpy>=1.19.5",
"scipy>=1.6.1",
"beautifulsoup4>=4.9.3",
"scikit-learn>=0.24.1",
"seaborn>=0.11.1",
"bokeh>=2.3.0",
"Jinja2>=2.11.3",
"xgboost>=1.3.3",
"lightgbm>=3.2.0"
],
package_data={
"data_dashboard": ["static/*", "templates/*", "examples/*"]
},
project_urls={
"Github": "https://github.com/maciek3000/data_dashboard",
},
)
| [
"setuptools.find_packages",
"pathlib.Path"
]
| [((1018, 1033), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1031, 1033), False, 'from setuptools import setup, find_packages\n'), ((68, 90), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (80, 90), False, 'import pathlib\n')] |
#coding: utf-8
import sys
import os
import asyncio
import websockets
import json
import socket
import xlrd
#global vars
phd_data = None
pro_data = None
def get_host_ip():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('192.168.127.12', 65535))
ip = s.getsockname()[0]
finally:
s.close()
return ip
def read_xls(name):
try:
book = xlrd.open_workbook(name)
except:
print("Open Excel(%s) failed!" % name)
for i in range(book.nsheets):
s = book.sheet_by_index(i)
sname = s.name
svalue = list()
for r in range(s.nrows):
svalue.append( s.row_values(r) )
ctx[i] = (sname, svalue)
return ctx
#生成json
def gen_pro():
ret = {
"header": [
{
"name": "id",
"title": "ID",
"size": 50,
"sortable": True,
"sortDir": "asc",
"format": "number"
},
{
"name": "name",
"title": "Name",
"sortable": True
},
{
"name": "start",
"title": "Start",
"sortable": True,
"size": 150,
"format": "date",
"formatMask": "dd-mm-yyyy"
},
{
"name": "age",
"title": "Age",
"sortable": True,
"size": 80
},
{
"name": "salary",
"title": "Salary",
"sortable": True,
"size": 150,
"format": "money",
"show": True
}
],
"data":[]
}
return ret
async def proc_msg(ws, msg):
method = msg.get('method')
if method == 'host_ip':
ip=get_host_ip()
ret = {
"method":method,
"type":'success',
'return':ip
}
await ws.send(json.dumps(ret))
elif method=='genpro':
phd_file = msg.get('phd_file')
if phd_file:
phd_data = read_xls(phd_file)
pro_file = msg.get('pro_file')
if pro_file:
pro_data = read_xls(pro_file)
data = gen_pro()
ret = {
"method":method,
"type":'success',
'return':data
}
await ws.send(json.dumps(ret))
else:
ret = {'type':'unknown'}
await ws.send(json.dumps(ret))
async def recv_msg(websocket):
while True:
recv_text = await websocket.recv()
try:
msg = json.loads(recv_text)
await proc_msg(websocket, msg)
except:
ret = {'type':'error'}
await ws.send(json.dumps(ret))
async def main_logic(websocket, path):
await recv_msg(websocket)
port = 5678
if len(sys.argv) >=2:
port = sys.argv[1]
ws_server = websockets.serve(main_logic, '0.0.0.0', port)
asyncio.get_event_loop().run_until_complete(ws_server)
asyncio.get_event_loop().run_forever()
| [
"json.loads",
"socket.socket",
"xlrd.open_workbook",
"json.dumps",
"websockets.serve",
"asyncio.get_event_loop"
]
| [((2634, 2679), 'websockets.serve', 'websockets.serve', (['main_logic', '"""0.0.0.0"""', 'port'], {}), "(main_logic, '0.0.0.0', port)\n", (2650, 2679), False, 'import websockets\n'), ((195, 243), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (208, 243), False, 'import socket\n'), ((411, 435), 'xlrd.open_workbook', 'xlrd.open_workbook', (['name'], {}), '(name)\n', (429, 435), False, 'import xlrd\n'), ((2681, 2705), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2703, 2705), False, 'import asyncio\n'), ((2736, 2760), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2758, 2760), False, 'import asyncio\n'), ((2335, 2356), 'json.loads', 'json.loads', (['recv_text'], {}), '(recv_text)\n', (2345, 2356), False, 'import json\n'), ((1704, 1719), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (1714, 1719), False, 'import json\n'), ((2113, 2128), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (2123, 2128), False, 'import json\n'), ((2196, 2211), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (2206, 2211), False, 'import json\n'), ((2477, 2492), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (2487, 2492), False, 'import json\n')] |
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A DFA walker that knows how to dump them to serialized strings.#/
from io import StringIO
from antlr4 import DFA
from antlr4.Utils import str_list
from antlr4.dfa.DFAState import DFAState
class DFASerializer(object):
__slots__ = ('dfa', 'literalNames', 'symbolicNames')
def __init__(self, dfa:DFA, literalNames:list=None, symbolicNames:list=None):
self.dfa = dfa
self.literalNames = literalNames
self.symbolicNames = symbolicNames
def __str__(self):
if self.dfa.s0 is None:
return None
with StringIO() as buf:
for s in self.dfa.sortedStates():
n = 0
if s.edges is not None:
n = len(s.edges)
for i in range(0, n):
t = s.edges[i]
if t is not None and t.stateNumber != 0x7FFFFFFF:
buf.write(self.getStateString(s))
label = self.getEdgeLabel(i)
buf.write("-")
buf.write(label)
buf.write("->")
buf.write(self.getStateString(t))
buf.write('\n')
output = buf.getvalue()
if len(output)==0:
return None
else:
return output
def getEdgeLabel(self, i:int):
if i==0:
return "EOF"
if self.literalNames is not None and i<=len(self.literalNames):
return self.literalNames[i-1]
elif self.symbolicNames is not None and i<=len(self.symbolicNames):
return self.symbolicNames[i-1]
else:
return str(i-1)
def getStateString(self, s:DFAState):
n = s.stateNumber
baseStateStr = ( ":" if s.isAcceptState else "") + "s" + str(n) + ( "^" if s.requiresFullContext else "")
if s.isAcceptState:
if s.predicates is not None:
return baseStateStr + "=>" + str_list(s.predicates)
else:
return baseStateStr + "=>" + str(s.prediction)
else:
return baseStateStr
class LexerDFASerializer(DFASerializer):
def __init__(self, dfa:DFA):
super().__init__(dfa, None)
def getEdgeLabel(self, i:int):
return "'" + chr(i) + "'"
| [
"io.StringIO",
"antlr4.Utils.str_list"
]
| [((757, 767), 'io.StringIO', 'StringIO', ([], {}), '()\n', (765, 767), False, 'from io import StringIO\n'), ((2186, 2208), 'antlr4.Utils.str_list', 'str_list', (['s.predicates'], {}), '(s.predicates)\n', (2194, 2208), False, 'from antlr4.Utils import str_list\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Calibration Controller
Performs calibration for hue, center of camera position, and servo offsets
"""
import os
import cv2
import time
import json
import argparse
import datetime
import numpy as np
import logging as log
from env import MoabEnv
from typing import Tuple
from common import Vector2
from detector import hsv_detector
from controllers import pid_controller
from dataclasses import dataclass, astuple
from hardware import plate_angles_to_servo_positions
@dataclass
class CalibHue:
hue: int = 44 # Reasonable default
success: bool = False
early_quit: bool = False # If menu is pressed before the calibration is complete
def __iter__(self):
return iter(astuple(self))
@dataclass
class CalibPos:
position: Tuple[float, float] = (0.0, 0.0)
success: bool = False
early_quit: bool = False # If menu is pressed before the calibration is complete
def __iter__(self):
return iter(astuple(self))
@dataclass
class CalibServos:
servos: Tuple[float, float, float] = (0.0, 0.0, 0.0)
success: bool = False
early_quit: bool = False # If menu is pressed before the calibration is complete
def __iter__(self):
return iter(astuple(self))
def ball_close_enough(x, y, radius, max_ball_dist=0.045, min_ball_dist=0.01):
# reject balls which are too far from the center and too small
return (
np.abs(x) < max_ball_dist
and np.abs(y) < max_ball_dist
and radius > min_ball_dist
)
def calibrate_hue(camera_fn, detector_fn, is_menu_down_fn):
hue_low = 0
hue_high = 360
hue_steps = 41 # Is 41 instead of 40 so that the steps are even
img_frame, elapsed_time = camera_fn()
hue_options = list(np.linspace(hue_low, hue_high, hue_steps))
detected_hues = []
for hue in hue_options:
if is_menu_down_fn():
return CalibHue(early_quit=True)
img_frame, elapsed_time = camera_fn()
ball_detected, ((x, y), radius) = detector_fn(img_frame, hue=hue, debug=True)
# If we found a ball roughly in the center that is large enough
if ball_detected and ball_close_enough(x, y, radius):
log.info(
f"hue={hue:0.3f}, ball_detected={ball_detected}, "
f"(x, y)={x:0.3f} {y:0.3f}, radius={radius:0.3f}"
)
detected_hues.append(hue)
if len(detected_hues) > 0:
# https://en.wikipedia.org/wiki/Mean_of_circular_quantities
detected_hues_rad = np.radians(detected_hues)
sines, cosines = np.sin(detected_hues_rad), np.cos(detected_hues_rad)
sin_mean, cos_mean = np.mean(sines), np.mean(cosines)
avg_hue_rad = np.arctan2(sin_mean, cos_mean)
avg_hue = np.degrees(avg_hue_rad) % 360 # Convert back to [0, 360]
print(f"Hues are: {detected_hues}")
print(f"Hue calibrated: {avg_hue:0.2f}")
print(f"Avg hue: {avg_hue:0.2f}")
return CalibHue(hue=int(avg_hue), success=True)
else:
log.warning(f"Hue calibration failed.")
return CalibHue()
def calibrate_pos(camera_fn, detector_fn, hue, is_menu_down_fn):
for i in range(10): # Try and detect for 10 frames before giving up
if is_menu_down_fn():
return CalibPos(early_quit=True)
img_frame, elapsed_time = camera_fn()
ball_detected, ((x, y), radius) = detector_fn(img_frame, hue=hue)
# If we found a ball roughly in the center that is large enough
if ball_detected and ball_close_enough(x, y, radius):
x_offset = round(x, 3)
y_offset = round(y, 3)
log.info(f"Offset calibrated: [{x_offset:.3f}, {y_offset:.3f}]")
return CalibPos(position=(x_offset, y_offset), success=True)
log.warning(f"Offset calibration failed.")
return CalibPos()
def calibrate_servo_offsets(pid_fn, env, stationary_vel=0.005, time_limit=20):
start_time = time.time()
action = Vector2(0, 0)
# Initial high vel_history (to use the vel_hist[-100:] later)
vel_x_hist = [1.0 for _ in range(100)]
vel_y_hist = [1.0 for _ in range(100)]
# Run until the ball has stabilized or the time limit was reached
while time.time() < start_time + time_limit:
state = env.step(action)
action, info = pid_fn(state)
(x, y, vel_x, vel_y, sum_x, sum_y), ball_detected, buttons = state
# Quit on menu down
if buttons.menu_button:
return CalibServos(early_quit=True)
if ball_detected:
vel_x_hist.append(vel_x)
vel_y_hist.append(vel_y)
prev_100_x = np.mean(np.abs(vel_x_hist[-100:]))
prev_100_y = np.mean(np.abs(vel_y_hist[-100:]))
print("Prev 100: ", (prev_100_x, prev_100_y))
# If the average velocity for the last 100 timesteps is under the limit
if (prev_100_x < stationary_vel) and (prev_100_y < stationary_vel):
# Calculate offsets by calculating servo positions at the
# current stable position and subtracting the `default` zeroed
# position of the servos.
servos = np.array(plate_angles_to_servo_positions(*action))
servos_zeroed = np.array(plate_angles_to_servo_positions(0, 0))
servo_offsets = list(servos - servos_zeroed)
return CalibServos(servos=servo_offsets, success=True)
# If the plate could be stabilized in time_limit seconds, quit
log.warning(f"Servo calibration failed.")
return CalibServos()
def write_calibration(calibration_dict, calibration_file="bot.json"):
log.info("Writing calibration.")
# write out stuff
with open(calibration_file, "w+") as outfile:
log.info(f"Creating calibration file {calibration_file}")
json.dump(calibration_dict, outfile, indent=4, sort_keys=True)
def read_calibration(calibration_file="bot.json"):
log.info("Reading previous calibration.")
if os.path.isfile(calibration_file):
with open(calibration_file, "r") as f:
calibration_dict = json.load(f)
else: # Use defaults
calibration_dict = {
"ball_hue": 44,
"plate_offsets": (0.0, 0.0),
"servo_offsets": (0.0, 0.0, 0.0),
}
return calibration_dict
def wait_for_joystick_or_menu(hardware, sleep_time=1 / 30):
"""Waits for either the joystick or the menu. Returns the buttons"""
while True:
buttons = hardware.get_buttons()
if buttons.menu_button or buttons.joy_button:
return buttons
time.sleep(sleep_time)
def wait_for_menu(hardware, sleep_time=1 / 30):
while True:
menu_button, joy_button, joy_x, joy_y = hardware.get_buttons()
time.sleep(sleep_time)
if menu_button:
return
def run_calibration(env, pid_fn, calibration_file):
# Get some hidden things from env
hardware = env.hardware
camera_fn = hardware.camera
detector_fn = hardware.detector
def is_menu_down(hardware=hardware) -> bool:
return hardware.get_buttons().menu_button
# lift plate up first
hardware.set_angles(0, 0)
# Display message and wait for joystick
hardware.display(
"put ball on stand\nclick joystick",
# "Place ball in\ncenter using\nclear stand.\n\n" "Click joystick\nwhen ready."
scrolling=True,
)
buttons = wait_for_joystick_or_menu(hardware)
if buttons.menu_button: # Early quit
hardware.go_up()
return
hardware.display("Calibrating...")
hue_calib = calibrate_hue(camera_fn, detector_fn, is_menu_down)
if hue_calib.early_quit:
hardware.go_up()
return
# Calibrate position
pos_calib = calibrate_pos(camera_fn, detector_fn, hue_calib.hue, is_menu_down)
if pos_calib.early_quit:
hardware.go_up()
return
# Save calibration
calibration_dict = read_calibration(calibration_file)
calibration_dict["ball_hue"] = hue_calib.hue
calibration_dict["plate_offsets"] = pos_calib.position
x_offset, y_offset = pos_calib.position
write_calibration(calibration_dict)
# Update the environment to use the new calibration
# Warning! This mutates the state!
hardware.reset_calibration(calibration_file=calibration_file)
if pos_calib.success and hue_calib.success: # and servo_calib.success:
hardware.display(f"Ok! Ball hue={hue_calib.hue}\nClick menu...", scrolling=True)
elif not (pos_calib.success or hue_calib.success): # or servo_calib.success):
hardware.display("Calibration failed\nClick menu...", scrolling=True)
else:
hue_str = (
f"Hue calib:\nsuccessful\nBall hue = {hue_calib.hue}\n\n"
if hue_calib.success
else "Hue calib:\nfailed\n\n"
)
pos_str = (
f"Position \ncalib:\nsuccessful\nPosition = \n({100*x_offset:.1f}, {100*y_offset:.1f})cm\n\n"
if hue_calib.success
else "(X, Y) calib:\nfailed\n\n"
)
hardware.display(
"Calibration\npartially succeeded\n\n"
+ hue_str
+ pos_str
+ "Click menu\nto return...\n",
scrolling=True,
)
# When the calibration is complete, save the image of what the moab camera
# sees (useful for debugging when the hue calibration fails)
# Have a nice filename with the time and whether it succeeded or failed
time_of_day = datetime.datetime.now().strftime("%H%M%S")
filename = "/tmp/hue"
if hue_calib.success:
filename += f".{hue_calib.hue}.{time_of_day}.jpg"
else:
filename += f".fail.{time_of_day}.jpg"
img_frame, _ = camera_fn()
# Huemask keeps an internal cache. By sending a new hue (hue + 1) invalidates
# the cache. TODO: added this while searching for a state bug
detector_fn(img_frame, hue=hue_calib.hue + 1, debug=True, filename=filename)
hardware.go_up()
def run_servo_calibration(env, pid_fn, calibration_file):
# Warning: servo calib works but doesn't currently give a good calibration
raise NotImplementedError
# Get some hidden things from env
hardware = env.hardware
camera_fn = hardware.camera
detector_fn = hardware.detector
# Start the calibration with uncalibrated servos
hardware.servo_offsets = (0, 0, 0)
# lift plate up fist
hardware.set_angles(0, 0)
# Calibrate servo offsets
hardware.display(
"Calibarating\nservos\n\n"
"Place ball in\ncenter without\n stand.\n\n"
"Click joystick\nto continue.",
scrolling=True,
)
buttons = wait_for_joystick_or_menu(hardware)
if buttons.menu_button: # Early quit
hardware.go_up()
return
hardware.display("Calibrating\nservos...", scrolling=True)
servo_calib = calibrate_servo_offsets(pid_fn, env)
# Save calibration
calibration_dict = read_calibration(calibration_file)
calibration_dict["servo_offsets"] = servo_calib.servos
s1, s2, s3 = servo_calib.servos
write_calibration(calibration_dict)
# Update the environment to use the new calibration
# Warning! This mutates the state!
env.reset_calibration(calibration_file=calibration_file)
if servo_calib.success:
hardware.display(
f"servo offsets =\n({s1:.2f}, {s2:.2f}, {s3:.2f})\n\n"
"Click menu\nto return...\n",
scrolling=True,
)
print(f"servo offsets =\n({s1:.2f}, {s2:.2f}, {s3:.2f})")
else:
hardware.display(
"Calibration\nfailed\n\nClick menu\nto return...", scrolling=True
)
hardware.go_up()
def calibrate_controller(**kwargs):
run_calibration(
kwargs["env"],
kwargs["pid_fn"],
kwargs["calibration_file"],
)
def wait_for_menu_and_stream():
# Get some hidden things from env to be able to stream the calib results
env = kwargs["env"]
hardware = env.hardware
camera_fn = hardware.camera
detector_fn = hardware.detector
menu_button = False
while not menu_button:
img_frame, _ = camera_fn()
detector_fn(img_frame, debug=True) # Save to streaming
menu, joy, _, _ = hardware.get_buttons()
if menu or joy:
break
env.hardware.go_up()
return wait_for_menu_and_stream
def main(calibration_file, frequency=30, debug=True):
pid_fn = pid_controller(frequency=frequency)
with MoabEnv(frequency=frequency, debug=debug) as env:
env.step((0, 0))
time.sleep(0.2)
env.hardware.enable_servos()
time.sleep(0.2)
env.hardware.set_servos(133, 133, 133)
run_calibration(env, pid_fn, calibration_file)
env.hardware.disable_servos()
if __name__ == "__main__": # Parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-f", "--file", default="bot.json", type=str)
args, _ = parser.parse_known_args()
main(args.file, debug=args.debug)
| [
"numpy.radians",
"time.sleep",
"numpy.arctan2",
"numpy.sin",
"logging.info",
"hardware.plate_angles_to_servo_positions",
"numpy.mean",
"argparse.ArgumentParser",
"env.MoabEnv",
"numpy.linspace",
"numpy.degrees",
"numpy.abs",
"controllers.pid_controller",
"logging.warning",
"os.path.isfile",
"numpy.cos",
"time.time",
"dataclasses.astuple",
"datetime.datetime.now",
"common.Vector2",
"json.load",
"json.dump"
]
| [((3843, 3885), 'logging.warning', 'log.warning', (['f"""Offset calibration failed."""'], {}), "(f'Offset calibration failed.')\n", (3854, 3885), True, 'import logging as log\n'), ((4006, 4017), 'time.time', 'time.time', ([], {}), '()\n', (4015, 4017), False, 'import time\n'), ((4031, 4044), 'common.Vector2', 'Vector2', (['(0)', '(0)'], {}), '(0, 0)\n', (4038, 4044), False, 'from common import Vector2\n'), ((5574, 5615), 'logging.warning', 'log.warning', (['f"""Servo calibration failed."""'], {}), "(f'Servo calibration failed.')\n", (5585, 5615), True, 'import logging as log\n'), ((5717, 5749), 'logging.info', 'log.info', (['"""Writing calibration."""'], {}), "('Writing calibration.')\n", (5725, 5749), True, 'import logging as log\n'), ((6017, 6058), 'logging.info', 'log.info', (['"""Reading previous calibration."""'], {}), "('Reading previous calibration.')\n", (6025, 6058), True, 'import logging as log\n'), ((6067, 6099), 'os.path.isfile', 'os.path.isfile', (['calibration_file'], {}), '(calibration_file)\n', (6081, 6099), False, 'import os\n'), ((12588, 12623), 'controllers.pid_controller', 'pid_controller', ([], {'frequency': 'frequency'}), '(frequency=frequency)\n', (12602, 12623), False, 'from controllers import pid_controller\n'), ((13005, 13030), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13028, 13030), False, 'import argparse\n'), ((1801, 1842), 'numpy.linspace', 'np.linspace', (['hue_low', 'hue_high', 'hue_steps'], {}), '(hue_low, hue_high, hue_steps)\n', (1812, 1842), True, 'import numpy as np\n'), ((2574, 2599), 'numpy.radians', 'np.radians', (['detected_hues'], {}), '(detected_hues)\n', (2584, 2599), True, 'import numpy as np\n'), ((2762, 2792), 'numpy.arctan2', 'np.arctan2', (['sin_mean', 'cos_mean'], {}), '(sin_mean, cos_mean)\n', (2772, 2792), True, 'import numpy as np\n'), ((3080, 3119), 'logging.warning', 'log.warning', (['f"""Hue calibration failed."""'], {}), "(f'Hue calibration failed.')\n", (3091, 3119), True, 'import logging as log\n'), ((4279, 4290), 'time.time', 'time.time', ([], {}), '()\n', (4288, 4290), False, 'import time\n'), ((5831, 5888), 'logging.info', 'log.info', (['f"""Creating calibration file {calibration_file}"""'], {}), "(f'Creating calibration file {calibration_file}')\n", (5839, 5888), True, 'import logging as log\n'), ((5897, 5959), 'json.dump', 'json.dump', (['calibration_dict', 'outfile'], {'indent': '(4)', 'sort_keys': '(True)'}), '(calibration_dict, outfile, indent=4, sort_keys=True)\n', (5906, 5959), False, 'import json\n'), ((6681, 6703), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (6691, 6703), False, 'import time\n'), ((6849, 6871), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (6859, 6871), False, 'import time\n'), ((12634, 12675), 'env.MoabEnv', 'MoabEnv', ([], {'frequency': 'frequency', 'debug': 'debug'}), '(frequency=frequency, debug=debug)\n', (12641, 12675), False, 'from env import MoabEnv\n'), ((12717, 12732), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (12727, 12732), False, 'import time\n'), ((12778, 12793), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (12788, 12793), False, 'import time\n'), ((772, 785), 'dataclasses.astuple', 'astuple', (['self'], {}), '(self)\n', (779, 785), False, 'from dataclasses import dataclass, astuple\n'), ((1020, 1033), 'dataclasses.astuple', 'astuple', (['self'], {}), '(self)\n', (1027, 1033), False, 'from dataclasses import dataclass, astuple\n'), ((1281, 1294), 'dataclasses.astuple', 'astuple', 
(['self'], {}), '(self)\n', (1288, 1294), False, 'from dataclasses import dataclass, astuple\n'), ((1464, 1473), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1470, 1473), True, 'import numpy as np\n'), ((1502, 1511), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (1508, 1511), True, 'import numpy as np\n'), ((2251, 2367), 'logging.info', 'log.info', (['f"""hue={hue:0.3f}, ball_detected={ball_detected}, (x, y)={x:0.3f} {y:0.3f}, radius={radius:0.3f}"""'], {}), "(\n f'hue={hue:0.3f}, ball_detected={ball_detected}, (x, y)={x:0.3f} {y:0.3f}, radius={radius:0.3f}'\n )\n", (2259, 2367), True, 'import logging as log\n'), ((2625, 2650), 'numpy.sin', 'np.sin', (['detected_hues_rad'], {}), '(detected_hues_rad)\n', (2631, 2650), True, 'import numpy as np\n'), ((2652, 2677), 'numpy.cos', 'np.cos', (['detected_hues_rad'], {}), '(detected_hues_rad)\n', (2658, 2677), True, 'import numpy as np\n'), ((2707, 2721), 'numpy.mean', 'np.mean', (['sines'], {}), '(sines)\n', (2714, 2721), True, 'import numpy as np\n'), ((2723, 2739), 'numpy.mean', 'np.mean', (['cosines'], {}), '(cosines)\n', (2730, 2739), True, 'import numpy as np\n'), ((2811, 2834), 'numpy.degrees', 'np.degrees', (['avg_hue_rad'], {}), '(avg_hue_rad)\n', (2821, 2834), True, 'import numpy as np\n'), ((3700, 3764), 'logging.info', 'log.info', (['f"""Offset calibrated: [{x_offset:.3f}, {y_offset:.3f}]"""'], {}), "(f'Offset calibrated: [{x_offset:.3f}, {y_offset:.3f}]')\n", (3708, 3764), True, 'import logging as log\n'), ((6179, 6191), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6188, 6191), False, 'import json\n'), ((9582, 9605), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9603, 9605), False, 'import datetime\n'), ((4707, 4732), 'numpy.abs', 'np.abs', (['vel_x_hist[-100:]'], {}), '(vel_x_hist[-100:])\n', (4713, 4732), True, 'import numpy as np\n'), ((4767, 4792), 'numpy.abs', 'np.abs', (['vel_y_hist[-100:]'], {}), '(vel_y_hist[-100:])\n', (4773, 4792), True, 'import numpy as np\n'), ((5247, 5287), 'hardware.plate_angles_to_servo_positions', 'plate_angles_to_servo_positions', (['*action'], {}), '(*action)\n', (5278, 5287), False, 'from hardware import plate_angles_to_servo_positions\n'), ((5330, 5367), 'hardware.plate_angles_to_servo_positions', 'plate_angles_to_servo_positions', (['(0)', '(0)'], {}), '(0, 0)\n', (5361, 5367), False, 'from hardware import plate_angles_to_servo_positions\n')] |
from datetime import datetime, timedelta
from django.test import TestCase
from django.test.utils import override_settings
from marketing.tasks import (
delete_multiple_contacts_tasks,
list_all_bounces_unsubscribes,
run_all_campaigns,
run_campaign,
send_campaign_email_to_admin_contact,
send_scheduled_campaigns,
upload_csv_file,
)
from marketing.tests import TestMarketingModel
class TestCeleryTasks(TestMarketingModel, TestCase):
@override_settings(
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND="memory",
)
def test_celery_tasks(self):
task = run_campaign.apply(
(self.campaign.id,),
)
self.assertEqual("SUCCESS", task.state)
self.campaign.reply_to_email = None
self.campaign.save()
task = run_campaign.apply(
(self.campaign.id,),
)
self.assertEqual("SUCCESS", task.state)
self.campaign.schedule_date_time = datetime.now()
self.campaign.save()
task = run_all_campaigns.apply()
self.assertEqual("SUCCESS", task.state)
task = list_all_bounces_unsubscribes.apply()
self.assertEqual("SUCCESS", task.state)
task = send_scheduled_campaigns.apply()
self.assertEqual("SUCCESS", task.state)
task = delete_multiple_contacts_tasks.apply(
(self.contact_list.id,),
)
self.assertEqual("SUCCESS", task.state)
task = send_campaign_email_to_admin_contact.apply(
(self.campaign.id,),
)
self.assertEqual("SUCCESS", task.state)
valid_rows = [
{
"company name": "company_name_1",
"email": "<EMAIL>",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
{
"company name": "company_name_2",
"email": "<EMAIL>",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
{
"company name": "company_name_3",
"email": "<EMAIL>",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
{
"company name": "company_name_4",
"email": "<EMAIL>",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
]
invalid_rows = [
{
"company name": "company_name_1",
"email": "useremail.com",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
{
"company name": "company_name_2",
"email": "user2@email",
"first name": "first_name",
"last name": "last_name",
"city": "Hyderabad",
"state": "Telangana",
},
]
task = upload_csv_file.apply(
(
valid_rows,
invalid_rows,
self.user.id,
[
self.contact_list.id,
],
self.company.id,
),
)
self.assertEqual("SUCCESS", task.state)
| [
"marketing.tasks.send_scheduled_campaigns.apply",
"marketing.tasks.send_campaign_email_to_admin_contact.apply",
"marketing.tasks.run_campaign.apply",
"marketing.tasks.run_all_campaigns.apply",
"datetime.datetime.now",
"marketing.tasks.delete_multiple_contacts_tasks.apply",
"django.test.utils.override_settings",
"marketing.tasks.list_all_bounces_unsubscribes.apply",
"marketing.tasks.upload_csv_file.apply"
]
| [((468, 581), 'django.test.utils.override_settings', 'override_settings', ([], {'CELERY_EAGER_PROPAGATES_EXCEPTIONS': '(True)', 'CELERY_ALWAYS_EAGER': '(True)', 'BROKER_BACKEND': '"""memory"""'}), "(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,\n CELERY_ALWAYS_EAGER=True, BROKER_BACKEND='memory')\n", (485, 581), False, 'from django.test.utils import override_settings\n'), ((657, 696), 'marketing.tasks.run_campaign.apply', 'run_campaign.apply', (['(self.campaign.id,)'], {}), '((self.campaign.id,))\n', (675, 696), False, 'from marketing.tasks import delete_multiple_contacts_tasks, list_all_bounces_unsubscribes, run_all_campaigns, run_campaign, send_campaign_email_to_admin_contact, send_scheduled_campaigns, upload_csv_file\n'), ((858, 897), 'marketing.tasks.run_campaign.apply', 'run_campaign.apply', (['(self.campaign.id,)'], {}), '((self.campaign.id,))\n', (876, 897), False, 'from marketing.tasks import delete_multiple_contacts_tasks, list_all_bounces_unsubscribes, run_all_campaigns, run_campaign, send_campaign_email_to_admin_contact, send_scheduled_campaigns, upload_csv_file\n'), ((1013, 1027), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1025, 1027), False, 'from datetime import datetime, timedelta\n'), ((1073, 1098), 'marketing.tasks.run_all_campaigns.apply', 'run_all_campaigns.apply', ([], {}), '()\n', (1096, 1098), False, 'from marketing.tasks import delete_multiple_contacts_tasks, list_all_bounces_unsubscribes, run_all_campaigns, run_campaign, send_campaign_email_to_admin_contact, send_scheduled_campaigns, upload_csv_file\n'), ((1163, 1200), 'marketing.tasks.list_all_bounces_unsubscribes.apply', 'list_all_bounces_unsubscribes.apply', ([], {}), '()\n', (1198, 1200), False, 'from marketing.tasks import delete_multiple_contacts_tasks, list_all_bounces_unsubscribes, run_all_campaigns, run_campaign, send_campaign_email_to_admin_contact, send_scheduled_campaigns, upload_csv_file\n'), ((1265, 1297), 'marketing.tasks.send_scheduled_campaigns.apply', 'send_scheduled_campaigns.apply', ([], {}), '()\n', (1295, 1297), False, 'from marketing.tasks import delete_multiple_contacts_tasks, list_all_bounces_unsubscribes, run_all_campaigns, run_campaign, send_campaign_email_to_admin_contact, send_scheduled_campaigns, upload_csv_file\n'), ((1362, 1423), 'marketing.tasks.delete_multiple_contacts_tasks.apply', 'delete_multiple_contacts_tasks.apply', (['(self.contact_list.id,)'], {}), '((self.contact_list.id,))\n', (1398, 1423), False, 'from marketing.tasks import delete_multiple_contacts_tasks, list_all_bounces_unsubscribes, run_all_campaigns, run_campaign, send_campaign_email_to_admin_contact, send_scheduled_campaigns, upload_csv_file\n'), ((1511, 1574), 'marketing.tasks.send_campaign_email_to_admin_contact.apply', 'send_campaign_email_to_admin_contact.apply', (['(self.campaign.id,)'], {}), '((self.campaign.id,))\n', (1553, 1574), False, 'from marketing.tasks import delete_multiple_contacts_tasks, list_all_bounces_unsubscribes, run_all_campaigns, run_campaign, send_campaign_email_to_admin_contact, send_scheduled_campaigns, upload_csv_file\n'), ((3397, 3506), 'marketing.tasks.upload_csv_file.apply', 'upload_csv_file.apply', (['(valid_rows, invalid_rows, self.user.id, [self.contact_list.id], self.\n company.id)'], {}), '((valid_rows, invalid_rows, self.user.id, [self.\n contact_list.id], self.company.id))\n', (3418, 3506), False, 'from marketing.tasks import delete_multiple_contacts_tasks, list_all_bounces_unsubscribes, run_all_campaigns, run_campaign, send_campaign_email_to_admin_contact, 
send_scheduled_campaigns, upload_csv_file\n')] |
import unittest
from JorGpi.pickup.pickup import SmartPickUp,Reference,CommandLineOptions
class TestPickupIron(unittest.TestCase):
@staticmethod
def options(*args):
return CommandLineOptions(*args)
def test_iron_001(self):
_input = "test -R _VASP/Fe/noFlip -D _VASP/Fe/flip00000 -E Fe -J1 -U mRy".split(" ")
options = TestPickupIron.options(*_input)
elements = ''.join(options('elements'))
self.assertEqual(elements,'Fe$')
ref = Reference(options('reference')+"/POSCAR")
self.assertEqual(ref(),0)
self.assertEqual(options('number_of_interactions'),1)
pickerUpper = SmartPickUp(options('number_of_interactions'),elements)
pickerUpper.read(options('reference'),*options('directories'),reference=ref())
self.assertEqual(options('units'),'mRy')
_J_ij = pickerUpper.solve(units=options('units')).flatten()
self.assertEqual(_J_ij[0],1.1861042008301703)
self.assertEqual(_J_ij[1],4.157645364906014)
| [
"JorGpi.pickup.pickup.CommandLineOptions"
]
| [((190, 215), 'JorGpi.pickup.pickup.CommandLineOptions', 'CommandLineOptions', (['*args'], {}), '(*args)\n', (208, 215), False, 'from JorGpi.pickup.pickup import SmartPickUp, Reference, CommandLineOptions\n')] |
# Copyright 2018 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# RUN: "%python" "%s"
#
import pydffi
import sys
F = pydffi.FFI()
CU = F.cdef('''
#include <stdint.h>
typedef int32_t MyInt;
typedef struct {
int a;
int b;
} A;
''')
assert(CU.types.MyInt == F.Int32Ty)
assert(isinstance(CU.types.A, pydffi.StructType))
| [
"pydffi.FFI"
]
| [((639, 651), 'pydffi.FFI', 'pydffi.FFI', ([], {}), '()\n', (649, 651), False, 'import pydffi\n')] |